diff --git a/.cargo/config.in b/.cargo/config.in index 1f85a4361f54..c72c7fd558bd 100644 --- a/.cargo/config.in +++ b/.cargo/config.in @@ -30,14 +30,14 @@ git = "https://github.com/gfx-rs/d3d12-rs" rev = "b940b1d71" replace-with = "vendored-sources" -[source."git+https://github.com/gfx-rs/naga?rev=b99d58ea435090e561377949f428bce2c18451bb"] +[source."git+https://github.com/gfx-rs/naga?rev=76003dc0035d53a474d366dcdf49d2e4d12e921f"] git = "https://github.com/gfx-rs/naga" -rev = "b99d58ea435090e561377949f428bce2c18451bb" +rev = "76003dc0035d53a474d366dcdf49d2e4d12e921f" replace-with = "vendored-sources" -[source."git+https://github.com/gfx-rs/wgpu?rev=f71a1bc736fde37509262ca03e91d8f56a13aeb5"] +[source."git+https://github.com/gfx-rs/wgpu?rev=dcad7dfba92dd85c3ca21bb553a61834e01b04f5"] git = "https://github.com/gfx-rs/wgpu" -rev = "f71a1bc736fde37509262ca03e91d8f56a13aeb5" +rev = "dcad7dfba92dd85c3ca21bb553a61834e01b04f5" replace-with = "vendored-sources" [source."git+https://github.com/glandium/warp?rev=4af45fae95bc98b0eba1ef0db17e1dac471bb23d"] diff --git a/Cargo.lock b/Cargo.lock index f5a6cb87bf5e..9732d97842fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -140,9 +140,9 @@ dependencies = [ [[package]] name = "ash" -version = "0.37.2+1.3.238" +version = "0.37.3+1.3.251" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28bf19c1f0a470be5fbf7522a308a05df06610252c5bcf5143e1b23f629a9a03" +checksum = "39e9c3835d686b0a6084ab4234fcd1b07dbf6e4767dce60874b12356a25ecd4a" dependencies = [ "libloading", ] @@ -831,9 +831,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "core-graphics" -version = "0.22.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2581bbab3b8ffc6fcbd550bf46c355135d16e9ff2a6ea032ad6b9bf1d7efe4fb" +checksum = "506752f2b58723339d41d013009be97e549860785a366e2a5e75dfbd9725b48e" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -844,21 +844,20 @@ dependencies = [ [[package]] name = "core-graphics-types" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a68b68b3446082644c91ac778bf50cd4104bfb002b5a6a7c44cca5a2c70788b" +checksum = "2bb142d41022986c1d8ff29103a1411c8a3dfad3552f87a4f8dc50d61d4f4e33" dependencies = [ "bitflags 1.3.2", "core-foundation", - "foreign-types", "libc", ] [[package]] name = "core-text" -version = "19.2.0" +version = "20.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d74ada66e07c1cefa18f8abfba765b486f250de2e4a999e5727fc0dd4b4a25" +checksum = "02083d15989d6247553e1a63753561555a72b2038bba1e4239db70602a798742" dependencies = [ "core-foundation", "core-graphics", @@ -1784,18 +1783,30 @@ dependencies = [ [[package]] name = "foreign-types" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" dependencies = [ + "foreign-types-macros", "foreign-types-shared", ] [[package]] -name = "foreign-types-shared" -version = "0.1.1" +name = "foreign-types-macros" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + 
"syn 2.0.18", +] + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" [[package]] name = "form_urlencoded" @@ -2312,21 +2323,21 @@ dependencies = [ [[package]] name = "gpu-alloc" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc59e5f710e310e76e6707f86c561dd646f69a8876da9131703b2f717de818d" +checksum = "fbcd2dba93594b227a1f57ee09b8b9da8892c34d55aa332e034a228d0fe6a171" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.999.999", "gpu-alloc-types", ] [[package]] name = "gpu-alloc-types" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54804d0d6bc9d7f26db4eaec1ad10def69b599315f487d32c334a80d1efe67a5" +checksum = "98ff03b468aa837d70984d55f5d3f846f6ec31fe34bbb97c4f85219caeee1ca4" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.999.999", ] [[package]] @@ -3170,9 +3181,9 @@ dependencies = [ [[package]] name = "metal" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de11355d1f6781482d027a3b4d4de7825dcedb197bf573e0596d00008402d060" +checksum = "550b24b0cd4cf923f36bae78eca457b3a10d8a6a14a9c84cb2687b527e6a84af" dependencies = [ "bitflags 1.3.2", "block", @@ -3180,6 +3191,7 @@ dependencies = [ "foreign-types", "log", "objc", + "paste", ] [[package]] @@ -3549,10 +3561,10 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664" [[package]] name = "naga" version = "0.12.0" -source = "git+https://github.com/gfx-rs/naga?rev=b99d58ea435090e561377949f428bce2c18451bb#b99d58ea435090e561377949f428bce2c18451bb" +source = "git+https://github.com/gfx-rs/naga?rev=76003dc0035d53a474d366dcdf49d2e4d12e921f#76003dc0035d53a474d366dcdf49d2e4d12e921f" dependencies = [ "bit-set", - "bitflags 1.3.2", + "bitflags 2.999.999", "codespan-reporting", "hexf-parse", "indexmap", @@ -6084,7 +6096,7 @@ dependencies = [ [[package]] name = "wgpu-core" version = "0.16.0" -source = "git+https://github.com/gfx-rs/wgpu?rev=f71a1bc736fde37509262ca03e91d8f56a13aeb5#f71a1bc736fde37509262ca03e91d8f56a13aeb5" +source = "git+https://github.com/gfx-rs/wgpu?rev=dcad7dfba92dd85c3ca21bb553a61834e01b04f5#dcad7dfba92dd85c3ca21bb553a61834e01b04f5" dependencies = [ "arrayvec", "bit-vec", @@ -6107,7 +6119,7 @@ dependencies = [ [[package]] name = "wgpu-hal" version = "0.16.0" -source = "git+https://github.com/gfx-rs/wgpu?rev=f71a1bc736fde37509262ca03e91d8f56a13aeb5#f71a1bc736fde37509262ca03e91d8f56a13aeb5" +source = "git+https://github.com/gfx-rs/wgpu?rev=dcad7dfba92dd85c3ca21bb553a61834e01b04f5#dcad7dfba92dd85c3ca21bb553a61834e01b04f5" dependencies = [ "android_system_properties", "arrayvec", @@ -6117,7 +6129,6 @@ dependencies = [ "block", "core-graphics-types", "d3d12", - "foreign-types", "gpu-alloc", "gpu-descriptor", "js-sys", @@ -6144,7 +6155,7 @@ dependencies = [ [[package]] name = "wgpu-types" version = "0.16.0" -source = "git+https://github.com/gfx-rs/wgpu?rev=f71a1bc736fde37509262ca03e91d8f56a13aeb5#f71a1bc736fde37509262ca03e91d8f56a13aeb5" +source = "git+https://github.com/gfx-rs/wgpu?rev=dcad7dfba92dd85c3ca21bb553a61834e01b04f5#dcad7dfba92dd85c3ca21bb553a61834e01b04f5" dependencies = [ "bitflags 2.999.999", "js-sys", diff --git a/build/rust/bitflags/Cargo.toml b/build/rust/bitflags/Cargo.toml index 74c90b69231f..e3334b29985c 100644 --- 
a/build/rust/bitflags/Cargo.toml +++ b/build/rust/bitflags/Cargo.toml @@ -7,5 +7,8 @@ license = "MIT/Apache-2.0" [lib] path = "lib.rs" +[features] +serde = [] + [dependencies.bitflags] version = "1.0" diff --git a/gfx/webrender_bindings/Cargo.toml b/gfx/webrender_bindings/Cargo.toml index 69a7eef1c058..f6b3fd637fe6 100644 --- a/gfx/webrender_bindings/Cargo.toml +++ b/gfx/webrender_bindings/Cargo.toml @@ -35,6 +35,6 @@ winapi = "0.3" [target.'cfg(target_os = "macos")'.dependencies] core-foundation = "0.9" -core-graphics = "0.22" -foreign-types = "0.3.0" +core-graphics = "0.23" +foreign-types = "0.5.0" objc = "0.2" diff --git a/gfx/wgpu_bindings/Cargo.toml b/gfx/wgpu_bindings/Cargo.toml index de3cb96af9e3..0bbc968ca6f8 100644 --- a/gfx/wgpu_bindings/Cargo.toml +++ b/gfx/wgpu_bindings/Cargo.toml @@ -17,7 +17,7 @@ default = [] [dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "f71a1bc736fde37509262ca03e91d8f56a13aeb5" +rev = "dcad7dfba92dd85c3ca21bb553a61834e01b04f5" #Note: "replay" shouldn't ideally be needed, # but it allows us to serialize everything across IPC. features = ["replay", "trace", "serial-pass", "strict_asserts", "wgsl"] @@ -27,32 +27,32 @@ features = ["replay", "trace", "serial-pass", "strict_asserts", "wgsl"] [target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "f71a1bc736fde37509262ca03e91d8f56a13aeb5" +rev = "dcad7dfba92dd85c3ca21bb553a61834e01b04f5" features = ["metal"] # We want the wgpu-core Direct3D backends on Windows. [target.'cfg(windows)'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "f71a1bc736fde37509262ca03e91d8f56a13aeb5" +rev = "dcad7dfba92dd85c3ca21bb553a61834e01b04f5" features = ["dx11", "dx12"] # We want the wgpu-core Vulkan backend on Linux and Windows. 
[target.'cfg(any(windows, all(unix, not(any(target_os = "macos", target_os = "ios")))))'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "f71a1bc736fde37509262ca03e91d8f56a13aeb5" +rev = "dcad7dfba92dd85c3ca21bb553a61834e01b04f5" features = ["vulkan"] [dependencies.wgt] package = "wgpu-types" git = "https://github.com/gfx-rs/wgpu" -rev = "f71a1bc736fde37509262ca03e91d8f56a13aeb5" +rev = "dcad7dfba92dd85c3ca21bb553a61834e01b04f5" [dependencies.wgh] package = "wgpu-hal" git = "https://github.com/gfx-rs/wgpu" -rev = "f71a1bc736fde37509262ca03e91d8f56a13aeb5" +rev = "dcad7dfba92dd85c3ca21bb553a61834e01b04f5" [dependencies] bincode = "1" diff --git a/gfx/wgpu_bindings/moz.yaml b/gfx/wgpu_bindings/moz.yaml index 87eadf58db93..a4bf36473b43 100644 --- a/gfx/wgpu_bindings/moz.yaml +++ b/gfx/wgpu_bindings/moz.yaml @@ -20,11 +20,11 @@ origin: # Human-readable identifier for this version/release # Generally "version NNN", "tag SSS", "bookmark SSS" - release: commit f71a1bc736fde37509262ca03e91d8f56a13aeb5 + release: commit dcad7dfba92dd85c3ca21bb553a61834e01b04f5 # Revision to pull in # Must be a long or short commit SHA (long preferred) - revision: f71a1bc736fde37509262ca03e91d8f56a13aeb5 + revision: dcad7dfba92dd85c3ca21bb553a61834e01b04f5 license: ['MIT', 'Apache-2.0'] diff --git a/gfx/wgpu_bindings/src/client.rs b/gfx/wgpu_bindings/src/client.rs index b929cd397074..cb0dbcc5b45d 100644 --- a/gfx/wgpu_bindings/src/client.rs +++ b/gfx/wgpu_bindings/src/client.rs @@ -7,7 +7,7 @@ use crate::{ DropAction, ImageDataLayout, ImplicitLayout, QueueWriteAction, RawString, TextureAction, }; -use wgc::{hub::IdentityManager, id}; +use wgc::{identity::IdentityManager, id}; use wgt::{Backend, TextureFormat}; pub use wgc::command::{compute_ffi::*, render_ffi::*}; diff --git a/gfx/wgpu_bindings/src/identity.rs b/gfx/wgpu_bindings/src/identity.rs index 7d608b275b79..7a014bf9cde0 100644 --- a/gfx/wgpu_bindings/src/identity.rs +++ b/gfx/wgpu_bindings/src/identity.rs @@ -13,7 +13,7 @@ pub struct IdentityRecycler { kind: &'static str, } -impl wgc::hub::IdentityHandler +impl wgc::identity::IdentityHandler for IdentityRecycler { type Input = I; @@ -51,7 +51,7 @@ pub struct IdentityRecyclerFactory { free_surface: extern "C" fn(id::SurfaceId, FactoryParam), } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -61,7 +61,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -71,7 +71,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -81,7 +81,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecycler } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -91,7 +91,7 @@ impl wgc::hub::IdentityHandlerFactory for 
IdentityRecyclerFa } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -101,7 +101,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecycle } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -111,7 +111,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFacto } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -121,7 +121,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerF } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -131,7 +131,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFa } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -141,7 +141,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecycler } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -151,7 +151,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecycle } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -161,7 +161,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactor } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -171,7 +171,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -181,7 +181,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerF } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -191,7 +191,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -201,7 +201,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFac } } } -impl wgc::hub::IdentityHandlerFactory for 
IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -211,7 +211,7 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory } } } -impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory { +impl wgc::identity::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; fn spawn(&self) -> Self::Filter { IdentityRecycler { @@ -222,4 +222,4 @@ impl wgc::hub::IdentityHandlerFactory for IdentityRecyclerFactory } } -impl wgc::hub::GlobalIdentityHandlerFactory for IdentityRecyclerFactory {} +impl wgc::identity::GlobalIdentityHandlerFactory for IdentityRecyclerFactory {} diff --git a/gfx/wgpu_bindings/src/server.rs b/gfx/wgpu_bindings/src/server.rs index 912495d3075a..90c496a448c3 100644 --- a/gfx/wgpu_bindings/src/server.rs +++ b/gfx/wgpu_bindings/src/server.rs @@ -88,10 +88,10 @@ impl ErrorBuffer { } // hide wgc's global in private -pub struct Global(wgc::hub::Global<IdentityRecyclerFactory>); +pub struct Global(wgc::global::Global<IdentityRecyclerFactory>); impl std::ops::Deref for Global { - type Target = wgc::hub::Global<IdentityRecyclerFactory>; + type Target = wgc::global::Global<IdentityRecyclerFactory>; fn deref(&self) -> &Self::Target { &self.0 } @@ -110,7 +110,7 @@ pub extern "C" fn wgpu_server_new(factory: IdentityRecyclerFactory) -> *mut Glob ); wgc::instance::parse_backends_from_comma_list(&backends_pref) }; - let global = Global(wgc::hub::Global::new( + let global = Global(wgc::global::Global::new( "wgpu", factory, wgt::InstanceDescriptor { @@ -448,7 +448,7 @@ pub extern "C" fn wgpu_server_buffer_drop(global: &Global, self_id: id::BufferId } impl Global { - fn device_action<A: wgc::hub::HalApi>( + fn device_action<A: wgc::hal_api::HalApi>( &self, self_id: id::DeviceId, action: DeviceAction, @@ -548,7 +548,7 @@ impl Global { } } - fn texture_action<A: wgc::hub::HalApi>( + fn texture_action<A: wgc::hal_api::HalApi>( &self, self_id: id::TextureId, action: TextureAction, @@ -564,7 +564,7 @@ impl Global { } } - fn command_encoder_action<A: wgc::hub::HalApi>( + fn command_encoder_action<A: wgc::hal_api::HalApi>( &self, self_id: id::CommandEncoderId, action: CommandEncoderAction, diff --git a/gfx/wr/wr_glyph_rasterizer/Cargo.toml b/gfx/wr/wr_glyph_rasterizer/Cargo.toml index 1748c54fc47d..ad74273b8243 100644 --- a/gfx/wr/wr_glyph_rasterizer/Cargo.toml +++ b/gfx/wr/wr_glyph_rasterizer/Cargo.toml @@ -46,6 +46,6 @@ dwrote = "0.11" [target.'cfg(target_os = "macos")'.dependencies] core-foundation = "0.9.2" -core-graphics = "0.22.3" -core-text = { version = "19.2", default-features = false } +core-graphics = "0.23" +core-text = { version = "20", default-features = false } objc = "0.2" diff --git a/gfx/wr/wrench/Cargo.toml b/gfx/wr/wrench/Cargo.toml index 1c434b789464..dfdb3ba57dfd 100644 --- a/gfx/wr/wrench/Cargo.toml +++ b/gfx/wr/wrench/Cargo.toml @@ -40,7 +40,7 @@ default-features = false features = ["png"] [target.'cfg(target_os = "macos")'.dependencies] -core-graphics = "0.22" +core-graphics = "0.23" core-foundation = "0.9" [features] diff --git a/supply-chain/audits.toml b/supply-chain/audits.toml index 600499af6a74..4fdaedfe56c5 100644 --- a/supply-chain/audits.toml +++ b/supply-chain/audits.toml @@ -471,6 +471,11 @@ who = "Nicolas Silva <nical@fastmail.com>" criteria = "safe-to-deploy" delta = "0.37.1+1.3.235 -> 0.37.2+1.3.238" +[[audits.ash]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.37.2+1.3.238 -> 0.37.3+1.3.251" + [[audits.ashmem]] who = "Matthew Gregan <kinetik@flim.org>" criteria = "safe-to-deploy" @@ -601,6 +606,11 @@ who = "Nicolas Silva <nical@fastmail.com>" criteria = "safe-to-deploy" delta = "2.0.2 -> 2.1.0" +[[audits.bitflags]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" 
+criteria = "safe-to-deploy" +delta = "2.2.1 -> 2.3.2" + [[audits.block-buffer]] who = "Mike Hommey <mh+mozilla@glandium.org>" criteria = "safe-to-deploy" @@ -720,6 +730,21 @@ who = "Mike Hommey <mh+mozilla@glandium.org>" criteria = "safe-to-run" delta = "0.16.0 -> 0.16.2" +[[audits.core-graphics]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.22.3 -> 0.23.0" + +[[audits.core-graphics-types]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.1.1 -> 0.1.2" + +[[audits.core-text]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "19.2.0 -> 20.0.0" + [[audits.coreaudio-sys]] who = "Mike Hommey <mh+mozilla@glandium.org>" criteria = "safe-to-deploy" @@ -1277,6 +1302,21 @@ criteria = "safe-to-deploy" version = "1.0.7" notes = "Simple hasher implementation with no unsafe code." +[[audits.foreign-types]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.3.2 -> 0.5.0" + +[[audits.foreign-types-macros]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +version = "0.2.3" + +[[audits.foreign-types-shared]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.1.1 -> 0.3.1" + [[audits.fs-err]] who = "Mike Hommey <mh+mozilla@glandium.org>" criteria = "safe-to-deploy" @@ -1546,6 +1586,16 @@ criteria = "safe-to-deploy" delta = "0.5.4 -> 0.6.0" notes = "Mostly bug fixes and some added functionality" +[[audits.gpu-alloc]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.5.3 -> 0.6.0" + +[[audits.gpu-alloc-types]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.2.0 -> 0.3.0" + [[audits.gpu-descriptor]] who = "Mike Hommey <mh+mozilla@glandium.org>" criteria = "safe-to-deploy" @@ -1840,6 +1890,11 @@ criteria = "safe-to-deploy" delta = "0.23.1 -> 0.24.0" notes = "This audit treats Dzmitry Malyshau (kvark) as a trusted reviewer." +[[audits.metal]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.24.0 -> 0.25.0" + [[audits.midir]] who = "Bobby Holley <bobbyholley@gmail.com>" criteria = "safe-to-deploy" @@ -1946,6 +2001,11 @@ who = "Nicolas Silva <nical@fastmail.com>" criteria = "safe-to-deploy" delta = "0.11.0@git:f59668ccfaf7bdb3a7e43d84363a21c77357b2fe -> 0.12.0@git:b99d58ea435090e561377949f428bce2c18451bb" +[[audits.naga]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.12.0@git:b99d58ea435090e561377949f428bce2c18451bb -> 0.12.0@git:76003dc0035d53a474d366dcdf49d2e4d12e921f" + [[audits.net2]] who = "Mike Hommey <mh+mozilla@glandium.org>" criteria = "safe-to-run" @@ -3533,6 +3593,11 @@ who = "Erich Gubler <erichdongubler@gmail.com>" criteria = "safe-to-deploy" delta = "0.16.0@git:25cb9f61e9a6be572ee42536bbe57aa5f759a700 -> 0.16.0@git:f71a1bc736fde37509262ca03e91d8f56a13aeb5" +[[audits.wgpu-core]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.16.0@git:f71a1bc736fde37509262ca03e91d8f56a13aeb5 -> 0.16.0@git:dcad7dfba92dd85c3ca21bb553a61834e01b04f5" + [[audits.wgpu-hal]] who = "Dzmitry Malyshau <kvarkus@gmail.com>" criteria = "safe-to-deploy" @@ -3611,6 +3676,11 @@ who = "Erich Gubler <erichdongubler@gmail.com>" criteria = "safe-to-deploy" delta = "0.16.0@git:25cb9f61e9a6be572ee42536bbe57aa5f759a700 -> 0.16.0@git:f71a1bc736fde37509262ca03e91d8f56a13aeb5" +[[audits.wgpu-hal]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = "safe-to-deploy" +delta = "0.16.0@git:f71a1bc736fde37509262ca03e91d8f56a13aeb5 -> 0.16.0@git:dcad7dfba92dd85c3ca21bb553a61834e01b04f5" + [[audits.wgpu-types]] who = "Dzmitry Malyshau <kvarkus@gmail.com>" criteria = "safe-to-deploy" @@ -3689,6 +3759,11 @@ who = "Erich Gubler <erichdongubler@gmail.com>" criteria = "safe-to-deploy" delta = "0.16.0@git:25cb9f61e9a6be572ee42536bbe57aa5f759a700 -> 0.16.0@git:f71a1bc736fde37509262ca03e91d8f56a13aeb5" +[[audits.wgpu-types]] +who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>" +criteria = 
"safe-to-deploy" +delta = "0.16.0@git:f71a1bc736fde37509262ca03e91d8f56a13aeb5 -> 0.16.0@git:dcad7dfba92dd85c3ca21bb553a61834e01b04f5" + [[audits.whatsys]] who = "Bobby Holley " criteria = "safe-to-deploy" diff --git a/third_party/rust/ash/.cargo-checksum.json b/third_party/rust/ash/.cargo-checksum.json index ecaa49b60b88..3746880a0876 100644 --- a/third_party/rust/ash/.cargo-checksum.json +++ b/third_party/rust/ash/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"f5ba364e2195938f21eec077ff01ae3977589f75e0dd9315507058d87f229cb3","LICENSE-APACHE":"0e8a410375e1ee5c75c6198b08b534fec6e5edef766cf971bab4a404542636ab","LICENSE-MIT":"c733ccf8ab25df175a63d9d844677b98e3a5ab8f8de1c1616087e6868a7f948e","README.md":"486092d206c93d24e2108dd0df17571a19c0f029306102d0970d0791364d6497","build.rs":"616f1c2f1a0dd883d57690b767691750c1762fb01ab4aeb662825ceecf869e7b","src/device.rs":"3821bad3d35753a9dcb95dfe9c59ae0f70ce55495ec05239fb523dbb1524e232","src/entry.rs":"7e0e9a0a4a71515d453761dc2c8839d5ba3ce4c9a038433279749ae4b3e795d5","src/extensions/experimental/amd.rs":"e89f7cf4d4b9335ae74391fddc85d03106b121b6d964ea8fad35593e9918f4e6","src/extensions/experimental/mod.rs":"41a5366e1c8bd0e1fa47e9cf6fddc8111ed0a6946813be4eefca81da969d1ee9","src/extensions/ext/acquire_drm_display.rs":"095af0b7336a2cc03e0f5584d994db8df1ad14f52360456407725013d33298ad","src/extensions/ext/buffer_device_address.rs":"124033d6e8f3251cf0d44e7d0d51d9ef28770d6c7d2fe283ffa98a32ba4ead55","src/extensions/ext/calibrated_timestamps.rs":"ba0ac6e38e7feb62b482ed9371c11447669d0d88b18bfc5e59ea75d5ed78691c","src/extensions/ext/debug_marker.rs":"33044f1f3f46280eb3f2564c9c8d66f5a470dbac51baeff34d2997f2c6a0fb94","src/extensions/ext/debug_report.rs":"405b179a8b155adbafabfaf9d38a5fb84e78c78dced79e72d64fb507520a85b1","src/extensions/ext/debug_utils.rs":"a496af59111353313013799b8862eacf343cf826c9901212c7838ec2fab40fe5","src/extensions/ext/descriptor_buffer.rs":"ae9c2ce265ada99b2ff99d6c2fe55f4842b9213cfaa2010ea7cf1a738e01c15d","src/extensions/ext/extended_dynamic_state.rs":"feeb16916cf2288713df1dd79545c9f484e3b115a2e93aa00e621b59b92085ff","src/extensions/ext/extended_dynamic_state2.rs":"ac30857e100ef00578ebcdd6d9d0136ebad95454965780836f15b42a88c00497","src/extensions/ext/extended_dynamic_state3.rs":"4e3b95846dff78cb9a467641324075741c2cb71d53233ec52b0727e066faae4d","src/extensions/ext/full_screen_exclusive.rs":"9572aa10253ff7c6d0f79b4ce92114ee48499d260043860db9ea268faf95756b","src/extensions/ext/headless_surface.rs":"dc0540ca8bb9be99975d8f432a062e1dcac53bb43dcd81a0450673fe9a3798cb","src/extensions/ext/image_compression_control.rs":"6f704c54ab6965058d0f9aef8eef9b6d7d713c419270494f8a7f1f560ee827a9","src/extensions/ext/image_drm_format_modifier.rs":"452d9833283dd2913de676747cc54b27df070915bfd8b31184ded4d9f299656d","src/extensions/ext/mesh_shader.rs":"c4c30eb1a8de3db5ad11b4f4f77c41c8981634e47495aee58ac07a6297e8344c","src/extensions/ext/metal_surface.rs":"3175d1d51e49e00cbdeabdc050932d547bffc4b0842a7e30049141ee99dda107","src/extensions/ext/mod.rs":"6daf2b78f4e6bd801b45cdf4213eb0acd7e313b5786c98e218e85ac2baf04676","src/extensions/ext/physical_device_drm.rs":"450fd7e6a136e5e0079836ab65f89a7a0ed90af5469abebd99afd2b14d155c55","src/extensions/ext/private_data.rs":"a92e37ea025d2a8106a2034403de4bfdc954b411a8c1c4f29d955d81fcfbd4a0","src/extensions/ext/sample_locations.rs":"fc664942394362a00eeb1086b63e4ab84df8884bb299db05ef6a14f98434bd1c","src/extensions/ext/tooling_info.rs":"8a4f6570d15e2a1cbea9a8fa8e70cf358aa76336e108bade9c46de25b65bf5cb","src/extensions/khr/accelerati
on_structure.rs":"935628eee2aa943286a2fe387dcd0655c8a539e52dcbce7769d0fd71b3af64f7","src/extensions/khr/android_surface.rs":"8183a1ba413b894287014b934df3888cb6d53b8f95bbe3f299b91fd481cdd2eb","src/extensions/khr/buffer_device_address.rs":"6ffb9bad123139cff26c77ea5a7a4556bb24a6d3daa47fc3378d67c016157098","src/extensions/khr/copy_commands2.rs":"58717fd89144de4fded43601ebb8997b14a67eb0d18b85f1ab739457cbaef1d0","src/extensions/khr/create_render_pass2.rs":"c4c15abbe9dff9778c2ecd25556ab88bd642facca75d7c69e4023ec157fce12c","src/extensions/khr/deferred_host_operations.rs":"facf3c32ea9b5093a0b8f5c8ca3e89d5517a6e39f5e781deffc91f3170c77443","src/extensions/khr/device_group.rs":"5a54fba7b9f1cd25aa2281eabeca226de5ba76bd1433adb76d4e5d650a7862b5","src/extensions/khr/device_group_creation.rs":"368e5139ade893b71e1a333cab54605b76f51656bb8e4936c492f80069616d84","src/extensions/khr/display.rs":"c8241e5d8d39b74ecc4766faba27efca5cbd31382d8d40ac2f2f4e54f1f8d88d","src/extensions/khr/display_swapchain.rs":"9bbf1d47989a1550a3515c6d98367aa0df67b43ea102be329a658e0f458ae1d9","src/extensions/khr/draw_indirect_count.rs":"14acfbdb21a96db1bd0bad467af341ee327d5a4a78b2727aa5c5707560b157f8","src/extensions/khr/dynamic_rendering.rs":"bc363c863ef269bf905de0a3e7414fc5f37af7da88412086b31f1065d0a1178f","src/extensions/khr/external_fence_fd.rs":"0a89e00c9560e6fc4b63d6295260bb5db79bb595232217ce1723ba0a4aa0af38","src/extensions/khr/external_fence_win32.rs":"a357d7dcea8e50f3b26f5bb2fbe0bcbdc1b94aa4b6bbc784570715bcb06a23a9","src/extensions/khr/external_memory_fd.rs":"624d0bcf61e16ee9e284d71354fc18a8141ec14b2fd8e1d5c1ed0547f0bd2fcf","src/extensions/khr/external_memory_win32.rs":"d731d4e531e55a7ba53bdec2045167cf494ca7b54267edc205be675790a579ef","src/extensions/khr/external_semaphore_fd.rs":"8254d0e50e26761e10b523e4d45c6128d304926880a02144724145fd4d412898","src/extensions/khr/external_semaphore_win32.rs":"2fcc7fb4ba50f94e60d1faeefdb0e4f6940120766143c9ce7257d7c0356b68af","src/extensions/khr/get_memory_requirements2.rs":"56edbc8cb1a335071518fdf52d1f549ab31292551cd5ec2d4168949446600468","src/extensions/khr/get_physical_device_properties2.rs":"a9ad06c559390c43100b7c6ab3a339ef7ce9c6e909d5e934e51fcfee2e18eb2f","src/extensions/khr/get_surface_capabilities2.rs":"6f2f8a0c84c4b962a681a2c02216df6967c1a91da7d6655f82c7eab3052db081","src/extensions/khr/maintenance1.rs":"e19365a3ad201712a871887346564d82a8bddfa8a7134b0f84f48396e0c428dd","src/extensions/khr/maintenance3.rs":"e4022aa249d57a47133df4ab6c6e3884a1613af78ba8d7b12bbeeeb87d0a19ee","src/extensions/khr/maintenance4.rs":"23a1a123dfa5ca32741fca6f14ea0aae358563ba043dda201283dbdd56687a06","src/extensions/khr/mod.rs":"1af17e94a6c0f8a95a60ea407f2b9657eee755d17671e2f7bdcf8eb24fa0ea4d","src/extensions/khr/pipeline_executable_properties.rs":"e643476dde846db4b96d4a1eab9f6cd59f2366dae706f61dfd5b2a58b33e5739","src/extensions/khr/present_wait.rs":"6bc0ff08748650e5aaa303287abecf9cd47328cd285763fdb120c5bc1bed72fd","src/extensions/khr/push_descriptor.rs":"531b9026b2e3377b259a93097ca4a6327951d11c0582dad29ea4b4817b0b7f95","src/extensions/khr/ray_tracing_maintenance1.rs":"281095b60144574189cd63e7d55f537abe46281c3f0c0501937c2e1dc4102fe9","src/extensions/khr/ray_tracing_pipeline.rs":"1d84bbcce85e6f6392828ace82022a713a71fe3a1f85fab6ba30368ac7088e23","src/extensions/khr/surface.rs":"fa60900890cf901a0d3fcb9f066f72917a783b73baff93bb377d8d3f77c18ea5","src/extensions/khr/swapchain.rs":"ec23290bc929b3db2a403d47148426a817b05547073f4d12950f816db0a9b520","src/extensions/khr/synchronization2.rs":"8d4e880ffdda8e9f458fc4ccf6de04b92d
6314121071d711404b43236c9816e5","src/extensions/khr/timeline_semaphore.rs":"b554829512a648fd1d9ab493955e7c088c1f1f3a50b8fb933211d2b5a03e5cde","src/extensions/khr/wayland_surface.rs":"2d7b8423e5b856e9185cfef804f7fededd621f60e5aac26d93e69aee055dafd1","src/extensions/khr/win32_surface.rs":"135fa0493b77b322fa3478eba877848e9325bbf70d91f868862f637fbdeff507","src/extensions/khr/xcb_surface.rs":"c85bbce4d65f8c5ee4a11349183757e69df9331af50940f6befe6932ba3f6dc2","src/extensions/khr/xlib_surface.rs":"2175f140157e47af5096069e443799a1db373b2f6e9a0818a76a9398bc97bcea","src/extensions/mod.rs":"717bc2155a3d311ddc485dbf6313a0996e1906d15bd09be0aab776e33368eaa7","src/extensions/mvk/ios_surface.rs":"c268afccb727218f269649056a342688d9a93fa3f4f1888df7733d2d5c5030f6","src/extensions/mvk/macos_surface.rs":"1d9cdd0f818e2c209d5d11b1cbca6705351f097ef64697cb523e3f8e3b1cae93","src/extensions/mvk/mod.rs":"d03ac1a0144d1aca9ed1d0ce0c14b099f1fedb03b8108028b780a34f64de604c","src/extensions/nn/mod.rs":"b32935320aa2af18b129868d52d4042596b3c22046cd650a4e9e75c5f3973eb3","src/extensions/nn/vi_surface.rs":"27f09bf182967353ce5c0dc73ea89eab0f89ac3d5d18b9574b755cb3b6ff214b","src/extensions/nv/coverage_reduction_mode.rs":"428348e0876108b63dbbdce55218403c2481af902aba463633f9d4d62151c4c9","src/extensions/nv/device_diagnostic_checkpoints.rs":"8faca3ef4c99c5179996c8899c15489e1978d01fc7614248ac07057874d78f72","src/extensions/nv/mesh_shader.rs":"2bdbdf131db157d5e4f10beacdcc5cfdf54b4d296a0c0bf1239259ba32d4c76b","src/extensions/nv/mod.rs":"cb7e4f152f515c8a8beefcb7b2ff8a65e6d5932daf34139fb438c60f894e5fc2","src/extensions/nv/ray_tracing.rs":"a5c5faeb624542d13b0bb08e9434ae7604120a89e4e00a8430046c3cc218a9c1","src/instance.rs":"1afd01655c650480bffc1b6963e510632b92dc36733594c12aa3032415669270","src/lib.rs":"3dc222b92b417c0985e7ca5cd131f2966f02f038c578076962f028419edc05fa","src/prelude.rs":"035dbe3e825ff83b0af5b22ed428d95139ff16f65c6039419275a4a5820f499d","src/util.rs":"55eed33491cf1fa29ccf7f591aeba2c0bee2c7eae9c979124f27c800567104aa","src/vk.rs":"234a101311b07c82cca72db451ddf953e4cd385a6b7fd42fa4e8f659e8089e71","src/vk/aliases.rs":"c6d43e3073a3000e8ccaa1a131112a6129c22e699d559a53762fd7d99bf66e30","src/vk/bitflags.rs":"fa4e2337f029f3cb2c91b51a78ab48b2eeed64d07bc9d872783c06624aff5171","src/vk/const_debugs.rs":"b299897a023c200bd973addd736aa7b7110ce108d4012537679c0353d507af65","src/vk/constants.rs":"6e21377b7238d9c94174a97e6ebdc6e3401a2c32ef32e211142448eed1d972d7","src/vk/definitions.rs":"242aace0047bf09fa344771b8443c98d63fbcbdf0af6b38e120d37e2c83bafc5","src/vk/enums.rs":"4faea35666df46f2698b4eed1171dd916180959e06bdae43814f2ac8e9b6ce9b","src/vk/extensions.rs":"2d10226fb700efbf198ab158d2d8af6bb94563905023cd66cd81a1c9a7250dbc","src/vk/feature_extensions.rs":"c875cf2a7b02a8a8fa31f3bc8794ec526c0870b470e7a308b5b256402dc8125e","src/vk/features.rs":"46633698d0275ae55d4372403d76b09763e762e8db9fec0688f40d8204ce221b","src/vk/macros.rs":"5fde17e47d5a102bfc74dfd9ad024938d676950afae7fe932dc697cb95a9f5e4","src/vk/native.rs":"0e3d2c92be3f8b8b62d896136d1bcbdd8a9406f292aa912f646a6ed23fd96cb6","src/vk/platform_types.rs":"fc4143ac65aead425694fd4dd9f50a7c1aebe35e23c872050df5368c31694375","src/vk/prelude.rs":"8e1cf47901f63b64a1490011eb1572a7ce8401a1559cca6f057645bd2d85e035","tests/constant_size_arrays.rs":"fdd726fcebc3b05900a0d24a05826f4a19e99bc71f8f35bacc958a1e10de1013","tests/display.rs":"d294396bb5b2210432724cccc0a56e105bab8743e180d4ad7cc675a200c09539"},"package":"28bf19c1f0a470be5fbf7522a308a05df06610252c5bcf5143e1b23f629a9a03"} \ No newline at end of file 
+{"files":{"Cargo.toml":"647cc2f6d5950cd9107c10b0a3bd35787b3b34c6c6233c5dd6a2dca8004d1e2e","LICENSE-APACHE":"0e8a410375e1ee5c75c6198b08b534fec6e5edef766cf971bab4a404542636ab","LICENSE-MIT":"c733ccf8ab25df175a63d9d844677b98e3a5ab8f8de1c1616087e6868a7f948e","README.md":"5d1c540b5bb7dcdbb4fb95403b0a178b22c2dbf8fa75db56cc67626196ea2f41","build.rs":"b0109a1596448fb599af8ca0efdbaaf2dba09bbdded8f07f2b5bab885f9a0d42","src/device.rs":"092b983d74b493c7f91d1b6ef21ab72231972392b5cd03ddb0f06b7c4f2a08cc","src/entry.rs":"6b83a893d9efb33c5718868ae9039af0da0d77b673cfee68e009bb83ff6837b7","src/extensions/experimental/amd.rs":"e89f7cf4d4b9335ae74391fddc85d03106b121b6d964ea8fad35593e9918f4e6","src/extensions/experimental/mod.rs":"41a5366e1c8bd0e1fa47e9cf6fddc8111ed0a6946813be4eefca81da969d1ee9","src/extensions/ext/acquire_drm_display.rs":"095af0b7336a2cc03e0f5584d994db8df1ad14f52360456407725013d33298ad","src/extensions/ext/buffer_device_address.rs":"124033d6e8f3251cf0d44e7d0d51d9ef28770d6c7d2fe283ffa98a32ba4ead55","src/extensions/ext/calibrated_timestamps.rs":"ba0ac6e38e7feb62b482ed9371c11447669d0d88b18bfc5e59ea75d5ed78691c","src/extensions/ext/debug_marker.rs":"33044f1f3f46280eb3f2564c9c8d66f5a470dbac51baeff34d2997f2c6a0fb94","src/extensions/ext/debug_report.rs":"405b179a8b155adbafabfaf9d38a5fb84e78c78dced79e72d64fb507520a85b1","src/extensions/ext/debug_utils.rs":"a496af59111353313013799b8862eacf343cf826c9901212c7838ec2fab40fe5","src/extensions/ext/descriptor_buffer.rs":"ae9c2ce265ada99b2ff99d6c2fe55f4842b9213cfaa2010ea7cf1a738e01c15d","src/extensions/ext/extended_dynamic_state.rs":"feeb16916cf2288713df1dd79545c9f484e3b115a2e93aa00e621b59b92085ff","src/extensions/ext/extended_dynamic_state2.rs":"ac30857e100ef00578ebcdd6d9d0136ebad95454965780836f15b42a88c00497","src/extensions/ext/extended_dynamic_state3.rs":"4e3b95846dff78cb9a467641324075741c2cb71d53233ec52b0727e066faae4d","src/extensions/ext/full_screen_exclusive.rs":"6962432157b9209fae038827cf0b6fb2624fc05479c4436088de5a1c200e187b","src/extensions/ext/headless_surface.rs":"dc0540ca8bb9be99975d8f432a062e1dcac53bb43dcd81a0450673fe9a3798cb","src/extensions/ext/image_compression_control.rs":"6f704c54ab6965058d0f9aef8eef9b6d7d713c419270494f8a7f1f560ee827a9","src/extensions/ext/image_drm_format_modifier.rs":"452d9833283dd2913de676747cc54b27df070915bfd8b31184ded4d9f299656d","src/extensions/ext/mesh_shader.rs":"c4c30eb1a8de3db5ad11b4f4f77c41c8981634e47495aee58ac07a6297e8344c","src/extensions/ext/metal_surface.rs":"3175d1d51e49e00cbdeabdc050932d547bffc4b0842a7e30049141ee99dda107","src/extensions/ext/mod.rs":"9514f2a84c3677366889d3107f164cbc6af7862578b596a89f049d4e830f3ce1","src/extensions/ext/physical_device_drm.rs":"450fd7e6a136e5e0079836ab65f89a7a0ed90af5469abebd99afd2b14d155c55","src/extensions/ext/pipeline_properties.rs":"f4bbe96d185c6d87bf7084ff6219ad39faabd45fbd18164f80c708ebd41b9bde","src/extensions/ext/private_data.rs":"a92e37ea025d2a8106a2034403de4bfdc954b411a8c1c4f29d955d81fcfbd4a0","src/extensions/ext/sample_locations.rs":"fc664942394362a00eeb1086b63e4ab84df8884bb299db05ef6a14f98434bd1c","src/extensions/ext/shader_object.rs":"73b30e0a0802ea22cd7d0400214b7fbfd8f0ef4975dcfec5954228df6e40bb6a","src/extensions/ext/tooling_info.rs":"8a4f6570d15e2a1cbea9a8fa8e70cf358aa76336e108bade9c46de25b65bf5cb","src/extensions/khr/acceleration_structure.rs":"935628eee2aa943286a2fe387dcd0655c8a539e52dcbce7769d0fd71b3af64f7","src/extensions/khr/android_surface.rs":"8183a1ba413b894287014b934df3888cb6d53b8f95bbe3f299b91fd481cdd2eb","src/extensions/khr/buffer_device_address.rs":
"6ffb9bad123139cff26c77ea5a7a4556bb24a6d3daa47fc3378d67c016157098","src/extensions/khr/copy_commands2.rs":"58717fd89144de4fded43601ebb8997b14a67eb0d18b85f1ab739457cbaef1d0","src/extensions/khr/create_render_pass2.rs":"c4c15abbe9dff9778c2ecd25556ab88bd642facca75d7c69e4023ec157fce12c","src/extensions/khr/deferred_host_operations.rs":"facf3c32ea9b5093a0b8f5c8ca3e89d5517a6e39f5e781deffc91f3170c77443","src/extensions/khr/device_group.rs":"2ff043e8612b71024d3ddebd8238d8e226e91fc25d87593c4299e9722c364964","src/extensions/khr/device_group_creation.rs":"bfcb60fb8621667e59cd07f691fe4f6c84f59316828f1b3b29e42fa950481906","src/extensions/khr/display.rs":"c8241e5d8d39b74ecc4766faba27efca5cbd31382d8d40ac2f2f4e54f1f8d88d","src/extensions/khr/display_swapchain.rs":"9bbf1d47989a1550a3515c6d98367aa0df67b43ea102be329a658e0f458ae1d9","src/extensions/khr/draw_indirect_count.rs":"14acfbdb21a96db1bd0bad467af341ee327d5a4a78b2727aa5c5707560b157f8","src/extensions/khr/dynamic_rendering.rs":"bc363c863ef269bf905de0a3e7414fc5f37af7da88412086b31f1065d0a1178f","src/extensions/khr/external_fence_fd.rs":"0a89e00c9560e6fc4b63d6295260bb5db79bb595232217ce1723ba0a4aa0af38","src/extensions/khr/external_fence_win32.rs":"a357d7dcea8e50f3b26f5bb2fbe0bcbdc1b94aa4b6bbc784570715bcb06a23a9","src/extensions/khr/external_memory_fd.rs":"e9f6a234aec6ca2bd74269b444e2326fedc2f346db8de87f99d4bfda7637af9e","src/extensions/khr/external_memory_win32.rs":"d731d4e531e55a7ba53bdec2045167cf494ca7b54267edc205be675790a579ef","src/extensions/khr/external_semaphore_fd.rs":"8254d0e50e26761e10b523e4d45c6128d304926880a02144724145fd4d412898","src/extensions/khr/external_semaphore_win32.rs":"2fcc7fb4ba50f94e60d1faeefdb0e4f6940120766143c9ce7257d7c0356b68af","src/extensions/khr/get_memory_requirements2.rs":"56edbc8cb1a335071518fdf52d1f549ab31292551cd5ec2d4168949446600468","src/extensions/khr/get_physical_device_properties2.rs":"a9ad06c559390c43100b7c6ab3a339ef7ce9c6e909d5e934e51fcfee2e18eb2f","src/extensions/khr/get_surface_capabilities2.rs":"6f2f8a0c84c4b962a681a2c02216df6967c1a91da7d6655f82c7eab3052db081","src/extensions/khr/maintenance1.rs":"e19365a3ad201712a871887346564d82a8bddfa8a7134b0f84f48396e0c428dd","src/extensions/khr/maintenance3.rs":"e4022aa249d57a47133df4ab6c6e3884a1613af78ba8d7b12bbeeeb87d0a19ee","src/extensions/khr/maintenance4.rs":"638a3804a3bed006c320a4d3378e61b9a13ff5ecb7cf0a8f70f40d37b6489fff","src/extensions/khr/mod.rs":"7a44056ef1c67830234f60a965859cb00e90a6c6f5d717d6092ae08111de74c8","src/extensions/khr/performance_query.rs":"8b634851f16b8021416510bfcc4224ba1214abbda2e67a2f6c553d1b9fd8b7da","src/extensions/khr/pipeline_executable_properties.rs":"e643476dde846db4b96d4a1eab9f6cd59f2366dae706f61dfd5b2a58b33e5739","src/extensions/khr/present_wait.rs":"6bc0ff08748650e5aaa303287abecf9cd47328cd285763fdb120c5bc1bed72fd","src/extensions/khr/push_descriptor.rs":"531b9026b2e3377b259a93097ca4a6327951d11c0582dad29ea4b4817b0b7f95","src/extensions/khr/ray_tracing_maintenance1.rs":"281095b60144574189cd63e7d55f537abe46281c3f0c0501937c2e1dc4102fe9","src/extensions/khr/ray_tracing_pipeline.rs":"1d84bbcce85e6f6392828ace82022a713a71fe3a1f85fab6ba30368ac7088e23","src/extensions/khr/surface.rs":"fa60900890cf901a0d3fcb9f066f72917a783b73baff93bb377d8d3f77c18ea5","src/extensions/khr/swapchain.rs":"c64ba70bf3a5ede9fc218b34fb5559b55b900479290ef3d684c2637abe13bda9","src/extensions/khr/synchronization2.rs":"8d4e880ffdda8e9f458fc4ccf6de04b92d6314121071d711404b43236c9816e5","src/extensions/khr/timeline_semaphore.rs":"b554829512a648fd1d9ab493955e7c088c1f1f3a50b8fb933211
d2b5a03e5cde","src/extensions/khr/wayland_surface.rs":"2d7b8423e5b856e9185cfef804f7fededd621f60e5aac26d93e69aee055dafd1","src/extensions/khr/win32_surface.rs":"135fa0493b77b322fa3478eba877848e9325bbf70d91f868862f637fbdeff507","src/extensions/khr/xcb_surface.rs":"c85bbce4d65f8c5ee4a11349183757e69df9331af50940f6befe6932ba3f6dc2","src/extensions/khr/xlib_surface.rs":"2175f140157e47af5096069e443799a1db373b2f6e9a0818a76a9398bc97bcea","src/extensions/mod.rs":"717bc2155a3d311ddc485dbf6313a0996e1906d15bd09be0aab776e33368eaa7","src/extensions/mvk/ios_surface.rs":"c268afccb727218f269649056a342688d9a93fa3f4f1888df7733d2d5c5030f6","src/extensions/mvk/macos_surface.rs":"1d9cdd0f818e2c209d5d11b1cbca6705351f097ef64697cb523e3f8e3b1cae93","src/extensions/mvk/mod.rs":"d03ac1a0144d1aca9ed1d0ce0c14b099f1fedb03b8108028b780a34f64de604c","src/extensions/nn/mod.rs":"b32935320aa2af18b129868d52d4042596b3c22046cd650a4e9e75c5f3973eb3","src/extensions/nn/vi_surface.rs":"27f09bf182967353ce5c0dc73ea89eab0f89ac3d5d18b9574b755cb3b6ff214b","src/extensions/nv/coverage_reduction_mode.rs":"428348e0876108b63dbbdce55218403c2481af902aba463633f9d4d62151c4c9","src/extensions/nv/device_diagnostic_checkpoints.rs":"8faca3ef4c99c5179996c8899c15489e1978d01fc7614248ac07057874d78f72","src/extensions/nv/mesh_shader.rs":"2bdbdf131db157d5e4f10beacdcc5cfdf54b4d296a0c0bf1239259ba32d4c76b","src/extensions/nv/mod.rs":"cb7e4f152f515c8a8beefcb7b2ff8a65e6d5932daf34139fb438c60f894e5fc2","src/extensions/nv/ray_tracing.rs":"a5c5faeb624542d13b0bb08e9434ae7604120a89e4e00a8430046c3cc218a9c1","src/instance.rs":"c70ee7761eca4753d56e879c7551ed318b3cf0d7491dfbe577a60ca7029f056f","src/lib.rs":"3dc222b92b417c0985e7ca5cd131f2966f02f038c578076962f028419edc05fa","src/prelude.rs":"0efd876803db2edb93b34ef21645d0245f4f9476fac000f7de04ddf86f8917e6","src/util.rs":"51df166b0ca0317ea28c87eb42c23477a96a88b849ae867cb989dfc893845d08","src/vk.rs":"234a101311b07c82cca72db451ddf953e4cd385a6b7fd42fa4e8f659e8089e71","src/vk/aliases.rs":"10fd44d0d03d8c7747ab5d6cc9c3f87c4dbcf1ade2bfa86d61d408c328e03723","src/vk/bitflags.rs":"81212ebd4d0ac827abf850ee6c496cac3cf8b9c9bcb9153d16aaaacbbf01cc92","src/vk/const_debugs.rs":"fe1ed13e20854baa86f989fe055eea0863bcf919e33a942828e4df0c427e2e53","src/vk/constants.rs":"33e50507434eabae4f324803712b8211596b7078de35a89650920e7c8bf23868","src/vk/definitions.rs":"79fb0a7fc03aeb1bdc0f905bf459421d4bbd7454eaa9f3813c3a39ac21b77558","src/vk/enums.rs":"90af6f5584b4f975ff2292beb713aef31c37953437fba05503dddb44e279bada","src/vk/extensions.rs":"d5bc4ad7564e71532721bc49371b2dfff7c37204c4a50d2adc18a65cd8c977c0","src/vk/feature_extensions.rs":"c875cf2a7b02a8a8fa31f3bc8794ec526c0870b470e7a308b5b256402dc8125e","src/vk/features.rs":"008be2b5a1b9438fae2e75754c6415344e58e117062496643ef87240031c4434","src/vk/macros.rs":"5fde17e47d5a102bfc74dfd9ad024938d676950afae7fe932dc697cb95a9f5e4","src/vk/native.rs":"7ff91f6ec628ea07e67adbba18cabcb05e24c4f63eeb94056ae9e7e39b56ed2b","src/vk/platform_types.rs":"fc4143ac65aead425694fd4dd9f50a7c1aebe35e23c872050df5368c31694375","src/vk/prelude.rs":"8e1cf47901f63b64a1490011eb1572a7ce8401a1559cca6f057645bd2d85e035","tests/constant_size_arrays.rs":"fdd726fcebc3b05900a0d24a05826f4a19e99bc71f8f35bacc958a1e10de1013","tests/display.rs":"d294396bb5b2210432724cccc0a56e105bab8743e180d4ad7cc675a200c09539"},"package":"39e9c3835d686b0a6084ab4234fcd1b07dbf6e4767dce60874b12356a25ecd4a"} \ No newline at end of file diff --git a/third_party/rust/ash/Cargo.toml b/third_party/rust/ash/Cargo.toml index 429cbf7b24fb..31a170a69b8d 100644 --- 
a/third_party/rust/ash/Cargo.toml +++ b/third_party/rust/ash/Cargo.toml @@ -10,10 +10,10 @@ # See Cargo.toml.orig for the original contents. [package] -edition = "2018" +edition = "2021" rust-version = "1.59.0" name = "ash" -version = "0.37.2+1.3.238" +version = "0.37.3+1.3.251" authors = [ "Maik Klein <maikklein@googlemail.com>", "Benjamin Saunders <ben.e.saunders@gmail.com>", @@ -28,9 +28,7 @@ keywords = [ ] license = "MIT OR Apache-2.0" repository = "https://github.com/MaikKlein/ash" - -[package.metadata.release] -no-dev-version = true +resolver = "1" [package.metadata.docs.rs] all-features = true @@ -39,6 +37,9 @@ rustdoc-args = [ "docsrs", ] +[package.metadata.release] +no-dev-version = true + [dependencies.libloading] version = "0.7" optional = true diff --git a/third_party/rust/ash/README.md b/third_party/rust/ash/README.md index 12c0892ce3e8..513519c92c0f 100644 --- a/third_party/rust/ash/README.md +++ b/third_party/rust/ash/README.md @@ -8,6 +8,7 @@ A very lightweight wrapper around Vulkan [![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE-MIT) [![LICENSE](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](LICENSE-APACHE) [![Join the chat at https://gitter.im/MaikKlein/ash](https://badges.gitter.im/MaikKlein/ash.svg)](https://gitter.im/MaikKlein/ash?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![MSRV](https://img.shields.io/badge/rustc-1.60.0+-ab6000.svg)](https://blog.rust-lang.org/2022/04/07/Rust-1.60.0.html) ## Overview - [x] A true Vulkan API without compromises diff --git a/third_party/rust/ash/build.rs b/third_party/rust/ash/build.rs index ba6e61d4927a..7ccdbc4b6e01 100644 --- a/third_party/rust/ash/build.rs +++ b/third_party/rust/ash/build.rs @@ -13,12 +13,12 @@ fn main() { ("windows", "64") => "Lib", _ => "lib", }; - println!("cargo:rustc-link-search={}/{}", var, suffix); + println!("cargo:rustc-link-search={var}/{suffix}"); } let lib = match &*target_family { "windows" => "vulkan-1", _ => "vulkan", }; - println!("cargo:rustc-link-lib={}", lib); + println!("cargo:rustc-link-lib={lib}"); } } diff --git a/third_party/rust/ash/src/device.rs b/third_party/rust/ash/src/device.rs index 60a91ae15580..cf0fca78f2e3 100644 --- a/third_party/rust/ash/src/device.rs +++ b/third_party/rust/ash/src/device.rs @@ -465,34 +465,42 @@ impl Device { #[inline] pub unsafe fn get_device_buffer_memory_requirements( &self, - create_info: &vk::DeviceBufferMemoryRequirements, + memory_requirements: &vk::DeviceBufferMemoryRequirements, out: &mut vk::MemoryRequirements2, ) { - (self.device_fn_1_3.get_device_buffer_memory_requirements)(self.handle, create_info, out) + (self.device_fn_1_3.get_device_buffer_memory_requirements)( + self.handle, + memory_requirements, + out, + ) } /// #[inline] pub unsafe fn get_device_image_memory_requirements( &self, - create_info: &vk::DeviceImageMemoryRequirements, + memory_requirements: &vk::DeviceImageMemoryRequirements, out: &mut vk::MemoryRequirements2, ) { - (self.device_fn_1_3.get_device_image_memory_requirements)(self.handle, create_info, out) + (self.device_fn_1_3.get_device_image_memory_requirements)( + self.handle, + memory_requirements, + out, + ) } /// Retrieve the number of elements to pass to [`get_device_image_sparse_memory_requirements()`][Self::get_device_image_sparse_memory_requirements()] #[inline] pub unsafe fn get_device_image_sparse_memory_requirements_len( &self, - create_info: &vk::DeviceImageMemoryRequirements, + memory_requirements: &vk::DeviceImageMemoryRequirements, ) -> usize { let mut count = 0; (self .device_fn_1_3
.get_device_image_sparse_memory_requirements)( self.handle, - create_info, + memory_requirements, &mut count, std::ptr::null_mut(), ); @@ -506,7 +514,7 @@ impl Device { #[inline] pub unsafe fn get_device_image_sparse_memory_requirements( &self, - create_info: &vk::DeviceImageMemoryRequirements, + memory_requirements: &vk::DeviceImageMemoryRequirements, out: &mut [vk::SparseImageMemoryRequirements2], ) { let mut count = out.len() as u32; @@ -514,7 +522,7 @@ impl Device { .device_fn_1_3 .get_device_image_sparse_memory_requirements)( self.handle, - create_info, + memory_requirements, &mut count, out.as_mut_ptr(), ); @@ -846,6 +854,14 @@ impl Device { (self.device_fn_1_1.trim_command_pool)(self.handle(), command_pool, flags); } + /// + #[inline] + pub unsafe fn get_device_queue2(&self, queue_info: &vk::DeviceQueueInfo2) -> vk::Queue { + let mut queue = mem::zeroed(); + (self.device_fn_1_1.get_device_queue2)(self.handle(), queue_info, &mut queue); + queue + } + /// #[inline] pub unsafe fn create_sampler_ycbcr_conversion( @@ -1499,17 +1515,17 @@ impl Device { #[inline] pub unsafe fn allocate_descriptor_sets( &self, - create_info: &vk::DescriptorSetAllocateInfo, + allocate_info: &vk::DescriptorSetAllocateInfo, ) -> VkResult<Vec<vk::DescriptorSet>> { - let mut desc_set = Vec::with_capacity(create_info.descriptor_set_count as usize); + let mut desc_set = Vec::with_capacity(allocate_info.descriptor_set_count as usize); (self.device_fn_1_0.allocate_descriptor_sets)( self.handle(), - create_info, + allocate_info, desc_set.as_mut_ptr(), ) .result()?; - desc_set.set_len(create_info.descriptor_set_count as usize); + desc_set.set_len(allocate_info.descriptor_set_count as usize); Ok(desc_set) } @@ -1786,10 +1802,10 @@ impl Device { pub unsafe fn cmd_begin_render_pass( &self, command_buffer: vk::CommandBuffer, - create_info: &vk::RenderPassBeginInfo, + render_pass_begin: &vk::RenderPassBeginInfo, contents: vk::SubpassContents, ) { - (self.device_fn_1_0.cmd_begin_render_pass)(command_buffer, create_info, contents); + (self.device_fn_1_0.cmd_begin_render_pass)(command_buffer, render_pass_begin, contents); } /// @@ -2488,16 +2504,16 @@ impl Device { #[inline] pub unsafe fn allocate_command_buffers( &self, - create_info: &vk::CommandBufferAllocateInfo, + allocate_info: &vk::CommandBufferAllocateInfo, ) -> VkResult<Vec<vk::CommandBuffer>> { - let mut buffers = Vec::with_capacity(create_info.command_buffer_count as usize); + let mut buffers = Vec::with_capacity(allocate_info.command_buffer_count as usize); (self.device_fn_1_0.allocate_command_buffers)( self.handle(), - create_info, + allocate_info, buffers.as_mut_ptr(), ) .result()?; - buffers.set_len(create_info.command_buffer_count as usize); + buffers.set_len(allocate_info.command_buffer_count as usize); Ok(buffers) } @@ -2592,13 +2608,13 @@ impl Device { #[inline] pub unsafe fn allocate_memory( &self, - create_info: &vk::MemoryAllocateInfo, + allocate_info: &vk::MemoryAllocateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult<vk::DeviceMemory> { let mut memory = mem::zeroed(); (self.device_fn_1_0.allocate_memory)( self.handle(), - create_info, + allocate_info, allocation_callbacks.as_raw_ptr(), &mut memory, ) diff --git a/third_party/rust/ash/src/entry.rs b/third_party/rust/ash/src/entry.rs index 67e6c9a7392a..7df62e7863f0 100644 --- a/third_party/rust/ash/src/entry.rs +++ b/third_party/rust/ash/src/entry.rs @@ -37,9 +37,15 @@ impl Entry { /// development packages installed (e.g. the Vulkan SDK, or Ubuntu's `libvulkan-dev`). 
/// /// # Safety + /// /// `dlopen`ing native libraries is inherently unsafe. The safety guidelines /// for [`Library::new()`] and [`Library::get()`] apply here. /// + /// No Vulkan functions loaded directly or indirectly from this [`Entry`] + /// may be called after it is [dropped][drop()]. + /// + /// # Example + /// /// ```no_run /// use ash::{vk, Entry}; /// # fn main() -> Result<(), Box<dyn std::error::Error>> { @@ -86,6 +92,11 @@ impl Entry { /// Note that instance/device functions are still fetched via `vkGetInstanceProcAddr` and /// `vkGetDeviceProcAddr` for maximum performance. /// + /// Any Vulkan function acquired directly or indirectly from this [`Entry`] may be called after it + /// is [dropped][drop()]. + /// + /// # Example + /// /// ```no_run /// use ash::{vk, Entry}; /// # fn main() -> Result<(), Box<dyn std::error::Error>> { @@ -116,8 +127,12 @@ impl Entry { /// Load Vulkan library at `path` /// /// # Safety + /// /// `dlopen`ing native libraries is inherently unsafe. The safety guidelines /// for [`Library::new()`] and [`Library::get()`] apply here. + /// + /// No Vulkan functions loaded directly or indirectly from this [`Entry`] + /// may be called after it is [dropped][drop()]. #[cfg(feature = "loaded")] #[cfg_attr(docsrs, doc(cfg(feature = "loaded")))] pub unsafe fn load_from(path: impl AsRef<OsStr>) -> Result<Self, LoadingError> { @@ -140,8 +155,9 @@ impl Entry { /// Load entry points based on an already-loaded [`vk::StaticFn`] /// /// # Safety - /// `static_fn` must contain valid function pointers that comply with the semantics specified by - /// Vulkan 1.0, which must remain valid for at least the lifetime of the returned [`Entry`]. + /// + /// `static_fn` must contain valid function pointers that comply with the semantics specified + /// by Vulkan 1.0, which must remain valid for at least the lifetime of the returned [`Entry`]. pub unsafe fn from_static_fn(static_fn: vk::StaticFn) -> Self { let load_fn = |name: &std::ffi::CStr| { mem::transmute((static_fn.get_instance_proc_addr)( @@ -176,6 +192,9 @@ impl Entry { } /// + /// + /// # Example + /// /// ```no_run /// # use ash::{Entry, vk}; /// # fn main() -> Result<(), Box<dyn std::error::Error>> { @@ -217,9 +236,13 @@ impl Entry { /// /// /// # Safety - /// In order for the created [`Instance`] to be valid for the duration of its - /// usage, the [`Entry`](Self) this was called on must be dropped later than the - /// resulting [`Instance`]. + /// + /// The resulting [`Instance`] and any function-pointer objects (e.g. [`Device`][crate::Device] + /// and [extensions][crate::extensions]) loaded from it may not be used after this [`Entry`] + /// object is dropped, unless it was created using [`Entry::linked()`]. + /// + /// [`Instance`] does _not_ implement [drop][drop()] semantics and can only be destroyed via + /// [`destroy_instance()`][Instance::destroy_instance()]. 
#[inline] pub unsafe fn create_instance( &self, diff --git a/third_party/rust/ash/src/extensions/ext/full_screen_exclusive.rs b/third_party/rust/ash/src/extensions/ext/full_screen_exclusive.rs index 3cb73cdaeb5c..c5118696d23f 100644 --- a/third_party/rust/ash/src/extensions/ext/full_screen_exclusive.rs +++ b/third_party/rust/ash/src/extensions/ext/full_screen_exclusive.rs @@ -1,9 +1,10 @@ use crate::prelude::*; use crate::vk; -use crate::{Device, Instance}; +use crate::{Device, Entry, Instance}; use std::ffi::CStr; use std::mem; +/// #[derive(Clone)] pub struct FullScreenExclusive { handle: vk::Device, @@ -11,6 +12,13 @@ pub struct FullScreenExclusive { } impl FullScreenExclusive { + /// # Warning + /// [`Instance`] functions cannot be loaded from a [`Device`] and will always panic when called: + /// - [`Self::get_physical_device_surface_present_modes2()`] + /// + /// Load this struct using an [`Instance`] instead via [`Self::new_from_instance()`] if the + /// above [`Instance`] function is called. This will be solved in the next breaking `ash` + /// release: . pub fn new(instance: &Instance, device: &Device) -> Self { let handle = device.handle(); let fp = vk::ExtFullScreenExclusiveFn::load(|name| unsafe { @@ -19,6 +27,19 @@ impl FullScreenExclusive { Self { handle, fp } } + /// Loads all functions on the [`Instance`] instead of [`Device`]. This incurs an extra + /// dispatch table for [`Device`] functions but also allows the [`Instance`] function to be + /// loaded instead of always panicking. See also [`Self::new()`] for more details. + /// + /// It is okay to pass [`vk::Device::null()`] when this struct is only used to call the + /// [`Instance`] function. + pub fn new_from_instance(entry: &Entry, instance: &Instance, device: vk::Device) -> Self { + let fp = vk::ExtFullScreenExclusiveFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + Self { handle: device, fp } + } + /// #[inline] pub unsafe fn acquire_full_screen_exclusive_mode( @@ -29,6 +50,10 @@ impl FullScreenExclusive { } /// + /// + /// # Warning + /// + /// Function will always panic unless this struct is loaded via [`Self::new_from_instance()`]. 
#[inline] pub unsafe fn get_physical_device_surface_present_modes2( &self, diff --git a/third_party/rust/ash/src/extensions/ext/mod.rs b/third_party/rust/ash/src/extensions/ext/mod.rs index 26a78f77d622..8d4c5a2f240b 100644 --- a/third_party/rust/ash/src/extensions/ext/mod.rs +++ b/third_party/rust/ash/src/extensions/ext/mod.rs @@ -17,8 +17,10 @@ pub use self::image_drm_format_modifier::ImageDrmFormatModifier; pub use self::mesh_shader::MeshShader; pub use self::metal_surface::MetalSurface; pub use self::physical_device_drm::PhysicalDeviceDrm; +pub use self::pipeline_properties::PipelineProperties; pub use self::private_data::PrivateData; pub use self::sample_locations::SampleLocations; +pub use self::shader_object::ShaderObject; pub use self::tooling_info::ToolingInfo; mod acquire_drm_display; @@ -40,6 +42,8 @@ mod image_drm_format_modifier; mod mesh_shader; mod metal_surface; mod physical_device_drm; +mod pipeline_properties; mod private_data; mod sample_locations; +mod shader_object; mod tooling_info; diff --git a/third_party/rust/ash/src/extensions/ext/pipeline_properties.rs b/third_party/rust/ash/src/extensions/ext/pipeline_properties.rs new file mode 100644 index 000000000000..09b6b19e6f06 --- /dev/null +++ b/third_party/rust/ash/src/extensions/ext/pipeline_properties.rs @@ -0,0 +1,52 @@ +use crate::prelude::*; +use crate::vk; +use crate::{Device, Instance}; +use std::ffi::CStr; +use std::mem; + +/// +#[derive(Clone)] +pub struct PipelineProperties { + handle: vk::Device, + fp: vk::ExtPipelinePropertiesFn, +} + +impl PipelineProperties { + pub fn new(instance: &Instance, device: &Device) -> Self { + let handle = device.handle(); + let fp = vk::ExtPipelinePropertiesFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(handle, name.as_ptr())) + }); + Self { handle, fp } + } + + /// + #[inline] + pub unsafe fn get_pipeline_properties( + &self, + pipeline_info: &vk::PipelineInfoEXT, + pipeline_properties: &mut impl vk::GetPipelinePropertiesEXTParamPipelineProperties, + ) -> VkResult<()> { + (self.fp.get_pipeline_properties_ext)( + self.handle, + pipeline_info, + <*mut _>::cast(pipeline_properties), + ) + .result() + } + + #[inline] + pub const fn name() -> &'static CStr { + vk::ExtPipelinePropertiesFn::name() + } + + #[inline] + pub fn fp(&self) -> &vk::ExtPipelinePropertiesFn { + &self.fp + } + + #[inline] + pub fn device(&self) -> vk::Device { + self.handle + } +} diff --git a/third_party/rust/ash/src/extensions/ext/shader_object.rs b/third_party/rust/ash/src/extensions/ext/shader_object.rs new file mode 100644 index 000000000000..1b5ff3a95fd2 --- /dev/null +++ b/third_party/rust/ash/src/extensions/ext/shader_object.rs @@ -0,0 +1,719 @@ +use crate::prelude::*; +use crate::vk; +use crate::RawPtr; +use crate::{Device, Instance}; +use std::ffi::CStr; +use std::mem; +use std::ptr; + +/// +#[derive(Clone)] +pub struct ShaderObject { + handle: vk::Device, + fp: vk::ExtShaderObjectFn, +} + +impl ShaderObject { + pub fn new(instance: &Instance, device: &Device) -> Self { + let handle = device.handle(); + let fp = vk::ExtShaderObjectFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(handle, name.as_ptr())) + }); + Self { handle, fp } + } + + /// + #[inline] + pub unsafe fn create_shaders( + &self, + create_infos: &[vk::ShaderCreateInfoEXT], + allocator: Option<&vk::AllocationCallbacks>, + ) -> VkResult<Vec<vk::ShaderEXT>> { + let mut shaders = Vec::with_capacity(create_infos.len()); + (self.fp.create_shaders_ext)( + self.handle, + create_infos.len() as u32, + 
+
+    ///
+    #[inline]
+    pub unsafe fn destroy_shader(
+        &self,
+        shader: vk::ShaderEXT,
+        allocator: Option<&vk::AllocationCallbacks>,
+    ) {
+        (self.fp.destroy_shader_ext)(self.handle, shader, allocator.as_raw_ptr())
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn get_shader_binary_data(&self, shader: vk::ShaderEXT) -> VkResult<Vec<u8>> {
+        read_into_uninitialized_vector(|count, data: *mut u8| {
+            (self.fp.get_shader_binary_data_ext)(self.handle, shader, count, data.cast())
+        })
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_bind_shaders(
+        &self,
+        command_buffer: vk::CommandBuffer,
+        stages: &[vk::ShaderStageFlags],
+        shaders: &[vk::ShaderEXT],
+    ) {
+        assert_eq!(stages.len(), shaders.len());
+        (self.fp.cmd_bind_shaders_ext)(
+            command_buffer,
+            stages.len() as u32,
+            stages.as_ptr(),
+            shaders.as_ptr(),
+        )
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_set_vertex_input(
+        &self,
+        command_buffer: vk::CommandBuffer,
+        vertex_binding_descriptions: &[vk::VertexInputBindingDescription2EXT],
+        vertex_attribute_descriptions: &[vk::VertexInputAttributeDescription2EXT],
+    ) {
+        (self.fp.cmd_set_vertex_input_ext)(
+            command_buffer,
+            vertex_binding_descriptions.len() as u32,
+            vertex_binding_descriptions.as_ptr(),
+            vertex_attribute_descriptions.len() as u32,
+            vertex_attribute_descriptions.as_ptr(),
+        )
+    }
+
+    // --- extended_dynamic_state functions ---
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_set_cull_mode(
+        &self,
+        command_buffer: vk::CommandBuffer,
+        cull_mode: vk::CullModeFlags,
+    ) {
+        (self.fp.cmd_set_cull_mode_ext)(command_buffer, cull_mode)
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_set_front_face(
+        &self,
+        command_buffer: vk::CommandBuffer,
+        front_face: vk::FrontFace,
+    ) {
+        (self.fp.cmd_set_front_face_ext)(command_buffer, front_face)
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_set_primitive_topology(
+        &self,
+        command_buffer: vk::CommandBuffer,
+        primitive_topology: vk::PrimitiveTopology,
+    ) {
+        (self.fp.cmd_set_primitive_topology_ext)(command_buffer, primitive_topology)
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_set_viewport_with_count(
+        &self,
+        command_buffer: vk::CommandBuffer,
+        viewports: &[vk::Viewport],
+    ) {
+        (self.fp.cmd_set_viewport_with_count_ext)(
+            command_buffer,
+            viewports.len() as u32,
+            viewports.as_ptr(),
+        )
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_set_scissor_with_count(
+        &self,
+        command_buffer: vk::CommandBuffer,
+        scissors: &[vk::Rect2D],
+    ) {
+        (self.fp.cmd_set_scissor_with_count_ext)(
+            command_buffer,
+            scissors.len() as u32,
+            scissors.as_ptr(),
+        )
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_bind_vertex_buffers2(
+        &self,
+        command_buffer: vk::CommandBuffer,
+        first_binding: u32,
+        buffers: &[vk::Buffer],
+        offsets: &[vk::DeviceSize],
+        sizes: Option<&[vk::DeviceSize]>,
+        strides: Option<&[vk::DeviceSize]>,
+    ) {
+        assert_eq!(offsets.len(), buffers.len());
+        let p_sizes = if let Some(sizes) = sizes {
+            assert_eq!(sizes.len(), buffers.len());
+            sizes.as_ptr()
+        } else {
+            ptr::null()
+        };
+        let p_strides = if let Some(strides) = strides {
+            assert_eq!(strides.len(), buffers.len());
+            strides.as_ptr()
+        } else {
+            ptr::null()
+        };
+        (self.fp.cmd_bind_vertex_buffers2_ext)(
+            command_buffer,
+            first_binding,
+            buffers.len() as u32,
+            buffers.as_ptr(),
+            offsets.as_ptr(),
+            p_sizes,
+            p_strides,
+        )
+    }
+
+    ///
+    #[inline]
+    pub unsafe fn cmd_set_depth_test_enable(
+        &self,
+        command_buffer: vk::CommandBuffer,
depth_test_enable: bool, + ) { + (self.fp.cmd_set_depth_test_enable_ext)(command_buffer, depth_test_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_depth_write_enable( + &self, + command_buffer: vk::CommandBuffer, + depth_write_enable: bool, + ) { + (self.fp.cmd_set_depth_write_enable_ext)(command_buffer, depth_write_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_depth_compare_op( + &self, + command_buffer: vk::CommandBuffer, + depth_compare_op: vk::CompareOp, + ) { + (self.fp.cmd_set_depth_compare_op_ext)(command_buffer, depth_compare_op) + } + + /// + #[inline] + pub unsafe fn cmd_set_depth_bounds_test_enable( + &self, + command_buffer: vk::CommandBuffer, + depth_bounds_test_enable: bool, + ) { + (self.fp.cmd_set_depth_bounds_test_enable_ext)( + command_buffer, + depth_bounds_test_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_stencil_test_enable( + &self, + command_buffer: vk::CommandBuffer, + stencil_test_enable: bool, + ) { + (self.fp.cmd_set_stencil_test_enable_ext)(command_buffer, stencil_test_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_stencil_op( + &self, + command_buffer: vk::CommandBuffer, + face_mask: vk::StencilFaceFlags, + fail_op: vk::StencilOp, + pass_op: vk::StencilOp, + depth_fail_op: vk::StencilOp, + compare_op: vk::CompareOp, + ) { + (self.fp.cmd_set_stencil_op_ext)( + command_buffer, + face_mask, + fail_op, + pass_op, + depth_fail_op, + compare_op, + ) + } + + // --- extended_dynamic_state2 functions --- + + /// + #[inline] + pub unsafe fn cmd_set_patch_control_points( + &self, + command_buffer: vk::CommandBuffer, + patch_control_points: u32, + ) { + (self.fp.cmd_set_patch_control_points_ext)(command_buffer, patch_control_points) + } + + /// + #[inline] + pub unsafe fn cmd_set_rasterizer_discard_enable( + &self, + command_buffer: vk::CommandBuffer, + rasterizer_discard_enable: bool, + ) { + (self.fp.cmd_set_rasterizer_discard_enable_ext)( + command_buffer, + rasterizer_discard_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_depth_bias_enable( + &self, + command_buffer: vk::CommandBuffer, + depth_bias_enable: bool, + ) { + (self.fp.cmd_set_depth_bias_enable_ext)(command_buffer, depth_bias_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_logic_op( + &self, + command_buffer: vk::CommandBuffer, + logic_op: vk::LogicOp, + ) { + (self.fp.cmd_set_logic_op_ext)(command_buffer, logic_op) + } + + /// + #[inline] + pub unsafe fn cmd_set_primitive_restart_enable( + &self, + command_buffer: vk::CommandBuffer, + primitive_restart_enable: bool, + ) { + (self.fp.cmd_set_primitive_restart_enable_ext)( + command_buffer, + primitive_restart_enable.into(), + ) + } + + // --- extended_dynamic_state3 functions --- + + /// + #[inline] + pub unsafe fn cmd_set_tessellation_domain_origin( + &self, + command_buffer: vk::CommandBuffer, + domain_origin: vk::TessellationDomainOrigin, + ) { + (self.fp.cmd_set_tessellation_domain_origin_ext)(command_buffer, domain_origin) + } + + /// + #[inline] + pub unsafe fn cmd_set_depth_clamp_enable( + &self, + command_buffer: vk::CommandBuffer, + depth_clamp_enable: bool, + ) { + (self.fp.cmd_set_depth_clamp_enable_ext)(command_buffer, depth_clamp_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_polygon_mode( + &self, + command_buffer: vk::CommandBuffer, + polygon_mode: vk::PolygonMode, + ) { + (self.fp.cmd_set_polygon_mode_ext)(command_buffer, polygon_mode) + } + + /// + #[inline] + pub unsafe fn cmd_set_rasterization_samples( + &self, + 
command_buffer: vk::CommandBuffer, + rasterization_samples: vk::SampleCountFlags, + ) { + (self.fp.cmd_set_rasterization_samples_ext)(command_buffer, rasterization_samples) + } + + /// + #[inline] + pub unsafe fn cmd_set_sample_mask( + &self, + command_buffer: vk::CommandBuffer, + samples: vk::SampleCountFlags, + sample_mask: &[vk::SampleMask], + ) { + assert!( + samples.as_raw().is_power_of_two(), + "Only one SampleCount bit must be set" + ); + assert_eq!(samples.as_raw() as usize / 32, sample_mask.len()); + (self.fp.cmd_set_sample_mask_ext)(command_buffer, samples, sample_mask.as_ptr()) + } + + /// + #[inline] + pub unsafe fn cmd_set_alpha_to_coverage_enable( + &self, + command_buffer: vk::CommandBuffer, + alpha_to_coverage_enable: bool, + ) { + (self.fp.cmd_set_alpha_to_coverage_enable_ext)( + command_buffer, + alpha_to_coverage_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_alpha_to_one_enable( + &self, + command_buffer: vk::CommandBuffer, + alpha_to_one_enable: bool, + ) { + (self.fp.cmd_set_alpha_to_one_enable_ext)(command_buffer, alpha_to_one_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_logic_op_enable( + &self, + command_buffer: vk::CommandBuffer, + logic_op_enable: bool, + ) { + (self.fp.cmd_set_logic_op_enable_ext)(command_buffer, logic_op_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_color_blend_enable( + &self, + command_buffer: vk::CommandBuffer, + first_attachment: u32, + color_blend_enables: &[vk::Bool32], + ) { + (self.fp.cmd_set_color_blend_enable_ext)( + command_buffer, + first_attachment, + color_blend_enables.len() as u32, + color_blend_enables.as_ptr(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_color_blend_equation( + &self, + command_buffer: vk::CommandBuffer, + first_attachment: u32, + color_blend_equations: &[vk::ColorBlendEquationEXT], + ) { + (self.fp.cmd_set_color_blend_equation_ext)( + command_buffer, + first_attachment, + color_blend_equations.len() as u32, + color_blend_equations.as_ptr(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_color_write_mask( + &self, + command_buffer: vk::CommandBuffer, + first_attachment: u32, + color_write_masks: &[vk::ColorComponentFlags], + ) { + (self.fp.cmd_set_color_write_mask_ext)( + command_buffer, + first_attachment, + color_write_masks.len() as u32, + color_write_masks.as_ptr(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_rasterization_stream( + &self, + command_buffer: vk::CommandBuffer, + rasterization_stream: u32, + ) { + (self.fp.cmd_set_rasterization_stream_ext)(command_buffer, rasterization_stream) + } + + /// + #[inline] + pub unsafe fn cmd_set_conservative_rasterization_mode( + &self, + command_buffer: vk::CommandBuffer, + conservative_rasterization_mode: vk::ConservativeRasterizationModeEXT, + ) { + (self.fp.cmd_set_conservative_rasterization_mode_ext)( + command_buffer, + conservative_rasterization_mode, + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_extra_primitive_overestimation_size( + &self, + command_buffer: vk::CommandBuffer, + extra_primitive_overestimation_size: f32, + ) { + (self.fp.cmd_set_extra_primitive_overestimation_size_ext)( + command_buffer, + extra_primitive_overestimation_size, + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_depth_clip_enable( + &self, + command_buffer: vk::CommandBuffer, + depth_clip_enable: bool, + ) { + (self.fp.cmd_set_depth_clip_enable_ext)(command_buffer, depth_clip_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_sample_locations_enable( + &self, + command_buffer: 
vk::CommandBuffer, + sample_locations_enable: bool, + ) { + (self.fp.cmd_set_sample_locations_enable_ext)( + command_buffer, + sample_locations_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_color_blend_advanced( + &self, + command_buffer: vk::CommandBuffer, + first_attachment: u32, + color_blend_advanced: &[vk::ColorBlendAdvancedEXT], + ) { + (self.fp.cmd_set_color_blend_advanced_ext)( + command_buffer, + first_attachment, + color_blend_advanced.len() as u32, + color_blend_advanced.as_ptr(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_provoking_vertex_mode( + &self, + command_buffer: vk::CommandBuffer, + provoking_vertex_mode: vk::ProvokingVertexModeEXT, + ) { + (self.fp.cmd_set_provoking_vertex_mode_ext)(command_buffer, provoking_vertex_mode) + } + + /// + #[inline] + pub unsafe fn cmd_set_line_rasterization_mode( + &self, + command_buffer: vk::CommandBuffer, + line_rasterization_mode: vk::LineRasterizationModeEXT, + ) { + (self.fp.cmd_set_line_rasterization_mode_ext)(command_buffer, line_rasterization_mode) + } + + /// + #[inline] + pub unsafe fn cmd_set_line_stipple_enable( + &self, + command_buffer: vk::CommandBuffer, + stippled_line_enable: bool, + ) { + (self.fp.cmd_set_line_stipple_enable_ext)(command_buffer, stippled_line_enable.into()) + } + + /// + #[inline] + pub unsafe fn cmd_set_depth_clip_negative_one_to_one( + &self, + command_buffer: vk::CommandBuffer, + negative_one_to_one: bool, + ) { + (self.fp.cmd_set_depth_clip_negative_one_to_one_ext)( + command_buffer, + negative_one_to_one.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_viewport_w_scaling_enable_nv( + &self, + command_buffer: vk::CommandBuffer, + viewport_w_scaling_enable: bool, + ) { + (self.fp.cmd_set_viewport_w_scaling_enable_nv)( + command_buffer, + viewport_w_scaling_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_viewport_swizzle_nv( + &self, + command_buffer: vk::CommandBuffer, + first_attachment: u32, + viewport_swizzles: &[vk::ViewportSwizzleNV], + ) { + (self.fp.cmd_set_viewport_swizzle_nv)( + command_buffer, + first_attachment, + viewport_swizzles.len() as u32, + viewport_swizzles.as_ptr(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_coverage_to_color_enable_nv( + &self, + command_buffer: vk::CommandBuffer, + coverage_to_color_enable: bool, + ) { + (self.fp.cmd_set_coverage_to_color_enable_nv)( + command_buffer, + coverage_to_color_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_coverage_to_color_location_nv( + &self, + command_buffer: vk::CommandBuffer, + coverage_to_color_location: u32, + ) { + (self.fp.cmd_set_coverage_to_color_location_nv)(command_buffer, coverage_to_color_location) + } + + /// + #[inline] + pub unsafe fn cmd_set_coverage_modulation_mode_nv( + &self, + command_buffer: vk::CommandBuffer, + coverage_modulation_mode: vk::CoverageModulationModeNV, + ) { + (self.fp.cmd_set_coverage_modulation_mode_nv)(command_buffer, coverage_modulation_mode) + } + + /// + #[inline] + pub unsafe fn cmd_set_coverage_modulation_table_enable_nv( + &self, + command_buffer: vk::CommandBuffer, + coverage_modulation_table_enable: bool, + ) { + (self.fp.cmd_set_coverage_modulation_table_enable_nv)( + command_buffer, + coverage_modulation_table_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_coverage_modulation_table_nv( + &self, + command_buffer: vk::CommandBuffer, + coverage_modulation_table: &[f32], + ) { + (self.fp.cmd_set_coverage_modulation_table_nv)( + command_buffer, + coverage_modulation_table.len() as 
u32, + coverage_modulation_table.as_ptr(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_shading_rate_image_enable_nv( + &self, + command_buffer: vk::CommandBuffer, + shading_rate_image_enable: bool, + ) { + (self.fp.cmd_set_shading_rate_image_enable_nv)( + command_buffer, + shading_rate_image_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_representative_fragment_test_enable_nv( + &self, + command_buffer: vk::CommandBuffer, + representative_fragment_test_enable: bool, + ) { + (self.fp.cmd_set_representative_fragment_test_enable_nv)( + command_buffer, + representative_fragment_test_enable.into(), + ) + } + + /// + #[inline] + pub unsafe fn cmd_set_coverage_reduction_mode_nv( + &self, + command_buffer: vk::CommandBuffer, + coverage_reduction_mode: vk::CoverageReductionModeNV, + ) { + (self.fp.cmd_set_coverage_reduction_mode_nv)(command_buffer, coverage_reduction_mode) + } + + pub const fn name() -> &'static CStr { + vk::ExtShaderObjectFn::name() + } + + #[inline] + pub fn fp(&self) -> &vk::ExtShaderObjectFn { + &self.fp + } + + #[inline] + pub fn device(&self) -> vk::Device { + self.handle + } +} diff --git a/third_party/rust/ash/src/extensions/khr/device_group.rs b/third_party/rust/ash/src/extensions/khr/device_group.rs index 356db71e4d55..6863b8356563 100644 --- a/third_party/rust/ash/src/extensions/khr/device_group.rs +++ b/third_party/rust/ash/src/extensions/khr/device_group.rs @@ -2,7 +2,7 @@ use super::Swapchain; use crate::prelude::*; use crate::vk; -use crate::{Device, Instance}; +use crate::{Device, Entry, Instance}; use std::ffi::CStr; use std::mem; @@ -14,6 +14,13 @@ pub struct DeviceGroup { } impl DeviceGroup { + /// # Warning + /// [`Instance`] functions cannot be loaded from a [`Device`] and will always panic when called: + /// - [`Self::get_physical_device_present_rectangles()`] + /// + /// Load this struct using an [`Instance`] instead via [`Self::new_from_instance()`] if the + /// above [`Instance`] function is called. This will be solved in the next breaking `ash` + /// release: . pub fn new(instance: &Instance, device: &Device) -> Self { let handle = device.handle(); let fp = vk::KhrDeviceGroupFn::load(|name| unsafe { @@ -22,6 +29,19 @@ impl DeviceGroup { Self { handle, fp } } + /// Loads all functions on the [`Instance`] instead of [`Device`]. This incurs an extra + /// dispatch table for [`Device`] functions but also allows the [`Instance`] function to be + /// loaded instead of always panicking. See also [`Self::new()`] for more details. + /// + /// It is okay to pass [`vk::Device::null()`] when this struct is only used to call the + /// [`Instance`] function. + pub fn new_from_instance(entry: &Entry, instance: &Instance, device: vk::Device) -> Self { + let fp = vk::KhrDeviceGroupFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + Self { handle: device, fp } + } + /// #[inline] pub unsafe fn get_device_group_peer_memory_features( @@ -112,6 +132,10 @@ impl DeviceGroup { /// /// [Vulkan 1.1]: https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VK_VERSION_1_1.html /// [`VK_KHR_surface`]: https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VK_KHR_surface.html + /// + /// # Warning + /// + /// Function will always panic unless this struct is loaded via [`Self::new_from_instance()`]. 
     #[inline]
     pub unsafe fn get_physical_device_present_rectangles(
         &self,
diff --git a/third_party/rust/ash/src/extensions/khr/device_group_creation.rs b/third_party/rust/ash/src/extensions/khr/device_group_creation.rs
index 9a21a714c921..60f5f337615d 100644
--- a/third_party/rust/ash/src/extensions/khr/device_group_creation.rs
+++ b/third_party/rust/ash/src/extensions/khr/device_group_creation.rs
@@ -59,8 +59,14 @@ impl DeviceGroupCreation {
         &self.fp
     }
 
+    #[deprecated = "typo: this function is called `device()`, but returns an `Instance`."]
     #[inline]
     pub fn device(&self) -> vk::Instance {
         self.handle
     }
+
+    #[inline]
+    pub fn instance(&self) -> vk::Instance {
+        self.handle
+    }
 }
diff --git a/third_party/rust/ash/src/extensions/khr/external_memory_fd.rs b/third_party/rust/ash/src/extensions/khr/external_memory_fd.rs
index 902349802f08..77e66c22a7d5 100644
--- a/third_party/rust/ash/src/extensions/khr/external_memory_fd.rs
+++ b/third_party/rust/ash/src/extensions/khr/external_memory_fd.rs
@@ -21,9 +21,9 @@ impl ExternalMemoryFd {
 
     ///
     #[inline]
-    pub unsafe fn get_memory_fd(&self, create_info: &vk::MemoryGetFdInfoKHR) -> VkResult<i32> {
+    pub unsafe fn get_memory_fd(&self, get_fd_info: &vk::MemoryGetFdInfoKHR) -> VkResult<i32> {
         let mut fd = -1;
-        (self.fp.get_memory_fd_khr)(self.handle, create_info, &mut fd).result_with_success(fd)
+        (self.fp.get_memory_fd_khr)(self.handle, get_fd_info, &mut fd).result_with_success(fd)
     }
 
     ///
diff --git a/third_party/rust/ash/src/extensions/khr/maintenance4.rs b/third_party/rust/ash/src/extensions/khr/maintenance4.rs
index 003e1a9cf23d..7ec178163611 100644
--- a/third_party/rust/ash/src/extensions/khr/maintenance4.rs
+++ b/third_party/rust/ash/src/extensions/khr/maintenance4.rs
@@ -22,32 +22,32 @@ impl Maintenance4 {
     #[inline]
     pub unsafe fn get_device_buffer_memory_requirements(
         &self,
-        create_info: &vk::DeviceBufferMemoryRequirementsKHR,
+        memory_requirements: &vk::DeviceBufferMemoryRequirementsKHR,
         out: &mut vk::MemoryRequirements2,
     ) {
-        (self.fp.get_device_buffer_memory_requirements_khr)(self.handle, create_info, out)
+        (self.fp.get_device_buffer_memory_requirements_khr)(self.handle, memory_requirements, out)
     }
 
     ///
     #[inline]
     pub unsafe fn get_device_image_memory_requirements(
         &self,
-        create_info: &vk::DeviceImageMemoryRequirementsKHR,
+        memory_requirements: &vk::DeviceImageMemoryRequirementsKHR,
         out: &mut vk::MemoryRequirements2,
     ) {
-        (self.fp.get_device_image_memory_requirements_khr)(self.handle, create_info, out)
+        (self.fp.get_device_image_memory_requirements_khr)(self.handle, memory_requirements, out)
     }
 
     /// Retrieve the number of elements to pass to [`get_device_image_sparse_memory_requirements()`][Self::get_device_image_sparse_memory_requirements()]
     #[inline]
     pub unsafe fn get_device_image_sparse_memory_requirements_len(
         &self,
-        create_info: &vk::DeviceImageMemoryRequirementsKHR,
+        memory_requirements: &vk::DeviceImageMemoryRequirementsKHR,
     ) -> usize {
         let mut count = 0;
         (self.fp.get_device_image_sparse_memory_requirements_khr)(
             self.handle,
-            create_info,
+            memory_requirements,
             &mut count,
             std::ptr::null_mut(),
         );
@@ -61,13 +61,13 @@
     #[inline]
     pub unsafe fn get_device_image_sparse_memory_requirements(
         &self,
-        create_info: &vk::DeviceImageMemoryRequirementsKHR,
+        memory_requirements: &vk::DeviceImageMemoryRequirementsKHR,
         out: &mut [vk::SparseImageMemoryRequirements2],
     ) {
         let mut count = out.len() as u32;
         (self.fp.get_device_image_sparse_memory_requirements_khr)(
             self.handle,
-            create_info,
+            memory_requirements,
             &mut count,
             out.as_mut_ptr(),
         );
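Tying the `DeviceGroup` warning earlier in this patch to a concrete call site, a sketch (the `entry`, `instance` and `device` bindings are assumed to exist on the caller's side):

```rust
use ash::{extensions::khr::DeviceGroup, vk};

// Sketch: loading through the instance routes device-level calls through an
// extra vkGetInstanceProcAddr dispatch, but it also resolves the
// instance-level vkGetPhysicalDevicePresentRectanglesKHR instead of leaving
// it as a null pointer that panics on first use.
unsafe fn load_device_group(
    entry: &ash::Entry,
    instance: &ash::Instance,
    device: &ash::Device,
) -> DeviceGroup {
    // Passing a real device handle keeps the device-level entry points
    // usable from the same struct.
    DeviceGroup::new_from_instance(entry, instance, device.handle())
}
```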
diff --git a/third_party/rust/ash/src/extensions/khr/mod.rs b/third_party/rust/ash/src/extensions/khr/mod.rs
index ea809b43da88..ff84d0dbfe22 100644
--- a/third_party/rust/ash/src/extensions/khr/mod.rs
+++ b/third_party/rust/ash/src/extensions/khr/mod.rs
@@ -22,6 +22,7 @@ pub use self::get_surface_capabilities2::GetSurfaceCapabilities2;
 pub use self::maintenance1::Maintenance1;
 pub use self::maintenance3::Maintenance3;
 pub use self::maintenance4::Maintenance4;
+pub use self::performance_query::PerformanceQuery;
 pub use self::pipeline_executable_properties::PipelineExecutableProperties;
 pub use self::present_wait::PresentWait;
 pub use self::push_descriptor::PushDescriptor;
@@ -60,6 +61,7 @@ mod get_surface_capabilities2;
 mod maintenance1;
 mod maintenance3;
 mod maintenance4;
+mod performance_query;
 mod pipeline_executable_properties;
 mod present_wait;
 mod push_descriptor;
diff --git a/third_party/rust/ash/src/extensions/khr/performance_query.rs b/third_party/rust/ash/src/extensions/khr/performance_query.rs
new file mode 100644
index 000000000000..6910b5a1bb69
--- /dev/null
+++ b/third_party/rust/ash/src/extensions/khr/performance_query.rs
@@ -0,0 +1,121 @@
+use crate::prelude::*;
+use crate::vk;
+use crate::{Entry, Instance};
+use std::ffi::CStr;
+use std::mem;
+use std::ptr;
+
+///
+#[derive(Clone)]
+pub struct PerformanceQuery {
+    handle: vk::Instance,
+    fp: vk::KhrPerformanceQueryFn,
+}
+
+impl PerformanceQuery {
+    pub fn new(entry: &Entry, instance: &Instance) -> Self {
+        let handle = instance.handle();
+        let fp = vk::KhrPerformanceQueryFn::load(|name| unsafe {
+            mem::transmute(entry.get_instance_proc_addr(handle, name.as_ptr()))
+        });
+        Self { handle, fp }
+    }
+
+    /// Retrieve the number of elements to pass to [`enumerate_physical_device_queue_family_performance_query_counters()`][Self::enumerate_physical_device_queue_family_performance_query_counters()]
+    #[inline]
+    pub unsafe fn enumerate_physical_device_queue_family_performance_query_counters_len(
+        &self,
+        physical_device: vk::PhysicalDevice,
+        queue_family_index: u32,
+    ) -> VkResult<usize> {
+        let mut count = 0;
+        (self
+            .fp
+            .enumerate_physical_device_queue_family_performance_query_counters_khr)(
+            physical_device,
+            queue_family_index,
+            &mut count,
+            ptr::null_mut(),
+            ptr::null_mut(),
+        )
+        .result_with_success(count as usize)
+    }
+
+    ///
+    ///
+    /// Call [`enumerate_physical_device_queue_family_performance_query_counters_len()`][Self::enumerate_physical_device_queue_family_performance_query_counters_len()] to query the number of elements to pass to `out_counters` and `out_counter_descriptions`.
+    /// Be sure to [`Default::default()`]-initialize these elements and optionally set their `p_next` pointer.
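A sketch of the two-call pattern this documentation describes (the `pq`, `pdev` and `family` bindings are hypothetical):

```rust
use ash::{extensions::khr::PerformanceQuery, prelude::VkResult, vk};

// Sketch: query the count, default-initialize both output slices so their
// s_type/p_next fields are valid, then fill them with a second call.
unsafe fn counters(
    pq: &PerformanceQuery,
    pdev: vk::PhysicalDevice,
    family: u32,
) -> VkResult<(
    Vec<vk::PerformanceCounterKHR>,
    Vec<vk::PerformanceCounterDescriptionKHR>,
)> {
    let n =
        pq.enumerate_physical_device_queue_family_performance_query_counters_len(pdev, family)?;
    let mut counters = vec![vk::PerformanceCounterKHR::default(); n];
    let mut descriptions = vec![vk::PerformanceCounterDescriptionKHR::default(); n];
    pq.enumerate_physical_device_queue_family_performance_query_counters(
        pdev,
        family,
        &mut counters,
        &mut descriptions,
    )?;
    Ok((counters, descriptions))
}
```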
+ #[inline] + pub unsafe fn enumerate_physical_device_queue_family_performance_query_counters( + &self, + physical_device: vk::PhysicalDevice, + queue_family_index: u32, + out_counters: &mut [vk::PerformanceCounterKHR], + out_counter_descriptions: &mut [vk::PerformanceCounterDescriptionKHR], + ) -> VkResult<()> { + assert_eq!(out_counters.len(), out_counter_descriptions.len()); + let mut count = out_counters.len() as u32; + (self + .fp + .enumerate_physical_device_queue_family_performance_query_counters_khr)( + physical_device, + queue_family_index, + &mut count, + out_counters.as_mut_ptr(), + out_counter_descriptions.as_mut_ptr(), + ) + .result()?; + assert_eq!(count as usize, out_counters.len()); + assert_eq!(count as usize, out_counter_descriptions.len()); + Ok(()) + } + + /// + #[inline] + pub unsafe fn get_physical_device_queue_family_performance_query_passes( + &self, + physical_device: vk::PhysicalDevice, + performance_query_create_info: &vk::QueryPoolPerformanceCreateInfoKHR, + ) -> u32 { + let mut num_passes = 0; + (self + .fp + .get_physical_device_queue_family_performance_query_passes_khr)( + physical_device, + performance_query_create_info, + &mut num_passes, + ); + num_passes + } + + /// + #[inline] + pub unsafe fn acquire_profiling_lock( + &self, + device: vk::Device, + info: &vk::AcquireProfilingLockInfoKHR, + ) -> VkResult<()> { + (self.fp.acquire_profiling_lock_khr)(device, info).result() + } + + /// + #[inline] + pub unsafe fn release_profiling_lock(&self, device: vk::Device) { + (self.fp.release_profiling_lock_khr)(device) + } + + #[inline] + pub const fn name() -> &'static CStr { + vk::KhrPerformanceQueryFn::name() + } + + #[inline] + pub fn fp(&self) -> &vk::KhrPerformanceQueryFn { + &self.fp + } + + #[inline] + pub fn instance(&self) -> vk::Instance { + self.handle + } +} diff --git a/third_party/rust/ash/src/extensions/khr/swapchain.rs b/third_party/rust/ash/src/extensions/khr/swapchain.rs index 8f57729de7c6..ee608efd8289 100755 --- a/third_party/rust/ash/src/extensions/khr/swapchain.rs +++ b/third_party/rust/ash/src/extensions/khr/swapchain.rs @@ -3,10 +3,11 @@ use super::DeviceGroup; use crate::prelude::*; use crate::vk; use crate::RawPtr; -use crate::{Device, Instance}; +use crate::{Device, Entry, Instance}; use std::ffi::CStr; use std::mem; +/// #[derive(Clone)] pub struct Swapchain { handle: vk::Device, @@ -14,6 +15,13 @@ pub struct Swapchain { } impl Swapchain { + /// # Warning + /// [`Instance`] functions cannot be loaded from a [`Device`] and will always panic when called: + /// - [`Self::get_physical_device_present_rectangles()`] + /// + /// Load this struct using an [`Instance`] instead via [`Self::new_from_instance()`] if the + /// above [`Instance`] function is called. This will be solved in the next breaking `ash` + /// release: . pub fn new(instance: &Instance, device: &Device) -> Self { let handle = device.handle(); let fp = vk::KhrSwapchainFn::load(|name| unsafe { @@ -22,6 +30,19 @@ impl Swapchain { Self { handle, fp } } + /// Loads all functions on the [`Instance`] instead of [`Device`]. This incurs an extra + /// dispatch table for [`Device`] functions but also allows the [`Instance`] function to be + /// loaded instead of always panicking. See also [`Self::new()`] for more details. + /// + /// It is okay to pass [`vk::Device::null()`] when this struct is only used to call the + /// [`Instance`] function. 
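The constructor documented above is aimed at callers that probe surface support before any `VkDevice` exists; a sketch under that assumption (`entry`, `instance`, `pdev` and `surface` are hypothetical bindings created elsewhere):

```rust
use ash::{extensions::khr::Swapchain, prelude::VkResult, vk};

// Sketch: no VkDevice has been created yet, so vk::Device::null() is passed
// and only the instance-level function is called through this struct.
unsafe fn present_rectangles(
    entry: &ash::Entry,
    instance: &ash::Instance,
    pdev: vk::PhysicalDevice,
    surface: vk::SurfaceKHR,
) -> VkResult<Vec<vk::Rect2D>> {
    let swapchain = Swapchain::new_from_instance(entry, instance, vk::Device::null());
    swapchain.get_physical_device_present_rectangles(pdev, surface)
}
```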
+ pub fn new_from_instance(entry: &Entry, instance: &Instance, device: vk::Device) -> Self { + let fp = vk::KhrSwapchainFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + Self { handle: device, fp } + } + /// #[inline] pub unsafe fn create_swapchain( @@ -153,6 +174,10 @@ impl Swapchain { /// /// [Vulkan 1.1]: https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VK_VERSION_1_1.html /// [`VK_KHR_surface`]: https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VK_KHR_surface.html + /// + /// # Warning + /// + /// Function will always panic unless this struct is loaded via [`Self::new_from_instance()`]. #[inline] pub unsafe fn get_physical_device_present_rectangles( &self, diff --git a/third_party/rust/ash/src/instance.rs b/third_party/rust/ash/src/instance.rs index 8e3bd1a8b9b4..da3ef99e1422 100644 --- a/third_party/rust/ash/src/instance.rs +++ b/third_party/rust/ash/src/instance.rs @@ -1,3 +1,5 @@ +#[cfg(doc)] +use super::Entry; use crate::device::Device; use crate::prelude::*; use crate::vk; @@ -335,9 +337,17 @@ impl Instance { /// /// /// # Safety - /// In order for the created [`Device`] to be valid for the duration of its - /// usage, the [`Instance`] this was called on must be dropped later than the - /// resulting [`Device`]. + /// + /// There is a [parent/child relation] between [`Instance`] and the resulting [`Device`]. The + /// application must not [destroy][Instance::destroy_instance()] the parent [`Instance`] object + /// before first [destroying][Device::destroy_device()] the returned [`Device`] child object. + /// [`Device`] does _not_ implement [drop][drop()] semantics and can only be destroyed via + /// [`destroy_device()`][Device::destroy_device()]. + /// + /// See the [`Entry::create_instance()`] documentation for more destruction ordering rules on + /// [`Instance`]. + /// + /// [parent/child relation]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#fundamentals-objectmodel-lifetime #[inline] pub unsafe fn create_device( &self, diff --git a/third_party/rust/ash/src/prelude.rs b/third_party/rust/ash/src/prelude.rs index 321c3aadddb1..e43480326037 100644 --- a/third_party/rust/ash/src/prelude.rs +++ b/third_party/rust/ash/src/prelude.rs @@ -26,13 +26,12 @@ impl vk::Result { } } -/// Repeatedly calls `f` until it does not return [`vk::Result::INCOMPLETE`] anymore, -/// ensuring all available data has been read into the vector. +/// Repeatedly calls `f` until it does not return [`vk::Result::INCOMPLETE`] anymore, ensuring all +/// available data has been read into the vector. /// -/// See for example [`vkEnumerateInstanceExtensionProperties`]: the number of available -/// items may change between calls; [`vk::Result::INCOMPLETE`] is returned when the count -/// increased (and the vector is not large enough after querying the initial size), -/// requiring Ash to try again. +/// See for example [`vkEnumerateInstanceExtensionProperties`]: the number of available items may +/// change between calls; [`vk::Result::INCOMPLETE`] is returned when the count increased (and the +/// vector is not large enough after querying the initial size), requiring Ash to try again. 
 ///
 /// [`vkEnumerateInstanceExtensionProperties`]: https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
 pub(crate) unsafe fn read_into_uninitialized_vector<N: Copy + Default + TryInto<usize>, T>(
@@ -56,18 +55,17 @@ where
 }
 }
 
-/// Repeatedly calls `f` until it does not return [`vk::Result::INCOMPLETE`] anymore,
-/// ensuring all available data has been read into the vector.
+/// Repeatedly calls `f` until it does not return [`vk::Result::INCOMPLETE`] anymore, ensuring all
+/// available data has been read into the vector.
 ///
-/// Items in the target vector are [`default()`][`Default::default()`]-initialized which
-/// is required for [`vk::BaseOutStructure`]-like structs where [`vk::BaseOutStructure::s_type`]
-/// needs to be a valid type and [`vk::BaseOutStructure::p_next`] a valid or
-/// [`null`][`std::ptr::null_mut()`] pointer.
+/// Items in the target vector are [`default()`][Default::default()]-initialized which is required
+/// for [`vk::BaseOutStructure`]-like structs where [`vk::BaseOutStructure::s_type`] needs to be a
+/// valid type and [`vk::BaseOutStructure::p_next`] a valid or [`null`][std::ptr::null_mut()]
+/// pointer.
 ///
-/// See for example [`vkEnumerateInstanceExtensionProperties`]: the number of available
-/// items may change between calls; [`vk::Result::INCOMPLETE`] is returned when the count
-/// increased (and the vector is not large enough after querying the initial size),
-/// requiring Ash to try again.
+/// See for example [`vkEnumerateInstanceExtensionProperties`]: the number of available items may
+/// change between calls; [`vk::Result::INCOMPLETE`] is returned when the count increased (and the
+/// vector is not large enough after querying the initial size), requiring Ash to try again.
 ///
 /// [`vkEnumerateInstanceExtensionProperties`]: https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
 pub(crate) unsafe fn read_into_defaulted_vector<
@@ -116,7 +114,7 @@ pub(crate) fn debug_flags<Value: Into<u64> + Copy>(
         if !first {
             f.write_str(" | ")?;
         }
-        write!(f, "{:b}", accum)?;
+        write!(f, "{accum:b}")?;
     }
     Ok(())
 }
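For context, a minimal sketch of the retry loop these prelude helpers implement; this is an illustration of the pattern, not the vendored code, with the closure shape matching `read_into_uninitialized_vector`:

```rust
use ash::vk;

// Ask the driver for a count, size the buffer, ask again to fill it, and
// start over if the count grew in between (vk::Result::INCOMPLETE).
unsafe fn read_with_retry<T>(
    f: impl Fn(&mut u32, *mut T) -> vk::Result,
) -> Result<Vec<T>, vk::Result> {
    loop {
        let mut count = 0;
        f(&mut count, std::ptr::null_mut()).result()?; // query the size only
        let mut data = Vec::with_capacity(count as usize);
        match f(&mut count, data.as_mut_ptr()) {
            vk::Result::SUCCESS => {
                data.set_len(count as usize); // `f` wrote `count` items
                return Ok(data);
            }
            vk::Result::INCOMPLETE => continue, // count grew; retry
            err => return Err(err),
        }
    }
}
```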
diff --git a/third_party/rust/ash/src/util.rs b/third_party/rust/ash/src/util.rs
index 119fab9c65d1..9827617ef0c0 100644
--- a/third_party/rust/ash/src/util.rs
+++ b/third_party/rust/ash/src/util.rs
@@ -104,7 +104,9 @@ impl<'a, T: Copy + 'a> Iterator for AlignIter<'a, T> {
 /// let words = ash::util::read_spv(&mut std::io::Cursor::new(&SPIRV[..])).unwrap();
 /// ```
 pub fn read_spv<R: io::Read + io::Seek>(x: &mut R) -> io::Result<Vec<u32>> {
+    // TODO use stream_len() once it is stabilized and remove the subsequent rewind() call
     let size = x.seek(io::SeekFrom::End(0))?;
+    x.rewind()?;
     if size % 4 != 0 {
         return Err(io::Error::new(
             io::ErrorKind::InvalidData,
@@ -119,7 +121,6 @@ pub fn read_spv<R: io::Read + io::Seek>(x: &mut R) -> io::Result<Vec<u32>> {
     // Zero-initialize the result to prevent read_exact from possibly
     // reading uninitialized memory.
     let mut result = vec![0u32; words];
-    x.seek(io::SeekFrom::Start(0))?;
     x.read_exact(unsafe {
         slice::from_raw_parts_mut(result.as_mut_ptr().cast::<u8>(), words * 4)
     })?;
diff --git a/third_party/rust/ash/src/vk/aliases.rs b/third_party/rust/ash/src/vk/aliases.rs
index 1e5d20fe2350..7eb57b749150 100644
--- a/third_party/rust/ash/src/vk/aliases.rs
+++ b/third_party/rust/ash/src/vk/aliases.rs
@@ -200,6 +200,8 @@ pub type PhysicalDeviceSubgroupSizeControlPropertiesEXT =
     PhysicalDeviceSubgroupSizeControlProperties;
 pub type PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT =
     PipelineShaderStageRequiredSubgroupSizeCreateInfo;
+pub type ShaderRequiredSubgroupSizeCreateInfoEXT =
+    PipelineShaderStageRequiredSubgroupSizeCreateInfo;
 pub type MemoryOpaqueCaptureAddressAllocateInfoKHR = MemoryOpaqueCaptureAddressAllocateInfo;
 pub type DeviceMemoryOpaqueCaptureAddressInfoKHR = DeviceMemoryOpaqueCaptureAddressInfo;
 pub type PhysicalDevicePipelineCreationCacheControlFeaturesEXT =
diff --git a/third_party/rust/ash/src/vk/bitflags.rs b/third_party/rust/ash/src/vk/bitflags.rs
index 66699d721410..b3a5af920586 100644
--- a/third_party/rust/ash/src/vk/bitflags.rs
+++ b/third_party/rust/ash/src/vk/bitflags.rs
@@ -1280,13 +1280,23 @@ impl VideoEncodeCapabilityFlagsKHR {
 }
 #[repr(transparent)]
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[doc = ""]
+pub struct VideoEncodeFeedbackFlagsKHR(pub(crate) Flags);
+vk_bitflags_wrapped!(VideoEncodeFeedbackFlagsKHR, Flags);
+impl VideoEncodeFeedbackFlagsKHR {
+    pub const BITSTREAM_BUFFER_OFFSET: Self = Self(0b1);
+    pub const BITSTREAM_BYTES_WRITTEN: Self = Self(0b10);
+}
+#[repr(transparent)]
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 #[doc = ""]
 pub struct VideoEncodeRateControlModeFlagsKHR(pub(crate) Flags);
 vk_bitflags_wrapped!(VideoEncodeRateControlModeFlagsKHR, Flags);
 impl VideoEncodeRateControlModeFlagsKHR {
-    pub const NONE: Self = Self(0);
-    pub const CBR: Self = Self(1);
-    pub const VBR: Self = Self(2);
+    pub const DEFAULT: Self = Self(0);
+    pub const DISABLED: Self = Self(0b1);
+    pub const CBR: Self = Self(0b10);
+    pub const VBR: Self = Self(0b100);
 }
 #[repr(transparent)]
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@@ -1319,26 +1329,7 @@ impl VideoEncodeH264CapabilityFlagsEXT {
     pub const ROW_UNALIGNED_SLICE: Self = Self(0b100_0000_0000_0000_0000_0000);
     pub const DIFFERENT_SLICE_TYPE: Self = Self(0b1000_0000_0000_0000_0000_0000);
     pub const B_FRAME_IN_L1_LIST: Self = Self(0b1_0000_0000_0000_0000_0000_0000);
-}
-#[repr(transparent)]
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[doc = ""]
-pub struct VideoEncodeH264InputModeFlagsEXT(pub(crate) Flags);
-vk_bitflags_wrapped!(VideoEncodeH264InputModeFlagsEXT, Flags);
-impl VideoEncodeH264InputModeFlagsEXT {
-    pub const FRAME: Self = Self(0b1);
-    pub const SLICE: Self = Self(0b10);
-    pub const NON_VCL: Self = Self(0b100);
-}
-#[repr(transparent)]
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[doc = ""]
-pub struct VideoEncodeH264OutputModeFlagsEXT(pub(crate) Flags);
-vk_bitflags_wrapped!(VideoEncodeH264OutputModeFlagsEXT, Flags);
-impl VideoEncodeH264OutputModeFlagsEXT {
-    pub const FRAME: Self = Self(0b1);
-    pub const SLICE: Self = Self(0b10);
-    pub const NON_VCL: Self = Self(0b100);
+    pub const DIFFERENT_REFERENCE_FINAL_LISTS: Self = Self(0b10_0000_0000_0000_0000_0000_0000);
 }
 #[repr(transparent)]
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@@ -1474,26 +1465,7 @@ impl VideoEncodeH265CapabilityFlagsEXT {
     pub const
DEPENDENT_SLICE_SEGMENT: Self = Self(0b1000_0000_0000_0000_0000_0000); pub const DIFFERENT_SLICE_TYPE: Self = Self(0b1_0000_0000_0000_0000_0000_0000); pub const B_FRAME_IN_L1_LIST: Self = Self(0b10_0000_0000_0000_0000_0000_0000); -} -#[repr(transparent)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct VideoEncodeH265InputModeFlagsEXT(pub(crate) Flags); -vk_bitflags_wrapped!(VideoEncodeH265InputModeFlagsEXT, Flags); -impl VideoEncodeH265InputModeFlagsEXT { - pub const FRAME: Self = Self(0b1); - pub const SLICE_SEGMENT: Self = Self(0b10); - pub const NON_VCL: Self = Self(0b100); -} -#[repr(transparent)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct VideoEncodeH265OutputModeFlagsEXT(pub(crate) Flags); -vk_bitflags_wrapped!(VideoEncodeH265OutputModeFlagsEXT, Flags); -impl VideoEncodeH265OutputModeFlagsEXT { - pub const FRAME: Self = Self(0b1); - pub const SLICE_SEGMENT: Self = Self(0b10); - pub const NON_VCL: Self = Self(0b100); + pub const DIFFERENT_REFERENCE_FINAL_LISTS: Self = Self(0b100_0000_0000_0000_0000_0000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] @@ -1641,3 +1613,11 @@ vk_bitflags_wrapped!(MicromapCreateFlagsEXT, Flags); impl MicromapCreateFlagsEXT { pub const DEVICE_ADDRESS_CAPTURE_REPLAY: Self = Self(0b1); } +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ShaderCreateFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(ShaderCreateFlagsEXT, Flags); +impl ShaderCreateFlagsEXT { + pub const LINK_STAGE: Self = Self(0b1); +} diff --git a/third_party/rust/ash/src/vk/const_debugs.rs b/third_party/rust/ash/src/vk/const_debugs.rs index 6508306e3020..04ba6415d336 100644 --- a/third_party/rust/ash/src/vk/const_debugs.rs +++ b/third_party/rust/ash/src/vk/const_debugs.rs @@ -318,11 +318,15 @@ impl fmt::Debug for AccessFlags2 { ), (AccessFlags2::MICROMAP_READ_EXT.0, "MICROMAP_READ_EXT"), (AccessFlags2::MICROMAP_WRITE_EXT.0, "MICROMAP_WRITE_EXT"), + (AccessFlags2::RESERVED_49_ARM.0, "RESERVED_49_ARM"), + (AccessFlags2::RESERVED_50_ARM.0, "RESERVED_50_ARM"), (AccessFlags2::OPTICAL_FLOW_READ_NV.0, "OPTICAL_FLOW_READ_NV"), ( AccessFlags2::OPTICAL_FLOW_WRITE_NV.0, "OPTICAL_FLOW_WRITE_NV", ), + (AccessFlags2::RESERVED_47_EXT.0, "RESERVED_47_EXT"), + (AccessFlags2::RESERVED_48_EXT.0, "RESERVED_48_EXT"), ]; debug_flags(f, KNOWN, self.0) } @@ -515,6 +519,7 @@ impl fmt::Debug for BufferCreateFlags { BufferCreateFlags::DESCRIPTOR_BUFFER_CAPTURE_REPLAY_EXT.0, "DESCRIPTOR_BUFFER_CAPTURE_REPLAY_EXT", ), + (BufferCreateFlags::RESERVED_6_KHR.0, "RESERVED_6_KHR"), (BufferCreateFlags::PROTECTED.0, "PROTECTED"), ( BufferCreateFlags::DEVICE_ADDRESS_CAPTURE_REPLAY.0, @@ -655,12 +660,12 @@ impl fmt::Debug for BuildAccelerationStructureFlagsKHR { "ALLOW_OPACITY_MICROMAP_DATA_UPDATE_EXT", ), ( - BuildAccelerationStructureFlagsKHR::RESERVED_9_NV.0, - "RESERVED_9_NV", + BuildAccelerationStructureFlagsKHR::ALLOW_DISPLACEMENT_MICROMAP_UPDATE_NV.0, + "ALLOW_DISPLACEMENT_MICROMAP_UPDATE_NV", ), ( - BuildAccelerationStructureFlagsKHR::RESERVED_10_NV.0, - "RESERVED_10_NV", + BuildAccelerationStructureFlagsKHR::ALLOW_DATA_ACCESS.0, + "ALLOW_DATA_ACCESS", ), ]; debug_flags(f, KNOWN, self.0) @@ -1204,6 +1209,10 @@ impl fmt::Debug for DescriptorSetLayoutCreateFlags { DescriptorSetLayoutCreateFlags::HOST_ONLY_POOL_EXT.0, "HOST_ONLY_POOL_EXT", ), + ( + DescriptorSetLayoutCreateFlags::RESERVED_6_EXT.0, + "RESERVED_6_EXT", + ), ( 
DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND_POOL.0, "UPDATE_AFTER_BIND_POOL", @@ -1444,6 +1453,21 @@ impl fmt::Debug for DiscardRectangleModeEXT { } } } +impl fmt::Debug for DisplacementMicromapFormatNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::TYPE_64_TRIANGLES_64_BYTES => Some("TYPE_64_TRIANGLES_64_BYTES"), + Self::TYPE_256_TRIANGLES_128_BYTES => Some("TYPE_256_TRIANGLES_128_BYTES"), + Self::TYPE_1024_TRIANGLES_128_BYTES => Some("TYPE_1024_TRIANGLES_128_BYTES"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for DisplayEventTypeEXT { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -1525,6 +1549,7 @@ impl fmt::Debug for DriverId { Self::MESA_VENUS => Some("MESA_VENUS"), Self::MESA_DOZEN => Some("MESA_DOZEN"), Self::MESA_NVK => Some("MESA_NVK"), + Self::IMAGINATION_OPEN_SOURCE_MESA => Some("IMAGINATION_OPEN_SOURCE_MESA"), _ => None, }; if let Some(x) = name { @@ -1548,12 +1573,15 @@ impl fmt::Debug for DynamicState { Self::STENCIL_REFERENCE => Some("STENCIL_REFERENCE"), Self::VIEWPORT_W_SCALING_NV => Some("VIEWPORT_W_SCALING_NV"), Self::DISCARD_RECTANGLE_EXT => Some("DISCARD_RECTANGLE_EXT"), + Self::DISCARD_RECTANGLE_ENABLE_EXT => Some("DISCARD_RECTANGLE_ENABLE_EXT"), + Self::DISCARD_RECTANGLE_MODE_EXT => Some("DISCARD_RECTANGLE_MODE_EXT"), Self::SAMPLE_LOCATIONS_EXT => Some("SAMPLE_LOCATIONS_EXT"), Self::RAY_TRACING_PIPELINE_STACK_SIZE_KHR => { Some("RAY_TRACING_PIPELINE_STACK_SIZE_KHR") } Self::VIEWPORT_SHADING_RATE_PALETTE_NV => Some("VIEWPORT_SHADING_RATE_PALETTE_NV"), Self::VIEWPORT_COARSE_SAMPLE_ORDER_NV => Some("VIEWPORT_COARSE_SAMPLE_ORDER_NV"), + Self::EXCLUSIVE_SCISSOR_ENABLE_NV => Some("EXCLUSIVE_SCISSOR_ENABLE_NV"), Self::EXCLUSIVE_SCISSOR_NV => Some("EXCLUSIVE_SCISSOR_NV"), Self::FRAGMENT_SHADING_RATE_KHR => Some("FRAGMENT_SHADING_RATE_KHR"), Self::LINE_STIPPLE_EXT => Some("LINE_STIPPLE_EXT"), @@ -1600,6 +1628,9 @@ impl fmt::Debug for DynamicState { Some("REPRESENTATIVE_FRAGMENT_TEST_ENABLE_NV") } Self::COVERAGE_REDUCTION_MODE_NV => Some("COVERAGE_REDUCTION_MODE_NV"), + Self::ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT => { + Some("ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT") + } Self::CULL_MODE => Some("CULL_MODE"), Self::FRONT_FACE => Some("FRONT_FACE"), Self::PRIMITIVE_TOPOLOGY => Some("PRIMITIVE_TOPOLOGY"), @@ -1680,14 +1711,6 @@ impl fmt::Debug for ExternalFenceHandleTypeFlags { "OPAQUE_WIN32_KMT", ), (ExternalFenceHandleTypeFlags::SYNC_FD.0, "SYNC_FD"), - ( - ExternalFenceHandleTypeFlags::RESERVED_4_NV.0, - "RESERVED_4_NV", - ), - ( - ExternalFenceHandleTypeFlags::RESERVED_5_NV.0, - "RESERVED_5_NV", - ), ]; debug_flags(f, KNOWN, self.0) } @@ -1765,8 +1788,8 @@ impl fmt::Debug for ExternalMemoryHandleTypeFlags { "RDMA_ADDRESS_NV", ), ( - ExternalMemoryHandleTypeFlags::RESERVED_13_NV.0, - "RESERVED_13_NV", + ExternalMemoryHandleTypeFlags::TYPE_530_QNX.0, + "TYPE_530_QNX", ), ]; debug_flags(f, KNOWN, self.0) @@ -1825,10 +1848,6 @@ impl fmt::Debug for ExternalSemaphoreHandleTypeFlags { ExternalSemaphoreHandleTypeFlags::ZIRCON_EVENT_FUCHSIA.0, "ZIRCON_EVENT_FUCHSIA", ), - ( - ExternalSemaphoreHandleTypeFlags::RESERVED_5_NV.0, - "RESERVED_5_NV", - ), ]; debug_flags(f, KNOWN, self.0) } @@ -2190,7 +2209,7 @@ impl fmt::Debug for FormatFeatureFlags { } impl fmt::Debug for FormatFeatureFlags2 { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN : & [(Flags64 , & str)] = & [(FormatFeatureFlags2 :: SAMPLED_IMAGE . 
0 , "SAMPLED_IMAGE") , (FormatFeatureFlags2 :: STORAGE_IMAGE . 0 , "STORAGE_IMAGE") , (FormatFeatureFlags2 :: STORAGE_IMAGE_ATOMIC . 0 , "STORAGE_IMAGE_ATOMIC") , (FormatFeatureFlags2 :: UNIFORM_TEXEL_BUFFER . 0 , "UNIFORM_TEXEL_BUFFER") , (FormatFeatureFlags2 :: STORAGE_TEXEL_BUFFER . 0 , "STORAGE_TEXEL_BUFFER") , (FormatFeatureFlags2 :: STORAGE_TEXEL_BUFFER_ATOMIC . 0 , "STORAGE_TEXEL_BUFFER_ATOMIC") , (FormatFeatureFlags2 :: VERTEX_BUFFER . 0 , "VERTEX_BUFFER") , (FormatFeatureFlags2 :: COLOR_ATTACHMENT . 0 , "COLOR_ATTACHMENT") , (FormatFeatureFlags2 :: COLOR_ATTACHMENT_BLEND . 0 , "COLOR_ATTACHMENT_BLEND") , (FormatFeatureFlags2 :: DEPTH_STENCIL_ATTACHMENT . 0 , "DEPTH_STENCIL_ATTACHMENT") , (FormatFeatureFlags2 :: BLIT_SRC . 0 , "BLIT_SRC") , (FormatFeatureFlags2 :: BLIT_DST . 0 , "BLIT_DST") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_FILTER_LINEAR . 0 , "SAMPLED_IMAGE_FILTER_LINEAR") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_FILTER_CUBIC . 0 , "SAMPLED_IMAGE_FILTER_CUBIC") , (FormatFeatureFlags2 :: TRANSFER_SRC . 0 , "TRANSFER_SRC") , (FormatFeatureFlags2 :: TRANSFER_DST . 0 , "TRANSFER_DST") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_FILTER_MINMAX . 0 , "SAMPLED_IMAGE_FILTER_MINMAX") , (FormatFeatureFlags2 :: MIDPOINT_CHROMA_SAMPLES . 0 , "MIDPOINT_CHROMA_SAMPLES") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE") , (FormatFeatureFlags2 :: DISJOINT . 0 , "DISJOINT") , (FormatFeatureFlags2 :: COSITED_CHROMA_SAMPLES . 0 , "COSITED_CHROMA_SAMPLES") , (FormatFeatureFlags2 :: STORAGE_READ_WITHOUT_FORMAT . 0 , "STORAGE_READ_WITHOUT_FORMAT") , (FormatFeatureFlags2 :: STORAGE_WRITE_WITHOUT_FORMAT . 0 , "STORAGE_WRITE_WITHOUT_FORMAT") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_DEPTH_COMPARISON . 0 , "SAMPLED_IMAGE_DEPTH_COMPARISON") , (FormatFeatureFlags2 :: VIDEO_DECODE_OUTPUT_KHR . 0 , "VIDEO_DECODE_OUTPUT_KHR") , (FormatFeatureFlags2 :: VIDEO_DECODE_DPB_KHR . 0 , "VIDEO_DECODE_DPB_KHR") , (FormatFeatureFlags2 :: ACCELERATION_STRUCTURE_VERTEX_BUFFER_KHR . 0 , "ACCELERATION_STRUCTURE_VERTEX_BUFFER_KHR") , (FormatFeatureFlags2 :: FRAGMENT_DENSITY_MAP_EXT . 0 , "FRAGMENT_DENSITY_MAP_EXT") , (FormatFeatureFlags2 :: FRAGMENT_SHADING_RATE_ATTACHMENT_KHR . 0 , "FRAGMENT_SHADING_RATE_ATTACHMENT_KHR") , (FormatFeatureFlags2 :: RESERVED_44_EXT . 0 , "RESERVED_44_EXT") , (FormatFeatureFlags2 :: RESERVED_45_EXT . 0 , "RESERVED_45_EXT") , (FormatFeatureFlags2 :: VIDEO_ENCODE_INPUT_KHR . 0 , "VIDEO_ENCODE_INPUT_KHR") , (FormatFeatureFlags2 :: VIDEO_ENCODE_DPB_KHR . 0 , "VIDEO_ENCODE_DPB_KHR") , (FormatFeatureFlags2 :: LINEAR_COLOR_ATTACHMENT_NV . 0 , "LINEAR_COLOR_ATTACHMENT_NV") , (FormatFeatureFlags2 :: WEIGHT_IMAGE_QCOM . 0 , "WEIGHT_IMAGE_QCOM") , (FormatFeatureFlags2 :: WEIGHT_SAMPLED_IMAGE_QCOM . 0 , "WEIGHT_SAMPLED_IMAGE_QCOM") , (FormatFeatureFlags2 :: BLOCK_MATCHING_QCOM . 0 , "BLOCK_MATCHING_QCOM") , (FormatFeatureFlags2 :: BOX_FILTER_SAMPLED_QCOM . 0 , "BOX_FILTER_SAMPLED_QCOM") , (FormatFeatureFlags2 :: RESERVED_39_EXT . 
0 , "RESERVED_39_EXT") , (FormatFeatureFlags2 :: OPTICAL_FLOW_IMAGE_NV . 0 , "OPTICAL_FLOW_IMAGE_NV") , (FormatFeatureFlags2 :: OPTICAL_FLOW_VECTOR_NV . 0 , "OPTICAL_FLOW_VECTOR_NV") , (FormatFeatureFlags2 :: OPTICAL_FLOW_COST_NV . 0 , "OPTICAL_FLOW_COST_NV")] ; + const KNOWN : & [(Flags64 , & str)] = & [(FormatFeatureFlags2 :: SAMPLED_IMAGE . 0 , "SAMPLED_IMAGE") , (FormatFeatureFlags2 :: STORAGE_IMAGE . 0 , "STORAGE_IMAGE") , (FormatFeatureFlags2 :: STORAGE_IMAGE_ATOMIC . 0 , "STORAGE_IMAGE_ATOMIC") , (FormatFeatureFlags2 :: UNIFORM_TEXEL_BUFFER . 0 , "UNIFORM_TEXEL_BUFFER") , (FormatFeatureFlags2 :: STORAGE_TEXEL_BUFFER . 0 , "STORAGE_TEXEL_BUFFER") , (FormatFeatureFlags2 :: STORAGE_TEXEL_BUFFER_ATOMIC . 0 , "STORAGE_TEXEL_BUFFER_ATOMIC") , (FormatFeatureFlags2 :: VERTEX_BUFFER . 0 , "VERTEX_BUFFER") , (FormatFeatureFlags2 :: COLOR_ATTACHMENT . 0 , "COLOR_ATTACHMENT") , (FormatFeatureFlags2 :: COLOR_ATTACHMENT_BLEND . 0 , "COLOR_ATTACHMENT_BLEND") , (FormatFeatureFlags2 :: DEPTH_STENCIL_ATTACHMENT . 0 , "DEPTH_STENCIL_ATTACHMENT") , (FormatFeatureFlags2 :: BLIT_SRC . 0 , "BLIT_SRC") , (FormatFeatureFlags2 :: BLIT_DST . 0 , "BLIT_DST") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_FILTER_LINEAR . 0 , "SAMPLED_IMAGE_FILTER_LINEAR") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_FILTER_CUBIC . 0 , "SAMPLED_IMAGE_FILTER_CUBIC") , (FormatFeatureFlags2 :: TRANSFER_SRC . 0 , "TRANSFER_SRC") , (FormatFeatureFlags2 :: TRANSFER_DST . 0 , "TRANSFER_DST") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_FILTER_MINMAX . 0 , "SAMPLED_IMAGE_FILTER_MINMAX") , (FormatFeatureFlags2 :: MIDPOINT_CHROMA_SAMPLES . 0 , "MIDPOINT_CHROMA_SAMPLES") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE") , (FormatFeatureFlags2 :: DISJOINT . 0 , "DISJOINT") , (FormatFeatureFlags2 :: COSITED_CHROMA_SAMPLES . 0 , "COSITED_CHROMA_SAMPLES") , (FormatFeatureFlags2 :: STORAGE_READ_WITHOUT_FORMAT . 0 , "STORAGE_READ_WITHOUT_FORMAT") , (FormatFeatureFlags2 :: STORAGE_WRITE_WITHOUT_FORMAT . 0 , "STORAGE_WRITE_WITHOUT_FORMAT") , (FormatFeatureFlags2 :: SAMPLED_IMAGE_DEPTH_COMPARISON . 0 , "SAMPLED_IMAGE_DEPTH_COMPARISON") , (FormatFeatureFlags2 :: VIDEO_DECODE_OUTPUT_KHR . 0 , "VIDEO_DECODE_OUTPUT_KHR") , (FormatFeatureFlags2 :: VIDEO_DECODE_DPB_KHR . 0 , "VIDEO_DECODE_DPB_KHR") , (FormatFeatureFlags2 :: ACCELERATION_STRUCTURE_VERTEX_BUFFER_KHR . 0 , "ACCELERATION_STRUCTURE_VERTEX_BUFFER_KHR") , (FormatFeatureFlags2 :: FRAGMENT_DENSITY_MAP_EXT . 0 , "FRAGMENT_DENSITY_MAP_EXT") , (FormatFeatureFlags2 :: FRAGMENT_SHADING_RATE_ATTACHMENT_KHR . 0 , "FRAGMENT_SHADING_RATE_ATTACHMENT_KHR") , (FormatFeatureFlags2 :: RESERVED_44_EXT . 0 , "RESERVED_44_EXT") , (FormatFeatureFlags2 :: RESERVED_45_EXT . 0 , "RESERVED_45_EXT") , (FormatFeatureFlags2 :: RESERVED_46_EXT . 0 , "RESERVED_46_EXT") , (FormatFeatureFlags2 :: VIDEO_ENCODE_INPUT_KHR . 0 , "VIDEO_ENCODE_INPUT_KHR") , (FormatFeatureFlags2 :: VIDEO_ENCODE_DPB_KHR . 0 , "VIDEO_ENCODE_DPB_KHR") , (FormatFeatureFlags2 :: LINEAR_COLOR_ATTACHMENT_NV . 
0 , "LINEAR_COLOR_ATTACHMENT_NV") , (FormatFeatureFlags2 :: WEIGHT_IMAGE_QCOM . 0 , "WEIGHT_IMAGE_QCOM") , (FormatFeatureFlags2 :: WEIGHT_SAMPLED_IMAGE_QCOM . 0 , "WEIGHT_SAMPLED_IMAGE_QCOM") , (FormatFeatureFlags2 :: BLOCK_MATCHING_QCOM . 0 , "BLOCK_MATCHING_QCOM") , (FormatFeatureFlags2 :: BOX_FILTER_SAMPLED_QCOM . 0 , "BOX_FILTER_SAMPLED_QCOM") , (FormatFeatureFlags2 :: RESERVED_39_EXT . 0 , "RESERVED_39_EXT") , (FormatFeatureFlags2 :: OPTICAL_FLOW_IMAGE_NV . 0 , "OPTICAL_FLOW_IMAGE_NV") , (FormatFeatureFlags2 :: OPTICAL_FLOW_VECTOR_NV . 0 , "OPTICAL_FLOW_VECTOR_NV") , (FormatFeatureFlags2 :: OPTICAL_FLOW_COST_NV . 0 , "OPTICAL_FLOW_COST_NV")] ; debug_flags(f, KNOWN, self.0) } } @@ -2546,6 +2565,7 @@ impl fmt::Debug for ImageCreateFlags { ImageCreateFlags::FRAGMENT_DENSITY_MAP_OFFSET_QCOM.0, "FRAGMENT_DENSITY_MAP_OFFSET_QCOM", ), + (ImageCreateFlags::RESERVED_20_KHR.0, "RESERVED_20_KHR"), (ImageCreateFlags::ALIAS.0, "ALIAS"), ( ImageCreateFlags::SPLIT_INSTANCE_BIND_REGIONS.0, @@ -2721,6 +2741,7 @@ impl fmt::Debug for ImageUsageFlags { ImageUsageFlags::SAMPLE_BLOCK_MATCH_QCOM.0, "SAMPLE_BLOCK_MATCH_QCOM", ), + (ImageUsageFlags::RESERVED_23_EXT.0, "RESERVED_23_EXT"), ]; debug_flags(f, KNOWN, self.0) } @@ -2740,7 +2761,6 @@ impl fmt::Debug for ImageViewCreateFlags { ImageViewCreateFlags::FRAGMENT_DENSITY_MAP_DEFERRED_EXT.0, "FRAGMENT_DENSITY_MAP_DEFERRED_EXT", ), - (ImageViewCreateFlags::RESERVED_3_EXT.0, "RESERVED_3_EXT"), ]; debug_flags(f, KNOWN, self.0) } @@ -2925,7 +2945,6 @@ impl fmt::Debug for MemoryHeapFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ (MemoryHeapFlags::DEVICE_LOCAL.0, "DEVICE_LOCAL"), - (MemoryHeapFlags::RESERVED_2_KHR.0, "RESERVED_2_KHR"), (MemoryHeapFlags::MULTI_INSTANCE.0, "MULTI_INSTANCE"), ]; debug_flags(f, KNOWN, self.0) @@ -2974,6 +2993,12 @@ impl fmt::Debug for MemoryPropertyFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for MemoryUnmapFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for MetalSurfaceCreateFlagsEXT { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -2993,6 +3018,7 @@ impl fmt::Debug for MicromapTypeEXT { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { Self::OPACITY_MICROMAP => Some("OPACITY_MICROMAP"), + Self::DISPLACEMENT_MICROMAP_NV => Some("DISPLACEMENT_MICROMAP_NV"), _ => None, }; if let Some(x) = name { @@ -3307,14 +3333,10 @@ impl fmt::Debug for PipelineBindPoint { } impl fmt::Debug for PipelineCacheCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[ - (PipelineCacheCreateFlags::RESERVED_1_EXT.0, "RESERVED_1_EXT"), - (PipelineCacheCreateFlags::RESERVED_2_KHR.0, "RESERVED_2_KHR"), - ( - PipelineCacheCreateFlags::EXTERNALLY_SYNCHRONIZED.0, - "EXTERNALLY_SYNCHRONIZED", - ), - ]; + const KNOWN: &[(Flags, &str)] = &[( + PipelineCacheCreateFlags::EXTERNALLY_SYNCHRONIZED.0, + "EXTERNALLY_SYNCHRONIZED", + )]; debug_flags(f, KNOWN, self.0) } } @@ -3454,7 +3476,10 @@ impl fmt::Debug for PipelineCreateFlags { PipelineCreateFlags::RAY_TRACING_OPACITY_MICROMAP_EXT.0, "RAY_TRACING_OPACITY_MICROMAP_EXT", ), - (PipelineCreateFlags::RESERVED_28_NV.0, "RESERVED_28_NV"), + ( + PipelineCreateFlags::RAY_TRACING_DISPLACEMENT_MICROMAP_NV.0, + "RAY_TRACING_DISPLACEMENT_MICROMAP_NV", + ), ( PipelineCreateFlags::NO_PROTECTED_ACCESS_EXT.0, 
"NO_PROTECTED_ACCESS_EXT", @@ -3800,10 +3825,12 @@ impl fmt::Debug for PipelineStageFlags2 { "MICROMAP_BUILD_EXT", ), ( - PipelineStageFlags2::RESEVED_41_HUAWEI.0, - "RESEVED_41_HUAWEI", + PipelineStageFlags2::CLUSTER_CULLING_SHADER_HUAWEI.0, + "CLUSTER_CULLING_SHADER_HUAWEI", ), + (PipelineStageFlags2::RESERVED_43_ARM.0, "RESERVED_43_ARM"), (PipelineStageFlags2::OPTICAL_FLOW_NV.0, "OPTICAL_FLOW_NV"), + (PipelineStageFlags2::RESERVED_42_EXT.0, "RESERVED_42_EXT"), ]; debug_flags(f, KNOWN, self.0) } @@ -3928,7 +3955,8 @@ impl fmt::Debug for PrimitiveTopology { } impl fmt::Debug for PrivateDataSlotCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[]; + const KNOWN: &[(Flags, &str)] = + &[(PrivateDataSlotCreateFlags::RESERVED_0_NV.0, "RESERVED_0_NV")]; debug_flags(f, KNOWN, self.0) } } @@ -4007,6 +4035,10 @@ impl fmt::Debug for QueryPipelineStatisticFlags { QueryPipelineStatisticFlags::MESH_SHADER_INVOCATIONS_EXT.0, "MESH_SHADER_INVOCATIONS_EXT", ), + ( + QueryPipelineStatisticFlags::CLUSTER_CULLING_SHADER_INVOCATIONS_HUAWEI.0, + "CLUSTER_CULLING_SHADER_INVOCATIONS_HUAWEI", + ), ]; debug_flags(f, KNOWN, self.0) } @@ -4076,9 +4108,7 @@ impl fmt::Debug for QueryType { Some("ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV") } Self::PERFORMANCE_QUERY_INTEL => Some("PERFORMANCE_QUERY_INTEL"), - Self::VIDEO_ENCODESTREAM_BUFFER_RANGE_KHR => { - Some("VIDEO_ENCODESTREAM_BUFFER_RANGE_KHR") - } + Self::VIDEO_ENCODE_FEEDBACK_KHR => Some("VIDEO_ENCODE_FEEDBACK_KHR"), Self::MESH_PRIMITIVES_GENERATED_EXT => Some("MESH_PRIMITIVES_GENERATED_EXT"), Self::PRIMITIVES_GENERATED_EXT => Some("PRIMITIVES_GENERATED_EXT"), Self::ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR => { @@ -4107,7 +4137,9 @@ impl fmt::Debug for QueueFlags { (QueueFlags::RESERVED_9_EXT.0, "RESERVED_9_EXT"), (QueueFlags::VIDEO_ENCODE_KHR.0, "VIDEO_ENCODE_KHR"), (QueueFlags::RESERVED_7_QCOM.0, "RESERVED_7_QCOM"), + (QueueFlags::RESERVED_11_ARM.0, "RESERVED_11_ARM"), (QueueFlags::OPTICAL_FLOW_NV.0, "OPTICAL_FLOW_NV"), + (QueueFlags::RESERVED_10_EXT.0, "RESERVED_10_EXT"), (QueueFlags::PROTECTED.0, "PROTECTED"), ]; debug_flags(f, KNOWN, self.0) @@ -4206,6 +4238,10 @@ impl fmt::Debug for ResolveModeFlags { (ResolveModeFlags::AVERAGE.0, "AVERAGE"), (ResolveModeFlags::MIN.0, "MIN"), (ResolveModeFlags::MAX.0, "MAX"), + ( + ResolveModeFlags::EXTENSION_469_FLAG_0.0, + "EXTENSION_469_FLAG_0", + ), ]; debug_flags(f, KNOWN, self.0) } @@ -4379,12 +4415,52 @@ impl fmt::Debug for SemaphoreWaitFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for ShaderCodeTypeEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::BINARY => Some("BINARY"), + Self::SPIRV => Some("SPIRV"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for ShaderCorePropertiesFlagsAMD { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for ShaderCreateFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ShaderCreateFlagsEXT::LINK_STAGE.0, "LINK_STAGE"), + ( + ShaderCreateFlagsEXT::ALLOW_VARYING_SUBGROUP_SIZE.0, + "ALLOW_VARYING_SUBGROUP_SIZE", + ), + ( + ShaderCreateFlagsEXT::REQUIRE_FULL_SUBGROUPS.0, + "REQUIRE_FULL_SUBGROUPS", + ), + (ShaderCreateFlagsEXT::NO_TASK_SHADER.0, "NO_TASK_SHADER"), + (ShaderCreateFlagsEXT::DISPATCH_BASE.0, "DISPATCH_BASE"), + ( + 
ShaderCreateFlagsEXT::FRAGMENT_SHADING_RATE_ATTACHMENT.0, + "FRAGMENT_SHADING_RATE_ATTACHMENT", + ), + ( + ShaderCreateFlagsEXT::FRAGMENT_DENSITY_MAP_ATTACHMENT.0, + "FRAGMENT_DENSITY_MAP_ATTACHMENT", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for ShaderFloatControlsIndependence { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -4466,10 +4542,10 @@ impl fmt::Debug for ShaderStageFlags { ShaderStageFlags::SUBPASS_SHADING_HUAWEI.0, "SUBPASS_SHADING_HUAWEI", ), - (ShaderStageFlags::RESERVED_19_HUAWEI.0, "RESERVED_19_HUAWEI"), - (ShaderStageFlags::EXT_483_RESERVE_15.0, "EXT_483_RESERVE_15"), - (ShaderStageFlags::EXT_483_RESERVE_16.0, "EXT_483_RESERVE_16"), - (ShaderStageFlags::EXT_483_RESERVE_17.0, "EXT_483_RESERVE_17"), + ( + ShaderStageFlags::CLUSTER_CULLING_HUAWEI.0, + "CLUSTER_CULLING_HUAWEI", + ), ]; debug_flags(f, KNOWN, self.0) } @@ -4745,9 +4821,6 @@ impl fmt::Debug for StructureType { Self::VIDEO_ENCODE_H264_NALU_SLICE_INFO_EXT => { Some("VIDEO_ENCODE_H264_NALU_SLICE_INFO_EXT") } - Self::VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_INFO_EXT => { - Some("VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_INFO_EXT") - } Self::VIDEO_ENCODE_H264_PROFILE_INFO_EXT => Some("VIDEO_ENCODE_H264_PROFILE_INFO_EXT"), Self::VIDEO_ENCODE_H264_RATE_CONTROL_INFO_EXT => { Some("VIDEO_ENCODE_H264_RATE_CONTROL_INFO_EXT") @@ -4755,9 +4828,6 @@ impl fmt::Debug for StructureType { Self::VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_EXT => { Some("VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_EXT") } - Self::VIDEO_ENCODE_H264_REFERENCE_LISTS_INFO_EXT => { - Some("VIDEO_ENCODE_H264_REFERENCE_LISTS_INFO_EXT") - } Self::VIDEO_ENCODE_H265_CAPABILITIES_EXT => Some("VIDEO_ENCODE_H265_CAPABILITIES_EXT"), Self::VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT => { Some("VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT") @@ -4774,13 +4844,7 @@ impl fmt::Debug for StructureType { Self::VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_INFO_EXT => { Some("VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_INFO_EXT") } - Self::VIDEO_ENCODE_H265_EMIT_PICTURE_PARAMETERS_INFO_EXT => { - Some("VIDEO_ENCODE_H265_EMIT_PICTURE_PARAMETERS_INFO_EXT") - } Self::VIDEO_ENCODE_H265_PROFILE_INFO_EXT => Some("VIDEO_ENCODE_H265_PROFILE_INFO_EXT"), - Self::VIDEO_ENCODE_H265_REFERENCE_LISTS_INFO_EXT => { - Some("VIDEO_ENCODE_H265_REFERENCE_LISTS_INFO_EXT") - } Self::VIDEO_ENCODE_H265_RATE_CONTROL_INFO_EXT => { Some("VIDEO_ENCODE_H265_RATE_CONTROL_INFO_EXT") } @@ -5374,6 +5438,8 @@ impl fmt::Debug for StructureType { Self::PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR => { Some("PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR") } + Self::MEMORY_MAP_INFO_KHR => Some("MEMORY_MAP_INFO_KHR"), + Self::MEMORY_UNMAP_INFO_KHR => Some("MEMORY_UNMAP_INFO_KHR"), Self::PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT => { Some("PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT") } @@ -5476,6 +5542,9 @@ impl fmt::Debug for StructureType { } Self::VIDEO_ENCODE_CAPABILITIES_KHR => Some("VIDEO_ENCODE_CAPABILITIES_KHR"), Self::VIDEO_ENCODE_USAGE_INFO_KHR => Some("VIDEO_ENCODE_USAGE_INFO_KHR"), + Self::QUERY_POOL_VIDEO_ENCODE_FEEDBACK_CREATE_INFO_KHR => { + Some("QUERY_POOL_VIDEO_ENCODE_FEEDBACK_CREATE_INFO_KHR") + } Self::PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV => { Some("PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV") } @@ -5483,6 +5552,7 @@ impl fmt::Debug for StructureType { Some("DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV") } Self::RESERVED_QCOM => Some("RESERVED_QCOM"), + Self::QUERY_LOW_LATENCY_SUPPORT_NV => 
Some("QUERY_LOW_LATENCY_SUPPORT_NV"), Self::EXPORT_METAL_OBJECT_CREATE_INFO_EXT => { Some("EXPORT_METAL_OBJECT_CREATE_INFO_EXT") } @@ -5736,6 +5806,12 @@ impl fmt::Debug for StructureType { Self::PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT => { Some("PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT") } + Self::PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_SHADER_TILE_IMAGE_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_SHADER_TILE_IMAGE_PROPERTIES_EXT") + } Self::MICROMAP_BUILD_INFO_EXT => Some("MICROMAP_BUILD_INFO_EXT"), Self::MICROMAP_VERSION_INFO_EXT => Some("MICROMAP_VERSION_INFO_EXT"), Self::COPY_MICROMAP_INFO_EXT => Some("COPY_MICROMAP_INFO_EXT"), @@ -5752,6 +5828,21 @@ impl fmt::Debug for StructureType { Self::ACCELERATION_STRUCTURE_TRIANGLES_OPACITY_MICROMAP_EXT => { Some("ACCELERATION_STRUCTURE_TRIANGLES_OPACITY_MICROMAP_EXT") } + Self::PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV => { + Some("PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV") + } + Self::PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_PROPERTIES_NV => { + Some("PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_PROPERTIES_NV") + } + Self::ACCELERATION_STRUCTURE_TRIANGLES_DISPLACEMENT_MICROMAP_NV => { + Some("ACCELERATION_STRUCTURE_TRIANGLES_DISPLACEMENT_MICROMAP_NV") + } + Self::PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI => { + Some("PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI") + } + Self::PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI => { + Some("PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI") + } Self::PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT => { Some("PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT") } @@ -5761,6 +5852,13 @@ impl fmt::Debug for StructureType { Self::PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT => { Some("PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT") } + Self::PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM => { + Some("PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM") + } + Self::PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT") + } + Self::IMAGE_VIEW_SLICED_CREATE_INFO_EXT => Some("IMAGE_VIEW_SLICED_CREATE_INFO_EXT"), Self::PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE => { Some("PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE") } @@ -5866,6 +5964,16 @@ impl fmt::Debug for StructureType { Self::PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT => { Some("PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT") } + Self::PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR") + } + Self::PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT") + } + Self::SHADER_CREATE_INFO_EXT => Some("SHADER_CREATE_INFO_EXT"), Self::PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM => { Some("PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM") } @@ -5895,6 +6003,21 @@ impl fmt::Debug for StructureType { Self::PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_PROPERTIES_ARM => { Some("PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_PROPERTIES_ARM") } + Self::PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT 
=> { + Some("PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM => { + Some("PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM") + } + Self::MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM => { + Some("MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM") + } + Self::PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT") + } Self::PHYSICAL_DEVICE_SUBGROUP_PROPERTIES => { Some("PHYSICAL_DEVICE_SUBGROUP_PROPERTIES") } @@ -6550,6 +6673,7 @@ impl fmt::Debug for VendorId { Self::CODEPLAY => Some("CODEPLAY"), Self::MESA => Some("MESA"), Self::POCL => Some("POCL"), + Self::MOBILEYE => Some("MOBILEYE"), _ => None, }; if let Some(x) = name { @@ -6728,6 +6852,21 @@ impl fmt::Debug for VideoEncodeContentFlagsKHR { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for VideoEncodeFeedbackFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + VideoEncodeFeedbackFlagsKHR::BITSTREAM_BUFFER_OFFSET.0, + "BITSTREAM_BUFFER_OFFSET", + ), + ( + VideoEncodeFeedbackFlagsKHR::BITSTREAM_BYTES_WRITTEN.0, + "BITSTREAM_BYTES_WRITTEN", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for VideoEncodeFlagsKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -6831,26 +6970,10 @@ impl fmt::Debug for VideoEncodeH264CapabilityFlagsEXT { VideoEncodeH264CapabilityFlagsEXT::B_FRAME_IN_L1_LIST.0, "B_FRAME_IN_L1_LIST", ), - ]; - debug_flags(f, KNOWN, self.0) - } -} -impl fmt::Debug for VideoEncodeH264InputModeFlagsEXT { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[ - (VideoEncodeH264InputModeFlagsEXT::FRAME.0, "FRAME"), - (VideoEncodeH264InputModeFlagsEXT::SLICE.0, "SLICE"), - (VideoEncodeH264InputModeFlagsEXT::NON_VCL.0, "NON_VCL"), - ]; - debug_flags(f, KNOWN, self.0) - } -} -impl fmt::Debug for VideoEncodeH264OutputModeFlagsEXT { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[ - (VideoEncodeH264OutputModeFlagsEXT::FRAME.0, "FRAME"), - (VideoEncodeH264OutputModeFlagsEXT::SLICE.0, "SLICE"), - (VideoEncodeH264OutputModeFlagsEXT::NON_VCL.0, "NON_VCL"), + ( + VideoEncodeH264CapabilityFlagsEXT::DIFFERENT_REFERENCE_FINAL_LISTS.0, + "DIFFERENT_REFERENCE_FINAL_LISTS", + ), ]; debug_flags(f, KNOWN, self.0) } @@ -6977,6 +7100,10 @@ impl fmt::Debug for VideoEncodeH265CapabilityFlagsEXT { VideoEncodeH265CapabilityFlagsEXT::B_FRAME_IN_L1_LIST.0, "B_FRAME_IN_L1_LIST", ), + ( + VideoEncodeH265CapabilityFlagsEXT::DIFFERENT_REFERENCE_FINAL_LISTS.0, + "DIFFERENT_REFERENCE_FINAL_LISTS", + ), ]; debug_flags(f, KNOWN, self.0) } @@ -6991,32 +7118,6 @@ impl fmt::Debug for VideoEncodeH265CtbSizeFlagsEXT { debug_flags(f, KNOWN, self.0) } } -impl fmt::Debug for VideoEncodeH265InputModeFlagsEXT { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[ - (VideoEncodeH265InputModeFlagsEXT::FRAME.0, "FRAME"), - ( - VideoEncodeH265InputModeFlagsEXT::SLICE_SEGMENT.0, - "SLICE_SEGMENT", - ), - (VideoEncodeH265InputModeFlagsEXT::NON_VCL.0, "NON_VCL"), - ]; - debug_flags(f, KNOWN, self.0) - } -} -impl fmt::Debug for VideoEncodeH265OutputModeFlagsEXT { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[ - (VideoEncodeH265OutputModeFlagsEXT::FRAME.0, "FRAME"), 
- ( - VideoEncodeH265OutputModeFlagsEXT::SLICE_SEGMENT.0, - "SLICE_SEGMENT", - ), - (VideoEncodeH265OutputModeFlagsEXT::NON_VCL.0, "NON_VCL"), - ]; - debug_flags(f, KNOWN, self.0) - } -} impl fmt::Debug for VideoEncodeH265RateControlStructureEXT { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -7064,7 +7165,8 @@ impl fmt::Debug for VideoEncodeRateControlFlagsKHR { impl fmt::Debug for VideoEncodeRateControlModeFlagsKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ - (VideoEncodeRateControlModeFlagsKHR::NONE.0, "NONE"), + (VideoEncodeRateControlModeFlagsKHR::DEFAULT.0, "DEFAULT"), + (VideoEncodeRateControlModeFlagsKHR::DISABLED.0, "DISABLED"), (VideoEncodeRateControlModeFlagsKHR::CBR.0, "CBR"), (VideoEncodeRateControlModeFlagsKHR::VBR.0, "VBR"), ]; diff --git a/third_party/rust/ash/src/vk/constants.rs b/third_party/rust/ash/src/vk/constants.rs index 78da55ec713a..384e28a20a83 100644 --- a/third_party/rust/ash/src/vk/constants.rs +++ b/third_party/rust/ash/src/vk/constants.rs @@ -10,6 +10,7 @@ pub const MAX_MEMORY_HEAPS: usize = 16; pub const LOD_CLAMP_NONE: f32 = 1000.00; pub const REMAINING_MIP_LEVELS: u32 = !0; pub const REMAINING_ARRAY_LAYERS: u32 = !0; +pub const REMAINING_3D_SLICES_EXT: u32 = !0; pub const WHOLE_SIZE: u64 = !0; pub const ATTACHMENT_UNUSED: u32 = !0; pub const TRUE: Bool32 = 1; diff --git a/third_party/rust/ash/src/vk/definitions.rs b/third_party/rust/ash/src/vk/definitions.rs index c73770b05e82..6961285b2204 100644 --- a/third_party/rust/ash/src/vk/definitions.rs +++ b/third_party/rust/ash/src/vk/definitions.rs @@ -56,7 +56,8 @@ pub const API_VERSION_1_1: u32 = make_api_version(0, 1, 1, 0); pub const API_VERSION_1_2: u32 = make_api_version(0, 1, 2, 0); #[doc = ""] pub const API_VERSION_1_3: u32 = make_api_version(0, 1, 3, 0); -pub const HEADER_VERSION: u32 = 238u32; +#[doc = ""] +pub const HEADER_VERSION: u32 = 251; #[doc = ""] pub const HEADER_VERSION_COMPLETE: u32 = make_api_version(0, 1, 3, HEADER_VERSION); #[doc = ""] @@ -326,6 +327,11 @@ vk_bitflags_wrapped!(VideoEncodeFlagsKHR, Flags); #[doc = ""] pub struct VideoEncodeRateControlFlagsKHR(pub(crate) Flags); vk_bitflags_wrapped!(VideoEncodeRateControlFlagsKHR, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct MemoryUnmapFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(MemoryUnmapFlagsKHR, Flags); define_handle!( Instance, INSTANCE, @@ -426,6 +432,12 @@ handle_nondispatchable ! 
(CuModuleNVX , CU_MODULE_NVX , doc = "" +); handle_nondispatchable!( DisplayKHR, DISPLAY_KHR, @@ -1600,7 +1612,9 @@ pub struct DeviceCreateInfo { pub flags: DeviceCreateFlags, pub queue_create_info_count: u32, pub p_queue_create_infos: *const DeviceQueueCreateInfo, + #[deprecated = "functionality described by this member no longer operates"] pub enabled_layer_count: u32, + #[deprecated = "functionality described by this member no longer operates"] pub pp_enabled_layer_names: *const *const c_char, pub enabled_extension_count: u32, pub pp_enabled_extension_names: *const *const c_char, @@ -1609,6 +1623,7 @@ pub struct DeviceCreateInfo { impl ::std::default::Default for DeviceCreateInfo { #[inline] fn default() -> Self { + #[allow(deprecated)] Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), @@ -1664,6 +1679,8 @@ impl<'a> DeviceCreateInfoBuilder<'a> { self } #[inline] + #[deprecated = "functionality described by this member no longer operates"] + #[allow(deprecated)] pub fn enabled_layer_names(mut self, enabled_layer_names: &'a [*const c_char]) -> Self { self.inner.enabled_layer_count = enabled_layer_names.len() as _; self.inner.pp_enabled_layer_names = enabled_layer_names.as_ptr(); @@ -10267,6 +10284,11 @@ impl<'a> FramebufferCreateInfoBuilder<'a> { self } #[inline] + pub fn attachment_count(mut self, attachment_count: u32) -> Self { + self.inner.attachment_count = attachment_count; + self + } + #[inline] pub fn attachments(mut self, attachments: &'a [ImageView]) -> Self { self.inner.attachment_count = attachments.len() as _; self.inner.p_attachments = attachments.as_ptr(); @@ -23884,6 +23906,74 @@ impl<'a> ImageViewUsageCreateInfoBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] +#[doc = ""] +pub struct ImageViewSlicedCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub slice_offset: u32, + pub slice_count: u32, +} +impl ::std::default::Default for ImageViewSlicedCreateInfoEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null(), + slice_offset: u32::default(), + slice_count: u32::default(), + } + } +} +unsafe impl TaggedStructure for ImageViewSlicedCreateInfoEXT { + const STRUCTURE_TYPE: StructureType = StructureType::IMAGE_VIEW_SLICED_CREATE_INFO_EXT; +} +impl ImageViewSlicedCreateInfoEXT { + pub fn builder<'a>() -> ImageViewSlicedCreateInfoEXTBuilder<'a> { + ImageViewSlicedCreateInfoEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ImageViewSlicedCreateInfoEXTBuilder<'a> { + inner: ImageViewSlicedCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageViewCreateInfo for ImageViewSlicedCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsImageViewCreateInfo for ImageViewSlicedCreateInfoEXT {} +impl<'a> ::std::ops::Deref for ImageViewSlicedCreateInfoEXTBuilder<'a> { + type Target = ImageViewSlicedCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageViewSlicedCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageViewSlicedCreateInfoEXTBuilder<'a> { + #[inline] + pub fn slice_offset(mut self, slice_offset: u32) -> Self { + self.inner.slice_offset = slice_offset; + self + } + #[inline] + pub fn slice_count(mut self, slice_count: u32) -> Self { + self.inner.slice_count = slice_count; + self + } + #[doc = r" Calling build will 
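The new explicit `attachment_count` setter on `FramebufferCreateInfoBuilder` above matters for imageless framebuffers, where `p_attachments` is deliberately left null and only the count is supplied; the existing slice-based `attachments` setter would overwrite the count from the slice length. A minimal sketch, assuming an ash 0.37-style caller that already has a render pass and attachment metadata, with made-up extent values:

```rust
// Sketch: VK_KHR_imageless_framebuffer usage enabled by the new
// `attachment_count` setter above. The render pass and the
// FramebufferAttachmentsCreateInfo contents are assumed to exist;
// width/height are illustrative.
use ash::vk;

fn imageless_framebuffer_info<'a>(
    render_pass: vk::RenderPass,
    attachments_info: &'a mut vk::FramebufferAttachmentsCreateInfo,
) -> vk::FramebufferCreateInfoBuilder<'a> {
    vk::FramebufferCreateInfo::builder()
        .flags(vk::FramebufferCreateFlags::IMAGELESS)
        .render_pass(render_pass)
        // No `.attachments(...)`: views are bound at render-pass begin time,
        // so only the count is recorded here.
        .attachment_count(1)
        .width(1920)
        .height(1080)
        .layers(1)
        .push_next(attachments_info)
}
```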
**discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageViewSlicedCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] #[doc = ""] pub struct PipelineTessellationDomainOriginStateCreateInfo { pub s_type: StructureType, @@ -44433,6 +44523,11 @@ unsafe impl ExtendsPipelineShaderStageCreateInfo for PipelineShaderStageRequiredSubgroupSizeCreateInfo { } +unsafe impl ExtendsShaderCreateInfoEXT + for PipelineShaderStageRequiredSubgroupSizeCreateInfoBuilder<'_> +{ +} +unsafe impl ExtendsShaderCreateInfoEXT for PipelineShaderStageRequiredSubgroupSizeCreateInfo {} impl<'a> ::std::ops::Deref for PipelineShaderStageRequiredSubgroupSizeCreateInfoBuilder<'a> { type Target = PipelineShaderStageRequiredSubgroupSizeCreateInfo; fn deref(&self) -> &Self::Target { @@ -44598,6 +44693,98 @@ impl<'a> PhysicalDeviceSubpassShadingPropertiesHUAWEIBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceClusterCullingShaderPropertiesHUAWEI { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_work_group_count: [u32; 3], + pub max_work_group_size: [u32; 3], + pub max_output_cluster_count: u32, + pub indirect_buffer_offset_alignment: DeviceSize, +} +impl ::std::default::Default for PhysicalDeviceClusterCullingShaderPropertiesHUAWEI { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + max_work_group_count: unsafe { ::std::mem::zeroed() }, + max_work_group_size: unsafe { ::std::mem::zeroed() }, + max_output_cluster_count: u32::default(), + indirect_buffer_offset_alignment: DeviceSize::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceClusterCullingShaderPropertiesHUAWEI { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI; +} +impl PhysicalDeviceClusterCullingShaderPropertiesHUAWEI { + pub fn builder<'a>() -> PhysicalDeviceClusterCullingShaderPropertiesHUAWEIBuilder<'a> { + PhysicalDeviceClusterCullingShaderPropertiesHUAWEIBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceClusterCullingShaderPropertiesHUAWEIBuilder<'a> { + inner: PhysicalDeviceClusterCullingShaderPropertiesHUAWEI, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceClusterCullingShaderPropertiesHUAWEIBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceClusterCullingShaderPropertiesHUAWEI +{ +} +impl<'a> ::std::ops::Deref for PhysicalDeviceClusterCullingShaderPropertiesHUAWEIBuilder<'a> { + type Target = PhysicalDeviceClusterCullingShaderPropertiesHUAWEI; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceClusterCullingShaderPropertiesHUAWEIBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceClusterCullingShaderPropertiesHUAWEIBuilder<'a> { + #[inline] + pub fn max_work_group_count(mut self, max_work_group_count: [u32; 3]) -> Self { + self.inner.max_work_group_count = max_work_group_count; + self + } + #[inline] + pub fn max_work_group_size(mut self, 
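`ImageViewSlicedCreateInfoEXT` above chains into `ImageViewCreateInfo` (note the `ExtendsImageViewCreateInfo` impls), and pairs with the `REMAINING_3D_SLICES_EXT` constant added in the constants.rs hunk earlier in this patch. A hedged usage sketch, assuming a live device, a 3D image, and an illustrative format:

```rust
// Sketch: creating a sliced view of a 3D image
// (VK_EXT_image_sliced_view_of_3d). Device, image, and format are assumed;
// slice_offset is illustrative.
use ash::vk;

unsafe fn sliced_3d_view(
    device: &ash::Device,
    image: vk::Image,
) -> ash::prelude::VkResult<vk::ImageView> {
    let mut sliced = vk::ImageViewSlicedCreateInfoEXT::builder()
        .slice_offset(4)
        // New constant from this update: "all remaining slices from offset".
        .slice_count(vk::REMAINING_3D_SLICES_EXT);
    let info = vk::ImageViewCreateInfo::builder()
        .image(image)
        .view_type(vk::ImageViewType::TYPE_3D)
        .format(vk::Format::R8G8B8A8_UNORM)
        .subresource_range(vk::ImageSubresourceRange {
            aspect_mask: vk::ImageAspectFlags::COLOR,
            base_mip_level: 0,
            level_count: 1,
            base_array_layer: 0,
            layer_count: 1,
        })
        .push_next(&mut sliced);
    device.create_image_view(&info, None)
}
```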
max_work_group_size: [u32; 3]) -> Self { + self.inner.max_work_group_size = max_work_group_size; + self + } + #[inline] + pub fn max_output_cluster_count(mut self, max_output_cluster_count: u32) -> Self { + self.inner.max_output_cluster_count = max_output_cluster_count; + self + } + #[inline] + pub fn indirect_buffer_offset_alignment( + mut self, + indirect_buffer_offset_alignment: DeviceSize, + ) -> Self { + self.inner.indirect_buffer_offset_alignment = indirect_buffer_offset_alignment; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceClusterCullingShaderPropertiesHUAWEI { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] #[doc = ""] pub struct MemoryOpaqueCaptureAddressAllocateInfo { pub s_type: StructureType, @@ -51237,6 +51424,86 @@ impl<'a> PhysicalDeviceSubpassShadingFeaturesHUAWEIBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceClusterCullingShaderFeaturesHUAWEI { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub clusterculling_shader: Bool32, + pub multiview_cluster_culling_shader: Bool32, +} +impl ::std::default::Default for PhysicalDeviceClusterCullingShaderFeaturesHUAWEI { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + clusterculling_shader: Bool32::default(), + multiview_cluster_culling_shader: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceClusterCullingShaderFeaturesHUAWEI { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI; +} +impl PhysicalDeviceClusterCullingShaderFeaturesHUAWEI { + pub fn builder<'a>() -> PhysicalDeviceClusterCullingShaderFeaturesHUAWEIBuilder<'a> { + PhysicalDeviceClusterCullingShaderFeaturesHUAWEIBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceClusterCullingShaderFeaturesHUAWEIBuilder<'a> { + inner: PhysicalDeviceClusterCullingShaderFeaturesHUAWEI, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceClusterCullingShaderFeaturesHUAWEIBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceFeatures2 for PhysicalDeviceClusterCullingShaderFeaturesHUAWEI {} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceClusterCullingShaderFeaturesHUAWEIBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceClusterCullingShaderFeaturesHUAWEI {} +impl<'a> ::std::ops::Deref for PhysicalDeviceClusterCullingShaderFeaturesHUAWEIBuilder<'a> { + type Target = PhysicalDeviceClusterCullingShaderFeaturesHUAWEI; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceClusterCullingShaderFeaturesHUAWEIBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceClusterCullingShaderFeaturesHUAWEIBuilder<'a> { + #[inline] + pub fn clusterculling_shader(mut self, clusterculling_shader: bool) -> Self { + self.inner.clusterculling_shader = clusterculling_shader.into(); + self + } + #[inline] + pub fn multiview_cluster_culling_shader( + mut self, + 
multiview_cluster_culling_shader: bool, + ) -> Self { + self.inner.multiview_cluster_culling_shader = multiview_cluster_culling_shader.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceClusterCullingShaderFeaturesHUAWEI { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] #[doc = ""] pub struct BufferCopy2 { pub s_type: StructureType, @@ -53276,6 +53543,157 @@ impl<'a> PhysicalDeviceImage2DViewOf3DFeaturesEXTBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceImageSlicedViewOf3DFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub image_sliced_view_of3_d: Bool32, +} +impl ::std::default::Default for PhysicalDeviceImageSlicedViewOf3DFeaturesEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + image_sliced_view_of3_d: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceImageSlicedViewOf3DFeaturesEXT { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT; +} +impl PhysicalDeviceImageSlicedViewOf3DFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceImageSlicedViewOf3DFeaturesEXTBuilder<'a> { + PhysicalDeviceImageSlicedViewOf3DFeaturesEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceImageSlicedViewOf3DFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceImageSlicedViewOf3DFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceImageSlicedViewOf3DFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceFeatures2 for PhysicalDeviceImageSlicedViewOf3DFeaturesEXT {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceImageSlicedViewOf3DFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceImageSlicedViewOf3DFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceImageSlicedViewOf3DFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceImageSlicedViewOf3DFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceImageSlicedViewOf3DFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceImageSlicedViewOf3DFeaturesEXTBuilder<'a> { + #[inline] + pub fn image_sliced_view_of3_d(mut self, image_sliced_view_of3_d: bool) -> Self { + self.inner.image_sliced_view_of3_d = image_sliced_view_of3_d.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
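Since `PhysicalDeviceClusterCullingShaderFeaturesHUAWEI` above is marked `ExtendsPhysicalDeviceFeatures2`, support is queried through the usual features2 chain. A minimal sketch, assuming a Vulkan 1.1+ instance and a physical device handle:

```rust
// Sketch: querying the new HUAWEI cluster-culling feature bits via a
// PhysicalDeviceFeatures2 pNext chain. Instance and physical device are
// assumed to exist and the instance must be Vulkan 1.1+.
use ash::vk;

unsafe fn supports_cluster_culling(
    instance: &ash::Instance,
    pdev: vk::PhysicalDevice,
) -> bool {
    let mut cluster = vk::PhysicalDeviceClusterCullingShaderFeaturesHUAWEI::default();
    let mut features2 = vk::PhysicalDeviceFeatures2::builder().push_next(&mut cluster);
    instance.get_physical_device_features2(pdev, &mut features2);
    // Note the generated field name `clusterculling_shader` (no underscore),
    // carried over verbatim from vk.xml's `clustercullingShader`.
    cluster.clusterculling_shader == vk::TRUE
}
```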
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceImageSlicedViewOf3DFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub attachment_feedback_loop_dynamic_state: Bool32, +} +impl ::std::default::Default for PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + attachment_feedback_loop_dynamic_state: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT; +} +impl PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXTBuilder<'a> { + PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT +{ +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT +{ +} +impl<'a> ::std::ops::Deref + for PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXTBuilder<'a> +{ + type Target = PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut + for PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXTBuilder<'a> +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXTBuilder<'a> { + #[inline] + pub fn attachment_feedback_loop_dynamic_state( + mut self, + attachment_feedback_loop_dynamic_state: bool, + ) -> Self { + self.inner.attachment_feedback_loop_dynamic_state = + attachment_feedback_loop_dynamic_state.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] #[doc = ""] pub struct PhysicalDeviceMutableDescriptorTypeFeaturesEXT { pub s_type: StructureType, @@ -57267,7 +57685,7 @@ impl<'a> VideoDecodeH265SessionParametersCreateInfoKHRBuilder<'a> { pub struct VideoDecodeH265PictureInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, - pub p_std_picture_info: *mut StdVideoDecodeH265PictureInfo, + pub p_std_picture_info: *const StdVideoDecodeH265PictureInfo, pub slice_segment_count: u32, pub p_slice_segment_offsets: *const u32, } @@ -57277,7 +57695,7 @@ impl ::std::default::Default for VideoDecodeH265PictureInfoKHR { Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), - p_std_picture_info: ::std::ptr::null_mut(), + p_std_picture_info: ::std::ptr::null(), slice_segment_count: u32::default(), p_slice_segment_offsets: ::std::ptr::null(), } @@ -57314,10 +57732,7 @@ impl<'a> ::std::ops::DerefMut for VideoDecodeH265PictureInfoKHRBuilder<'a> { } impl<'a> VideoDecodeH265PictureInfoKHRBuilder<'a> { #[inline] - pub fn std_picture_info( - mut self, - std_picture_info: &'a mut StdVideoDecodeH265PictureInfo, - ) -> Self { + pub fn std_picture_info(mut self, std_picture_info: &'a StdVideoDecodeH265PictureInfo) -> Self { self.inner.p_std_picture_info = std_picture_info; self } @@ -57989,9 +58404,9 @@ pub struct VideoEncodeInfoKHR { pub p_next: *const c_void, pub flags: VideoEncodeFlagsKHR, pub quality_level: u32, - pub dst_bitstream_buffer: Buffer, - pub dst_bitstream_buffer_offset: DeviceSize, - pub dst_bitstream_buffer_max_range: DeviceSize, + pub dst_buffer: Buffer, + pub dst_buffer_offset: DeviceSize, + pub dst_buffer_range: DeviceSize, pub src_picture_resource: VideoPictureResourceInfoKHR, pub p_setup_reference_slot: *const VideoReferenceSlotInfoKHR, pub reference_slot_count: u32, @@ -58006,9 +58421,9 @@ impl ::std::default::Default for VideoEncodeInfoKHR { p_next: ::std::ptr::null(), flags: VideoEncodeFlagsKHR::default(), quality_level: u32::default(), - dst_bitstream_buffer: Buffer::default(), - dst_bitstream_buffer_offset: DeviceSize::default(), - dst_bitstream_buffer_max_range: DeviceSize::default(), + dst_buffer: Buffer::default(), + dst_buffer_offset: DeviceSize::default(), + dst_buffer_range: DeviceSize::default(), src_picture_resource: VideoPictureResourceInfoKHR::default(), p_setup_reference_slot: ::std::ptr::null(), reference_slot_count: u32::default(), @@ -58057,21 +58472,18 @@ impl<'a> VideoEncodeInfoKHRBuilder<'a> { self } #[inline] - pub fn dst_bitstream_buffer(mut self, dst_bitstream_buffer: Buffer) -> Self { - self.inner.dst_bitstream_buffer = dst_bitstream_buffer; + pub fn dst_buffer(mut self, dst_buffer: Buffer) -> Self { + self.inner.dst_buffer = dst_buffer; self } #[inline] - pub fn dst_bitstream_buffer_offset(mut self, dst_bitstream_buffer_offset: DeviceSize) -> Self { - self.inner.dst_bitstream_buffer_offset = dst_bitstream_buffer_offset; + pub fn dst_buffer_offset(mut self, dst_buffer_offset: DeviceSize) -> Self { + self.inner.dst_buffer_offset = dst_buffer_offset; self } #[inline] - pub fn dst_bitstream_buffer_max_range( - mut self, - dst_bitstream_buffer_max_range: DeviceSize, - ) -> Self { - self.inner.dst_bitstream_buffer_max_range = dst_bitstream_buffer_max_range; + 
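The feedback-loop feature struct above also carries `ExtendsDeviceCreateInfo` impls, so opting in at device creation is a one-line chain. A sketch of just the chaining step; queue setup and enabling the `VK_EXT_attachment_feedback_loop_dynamic_state` extension name are elided and assumed to happen elsewhere:

```rust
// Sketch: enabling the new dynamic-state feature through DeviceCreateInfo's
// pNext chain. Queue infos and extension-name plumbing are assumed.
use ash::vk;

fn main() {
    let mut feat = vk::PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT::builder()
        .attachment_feedback_loop_dynamic_state(true);
    // The builder implements ExtendsDeviceCreateInfo (see the impls above),
    // so it can be pushed directly.
    let _info = vk::DeviceCreateInfo::builder().push_next(&mut feat);
}
```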
pub fn dst_buffer_range(mut self, dst_buffer_range: DeviceSize) -> Self { + self.inner.dst_buffer_range = dst_buffer_range; self } #[inline] @@ -58128,14 +58540,79 @@ impl<'a> VideoEncodeInfoKHRBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] +#[doc = ""] +pub struct QueryPoolVideoEncodeFeedbackCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub encode_feedback_flags: VideoEncodeFeedbackFlagsKHR, +} +impl ::std::default::Default for QueryPoolVideoEncodeFeedbackCreateInfoKHR { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null(), + encode_feedback_flags: VideoEncodeFeedbackFlagsKHR::default(), + } + } +} +unsafe impl TaggedStructure for QueryPoolVideoEncodeFeedbackCreateInfoKHR { + const STRUCTURE_TYPE: StructureType = + StructureType::QUERY_POOL_VIDEO_ENCODE_FEEDBACK_CREATE_INFO_KHR; +} +impl QueryPoolVideoEncodeFeedbackCreateInfoKHR { + pub fn builder<'a>() -> QueryPoolVideoEncodeFeedbackCreateInfoKHRBuilder<'a> { + QueryPoolVideoEncodeFeedbackCreateInfoKHRBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct QueryPoolVideoEncodeFeedbackCreateInfoKHRBuilder<'a> { + inner: QueryPoolVideoEncodeFeedbackCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsQueryPoolCreateInfo for QueryPoolVideoEncodeFeedbackCreateInfoKHRBuilder<'_> {} +unsafe impl ExtendsQueryPoolCreateInfo for QueryPoolVideoEncodeFeedbackCreateInfoKHR {} +impl<'a> ::std::ops::Deref for QueryPoolVideoEncodeFeedbackCreateInfoKHRBuilder<'a> { + type Target = QueryPoolVideoEncodeFeedbackCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for QueryPoolVideoEncodeFeedbackCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> QueryPoolVideoEncodeFeedbackCreateInfoKHRBuilder<'a> { + #[inline] + pub fn encode_feedback_flags( + mut self, + encode_feedback_flags: VideoEncodeFeedbackFlagsKHR, + ) -> Self { + self.inner.encode_feedback_flags = encode_feedback_flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
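The `dst_bitstream_buffer*` to `dst_buffer`/`dst_buffer_offset`/`dst_buffer_range` rename above tracks the header-251 video-encode API. A hedged sketch of the renamed setters only; the buffer and picture resource are assumed, `WHOLE_SIZE` is illustrative, and the encode extensions are still provisional at this header revision:

```rust
// Sketch: the renamed bitstream-output fields on VideoEncodeInfoKHR.
// `dst` and `src_picture` are assumed to be valid; reference-slot setup
// is omitted.
use ash::vk;

fn encode_info<'a>(
    dst: vk::Buffer,
    src_picture: vk::VideoPictureResourceInfoKHR,
) -> vk::VideoEncodeInfoKHRBuilder<'a> {
    vk::VideoEncodeInfoKHR::builder()
        .dst_buffer(dst)
        .dst_buffer_offset(0)
        .dst_buffer_range(vk::WHOLE_SIZE) // illustrative; normally a real range
        .src_picture_resource(src_picture)
}
```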
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> QueryPoolVideoEncodeFeedbackCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] #[doc = ""] pub struct VideoEncodeRateControlInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, pub flags: VideoEncodeRateControlFlagsKHR, pub rate_control_mode: VideoEncodeRateControlModeFlagsKHR, - pub layer_count: u8, - pub p_layer_configs: *const VideoEncodeRateControlLayerInfoKHR, + pub layer_count: u32, + pub p_layers: *const VideoEncodeRateControlLayerInfoKHR, } impl ::std::default::Default for VideoEncodeRateControlInfoKHR { #[inline] @@ -58145,8 +58622,8 @@ impl ::std::default::Default for VideoEncodeRateControlInfoKHR { p_next: ::std::ptr::null(), flags: VideoEncodeRateControlFlagsKHR::default(), rate_control_mode: VideoEncodeRateControlModeFlagsKHR::default(), - layer_count: u8::default(), - p_layer_configs: ::std::ptr::null(), + layer_count: u32::default(), + p_layers: ::std::ptr::null(), } } } @@ -58194,12 +58671,9 @@ impl<'a> VideoEncodeRateControlInfoKHRBuilder<'a> { self } #[inline] - pub fn layer_configs( - mut self, - layer_configs: &'a [VideoEncodeRateControlLayerInfoKHR], - ) -> Self { - self.inner.layer_count = layer_configs.len() as _; - self.inner.p_layer_configs = layer_configs.as_ptr(); + pub fn layers(mut self, layers: &'a [VideoEncodeRateControlLayerInfoKHR]) -> Self { + self.inner.layer_count = layers.len() as _; + self.inner.p_layers = layers.as_ptr(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] @@ -58216,8 +58690,8 @@ impl<'a> VideoEncodeRateControlInfoKHRBuilder<'a> { pub struct VideoEncodeRateControlLayerInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, - pub average_bitrate: u32, - pub max_bitrate: u32, + pub average_bitrate: u64, + pub max_bitrate: u64, pub frame_rate_numerator: u32, pub frame_rate_denominator: u32, pub virtual_buffer_size_in_ms: u32, @@ -58229,8 +58703,8 @@ impl ::std::default::Default for VideoEncodeRateControlLayerInfoKHR { Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), - average_bitrate: u32::default(), - max_bitrate: u32::default(), + average_bitrate: u64::default(), + max_bitrate: u64::default(), frame_rate_numerator: u32::default(), frame_rate_denominator: u32::default(), virtual_buffer_size_in_ms: u32::default(), @@ -58270,12 +58744,12 @@ impl<'a> ::std::ops::DerefMut for VideoEncodeRateControlLayerInfoKHRBuilder<'a> } impl<'a> VideoEncodeRateControlLayerInfoKHRBuilder<'a> { #[inline] - pub fn average_bitrate(mut self, average_bitrate: u32) -> Self { + pub fn average_bitrate(mut self, average_bitrate: u64) -> Self { self.inner.average_bitrate = average_bitrate; self } #[inline] - pub fn max_bitrate(mut self, max_bitrate: u32) -> Self { + pub fn max_bitrate(mut self, max_bitrate: u64) -> Self { self.inner.max_bitrate = max_bitrate; self } @@ -58335,9 +58809,10 @@ pub struct VideoEncodeCapabilitiesKHR { pub p_next: *mut c_void, pub flags: VideoEncodeCapabilityFlagsKHR, pub rate_control_modes: VideoEncodeRateControlModeFlagsKHR, - pub rate_control_layer_count: u8, - pub quality_level_count: u8, + pub max_rate_control_layers: u32, + pub max_quality_levels: u32, pub input_image_data_fill_alignment: Extent2D, + pub supported_encode_feedback_flags: VideoEncodeFeedbackFlagsKHR, } impl ::std::default::Default for 
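`QueryPoolVideoEncodeFeedbackCreateInfoKHR` above replaces the old per-buffer-range query type: a query pool is created with the feedback values it should report, chained through `ExtendsQueryPoolCreateInfo`. A sketch assuming a live device:

```rust
// Sketch: a query pool reporting the new encode-feedback values
// (bitstream offset and bytes written). Device lifetime/teardown is assumed.
use ash::vk;

unsafe fn encode_feedback_pool(
    device: &ash::Device,
) -> ash::prelude::VkResult<vk::QueryPool> {
    let mut feedback = vk::QueryPoolVideoEncodeFeedbackCreateInfoKHR::builder()
        .encode_feedback_flags(
            vk::VideoEncodeFeedbackFlagsKHR::BITSTREAM_BUFFER_OFFSET
                | vk::VideoEncodeFeedbackFlagsKHR::BITSTREAM_BYTES_WRITTEN,
        );
    let info = vk::QueryPoolCreateInfo::builder()
        .query_type(vk::QueryType::VIDEO_ENCODE_FEEDBACK_KHR)
        .query_count(1)
        .push_next(&mut feedback);
    device.create_query_pool(&info, None)
}
```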
VideoEncodeCapabilitiesKHR { #[inline] @@ -58347,9 +58822,10 @@ impl ::std::default::Default for VideoEncodeCapabilitiesKHR { p_next: ::std::ptr::null_mut(), flags: VideoEncodeCapabilityFlagsKHR::default(), rate_control_modes: VideoEncodeRateControlModeFlagsKHR::default(), - rate_control_layer_count: u8::default(), - quality_level_count: u8::default(), + max_rate_control_layers: u32::default(), + max_quality_levels: u32::default(), input_image_data_fill_alignment: Extent2D::default(), + supported_encode_feedback_flags: VideoEncodeFeedbackFlagsKHR::default(), } } } @@ -58397,13 +58873,13 @@ impl<'a> VideoEncodeCapabilitiesKHRBuilder<'a> { self } #[inline] - pub fn rate_control_layer_count(mut self, rate_control_layer_count: u8) -> Self { - self.inner.rate_control_layer_count = rate_control_layer_count; + pub fn max_rate_control_layers(mut self, max_rate_control_layers: u32) -> Self { + self.inner.max_rate_control_layers = max_rate_control_layers; self } #[inline] - pub fn quality_level_count(mut self, quality_level_count: u8) -> Self { - self.inner.quality_level_count = quality_level_count; + pub fn max_quality_levels(mut self, max_quality_levels: u32) -> Self { + self.inner.max_quality_levels = max_quality_levels; self } #[inline] @@ -58414,6 +58890,14 @@ impl<'a> VideoEncodeCapabilitiesKHRBuilder<'a> { self.inner.input_image_data_fill_alignment = input_image_data_fill_alignment; self } + #[inline] + pub fn supported_encode_feedback_flags( + mut self, + supported_encode_feedback_flags: VideoEncodeFeedbackFlagsKHR, + ) -> Self { + self.inner.supported_encode_feedback_flags = supported_encode_feedback_flags; + self + } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] @@ -58429,11 +58913,9 @@ pub struct VideoEncodeH264CapabilitiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, pub flags: VideoEncodeH264CapabilityFlagsEXT, - pub input_mode_flags: VideoEncodeH264InputModeFlagsEXT, - pub output_mode_flags: VideoEncodeH264OutputModeFlagsEXT, - pub max_p_picture_l0_reference_count: u8, - pub max_b_picture_l0_reference_count: u8, - pub max_l1_reference_count: u8, + pub max_p_picture_l0_reference_count: u32, + pub max_b_picture_l0_reference_count: u32, + pub max_l1_reference_count: u32, pub motion_vectors_over_pic_boundaries_flag: Bool32, pub max_bytes_per_pic_denom: u32, pub max_bits_per_mb_denom: u32, @@ -58447,11 +58929,9 @@ impl ::std::default::Default for VideoEncodeH264CapabilitiesEXT { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null_mut(), flags: VideoEncodeH264CapabilityFlagsEXT::default(), - input_mode_flags: VideoEncodeH264InputModeFlagsEXT::default(), - output_mode_flags: VideoEncodeH264OutputModeFlagsEXT::default(), - max_p_picture_l0_reference_count: u8::default(), - max_b_picture_l0_reference_count: u8::default(), - max_l1_reference_count: u8::default(), + max_p_picture_l0_reference_count: u32::default(), + max_b_picture_l0_reference_count: u32::default(), + max_l1_reference_count: u32::default(), motion_vectors_over_pic_boundaries_flag: Bool32::default(), max_bytes_per_pic_denom: u32::default(), max_bits_per_mb_denom: u32::default(), @@ -58496,22 +58976,9 @@ impl<'a> VideoEncodeH264CapabilitiesEXTBuilder<'a> { self } #[inline] - pub fn input_mode_flags(mut self, input_mode_flags: VideoEncodeH264InputModeFlagsEXT) -> Self { - 
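The rate-control structs above also widen: `layer_count` becomes `u32` (renamed slice setter `layers`) and the per-layer bitrates become `u64`. A sketch with illustrative values, e.g. a single CBR layer at 5 Mbit/s; all numbers are made up:

```rust
// Sketch: the widened rate-control API. `layers()` sets both the u32
// layer_count and p_layers; bitrates are now u64.
use ash::vk;

fn rate_control<'a>(
    layers: &'a [vk::VideoEncodeRateControlLayerInfoKHR],
) -> vk::VideoEncodeRateControlInfoKHRBuilder<'a> {
    vk::VideoEncodeRateControlInfoKHR::builder()
        .rate_control_mode(vk::VideoEncodeRateControlModeFlagsKHR::CBR)
        .layers(layers)
}

fn main() {
    let layer = vk::VideoEncodeRateControlLayerInfoKHR::builder()
        .average_bitrate(5_000_000) // u64 as of this header update
        .max_bitrate(5_000_000)
        .frame_rate_numerator(30)
        .frame_rate_denominator(1)
        .build();
    let _info = rate_control(std::slice::from_ref(&layer));
}
```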
self.inner.input_mode_flags = input_mode_flags; - self - } - #[inline] - pub fn output_mode_flags( - mut self, - output_mode_flags: VideoEncodeH264OutputModeFlagsEXT, - ) -> Self { - self.inner.output_mode_flags = output_mode_flags; - self - } - #[inline] pub fn max_p_picture_l0_reference_count( mut self, - max_p_picture_l0_reference_count: u8, + max_p_picture_l0_reference_count: u32, ) -> Self { self.inner.max_p_picture_l0_reference_count = max_p_picture_l0_reference_count; self @@ -58519,13 +58986,13 @@ impl<'a> VideoEncodeH264CapabilitiesEXTBuilder<'a> { #[inline] pub fn max_b_picture_l0_reference_count( mut self, - max_b_picture_l0_reference_count: u8, + max_b_picture_l0_reference_count: u32, ) -> Self { self.inner.max_b_picture_l0_reference_count = max_b_picture_l0_reference_count; self } #[inline] - pub fn max_l1_reference_count(mut self, max_l1_reference_count: u8) -> Self { + pub fn max_l1_reference_count(mut self, max_l1_reference_count: u32) -> Self { self.inner.max_l1_reference_count = max_l1_reference_count; self } @@ -58738,7 +59205,6 @@ impl<'a> VideoEncodeH264SessionParametersCreateInfoEXTBuilder<'a> { pub struct VideoEncodeH264DpbSlotInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, - pub slot_index: i8, pub p_std_reference_info: *const StdVideoEncodeH264ReferenceInfo, } impl ::std::default::Default for VideoEncodeH264DpbSlotInfoEXT { @@ -58747,7 +59213,6 @@ impl ::std::default::Default for VideoEncodeH264DpbSlotInfoEXT { Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), - slot_index: i8::default(), p_std_reference_info: ::std::ptr::null(), } } @@ -58768,6 +59233,8 @@ pub struct VideoEncodeH264DpbSlotInfoEXTBuilder<'a> { inner: VideoEncodeH264DpbSlotInfoEXT, marker: ::std::marker::PhantomData<&'a ()>, } +unsafe impl ExtendsVideoReferenceSlotInfoKHR for VideoEncodeH264DpbSlotInfoEXTBuilder<'_> {} +unsafe impl ExtendsVideoReferenceSlotInfoKHR for VideoEncodeH264DpbSlotInfoEXT {} impl<'a> ::std::ops::Deref for VideoEncodeH264DpbSlotInfoEXTBuilder<'a> { type Target = VideoEncodeH264DpbSlotInfoEXT; fn deref(&self) -> &Self::Target { @@ -58780,11 +59247,6 @@ impl<'a> ::std::ops::DerefMut for VideoEncodeH264DpbSlotInfoEXTBuilder<'a> { } } impl<'a> VideoEncodeH264DpbSlotInfoEXTBuilder<'a> { - #[inline] - pub fn slot_index(mut self, slot_index: i8) -> Self { - self.inner.slot_index = slot_index; - self - } #[inline] pub fn std_reference_info( mut self, @@ -58807,10 +59269,10 @@ impl<'a> VideoEncodeH264DpbSlotInfoEXTBuilder<'a> { pub struct VideoEncodeH264VclFrameInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, - pub p_reference_final_lists: *const VideoEncodeH264ReferenceListsInfoEXT, + pub p_std_reference_final_lists: *const StdVideoEncodeH264ReferenceListsInfo, pub nalu_slice_entry_count: u32, pub p_nalu_slice_entries: *const VideoEncodeH264NaluSliceInfoEXT, - pub p_current_picture_info: *const StdVideoEncodeH264PictureInfo, + pub p_std_picture_info: *const StdVideoEncodeH264PictureInfo, } impl ::std::default::Default for VideoEncodeH264VclFrameInfoEXT { #[inline] @@ -58818,10 +59280,10 @@ impl ::std::default::Default for VideoEncodeH264VclFrameInfoEXT { Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), - p_reference_final_lists: ::std::ptr::null(), + p_std_reference_final_lists: ::std::ptr::null(), nalu_slice_entry_count: u32::default(), p_nalu_slice_entries: ::std::ptr::null(), - p_current_picture_info: ::std::ptr::null(), + p_std_picture_info: ::std::ptr::null(), } } } @@ -58856,11 +59318,11 @@ impl<'a> 
::std::ops::DerefMut for VideoEncodeH264VclFrameInfoEXTBuilder<'a> { } impl<'a> VideoEncodeH264VclFrameInfoEXTBuilder<'a> { #[inline] - pub fn reference_final_lists( + pub fn std_reference_final_lists( mut self, - reference_final_lists: &'a VideoEncodeH264ReferenceListsInfoEXT, + std_reference_final_lists: &'a StdVideoEncodeH264ReferenceListsInfo, ) -> Self { - self.inner.p_reference_final_lists = reference_final_lists; + self.inner.p_std_reference_final_lists = std_reference_final_lists; self } #[inline] @@ -58873,11 +59335,8 @@ impl<'a> VideoEncodeH264VclFrameInfoEXTBuilder<'a> { self } #[inline] - pub fn current_picture_info( - mut self, - current_picture_info: &'a StdVideoEncodeH264PictureInfo, - ) -> Self { - self.inner.p_current_picture_info = current_picture_info; + pub fn std_picture_info(mut self, std_picture_info: &'a StdVideoEncodeH264PictureInfo) -> Self { + self.inner.p_std_picture_info = std_picture_info; self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] @@ -58890,173 +59349,6 @@ impl<'a> VideoEncodeH264VclFrameInfoEXTBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] -#[doc = ""] -pub struct VideoEncodeH264ReferenceListsInfoEXT { - pub s_type: StructureType, - pub p_next: *const c_void, - pub reference_list0_entry_count: u8, - pub p_reference_list0_entries: *const VideoEncodeH264DpbSlotInfoEXT, - pub reference_list1_entry_count: u8, - pub p_reference_list1_entries: *const VideoEncodeH264DpbSlotInfoEXT, - pub p_mem_mgmt_ctrl_operations: *const StdVideoEncodeH264RefMemMgmtCtrlOperations, -} -impl ::std::default::Default for VideoEncodeH264ReferenceListsInfoEXT { - #[inline] - fn default() -> Self { - Self { - s_type: Self::STRUCTURE_TYPE, - p_next: ::std::ptr::null(), - reference_list0_entry_count: u8::default(), - p_reference_list0_entries: ::std::ptr::null(), - reference_list1_entry_count: u8::default(), - p_reference_list1_entries: ::std::ptr::null(), - p_mem_mgmt_ctrl_operations: ::std::ptr::null(), - } - } -} -unsafe impl TaggedStructure for VideoEncodeH264ReferenceListsInfoEXT { - const STRUCTURE_TYPE: StructureType = StructureType::VIDEO_ENCODE_H264_REFERENCE_LISTS_INFO_EXT; -} -impl VideoEncodeH264ReferenceListsInfoEXT { - pub fn builder<'a>() -> VideoEncodeH264ReferenceListsInfoEXTBuilder<'a> { - VideoEncodeH264ReferenceListsInfoEXTBuilder { - inner: Self::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct VideoEncodeH264ReferenceListsInfoEXTBuilder<'a> { - inner: VideoEncodeH264ReferenceListsInfoEXT, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for VideoEncodeH264ReferenceListsInfoEXTBuilder<'a> { - type Target = VideoEncodeH264ReferenceListsInfoEXT; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for VideoEncodeH264ReferenceListsInfoEXTBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> VideoEncodeH264ReferenceListsInfoEXTBuilder<'a> { - #[inline] - pub fn reference_list0_entries( - mut self, - reference_list0_entries: &'a [VideoEncodeH264DpbSlotInfoEXT], - ) -> Self { - self.inner.reference_list0_entry_count = reference_list0_entries.len() as _; - self.inner.p_reference_list0_entries = reference_list0_entries.as_ptr(); - self - } - #[inline] - pub fn reference_list1_entries( - mut self, - reference_list1_entries: &'a [VideoEncodeH264DpbSlotInfoEXT], - ) -> Self { - self.inner.reference_list1_entry_count = 
reference_list1_entries.len() as _; - self.inner.p_reference_list1_entries = reference_list1_entries.as_ptr(); - self - } - #[inline] - pub fn mem_mgmt_ctrl_operations( - mut self, - mem_mgmt_ctrl_operations: &'a StdVideoEncodeH264RefMemMgmtCtrlOperations, - ) -> Self { - self.inner.p_mem_mgmt_ctrl_operations = mem_mgmt_ctrl_operations; - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> VideoEncodeH264ReferenceListsInfoEXT { - self.inner - } -} -#[repr(C)] -#[cfg_attr(feature = "debug", derive(Debug))] -#[derive(Copy, Clone)] -#[doc = ""] -pub struct VideoEncodeH264EmitPictureParametersInfoEXT { - pub s_type: StructureType, - pub p_next: *const c_void, - pub sps_id: u8, - pub emit_sps_enable: Bool32, - pub pps_id_entry_count: u32, - pub pps_id_entries: *const u8, -} -impl ::std::default::Default for VideoEncodeH264EmitPictureParametersInfoEXT { - #[inline] - fn default() -> Self { - Self { - s_type: Self::STRUCTURE_TYPE, - p_next: ::std::ptr::null(), - sps_id: u8::default(), - emit_sps_enable: Bool32::default(), - pps_id_entry_count: u32::default(), - pps_id_entries: ::std::ptr::null(), - } - } -} -unsafe impl TaggedStructure for VideoEncodeH264EmitPictureParametersInfoEXT { - const STRUCTURE_TYPE: StructureType = - StructureType::VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_INFO_EXT; -} -impl VideoEncodeH264EmitPictureParametersInfoEXT { - pub fn builder<'a>() -> VideoEncodeH264EmitPictureParametersInfoEXTBuilder<'a> { - VideoEncodeH264EmitPictureParametersInfoEXTBuilder { - inner: Self::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct VideoEncodeH264EmitPictureParametersInfoEXTBuilder<'a> { - inner: VideoEncodeH264EmitPictureParametersInfoEXT, - marker: ::std::marker::PhantomData<&'a ()>, -} -unsafe impl ExtendsVideoEncodeInfoKHR for VideoEncodeH264EmitPictureParametersInfoEXTBuilder<'_> {} -unsafe impl ExtendsVideoEncodeInfoKHR for VideoEncodeH264EmitPictureParametersInfoEXT {} -impl<'a> ::std::ops::Deref for VideoEncodeH264EmitPictureParametersInfoEXTBuilder<'a> { - type Target = VideoEncodeH264EmitPictureParametersInfoEXT; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for VideoEncodeH264EmitPictureParametersInfoEXTBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> VideoEncodeH264EmitPictureParametersInfoEXTBuilder<'a> { - #[inline] - pub fn sps_id(mut self, sps_id: u8) -> Self { - self.inner.sps_id = sps_id; - self - } - #[inline] - pub fn emit_sps_enable(mut self, emit_sps_enable: bool) -> Self { - self.inner.emit_sps_enable = emit_sps_enable.into(); - self - } - #[inline] - pub fn pps_id_entries(mut self, pps_id_entries: &'a [u8]) -> Self { - self.inner.pps_id_entry_count = pps_id_entries.len() as _; - self.inner.pps_id_entries = pps_id_entries.as_ptr(); - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> VideoEncodeH264EmitPictureParametersInfoEXT { - self.inner - } -} -#[repr(C)] -#[cfg_attr(feature = "debug", derive(Debug))] -#[derive(Copy, Clone)] #[doc = ""] pub struct VideoEncodeH264ProfileInfoEXT { pub s_type: StructureType, @@ -59125,8 +59417,8 @@ pub struct VideoEncodeH264NaluSliceInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, pub mb_count: u32, - pub p_reference_final_lists: *const VideoEncodeH264ReferenceListsInfoEXT, - pub p_slice_header_std: *const StdVideoEncodeH264SliceHeader, + pub p_std_reference_final_lists: *const StdVideoEncodeH264ReferenceListsInfo, + pub p_std_slice_header: *const StdVideoEncodeH264SliceHeader, } impl ::std::default::Default for VideoEncodeH264NaluSliceInfoEXT { #[inline] @@ -59135,8 +59427,8 @@ impl ::std::default::Default for VideoEncodeH264NaluSliceInfoEXT { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), mb_count: u32::default(), - p_reference_final_lists: ::std::ptr::null(), - p_slice_header_std: ::std::ptr::null(), + p_std_reference_final_lists: ::std::ptr::null(), + p_std_slice_header: ::std::ptr::null(), } } } @@ -59174,16 +59466,16 @@ impl<'a> VideoEncodeH264NaluSliceInfoEXTBuilder<'a> { self } #[inline] - pub fn reference_final_lists( + pub fn std_reference_final_lists( mut self, - reference_final_lists: &'a VideoEncodeH264ReferenceListsInfoEXT, + std_reference_final_lists: &'a StdVideoEncodeH264ReferenceListsInfo, ) -> Self { - self.inner.p_reference_final_lists = reference_final_lists; + self.inner.p_std_reference_final_lists = std_reference_final_lists; self } #[inline] - pub fn slice_header_std(mut self, slice_header_std: &'a StdVideoEncodeH264SliceHeader) -> Self { - self.inner.p_slice_header_std = slice_header_std; + pub fn std_slice_header(mut self, std_slice_header: &'a StdVideoEncodeH264SliceHeader) -> Self { + self.inner.p_std_slice_header = std_slice_header; self } #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] @@ -59204,7 +59496,7 @@ pub struct VideoEncodeH264RateControlInfoEXT { pub idr_period: u32, pub consecutive_b_frame_count: u32, pub rate_control_structure: VideoEncodeH264RateControlStructureEXT, - pub temporal_layer_count: u8, + pub temporal_layer_count: u32, } impl ::std::default::Default for VideoEncodeH264RateControlInfoEXT { #[inline] @@ -59216,7 +59508,7 @@ impl ::std::default::Default for VideoEncodeH264RateControlInfoEXT { idr_period: u32::default(), consecutive_b_frame_count: u32::default(), rate_control_structure: VideoEncodeH264RateControlStructureEXT::default(), - temporal_layer_count: u8::default(), + temporal_layer_count: u32::default(), } } } @@ -59274,7 +59566,7 @@ impl<'a> VideoEncodeH264RateControlInfoEXTBuilder<'a> { self } #[inline] - pub fn temporal_layer_count(mut self, temporal_layer_count: u8) -> Self { + pub fn temporal_layer_count(mut self, temporal_layer_count: u32) -> Self { self.inner.temporal_layer_count = temporal_layer_count; self } @@ -59404,7 +59696,7 @@ impl<'a> VideoEncodeH264FrameSizeEXTBuilder<'a> { pub struct VideoEncodeH264RateControlLayerInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, - pub temporal_layer_id: u8, + pub temporal_layer_id: u32, pub use_initial_rc_qp: Bool32, pub initial_rc_qp: VideoEncodeH264QpEXT, pub use_min_qp: Bool32, @@ -59420,7 +59712,7 @@ impl ::std::default::Default for VideoEncodeH264RateControlLayerInfoEXT { Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), - temporal_layer_id: u8::default(), + temporal_layer_id: u32::default(), use_initial_rc_qp: Bool32::default(), initial_rc_qp: VideoEncodeH264QpEXT::default(), use_min_qp: Bool32::default(), @@ -59469,7 +59761,7 @@ impl<'a> ::std::ops::DerefMut for VideoEncodeH264RateControlLayerInfoEXTBuilder< } impl<'a> VideoEncodeH264RateControlLayerInfoEXTBuilder<'a> { #[inline] - pub fn temporal_layer_id(mut self, temporal_layer_id: u8) -> Self { + pub fn temporal_layer_id(mut self, temporal_layer_id: u32) -> Self { self.inner.temporal_layer_id = temporal_layer_id; self } @@ -59528,25 +59820,23 @@ pub struct VideoEncodeH265CapabilitiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, pub flags: VideoEncodeH265CapabilityFlagsEXT, - pub input_mode_flags: VideoEncodeH265InputModeFlagsEXT, - pub output_mode_flags: VideoEncodeH265OutputModeFlagsEXT, pub ctb_sizes: VideoEncodeH265CtbSizeFlagsEXT, pub transform_block_sizes: VideoEncodeH265TransformBlockSizeFlagsEXT, - pub max_p_picture_l0_reference_count: u8, - pub max_b_picture_l0_reference_count: u8, - pub max_l1_reference_count: u8, - pub max_sub_layers_count: u8, - pub min_log2_min_luma_coding_block_size_minus3: u8, - pub max_log2_min_luma_coding_block_size_minus3: u8, - pub min_log2_min_luma_transform_block_size_minus2: u8, - pub max_log2_min_luma_transform_block_size_minus2: u8, - pub min_max_transform_hierarchy_depth_inter: u8, - pub max_max_transform_hierarchy_depth_inter: u8, - pub min_max_transform_hierarchy_depth_intra: u8, - pub max_max_transform_hierarchy_depth_intra: u8, - pub max_diff_cu_qp_delta_depth: u8, - pub min_max_num_merge_cand: u8, - pub max_max_num_merge_cand: u8, + pub max_p_picture_l0_reference_count: u32, + pub max_b_picture_l0_reference_count: u32, + pub max_l1_reference_count: u32, + pub max_sub_layers_count: u32, + pub min_log2_min_luma_coding_block_size_minus3: u32, + pub max_log2_min_luma_coding_block_size_minus3: u32, + pub min_log2_min_luma_transform_block_size_minus2: u32, + pub max_log2_min_luma_transform_block_size_minus2: u32, + pub 
min_max_transform_hierarchy_depth_inter: u32, + pub max_max_transform_hierarchy_depth_inter: u32, + pub min_max_transform_hierarchy_depth_intra: u32, + pub max_max_transform_hierarchy_depth_intra: u32, + pub max_diff_cu_qp_delta_depth: u32, + pub min_max_num_merge_cand: u32, + pub max_max_num_merge_cand: u32, } impl ::std::default::Default for VideoEncodeH265CapabilitiesEXT { #[inline] @@ -59555,25 +59845,23 @@ impl ::std::default::Default for VideoEncodeH265CapabilitiesEXT { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null_mut(), flags: VideoEncodeH265CapabilityFlagsEXT::default(), - input_mode_flags: VideoEncodeH265InputModeFlagsEXT::default(), - output_mode_flags: VideoEncodeH265OutputModeFlagsEXT::default(), ctb_sizes: VideoEncodeH265CtbSizeFlagsEXT::default(), transform_block_sizes: VideoEncodeH265TransformBlockSizeFlagsEXT::default(), - max_p_picture_l0_reference_count: u8::default(), - max_b_picture_l0_reference_count: u8::default(), - max_l1_reference_count: u8::default(), - max_sub_layers_count: u8::default(), - min_log2_min_luma_coding_block_size_minus3: u8::default(), - max_log2_min_luma_coding_block_size_minus3: u8::default(), - min_log2_min_luma_transform_block_size_minus2: u8::default(), - max_log2_min_luma_transform_block_size_minus2: u8::default(), - min_max_transform_hierarchy_depth_inter: u8::default(), - max_max_transform_hierarchy_depth_inter: u8::default(), - min_max_transform_hierarchy_depth_intra: u8::default(), - max_max_transform_hierarchy_depth_intra: u8::default(), - max_diff_cu_qp_delta_depth: u8::default(), - min_max_num_merge_cand: u8::default(), - max_max_num_merge_cand: u8::default(), + max_p_picture_l0_reference_count: u32::default(), + max_b_picture_l0_reference_count: u32::default(), + max_l1_reference_count: u32::default(), + max_sub_layers_count: u32::default(), + min_log2_min_luma_coding_block_size_minus3: u32::default(), + max_log2_min_luma_coding_block_size_minus3: u32::default(), + min_log2_min_luma_transform_block_size_minus2: u32::default(), + max_log2_min_luma_transform_block_size_minus2: u32::default(), + min_max_transform_hierarchy_depth_inter: u32::default(), + max_max_transform_hierarchy_depth_inter: u32::default(), + min_max_transform_hierarchy_depth_intra: u32::default(), + max_max_transform_hierarchy_depth_intra: u32::default(), + max_diff_cu_qp_delta_depth: u32::default(), + min_max_num_merge_cand: u32::default(), + max_max_num_merge_cand: u32::default(), } } } @@ -59613,19 +59901,6 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { self } #[inline] - pub fn input_mode_flags(mut self, input_mode_flags: VideoEncodeH265InputModeFlagsEXT) -> Self { - self.inner.input_mode_flags = input_mode_flags; - self - } - #[inline] - pub fn output_mode_flags( - mut self, - output_mode_flags: VideoEncodeH265OutputModeFlagsEXT, - ) -> Self { - self.inner.output_mode_flags = output_mode_flags; - self - } - #[inline] pub fn ctb_sizes(mut self, ctb_sizes: VideoEncodeH265CtbSizeFlagsEXT) -> Self { self.inner.ctb_sizes = ctb_sizes; self @@ -59641,7 +59916,7 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn max_p_picture_l0_reference_count( mut self, - max_p_picture_l0_reference_count: u8, + max_p_picture_l0_reference_count: u32, ) -> Self { self.inner.max_p_picture_l0_reference_count = max_p_picture_l0_reference_count; self @@ -59649,25 +59924,25 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn max_b_picture_l0_reference_count( mut self, - max_b_picture_l0_reference_count: u8, + 
max_b_picture_l0_reference_count: u32, ) -> Self { self.inner.max_b_picture_l0_reference_count = max_b_picture_l0_reference_count; self } #[inline] - pub fn max_l1_reference_count(mut self, max_l1_reference_count: u8) -> Self { + pub fn max_l1_reference_count(mut self, max_l1_reference_count: u32) -> Self { self.inner.max_l1_reference_count = max_l1_reference_count; self } #[inline] - pub fn max_sub_layers_count(mut self, max_sub_layers_count: u8) -> Self { + pub fn max_sub_layers_count(mut self, max_sub_layers_count: u32) -> Self { self.inner.max_sub_layers_count = max_sub_layers_count; self } #[inline] pub fn min_log2_min_luma_coding_block_size_minus3( mut self, - min_log2_min_luma_coding_block_size_minus3: u8, + min_log2_min_luma_coding_block_size_minus3: u32, ) -> Self { self.inner.min_log2_min_luma_coding_block_size_minus3 = min_log2_min_luma_coding_block_size_minus3; @@ -59676,7 +59951,7 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn max_log2_min_luma_coding_block_size_minus3( mut self, - max_log2_min_luma_coding_block_size_minus3: u8, + max_log2_min_luma_coding_block_size_minus3: u32, ) -> Self { self.inner.max_log2_min_luma_coding_block_size_minus3 = max_log2_min_luma_coding_block_size_minus3; @@ -59685,7 +59960,7 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn min_log2_min_luma_transform_block_size_minus2( mut self, - min_log2_min_luma_transform_block_size_minus2: u8, + min_log2_min_luma_transform_block_size_minus2: u32, ) -> Self { self.inner.min_log2_min_luma_transform_block_size_minus2 = min_log2_min_luma_transform_block_size_minus2; @@ -59694,7 +59969,7 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn max_log2_min_luma_transform_block_size_minus2( mut self, - max_log2_min_luma_transform_block_size_minus2: u8, + max_log2_min_luma_transform_block_size_minus2: u32, ) -> Self { self.inner.max_log2_min_luma_transform_block_size_minus2 = max_log2_min_luma_transform_block_size_minus2; @@ -59703,7 +59978,7 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn min_max_transform_hierarchy_depth_inter( mut self, - min_max_transform_hierarchy_depth_inter: u8, + min_max_transform_hierarchy_depth_inter: u32, ) -> Self { self.inner.min_max_transform_hierarchy_depth_inter = min_max_transform_hierarchy_depth_inter; @@ -59712,7 +59987,7 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn max_max_transform_hierarchy_depth_inter( mut self, - max_max_transform_hierarchy_depth_inter: u8, + max_max_transform_hierarchy_depth_inter: u32, ) -> Self { self.inner.max_max_transform_hierarchy_depth_inter = max_max_transform_hierarchy_depth_inter; @@ -59721,7 +59996,7 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn min_max_transform_hierarchy_depth_intra( mut self, - min_max_transform_hierarchy_depth_intra: u8, + min_max_transform_hierarchy_depth_intra: u32, ) -> Self { self.inner.min_max_transform_hierarchy_depth_intra = min_max_transform_hierarchy_depth_intra; @@ -59730,24 +60005,24 @@ impl<'a> VideoEncodeH265CapabilitiesEXTBuilder<'a> { #[inline] pub fn max_max_transform_hierarchy_depth_intra( mut self, - max_max_transform_hierarchy_depth_intra: u8, + max_max_transform_hierarchy_depth_intra: u32, ) -> Self { self.inner.max_max_transform_hierarchy_depth_intra = max_max_transform_hierarchy_depth_intra; self } #[inline] - pub fn max_diff_cu_qp_delta_depth(mut self, max_diff_cu_qp_delta_depth: u8) -> Self { + pub fn max_diff_cu_qp_delta_depth(mut self, 
max_diff_cu_qp_delta_depth: u32) -> Self { self.inner.max_diff_cu_qp_delta_depth = max_diff_cu_qp_delta_depth; self } #[inline] - pub fn min_max_num_merge_cand(mut self, min_max_num_merge_cand: u8) -> Self { + pub fn min_max_num_merge_cand(mut self, min_max_num_merge_cand: u32) -> Self { self.inner.min_max_num_merge_cand = min_max_num_merge_cand; self } #[inline] - pub fn max_max_num_merge_cand(mut self, max_max_num_merge_cand: u8) -> Self { + pub fn max_max_num_merge_cand(mut self, max_max_num_merge_cand: u32) -> Self { self.inner.max_max_num_merge_cand = max_max_num_merge_cand; self } @@ -59948,10 +60223,10 @@ impl<'a> VideoEncodeH265SessionParametersCreateInfoEXTBuilder<'a> { pub struct VideoEncodeH265VclFrameInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, - pub p_reference_final_lists: *const VideoEncodeH265ReferenceListsInfoEXT, + pub p_std_reference_final_lists: *const StdVideoEncodeH265ReferenceListsInfo, pub nalu_slice_segment_entry_count: u32, pub p_nalu_slice_segment_entries: *const VideoEncodeH265NaluSliceSegmentInfoEXT, - pub p_current_picture_info: *const StdVideoEncodeH265PictureInfo, + pub p_std_picture_info: *const StdVideoEncodeH265PictureInfo, } impl ::std::default::Default for VideoEncodeH265VclFrameInfoEXT { #[inline] @@ -59959,10 +60234,10 @@ impl ::std::default::Default for VideoEncodeH265VclFrameInfoEXT { Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), - p_reference_final_lists: ::std::ptr::null(), + p_std_reference_final_lists: ::std::ptr::null(), nalu_slice_segment_entry_count: u32::default(), p_nalu_slice_segment_entries: ::std::ptr::null(), - p_current_picture_info: ::std::ptr::null(), + p_std_picture_info: ::std::ptr::null(), } } } @@ -59997,11 +60272,11 @@ impl<'a> ::std::ops::DerefMut for VideoEncodeH265VclFrameInfoEXTBuilder<'a> { } impl<'a> VideoEncodeH265VclFrameInfoEXTBuilder<'a> { #[inline] - pub fn reference_final_lists( + pub fn std_reference_final_lists( mut self, - reference_final_lists: &'a VideoEncodeH265ReferenceListsInfoEXT, + std_reference_final_lists: &'a StdVideoEncodeH265ReferenceListsInfo, ) -> Self { - self.inner.p_reference_final_lists = reference_final_lists; + self.inner.p_std_reference_final_lists = std_reference_final_lists; self } #[inline] @@ -60014,11 +60289,8 @@ impl<'a> VideoEncodeH265VclFrameInfoEXTBuilder<'a> { self } #[inline] - pub fn current_picture_info( - mut self, - current_picture_info: &'a StdVideoEncodeH265PictureInfo, - ) -> Self { - self.inner.p_current_picture_info = current_picture_info; + pub fn std_picture_info(mut self, std_picture_info: &'a StdVideoEncodeH265PictureInfo) -> Self { + self.inner.p_std_picture_info = std_picture_info; self } #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] @@ -60031,106 +60303,13 @@ impl<'a> VideoEncodeH265VclFrameInfoEXTBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] -#[doc = ""] -pub struct VideoEncodeH265EmitPictureParametersInfoEXT { - pub s_type: StructureType, - pub p_next: *const c_void, - pub vps_id: u8, - pub sps_id: u8, - pub emit_vps_enable: Bool32, - pub emit_sps_enable: Bool32, - pub pps_id_entry_count: u32, - pub pps_id_entries: *const u8, -} -impl ::std::default::Default for VideoEncodeH265EmitPictureParametersInfoEXT { - #[inline] - fn default() -> Self { - Self { - s_type: Self::STRUCTURE_TYPE, - p_next: ::std::ptr::null(), - vps_id: u8::default(), - sps_id: u8::default(), - emit_vps_enable: Bool32::default(), - emit_sps_enable: Bool32::default(), - pps_id_entry_count: u32::default(), - pps_id_entries: ::std::ptr::null(), - } - } -} -unsafe impl TaggedStructure for VideoEncodeH265EmitPictureParametersInfoEXT { - const STRUCTURE_TYPE: StructureType = - StructureType::VIDEO_ENCODE_H265_EMIT_PICTURE_PARAMETERS_INFO_EXT; -} -impl VideoEncodeH265EmitPictureParametersInfoEXT { - pub fn builder<'a>() -> VideoEncodeH265EmitPictureParametersInfoEXTBuilder<'a> { - VideoEncodeH265EmitPictureParametersInfoEXTBuilder { - inner: Self::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct VideoEncodeH265EmitPictureParametersInfoEXTBuilder<'a> { - inner: VideoEncodeH265EmitPictureParametersInfoEXT, - marker: ::std::marker::PhantomData<&'a ()>, -} -unsafe impl ExtendsVideoEncodeInfoKHR for VideoEncodeH265EmitPictureParametersInfoEXTBuilder<'_> {} -unsafe impl ExtendsVideoEncodeInfoKHR for VideoEncodeH265EmitPictureParametersInfoEXT {} -impl<'a> ::std::ops::Deref for VideoEncodeH265EmitPictureParametersInfoEXTBuilder<'a> { - type Target = VideoEncodeH265EmitPictureParametersInfoEXT; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for VideoEncodeH265EmitPictureParametersInfoEXTBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> VideoEncodeH265EmitPictureParametersInfoEXTBuilder<'a> { - #[inline] - pub fn vps_id(mut self, vps_id: u8) -> Self { - self.inner.vps_id = vps_id; - self - } - #[inline] - pub fn sps_id(mut self, sps_id: u8) -> Self { - self.inner.sps_id = sps_id; - self - } - #[inline] - pub fn emit_vps_enable(mut self, emit_vps_enable: bool) -> Self { - self.inner.emit_vps_enable = emit_vps_enable.into(); - self - } - #[inline] - pub fn emit_sps_enable(mut self, emit_sps_enable: bool) -> Self { - self.inner.emit_sps_enable = emit_sps_enable.into(); - self - } - #[inline] - pub fn pps_id_entries(mut self, pps_id_entries: &'a [u8]) -> Self { - self.inner.pps_id_entry_count = pps_id_entries.len() as _; - self.inner.pps_id_entries = pps_id_entries.as_ptr(); - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> VideoEncodeH265EmitPictureParametersInfoEXT { - self.inner - } -} -#[repr(C)] -#[cfg_attr(feature = "debug", derive(Debug))] -#[derive(Copy, Clone)] #[doc = ""] pub struct VideoEncodeH265NaluSliceSegmentInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, pub ctb_count: u32, - pub p_reference_final_lists: *const VideoEncodeH265ReferenceListsInfoEXT, - pub p_slice_segment_header_std: *const StdVideoEncodeH265SliceSegmentHeader, + pub p_std_reference_final_lists: *const StdVideoEncodeH265ReferenceListsInfo, + pub p_std_slice_segment_header: *const StdVideoEncodeH265SliceSegmentHeader, } impl ::std::default::Default for VideoEncodeH265NaluSliceSegmentInfoEXT { #[inline] @@ -60139,8 +60318,8 @@ impl ::std::default::Default for VideoEncodeH265NaluSliceSegmentInfoEXT { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), ctb_count: u32::default(), - p_reference_final_lists: ::std::ptr::null(), - p_slice_segment_header_std: ::std::ptr::null(), + p_std_reference_final_lists: ::std::ptr::null(), + p_std_slice_segment_header: ::std::ptr::null(), } } } @@ -60179,19 +60358,19 @@ impl<'a> VideoEncodeH265NaluSliceSegmentInfoEXTBuilder<'a> { self } #[inline] - pub fn reference_final_lists( + pub fn std_reference_final_lists( mut self, - reference_final_lists: &'a VideoEncodeH265ReferenceListsInfoEXT, + std_reference_final_lists: &'a StdVideoEncodeH265ReferenceListsInfo, ) -> Self { - self.inner.p_reference_final_lists = reference_final_lists; + self.inner.p_std_reference_final_lists = std_reference_final_lists; self } #[inline] - pub fn slice_segment_header_std( + pub fn std_slice_segment_header( mut self, - slice_segment_header_std: &'a StdVideoEncodeH265SliceSegmentHeader, + std_slice_segment_header: &'a StdVideoEncodeH265SliceSegmentHeader, ) -> Self { - self.inner.p_slice_segment_header_std = slice_segment_header_std; + self.inner.p_std_slice_segment_header = std_slice_segment_header; self } #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] @@ -60212,7 +60391,7 @@ pub struct VideoEncodeH265RateControlInfoEXT { pub idr_period: u32, pub consecutive_b_frame_count: u32, pub rate_control_structure: VideoEncodeH265RateControlStructureEXT, - pub sub_layer_count: u8, + pub sub_layer_count: u32, } impl ::std::default::Default for VideoEncodeH265RateControlInfoEXT { #[inline] @@ -60224,7 +60403,7 @@ impl ::std::default::Default for VideoEncodeH265RateControlInfoEXT { idr_period: u32::default(), consecutive_b_frame_count: u32::default(), rate_control_structure: VideoEncodeH265RateControlStructureEXT::default(), - sub_layer_count: u8::default(), + sub_layer_count: u32::default(), } } } @@ -60282,7 +60461,7 @@ impl<'a> VideoEncodeH265RateControlInfoEXTBuilder<'a> { self } #[inline] - pub fn sub_layer_count(mut self, sub_layer_count: u8) -> Self { + pub fn sub_layer_count(mut self, sub_layer_count: u32) -> Self { self.inner.sub_layer_count = sub_layer_count; self } @@ -60412,7 +60591,7 @@ impl<'a> VideoEncodeH265FrameSizeEXTBuilder<'a> { pub struct VideoEncodeH265RateControlLayerInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, - pub temporal_id: u8, + pub temporal_id: u32, pub use_initial_rc_qp: Bool32, pub initial_rc_qp: VideoEncodeH265QpEXT, pub use_min_qp: Bool32, @@ -60428,7 +60607,7 @@ impl ::std::default::Default for VideoEncodeH265RateControlLayerInfoEXT { Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), - temporal_id: u8::default(), + temporal_id: u32::default(), use_initial_rc_qp: Bool32::default(), initial_rc_qp: VideoEncodeH265QpEXT::default(), use_min_qp: Bool32::default(), @@ -60477,7 +60656,7 @@ impl<'a> ::std::ops::DerefMut for VideoEncodeH265RateControlLayerInfoEXTBuilder< } impl<'a> VideoEncodeH265RateControlLayerInfoEXTBuilder<'a> { #[inline] - pub fn temporal_id(mut self, temporal_id: u8) -> Self { + pub fn temporal_id(mut self, temporal_id: u32) -> Self { self.inner.temporal_id = temporal_id; self } @@ -60598,7 +60777,6 @@ impl<'a> VideoEncodeH265ProfileInfoEXTBuilder<'a> { pub struct VideoEncodeH265DpbSlotInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, - pub slot_index: i8, pub p_std_reference_info: *const StdVideoEncodeH265ReferenceInfo, } impl ::std::default::Default for VideoEncodeH265DpbSlotInfoEXT { @@ -60607,7 +60785,6 @@ impl ::std::default::Default for VideoEncodeH265DpbSlotInfoEXT { Self { s_type: Self::STRUCTURE_TYPE, p_next: ::std::ptr::null(), - slot_index: i8::default(), p_std_reference_info: ::std::ptr::null(), } } @@ -60628,6 +60805,8 @@ pub struct VideoEncodeH265DpbSlotInfoEXTBuilder<'a> { inner: VideoEncodeH265DpbSlotInfoEXT, marker: ::std::marker::PhantomData<&'a ()>, } +unsafe impl ExtendsVideoReferenceSlotInfoKHR for VideoEncodeH265DpbSlotInfoEXTBuilder<'_> {} +unsafe impl ExtendsVideoReferenceSlotInfoKHR for VideoEncodeH265DpbSlotInfoEXT {} impl<'a> ::std::ops::Deref for VideoEncodeH265DpbSlotInfoEXTBuilder<'a> { type Target = VideoEncodeH265DpbSlotInfoEXT; fn deref(&self) -> &Self::Target { @@ -60640,11 +60819,6 @@ impl<'a> ::std::ops::DerefMut for VideoEncodeH265DpbSlotInfoEXTBuilder<'a> { } } impl<'a> VideoEncodeH265DpbSlotInfoEXTBuilder<'a> { - #[inline] - pub fn slot_index(mut self, slot_index: i8) -> Self { - self.inner.slot_index = slot_index; - self - } #[inline] pub fn std_reference_info( mut self, @@ -60663,94 +60837,6 @@ impl<'a> VideoEncodeH265DpbSlotInfoEXTBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] -#[doc = ""] -pub struct VideoEncodeH265ReferenceListsInfoEXT { - pub 
s_type: StructureType, - pub p_next: *const c_void, - pub reference_list0_entry_count: u8, - pub p_reference_list0_entries: *const VideoEncodeH265DpbSlotInfoEXT, - pub reference_list1_entry_count: u8, - pub p_reference_list1_entries: *const VideoEncodeH265DpbSlotInfoEXT, - pub p_reference_modifications: *const StdVideoEncodeH265ReferenceModifications, -} -impl ::std::default::Default for VideoEncodeH265ReferenceListsInfoEXT { - #[inline] - fn default() -> Self { - Self { - s_type: Self::STRUCTURE_TYPE, - p_next: ::std::ptr::null(), - reference_list0_entry_count: u8::default(), - p_reference_list0_entries: ::std::ptr::null(), - reference_list1_entry_count: u8::default(), - p_reference_list1_entries: ::std::ptr::null(), - p_reference_modifications: ::std::ptr::null(), - } - } -} -unsafe impl TaggedStructure for VideoEncodeH265ReferenceListsInfoEXT { - const STRUCTURE_TYPE: StructureType = StructureType::VIDEO_ENCODE_H265_REFERENCE_LISTS_INFO_EXT; -} -impl VideoEncodeH265ReferenceListsInfoEXT { - pub fn builder<'a>() -> VideoEncodeH265ReferenceListsInfoEXTBuilder<'a> { - VideoEncodeH265ReferenceListsInfoEXTBuilder { - inner: Self::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct VideoEncodeH265ReferenceListsInfoEXTBuilder<'a> { - inner: VideoEncodeH265ReferenceListsInfoEXT, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for VideoEncodeH265ReferenceListsInfoEXTBuilder<'a> { - type Target = VideoEncodeH265ReferenceListsInfoEXT; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for VideoEncodeH265ReferenceListsInfoEXTBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> VideoEncodeH265ReferenceListsInfoEXTBuilder<'a> { - #[inline] - pub fn reference_list0_entries( - mut self, - reference_list0_entries: &'a [VideoEncodeH265DpbSlotInfoEXT], - ) -> Self { - self.inner.reference_list0_entry_count = reference_list0_entries.len() as _; - self.inner.p_reference_list0_entries = reference_list0_entries.as_ptr(); - self - } - #[inline] - pub fn reference_list1_entries( - mut self, - reference_list1_entries: &'a [VideoEncodeH265DpbSlotInfoEXT], - ) -> Self { - self.inner.reference_list1_entry_count = reference_list1_entries.len() as _; - self.inner.p_reference_list1_entries = reference_list1_entries.as_ptr(); - self - } - #[inline] - pub fn reference_modifications( - mut self, - reference_modifications: &'a StdVideoEncodeH265ReferenceModifications, - ) -> Self { - self.inner.p_reference_modifications = reference_modifications; - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> VideoEncodeH265ReferenceListsInfoEXT { - self.inner - } -} -#[repr(C)] -#[cfg_attr(feature = "debug", derive(Debug))] -#[derive(Copy, Clone)] #[doc = ""] pub struct PhysicalDeviceInheritedViewportScissorFeaturesNV { pub s_type: StructureType, @@ -68736,6 +68822,370 @@ impl<'a> AccelerationStructureTrianglesOpacityMicromapEXTBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceDisplacementMicromapFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub displacement_micromap: Bool32, +} +impl ::std::default::Default for PhysicalDeviceDisplacementMicromapFeaturesNV { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + displacement_micromap: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceDisplacementMicromapFeaturesNV { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV; +} +impl PhysicalDeviceDisplacementMicromapFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceDisplacementMicromapFeaturesNVBuilder<'a> { + PhysicalDeviceDisplacementMicromapFeaturesNVBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDisplacementMicromapFeaturesNVBuilder<'a> { + inner: PhysicalDeviceDisplacementMicromapFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceDisplacementMicromapFeaturesNVBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceFeatures2 for PhysicalDeviceDisplacementMicromapFeaturesNV {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDisplacementMicromapFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDisplacementMicromapFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDisplacementMicromapFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceDisplacementMicromapFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDisplacementMicromapFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDisplacementMicromapFeaturesNVBuilder<'a> { + #[inline] + pub fn displacement_micromap(mut self, displacement_micromap: bool) -> Self { + self.inner.displacement_micromap = displacement_micromap.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDisplacementMicromapFeaturesNV { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceDisplacementMicromapPropertiesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_displacement_micromap_subdivision_level: u32, +} +impl ::std::default::Default for PhysicalDeviceDisplacementMicromapPropertiesNV { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + max_displacement_micromap_subdivision_level: u32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceDisplacementMicromapPropertiesNV { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_PROPERTIES_NV; +} +impl PhysicalDeviceDisplacementMicromapPropertiesNV { + pub fn builder<'a>() -> PhysicalDeviceDisplacementMicromapPropertiesNVBuilder<'a> { + PhysicalDeviceDisplacementMicromapPropertiesNVBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDisplacementMicromapPropertiesNVBuilder<'a> { + inner: PhysicalDeviceDisplacementMicromapPropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceDisplacementMicromapPropertiesNVBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDisplacementMicromapPropertiesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDisplacementMicromapPropertiesNVBuilder<'a> { + type Target = PhysicalDeviceDisplacementMicromapPropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDisplacementMicromapPropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDisplacementMicromapPropertiesNVBuilder<'a> { + #[inline] + pub fn max_displacement_micromap_subdivision_level( + mut self, + max_displacement_micromap_subdivision_level: u32, + ) -> Self { + self.inner.max_displacement_micromap_subdivision_level = + max_displacement_micromap_subdivision_level; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDisplacementMicromapPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct AccelerationStructureTrianglesDisplacementMicromapNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub displacement_bias_and_scale_format: Format, + pub displacement_vector_format: Format, + pub displacement_bias_and_scale_buffer: DeviceOrHostAddressConstKHR, + pub displacement_bias_and_scale_stride: DeviceSize, + pub displacement_vector_buffer: DeviceOrHostAddressConstKHR, + pub displacement_vector_stride: DeviceSize, + pub displaced_micromap_primitive_flags: DeviceOrHostAddressConstKHR, + pub displaced_micromap_primitive_flags_stride: DeviceSize, + pub index_type: IndexType, + pub index_buffer: DeviceOrHostAddressConstKHR, + pub index_stride: DeviceSize, + pub base_triangle: u32, + pub usage_counts_count: u32, + pub p_usage_counts: *const MicromapUsageEXT, + pub pp_usage_counts: *const *const MicromapUsageEXT, + pub micromap: MicromapEXT, +} +#[cfg(feature = "debug")] +impl fmt::Debug for AccelerationStructureTrianglesDisplacementMicromapNV { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("AccelerationStructureTrianglesDisplacementMicromapNV") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field( + "displacement_bias_and_scale_format", + &self.displacement_bias_and_scale_format, + ) + .field( + "displacement_vector_format", + &self.displacement_vector_format, + ) + .field("displacement_bias_and_scale_buffer", &"union") + .field( + "displacement_bias_and_scale_stride", + &self.displacement_bias_and_scale_stride, + ) + .field("displacement_vector_buffer", &"union") + .field( + "displacement_vector_stride", + &self.displacement_vector_stride, + ) + .field("displaced_micromap_primitive_flags", &"union") + .field( + "displaced_micromap_primitive_flags_stride", + &self.displaced_micromap_primitive_flags_stride, + ) + .field("index_type", &self.index_type) + .field("index_buffer", &"union") + .field("index_stride", &self.index_stride) + .field("base_triangle", &self.base_triangle) + .field("usage_counts_count", &self.usage_counts_count) + .field("p_usage_counts", &self.p_usage_counts) + .field("pp_usage_counts", &self.pp_usage_counts) + .field("micromap", &self.micromap) + .finish() + } +} +impl ::std::default::Default for AccelerationStructureTrianglesDisplacementMicromapNV { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + displacement_bias_and_scale_format: Format::default(), + displacement_vector_format: Format::default(), + displacement_bias_and_scale_buffer: DeviceOrHostAddressConstKHR::default(), + displacement_bias_and_scale_stride: DeviceSize::default(), + displacement_vector_buffer: DeviceOrHostAddressConstKHR::default(), + displacement_vector_stride: DeviceSize::default(), + displaced_micromap_primitive_flags: DeviceOrHostAddressConstKHR::default(), + displaced_micromap_primitive_flags_stride: DeviceSize::default(), + index_type: IndexType::default(), + index_buffer: DeviceOrHostAddressConstKHR::default(), + index_stride: DeviceSize::default(), + base_triangle: u32::default(), + usage_counts_count: u32::default(), + p_usage_counts: ::std::ptr::null(), + pp_usage_counts: ::std::ptr::null(), + micromap: MicromapEXT::default(), + } + } +} +unsafe 
impl TaggedStructure for AccelerationStructureTrianglesDisplacementMicromapNV { + const STRUCTURE_TYPE: StructureType = + StructureType::ACCELERATION_STRUCTURE_TRIANGLES_DISPLACEMENT_MICROMAP_NV; +} +impl AccelerationStructureTrianglesDisplacementMicromapNV { + pub fn builder<'a>() -> AccelerationStructureTrianglesDisplacementMicromapNVBuilder<'a> { + AccelerationStructureTrianglesDisplacementMicromapNVBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureTrianglesDisplacementMicromapNVBuilder<'a> { + inner: AccelerationStructureTrianglesDisplacementMicromapNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsAccelerationStructureGeometryTrianglesDataKHR + for AccelerationStructureTrianglesDisplacementMicromapNVBuilder<'_> +{ +} +unsafe impl ExtendsAccelerationStructureGeometryTrianglesDataKHR + for AccelerationStructureTrianglesDisplacementMicromapNV +{ +} +impl<'a> ::std::ops::Deref for AccelerationStructureTrianglesDisplacementMicromapNVBuilder<'a> { + type Target = AccelerationStructureTrianglesDisplacementMicromapNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureTrianglesDisplacementMicromapNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureTrianglesDisplacementMicromapNVBuilder<'a> { + #[inline] + pub fn displacement_bias_and_scale_format( + mut self, + displacement_bias_and_scale_format: Format, + ) -> Self { + self.inner.displacement_bias_and_scale_format = displacement_bias_and_scale_format; + self + } + #[inline] + pub fn displacement_vector_format(mut self, displacement_vector_format: Format) -> Self { + self.inner.displacement_vector_format = displacement_vector_format; + self + } + #[inline] + pub fn displacement_bias_and_scale_buffer( + mut self, + displacement_bias_and_scale_buffer: DeviceOrHostAddressConstKHR, + ) -> Self { + self.inner.displacement_bias_and_scale_buffer = displacement_bias_and_scale_buffer; + self + } + #[inline] + pub fn displacement_bias_and_scale_stride( + mut self, + displacement_bias_and_scale_stride: DeviceSize, + ) -> Self { + self.inner.displacement_bias_and_scale_stride = displacement_bias_and_scale_stride; + self + } + #[inline] + pub fn displacement_vector_buffer( + mut self, + displacement_vector_buffer: DeviceOrHostAddressConstKHR, + ) -> Self { + self.inner.displacement_vector_buffer = displacement_vector_buffer; + self + } + #[inline] + pub fn displacement_vector_stride(mut self, displacement_vector_stride: DeviceSize) -> Self { + self.inner.displacement_vector_stride = displacement_vector_stride; + self + } + #[inline] + pub fn displaced_micromap_primitive_flags( + mut self, + displaced_micromap_primitive_flags: DeviceOrHostAddressConstKHR, + ) -> Self { + self.inner.displaced_micromap_primitive_flags = displaced_micromap_primitive_flags; + self + } + #[inline] + pub fn displaced_micromap_primitive_flags_stride( + mut self, + displaced_micromap_primitive_flags_stride: DeviceSize, + ) -> Self { + self.inner.displaced_micromap_primitive_flags_stride = + displaced_micromap_primitive_flags_stride; + self + } + #[inline] + pub fn index_type(mut self, index_type: IndexType) -> Self { + self.inner.index_type = index_type; + self + } + #[inline] + pub fn index_buffer(mut self, index_buffer: DeviceOrHostAddressConstKHR) -> Self { + self.inner.index_buffer = index_buffer; + self + } + #[inline] + pub fn 
index_stride(mut self, index_stride: DeviceSize) -> Self { + self.inner.index_stride = index_stride; + self + } + #[inline] + pub fn base_triangle(mut self, base_triangle: u32) -> Self { + self.inner.base_triangle = base_triangle; + self + } + #[inline] + pub fn usage_counts(mut self, usage_counts: &'a [MicromapUsageEXT]) -> Self { + self.inner.usage_counts_count = usage_counts.len() as _; + self.inner.p_usage_counts = usage_counts.as_ptr(); + self + } + #[inline] + pub fn usage_counts_ptrs(mut self, usage_counts_ptrs: &'a [&'a MicromapUsageEXT]) -> Self { + self.inner.usage_counts_count = usage_counts_ptrs.len() as _; + self.inner.pp_usage_counts = usage_counts_ptrs.as_ptr().cast(); + self + } + #[inline] + pub fn micromap(mut self, micromap: MicromapEXT) -> Self { + self.inner.micromap = micromap; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureTrianglesDisplacementMicromapNV { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] #[doc = ""] pub struct PipelinePropertiesIdentifierEXT { pub s_type: StructureType, @@ -71870,6 +72320,8 @@ pub struct DeviceFaultVendorBinaryHeaderVersionOneEXT { pub application_name_offset: u32, pub application_version: u32, pub engine_name_offset: u32, + pub engine_version: u32, + pub api_version: u32, } impl ::std::default::Default for DeviceFaultVendorBinaryHeaderVersionOneEXT { #[inline] @@ -71884,6 +72336,8 @@ impl ::std::default::Default for DeviceFaultVendorBinaryHeaderVersionOneEXT { application_name_offset: u32::default(), application_version: u32::default(), engine_name_offset: u32::default(), + engine_version: u32::default(), + api_version: u32::default(), } } } @@ -71960,6 +72414,16 @@ impl<'a> DeviceFaultVendorBinaryHeaderVersionOneEXTBuilder<'a> { self.inner.engine_name_offset = engine_name_offset; self } + #[inline] + pub fn engine_version(mut self, engine_version: u32) -> Self { + self.inner.engine_version = engine_version; + self + } + #[inline] + pub fn api_version(mut self, api_version: u32) -> Self { + self.inner.api_version = api_version; + self + } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] @@ -71969,6 +72433,79 @@ impl<'a> DeviceFaultVendorBinaryHeaderVersionOneEXTBuilder<'a> { } #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub pipeline_library_group_handles: Bool32, +} +impl ::std::default::Default for PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + pipeline_library_group_handles: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT; +} +impl PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT { + pub fn builder<'a>() -> PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXTBuilder<'a> { + PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXTBuilder<'a> { + inner: PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT +{ +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXTBuilder<'a> { + type Target = PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXTBuilder<'a> { + #[inline] + pub fn pipeline_library_group_handles(mut self, pipeline_library_group_handles: bool) -> Self { + self.inner.pipeline_library_group_handles = pipeline_library_group_handles.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone, Default)] #[doc = ""] pub struct DecompressMemoryRegionNV { @@ -72187,6 +72724,87 @@ impl<'a> PhysicalDeviceShaderCoreBuiltinsFeaturesARMBuilder<'a> { #[repr(C)] #[cfg_attr(feature = "debug", derive(Debug))] #[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub dynamic_rendering_unused_attachments: Bool32, +} +impl ::std::default::Default for PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + dynamic_rendering_unused_attachments: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT; +} +impl PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXTBuilder<'a> { + PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT +{ +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT {} +impl<'a> ::std::ops::Deref + for PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXTBuilder<'a> +{ + type Target = PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut + for PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXTBuilder<'a> +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXTBuilder<'a> { + #[inline] + pub fn dynamic_rendering_unused_attachments( + mut self, + dynamic_rendering_unused_attachments: bool, + ) -> Self { + self.inner.dynamic_rendering_unused_attachments = + dynamic_rendering_unused_attachments.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] #[doc = ""] pub struct SurfacePresentModeEXT { pub s_type: StructureType, @@ -72480,7 +73098,7 @@ impl<'a> PhysicalDeviceSwapchainMaintenance1FeaturesEXTBuilder<'a> { #[doc = ""] pub struct SwapchainPresentFenceInfoEXT { pub s_type: StructureType, - pub p_next: *mut c_void, + pub p_next: *const c_void, pub swapchain_count: u32, pub p_fences: *const Fence, } @@ -72489,7 +73107,7 @@ impl ::std::default::Default for SwapchainPresentFenceInfoEXT { fn default() -> Self { Self { s_type: Self::STRUCTURE_TYPE, - p_next: ::std::ptr::null_mut(), + p_next: ::std::ptr::null(), swapchain_count: u32::default(), p_fences: ::std::ptr::null(), } @@ -72544,7 +73162,7 @@ impl<'a> SwapchainPresentFenceInfoEXTBuilder<'a> { #[doc = ""] pub struct SwapchainPresentModesCreateInfoEXT { pub s_type: StructureType, - pub p_next: *mut c_void, + pub p_next: *const c_void, pub present_mode_count: u32, pub p_present_modes: *const PresentModeKHR, } @@ -72553,7 +73171,7 @@ impl ::std::default::Default for SwapchainPresentModesCreateInfoEXT { fn default() -> Self { Self { s_type: Self::STRUCTURE_TYPE, - p_next: ::std::ptr::null_mut(), + p_next: ::std::ptr::null(), present_mode_count: u32::default(), p_present_modes: ::std::ptr::null(), } @@ -72608,7 +73226,7 @@ impl<'a> SwapchainPresentModesCreateInfoEXTBuilder<'a> { #[doc = ""] pub struct SwapchainPresentModeInfoEXT { pub s_type: StructureType, - pub p_next: *mut c_void, + pub p_next: *const c_void, pub swapchain_count: u32, pub p_present_modes: *const PresentModeKHR, } @@ -72617,7 +73235,7 @@ impl ::std::default::Default for SwapchainPresentModeInfoEXT { fn default() -> Self { Self { s_type: Self::STRUCTURE_TYPE, - p_next: ::std::ptr::null_mut(), + p_next: ::std::ptr::null(), swapchain_count: u32::default(), p_present_modes: ::std::ptr::null(), } @@ -73176,3 +73794,955 @@ impl<'a> PhysicalDeviceMultiviewPerViewViewportsFeaturesQCOMBuilder<'a> { self.inner } } +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceRayTracingPositionFetchFeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub ray_tracing_position_fetch: Bool32, +} +impl ::std::default::Default for PhysicalDeviceRayTracingPositionFetchFeaturesKHR { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + ray_tracing_position_fetch: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceRayTracingPositionFetchFeaturesKHR { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR; +} +impl PhysicalDeviceRayTracingPositionFetchFeaturesKHR { + pub fn builder<'a>() -> PhysicalDeviceRayTracingPositionFetchFeaturesKHRBuilder<'a> { + PhysicalDeviceRayTracingPositionFetchFeaturesKHRBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceRayTracingPositionFetchFeaturesKHRBuilder<'a> { + inner: PhysicalDeviceRayTracingPositionFetchFeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for 
PhysicalDeviceRayTracingPositionFetchFeaturesKHRBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceFeatures2 for PhysicalDeviceRayTracingPositionFetchFeaturesKHR {} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceRayTracingPositionFetchFeaturesKHRBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceRayTracingPositionFetchFeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceRayTracingPositionFetchFeaturesKHRBuilder<'a> { + type Target = PhysicalDeviceRayTracingPositionFetchFeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceRayTracingPositionFetchFeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceRayTracingPositionFetchFeaturesKHRBuilder<'a> { + #[inline] + pub fn ray_tracing_position_fetch(mut self, ray_tracing_position_fetch: bool) -> Self { + self.inner.ray_tracing_position_fetch = ray_tracing_position_fetch.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceRayTracingPositionFetchFeaturesKHR { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceShaderCorePropertiesARM { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub pixel_rate: u32, + pub texel_rate: u32, + pub fma_rate: u32, +} +impl ::std::default::Default for PhysicalDeviceShaderCorePropertiesARM { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + pixel_rate: u32::default(), + texel_rate: u32::default(), + fma_rate: u32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceShaderCorePropertiesARM { + const STRUCTURE_TYPE: StructureType = StructureType::PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM; +} +impl PhysicalDeviceShaderCorePropertiesARM { + pub fn builder<'a>() -> PhysicalDeviceShaderCorePropertiesARMBuilder<'a> { + PhysicalDeviceShaderCorePropertiesARMBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderCorePropertiesARMBuilder<'a> { + inner: PhysicalDeviceShaderCorePropertiesARM, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderCorePropertiesARMBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderCorePropertiesARM {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderCorePropertiesARMBuilder<'a> { + type Target = PhysicalDeviceShaderCorePropertiesARM; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderCorePropertiesARMBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderCorePropertiesARMBuilder<'a> { + #[inline] + pub fn pixel_rate(mut self, pixel_rate: u32) -> Self { + self.inner.pixel_rate = pixel_rate; + self + } + #[inline] + pub fn texel_rate(mut self, texel_rate: u32) -> Self { + self.inner.texel_rate = texel_rate; + self + } + #[inline] + pub fn fma_rate(mut self, fma_rate: u32) -> Self { + self.inner.fma_rate = fma_rate; + self + } + #[doc = r" Calling build will **discard** 
all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderCorePropertiesARM { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub multiview_per_view_render_areas: Bool32, +} +impl ::std::default::Default for PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + multiview_per_view_render_areas: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM; +} +impl PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM { + pub fn builder<'a>() -> PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOMBuilder<'a> { + PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOMBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOMBuilder<'a> { + inner: PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOMBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceFeatures2 + for PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM +{ +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOMBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM {} +impl<'a> ::std::ops::Deref for PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOMBuilder<'a> { + type Target = PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOMBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOMBuilder<'a> { + #[inline] + pub fn multiview_per_view_render_areas( + mut self, + multiview_per_view_render_areas: bool, + ) -> Self { + self.inner.multiview_per_view_render_areas = multiview_per_view_render_areas.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM { + pub s_type: StructureType, + pub p_next: *const c_void, + pub per_view_render_area_count: u32, + pub p_per_view_render_areas: *const Rect2D, +} +impl ::std::default::Default for MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null(), + per_view_render_area_count: u32::default(), + p_per_view_render_areas: ::std::ptr::null(), + } + } +} +unsafe impl TaggedStructure for MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM { + const STRUCTURE_TYPE: StructureType = + StructureType::MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM; +} +impl MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM { + pub fn builder<'a>() -> MultiviewPerViewRenderAreasRenderPassBeginInfoQCOMBuilder<'a> { + MultiviewPerViewRenderAreasRenderPassBeginInfoQCOMBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MultiviewPerViewRenderAreasRenderPassBeginInfoQCOMBuilder<'a> { + inner: MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsRenderPassBeginInfo + for MultiviewPerViewRenderAreasRenderPassBeginInfoQCOMBuilder<'_> +{ +} +unsafe impl ExtendsRenderPassBeginInfo for MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM {} +unsafe impl ExtendsRenderingInfo for MultiviewPerViewRenderAreasRenderPassBeginInfoQCOMBuilder<'_> {} +unsafe impl ExtendsRenderingInfo for MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM {} +impl<'a> ::std::ops::Deref for MultiviewPerViewRenderAreasRenderPassBeginInfoQCOMBuilder<'a> { + type Target = MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MultiviewPerViewRenderAreasRenderPassBeginInfoQCOMBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MultiviewPerViewRenderAreasRenderPassBeginInfoQCOMBuilder<'a> { + #[inline] + pub fn per_view_render_areas(mut self, per_view_render_areas: &'a [Rect2D]) -> Self { + self.inner.per_view_render_area_count = per_view_render_areas.len() as _; + self.inner.p_per_view_render_areas = per_view_render_areas.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct QueryLowLatencySupportNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub p_queried_low_latency_data: *mut c_void, +} +impl ::std::default::Default for QueryLowLatencySupportNV { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null(), + p_queried_low_latency_data: ::std::ptr::null_mut(), + } + } +} +unsafe impl TaggedStructure for QueryLowLatencySupportNV { + const STRUCTURE_TYPE: StructureType = StructureType::QUERY_LOW_LATENCY_SUPPORT_NV; +} +impl QueryLowLatencySupportNV { + pub fn builder<'a>() -> QueryLowLatencySupportNVBuilder<'a> { + QueryLowLatencySupportNVBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct QueryLowLatencySupportNVBuilder<'a> { + inner: QueryLowLatencySupportNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSemaphoreCreateInfo for QueryLowLatencySupportNVBuilder<'_> {} +unsafe impl ExtendsSemaphoreCreateInfo for QueryLowLatencySupportNV {} +impl<'a> ::std::ops::Deref for QueryLowLatencySupportNVBuilder<'a> { + type Target = QueryLowLatencySupportNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for QueryLowLatencySupportNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> QueryLowLatencySupportNVBuilder<'a> { + #[inline] + pub fn queried_low_latency_data(mut self, queried_low_latency_data: *mut c_void) -> Self { + self.inner.p_queried_low_latency_data = queried_low_latency_data; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> QueryLowLatencySupportNV { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct MemoryMapInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: MemoryMapFlags, + pub memory: DeviceMemory, + pub offset: DeviceSize, + pub size: DeviceSize, +} +impl ::std::default::Default for MemoryMapInfoKHR { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null(), + flags: MemoryMapFlags::default(), + memory: DeviceMemory::default(), + offset: DeviceSize::default(), + size: DeviceSize::default(), + } + } +} +unsafe impl TaggedStructure for MemoryMapInfoKHR { + const STRUCTURE_TYPE: StructureType = StructureType::MEMORY_MAP_INFO_KHR; +} +impl MemoryMapInfoKHR { + pub fn builder<'a>() -> MemoryMapInfoKHRBuilder<'a> { + MemoryMapInfoKHRBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryMapInfoKHRBuilder<'a> { + inner: MemoryMapInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for MemoryMapInfoKHRBuilder<'a> { + type Target = MemoryMapInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryMapInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryMapInfoKHRBuilder<'a> { + #[inline] + pub fn flags(mut self, flags: MemoryMapFlags) -> Self { + self.inner.flags = flags; + self + } + #[inline] + pub fn memory(mut self, memory: DeviceMemory) -> Self { + self.inner.memory = memory; + self + } + #[inline] + pub fn offset(mut self, offset: DeviceSize) -> Self { + self.inner.offset = offset; + self + } + #[inline] + pub fn size(mut self, size: DeviceSize) -> Self { + self.inner.size = size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryMapInfoKHR { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct MemoryUnmapInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: MemoryUnmapFlagsKHR, + pub memory: DeviceMemory, +} +impl ::std::default::Default for MemoryUnmapInfoKHR { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null(), + flags: MemoryUnmapFlagsKHR::default(), + memory: DeviceMemory::default(), + } + } +} +unsafe impl TaggedStructure for MemoryUnmapInfoKHR { + const STRUCTURE_TYPE: StructureType = StructureType::MEMORY_UNMAP_INFO_KHR; +} +impl MemoryUnmapInfoKHR { + pub fn builder<'a>() -> MemoryUnmapInfoKHRBuilder<'a> { + MemoryUnmapInfoKHRBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryUnmapInfoKHRBuilder<'a> { + inner: MemoryUnmapInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for MemoryUnmapInfoKHRBuilder<'a> { + type Target = MemoryUnmapInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryUnmapInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryUnmapInfoKHRBuilder<'a> { + #[inline] + pub fn flags(mut self, flags: MemoryUnmapFlagsKHR) -> Self { + self.inner.flags = flags; + self + } + #[inline] + pub fn memory(mut self, memory: DeviceMemory) -> Self { + self.inner.memory = memory; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryUnmapInfoKHR { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceShaderObjectFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_object: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderObjectFeaturesEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + shader_object: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceShaderObjectFeaturesEXT { + const STRUCTURE_TYPE: StructureType = StructureType::PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT; +} +impl PhysicalDeviceShaderObjectFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceShaderObjectFeaturesEXTBuilder<'a> { + PhysicalDeviceShaderObjectFeaturesEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderObjectFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceShaderObjectFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 for PhysicalDeviceShaderObjectFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceFeatures2 for PhysicalDeviceShaderObjectFeaturesEXT {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderObjectFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderObjectFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderObjectFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceShaderObjectFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderObjectFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderObjectFeaturesEXTBuilder<'a> { + #[inline] + pub fn shader_object(mut self, shader_object: bool) -> Self { + self.inner.shader_object = shader_object.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderObjectFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceShaderObjectPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_binary_uuid: [u8; UUID_SIZE], + pub shader_binary_version: u32, +} +impl ::std::default::Default for PhysicalDeviceShaderObjectPropertiesEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + shader_binary_uuid: unsafe { ::std::mem::zeroed() }, + shader_binary_version: u32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceShaderObjectPropertiesEXT { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT; +} +impl PhysicalDeviceShaderObjectPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceShaderObjectPropertiesEXTBuilder<'a> { + PhysicalDeviceShaderObjectPropertiesEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderObjectPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceShaderObjectPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceShaderObjectPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderObjectPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderObjectPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceShaderObjectPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderObjectPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderObjectPropertiesEXTBuilder<'a> { + #[inline] + pub fn shader_binary_uuid(mut self, shader_binary_uuid: [u8; UUID_SIZE]) -> Self { + self.inner.shader_binary_uuid = shader_binary_uuid; + self + } + #[inline] + pub fn shader_binary_version(mut self, shader_binary_version: u32) -> Self { + self.inner.shader_binary_version = shader_binary_version; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
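
Both `PhysicalDeviceShaderObjectFeaturesEXT` and `PhysicalDeviceShaderObjectPropertiesEXT` implement the `ExtendsPhysicalDeviceFeatures2`/`ExtendsPhysicalDeviceProperties2` marker traits, so support is queried through the usual `p_next` chain. A minimal sketch, assuming a hypothetical `instance: &ash::Instance` and `pdev: vk::PhysicalDevice` on Vulkan 1.1 or with `VK_KHR_get_physical_device_properties2` enabled:

```rust
use ash::vk;

fn shader_object_supported(instance: &ash::Instance, pdev: vk::PhysicalDevice) -> bool {
    // Chain the feature struct into PhysicalDeviceFeatures2 via push_next;
    // the driver fills in `shader_object` during the query.
    let mut shader_object = vk::PhysicalDeviceShaderObjectFeaturesEXT::default();
    let mut features2 = vk::PhysicalDeviceFeatures2::builder().push_next(&mut shader_object);
    unsafe { instance.get_physical_device_features2(pdev, &mut features2) };
    shader_object.shader_object == vk::TRUE
}
```
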
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderObjectPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct ShaderCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: ShaderCreateFlagsEXT, + pub stage: ShaderStageFlags, + pub next_stage: ShaderStageFlags, + pub code_type: ShaderCodeTypeEXT, + pub code_size: usize, + pub p_code: *const c_void, + pub p_name: *const c_char, + pub set_layout_count: u32, + pub p_set_layouts: *const DescriptorSetLayout, + pub push_constant_range_count: u32, + pub p_push_constant_ranges: *const PushConstantRange, + pub p_specialization_info: *const SpecializationInfo, +} +impl ::std::default::Default for ShaderCreateInfoEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null(), + flags: ShaderCreateFlagsEXT::default(), + stage: ShaderStageFlags::default(), + next_stage: ShaderStageFlags::default(), + code_type: ShaderCodeTypeEXT::default(), + code_size: usize::default(), + p_code: ::std::ptr::null(), + p_name: ::std::ptr::null(), + set_layout_count: u32::default(), + p_set_layouts: ::std::ptr::null(), + push_constant_range_count: u32::default(), + p_push_constant_ranges: ::std::ptr::null(), + p_specialization_info: ::std::ptr::null(), + } + } +} +unsafe impl TaggedStructure for ShaderCreateInfoEXT { + const STRUCTURE_TYPE: StructureType = StructureType::SHADER_CREATE_INFO_EXT; +} +impl ShaderCreateInfoEXT { + pub fn builder<'a>() -> ShaderCreateInfoEXTBuilder<'a> { + ShaderCreateInfoEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct ShaderCreateInfoEXTBuilder<'a> { + inner: ShaderCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsShaderCreateInfoEXT {} +impl<'a> ::std::ops::Deref for ShaderCreateInfoEXTBuilder<'a> { + type Target = ShaderCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ShaderCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ShaderCreateInfoEXTBuilder<'a> { + #[inline] + pub fn flags(mut self, flags: ShaderCreateFlagsEXT) -> Self { + self.inner.flags = flags; + self + } + #[inline] + pub fn stage(mut self, stage: ShaderStageFlags) -> Self { + self.inner.stage = stage; + self + } + #[inline] + pub fn next_stage(mut self, next_stage: ShaderStageFlags) -> Self { + self.inner.next_stage = next_stage; + self + } + #[inline] + pub fn code_type(mut self, code_type: ShaderCodeTypeEXT) -> Self { + self.inner.code_type = code_type; + self + } + #[inline] + pub fn code(mut self, code: &'a [u8]) -> Self { + self.inner.code_size = code.len(); + self.inner.p_code = code.as_ptr().cast(); + self + } + #[inline] + pub fn name(mut self, name: &'a ::std::ffi::CStr) -> Self { + self.inner.p_name = name.as_ptr(); + self + } + #[inline] + pub fn set_layouts(mut self, set_layouts: &'a [DescriptorSetLayout]) -> Self { + self.inner.set_layout_count = set_layouts.len() as _; + self.inner.p_set_layouts = set_layouts.as_ptr(); + self + } + #[inline] + pub fn push_constant_ranges(mut self, push_constant_ranges: &'a [PushConstantRange]) -> Self { + self.inner.push_constant_range_count = push_constant_ranges.len() as _; + 
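
Each slice setter here (`code`, `set_layouts`, `push_constant_ranges`) writes both the count/size field and the matching raw pointer from one borrowed slice, which is what the builder's `'a` lifetime guards. A sketch of filling the struct for a SPIR-V vertex shader; the file path and entry-point name are placeholders:

```rust
use ash::vk;
use std::ffi::CStr;

// Placeholder input: real code would load an actual SPIR-V module.
let spirv: Vec<u8> = std::fs::read("shader.vert.spv").expect("missing SPIR-V file");
let entry = CStr::from_bytes_with_nul(b"main\0").unwrap();

let create_info = vk::ShaderCreateInfoEXT::builder()
    .stage(vk::ShaderStageFlags::VERTEX)
    .next_stage(vk::ShaderStageFlags::FRAGMENT) // stages this shader may feed into
    .code_type(vk::ShaderCodeTypeEXT::SPIRV)
    .code(&spirv) // sets code_size (in bytes) and p_code together
    .name(entry);
// The builder derefs to ShaderCreateInfoEXT, so `&*create_info` yields the raw
// struct reference without calling build() and discarding the lifetime guard.
```
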
self.inner.p_push_constant_ranges = push_constant_ranges.as_ptr(); + self + } + #[inline] + pub fn specialization_info(mut self, specialization_info: &'a SpecializationInfo) -> Self { + self.inner.p_specialization_info = specialization_info; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next(mut self, next: &'a mut T) -> Self { + unsafe { + let next_ptr = <*const T>::cast(next); + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ShaderCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceShaderTileImageFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_tile_image_color_read_access: Bool32, + pub shader_tile_image_depth_read_access: Bool32, + pub shader_tile_image_stencil_read_access: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderTileImageFeaturesEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + shader_tile_image_color_read_access: Bool32::default(), + shader_tile_image_depth_read_access: Bool32::default(), + shader_tile_image_stencil_read_access: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceShaderTileImageFeaturesEXT { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT; +} +impl PhysicalDeviceShaderTileImageFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceShaderTileImageFeaturesEXTBuilder<'a> { + PhysicalDeviceShaderTileImageFeaturesEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderTileImageFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceShaderTileImageFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceFeatures2 for PhysicalDeviceShaderTileImageFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceFeatures2 for PhysicalDeviceShaderTileImageFeaturesEXT {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderTileImageFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderTileImageFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderTileImageFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceShaderTileImageFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderTileImageFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderTileImageFeaturesEXTBuilder<'a> { + #[inline] + pub fn shader_tile_image_color_read_access( + mut self, + 
shader_tile_image_color_read_access: bool, + ) -> Self { + self.inner.shader_tile_image_color_read_access = shader_tile_image_color_read_access.into(); + self + } + #[inline] + pub fn shader_tile_image_depth_read_access( + mut self, + shader_tile_image_depth_read_access: bool, + ) -> Self { + self.inner.shader_tile_image_depth_read_access = shader_tile_image_depth_read_access.into(); + self + } + #[inline] + pub fn shader_tile_image_stencil_read_access( + mut self, + shader_tile_image_stencil_read_access: bool, + ) -> Self { + self.inner.shader_tile_image_stencil_read_access = + shader_tile_image_stencil_read_access.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderTileImageFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[cfg_attr(feature = "debug", derive(Debug))] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceShaderTileImagePropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_tile_image_coherent_read_accelerated: Bool32, + pub shader_tile_image_read_sample_from_pixel_rate_invocation: Bool32, + pub shader_tile_image_read_from_helper_invocation: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderTileImagePropertiesEXT { + #[inline] + fn default() -> Self { + Self { + s_type: Self::STRUCTURE_TYPE, + p_next: ::std::ptr::null_mut(), + shader_tile_image_coherent_read_accelerated: Bool32::default(), + shader_tile_image_read_sample_from_pixel_rate_invocation: Bool32::default(), + shader_tile_image_read_from_helper_invocation: Bool32::default(), + } + } +} +unsafe impl TaggedStructure for PhysicalDeviceShaderTileImagePropertiesEXT { + const STRUCTURE_TYPE: StructureType = + StructureType::PHYSICAL_DEVICE_SHADER_TILE_IMAGE_PROPERTIES_EXT; +} +impl PhysicalDeviceShaderTileImagePropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceShaderTileImagePropertiesEXTBuilder<'a> { + PhysicalDeviceShaderTileImagePropertiesEXTBuilder { + inner: Self::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderTileImagePropertiesEXTBuilder<'a> { + inner: PhysicalDeviceShaderTileImagePropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceShaderTileImagePropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderTileImagePropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderTileImagePropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceShaderTileImagePropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderTileImagePropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderTileImagePropertiesEXTBuilder<'a> { + #[inline] + pub fn shader_tile_image_coherent_read_accelerated( + mut self, + shader_tile_image_coherent_read_accelerated: bool, + ) -> Self { + self.inner.shader_tile_image_coherent_read_accelerated = + shader_tile_image_coherent_read_accelerated.into(); + self + } + #[inline] + pub fn shader_tile_image_read_sample_from_pixel_rate_invocation( + mut self, + shader_tile_image_read_sample_from_pixel_rate_invocation: bool, + ) -> Self { + 
self.inner + .shader_tile_image_read_sample_from_pixel_rate_invocation = + shader_tile_image_read_sample_from_pixel_rate_invocation.into(); + self + } + #[inline] + pub fn shader_tile_image_read_from_helper_invocation( + mut self, + shader_tile_image_read_from_helper_invocation: bool, + ) -> Self { + self.inner.shader_tile_image_read_from_helper_invocation = + shader_tile_image_read_from_helper_invocation.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderTileImagePropertiesEXT { + self.inner + } +} diff --git a/third_party/rust/ash/src/vk/enums.rs b/third_party/rust/ash/src/vk/enums.rs index a27a55b40c5e..6201209715f2 100644 --- a/third_party/rust/ash/src/vk/enums.rs +++ b/third_party/rust/ash/src/vk/enums.rs @@ -1744,6 +1744,8 @@ impl VendorId { pub const MESA: Self = Self(0x1_0005); #[doc = "PoCL vendor ID"] pub const POCL: Self = Self(0x1_0006); + #[doc = "Mobileye vendor ID"] + pub const MOBILEYE: Self = Self(0x1_0007); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] @@ -1808,6 +1810,8 @@ impl DriverId { pub const MESA_DOZEN: Self = Self(23); #[doc = "Mesa open source project"] pub const MESA_NVK: Self = Self(24); + #[doc = "Imagination Technologies"] + pub const IMAGINATION_OPEN_SOURCE_MESA: Self = Self(25); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] @@ -2780,6 +2784,43 @@ impl DeviceFaultVendorBinaryHeaderVersionEXT { impl DeviceFaultVendorBinaryHeaderVersionEXT { pub const ONE: Self = Self(1); } +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct DisplacementMicromapFormatNV(pub(crate) i32); +impl DisplacementMicromapFormatNV { + #[inline] + pub const fn from_raw(x: i32) -> Self { + Self(x) + } + #[inline] + pub const fn as_raw(self) -> i32 { + self.0 + } +} +impl DisplacementMicromapFormatNV { + pub const TYPE_64_TRIANGLES_64_BYTES: Self = Self(1); + pub const TYPE_256_TRIANGLES_128_BYTES: Self = Self(2); + pub const TYPE_1024_TRIANGLES_128_BYTES: Self = Self(3); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ShaderCodeTypeEXT(pub(crate) i32); +impl ShaderCodeTypeEXT { + #[inline] + pub const fn from_raw(x: i32) -> Self { + Self(x) + } + #[inline] + pub const fn as_raw(self) -> i32 { + self.0 + } +} +impl ShaderCodeTypeEXT { + pub const BINARY: Self = Self(0); + pub const SPIRV: Self = Self(1); +} impl fmt::Debug for ObjectType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -2828,6 +2869,7 @@ impl fmt::Debug for ObjectType { Self::BUFFER_COLLECTION_FUCHSIA => Some("BUFFER_COLLECTION_FUCHSIA"), Self::MICROMAP_EXT => Some("MICROMAP_EXT"), Self::OPTICAL_FLOW_SESSION_NV => Some("OPTICAL_FLOW_SESSION_NV"), + Self::SHADER_EXT => Some("SHADER_EXT"), Self::SAMPLER_YCBCR_CONVERSION => Some("SAMPLER_YCBCR_CONVERSION"), Self::DESCRIPTOR_UPDATE_TEMPLATE => Some("DESCRIPTOR_UPDATE_TEMPLATE"), Self::PRIVATE_DATA_SLOT => Some("PRIVATE_DATA_SLOT"), @@ -2898,7 +2940,13 @@ impl fmt::Debug for Result { Self::THREAD_DONE_KHR => Some("THREAD_DONE_KHR"), Self::OPERATION_DEFERRED_KHR => Some("OPERATION_DEFERRED_KHR"), Self::OPERATION_NOT_DEFERRED_KHR 
=> Some("OPERATION_NOT_DEFERRED_KHR"), + Self::ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR => { + Some("ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR") + } Self::ERROR_COMPRESSION_EXHAUSTED_EXT => Some("ERROR_COMPRESSION_EXHAUSTED_EXT"), + Self::ERROR_INCOMPATIBLE_SHADER_BINARY_EXT => { + Some("ERROR_INCOMPATIBLE_SHADER_BINARY_EXT") + } Self::ERROR_OUT_OF_POOL_MEMORY => Some("ERROR_OUT_OF_POOL_MEMORY"), Self::ERROR_INVALID_EXTERNAL_HANDLE => Some("ERROR_INVALID_EXTERNAL_HANDLE"), Self::ERROR_FRAGMENTATION => Some("ERROR_FRAGMENTATION"), diff --git a/third_party/rust/ash/src/vk/extensions.rs b/third_party/rust/ash/src/vk/extensions.rs index cf4936d9955b..47ccd9b677d1 100644 --- a/third_party/rust/ash/src/vk/extensions.rs +++ b/third_party/rust/ash/src/vk/extensions.rs @@ -1538,7 +1538,7 @@ impl KhrSamplerMirrorClampToEdgeFn { impl SamplerAddressMode { #[doc = "Note that this defines what was previously a core enum, and so uses the 'value' attribute rather than 'offset', and does not have a suffix. This is a special case, and should not be repeated"] pub const MIRROR_CLAMP_TO_EDGE: Self = Self(4); - #[deprecated = "Alias introduced for consistency with extension suffixing rules"] + #[deprecated = "Introduced for consistency with extension suffixing rules"] pub const MIRROR_CLAMP_TO_EDGE_KHR: Self = Self::MIRROR_CLAMP_TO_EDGE; } impl ImgFilterCubicFn { @@ -3096,7 +3096,7 @@ impl ExtVideoEncodeH264Fn { pub const fn name() -> &'static ::std::ffi::CStr { unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_video_encode_h264\0") } } - pub const SPEC_VERSION: u32 = 9u32; + pub const SPEC_VERSION: u32 = 10u32; } #[derive(Clone)] pub struct ExtVideoEncodeH264Fn {} @@ -3118,11 +3118,9 @@ impl StructureType { pub const VIDEO_ENCODE_H264_VCL_FRAME_INFO_EXT: Self = Self(1_000_038_003); pub const VIDEO_ENCODE_H264_DPB_SLOT_INFO_EXT: Self = Self(1_000_038_004); pub const VIDEO_ENCODE_H264_NALU_SLICE_INFO_EXT: Self = Self(1_000_038_005); - pub const VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_INFO_EXT: Self = Self(1_000_038_006); pub const VIDEO_ENCODE_H264_PROFILE_INFO_EXT: Self = Self(1_000_038_007); pub const VIDEO_ENCODE_H264_RATE_CONTROL_INFO_EXT: Self = Self(1_000_038_008); pub const VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_EXT: Self = Self(1_000_038_009); - pub const VIDEO_ENCODE_H264_REFERENCE_LISTS_INFO_EXT: Self = Self(1_000_038_010); } #[doc = "Generated from 'VK_EXT_video_encode_h264'"] impl VideoCodecOperationFlagsKHR { @@ -3133,7 +3131,7 @@ impl ExtVideoEncodeH265Fn { pub const fn name() -> &'static ::std::ffi::CStr { unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_video_encode_h265\0") } } - pub const SPEC_VERSION: u32 = 9u32; + pub const SPEC_VERSION: u32 = 10u32; } #[derive(Clone)] pub struct ExtVideoEncodeH265Fn {} @@ -3155,9 +3153,7 @@ impl StructureType { pub const VIDEO_ENCODE_H265_VCL_FRAME_INFO_EXT: Self = Self(1_000_039_003); pub const VIDEO_ENCODE_H265_DPB_SLOT_INFO_EXT: Self = Self(1_000_039_004); pub const VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_INFO_EXT: Self = Self(1_000_039_005); - pub const VIDEO_ENCODE_H265_EMIT_PICTURE_PARAMETERS_INFO_EXT: Self = Self(1_000_039_006); pub const VIDEO_ENCODE_H265_PROFILE_INFO_EXT: Self = Self(1_000_039_007); - pub const VIDEO_ENCODE_H265_REFERENCE_LISTS_INFO_EXT: Self = Self(1_000_039_008); pub const VIDEO_ENCODE_H265_RATE_CONTROL_INFO_EXT: Self = Self(1_000_039_009); pub const VIDEO_ENCODE_H265_RATE_CONTROL_LAYER_INFO_EXT: Self = Self(1_000_039_010); } @@ -3545,25 +3541,6 @@ impl ImageCreateFlags { impl StructureType { pub 
const PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV: Self = Self(1_000_050_000); } -impl NvExtension52Fn { - #[inline] - pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_extension_52\0") } - } - pub const SPEC_VERSION: u32 = 0u32; -} -#[derive(Clone)] -pub struct NvExtension52Fn {} -unsafe impl Send for NvExtension52Fn {} -unsafe impl Sync for NvExtension52Fn {} -impl NvExtension52Fn { - pub fn load(mut _f: F) -> Self - where - F: FnMut(&::std::ffi::CStr) -> *const c_void, - { - Self {} - } -} impl NvExtension53Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -6094,6 +6071,25 @@ impl GoogleDisplayTimingFn { impl StructureType { pub const PRESENT_TIMES_INFO_GOOGLE: Self = Self(1_000_092_000); } +impl ReservedDoNotUse94Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_RESERVED_do_not_use_94\0") } + } + pub const SPEC_VERSION: u32 = 1u32; +} +#[derive(Clone)] +pub struct ReservedDoNotUse94Fn {} +unsafe impl Send for ReservedDoNotUse94Fn {} +unsafe impl Sync for ReservedDoNotUse94Fn {} +impl ReservedDoNotUse94Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} impl NvSampleMaskOverrideCoverageFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -6218,7 +6214,7 @@ impl ExtDiscardRectanglesFn { pub const fn name() -> &'static ::std::ffi::CStr { unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_discard_rectangles\0") } } - pub const SPEC_VERSION: u32 = 1u32; + pub const SPEC_VERSION: u32 = 2u32; } #[allow(non_camel_case_types)] pub type PFN_vkCmdSetDiscardRectangleEXT = unsafe extern "system" fn( @@ -6227,9 +6223,19 @@ pub type PFN_vkCmdSetDiscardRectangleEXT = unsafe extern "system" fn( discard_rectangle_count: u32, p_discard_rectangles: *const Rect2D, ); +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetDiscardRectangleEnableEXT = + unsafe extern "system" fn(command_buffer: CommandBuffer, discard_rectangle_enable: Bool32); +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetDiscardRectangleModeEXT = unsafe extern "system" fn( + command_buffer: CommandBuffer, + discard_rectangle_mode: DiscardRectangleModeEXT, +); #[derive(Clone)] pub struct ExtDiscardRectanglesFn { pub cmd_set_discard_rectangle_ext: PFN_vkCmdSetDiscardRectangleEXT, + pub cmd_set_discard_rectangle_enable_ext: PFN_vkCmdSetDiscardRectangleEnableEXT, + pub cmd_set_discard_rectangle_mode_ext: PFN_vkCmdSetDiscardRectangleModeEXT, } unsafe impl Send for ExtDiscardRectanglesFn {} unsafe impl Sync for ExtDiscardRectanglesFn {} @@ -6261,12 +6267,54 @@ impl ExtDiscardRectanglesFn { ::std::mem::transmute(val) } }, + cmd_set_discard_rectangle_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_discard_rectangle_enable_ext( + _command_buffer: CommandBuffer, + _discard_rectangle_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_discard_rectangle_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDiscardRectangleEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_discard_rectangle_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_discard_rectangle_mode_ext: unsafe { + unsafe extern "system" fn cmd_set_discard_rectangle_mode_ext( + _command_buffer: CommandBuffer, + _discard_rectangle_mode: DiscardRectangleModeEXT, + ) { + panic!(concat!( + "Unable to 
load ", + stringify!(cmd_set_discard_rectangle_mode_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDiscardRectangleModeEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_discard_rectangle_mode_ext + } else { + ::std::mem::transmute(val) + } + }, } } } #[doc = "Generated from 'VK_EXT_discard_rectangles'"] impl DynamicState { pub const DISCARD_RECTANGLE_EXT: Self = Self(1_000_099_000); + pub const DISCARD_RECTANGLE_ENABLE_EXT: Self = Self(1_000_099_001); + pub const DISCARD_RECTANGLE_MODE_EXT: Self = Self(1_000_099_002); } #[doc = "Generated from 'VK_EXT_discard_rectangles'"] impl StructureType { @@ -6361,6 +6409,10 @@ impl NvExtension104Fn { Self {} } } +#[doc = "Generated from 'VK_NV_extension_104'"] +impl PrivateDataSlotCreateFlags { + pub const RESERVED_0_NV: Self = Self(0b1); +} impl ExtSwapchainColorspaceFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -8468,6 +8520,25 @@ impl KhrRelaxedBlockLayoutFn { Self {} } } +impl ReservedDoNotUse146Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_RESERVED_do_not_use_146\0") } + } + pub const SPEC_VERSION: u32 = 1u32; +} +#[derive(Clone)] +pub struct ReservedDoNotUse146Fn {} +unsafe impl Send for ReservedDoNotUse146Fn {} +unsafe impl Sync for ReservedDoNotUse146Fn {} +impl ReservedDoNotUse146Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} impl KhrGetMemoryRequirements2Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -11884,29 +11955,6 @@ impl GoogleExtension195Fn { Self {} } } -impl GoogleExtension196Fn { - #[inline] - pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_GOOGLE_extension_196\0") } - } - pub const SPEC_VERSION: u32 = 0u32; -} -#[derive(Clone)] -pub struct GoogleExtension196Fn {} -unsafe impl Send for GoogleExtension196Fn {} -unsafe impl Sync for GoogleExtension196Fn {} -impl GoogleExtension196Fn { - pub fn load(mut _f: F) -> Self - where - F: FnMut(&::std::ffi::CStr) -> *const c_void, - { - Self {} - } -} -#[doc = "Generated from 'VK_GOOGLE_extension_196'"] -impl PipelineCacheCreateFlags { - pub const RESERVED_1_EXT: Self = Self(0b10); -} impl KhrDriverPropertiesFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -12272,9 +12320,16 @@ impl NvScissorExclusiveFn { pub const fn name() -> &'static ::std::ffi::CStr { unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_scissor_exclusive\0") } } - pub const SPEC_VERSION: u32 = 1u32; + pub const SPEC_VERSION: u32 = 2u32; } #[allow(non_camel_case_types)] +pub type PFN_vkCmdSetExclusiveScissorEnableNV = unsafe extern "system" fn( + command_buffer: CommandBuffer, + first_exclusive_scissor: u32, + exclusive_scissor_count: u32, + p_exclusive_scissor_enables: *const Bool32, +); +#[allow(non_camel_case_types)] pub type PFN_vkCmdSetExclusiveScissorNV = unsafe extern "system" fn( command_buffer: CommandBuffer, first_exclusive_scissor: u32, @@ -12283,6 +12338,7 @@ pub type PFN_vkCmdSetExclusiveScissorNV = unsafe extern "system" fn( ); #[derive(Clone)] pub struct NvScissorExclusiveFn { + pub cmd_set_exclusive_scissor_enable_nv: PFN_vkCmdSetExclusiveScissorEnableNV, pub cmd_set_exclusive_scissor_nv: PFN_vkCmdSetExclusiveScissorNV, } unsafe impl Send for NvScissorExclusiveFn {} @@ -12293,6 +12349,28 @@ impl NvScissorExclusiveFn { F: FnMut(&::std::ffi::CStr) -> 
*const c_void, { Self { + cmd_set_exclusive_scissor_enable_nv: unsafe { + unsafe extern "system" fn cmd_set_exclusive_scissor_enable_nv( + _command_buffer: CommandBuffer, + _first_exclusive_scissor: u32, + _exclusive_scissor_count: u32, + _p_exclusive_scissor_enables: *const Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_exclusive_scissor_enable_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetExclusiveScissorEnableNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_exclusive_scissor_enable_nv + } else { + ::std::mem::transmute(val) + } + }, cmd_set_exclusive_scissor_nv: unsafe { unsafe extern "system" fn cmd_set_exclusive_scissor_nv( _command_buffer: CommandBuffer, @@ -12320,6 +12398,7 @@ impl NvScissorExclusiveFn { } #[doc = "Generated from 'VK_NV_scissor_exclusive'"] impl DynamicState { + pub const EXCLUSIVE_SCISSOR_ENABLE_NV: Self = Self(1_000_205_000); pub const EXCLUSIVE_SCISSOR_NV: Self = Self(1_000_205_001); } #[doc = "Generated from 'VK_NV_scissor_exclusive'"] @@ -13331,7 +13410,7 @@ pub type PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR = unsafe extern "system" pub type PFN_vkCmdSetFragmentShadingRateKHR = unsafe extern "system" fn( command_buffer: CommandBuffer, p_fragment_size: *const Extent2D, - combiner_ops: *const [FragmentShadingRateCombinerOpKHR; 2], + combiner_ops: *const [FragmentShadingRateCombinerOpKHR; 2usize], ); #[derive(Clone)] pub struct KhrFragmentShadingRateFn { @@ -13372,7 +13451,7 @@ impl KhrFragmentShadingRateFn { unsafe extern "system" fn cmd_set_fragment_shading_rate_khr( _command_buffer: CommandBuffer, _p_fragment_size: *const Extent2D, - _combiner_ops: *const [FragmentShadingRateCombinerOpKHR; 2], + _combiner_ops: *const [FragmentShadingRateCombinerOpKHR; 2usize], ) { panic!(concat!( "Unable to load ", @@ -15534,28 +15613,84 @@ impl IntelExtension271Fn { } } #[doc = "Generated from 'VK_INTEL_extension_271'"] +impl FormatFeatureFlags2 { + pub const RESERVED_46_EXT: Self = + Self(0b100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000); +} +#[doc = "Generated from 'VK_INTEL_extension_271'"] impl ImageUsageFlags { pub const RESERVED_22_EXT: Self = Self(0b100_0000_0000_0000_0000_0000); } -impl IntelExtension272Fn { +impl KhrMapMemory2Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_INTEL_extension_272\0") } + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_map_memory2\0") } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } +#[allow(non_camel_case_types)] +pub type PFN_vkMapMemory2KHR = unsafe extern "system" fn( + device: Device, + p_memory_map_info: *const MemoryMapInfoKHR, + pp_data: *mut *mut c_void, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkUnmapMemory2KHR = unsafe extern "system" fn( + device: Device, + p_memory_unmap_info: *const MemoryUnmapInfoKHR, +) -> Result; #[derive(Clone)] -pub struct IntelExtension272Fn {} -unsafe impl Send for IntelExtension272Fn {} -unsafe impl Sync for IntelExtension272Fn {} -impl IntelExtension272Fn { +pub struct KhrMapMemory2Fn { + pub map_memory2_khr: PFN_vkMapMemory2KHR, + pub unmap_memory2_khr: PFN_vkUnmapMemory2KHR, +} +unsafe impl Send for KhrMapMemory2Fn {} +unsafe impl Sync for KhrMapMemory2Fn {} +impl KhrMapMemory2Fn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - Self {} + Self { + map_memory2_khr: unsafe { + unsafe extern "system" fn 
map_memory2_khr( + _device: Device, + _p_memory_map_info: *const MemoryMapInfoKHR, + _pp_data: *mut *mut c_void, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(map_memory2_khr))) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkMapMemory2KHR\0"); + let val = _f(cname); + if val.is_null() { + map_memory2_khr + } else { + ::std::mem::transmute(val) + } + }, + unmap_memory2_khr: unsafe { + unsafe extern "system" fn unmap_memory2_khr( + _device: Device, + _p_memory_unmap_info: *const MemoryUnmapInfoKHR, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(unmap_memory2_khr))) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkUnmapMemory2KHR\0"); + let val = _f(cname); + if val.is_null() { + unmap_memory2_khr + } else { + ::std::mem::transmute(val) + } + }, + } } } +#[doc = "Generated from 'VK_KHR_map_memory2'"] +impl StructureType { + pub const MEMORY_MAP_INFO_KHR: Self = Self(1_000_271_000); + pub const MEMORY_UNMAP_INFO_KHR: Self = Self(1_000_271_001); +} impl IntelExtension273Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -16656,21 +16791,12 @@ impl KhrExtension299Fn { Self {} } } -#[doc = "Generated from 'VK_KHR_extension_299'"] -impl MemoryHeapFlags { - pub const RESERVED_2_KHR: Self = Self(0b100); -} -#[doc = "Generated from 'VK_KHR_extension_299'"] -impl PipelineCacheCreateFlags { - pub const RESERVED_1_KHR: Self = Self::RESERVED_1_EXT; - pub const RESERVED_2_KHR: Self = Self(0b100); -} impl KhrVideoEncodeQueueFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_video_encode_queue\0") } } - pub const SPEC_VERSION: u32 = 7u32; + pub const SPEC_VERSION: u32 = 8u32; } #[allow(non_camel_case_types)] pub type PFN_vkCmdEncodeVideoKHR = unsafe extern "system" fn( @@ -16747,19 +16873,24 @@ impl PipelineStageFlags2 { } #[doc = "Generated from 'VK_KHR_video_encode_queue'"] impl QueryType { - pub const VIDEO_ENCODESTREAM_BUFFER_RANGE_KHR: Self = Self(1_000_299_000); + pub const VIDEO_ENCODE_FEEDBACK_KHR: Self = Self(1_000_299_000); } #[doc = "Generated from 'VK_KHR_video_encode_queue'"] impl QueueFlags { pub const VIDEO_ENCODE_KHR: Self = Self(0b100_0000); } #[doc = "Generated from 'VK_KHR_video_encode_queue'"] +impl Result { + pub const ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR: Self = Self(-1_000_299_000); +} +#[doc = "Generated from 'VK_KHR_video_encode_queue'"] impl StructureType { pub const VIDEO_ENCODE_INFO_KHR: Self = Self(1_000_299_000); pub const VIDEO_ENCODE_RATE_CONTROL_INFO_KHR: Self = Self(1_000_299_001); pub const VIDEO_ENCODE_RATE_CONTROL_LAYER_INFO_KHR: Self = Self(1_000_299_002); pub const VIDEO_ENCODE_CAPABILITIES_KHR: Self = Self(1_000_299_003); pub const VIDEO_ENCODE_USAGE_INFO_KHR: Self = Self(1_000_299_004); + pub const QUERY_POOL_VIDEO_ENCODE_FEEDBACK_CREATE_INFO_KHR: Self = Self(1_000_299_005); } #[doc = "Generated from 'VK_KHR_video_encode_queue'"] impl VideoCodingControlFlagsKHR { @@ -16931,25 +17062,6 @@ impl NvExtension308Fn { Self {} } } -impl KhrExtension309Fn { - #[inline] - pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_309\0") } - } - pub const SPEC_VERSION: u32 = 0u32; -} -#[derive(Clone)] -pub struct KhrExtension309Fn {} -unsafe impl Send for KhrExtension309Fn {} -unsafe impl Sync for KhrExtension309Fn {} -impl KhrExtension309Fn { - pub fn load(mut _f: F) -> Self - where - F: FnMut(&::std::ffi::CStr) -> *const c_void, - 
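
The new `VK_KHR_map_memory2` table is loaded with the same closure pattern every generated `*Fn::load` above uses. A minimal sketch, assuming an already-created `instance`/`device` pair, a host-visible `memory: vk::DeviceMemory` allocation, and the extension enabled at device creation (all names hypothetical):

```rust
use ash::vk;

let fns = vk::KhrMapMemory2Fn::load(|name| unsafe {
    std::mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr()))
});

// Map the whole allocation through the new info-struct form of vkMapMemory.
let map_info = vk::MemoryMapInfoKHR::builder()
    .memory(memory)
    .offset(0)
    .size(vk::WHOLE_SIZE);
let mut data: *mut std::ffi::c_void = std::ptr::null_mut();
unsafe { (fns.map_memory2_khr)(device.handle(), &*map_info, &mut data) }
    .result()
    .expect("vkMapMemory2KHR failed");

// ... write through `data` ...

let unmap_info = vk::MemoryUnmapInfoKHR::builder().memory(memory);
let _ = unsafe { (fns.unmap_memory2_khr)(device.handle(), &*unmap_info) };
```
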
{ - Self {} - } -} impl QcomExtension310Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -16973,18 +17085,18 @@ impl QcomExtension310Fn { impl StructureType { pub const RESERVED_QCOM: Self = Self(1_000_309_000); } -impl NvExtension311Fn { +impl NvLowLatencyFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_extension_311\0") } + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_low_latency\0") } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } #[derive(Clone)] -pub struct NvExtension311Fn {} -unsafe impl Send for NvExtension311Fn {} -unsafe impl Sync for NvExtension311Fn {} -impl NvExtension311Fn { +pub struct NvLowLatencyFn {} +unsafe impl Send for NvLowLatencyFn {} +unsafe impl Sync for NvLowLatencyFn {} +impl NvLowLatencyFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, @@ -16992,6 +17104,10 @@ impl NvExtension311Fn { Self {} } } +#[doc = "Generated from 'VK_NV_low_latency'"] +impl StructureType { + pub const QUERY_LOW_LATENCY_SUPPORT_NV: Self = Self(1_000_310_000); +} impl ExtMetalObjectsFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -18058,7 +18174,7 @@ impl NvFragmentShadingRateEnumsFn { pub type PFN_vkCmdSetFragmentShadingRateEnumNV = unsafe extern "system" fn( command_buffer: CommandBuffer, shading_rate: FragmentShadingRateNV, - combiner_ops: *const [FragmentShadingRateCombinerOpKHR; 2], + combiner_ops: *const [FragmentShadingRateCombinerOpKHR; 2usize], ); #[derive(Clone)] pub struct NvFragmentShadingRateEnumsFn { @@ -18076,7 +18192,7 @@ impl NvFragmentShadingRateEnumsFn { unsafe extern "system" fn cmd_set_fragment_shading_rate_enum_nv( _command_buffer: CommandBuffer, _shading_rate: FragmentShadingRateNV, - _combiner_ops: *const [FragmentShadingRateCombinerOpKHR; 2], + _combiner_ops: *const [FragmentShadingRateCombinerOpKHR; 2usize], ) { panic!(concat!( "Unable to load ", @@ -18808,7 +18924,7 @@ impl ExtDeviceFaultFn { pub const fn name() -> &'static ::std::ffi::CStr { unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_device_fault\0") } } - pub const SPEC_VERSION: u32 = 1u32; + pub const SPEC_VERSION: u32 = 2u32; } #[allow(non_camel_case_types)] pub type PFN_vkGetDeviceFaultInfoEXT = unsafe extern "system" fn( @@ -20122,6 +20238,10 @@ impl ExtPipelinePropertiesFn { pub const SPEC_VERSION: u32 = 1u32; } #[allow(non_camel_case_types)] +#[doc = "Implemented for all types that can be passed as argument to `pipeline_properties` in [`PFN_vkGetPipelinePropertiesEXT`]"] +pub unsafe trait GetPipelinePropertiesEXTParamPipelineProperties {} +unsafe impl GetPipelinePropertiesEXTParamPipelineProperties for PipelinePropertiesIdentifierEXT {} +#[allow(non_camel_case_types)] pub type PFN_vkGetPipelinePropertiesEXT = unsafe extern "system" fn( device: Device, p_pipeline_info: *const PipelineInfoEXT, @@ -20169,57 +20289,6 @@ impl StructureType { pub const PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT: Self = Self(1_000_372_001); pub const PIPELINE_INFO_EXT: Self = Self::PIPELINE_INFO_KHR; } -impl NvExtension374Fn { - #[inline] - pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_extension_374\0") } - } - pub const SPEC_VERSION: u32 = 0u32; -} -#[derive(Clone)] -pub struct NvExtension374Fn {} -unsafe impl Send for NvExtension374Fn {} -unsafe impl Sync for NvExtension374Fn {} -impl NvExtension374Fn { - pub fn 
load(mut _f: F) -> Self - where - F: FnMut(&::std::ffi::CStr) -> *const c_void, - { - Self {} - } -} -#[doc = "Generated from 'VK_NV_extension_374'"] -impl ExternalFenceHandleTypeFlags { - pub const RESERVED_4_NV: Self = Self(0b1_0000); - pub const RESERVED_5_NV: Self = Self(0b10_0000); -} -#[doc = "Generated from 'VK_NV_extension_374'"] -impl ExternalSemaphoreHandleTypeFlags { - pub const RESERVED_5_NV: Self = Self(0b10_0000); -} -impl NvExtension375Fn { - #[inline] - pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_extension_375\0") } - } - pub const SPEC_VERSION: u32 = 0u32; -} -#[derive(Clone)] -pub struct NvExtension375Fn {} -unsafe impl Send for NvExtension375Fn {} -unsafe impl Sync for NvExtension375Fn {} -impl NvExtension375Fn { - pub fn load(mut _f: F) -> Self - where - F: FnMut(&::std::ffi::CStr) -> *const c_void, - { - Self {} - } -} -#[doc = "Generated from 'VK_NV_extension_375'"] -impl ExternalMemoryHandleTypeFlags { - pub const RESERVED_13_NV: Self = Self(0b10_0000_0000_0000); -} impl ExtExtension376Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -21012,18 +21081,18 @@ impl KhrPortabilityEnumerationFn { impl InstanceCreateFlags { pub const ENUMERATE_PORTABILITY_KHR: Self = Self(0b1); } -impl KhrExtension396Fn { +impl ExtShaderTileImageFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_396\0") } + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_shader_tile_image\0") } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } #[derive(Clone)] -pub struct KhrExtension396Fn {} -unsafe impl Send for KhrExtension396Fn {} -unsafe impl Sync for KhrExtension396Fn {} -impl KhrExtension396Fn { +pub struct ExtShaderTileImageFn {} +unsafe impl Send for ExtShaderTileImageFn {} +unsafe impl Sync for ExtShaderTileImageFn {} +impl ExtShaderTileImageFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, @@ -21031,6 +21100,11 @@ impl KhrExtension396Fn { Self {} } } +#[doc = "Generated from 'VK_EXT_shader_tile_image'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT: Self = Self(1_000_395_000); + pub const PHYSICAL_DEVICE_SHADER_TILE_IMAGE_PROPERTIES_EXT: Self = Self(1_000_395_001); +} impl ExtOpacityMicromapFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -21488,18 +21562,18 @@ impl StructureType { pub const MICROMAP_BUILD_SIZES_INFO_EXT: Self = Self(1_000_396_008); pub const ACCELERATION_STRUCTURE_TRIANGLES_OPACITY_MICROMAP_EXT: Self = Self(1_000_396_009); } -impl NvExtension398Fn { +impl NvDisplacementMicromapFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_extension_398\0") } + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_displacement_micromap\0") } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } #[derive(Clone)] -pub struct NvExtension398Fn {} -unsafe impl Send for NvExtension398Fn {} -unsafe impl Sync for NvExtension398Fn {} -impl NvExtension398Fn { +pub struct NvDisplacementMicromapFn {} +unsafe impl Send for NvDisplacementMicromapFn {} +unsafe impl Sync for NvDisplacementMicromapFn {} +impl NvDisplacementMicromapFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, @@ -21507,14 +21581,24 @@ impl NvExtension398Fn { 
Self {} } } -#[doc = "Generated from 'VK_NV_extension_398'"] +#[doc = "Generated from 'VK_NV_displacement_micromap'"] impl BuildAccelerationStructureFlagsKHR { - pub const RESERVED_9_NV: Self = Self(0b10_0000_0000); - pub const RESERVED_10_NV: Self = Self(0b100_0000_0000); + pub const ALLOW_DISPLACEMENT_MICROMAP_UPDATE_NV: Self = Self(0b10_0000_0000); } -#[doc = "Generated from 'VK_NV_extension_398'"] +#[doc = "Generated from 'VK_NV_displacement_micromap'"] +impl MicromapTypeEXT { + pub const DISPLACEMENT_MICROMAP_NV: Self = Self(1_000_397_000); +} +#[doc = "Generated from 'VK_NV_displacement_micromap'"] impl PipelineCreateFlags { - pub const RESERVED_28_NV: Self = Self(0b1_0000_0000_0000_0000_0000_0000_0000); + pub const RAY_TRACING_DISPLACEMENT_MICROMAP_NV: Self = + Self(0b1_0000_0000_0000_0000_0000_0000_0000); +} +#[doc = "Generated from 'VK_NV_displacement_micromap'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV: Self = Self(1_000_397_000); + pub const PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_PROPERTIES_NV: Self = Self(1_000_397_001); + pub const ACCELERATION_STRUCTURE_TRIANGLES_DISPLACEMENT_MICROMAP_NV: Self = Self(1_000_397_002); } impl JuiceExtension399Fn { #[inline] @@ -21638,33 +21722,100 @@ impl FbExtension404Fn { Self {} } } -impl HuaweiExtension405Fn { +impl HuaweiClusterCullingShaderFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_HUAWEI_extension_405\0") } + unsafe { + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_HUAWEI_cluster_culling_shader\0") + } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 2u32; } +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawClusterHUAWEI = unsafe extern "system" fn( + command_buffer: CommandBuffer, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, +); +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDrawClusterIndirectHUAWEI = + unsafe extern "system" fn(command_buffer: CommandBuffer, buffer: Buffer, offset: DeviceSize); #[derive(Clone)] -pub struct HuaweiExtension405Fn {} -unsafe impl Send for HuaweiExtension405Fn {} -unsafe impl Sync for HuaweiExtension405Fn {} -impl HuaweiExtension405Fn { +pub struct HuaweiClusterCullingShaderFn { + pub cmd_draw_cluster_huawei: PFN_vkCmdDrawClusterHUAWEI, + pub cmd_draw_cluster_indirect_huawei: PFN_vkCmdDrawClusterIndirectHUAWEI, +} +unsafe impl Send for HuaweiClusterCullingShaderFn {} +unsafe impl Sync for HuaweiClusterCullingShaderFn {} +impl HuaweiClusterCullingShaderFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - Self {} + Self { + cmd_draw_cluster_huawei: unsafe { + unsafe extern "system" fn cmd_draw_cluster_huawei( + _command_buffer: CommandBuffer, + _group_count_x: u32, + _group_count_y: u32, + _group_count_z: u32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_cluster_huawei) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdDrawClusterHUAWEI\0"); + let val = _f(cname); + if val.is_null() { + cmd_draw_cluster_huawei + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw_cluster_indirect_huawei: unsafe { + unsafe extern "system" fn cmd_draw_cluster_indirect_huawei( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_cluster_indirect_huawei) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + 
b"vkCmdDrawClusterIndirectHUAWEI\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_draw_cluster_indirect_huawei + } else { + ::std::mem::transmute(val) + } + }, + } } } -#[doc = "Generated from 'VK_HUAWEI_extension_405'"] +#[doc = "Generated from 'VK_HUAWEI_cluster_culling_shader'"] impl PipelineStageFlags2 { - pub const RESEVED_41_HUAWEI: Self = + pub const CLUSTER_CULLING_SHADER_HUAWEI: Self = Self(0b10_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000); } -#[doc = "Generated from 'VK_HUAWEI_extension_405'"] +#[doc = "Generated from 'VK_HUAWEI_cluster_culling_shader'"] +impl QueryPipelineStatisticFlags { + pub const CLUSTER_CULLING_SHADER_INVOCATIONS_HUAWEI: Self = Self(0b10_0000_0000_0000); +} +#[doc = "Generated from 'VK_HUAWEI_cluster_culling_shader'"] impl ShaderStageFlags { - pub const RESERVED_19_HUAWEI: Self = Self(0b1000_0000_0000_0000_0000); + pub const CLUSTER_CULLING_HUAWEI: Self = Self(0b1000_0000_0000_0000_0000); +} +#[doc = "Generated from 'VK_HUAWEI_cluster_culling_shader'"] +impl StructureType { + pub const PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI: Self = Self(1_000_404_000); + pub const PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI: Self = Self(1_000_404_001); } impl HuaweiExtension406Fn { #[inline] @@ -21998,18 +22149,20 @@ impl HuaweiExtension415Fn { Self {} } } -impl ArmExtension416Fn { +impl ArmShaderCorePropertiesFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_ARM_extension_416\0") } + unsafe { + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_ARM_shader_core_properties\0") + } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } #[derive(Clone)] -pub struct ArmExtension416Fn {} -unsafe impl Send for ArmExtension416Fn {} -unsafe impl Sync for ArmExtension416Fn {} -impl ArmExtension416Fn { +pub struct ArmShaderCorePropertiesFn {} +unsafe impl Send for ArmShaderCorePropertiesFn {} +unsafe impl Sync for ArmShaderCorePropertiesFn {} +impl ArmShaderCorePropertiesFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, @@ -22017,6 +22170,10 @@ impl ArmExtension416Fn { Self {} } } +#[doc = "Generated from 'VK_ARM_shader_core_properties'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM: Self = Self(1_000_415_000); +} impl KhrExtension417Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -22055,18 +22212,20 @@ impl ArmExtension418Fn { Self {} } } -impl ExtExtension419Fn { +impl ExtImageSlicedViewOf3dFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_419\0") } + unsafe { + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_image_sliced_view_of_3d\0") + } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } #[derive(Clone)] -pub struct ExtExtension419Fn {} -unsafe impl Send for ExtExtension419Fn {} -unsafe impl Sync for ExtExtension419Fn {} -impl ExtExtension419Fn { +pub struct ExtImageSlicedViewOf3dFn {} +unsafe impl Send for ExtImageSlicedViewOf3dFn {} +unsafe impl Sync for ExtImageSlicedViewOf3dFn {} +impl ExtImageSlicedViewOf3dFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, @@ -22074,9 +22233,10 @@ impl ExtExtension419Fn { Self {} } } -#[doc = "Generated from 'VK_EXT_extension_419'"] -impl ImageViewCreateFlags { - pub const RESERVED_3_EXT: Self = Self(0b1000); +#[doc = "Generated from 
'VK_EXT_image_sliced_view_of_3d'"] +impl StructureType { + pub const PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT: Self = Self(1_000_418_000); + pub const IMAGE_VIEW_SLICED_CREATE_INFO_EXT: Self = Self(1_000_418_001); } impl ExtExtension420Fn { #[inline] @@ -22633,25 +22793,6 @@ impl KhrExtension435Fn { Self {} } } -impl NvExtension436Fn { - #[inline] - pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_extension_436\0") } - } - pub const SPEC_VERSION: u32 = 0u32; -} -#[derive(Clone)] -pub struct NvExtension436Fn {} -unsafe impl Send for NvExtension436Fn {} -unsafe impl Sync for NvExtension436Fn {} -impl NvExtension436Fn { - pub fn load(mut _f: F) -> Self - where - F: FnMut(&::std::ffi::CStr) -> *const c_void, - { - Self {} - } -} impl ExtExtension437Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -23025,6 +23166,22 @@ impl ArmExtension453Fn { Self {} } } +#[doc = "Generated from 'VK_ARM_extension_453'"] +impl AccessFlags2 { + pub const RESERVED_49_ARM: Self = + Self(0b10_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000); + pub const RESERVED_50_ARM: Self = + Self(0b100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000); +} +#[doc = "Generated from 'VK_ARM_extension_453'"] +impl PipelineStageFlags2 { + pub const RESERVED_43_ARM: Self = + Self(0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000); +} +#[doc = "Generated from 'VK_ARM_extension_453'"] +impl QueueFlags { + pub const RESERVED_11_ARM: Self = Self(0b1000_0000_0000); +} impl GoogleExtension454Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -24039,6 +24196,10 @@ impl ExtExtension461Fn { impl FormatFeatureFlags2 { pub const RESERVED_39_EXT: Self = Self(0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000); } +#[doc = "Generated from 'VK_EXT_extension_461'"] +impl ImageUsageFlags { + pub const RESERVED_23_EXT: Self = Self(0b1000_0000_0000_0000_0000_0000); +} impl ExtExtension462Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -24497,6 +24658,10 @@ impl AndroidExtension469Fn { Self {} } } +#[doc = "Generated from 'VK_ANDROID_extension_469'"] +impl ResolveModeFlags { + pub const EXTENSION_469_FLAG_0: Self = Self(0b1_0000); +} impl AmdExtension470Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -24725,18 +24890,20 @@ impl ExtExtension481Fn { Self {} } } -impl ExtExtension482Fn { +impl KhrRayTracingPositionFetchFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_482\0") } + unsafe { + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_ray_tracing_position_fetch\0") + } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } #[derive(Clone)] -pub struct ExtExtension482Fn {} -unsafe impl Send for ExtExtension482Fn {} -unsafe impl Sync for ExtExtension482Fn {} -impl ExtExtension482Fn { +pub struct KhrRayTracingPositionFetchFn {} +unsafe impl Send for KhrRayTracingPositionFetchFn {} +unsafe impl Sync for KhrRayTracingPositionFetchFn {} +impl KhrRayTracingPositionFetchFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, @@ -24744,30 +24911,1212 @@ impl ExtExtension482Fn { Self {} } } -impl ExtExtension483Fn { +#[doc = "Generated from 'VK_KHR_ray_tracing_position_fetch'"] +impl BuildAccelerationStructureFlagsKHR { + pub const ALLOW_DATA_ACCESS: Self = Self(0b1000_0000_0000); +} +#[doc = "Generated from 
'VK_KHR_ray_tracing_position_fetch'"] +impl StructureType { + pub const PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR: Self = Self(1_000_481_000); +} +impl ExtShaderObjectFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_483\0") } + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_shader_object\0") } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } +#[allow(non_camel_case_types)] +pub type PFN_vkCreateShadersEXT = unsafe extern "system" fn( + device: Device, + create_info_count: u32, + p_create_infos: *const ShaderCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_shaders: *mut ShaderEXT, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyShaderEXT = unsafe extern "system" fn( + device: Device, + shader: ShaderEXT, + p_allocator: *const AllocationCallbacks, +); +#[allow(non_camel_case_types)] +pub type PFN_vkGetShaderBinaryDataEXT = unsafe extern "system" fn( + device: Device, + shader: ShaderEXT, + p_data_size: *mut usize, + p_data: *mut c_void, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBindShadersEXT = unsafe extern "system" fn( + command_buffer: CommandBuffer, + stage_count: u32, + p_stages: *const ShaderStageFlags, + p_shaders: *const ShaderEXT, +); #[derive(Clone)] -pub struct ExtExtension483Fn {} -unsafe impl Send for ExtExtension483Fn {} -unsafe impl Sync for ExtExtension483Fn {} -impl ExtExtension483Fn { +pub struct ExtShaderObjectFn { + pub create_shaders_ext: PFN_vkCreateShadersEXT, + pub destroy_shader_ext: PFN_vkDestroyShaderEXT, + pub get_shader_binary_data_ext: PFN_vkGetShaderBinaryDataEXT, + pub cmd_bind_shaders_ext: PFN_vkCmdBindShadersEXT, + pub cmd_set_cull_mode_ext: crate::vk::PFN_vkCmdSetCullMode, + pub cmd_set_front_face_ext: crate::vk::PFN_vkCmdSetFrontFace, + pub cmd_set_primitive_topology_ext: crate::vk::PFN_vkCmdSetPrimitiveTopology, + pub cmd_set_viewport_with_count_ext: crate::vk::PFN_vkCmdSetViewportWithCount, + pub cmd_set_scissor_with_count_ext: crate::vk::PFN_vkCmdSetScissorWithCount, + pub cmd_bind_vertex_buffers2_ext: crate::vk::PFN_vkCmdBindVertexBuffers2, + pub cmd_set_depth_test_enable_ext: crate::vk::PFN_vkCmdSetDepthTestEnable, + pub cmd_set_depth_write_enable_ext: crate::vk::PFN_vkCmdSetDepthWriteEnable, + pub cmd_set_depth_compare_op_ext: crate::vk::PFN_vkCmdSetDepthCompareOp, + pub cmd_set_depth_bounds_test_enable_ext: crate::vk::PFN_vkCmdSetDepthBoundsTestEnable, + pub cmd_set_stencil_test_enable_ext: crate::vk::PFN_vkCmdSetStencilTestEnable, + pub cmd_set_stencil_op_ext: crate::vk::PFN_vkCmdSetStencilOp, + pub cmd_set_vertex_input_ext: crate::vk::PFN_vkCmdSetVertexInputEXT, + pub cmd_set_patch_control_points_ext: crate::vk::PFN_vkCmdSetPatchControlPointsEXT, + pub cmd_set_rasterizer_discard_enable_ext: crate::vk::PFN_vkCmdSetRasterizerDiscardEnable, + pub cmd_set_depth_bias_enable_ext: crate::vk::PFN_vkCmdSetDepthBiasEnable, + pub cmd_set_logic_op_ext: crate::vk::PFN_vkCmdSetLogicOpEXT, + pub cmd_set_primitive_restart_enable_ext: crate::vk::PFN_vkCmdSetPrimitiveRestartEnable, + pub cmd_set_tessellation_domain_origin_ext: crate::vk::PFN_vkCmdSetTessellationDomainOriginEXT, + pub cmd_set_depth_clamp_enable_ext: crate::vk::PFN_vkCmdSetDepthClampEnableEXT, + pub cmd_set_polygon_mode_ext: crate::vk::PFN_vkCmdSetPolygonModeEXT, + pub cmd_set_rasterization_samples_ext: crate::vk::PFN_vkCmdSetRasterizationSamplesEXT, + pub cmd_set_sample_mask_ext: 
crate::vk::PFN_vkCmdSetSampleMaskEXT, + pub cmd_set_alpha_to_coverage_enable_ext: crate::vk::PFN_vkCmdSetAlphaToCoverageEnableEXT, + pub cmd_set_alpha_to_one_enable_ext: crate::vk::PFN_vkCmdSetAlphaToOneEnableEXT, + pub cmd_set_logic_op_enable_ext: crate::vk::PFN_vkCmdSetLogicOpEnableEXT, + pub cmd_set_color_blend_enable_ext: crate::vk::PFN_vkCmdSetColorBlendEnableEXT, + pub cmd_set_color_blend_equation_ext: crate::vk::PFN_vkCmdSetColorBlendEquationEXT, + pub cmd_set_color_write_mask_ext: crate::vk::PFN_vkCmdSetColorWriteMaskEXT, + pub cmd_set_rasterization_stream_ext: crate::vk::PFN_vkCmdSetRasterizationStreamEXT, + pub cmd_set_conservative_rasterization_mode_ext: + crate::vk::PFN_vkCmdSetConservativeRasterizationModeEXT, + pub cmd_set_extra_primitive_overestimation_size_ext: + crate::vk::PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT, + pub cmd_set_depth_clip_enable_ext: crate::vk::PFN_vkCmdSetDepthClipEnableEXT, + pub cmd_set_sample_locations_enable_ext: crate::vk::PFN_vkCmdSetSampleLocationsEnableEXT, + pub cmd_set_color_blend_advanced_ext: crate::vk::PFN_vkCmdSetColorBlendAdvancedEXT, + pub cmd_set_provoking_vertex_mode_ext: crate::vk::PFN_vkCmdSetProvokingVertexModeEXT, + pub cmd_set_line_rasterization_mode_ext: crate::vk::PFN_vkCmdSetLineRasterizationModeEXT, + pub cmd_set_line_stipple_enable_ext: crate::vk::PFN_vkCmdSetLineStippleEnableEXT, + pub cmd_set_depth_clip_negative_one_to_one_ext: + crate::vk::PFN_vkCmdSetDepthClipNegativeOneToOneEXT, + pub cmd_set_viewport_w_scaling_enable_nv: crate::vk::PFN_vkCmdSetViewportWScalingEnableNV, + pub cmd_set_viewport_swizzle_nv: crate::vk::PFN_vkCmdSetViewportSwizzleNV, + pub cmd_set_coverage_to_color_enable_nv: crate::vk::PFN_vkCmdSetCoverageToColorEnableNV, + pub cmd_set_coverage_to_color_location_nv: crate::vk::PFN_vkCmdSetCoverageToColorLocationNV, + pub cmd_set_coverage_modulation_mode_nv: crate::vk::PFN_vkCmdSetCoverageModulationModeNV, + pub cmd_set_coverage_modulation_table_enable_nv: + crate::vk::PFN_vkCmdSetCoverageModulationTableEnableNV, + pub cmd_set_coverage_modulation_table_nv: crate::vk::PFN_vkCmdSetCoverageModulationTableNV, + pub cmd_set_shading_rate_image_enable_nv: crate::vk::PFN_vkCmdSetShadingRateImageEnableNV, + pub cmd_set_representative_fragment_test_enable_nv: + crate::vk::PFN_vkCmdSetRepresentativeFragmentTestEnableNV, + pub cmd_set_coverage_reduction_mode_nv: crate::vk::PFN_vkCmdSetCoverageReductionModeNV, +} +unsafe impl Send for ExtShaderObjectFn {} +unsafe impl Sync for ExtShaderObjectFn {} +impl ExtShaderObjectFn { pub fn load<F>(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - Self {} + Self { + create_shaders_ext: unsafe { + unsafe extern "system" fn create_shaders_ext( + _device: Device, + _create_info_count: u32, + _p_create_infos: *const ShaderCreateInfoEXT, + _p_allocator: *const AllocationCallbacks, + _p_shaders: *mut ShaderEXT, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_shaders_ext))) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCreateShadersEXT\0"); + let val = _f(cname); + if val.is_null() { + create_shaders_ext + } else { + ::std::mem::transmute(val) + } + }, + destroy_shader_ext: unsafe { + unsafe extern "system" fn destroy_shader_ext( + _device: Device, + _shader: ShaderEXT, + _p_allocator: *const AllocationCallbacks, + ) { + panic!(concat!("Unable to load ", stringify!(destroy_shader_ext))) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkDestroyShaderEXT\0"); + let val = _f(cname); + if 
val.is_null() { + destroy_shader_ext + } else { + ::std::mem::transmute(val) + } + }, + get_shader_binary_data_ext: unsafe { + unsafe extern "system" fn get_shader_binary_data_ext( + _device: Device, + _shader: ShaderEXT, + _p_data_size: *mut usize, + _p_data: *mut c_void, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_shader_binary_data_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkGetShaderBinaryDataEXT\0"); + let val = _f(cname); + if val.is_null() { + get_shader_binary_data_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_bind_shaders_ext: unsafe { + unsafe extern "system" fn cmd_bind_shaders_ext( + _command_buffer: CommandBuffer, + _stage_count: u32, + _p_stages: *const ShaderStageFlags, + _p_shaders: *const ShaderEXT, + ) { + panic!(concat!("Unable to load ", stringify!(cmd_bind_shaders_ext))) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdBindShadersEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_bind_shaders_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_cull_mode_ext: unsafe { + unsafe extern "system" fn cmd_set_cull_mode_ext( + _command_buffer: CommandBuffer, + _cull_mode: CullModeFlags, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_cull_mode_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetCullModeEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_cull_mode_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_front_face_ext: unsafe { + unsafe extern "system" fn cmd_set_front_face_ext( + _command_buffer: CommandBuffer, + _front_face: FrontFace, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_front_face_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetFrontFaceEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_front_face_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_primitive_topology_ext: unsafe { + unsafe extern "system" fn cmd_set_primitive_topology_ext( + _command_buffer: CommandBuffer, + _primitive_topology: PrimitiveTopology, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_primitive_topology_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetPrimitiveTopologyEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_primitive_topology_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_viewport_with_count_ext: unsafe { + unsafe extern "system" fn cmd_set_viewport_with_count_ext( + _command_buffer: CommandBuffer, + _viewport_count: u32, + _p_viewports: *const Viewport, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_viewport_with_count_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetViewportWithCountEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_viewport_with_count_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_scissor_with_count_ext: unsafe { + unsafe extern "system" fn cmd_set_scissor_with_count_ext( + _command_buffer: CommandBuffer, + _scissor_count: u32, + _p_scissors: *const Rect2D, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_scissor_with_count_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetScissorWithCountEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_scissor_with_count_ext + } else { + ::std::mem::transmute(val) + } + }, + 
cmd_bind_vertex_buffers2_ext: unsafe { + unsafe extern "system" fn cmd_bind_vertex_buffers2_ext( + _command_buffer: CommandBuffer, + _first_binding: u32, + _binding_count: u32, + _p_buffers: *const Buffer, + _p_offsets: *const DeviceSize, + _p_sizes: *const DeviceSize, + _p_strides: *const DeviceSize, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_bind_vertex_buffers2_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdBindVertexBuffers2EXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_bind_vertex_buffers2_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_test_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_depth_test_enable_ext( + _command_buffer: CommandBuffer, + _depth_test_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_depth_test_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDepthTestEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_depth_test_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_write_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_depth_write_enable_ext( + _command_buffer: CommandBuffer, + _depth_write_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_depth_write_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDepthWriteEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_depth_write_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_compare_op_ext: unsafe { + unsafe extern "system" fn cmd_set_depth_compare_op_ext( + _command_buffer: CommandBuffer, + _depth_compare_op: CompareOp, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_depth_compare_op_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetDepthCompareOpEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_depth_compare_op_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_bounds_test_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_depth_bounds_test_enable_ext( + _command_buffer: CommandBuffer, + _depth_bounds_test_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_depth_bounds_test_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDepthBoundsTestEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_depth_bounds_test_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_stencil_test_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_stencil_test_enable_ext( + _command_buffer: CommandBuffer, + _stencil_test_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_stencil_test_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetStencilTestEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_stencil_test_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_stencil_op_ext: unsafe { + unsafe extern "system" fn cmd_set_stencil_op_ext( + _command_buffer: CommandBuffer, + _face_mask: StencilFaceFlags, + _fail_op: StencilOp, + _pass_op: StencilOp, + _depth_fail_op: StencilOp, + _compare_op: CompareOp, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_stencil_op_ext) + )) + } + let cname = + 
::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetStencilOpEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_stencil_op_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_vertex_input_ext: unsafe { + unsafe extern "system" fn cmd_set_vertex_input_ext( + _command_buffer: CommandBuffer, + _vertex_binding_description_count: u32, + _p_vertex_binding_descriptions: *const VertexInputBindingDescription2EXT, + _vertex_attribute_description_count: u32, + _p_vertex_attribute_descriptions: *const VertexInputAttributeDescription2EXT, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_vertex_input_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetVertexInputEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_vertex_input_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_patch_control_points_ext: unsafe { + unsafe extern "system" fn cmd_set_patch_control_points_ext( + _command_buffer: CommandBuffer, + _patch_control_points: u32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_patch_control_points_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetPatchControlPointsEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_patch_control_points_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_rasterizer_discard_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_rasterizer_discard_enable_ext( + _command_buffer: CommandBuffer, + _rasterizer_discard_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_rasterizer_discard_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetRasterizerDiscardEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_rasterizer_discard_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_bias_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_depth_bias_enable_ext( + _command_buffer: CommandBuffer, + _depth_bias_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_depth_bias_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDepthBiasEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_depth_bias_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_logic_op_ext: unsafe { + unsafe extern "system" fn cmd_set_logic_op_ext( + _command_buffer: CommandBuffer, + _logic_op: LogicOp, + ) { + panic!(concat!("Unable to load ", stringify!(cmd_set_logic_op_ext))) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetLogicOpEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_logic_op_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_primitive_restart_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_primitive_restart_enable_ext( + _command_buffer: CommandBuffer, + _primitive_restart_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_primitive_restart_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetPrimitiveRestartEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_primitive_restart_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_tessellation_domain_origin_ext: unsafe { + unsafe extern "system" fn cmd_set_tessellation_domain_origin_ext( + _command_buffer: CommandBuffer, + _domain_origin: 
TessellationDomainOrigin, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_tessellation_domain_origin_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetTessellationDomainOriginEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_tessellation_domain_origin_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_clamp_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_depth_clamp_enable_ext( + _command_buffer: CommandBuffer, + _depth_clamp_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_depth_clamp_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDepthClampEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_depth_clamp_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_polygon_mode_ext: unsafe { + unsafe extern "system" fn cmd_set_polygon_mode_ext( + _command_buffer: CommandBuffer, + _polygon_mode: PolygonMode, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_polygon_mode_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetPolygonModeEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_polygon_mode_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_rasterization_samples_ext: unsafe { + unsafe extern "system" fn cmd_set_rasterization_samples_ext( + _command_buffer: CommandBuffer, + _rasterization_samples: SampleCountFlags, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_rasterization_samples_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetRasterizationSamplesEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_rasterization_samples_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_sample_mask_ext: unsafe { + unsafe extern "system" fn cmd_set_sample_mask_ext( + _command_buffer: CommandBuffer, + _samples: SampleCountFlags, + _p_sample_mask: *const SampleMask, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_sample_mask_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetSampleMaskEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_sample_mask_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_alpha_to_coverage_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_alpha_to_coverage_enable_ext( + _command_buffer: CommandBuffer, + _alpha_to_coverage_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_alpha_to_coverage_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetAlphaToCoverageEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_alpha_to_coverage_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_alpha_to_one_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_alpha_to_one_enable_ext( + _command_buffer: CommandBuffer, + _alpha_to_one_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_alpha_to_one_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetAlphaToOneEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_alpha_to_one_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_logic_op_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_logic_op_enable_ext( + _command_buffer: CommandBuffer, + _logic_op_enable: Bool32, + ) 
{ + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_logic_op_enable_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetLogicOpEnableEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_logic_op_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_color_blend_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_color_blend_enable_ext( + _command_buffer: CommandBuffer, + _first_attachment: u32, + _attachment_count: u32, + _p_color_blend_enables: *const Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_color_blend_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetColorBlendEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_color_blend_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_color_blend_equation_ext: unsafe { + unsafe extern "system" fn cmd_set_color_blend_equation_ext( + _command_buffer: CommandBuffer, + _first_attachment: u32, + _attachment_count: u32, + _p_color_blend_equations: *const ColorBlendEquationEXT, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_color_blend_equation_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetColorBlendEquationEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_color_blend_equation_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_color_write_mask_ext: unsafe { + unsafe extern "system" fn cmd_set_color_write_mask_ext( + _command_buffer: CommandBuffer, + _first_attachment: u32, + _attachment_count: u32, + _p_color_write_masks: *const ColorComponentFlags, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_color_write_mask_ext) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetColorWriteMaskEXT\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_color_write_mask_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_rasterization_stream_ext: unsafe { + unsafe extern "system" fn cmd_set_rasterization_stream_ext( + _command_buffer: CommandBuffer, + _rasterization_stream: u32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_rasterization_stream_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetRasterizationStreamEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_rasterization_stream_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_conservative_rasterization_mode_ext: unsafe { + unsafe extern "system" fn cmd_set_conservative_rasterization_mode_ext( + _command_buffer: CommandBuffer, + _conservative_rasterization_mode: ConservativeRasterizationModeEXT, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_conservative_rasterization_mode_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetConservativeRasterizationModeEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_conservative_rasterization_mode_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_extra_primitive_overestimation_size_ext: unsafe { + unsafe extern "system" fn cmd_set_extra_primitive_overestimation_size_ext( + _command_buffer: CommandBuffer, + _extra_primitive_overestimation_size: f32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_extra_primitive_overestimation_size_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + 
b"vkCmdSetExtraPrimitiveOverestimationSizeEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_extra_primitive_overestimation_size_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_clip_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_depth_clip_enable_ext( + _command_buffer: CommandBuffer, + _depth_clip_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_depth_clip_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDepthClipEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_depth_clip_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_sample_locations_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_sample_locations_enable_ext( + _command_buffer: CommandBuffer, + _sample_locations_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_sample_locations_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetSampleLocationsEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_sample_locations_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_color_blend_advanced_ext: unsafe { + unsafe extern "system" fn cmd_set_color_blend_advanced_ext( + _command_buffer: CommandBuffer, + _first_attachment: u32, + _attachment_count: u32, + _p_color_blend_advanced: *const ColorBlendAdvancedEXT, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_color_blend_advanced_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetColorBlendAdvancedEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_color_blend_advanced_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_provoking_vertex_mode_ext: unsafe { + unsafe extern "system" fn cmd_set_provoking_vertex_mode_ext( + _command_buffer: CommandBuffer, + _provoking_vertex_mode: ProvokingVertexModeEXT, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_provoking_vertex_mode_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetProvokingVertexModeEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_provoking_vertex_mode_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_line_rasterization_mode_ext: unsafe { + unsafe extern "system" fn cmd_set_line_rasterization_mode_ext( + _command_buffer: CommandBuffer, + _line_rasterization_mode: LineRasterizationModeEXT, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_line_rasterization_mode_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetLineRasterizationModeEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_line_rasterization_mode_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_line_stipple_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_line_stipple_enable_ext( + _command_buffer: CommandBuffer, + _stippled_line_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_line_stipple_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetLineStippleEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_line_stipple_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_depth_clip_negative_one_to_one_ext: unsafe { + unsafe extern "system" fn cmd_set_depth_clip_negative_one_to_one_ext( + _command_buffer: CommandBuffer, + 
_negative_one_to_one: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_depth_clip_negative_one_to_one_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetDepthClipNegativeOneToOneEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_depth_clip_negative_one_to_one_ext + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_viewport_w_scaling_enable_nv: unsafe { + unsafe extern "system" fn cmd_set_viewport_w_scaling_enable_nv( + _command_buffer: CommandBuffer, + _viewport_w_scaling_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_viewport_w_scaling_enable_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetViewportWScalingEnableNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_viewport_w_scaling_enable_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_viewport_swizzle_nv: unsafe { + unsafe extern "system" fn cmd_set_viewport_swizzle_nv( + _command_buffer: CommandBuffer, + _first_viewport: u32, + _viewport_count: u32, + _p_viewport_swizzles: *const ViewportSwizzleNV, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_viewport_swizzle_nv) + )) + } + let cname = + ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"vkCmdSetViewportSwizzleNV\0"); + let val = _f(cname); + if val.is_null() { + cmd_set_viewport_swizzle_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_coverage_to_color_enable_nv: unsafe { + unsafe extern "system" fn cmd_set_coverage_to_color_enable_nv( + _command_buffer: CommandBuffer, + _coverage_to_color_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_coverage_to_color_enable_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetCoverageToColorEnableNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_coverage_to_color_enable_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_coverage_to_color_location_nv: unsafe { + unsafe extern "system" fn cmd_set_coverage_to_color_location_nv( + _command_buffer: CommandBuffer, + _coverage_to_color_location: u32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_coverage_to_color_location_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetCoverageToColorLocationNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_coverage_to_color_location_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_coverage_modulation_mode_nv: unsafe { + unsafe extern "system" fn cmd_set_coverage_modulation_mode_nv( + _command_buffer: CommandBuffer, + _coverage_modulation_mode: CoverageModulationModeNV, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_coverage_modulation_mode_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetCoverageModulationModeNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_coverage_modulation_mode_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_coverage_modulation_table_enable_nv: unsafe { + unsafe extern "system" fn cmd_set_coverage_modulation_table_enable_nv( + _command_buffer: CommandBuffer, + _coverage_modulation_table_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_coverage_modulation_table_enable_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetCoverageModulationTableEnableNV\0", + ); + let val = _f(cname); + if 
val.is_null() { + cmd_set_coverage_modulation_table_enable_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_coverage_modulation_table_nv: unsafe { + unsafe extern "system" fn cmd_set_coverage_modulation_table_nv( + _command_buffer: CommandBuffer, + _coverage_modulation_table_count: u32, + _p_coverage_modulation_table: *const f32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_coverage_modulation_table_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetCoverageModulationTableNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_coverage_modulation_table_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_shading_rate_image_enable_nv: unsafe { + unsafe extern "system" fn cmd_set_shading_rate_image_enable_nv( + _command_buffer: CommandBuffer, + _shading_rate_image_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_shading_rate_image_enable_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetShadingRateImageEnableNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_shading_rate_image_enable_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_representative_fragment_test_enable_nv: unsafe { + unsafe extern "system" fn cmd_set_representative_fragment_test_enable_nv( + _command_buffer: CommandBuffer, + _representative_fragment_test_enable: Bool32, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_representative_fragment_test_enable_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetRepresentativeFragmentTestEnableNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_representative_fragment_test_enable_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_coverage_reduction_mode_nv: unsafe { + unsafe extern "system" fn cmd_set_coverage_reduction_mode_nv( + _command_buffer: CommandBuffer, + _coverage_reduction_mode: CoverageReductionModeNV, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_coverage_reduction_mode_nv) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetCoverageReductionModeNV\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_coverage_reduction_mode_nv + } else { + ::std::mem::transmute(val) + } + }, + } } } -#[doc = "Generated from 'VK_EXT_extension_483'"] -impl ShaderStageFlags { - pub const EXT_483_RESERVE_15: Self = Self(0b1000_0000_0000_0000); - pub const EXT_483_RESERVE_16: Self = Self(0b1_0000_0000_0000_0000); - pub const EXT_483_RESERVE_17: Self = Self(0b10_0000_0000_0000_0000); +#[doc = "Generated from 'VK_EXT_shader_object'"] +impl ObjectType { + pub const SHADER_EXT: Self = Self(1_000_482_000); +} +#[doc = "Generated from 'VK_EXT_shader_object'"] +impl Result { + pub const ERROR_INCOMPATIBLE_SHADER_BINARY_EXT: Self = Self(1_000_482_000); +} +#[doc = "Generated from 'VK_EXT_shader_object'"] +impl ShaderCreateFlagsEXT { + pub const ALLOW_VARYING_SUBGROUP_SIZE: Self = Self(0b10); + pub const REQUIRE_FULL_SUBGROUPS: Self = Self(0b100); + pub const NO_TASK_SHADER: Self = Self(0b1000); + pub const DISPATCH_BASE: Self = Self(0b1_0000); + pub const FRAGMENT_SHADING_RATE_ATTACHMENT: Self = Self(0b10_0000); + pub const FRAGMENT_DENSITY_MAP_ATTACHMENT: Self = Self(0b100_0000); +} +#[doc = "Generated from 'VK_EXT_shader_object'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT: Self = Self(1_000_482_000); + pub const 
PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT: Self = Self(1_000_482_001); + pub const SHADER_CREATE_INFO_EXT: Self = Self(1_000_482_002); + pub const SHADER_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT: Self = + Self::PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO; } impl ExtExtension484Fn { #[inline] @@ -24962,25 +26311,6 @@ impl StructureType { pub const PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_VIEWPORTS_FEATURES_QCOM: Self = Self(1_000_488_000); } -impl NvExtension490Fn { - #[inline] - pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_NV_extension_490\0") } - } - pub const SPEC_VERSION: u32 = 0u32; -} -#[derive(Clone)] -pub struct NvExtension490Fn {} -unsafe impl Send for NvExtension490Fn {} -unsafe impl Sync for NvExtension490Fn {} -impl NvExtension490Fn { - pub fn load<F>(mut _f: F) -> Self - where - F: FnMut(&::std::ffi::CStr) -> *const c_void, - { - Self {} - } -} impl NvRayTracingInvocationReorderFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -25168,18 +26498,22 @@ impl StructureType { pub const PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_FEATURES_ARM: Self = Self(1_000_497_000); pub const PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_PROPERTIES_ARM: Self = Self(1_000_497_001); } -impl ExtExtension499Fn { +impl ExtPipelineLibraryGroupHandlesFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_499\0") } + unsafe { + ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"VK_EXT_pipeline_library_group_handles\0", + ) + } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } #[derive(Clone)] -pub struct ExtExtension499Fn {} -unsafe impl Send for ExtExtension499Fn {} -unsafe impl Sync for ExtExtension499Fn {} -impl ExtExtension499Fn { +pub struct ExtPipelineLibraryGroupHandlesFn {} +unsafe impl Send for ExtPipelineLibraryGroupHandlesFn {} +unsafe impl Sync for ExtPipelineLibraryGroupHandlesFn {} +impl ExtPipelineLibraryGroupHandlesFn { pub fn load<F>(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, @@ -25187,18 +26521,27 @@ impl ExtExtension499Fn { Self {} } } -impl ExtExtension500Fn { +#[doc = "Generated from 'VK_EXT_pipeline_library_group_handles'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT: Self = + Self(1_000_498_000); +} +impl ExtDynamicRenderingUnusedAttachmentsFn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { - unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_500\0") } + unsafe { + ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"VK_EXT_dynamic_rendering_unused_attachments\0", + ) + } } - pub const SPEC_VERSION: u32 = 0u32; + pub const SPEC_VERSION: u32 = 1u32; } #[derive(Clone)] -pub struct ExtExtension500Fn {} -unsafe impl Send for ExtExtension500Fn {} -unsafe impl Sync for ExtExtension500Fn {} -impl ExtExtension500Fn { +pub struct ExtDynamicRenderingUnusedAttachmentsFn {} +unsafe impl Send for ExtDynamicRenderingUnusedAttachmentsFn {} +unsafe impl Sync for ExtDynamicRenderingUnusedAttachmentsFn {} +impl ExtDynamicRenderingUnusedAttachmentsFn { pub fn load<F>(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, @@ -25206,6 +26549,11 @@ impl ExtExtension500Fn { Self {} } } +#[doc = "Generated from 'VK_EXT_dynamic_rendering_unused_attachments'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT: Self = + Self(1_000_499_000); +}
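(Editorial sketch, not part of the vendored diff: the `ExtShaderObjectFn::load` pattern above is how every ash-generated function table is populated. The caller supplies a closure that resolves each entry-point name; any name that resolves to null keeps the generated panic stub. A minimal usage sketch, assuming valid `ash::Instance` and `ash::Device` handles obtained elsewhere; the wrapper types ash ships normally do this for you.)

use std::ffi::c_void;
use ash::vk;

// Build the VK_EXT_shader_object table by resolving each command through
// vkGetDeviceProcAddr; names that fail to resolve keep the panic stubs.
fn load_shader_object_fns(instance: &ash::Instance, device: &ash::Device) -> vk::ExtShaderObjectFn {
    vk::ExtShaderObjectFn::load(|name| unsafe {
        ::std::mem::transmute::<_, *const c_void>(
            instance.get_device_proc_addr(device.handle(), name.as_ptr()),
        )
    })
}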
impl ExtExtension501Fn { #[inline] pub const fn name() -> &'static ::std::ffi::CStr { @@ -25320,3 +26668,578 @@ impl NvExtension506Fn { Self {} } } +impl KhrExtension507Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_507\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct KhrExtension507Fn {} +unsafe impl Send for KhrExtension507Fn {} +unsafe impl Sync for KhrExtension507Fn {} +impl KhrExtension507Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl ExtExtension508Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_508\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension508Fn {} +unsafe impl Send for ExtExtension508Fn {} +unsafe impl Sync for ExtExtension508Fn {} +impl ExtExtension508Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +#[doc = "Generated from 'VK_EXT_extension_508'"] +impl AccessFlags2 { + pub const RESERVED_47_EXT: Self = + Self(0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000); + pub const RESERVED_48_EXT: Self = + Self(0b1_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000); +} +#[doc = "Generated from 'VK_EXT_extension_508'"] +impl PipelineStageFlags2 { + pub const RESERVED_42_EXT: Self = Self(0b100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000); +} +#[doc = "Generated from 'VK_EXT_extension_508'"] +impl QueueFlags { + pub const RESERVED_10_EXT: Self = Self(0b100_0000_0000); +} +impl ExtExtension509Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_509\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension509Fn {} +unsafe impl Send for ExtExtension509Fn {} +unsafe impl Sync for ExtExtension509Fn {} +impl ExtExtension509Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl MesaExtension510Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_MESA_extension_510\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct MesaExtension510Fn {} +unsafe impl Send for MesaExtension510Fn {} +unsafe impl Sync for MesaExtension510Fn {} +impl MesaExtension510Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl QcomMultiviewPerViewRenderAreasFn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { + ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"VK_QCOM_multiview_per_view_render_areas\0", + ) + } + } + pub const SPEC_VERSION: u32 = 1u32; +} +#[derive(Clone)] +pub struct QcomMultiviewPerViewRenderAreasFn {} +unsafe impl Send for QcomMultiviewPerViewRenderAreasFn {} +unsafe impl Sync for QcomMultiviewPerViewRenderAreasFn {} +impl QcomMultiviewPerViewRenderAreasFn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +#[doc = "Generated from 'VK_QCOM_multiview_per_view_render_areas'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM: Self = 
Self(1_000_510_000); + pub const MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM: Self = + Self(1_000_510_001); +} +impl ExtExtension512Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_512\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension512Fn {} +unsafe impl Send for ExtExtension512Fn {} +unsafe impl Sync for ExtExtension512Fn {} +impl ExtExtension512Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl KhrExtension513Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_513\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct KhrExtension513Fn {} +unsafe impl Send for KhrExtension513Fn {} +unsafe impl Sync for KhrExtension513Fn {} +impl KhrExtension513Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl KhrExtension514Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_514\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct KhrExtension514Fn {} +unsafe impl Send for KhrExtension514Fn {} +unsafe impl Sync for KhrExtension514Fn {} +impl KhrExtension514Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl KhrExtension515Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_515\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct KhrExtension515Fn {} +unsafe impl Send for KhrExtension515Fn {} +unsafe impl Sync for KhrExtension515Fn {} +impl KhrExtension515Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl KhrExtension516Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_516\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct KhrExtension516Fn {} +unsafe impl Send for KhrExtension516Fn {} +unsafe impl Sync for KhrExtension516Fn {} +impl KhrExtension516Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +#[doc = "Generated from 'VK_KHR_extension_516'"] +impl BufferCreateFlags { + pub const RESERVED_6_KHR: Self = Self(0b100_0000); +} +#[doc = "Generated from 'VK_KHR_extension_516'"] +impl ImageCreateFlags { + pub const RESERVED_20_KHR: Self = Self(0b1_0000_0000_0000_0000_0000); +} +impl ExtExtension517Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_517\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension517Fn {} +unsafe impl Send for ExtExtension517Fn {} +unsafe impl Sync for ExtExtension517Fn {} +impl ExtExtension517Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +#[doc = "Generated from 'VK_EXT_extension_517'"] +impl DescriptorSetLayoutCreateFlags { + pub const RESERVED_6_EXT: Self = Self(0b100_0000); +} +impl 
MesaExtension518Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_MESA_extension_518\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct MesaExtension518Fn {} +unsafe impl Send for MesaExtension518Fn {} +unsafe impl Sync for MesaExtension518Fn {} +impl MesaExtension518Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl QcomExtension519Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_QCOM_extension_519\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct QcomExtension519Fn {} +unsafe impl Send for QcomExtension519Fn {} +unsafe impl Sync for QcomExtension519Fn {} +impl QcomExtension519Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl QcomExtension520Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_QCOM_extension_520\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct QcomExtension520Fn {} +unsafe impl Send for QcomExtension520Fn {} +unsafe impl Sync for QcomExtension520Fn {} +impl QcomExtension520Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl QcomExtension521Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_QCOM_extension_521\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct QcomExtension521Fn {} +unsafe impl Send for QcomExtension521Fn {} +unsafe impl Sync for QcomExtension521Fn {} +impl QcomExtension521Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl QcomExtension522Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_QCOM_extension_522\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct QcomExtension522Fn {} +unsafe impl Send for QcomExtension522Fn {} +unsafe impl Sync for QcomExtension522Fn {} +impl QcomExtension522Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl ExtExtension523Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_523\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension523Fn {} +unsafe impl Send for ExtExtension523Fn {} +unsafe impl Sync for ExtExtension523Fn {} +impl ExtExtension523Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl ExtExtension524Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_524\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension524Fn {} +unsafe impl Send for ExtExtension524Fn {} +unsafe impl Sync for ExtExtension524Fn {} +impl ExtExtension524Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl ExtAttachmentFeedbackLoopDynamicStateFn { + 
#[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { + ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"VK_EXT_attachment_feedback_loop_dynamic_state\0", + ) + } + } + pub const SPEC_VERSION: u32 = 1u32; +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT = + unsafe extern "system" fn(command_buffer: CommandBuffer, aspect_mask: ImageAspectFlags); +#[derive(Clone)] +pub struct ExtAttachmentFeedbackLoopDynamicStateFn { + pub cmd_set_attachment_feedback_loop_enable_ext: PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT, +} +unsafe impl Send for ExtAttachmentFeedbackLoopDynamicStateFn {} +unsafe impl Sync for ExtAttachmentFeedbackLoopDynamicStateFn {} +impl ExtAttachmentFeedbackLoopDynamicStateFn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self { + cmd_set_attachment_feedback_loop_enable_ext: unsafe { + unsafe extern "system" fn cmd_set_attachment_feedback_loop_enable_ext( + _command_buffer: CommandBuffer, + _aspect_mask: ImageAspectFlags, + ) { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_attachment_feedback_loop_enable_ext) + )) + } + let cname = ::std::ffi::CStr::from_bytes_with_nul_unchecked( + b"vkCmdSetAttachmentFeedbackLoopEnableEXT\0", + ); + let val = _f(cname); + if val.is_null() { + cmd_set_attachment_feedback_loop_enable_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } +} +#[doc = "Generated from 'VK_EXT_attachment_feedback_loop_dynamic_state'"] +impl DynamicState { + pub const ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT: Self = Self(1_000_524_000); +} +#[doc = "Generated from 'VK_EXT_attachment_feedback_loop_dynamic_state'"] +impl StructureType { + pub const PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT: Self = + Self(1_000_524_000); +} +impl ExtExtension526Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_526\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension526Fn {} +unsafe impl Send for ExtExtension526Fn {} +unsafe impl Sync for ExtExtension526Fn {} +impl ExtExtension526Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl ExtExtension527Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_527\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension527Fn {} +unsafe impl Send for ExtExtension527Fn {} +unsafe impl Sync for ExtExtension527Fn {} +impl ExtExtension527Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl ExtExtension528Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_EXT_extension_528\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct ExtExtension528Fn {} +unsafe impl Send for ExtExtension528Fn {} +unsafe impl Sync for ExtExtension528Fn {} +impl ExtExtension528Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl KhrExtension529Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_529\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} 
+#[derive(Clone)] +pub struct KhrExtension529Fn {} +unsafe impl Send for KhrExtension529Fn {} +unsafe impl Sync for KhrExtension529Fn {} +impl KhrExtension529Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl QnxExtension530Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_QNX_extension_530\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct QnxExtension530Fn {} +unsafe impl Send for QnxExtension530Fn {} +unsafe impl Sync for QnxExtension530Fn {} +impl QnxExtension530Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +#[doc = "Generated from 'VK_QNX_extension_530'"] +impl ExternalMemoryHandleTypeFlags { + pub const TYPE_530_QNX: Self = Self(0b100_0000_0000_0000); +} +impl MsftExtension531Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_MSFT_extension_531\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct MsftExtension531Fn {} +unsafe impl Send for MsftExtension531Fn {} +unsafe impl Sync for MsftExtension531Fn {} +impl MsftExtension531Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} +impl KhrExtension532Fn { + #[inline] + pub const fn name() -> &'static ::std::ffi::CStr { + unsafe { ::std::ffi::CStr::from_bytes_with_nul_unchecked(b"VK_KHR_extension_532\0") } + } + pub const SPEC_VERSION: u32 = 0u32; +} +#[derive(Clone)] +pub struct KhrExtension532Fn {} +unsafe impl Send for KhrExtension532Fn {} +unsafe impl Sync for KhrExtension532Fn {} +impl KhrExtension532Fn { + pub fn load<F>(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + Self {} + } +} diff --git a/third_party/rust/ash/src/vk/features.rs b/third_party/rust/ash/src/vk/features.rs index 4a52e377eff7..f7ddc8e6be48 100644 --- a/third_party/rust/ash/src/vk/features.rs +++ b/third_party/rust/ash/src/vk/features.rs @@ -999,7 +999,7 @@ pub type PFN_vkCmdSetDepthBias = unsafe extern "system" fn( ); #[allow(non_camel_case_types)] pub type PFN_vkCmdSetBlendConstants = - unsafe extern "system" fn(command_buffer: CommandBuffer, blend_constants: *const [f32; 4]); + unsafe extern "system" fn(command_buffer: CommandBuffer, blend_constants: *const [f32; 4usize]); #[allow(non_camel_case_types)] pub type PFN_vkCmdSetDepthBounds = unsafe extern "system" fn( command_buffer: CommandBuffer, @@ -2876,7 +2876,7 @@ impl DeviceFnV1_0 { cmd_set_blend_constants: unsafe { unsafe extern "system" fn cmd_set_blend_constants( _command_buffer: CommandBuffer, - _blend_constants: *const [f32; 4], + _blend_constants: *const [f32; 4usize], ) { panic!(concat!( "Unable to load ", 
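(Editorial sketch, not part of the vendored diff: the native.rs hunks that follow are rust-bindgen layout tests for the Vulkan Video std headers. Each generated test pins a #[repr(C)] struct's size, alignment, and field offsets so that an ABI change in the headers fails the vendored crate's tests loudly. A toy illustration of the pattern; the `Toy` struct and its numbers are invented for this note.)

#[repr(C)]
struct Toy {
    a: u8,  // offset 0
    b: u32, // offset 4, after 3 bytes of padding
}

#[test]
fn toy_layout() {
    const UNINIT: ::std::mem::MaybeUninit<Toy> = ::std::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(::std::mem::size_of::<Toy>(), 8usize);
    assert_eq!(::std::mem::align_of::<Toy>(), 4usize);
    // Field offsets are probed without ever materializing a value:
    assert_eq!(unsafe { ::std::ptr::addr_of!((*ptr).b) as usize - ptr as usize }, 4usize);
}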
diff --git a/third_party/rust/ash/src/vk/native.rs b/third_party/rust/ash/src/vk/native.rs index 02edfba5c8c5..03e0ad526104 100644 --- a/third_party/rust/ash/src/vk/native.rs +++ b/third_party/rust/ash/src/vk/native.rs @@ -1,4 +1,4 @@ -/* automatically generated by rust-bindgen 0.61.0 */ +/* automatically generated by rust-bindgen 0.64.0 */ #[repr(C)] #[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] @@ -7050,42 +7050,48 @@ impl StdVideoEncodeH264ReferenceInfoFlags { #[repr(C)] #[repr(align(4))] #[derive(Debug, Copy, Clone)] -pub struct StdVideoEncodeH264RefMgmtFlags { +pub struct StdVideoEncodeH264ReferenceListsInfoFlags { pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>, pub __bindgen_padding_0: [u8; 3usize], } #[test] -fn bindgen_test_layout_StdVideoEncodeH264RefMgmtFlags() { +fn bindgen_test_layout_StdVideoEncodeH264ReferenceListsInfoFlags() { assert_eq!( - ::std::mem::size_of::<StdVideoEncodeH264RefMgmtFlags>(), + ::std::mem::size_of::<StdVideoEncodeH264ReferenceListsInfoFlags>(), 4usize, - concat!("Size of: ", stringify!(StdVideoEncodeH264RefMgmtFlags)) + concat!( + "Size of: ", + stringify!(StdVideoEncodeH264ReferenceListsInfoFlags) + ) ); assert_eq!( - ::std::mem::align_of::<StdVideoEncodeH264RefMgmtFlags>(), + ::std::mem::align_of::<StdVideoEncodeH264ReferenceListsInfoFlags>(), 4usize, - concat!("Alignment of ", stringify!(StdVideoEncodeH264RefMgmtFlags)) + concat!( + "Alignment of ", + stringify!(StdVideoEncodeH264ReferenceListsInfoFlags) + ) ); } -impl StdVideoEncodeH264RefMgmtFlags { +impl StdVideoEncodeH264ReferenceListsInfoFlags { #[inline] - pub fn ref_pic_list_modification_l0_flag(&self) -> u32 { + pub fn ref_pic_list_modification_flag_l0(&self) -> u32 { unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } } #[inline] - pub fn set_ref_pic_list_modification_l0_flag(&mut self, val: u32) { + pub fn set_ref_pic_list_modification_flag_l0(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); self._bitfield_1.set(0usize, 1u8, val as u64) } } #[inline] - pub fn ref_pic_list_modification_l1_flag(&self) -> u32 { + pub fn ref_pic_list_modification_flag_l1(&self) -> u32 { unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } } #[inline] - pub fn set_ref_pic_list_modification_l1_flag(&mut self, val: u32) { + pub fn set_ref_pic_list_modification_flag_l1(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); self._bitfield_1.set(1usize, 1u8, val as u64) @@ -7093,19 +7099,19 @@ impl StdVideoEncodeH264RefMgmtFlags { } #[inline] pub fn new_bitfield_1( - ref_pic_list_modification_l0_flag: u32, - ref_pic_list_modification_l1_flag: u32, + ref_pic_list_modification_flag_l0: u32, + ref_pic_list_modification_flag_l1: u32, ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit.set(0usize, 1u8, { - let ref_pic_list_modification_l0_flag: u32 = - unsafe { ::std::mem::transmute(ref_pic_list_modification_l0_flag) }; - ref_pic_list_modification_l0_flag as u64 + let ref_pic_list_modification_flag_l0: u32 = - unsafe { ::std::mem::transmute(ref_pic_list_modification_flag_l0) }; hmm + unsafe { ::std::mem::transmute(ref_pic_list_modification_flag_l0) }; + ref_pic_list_modification_flag_l0 as u64 }); __bindgen_bitfield_unit.set(1usize, 1u8, { - let ref_pic_list_modification_l1_flag: u32 = - unsafe { ::std::mem::transmute(ref_pic_list_modification_l1_flag) }; - ref_pic_list_modification_l1_flag as u64 + let ref_pic_list_modification_flag_l1: u32 = + unsafe { ::std::mem::transmute(ref_pic_list_modification_flag_l1) }; + ref_pic_list_modification_flag_l1 as u64 }); __bindgen_bitfield_unit } @@ -7255,34 +7261,39 @@ fn bindgen_test_layout_StdVideoEncodeH264RefPicMarkingEntry() { } #[repr(C)] #[derive(Debug, Copy, Clone)] -pub struct StdVideoEncodeH264RefMemMgmtCtrlOperations { - pub flags: StdVideoEncodeH264RefMgmtFlags, +pub struct StdVideoEncodeH264ReferenceListsInfo { + pub flags: StdVideoEncodeH264ReferenceListsInfoFlags, + pub refPicList0EntryCount: u8, + pub refPicList1EntryCount: u8, pub refList0ModOpCount: u8, - pub pRefList0ModOperations: *const StdVideoEncodeH264RefListModEntry, pub refList1ModOpCount: u8, - pub pRefList1ModOperations: *const StdVideoEncodeH264RefListModEntry, pub refPicMarkingOpCount: u8, + pub reserved1: 
[u8; 7usize], + pub pRefPicList0Entries: *const u8, + pub pRefPicList1Entries: *const u8, + pub pRefList0ModOperations: *const StdVideoEncodeH264RefListModEntry, + pub pRefList1ModOperations: *const StdVideoEncodeH264RefListModEntry, pub pRefPicMarkingOperations: *const StdVideoEncodeH264RefPicMarkingEntry, } #[test] -fn bindgen_test_layout_StdVideoEncodeH264RefMemMgmtCtrlOperations() { - const UNINIT: ::std::mem::MaybeUninit<StdVideoEncodeH264RefMemMgmtCtrlOperations> = +fn bindgen_test_layout_StdVideoEncodeH264ReferenceListsInfo() { + const UNINIT: ::std::mem::MaybeUninit<StdVideoEncodeH264ReferenceListsInfo> = ::std::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( - ::std::mem::size_of::<StdVideoEncodeH264RefMemMgmtCtrlOperations>(), - 48usize, + ::std::mem::size_of::<StdVideoEncodeH264ReferenceListsInfo>(), + 56usize, concat!( "Size of: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations) + stringify!(StdVideoEncodeH264ReferenceListsInfo) ) ); assert_eq!( - ::std::mem::align_of::<StdVideoEncodeH264RefMemMgmtCtrlOperations>(), + ::std::mem::align_of::<StdVideoEncodeH264ReferenceListsInfo>(), 8usize, concat!( "Alignment of ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations) + stringify!(StdVideoEncodeH264ReferenceListsInfo) ) ); assert_eq!( @@ -7290,67 +7301,117 @@ fn bindgen_test_layout_StdVideoEncodeH264RefMemMgmtCtrlOperations() { 0usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations), + stringify!(StdVideoEncodeH264ReferenceListsInfo), "::", stringify!(flags) ) ); assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).refList0ModOpCount) as usize - ptr as usize }, + unsafe { ::std::ptr::addr_of!((*ptr).refPicList0EntryCount) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations), + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(refPicList0EntryCount) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).refPicList1EntryCount) as usize - ptr as usize }, + 5usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(refPicList1EntryCount) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).refList0ModOpCount) as usize - ptr as usize }, + 6usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), "::", stringify!(refList0ModOpCount) ) ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pRefList0ModOperations) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations), - "::", - stringify!(pRefList0ModOperations) - ) - ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).refList1ModOpCount) as usize - ptr as usize }, - 16usize, + 7usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations), + stringify!(StdVideoEncodeH264ReferenceListsInfo), "::", stringify!(refList1ModOpCount) ) ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pRefList1ModOperations) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations), - "::", - stringify!(pRefList1ModOperations) - ) - ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).refPicMarkingOpCount) as usize - ptr as usize }, - 32usize, + 8usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations), + stringify!(StdVideoEncodeH264ReferenceListsInfo), "::", stringify!(refPicMarkingOpCount) ) ); assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pRefPicMarkingOperations) as usize - ptr as usize }, + unsafe { ::std::ptr::addr_of!((*ptr).reserved1) as usize - ptr as usize }, + 9usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefPicList0Entries) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(pRefPicList0Entries) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefPicList1Entries) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(pRefPicList1Entries) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefList0ModOperations) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(pRefList0ModOperations) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefList1ModOperations) as usize - ptr as usize }, 40usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations), + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(pRefList1ModOperations) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefPicMarkingOperations) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), "::", stringify!(pRefPicMarkingOperations) ) 
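(Editorial sketch, not part of the vendored diff: the StdVideoEncodeH264ReferenceListsInfoFlags accessors above pack two 1-bit C bitfields into a __BindgenBitfieldUnit; `get(bit, width)` and `set(bit, width, val)` address `width` bits starting at bit index `bit`. Hand-rolled equivalents of the two flag accessors, with names and a bare `u8` unit assumed purely for illustration.)

// bit 0: ref_pic_list_modification_flag_l0, bit 1: ref_pic_list_modification_flag_l1
fn ref_pic_list_modification_flag_l1(unit: u8) -> u32 {
    ((unit >> 1) & 1) as u32 // extract 1 bit starting at bit index 1
}

fn set_ref_pic_list_modification_flag_l0(unit: &mut u8, val: u32) {
    *unit = (*unit & !0b1) | ((val as u8) & 0b1); // store 1 bit at bit index 0
}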
stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefPicList0Entries) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(pRefPicList0Entries) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefPicList1Entries) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(pRefPicList1Entries) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefList0ModOperations) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(pRefList0ModOperations) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefList1ModOperations) as usize - ptr as usize }, 40usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH264RefMemMgmtCtrlOperations), + stringify!(StdVideoEncodeH264ReferenceListsInfo), + "::", + stringify!(pRefList1ModOperations) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefPicMarkingOperations) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceListsInfo), "::", stringify!(pRefPicMarkingOperations) ) @@ -7362,6 +7423,7 @@ pub struct StdVideoEncodeH264PictureInfo { pub flags: StdVideoEncodeH264PictureInfoFlags, pub seq_parameter_set_id: u8, pub pic_parameter_set_id: u8, + pub reserved1: u16, pub pictureType: StdVideoH264PictureType, pub frame_num: u32, pub PicOrderCnt: i32, @@ -7411,6 +7473,16 @@ fn bindgen_test_layout_StdVideoEncodeH264PictureInfo() { stringify!(pic_parameter_set_id) ) ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).reserved1) as usize - ptr as usize }, + 6usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264PictureInfo), + "::", + stringify!(reserved1) + ) + ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).pictureType) as usize - ptr as usize }, 8usize, @@ -7446,6 +7518,7 @@ fn bindgen_test_layout_StdVideoEncodeH264PictureInfo() { #[derive(Debug, Copy, Clone)] pub struct StdVideoEncodeH264ReferenceInfo { pub flags: StdVideoEncodeH264ReferenceInfoFlags, + pub pictureType: StdVideoH264PictureType, pub FrameNum: u32, pub PicOrderCnt: i32, pub long_term_pic_num: u16, @@ -7458,7 +7531,7 @@ fn bindgen_test_layout_StdVideoEncodeH264ReferenceInfo() { let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), - 16usize, + 20usize, concat!("Size of: ", stringify!(StdVideoEncodeH264ReferenceInfo)) ); assert_eq!( @@ -7477,8 +7550,18 @@ fn bindgen_test_layout_StdVideoEncodeH264ReferenceInfo() { ) ); assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).FrameNum) as usize - ptr as usize }, + unsafe { ::std::ptr::addr_of!((*ptr).pictureType) as usize - ptr as usize }, 4usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264ReferenceInfo), + "::", + stringify!(pictureType) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).FrameNum) as usize - ptr as usize }, + 8usize, concat!( "Offset of field: ", stringify!(StdVideoEncodeH264ReferenceInfo), @@ -7488,7 +7571,7 @@ fn bindgen_test_layout_StdVideoEncodeH264ReferenceInfo() { ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).PicOrderCnt) as usize - ptr as usize }, - 8usize, + 12usize, concat!( "Offset of field: ", stringify!(StdVideoEncodeH264ReferenceInfo), @@ -7498,7 +7581,7 @@ fn 
bindgen_test_layout_StdVideoEncodeH264ReferenceInfo() { ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).long_term_pic_num) as usize - ptr as usize }, - 12usize, + 16usize, concat!( "Offset of field: ", stringify!(StdVideoEncodeH264ReferenceInfo), @@ -7508,7 +7591,7 @@ fn bindgen_test_layout_StdVideoEncodeH264ReferenceInfo() { ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).long_term_frame_idx) as usize - ptr as usize }, - 14usize, + 18usize, concat!( "Offset of field: ", stringify!(StdVideoEncodeH264ReferenceInfo), @@ -7530,6 +7613,8 @@ pub struct StdVideoEncodeH264SliceHeader { pub disable_deblocking_filter_idc: StdVideoH264DisableDeblockingFilterIdc, pub slice_alpha_c0_offset_div2: i8, pub slice_beta_offset_div2: i8, + pub reserved1: u16, + pub reserved2: u32, pub pWeightTable: *const StdVideoEncodeH264WeightTable, } #[test] @@ -7653,6 +7738,26 @@ fn bindgen_test_layout_StdVideoEncodeH264SliceHeader() { stringify!(slice_beta_offset_div2) ) ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).reserved1) as usize - ptr as usize }, + 26usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264SliceHeader), + "::", + stringify!(reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).reserved2) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH264SliceHeader), + "::", + stringify!(reserved2) + ) + ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).pWeightTable) as usize - ptr as usize }, 32usize, @@ -8511,31 +8616,31 @@ fn bindgen_test_layout_StdVideoEncodeH265SliceSegmentHeader() { #[repr(C)] #[repr(align(4))] #[derive(Debug, Copy, Clone)] -pub struct StdVideoEncodeH265ReferenceModificationFlags { +pub struct StdVideoEncodeH265ReferenceListsInfoFlags { pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>, pub __bindgen_padding_0: [u8; 3usize], } #[test] -fn bindgen_test_layout_StdVideoEncodeH265ReferenceModificationFlags() { +fn bindgen_test_layout_StdVideoEncodeH265ReferenceListsInfoFlags() { assert_eq!( - ::std::mem::size_of::(), + ::std::mem::size_of::(), 4usize, concat!( "Size of: ", - stringify!(StdVideoEncodeH265ReferenceModificationFlags) + stringify!(StdVideoEncodeH265ReferenceListsInfoFlags) ) ); assert_eq!( - ::std::mem::align_of::(), + ::std::mem::align_of::(), 4usize, concat!( "Alignment of ", - stringify!(StdVideoEncodeH265ReferenceModificationFlags) + stringify!(StdVideoEncodeH265ReferenceListsInfoFlags) ) ); } -impl StdVideoEncodeH265ReferenceModificationFlags { +impl StdVideoEncodeH265ReferenceListsInfoFlags { #[inline] pub fn ref_pic_list_modification_flag_l0(&self) -> u32 { unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } @@ -8579,32 +8684,35 @@ impl StdVideoEncodeH265ReferenceModificationFlags { } #[repr(C)] #[derive(Debug, Copy, Clone)] -pub struct StdVideoEncodeH265ReferenceModifications { - pub flags: StdVideoEncodeH265ReferenceModificationFlags, - pub referenceList0ModificationsCount: u8, - pub pReferenceList0Modifications: *const u8, - pub referenceList1ModificationsCount: u8, - pub pReferenceList1Modifications: *const u8, +pub struct StdVideoEncodeH265ReferenceListsInfo { + pub flags: StdVideoEncodeH265ReferenceListsInfoFlags, + pub num_ref_idx_l0_active_minus1: u8, + pub num_ref_idx_l1_active_minus1: u8, + pub reserved1: u16, + pub pRefPicList0Entries: *const u8, + pub pRefPicList1Entries: *const u8, + pub pRefList0Modifications: *const u8, + pub pRefList1Modifications: *const u8, } #[test] -fn 
bindgen_test_layout_StdVideoEncodeH265ReferenceModifications() { - const UNINIT: ::std::mem::MaybeUninit = +fn bindgen_test_layout_StdVideoEncodeH265ReferenceListsInfo() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( - ::std::mem::size_of::(), - 32usize, + ::std::mem::size_of::(), + 40usize, concat!( "Size of: ", - stringify!(StdVideoEncodeH265ReferenceModifications) + stringify!(StdVideoEncodeH265ReferenceListsInfo) ) ); assert_eq!( - ::std::mem::align_of::(), + ::std::mem::align_of::(), 8usize, concat!( "Alignment of ", - stringify!(StdVideoEncodeH265ReferenceModifications) + stringify!(StdVideoEncodeH265ReferenceListsInfo) ) ); assert_eq!( @@ -8612,57 +8720,83 @@ fn bindgen_test_layout_StdVideoEncodeH265ReferenceModifications() { 0usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH265ReferenceModifications), + stringify!(StdVideoEncodeH265ReferenceListsInfo), "::", stringify!(flags) ) ); assert_eq!( unsafe { - ::std::ptr::addr_of!((*ptr).referenceList0ModificationsCount) as usize - ptr as usize + ::std::ptr::addr_of!((*ptr).num_ref_idx_l0_active_minus1) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH265ReferenceModifications), + stringify!(StdVideoEncodeH265ReferenceListsInfo), "::", - stringify!(referenceList0ModificationsCount) + stringify!(num_ref_idx_l0_active_minus1) ) ); assert_eq!( unsafe { - ::std::ptr::addr_of!((*ptr).pReferenceList0Modifications) as usize - ptr as usize + ::std::ptr::addr_of!((*ptr).num_ref_idx_l1_active_minus1) as usize - ptr as usize }, + 5usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH265ReferenceListsInfo), + "::", + stringify!(num_ref_idx_l1_active_minus1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).reserved1) as usize - ptr as usize }, + 6usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH265ReferenceListsInfo), + "::", + stringify!(reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefPicList0Entries) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH265ReferenceModifications), + stringify!(StdVideoEncodeH265ReferenceListsInfo), "::", - stringify!(pReferenceList0Modifications) + stringify!(pRefPicList0Entries) ) ); assert_eq!( - unsafe { - ::std::ptr::addr_of!((*ptr).referenceList1ModificationsCount) as usize - ptr as usize - }, + unsafe { ::std::ptr::addr_of!((*ptr).pRefPicList1Entries) as usize - ptr as usize }, 16usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH265ReferenceModifications), + stringify!(StdVideoEncodeH265ReferenceListsInfo), "::", - stringify!(referenceList1ModificationsCount) + stringify!(pRefPicList1Entries) ) ); assert_eq!( - unsafe { - ::std::ptr::addr_of!((*ptr).pReferenceList1Modifications) as usize - ptr as usize - }, + unsafe { ::std::ptr::addr_of!((*ptr).pRefList0Modifications) as usize - ptr as usize }, 24usize, concat!( "Offset of field: ", - stringify!(StdVideoEncodeH265ReferenceModifications), + stringify!(StdVideoEncodeH265ReferenceListsInfo), "::", - stringify!(pReferenceList1Modifications) + stringify!(pRefList0Modifications) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).pRefList1Modifications) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH265ReferenceListsInfo), + "::", + stringify!(pRefList1Modifications) ) ); } @@ -8786,8 +8920,8 @@ pub struct StdVideoEncodeH265PictureInfo { pub 
sps_video_parameter_set_id: u8, pub pps_seq_parameter_set_id: u8, pub pps_pic_parameter_set_id: u8, - pub PicOrderCntVal: i32, pub TemporalId: u8, + pub PicOrderCntVal: i32, } #[test] fn bindgen_test_layout_StdVideoEncodeH265PictureInfo() { @@ -8796,7 +8930,7 @@ fn bindgen_test_layout_StdVideoEncodeH265PictureInfo() { let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), - 20usize, + 16usize, concat!("Size of: ", stringify!(StdVideoEncodeH265PictureInfo)) ); assert_eq!( @@ -8854,6 +8988,16 @@ fn bindgen_test_layout_StdVideoEncodeH265PictureInfo() { stringify!(pps_pic_parameter_set_id) ) ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).TemporalId) as usize - ptr as usize }, + 11usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH265PictureInfo), + "::", + stringify!(TemporalId) + ) + ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).PicOrderCntVal) as usize - ptr as usize }, 12usize, @@ -8864,16 +9008,6 @@ fn bindgen_test_layout_StdVideoEncodeH265PictureInfo() { stringify!(PicOrderCntVal) ) ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).TemporalId) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(StdVideoEncodeH265PictureInfo), - "::", - stringify!(TemporalId) - ) - ); } #[repr(C)] #[repr(align(4))] @@ -8947,6 +9081,7 @@ impl StdVideoEncodeH265ReferenceInfoFlags { #[derive(Debug, Copy, Clone)] pub struct StdVideoEncodeH265ReferenceInfo { pub flags: StdVideoEncodeH265ReferenceInfoFlags, + pub PictureType: StdVideoH265PictureType, pub PicOrderCntVal: i32, pub TemporalId: u8, } @@ -8957,7 +9092,7 @@ fn bindgen_test_layout_StdVideoEncodeH265ReferenceInfo() { let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), - 12usize, + 16usize, concat!("Size of: ", stringify!(StdVideoEncodeH265ReferenceInfo)) ); assert_eq!( @@ -8976,8 +9111,18 @@ fn bindgen_test_layout_StdVideoEncodeH265ReferenceInfo() { ) ); assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).PicOrderCntVal) as usize - ptr as usize }, + unsafe { ::std::ptr::addr_of!((*ptr).PictureType) as usize - ptr as usize }, 4usize, + concat!( + "Offset of field: ", + stringify!(StdVideoEncodeH265ReferenceInfo), + "::", + stringify!(PictureType) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).PicOrderCntVal) as usize - ptr as usize }, + 8usize, concat!( "Offset of field: ", stringify!(StdVideoEncodeH265ReferenceInfo), @@ -8987,7 +9132,7 @@ fn bindgen_test_layout_StdVideoEncodeH265ReferenceInfo() { ); assert_eq!( unsafe { ::std::ptr::addr_of!((*ptr).TemporalId) as usize - ptr as usize }, - 8usize, + 12usize, concat!( "Offset of field: ", stringify!(StdVideoEncodeH265ReferenceInfo), diff --git a/third_party/rust/core-graphics-types/.cargo-checksum.json b/third_party/rust/core-graphics-types/.cargo-checksum.json index 31da816670f6..cc00991ad20d 100644 --- a/third_party/rust/core-graphics-types/.cargo-checksum.json +++ b/third_party/rust/core-graphics-types/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"e2e38d84cf514ad088ebc1909123ef4f707db6855528f938eb57d1e06c5a23ef","src/base.rs":"3764010f0c416db49be8a9e191fffb6848e8287edbdd5b12972613e301720ded","src/geometry.rs":"b94e50a16b8540dc6f37bfe4b1549ac68974cd6ba6c0bbd9209559f8a46d86eb","src/lib.rs":"31700ac9508fd32005bafd1c12a86a6803d198e9b1a71166a7391e642c091cd1"},"package":"3a68b68b3446082644c91ac778bf50cd4104bfb002b5a6a7c44cca5a2c70788b"} \ No newline at end of file 
+{"files":{"Cargo.toml":"cac07dfd7a2fcbb169a0c471a8a139ade8da91f92b7fbc198f3f2185d9c94d16","src/base.rs":"561db031cc746eab35a10fe72e10c314615b11e13cd48366fcdb2223196308a7","src/geometry.rs":"4ed03b07a4c5ba0f090689d31ef6eab8ec5b8f6eb7fb4e04fdf65e5ad8cd70ea","src/lib.rs":"31700ac9508fd32005bafd1c12a86a6803d198e9b1a71166a7391e642c091cd1"},"package":"2bb142d41022986c1d8ff29103a1411c8a3dfad3552f87a4f8dc50d61d4f4e33"} \ No newline at end of file diff --git a/third_party/rust/core-graphics-types/Cargo.toml b/third_party/rust/core-graphics-types/Cargo.toml index fbc4836458dc..365717dfb42f 100644 --- a/third_party/rust/core-graphics-types/Cargo.toml +++ b/third_party/rust/core-graphics-types/Cargo.toml @@ -3,31 +3,29 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] name = "core-graphics-types" -version = "0.1.1" +version = "0.1.2" authors = ["The Servo Project Developers"] description = "Bindings for some fundamental Core Graphics types" homepage = "https://github.com/servo/core-foundation-rs" -license = "MIT / Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/servo/core-foundation-rs" + [package.metadata.docs.rs] default-target = "x86_64-apple-darwin" + [dependencies.bitflags] version = "1.0" [dependencies.core-foundation] version = "0.9" -[dependencies.foreign-types] -version = "0.3.0" - [dependencies.libc] version = "0.2" diff --git a/third_party/rust/core-graphics-types/src/base.rs b/third_party/rust/core-graphics-types/src/base.rs index d5efc3b62e13..03a85f155910 100644 --- a/third_party/rust/core-graphics-types/src/base.rs +++ b/third_party/rust/core-graphics-types/src/base.rs @@ -26,7 +26,18 @@ pub type CGFloat = libc::c_double; #[cfg(not(target_pointer_width = "64"))] pub type CGFloat = libc::c_float; -pub type CGError = libc::int32_t; +pub type CGError = i32; pub type CGGlyph = libc::c_ushort; +pub const kCGErrorSuccess: CGError = 0; +pub const kCGErrorFailure: CGError = 1000; +pub const kCGErrorIllegalArgument: CGError = 1001; +pub const kCGErrorInvalidConnection: CGError = 1002; +pub const kCGErrorInvalidContext: CGError = 1003; +pub const kCGErrorCannotComplete: CGError = 1004; +pub const kCGErrorNotImplemented: CGError = 1006; +pub const kCGErrorRangeCheck: CGError = 1007; +pub const kCGErrorTypeCheck: CGError = 1008; +pub const kCGErrorInvalidOperation: CGError = 1010; +pub const kCGErrorNoneAvailable: CGError = 1011; diff --git a/third_party/rust/core-graphics-types/src/geometry.rs b/third_party/rust/core-graphics-types/src/geometry.rs index 906c605ec640..586075270c9f 100644 --- a/third_party/rust/core-graphics-types/src/geometry.rs +++ b/third_party/rust/core-graphics-types/src/geometry.rs @@ -138,6 +138,11 @@ impl CGRect { ffi::CGRectApplyAffineTransform(*self, *t) } } + + #[inline] + pub fn contains(&self, point: &CGPoint) -> bool { + unsafe { ffi::CGRectContainsPoint(*self,*point) == 1 } + } } 
#[repr(C)] @@ -190,6 +195,8 @@ mod ffi { pub fn CGPointApplyAffineTransform(point: CGPoint, t: CGAffineTransform) -> CGPoint; pub fn CGRectApplyAffineTransform(rect: CGRect, t: CGAffineTransform) -> CGRect; pub fn CGSizeApplyAffineTransform(size: CGSize, t: CGAffineTransform) -> CGSize; + + pub fn CGRectContainsPoint(rect:CGRect, point: CGPoint) -> boolean_t; } } diff --git a/third_party/rust/core-graphics/.cargo-checksum.json b/third_party/rust/core-graphics/.cargo-checksum.json index 9dafee4db221..37468e19bb8a 100644 --- a/third_party/rust/core-graphics/.cargo-checksum.json +++ b/third_party/rust/core-graphics/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"453b079ac8a6df8842e7e8a37222318c500f68e1605db7adbdf87337830df593","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"6745c3c38183d2eda9b1fa265fb0a95018db5c110cbabc00b32327d951bbe2ea","src/base.rs":"838683ff67253f4aff1d4b4177531210ca73d4e61f05c5d96a8f196b2a88c787","src/color.rs":"4c8ec4ab828cbc1b2a1538a34a51f5b380927f2f1daf187dff6f732f57a43656","src/color_space.rs":"b3d7ee8a21703c789160867cb8eb2188bd1daa193e3d030f21adb6f1a6f872de","src/context.rs":"8bda7f9ecb5be768b09a29cc3b0a4f329f55d2a2ab74030d121610283862d833","src/data_provider.rs":"b25201fdea43ea1a019c68aa5e997725d04d0824a238354ddc9f2dd8a6835cc4","src/display.rs":"9db5e5440fd302849b13b48393cab4db95447df8d5057c4534a9d8be948ca480","src/event.rs":"17c601ca0b8a0d806fc576cc6cee63a784deaf4246793cf6ce3abcb562de15c5","src/event_source.rs":"d55a4f5b5e62789325028febc51bbf54c74b15ab1a4e70c6ad749a2f9753e081","src/font.rs":"2a7ac5024f17550dd2b6eb97f6971559f930c163eac3a6625d6d55703fd5e96e","src/geometry.rs":"8e12dc89835406bfa514de8fb58f5fd435724d1ddb97dc3a70392efbcf1c42ed","src/gradient.rs":"8ee8661706f36914d08e903840c4f07414b38ba40ea4a482d34b900ac6ac7cf9","src/image.rs":"a5a5df8c0f310455f038eeb16688015f481688cb417f8e8f424a4c1d2a1cdd57","src/lib.rs":"78264571227db6fc9194cb90d64beaff1738a501f88b5da55eb17ae42592d26f","src/path.rs":"c429afeaed999b02ac00f89a867b5fc64f1e223039079a4e0529306b734ff117","src/private.rs":"da3fd61338bab2d8e26aa5433b2e18ecd2a0a408c62e1ac2b33a0f87f2dad88a","src/sys.rs":"3077395beb77193530b713aa681cb61f7b86fa79e4e4060133b6d61cf9f47e09","src/window.rs":"2f6c3dc958ae2c0c9e2fc5033300b96e60ed0abee9823ea1f03797d64df0911a"},"package":"2581bbab3b8ffc6fcbd550bf46c355135d16e9ff2a6ea032ad6b9bf1d7efe4fb"} \ No newline at end of file 
+{"files":{"COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"a0088b6d4f15d2f3abbd411834b84eaa7179b374133fb57b8d3d2048149cc494","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"6745c3c38183d2eda9b1fa265fb0a95018db5c110cbabc00b32327d951bbe2ea","src/access.rs":"919937f8fe61966543c602e30d5d59d61e0b7313bb6b33c25aa600ec5e43345e","src/base.rs":"838683ff67253f4aff1d4b4177531210ca73d4e61f05c5d96a8f196b2a88c787","src/color.rs":"4c8ec4ab828cbc1b2a1538a34a51f5b380927f2f1daf187dff6f732f57a43656","src/color_space.rs":"6a0be06bb7ecdf9d20c461912359d22e0ffd7151c6977a8d9b3b0efdd7929442","src/context.rs":"93d543bd2b77ee6b3132b4378b1652576354fae9bb675cde63e0347d81fec334","src/data_provider.rs":"88e1cdd75c35cff44150f0aac75de6ac715acbf4328abef1b2f191b54401c650","src/display.rs":"391f24669ad1ad5dcd6d15b5e418b6b07731186d3c18d94c8c0935ab348fb3b9","src/event.rs":"7c2ce02fa67cd4613745709563846b57b12fdde105a65d16d29f7a300e9c0f7b","src/event_source.rs":"a81f8bbd41bf4c4e3e91d6a6eab31356728fbf08787aca899ed03259808dfe4a","src/font.rs":"43cad7dfcc49f5f2de408146c6e38843b2d0092b3f5880fbf80e43e18b100f25","src/geometry.rs":"8e12dc89835406bfa514de8fb58f5fd435724d1ddb97dc3a70392efbcf1c42ed","src/gradient.rs":"dd957f14b77d448fa1e51c2e67849364062aa1d5bd60405f060f9a094c628061","src/image.rs":"72076f34a805b1d2336e425cc29d2ec3583046b847e9284ef7413f7eca2acb9a","src/lib.rs":"ca6aabcc5f307a9b15cd3e531d7fb9f54bdf58d9717d38e007e4facf1ca8c9a4","src/path.rs":"9389719ee257d86f50075b294b1b9df8fb3fc93b3442deee868e188489613a2d","src/private.rs":"da3fd61338bab2d8e26aa5433b2e18ecd2a0a408c62e1ac2b33a0f87f2dad88a","src/sys.rs":"3077395beb77193530b713aa681cb61f7b86fa79e4e4060133b6d61cf9f47e09","src/window.rs":"2f6c3dc958ae2c0c9e2fc5033300b96e60ed0abee9823ea1f03797d64df0911a"},"package":"506752f2b58723339d41d013009be97e549860785a366e2a5e75dfbd9725b48e"} \ No newline at end of file diff --git a/third_party/rust/core-graphics/Cargo.toml b/third_party/rust/core-graphics/Cargo.toml index b3871d54f4cc..645a88a6b9c0 100644 --- a/third_party/rust/core-graphics/Cargo.toml +++ b/third_party/rust/core-graphics/Cargo.toml @@ -3,23 +3,25 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
[package] name = "core-graphics" -version = "0.22.3" +version = "0.23.0" authors = ["The Servo Project Developers"] description = "Bindings to Core Graphics for macOS" -homepage = "https://github.com/servo/core-graphics-rs" -license = "MIT / Apache-2.0" +homepage = "https://github.com/servo/core-foundation-rs" +readme = "README.md" +license = "MIT OR Apache-2.0" repository = "https://github.com/servo/core-foundation-rs" + [package.metadata.docs.rs] default-target = "x86_64-apple-darwin" + [dependencies.bitflags] version = "1.0" @@ -30,7 +32,7 @@ version = "0.9" version = "0.1" [dependencies.foreign-types] -version = "0.3.0" +version = "0.5.0" [dependencies.libc] version = "0.2" diff --git a/third_party/rust/core-graphics/src/access.rs b/third_party/rust/core-graphics/src/access.rs new file mode 100644 index 000000000000..4e852c85ae84 --- /dev/null +++ b/third_party/rust/core-graphics/src/access.rs @@ -0,0 +1,26 @@ +pub use base::boolean_t; + +#[derive(Default)] +pub struct ScreenCaptureAccess; + +impl ScreenCaptureAccess { + /// If current app not in list, will open window. + /// Return the same result as preflight. + #[inline] + pub fn request(&self) -> bool { + unsafe { CGRequestScreenCaptureAccess() == 1 } + } + + /// Return true if has access + #[inline] + pub fn preflight(&self) -> bool { + unsafe { CGPreflightScreenCaptureAccess() == 1 } + } +} + +#[link(name = "CoreGraphics", kind = "framework")] +extern "C" { + // Screen Capture Access + fn CGRequestScreenCaptureAccess() -> boolean_t; + fn CGPreflightScreenCaptureAccess() -> boolean_t; +} diff --git a/third_party/rust/core-graphics/src/color_space.rs b/third_party/rust/core-graphics/src/color_space.rs index 9eef09c30b19..2b29635756b0 100644 --- a/third_party/rust/core-graphics/src/color_space.rs +++ b/third_party/rust/core-graphics/src/color_space.rs @@ -13,11 +13,11 @@ use foreign_types::ForeignType; foreign_type! { #[doc(hidden)] - type CType = ::sys::CGColorSpace; - fn drop = |p| CFRelease(p as *mut _); - fn clone = |p| CFRetain(p as *const _) as *mut _; - pub struct CGColorSpace; - pub struct CGColorSpaceRef; + pub unsafe type CGColorSpace { + type CType = ::sys::CGColorSpace; + fn drop = |p| CFRelease(p as *mut _); + fn clone = |p| CFRetain(p as *const _) as *mut _; + } } impl CGColorSpace { @@ -53,13 +53,61 @@ impl CGColorSpace { #[link(name = "CoreGraphics", kind = "framework")] extern { + /// The Display P3 color space, created by Apple. + pub static kCGColorSpaceDisplayP3: CFStringRef; + /// The Display P3 color space, using the HLG transfer function. + pub static kCGColorSpaceDisplayP3_HLG: CFStringRef; + /// The Display P3 color space with a linear transfer function + /// and extended-range values. + pub static kCGColorSpaceExtendedLinearDisplayP3: CFStringRef; + /// The standard Red Green Blue (sRGB) color space. pub static kCGColorSpaceSRGB: CFStringRef; - pub static kCGColorSpaceAdobeRGB1998: CFStringRef; - pub static kCGColorSpaceGenericGray: CFStringRef; - pub static kCGColorSpaceGenericRGB: CFStringRef; - pub static kCGColorSpaceGenericCMYK: CFStringRef; - pub static kCGColorSpaceGenericRGBLinear: CFStringRef; + /// The sRGB color space with a linear transfer function. + pub static kCGColorSpaceLinearSRGB: CFStringRef; + /// The extended sRGB color space. + pub static kCGColorSpaceExtendedSRGB: CFStringRef; + /// The sRGB color space with a linear transfer function and + /// extended-range values. 
+ pub static kCGColorSpaceExtendedLinearSRGB: CFStringRef; + /// The generic gray color space that has an exponential transfer + /// function with a power of 2.2. pub static kCGColorSpaceGenericGrayGamma2_2: CFStringRef; + /// The gray color space using a linear transfer function. + pub static kCGColorSpaceLinearGray: CFStringRef; + /// The extended gray color space. + pub static kCGColorSpaceExtendedGray: CFStringRef; + /// The extended gray color space with a linear transfer function. + pub static kCGColorSpaceExtendedLinearGray: CFStringRef; + /// The generic RGB color space with a linear transfer function. + pub static kCGColorSpaceGenericRGBLinear: CFStringRef; + /// The generic CMYK color space. + pub static kCGColorSpaceGenericCMYK: CFStringRef; + /// The XYZ color space, as defined by the CIE 1931 standard. + pub static kCGColorSpaceGenericXYZ: CFStringRef; + /// The generic LAB color space. + pub static kCGColorSpaceGenericLab: CFStringRef; + /// The ACEScg color space. + pub static kCGColorSpaceACESCGLinear: CFStringRef; + /// The Adobe RGB (1998) color space. + pub static kCGColorSpaceAdobeRGB1998: CFStringRef; + /// The DCI P3 color space, which is the digital cinema standard. + pub static kCGColorSpaceDCIP3: CFStringRef; + /// The recommendation of the International Telecommunication Union + /// (ITU) Radiocommunication sector for the BT.709 color space. + pub static kCGColorSpaceITUR_709: CFStringRef; + /// The Reference Output Medium Metric (ROMM) RGB color space. + pub static kCGColorSpaceROMMRGB: CFStringRef; + /// The recommendation of the International Telecommunication Union + /// (ITU) Radiocommunication sector for the BT.2020 color space. + pub static kCGColorSpaceITUR_2020: CFStringRef; + /// The recommendation of the International Telecommunication Union + /// (ITU) Radiocommunication sector for the BT.2020 color space, with + /// a linear transfer function and extended range values. + pub static kCGColorSpaceExtendedLinearITUR_2020: CFStringRef; + /// The name of the generic RGB color space. + pub static kCGColorSpaceGenericRGB: CFStringRef; + /// The name of the generic gray color space. + pub static kCGColorSpaceGenericGray: CFStringRef; fn CGColorSpaceCreateDeviceRGB() -> ::sys::CGColorSpaceRef; fn CGColorSpaceCreateDeviceGray() -> ::sys::CGColorSpaceRef; diff --git a/third_party/rust/core-graphics/src/context.rs b/third_party/rust/core-graphics/src/context.rs index 4efc0f70a371..aca5fb74d52d 100644 --- a/third_party/rust/core-graphics/src/context.rs +++ b/third_party/rust/core-graphics/src/context.rs @@ -109,11 +109,11 @@ pub enum CGInterpolationQuality { foreign_type! 
{ #[doc(hidden)] - type CType = ::sys::CGContext; - fn drop = |cs| CGContextRelease(cs); - fn clone = |p| CGContextRetain(p); - pub struct CGContext; - pub struct CGContextRef; + pub unsafe type CGContext { + type CType = ::sys::CGContext; + fn drop = |cs| CGContextRelease(cs); + fn clone = |p| CGContextRetain(p); + } } impl CGContext { @@ -389,6 +389,12 @@ impl CGContextRef { } } + pub fn reset_clip(&self) { + unsafe { + CGContextResetClip(self.as_ptr()); + } + } + pub fn draw_path(&self, mode: CGPathDrawingMode) { unsafe { CGContextDrawPath(self.as_ptr(), mode); @@ -612,6 +618,12 @@ impl CGContextRef { CGContextSetShadowWithColor(self.as_ptr(), offset, blur, color.as_concrete_TypeRef()); } } + + pub fn set_alpha(&self, alpha: CGFloat) { + unsafe { + CGContextSetAlpha(self.as_ptr(), alpha); + } + } } #[test] @@ -708,6 +720,7 @@ extern { fn CGContextEOFillPath(c: ::sys::CGContextRef); fn CGContextClip(c: ::sys::CGContextRef); fn CGContextEOClip(c: ::sys::CGContextRef); + fn CGContextResetClip(c: ::sys::CGContextRef); fn CGContextStrokePath(c: ::sys::CGContextRef); fn CGContextSetRGBFillColor(context: ::sys::CGContextRef, red: CGFloat, @@ -771,5 +784,7 @@ extern { fn CGContextSetShadow(c: ::sys::CGContextRef, offset: CGSize, blur: CGFloat); fn CGContextSetShadowWithColor(c: ::sys::CGContextRef, offset: CGSize, blur: CGFloat, color: ::sys::CGColorRef); + + fn CGContextSetAlpha(c: ::sys::CGContextRef, alpha: CGFloat); } diff --git a/third_party/rust/core-graphics/src/data_provider.rs b/third_party/rust/core-graphics/src/data_provider.rs index e8bb0a024188..38d515e397ff 100644 --- a/third_party/rust/core-graphics/src/data_provider.rs +++ b/third_party/rust/core-graphics/src/data_provider.rs @@ -32,11 +32,11 @@ pub type CGDataProviderGetBytesAtPositionCallback = Option Option { + unsafe { + let image_ref = CGDisplayCreateImageForRect(self.id, bounds); + if !image_ref.is_null() { + Some(CGImage::from_ptr(image_ref)) + } else { + None + } + } + } + /// Returns a composite image based on a dynamically generated list of /// windows. 
#[inline] @@ -604,7 +629,7 @@ impl CGDisplayMode { 16 } else if pixel_encoding.eq_ignore_ascii_case(IO8BitIndexedPixels) { 8 - }else{ + } else { 0 } } @@ -621,6 +646,7 @@ extern "C" { pub static kCGDisplayShowDuplicateLowResolutionModes: CFStringRef; + pub fn CGDisplayModeRetain(mode: ::sys::CGDisplayModeRef); pub fn CGDisplayModeRelease(mode: ::sys::CGDisplayModeRef); pub fn CGMainDisplayID() -> CGDirectDisplayID; @@ -657,7 +683,17 @@ extern "C" { pub fn CGDisplayPixelsWide(display: CGDirectDisplayID) -> libc::size_t; pub fn CGDisplayBounds(display: CGDirectDisplayID) -> CGRect; pub fn CGDisplayCreateImage(display: CGDirectDisplayID) -> ::sys::CGImageRef; + pub fn CGDisplayCreateImageForRect( + display: CGDirectDisplayID, + rect: CGRect, + ) -> ::sys::CGImageRef; + // Capturing and Releasing Displays + pub fn CGDisplayCapture(display: CGDirectDisplayID) -> CGError; + pub fn CGDisplayRelease(display: CGDirectDisplayID) -> CGError; + pub fn CGShieldingWindowLevel() -> CGWindowLevel; + + // Configuring Displays pub fn CGBeginDisplayConfiguration(config: *mut CGDisplayConfigRef) -> CGError; pub fn CGCancelDisplayConfiguration(config: CGDisplayConfigRef) -> CGError; pub fn CGCompleteDisplayConfiguration( @@ -681,6 +717,7 @@ extern "C" { x: i32, y: i32, ) -> CGError; + pub fn CGRestorePermanentDisplayConfiguration(); pub fn CGDisplayCopyDisplayMode(display: CGDirectDisplayID) -> ::sys::CGDisplayModeRef; pub fn CGDisplayModeGetHeight(mode: ::sys::CGDisplayModeRef) -> libc::size_t; @@ -696,6 +733,11 @@ extern "C" { display: CGDirectDisplayID, options: CFDictionaryRef, ) -> CFArrayRef; + pub fn CGDisplaySetDisplayMode( + display: CGDirectDisplayID, + mode: ::sys::CGDisplayModeRef, + options: CFDictionaryRef, + ) -> CGError; // mouse stuff pub fn CGDisplayHideCursor(display: CGDirectDisplayID) -> CGError; @@ -704,6 +746,32 @@ extern "C" { pub fn CGWarpMouseCursorPosition(point: CGPoint) -> CGError; pub fn CGAssociateMouseAndMouseCursorPosition(connected: boolean_t) -> CGError; + // Display Fade Effects + pub fn CGConfigureDisplayFadeEffect( + config: CGDisplayConfigRef, + fadeOutSeconds: CGDisplayFadeInterval, + fadeInSeconds: CGDisplayFadeInterval, + fadeRed: f32, + fadeGreen: f32, + fadeBlue: f32, + ) -> CGError; + pub fn CGAcquireDisplayFadeReservation( + seconds: CGDisplayReservationInterval, + token: *mut CGDisplayFadeReservationToken, + ) -> CGError; + pub fn CGDisplayFade( + token: CGDisplayFadeReservationToken, + duration: CGDisplayFadeInterval, + startBlend: CGDisplayBlendFraction, + endBlend: CGDisplayBlendFraction, + redBlend: f32, + greenBlend: f32, + blueBlend: f32, + synchronous: boolean_t, + ) -> CGError; + // CGDisplayFadeOperationInProgress + pub fn CGReleaseDisplayFadeReservation(token: CGDisplayFadeReservationToken) -> CGError; + // Window Services Reference pub fn CGWindowListCopyWindowInfo( option: CGWindowListOption, diff --git a/third_party/rust/core-graphics/src/event.rs b/third_party/rust/core-graphics/src/event.rs index 8cf9c6262e90..3f2d8c143d2c 100644 --- a/third_party/rust/core-graphics/src/event.rs +++ b/third_party/rust/core-graphics/src/event.rs @@ -505,7 +505,7 @@ impl<'tap_life> CGEventTap<'tap_life> { callback_ref: Box::from_raw(cbr), }) } else { - Box::from_raw(cbr); + let _ = Box::from_raw(cbr); Err(()) } } @@ -518,11 +518,11 @@ impl<'tap_life> CGEventTap<'tap_life> { foreign_type! 
{ #[doc(hidden)] - type CType = ::sys::CGEvent; - fn drop = |p| CFRelease(p as *mut _); - fn clone = |p| CFRetain(p as *const _) as *mut _; - pub struct CGEvent; - pub struct CGEventRef; + pub unsafe type CGEvent { + type CType = ::sys::CGEvent; + fn drop = |p| CFRelease(p as *mut _); + fn clone = |p| CFRetain(p as *const _) as *mut _; + } } impl CGEvent { @@ -607,12 +607,16 @@ impl CGEvent { } } - pub fn location(&self) -> CGPoint { + pub fn post_from_tap(&self, tap_proxy: CGEventTapProxy) { unsafe { - CGEventGetLocation(self.as_ptr()) + CGEventTapPostEvent(tap_proxy, self.as_ptr()); } } + pub fn location(&self) -> CGPoint { + unsafe { CGEventGetLocation(self.as_ptr()) } + } + #[cfg(feature = "elcapitan")] pub fn post_to_pid(&self, pid: libc::pid_t) { unsafe { @@ -734,6 +738,8 @@ extern { /// taps. fn CGEventPost(tapLocation: CGEventTapLocation, event: ::sys::CGEventRef); + fn CGEventTapPostEvent(tapProxy: CGEventTapProxy, event: ::sys::CGEventRef); + #[cfg(feature = "elcapitan")] /// Post an event to a specified process ID fn CGEventPostToPid(pid: libc::pid_t, event: ::sys::CGEventRef); diff --git a/third_party/rust/core-graphics/src/event_source.rs b/third_party/rust/core-graphics/src/event_source.rs index ce13f5a91cfd..6db64a2dca15 100644 --- a/third_party/rust/core-graphics/src/event_source.rs +++ b/third_party/rust/core-graphics/src/event_source.rs @@ -12,11 +12,11 @@ pub enum CGEventSourceStateID { foreign_type! { #[doc(hidden)] - type CType = ::sys::CGEventSource; - fn drop = |p| CFRelease(p as *mut _); - fn clone = |p| CFRetain(p as *const _) as *mut _; - pub struct CGEventSource; - pub struct CGEventSourceRef; + pub unsafe type CGEventSource { + type CType = ::sys::CGEventSource; + fn drop = |p| CFRelease(p as *mut _); + fn clone = |p| CFRetain(p as *const _) as *mut _; + } } impl CGEventSource { diff --git a/third_party/rust/core-graphics/src/font.rs b/third_party/rust/core-graphics/src/font.rs index adf7a1493284..739b8ddf3291 100644 --- a/third_party/rust/core-graphics/src/font.rs +++ b/third_party/rust/core-graphics/src/font.rs @@ -15,6 +15,7 @@ use core_foundation::string::{CFString, CFStringRef}; use core_foundation::dictionary::{CFDictionary, CFDictionaryRef}; use data_provider::CGDataProvider; use geometry::CGRect; +use std::ptr::NonNull; use foreign_types::ForeignType; @@ -24,16 +25,13 @@ pub use core_graphics_types::base::CGGlyph; foreign_type! 
{ #[doc(hidden)] - type CType = ::sys::CGFont; - fn drop = |p| CFRelease(p as *mut _); - fn clone = |p| CFRetain(p as *const _) as *mut _; - pub struct CGFont; - pub struct CGFontRef; + pub unsafe type CGFont: Send + Sync { + type CType = ::sys::CGFont; + fn drop = |p| CFRelease(p as *mut _); + fn clone = |p| CFRetain(p as *const _) as *mut _; + } } -unsafe impl Send for CGFont {} -unsafe impl Sync for CGFont {} - impl CGFont { pub fn type_id() -> CFTypeID { unsafe { @@ -44,10 +42,9 @@ impl CGFont { pub fn from_data_provider(provider: CGDataProvider) -> Result<CGFont, ()> { unsafe { let font_ref = CGFontCreateWithDataProvider(provider.as_ptr()); - if !font_ref.is_null() { - Ok(CGFont::from_ptr(font_ref)) - } else { - Err(()) + match NonNull::new(font_ref) { + Some(font_ref) => Ok(CGFont(font_ref)), + None => Err(()), } } } @@ -55,10 +52,9 @@ impl CGFont { pub fn from_name(name: &CFString) -> Result<CGFont, ()> { unsafe { let font_ref = CGFontCreateWithFontName(name.as_concrete_TypeRef()); - if !font_ref.is_null() { - Ok(CGFont::from_ptr(font_ref)) - } else { - Err(()) + match NonNull::new(font_ref) { + Some(font_ref) => Ok(CGFont(font_ref)), + None => Err(()), } } } @@ -67,10 +63,9 @@ impl CGFont { unsafe { let font_ref = CGFontCreateCopyWithVariations(self.as_ptr(), vars.as_concrete_TypeRef()); - if !font_ref.is_null() { - Ok(CGFont::from_ptr(font_ref)) - } else { - Err(()) + match NonNull::new(font_ref) { + Some(font_ref) => Ok(CGFont(font_ref)), + None => Err(()), } } } diff --git a/third_party/rust/core-graphics/src/gradient.rs b/third_party/rust/core-graphics/src/gradient.rs index 5c6cde846e1d..a4fb2bf613bf 100644 --- a/third_party/rust/core-graphics/src/gradient.rs +++ b/third_party/rust/core-graphics/src/gradient.rs @@ -29,11 +29,11 @@ bitflags! { foreign_type! { #[doc(hidden)] - type CType = ::sys::CGGradient; - fn drop = |p| CFRelease(p as *mut _); - fn clone = |p| CFRetain(p as *const _) as *mut _; - pub struct CGGradient; - pub struct CGGradientRef; + pub unsafe type CGGradient { + type CType = ::sys::CGGradient; + fn drop = |p| CFRelease(p as *mut _); + fn clone = |p| CFRetain(p as *const _) as *mut _; + } } impl CGGradient { diff --git a/third_party/rust/core-graphics/src/image.rs b/third_party/rust/core-graphics/src/image.rs index f1b95096d9eb..9e9323f4a1d2 100644 --- a/third_party/rust/core-graphics/src/image.rs +++ b/third_party/rust/core-graphics/src/image.rs @@ -32,11 +32,11 @@ pub enum CGImageByteOrderInfo { foreign_type! { #[doc(hidden)] - type CType = ::sys::CGImage; - fn drop = CGImageRelease; - fn clone = |p| CFRetain(p as *const _) as *mut _; - pub struct CGImage; - pub struct CGImageRef; + pub unsafe type CGImage { + type CType = ::sys::CGImage; + fn drop = CGImageRelease; + fn clone = |p| CFRetain(p as *const _) as *mut _; + } } impl CGImage { diff --git a/third_party/rust/core-graphics/src/lib.rs b/third_party/rust/core-graphics/src/lib.rs index 5238774e7cec..514558fb481a 100644 --- a/third_party/rust/core-graphics/src/lib.rs +++ b/third_party/rust/core-graphics/src/lib.rs @@ -41,3 +41,5 @@ pub mod private; pub mod image; pub mod path; pub mod sys; +#[cfg(target_os = "macos")] +pub mod access; \ No newline at end of file diff --git a/third_party/rust/core-graphics/src/path.rs b/third_party/rust/core-graphics/src/path.rs index dc1c80464472..c5ac93f40b07 100644 --- a/third_party/rust/core-graphics/src/path.rs +++ b/third_party/rust/core-graphics/src/path.rs @@ -21,11 +21,11 @@ use std::slice; foreign_type!
{ #[doc(hidden)] - type CType = ::sys::CGPath; - fn drop = |p| CFRelease(p as *mut _); - fn clone = |p| CFRetain(p as *const _) as *mut _; - pub struct CGPath; - pub struct CGPathRef; + pub unsafe type CGPath { + type CType = ::sys::CGPath; + fn drop = |p| CFRelease(p as *mut _); + fn clone = |p| CFRetain(p as *const _) as *mut _; + } } impl CGPath { @@ -35,7 +35,7 @@ impl CGPath { None => ptr::null(), Some(transform) => transform as *const CGAffineTransform, }; - CGPath(CGPathCreateWithRect(rect, transform)) + CGPath::from_ptr(CGPathCreateWithRect(rect, transform)) } } diff --git a/third_party/rust/core-text/.cargo-checksum.json b/third_party/rust/core-text/.cargo-checksum.json index 057b26119112..10bc5db09c80 100644 --- a/third_party/rust/core-text/.cargo-checksum.json +++ b/third_party/rust/core-text/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"26a527860e530d18a5ce446ffb595f63a178504506fb4a283993402295053afe","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"98d25015857a430aac32f34bdc979a1a66e672a0ea42c5f92dd9cfe23c1fccfd","src/font.rs":"17d957510eee8d96ef2bc1b5bc97491917ac0336ad3dbf48416dda3685cef579","src/font_collection.rs":"02de0ce2a61683314897a521d31ab9cc572a8b10ceda2ac47181fbe18bf4f235","src/font_descriptor.rs":"43a2fec6bca9689d8172f363ab20ce8c633696974e1653cad2e072e3af776528","src/font_manager.rs":"6e5056a42868187e1f4e696c181ca247423308652d3c68867c708616031876e4","src/frame.rs":"ed1e2aad7be9dafc3e9729f2caecefd4214a9552f834932a414239146142069a","src/framesetter.rs":"13e34b4111cee5f023aa05e2220d2a6f102e96fd18c51a356992bffd6c9fc7c1","src/lib.rs":"1c662e51874eb43ff52a8a1af131d1b2fd84095c3d949a271dc895bf56fd0fc6","src/line.rs":"02fab7f07c3f6a003a7c35ffeb3d37546ae58e4da7fe05f26e520f283397a602","src/run.rs":"b86e9b9b39effe4a79c6002880d95f214742d448029a3111e288734abe75b827","src/string_attributes.rs":"ea0f854d64097d3626a03002323e2276e28affae7a698aaadd89dd6b744dd80f"},"package":"99d74ada66e07c1cefa18f8abfba765b486f250de2e4a999e5727fc0dd4b4a25"} \ No newline at end of file 
+{"files":{"COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"7a776cd901d7f63a97ad6c1576b00f267ad782ef79ff4cdfbc2cb507a2c144d5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"98d25015857a430aac32f34bdc979a1a66e672a0ea42c5f92dd9cfe23c1fccfd","src/font.rs":"149d4646abc01a884e16d45dde77b44d3151cab27c301c65fa22dda638ff3c48","src/font_collection.rs":"02de0ce2a61683314897a521d31ab9cc572a8b10ceda2ac47181fbe18bf4f235","src/font_descriptor.rs":"43a2fec6bca9689d8172f363ab20ce8c633696974e1653cad2e072e3af776528","src/font_manager.rs":"6e5056a42868187e1f4e696c181ca247423308652d3c68867c708616031876e4","src/frame.rs":"ed1e2aad7be9dafc3e9729f2caecefd4214a9552f834932a414239146142069a","src/framesetter.rs":"13e34b4111cee5f023aa05e2220d2a6f102e96fd18c51a356992bffd6c9fc7c1","src/lib.rs":"1c662e51874eb43ff52a8a1af131d1b2fd84095c3d949a271dc895bf56fd0fc6","src/line.rs":"02fab7f07c3f6a003a7c35ffeb3d37546ae58e4da7fe05f26e520f283397a602","src/run.rs":"b86e9b9b39effe4a79c6002880d95f214742d448029a3111e288734abe75b827","src/string_attributes.rs":"ea0f854d64097d3626a03002323e2276e28affae7a698aaadd89dd6b744dd80f"},"package":"02083d15989d6247553e1a63753561555a72b2038bba1e4239db70602a798742"} \ No newline at end of file diff --git a/third_party/rust/core-text/Cargo.toml b/third_party/rust/core-text/Cargo.toml index a674dc3327fb..26b9c510bb02 100644 --- a/third_party/rust/core-text/Cargo.toml +++ b/third_party/rust/core-text/Cargo.toml @@ -3,30 +3,32 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] name = "core-text" -version = "19.2.0" +version = "20.0.0" authors = ["The Servo Project Developers"] description = "Bindings to the Core Text framework." 
-license = "MIT/Apache-2.0" +readme = "README.md" +license = "MIT OR Apache-2.0" repository = "https://github.com/servo/core-foundation-rs" + [package.metadata.docs.rs] default-target = "x86_64-apple-darwin" + [dependencies.core-foundation] version = "0.9" [dependencies.core-graphics] -version = "0.22.0" +version = "0.23.0" [dependencies.foreign-types] -version = "0.3" +version = "0.5" [dependencies.libc] version = "0.2" diff --git a/third_party/rust/core-text/src/font.rs b/third_party/rust/core-text/src/font.rs index ebe6d580f578..87a8a530950a 100644 --- a/third_party/rust/core-text/src/font.rs +++ b/third_party/rust/core-text/src/font.rs @@ -78,6 +78,54 @@ pub const kCTFontOptionsDefault: CTFontOptions = 0; pub const kCTFontOptionsPreventAutoActivation: CTFontOptions = 1 << 0; pub const kCTFontOptionsPreferSystemFont: CTFontOptions = 1 << 2; +pub enum CTFontNameSpecifier { + Copyright, + Family, + SubFamily, + Style, + Unique, + Full, + Version, + PostScript, + Trademark, + Manufacturer, + Designer, + Description, + VendorURL, + DesignerURL, + License, + LicenseURL, + SampleText, + PostScriptCID, +} + +impl Into for CTFontNameSpecifier { + fn into(self) -> CFStringRef { + unsafe { + match self { + CTFontNameSpecifier::Copyright => kCTFontCopyrightNameKey, + CTFontNameSpecifier::Family => kCTFontFamilyNameKey, + CTFontNameSpecifier::SubFamily => kCTFontSubFamilyNameKey, + CTFontNameSpecifier::Style => kCTFontStyleNameKey, + CTFontNameSpecifier::Unique => kCTFontUniqueNameKey, + CTFontNameSpecifier::Full => kCTFontFullNameKey, + CTFontNameSpecifier::Version => kCTFontVersionNameKey, + CTFontNameSpecifier::PostScript => kCTFontPostScriptNameKey, + CTFontNameSpecifier::Trademark => kCTFontTrademarkNameKey, + CTFontNameSpecifier::Manufacturer => kCTFontManufacturerNameKey, + CTFontNameSpecifier::Designer => kCTFontDesignerNameKey, + CTFontNameSpecifier::Description => kCTFontDescriptionNameKey, + CTFontNameSpecifier::VendorURL => kCTFontVendorURLNameKey, + CTFontNameSpecifier::DesignerURL => kCTFontDesignerURLNameKey, + CTFontNameSpecifier::License => kCTFontLicenseNameKey, + CTFontNameSpecifier::LicenseURL => kCTFontLicenseURLNameKey, + CTFontNameSpecifier::SampleText => kCTFontSampleTextNameKey, + CTFontNameSpecifier::PostScriptCID => kCTFontPostScriptCIDNameKey, + } + } + } +} + #[repr(C)] pub struct __CTFont(c_void); @@ -144,6 +192,29 @@ pub fn new_from_name(name: &str, pt_size: f64) -> Result { } } +pub fn new_ui_font_for_language(ui_type: CTFontUIFontType, + size: f64, + language: Option) + -> CTFont { + unsafe { + let font_ref = CTFontCreateUIFontForLanguage( + ui_type, + size, + language.as_ref() + .map(|x| x.as_concrete_TypeRef()) + .unwrap_or(std::ptr::null()), + ); + if font_ref.is_null() { + // CTFontCreateUIFontForLanguage can fail, but is unlikely to do so during + // normal usage (if you pass a bad ui_type it will). To make things more + // convenient, just panic if it fails. 
+ panic!(); + } else { + CTFont::wrap_under_create_rule(font_ref) + } + } +} + impl CTFont { // Properties pub fn symbolic_traits(&self) -> CTFontSymbolicTraits { @@ -198,46 +269,45 @@ impl CTFont { } // Names - pub fn family_name(&self) -> String { + pub fn get_string_by_name_key(&self, name_key: CTFontNameSpecifier) -> Option { unsafe { - let value = get_string_by_name_key(self, kCTFontFamilyNameKey); - value.expect("Fonts should always have a family name.") + let result = CTFontCopyName(self.as_concrete_TypeRef(), name_key.into()); + if result.is_null() { + None + } else { + Some(CFString::wrap_under_create_rule(result).to_string()) + } } } + pub fn family_name(&self) -> String { + let value = self.get_string_by_name_key(CTFontNameSpecifier::Family); + value.expect("Fonts should always have a family name.") + } + pub fn face_name(&self) -> String { - unsafe { - let value = get_string_by_name_key(self, kCTFontSubFamilyNameKey); - value.expect("Fonts should always have a face name.") - } + let value = self.get_string_by_name_key(CTFontNameSpecifier::SubFamily); + value.expect("Fonts should always have a face name.") } pub fn unique_name(&self) -> String { - unsafe { - let value = get_string_by_name_key(self, kCTFontUniqueNameKey); - value.expect("Fonts should always have a unique name.") - } + let value = self.get_string_by_name_key(CTFontNameSpecifier::Unique); + value.expect("Fonts should always have a unique name.") } pub fn postscript_name(&self) -> String { - unsafe { - let value = get_string_by_name_key(self, kCTFontPostScriptNameKey); - value.expect("Fonts should always have a PostScript name.") - } + let value = self.get_string_by_name_key(CTFontNameSpecifier::PostScript); + value.expect("Fonts should always have a PostScript name.") } pub fn display_name(&self) -> String { - unsafe { - let value = get_string_by_name_key(self, kCTFontFullNameKey); - value.expect("Fonts should always have a PostScript name.") - } + let value = self.get_string_by_name_key(CTFontNameSpecifier::Full); + value.expect("Fonts should always have a PostScript name.") } pub fn style_name(&self) -> String { - unsafe { - let value = get_string_by_name_key(self, kCTFontStyleNameKey); - value.expect("Fonts should always have a style name.") - } + let value = self.get_string_by_name_key(CTFontNameSpecifier::Style); + value.expect("Fonts should always have a style name.") } pub fn all_traits(&self) -> CTFontTraits { @@ -437,30 +507,17 @@ impl CTFont { } // Helper methods -fn get_string_by_name_key(font: &CTFont, name_key: CFStringRef) -> Option { - unsafe { - let result = CTFontCopyName(font.as_concrete_TypeRef(), name_key); - if result.is_null() { - None - } else { - Some(CFString::wrap_under_create_rule(result).to_string()) - } - } -} - pub fn debug_font_names(font: &CTFont) { - fn get_key(font: &CTFont, key: CFStringRef) -> String { - get_string_by_name_key(font, key).unwrap() + fn get_key(font: &CTFont, key: CTFontNameSpecifier) -> String { + font.get_string_by_name_key(key).unwrap() } - unsafe { - println!("kCTFontFamilyNameKey: {}", get_key(font, kCTFontFamilyNameKey)); - println!("kCTFontSubFamilyNameKey: {}", get_key(font, kCTFontSubFamilyNameKey)); - println!("kCTFontStyleNameKey: {}", get_key(font, kCTFontStyleNameKey)); - println!("kCTFontUniqueNameKey: {}", get_key(font, kCTFontUniqueNameKey)); - println!("kCTFontFullNameKey: {}", get_key(font, kCTFontFullNameKey)); - println!("kCTFontPostScriptNameKey: {}", get_key(font, kCTFontPostScriptNameKey)); - } + println!("kCTFontFamilyNameKey: {}", 
get_key(font, CTFontNameSpecifier::Family)); + println!("kCTFontSubFamilyNameKey: {}", get_key(font, CTFontNameSpecifier::SubFamily)); + println!("kCTFontStyleNameKey: {}", get_key(font, CTFontNameSpecifier::Style)); + println!("kCTFontUniqueNameKey: {}", get_key(font, CTFontNameSpecifier::Unique)); + println!("kCTFontFullNameKey: {}", get_key(font, CTFontNameSpecifier::Full)); + println!("kCTFontPostScriptNameKey: {}", get_key(font, CTFontNameSpecifier::PostScript)); } pub fn debug_font_traits(font: &CTFont) { @@ -494,28 +551,30 @@ extern { */ /* Name Specifier Constants */ - //static kCTFontCopyrightNameKey: CFStringRef; + static kCTFontCopyrightNameKey: CFStringRef; static kCTFontFamilyNameKey: CFStringRef; static kCTFontSubFamilyNameKey: CFStringRef; static kCTFontStyleNameKey: CFStringRef; static kCTFontUniqueNameKey: CFStringRef; static kCTFontFullNameKey: CFStringRef; - //static kCTFontVersionNameKey: CFStringRef; + static kCTFontVersionNameKey: CFStringRef; static kCTFontPostScriptNameKey: CFStringRef; - //static kCTFontTrademarkNameKey: CFStringRef; - //static kCTFontManufacturerNameKey: CFStringRef; - //static kCTFontDesignerNameKey: CFStringRef; - //static kCTFontDescriptionNameKey: CFStringRef; - //static kCTFontVendorURLNameKey: CFStringRef; - //static kCTFontDesignerURLNameKey: CFStringRef; - //static kCTFontLicenseNameKey: CFStringRef; - //static kCTFontLicenseURLNameKey: CFStringRef; - //static kCTFontSampleTextNameKey: CFStringRef; - //static kCTFontPostScriptCIDNameKey: CFStringRef; + static kCTFontTrademarkNameKey: CFStringRef; + static kCTFontManufacturerNameKey: CFStringRef; + static kCTFontDesignerNameKey: CFStringRef; + static kCTFontDescriptionNameKey: CFStringRef; + static kCTFontVendorURLNameKey: CFStringRef; + static kCTFontDesignerURLNameKey: CFStringRef; + static kCTFontLicenseNameKey: CFStringRef; + static kCTFontLicenseURLNameKey: CFStringRef; + static kCTFontSampleTextNameKey: CFStringRef; + static kCTFontPostScriptCIDNameKey: CFStringRef; - //static kCTFontVariationAxisIdentifierKey: CFStringRef; + #[cfg(test)] + static kCTFontVariationAxisIdentifierKey: CFStringRef; //static kCTFontVariationAxisMinimumValueKey: CFStringRef; - //static kCTFontVariationAxisMaximumValueKey: CFStringRef; + #[cfg(test)] + static kCTFontVariationAxisMaximumValueKey: CFStringRef; //static kCTFontVariationAxisDefaultValueKey: CFStringRef; //static kCTFontVariationAxisNameKey: CFStringRef; @@ -539,7 +598,6 @@ extern { fn CTFontCreateWithFontDescriptor(descriptor: CTFontDescriptorRef, size: CGFloat, matrix: *const CGAffineTransform) -> CTFontRef; //fn CTFontCreateWithFontDescriptorAndOptions - #[cfg(test)] fn CTFontCreateUIFontForLanguage(uiType: CTFontUIFontType, size: CGFloat, language: CFStringRef) -> CTFontRef; fn CTFontCreateCopyWithAttributes(font: CTFontRef, size: CGFloat, matrix: *const CGAffineTransform, attributes: CTFontDescriptorRef) -> CTFontRef; @@ -691,11 +749,9 @@ fn macos_version() -> (i32, i32, i32) { #[test] fn copy_system_font() { - let small = unsafe { - CTFont::wrap_under_create_rule( - CTFontCreateUIFontForLanguage(kCTFontSystemDetailFontType, 19., std::ptr::null()) - ) - }; + use crate::*; + + let small = new_ui_font_for_language(kCTFontSystemDetailFontType, 19., None); let big = small.clone_with_font_size(20.); // ensure that we end up with different fonts for the different sizes before 10.15 @@ -712,6 +768,12 @@ fn copy_system_font() { let ctfont = new_from_descriptor(&desc, 20.); assert_eq!(big.postscript_name(), ctfont.postscript_name()); + // check that 
we can construct a new version by attributes + let attr = desc.attributes(); + let desc_from_attr = font_descriptor::new_from_attributes(&attr); + let font_from_attr = new_from_descriptor(&desc_from_attr, 19.); + assert_eq!(font_from_attr.postscript_name(), small.postscript_name()); + // on newer versions of macos we can't construct by name anymore if macos_version() < (10, 13, 0) { let ui_font_by_name = new_from_name(&small.postscript_name(), 19.).unwrap(); @@ -741,4 +803,165 @@ fn copy_system_font() { assert!(matching.attributes().find(CFString::from_static_string("NSFontSizeAttribute")).is_none()); assert_eq!(small.postscript_name(), cgfont.postscript_name()); -} \ No newline at end of file +} + +// Tests what happens when variations have values not inbetween min and max +#[test] +fn out_of_range_variations() { + use crate::*; + + let small = new_ui_font_for_language(kCTFontSystemDetailFontType, 19., None); + + let axes = small.get_variation_axes(); + if macos_version() < (10, 12, 0) { + assert!(axes.is_none()); + return; + } + let axes = axes.unwrap(); + let mut vals = Vec::new(); + dbg!(&axes); + for axis in axes.iter() { + let tag = axis.find(unsafe { kCTFontVariationAxisIdentifierKey } ) + .unwrap().downcast::().unwrap().to_i64().unwrap(); + let max = axis.find(unsafe { kCTFontVariationAxisMaximumValueKey } ) + .unwrap().downcast::().unwrap().to_f64().unwrap(); + vals.push((CFNumber::from(tag), CFNumber::from(max + 1.))); + + } + let vals_dict = CFDictionary::from_CFType_pairs(&vals); + let variation_attribute = unsafe { CFString::wrap_under_get_rule(font_descriptor::kCTFontVariationAttribute) }; + let attrs_dict = CFDictionary::from_CFType_pairs(&[(variation_attribute.clone(), vals_dict)]); + let ct_var_font_desc = small.copy_descriptor().create_copy_with_attributes(attrs_dict.to_untyped()).unwrap(); + let variation_font = crate::font::new_from_descriptor(&ct_var_font_desc, 19.); + let var_desc = variation_font.copy_descriptor(); + let var_attrs = var_desc.attributes(); + dbg!(&var_attrs); + // attributes greater than max are dropped on macOS <= 11 + // on macOS 12 they seem to be preserved as is. 
+ let var_attrs = var_attrs.find(variation_attribute); + if macos_version() >= (12, 0, 0) { + let var_attrs = var_attrs.unwrap().downcast::<CFDictionary>().unwrap(); + assert!(!var_attrs.is_empty()); + let var_attrs: CFDictionary<CFType, CFType> = unsafe { std::mem::transmute(var_attrs) }; + // attributes greater than max remain + for axis in axes.iter() { + let tag = axis.find(unsafe { kCTFontVariationAxisIdentifierKey } ) + .unwrap(); + let max = axis.find(unsafe { kCTFontVariationAxisMaximumValueKey } ) + .unwrap().downcast::<CFNumber>().unwrap().to_f64().unwrap(); + let val = var_attrs.find(tag.clone()).unwrap().downcast::<CFNumber>().unwrap().to_f64().unwrap(); + assert_eq!(val, max + 1.); + } + } else if macos_version() >= (10, 15, 0) { + assert!(var_attrs.is_none()); + } else { + let var_attrs = var_attrs.unwrap().downcast::<CFDictionary>().unwrap(); + assert!(var_attrs.is_empty()); + } +} + +#[test] +fn equal_descriptor_different_font() { + use crate::*; + + let variation_attribute = unsafe { CFString::wrap_under_get_rule(font_descriptor::kCTFontVariationAttribute) }; + let size_attribute = unsafe { CFString::wrap_under_get_rule(font_descriptor::kCTFontSizeAttribute) }; + + let sys_font = new_ui_font_for_language(kCTFontSystemDetailFontType, 19., None); + + + // but we can still construct the CGFont by name + let create_vars = |desc| { + let mut vals: Vec<(CFNumber, CFNumber)> = Vec::new(); + vals.push((CFNumber::from(0x6f70737a), CFNumber::from(17.))); + let vals_dict = CFDictionary::from_CFType_pairs(&vals); + let attrs_dict = CFDictionary::from_CFType_pairs(&[(variation_attribute.clone(), vals_dict)]); + let size_attrs_dict = CFDictionary::from_CFType_pairs(&[(size_attribute.clone(), CFNumber::from(120.))]); + dbg!(&desc); + let from_font_desc = new_from_descriptor(&desc, 120.).copy_descriptor(); + let resized_font_desc = desc.create_copy_with_attributes(size_attrs_dict.to_untyped()).unwrap(); + if macos_version() >= (11, 0, 0) { + assert_eq!(from_font_desc, resized_font_desc); + } else { + // we won't have a name if we're using system font desc + if from_font_desc.attributes().find(unsafe { font_descriptor::kCTFontNameAttribute }).is_none() { + // it's surprising that the descs are not equal but the attributes are + assert_ne!(from_font_desc, resized_font_desc); + assert_eq!(from_font_desc.attributes().to_untyped(), resized_font_desc.attributes().to_untyped()); + } else { + if macos_version() >= (10, 13, 0) { + // this is unsurprising + assert_ne!(from_font_desc, resized_font_desc); + assert_ne!(from_font_desc.attributes().to_untyped(), resized_font_desc.attributes().to_untyped()); + } else { + assert_ne!(from_font_desc, resized_font_desc); + assert_eq!(from_font_desc.attributes().to_untyped(), resized_font_desc.attributes().to_untyped()); + } + } + } + + let from_font_desc = from_font_desc.create_copy_with_attributes(attrs_dict.to_untyped()).unwrap(); + let resized_font_desc = resized_font_desc.create_copy_with_attributes(attrs_dict.to_untyped()).unwrap(); + (from_font_desc, resized_font_desc) + }; + + // setting the variation works properly if we use system font desc + let (from_font_desc, resized_font_desc) = create_vars(sys_font.copy_descriptor()); + assert_eq!(from_font_desc, resized_font_desc); + assert!(resized_font_desc.attributes().find(variation_attribute.clone()).is_some()); + + // but doesn't if we refer to it by name + let ps = sys_font.postscript_name(); + let cgfont = CGFont::from_name(&CFString::new(&ps)).unwrap(); + let ctfont = new_from_CGFont(&cgfont, 0.); + + let (from_font_desc, resized_font_desc) = 
create_vars(ctfont.copy_descriptor()); + if macos_version() >= (10, 15, 0) { + assert_ne!(from_font_desc, resized_font_desc); + } + + if macos_version() >= (10, 13, 0) { + assert!(from_font_desc.attributes().find(variation_attribute.clone()).is_some()); + if macos_version() >= (11, 0, 0) { + assert!(resized_font_desc.attributes().find(variation_attribute).is_none()); + } else { + assert!(resized_font_desc.attributes().find(variation_attribute).is_some()); + }; + } +} + +#[test] +fn system_font_variation() { + use crate::*; + + let small = new_ui_font_for_language(kCTFontSystemDetailFontType, 19., None); + + // but we can still construct the CGFont by name + let ps = small.postscript_name(); + let cgfont = CGFont::from_name(&CFString::new(&ps)).unwrap(); + let cgfont = new_from_CGFont(&cgfont, 0.); + let desc = cgfont.copy_descriptor(); + + let mut vals: Vec<(CFNumber, CFNumber)> = Vec::new(); + vals.push((CFNumber::from(0x6f70737a /* opsz */), CFNumber::from(17.))); + let vals_dict = CFDictionary::from_CFType_pairs(&vals); + let variation_attribute = unsafe { CFString::wrap_under_get_rule(font_descriptor::kCTFontVariationAttribute) }; + let attrs_dict = CFDictionary::from_CFType_pairs(&[(variation_attribute, vals_dict)]); + let ct_var_font_desc = desc.create_copy_with_attributes(attrs_dict.to_untyped()).unwrap(); + let attrs = ct_var_font_desc.attributes(); + let var_attr = attrs.find(CFString::from_static_string("NSCTFontVariationAttribute")); + if macos_version() >= (11, 0, 0) { + // the variation goes away + assert!(var_attr.is_none()); + } else { + assert!(var_attr.is_some()); + } + + dbg!(ct_var_font_desc); +} + +#[test] +fn ui_font() { + // pass some garbagey inputs + new_ui_font_for_language(kCTFontSystemDetailFontType, 10000009., Some(CFString::from("Gofld"))); +} diff --git a/third_party/rust/foreign-types-macros/.cargo-checksum.json b/third_party/rust/foreign-types-macros/.cargo-checksum.json new file mode 100644 index 000000000000..576d885de8f6 --- /dev/null +++ b/third_party/rust/foreign-types-macros/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"829c9da0910b673669e0401c6affd6564c94c49cf9629740104ffabf46f54d08","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"333ea3aaa3cadb819f4acd9f9153f9feee060a995ca8710f32bc5bd9a4b91734","src/build.rs":"bee8027592dff9052e1235a6b1bfd0ae715389265247131c3e1ed865e561ba40","src/lib.rs":"7bb43075bede4947ee8f07a05e4d1b00e0daedadbde15f1b386f97514853ad1c","src/parse.rs":"d8cc189b7108602dee81ff5436a9480dd45397101f7faad875027441e1477ef1"},"package":"1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742"} \ No newline at end of file diff --git a/third_party/rust/foreign-types-macros/Cargo.toml b/third_party/rust/foreign-types-macros/Cargo.toml new file mode 100644 index 000000000000..584d1844c5c2 --- /dev/null +++ b/third_party/rust/foreign-types-macros/Cargo.toml @@ -0,0 +1,35 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "foreign-types-macros" +version = "0.2.3" +authors = ["Steven Fackler "] +description = "An internal crate used by foreign-types" +license = "MIT/Apache-2.0" +repository = "https://github.com/sfackler/foreign-types" + +[lib] +proc-macro = true + +[dependencies.proc-macro2] +version = "1.0" + +[dependencies.quote] +version = "1.0" + +[dependencies.syn] +version = "2.0" +features = ["full"] + +[features] +std = [] diff --git a/third_party/rust/foreign-types-macros/LICENSE-APACHE b/third_party/rust/foreign-types-macros/LICENSE-APACHE new file mode 100644 index 000000000000..8f71f43fee3f --- /dev/null +++ b/third_party/rust/foreign-types-macros/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/third_party/rust/foreign-types-macros/LICENSE-MIT b/third_party/rust/foreign-types-macros/LICENSE-MIT new file mode 100644 index 000000000000..bb76d014691e --- /dev/null +++ b/third_party/rust/foreign-types-macros/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright (c) 2017 The foreign-types Developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/rust/foreign-types-macros/src/build.rs b/third_party/rust/foreign-types-macros/src/build.rs new file mode 100644 index 000000000000..b50610f0f32b --- /dev/null +++ b/third_party/rust/foreign-types-macros/src/build.rs @@ -0,0 +1,261 @@ +use proc_macro2::TokenStream; +use quote::quote; +use syn::{Ident, Path}; + +use crate::parse::{ForeignType, Input}; + +fn ref_name(input: &ForeignType) -> Ident { + Ident::new(&format!("{}Ref", input.name), input.name.span()) +} + +pub fn build(input: Input) -> TokenStream { + let types = input + .types + .iter() + .map(|t| build_foreign_type(&input.crate_, t)); + quote! 
{ + #(#types)* + } +} + +fn build_foreign_type(crate_: &Path, input: &ForeignType) -> TokenStream { + let decls = build_decls(crate_, input); + let oibits = build_oibits(crate_, input); + let foreign_impls = build_foreign_impls(crate_, input); + let drop_impl = build_drop_impl(crate_, input); + let deref_impls = build_deref_impls(crate_, input); + let borrow_impls = build_borrow_impls(crate_, input); + let as_ref_impls = build_as_ref_impls(crate_, input); + let clone_impl = build_clone_impl(crate_, input); + let to_owned_impl = build_to_owned_impl(crate_, input); + + quote! { + #decls + #oibits + #foreign_impls + #drop_impl + #deref_impls + #borrow_impls + #as_ref_impls + #clone_impl + #to_owned_impl + } +} + +fn build_decls(crate_: &Path, input: &ForeignType) -> TokenStream { + let attrs = &input.attrs; + let vis = &input.visibility; + let name = &input.name; + let generics = &input.generics; + let ctype = &input.ctype; + let phantom_data = input + .phantom_data + .as_ref() + .map(|d| quote!(, #crate_::export::PhantomData<#d>)); + let ref_name = ref_name(input); + let ref_docs = format!( + "A borrowed reference to a [`{name}`](struct.{name}.html).", + name = name + ); + + quote! { + #(#attrs)* + #[repr(transparent)] + #vis struct #name #generics(#crate_::export::NonNull<#ctype> #phantom_data); + + #[doc = #ref_docs] + #vis struct #ref_name #generics(#crate_::Opaque #phantom_data); + } +} + +fn build_oibits(crate_: &Path, input: &ForeignType) -> TokenStream { + let oibits = input.oibits.iter().map(|t| build_oibit(crate_, input, t)); + + quote! { + #(#oibits)* + } +} + +fn build_oibit(crate_: &Path, input: &ForeignType, oibit: &Ident) -> TokenStream { + let name = &input.name; + let ref_name = ref_name(input); + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + + quote! { + unsafe impl #impl_generics #crate_::export::#oibit for #name #ty_generics {} + unsafe impl #impl_generics #crate_::export::#oibit for #ref_name #ty_generics {} + } +} + +fn build_foreign_impls(crate_: &Path, input: &ForeignType) -> TokenStream { + let name = &input.name; + let ctype = &input.ctype; + let ref_name = ref_name(input); + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + let phantom_data = input + .phantom_data + .as_ref() + .map(|_| quote!(, #crate_::export::PhantomData)); + + quote! { + unsafe impl #impl_generics #crate_::ForeignType for #name #ty_generics { + type CType = #ctype; + type Ref = #ref_name #ty_generics; + + #[inline] + unsafe fn from_ptr(ptr: *mut #ctype) -> #name #ty_generics { + debug_assert!(!ptr.is_null()); + #name(<#crate_::export::NonNull<_>>::new_unchecked(ptr) #phantom_data) + } + + #[inline] + fn as_ptr(&self) -> *mut #ctype { + <#crate_::export::NonNull<_>>::as_ptr(self.0) + } + } + + unsafe impl #impl_generics #crate_::ForeignTypeRef for #ref_name #ty_generics { + type CType = #ctype; + } + } +} + +fn build_drop_impl(crate_: &Path, input: &ForeignType) -> TokenStream { + let name = &input.name; + let drop = &input.drop; + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + + quote! { + impl #impl_generics #crate_::export::Drop for #name #ty_generics { + #[inline] + fn drop(&mut self) { + unsafe { + (#drop)(#crate_::ForeignType::as_ptr(self)); + } + } + } + } +} + +fn build_deref_impls(crate_: &Path, input: &ForeignType) -> TokenStream { + let name = &input.name; + let ref_name = ref_name(input); + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + + quote! 
{ + impl #impl_generics #crate_::export::Deref for #name #ty_generics { + type Target = #ref_name #ty_generics; + + #[inline] + fn deref(&self) -> &#ref_name #ty_generics { + unsafe { + #crate_::ForeignTypeRef::from_ptr(#crate_::ForeignType::as_ptr(self)) + } + } + } + + impl #impl_generics #crate_::export::DerefMut for #name #ty_generics { + #[inline] + fn deref_mut(&mut self) -> &mut #ref_name #ty_generics { + unsafe { + #crate_::ForeignTypeRef::from_ptr_mut(#crate_::ForeignType::as_ptr(self)) + } + } + } + } +} + +fn build_borrow_impls(crate_: &Path, input: &ForeignType) -> TokenStream { + let name = &input.name; + let ref_name = ref_name(input); + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + + quote! { + impl #impl_generics #crate_::export::Borrow<#ref_name #ty_generics> for #name #ty_generics { + #[inline] + fn borrow(&self) -> &#ref_name #ty_generics { + &**self + } + } + + impl #impl_generics #crate_::export::BorrowMut<#ref_name #ty_generics> for #name #ty_generics { + #[inline] + fn borrow_mut(&mut self) -> &mut #ref_name #ty_generics { + &mut **self + } + } + } +} + +fn build_as_ref_impls(crate_: &Path, input: &ForeignType) -> TokenStream { + let name = &input.name; + let ref_name = ref_name(input); + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + + quote! { + impl #impl_generics #crate_::export::AsRef<#ref_name #ty_generics> for #name #ty_generics { + #[inline] + fn as_ref(&self) -> &#ref_name #ty_generics { + &**self + } + } + + impl #impl_generics #crate_::export::AsMut<#ref_name #ty_generics> for #name #ty_generics { + #[inline] + fn as_mut(&mut self) -> &mut #ref_name #ty_generics { + &mut **self + } + } + } +} + +fn build_clone_impl(crate_: &Path, input: &ForeignType) -> TokenStream { + let clone = match &input.clone { + Some(clone) => clone, + None => return quote!(), + }; + let name = &input.name; + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + + quote! { + impl #impl_generics #crate_::export::Clone for #name #ty_generics { + #[inline] + fn clone(&self) -> #name #ty_generics { + unsafe { + let ptr = (#clone)(#crate_::ForeignType::as_ptr(self)); + #crate_::ForeignType::from_ptr(ptr) + } + } + } + } +} + +#[cfg(feature = "std")] +fn build_to_owned_impl(crate_: &Path, input: &ForeignType) -> TokenStream { + let clone = match &input.clone { + Some(clone) => clone, + None => return quote!(), + }; + let name = &input.name; + let ref_name = ref_name(input); + let (impl_generics, ty_generics, _) = input.generics.split_for_impl(); + + quote! 
{ + impl #impl_generics #crate_::export::ToOwned for #ref_name #ty_generics { + type Owned = #name #ty_generics; + + #[inline] + fn to_owned(&self) -> #name #ty_generics { + unsafe { + let ptr = (#clone)(#crate_::ForeignTypeRef::as_ptr(self)); + #crate_::ForeignType::from_ptr(ptr) + } + } + } + } +} + +#[cfg(not(feature = "std"))] +fn build_to_owned_impl(_: &Path, _: &ForeignType) -> TokenStream { + quote!() +} diff --git a/third_party/rust/foreign-types-macros/src/lib.rs b/third_party/rust/foreign-types-macros/src/lib.rs new file mode 100644 index 000000000000..77c1e99fbcae --- /dev/null +++ b/third_party/rust/foreign-types-macros/src/lib.rs @@ -0,0 +1,15 @@ +extern crate proc_macro; + +use proc_macro::TokenStream; +use syn::parse_macro_input; + +use crate::parse::Input; + +mod build; +mod parse; + +#[proc_macro] +pub fn foreign_type_impl(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as Input); + build::build(input).into() +} diff --git a/third_party/rust/foreign-types-macros/src/parse.rs b/third_party/rust/foreign-types-macros/src/parse.rs new file mode 100644 index 000000000000..ba11dc976c41 --- /dev/null +++ b/third_party/rust/foreign-types-macros/src/parse.rs @@ -0,0 +1,140 @@ +use syn::parse::{self, Parse, ParseStream}; +use syn::punctuated::Punctuated; +use syn::token; +use syn::{braced, Attribute, Expr, Generics, Ident, Path, Token, Type, Visibility}; + +mod kw { + syn::custom_keyword!(Sync); + syn::custom_keyword!(Send); + syn::custom_keyword!(PhantomData); + syn::custom_keyword!(CType); + syn::custom_keyword!(drop); + syn::custom_keyword!(clone); +} + +pub struct Input { + pub crate_: Path, + pub types: Vec<ForeignType>, +} + +impl Parse for Input { + fn parse(input: ParseStream) -> parse::Result<Input> { + let crate_ = input.parse()?; + let mut types = vec![]; + while !input.is_empty() { + types.push(input.parse()?); + } + + Ok(Input { crate_, types }) + } +} + +pub struct ForeignType { + pub attrs: Vec<Attribute>, + pub visibility: Visibility, + pub name: Ident, + pub generics: Generics, + pub oibits: Punctuated<Ident, Token![+]>, + pub phantom_data: Option<Type>, + pub ctype: Type, + pub drop: Expr, + pub clone: Option<Expr>, +} + +impl Parse for ForeignType { + fn parse(input: ParseStream) -> parse::Result<ForeignType> { + let attrs = input.call(Attribute::parse_outer)?; + let visibility = input.parse()?; + input.parse::<Token![unsafe]>()?; + input.parse::<Token![type]>()?; + let name = input.parse()?; + let generics = input.parse()?; + let oibits = input.call(parse_oibits)?; + let inner; + braced!(inner in input); + let ctype = inner.call(parse_type::<kw::CType>)?; + let phantom_data = inner.call(parse_phantom_data)?; + let drop = inner.call(parse_fn::<kw::drop>)?; + let clone = inner.call(parse_clone)?; + + Ok(ForeignType { + attrs, + visibility, + name, + generics, + oibits, + ctype, + phantom_data, + drop, + clone, + }) + } +} + +fn parse_oibit(input: ParseStream) -> parse::Result<Ident> { + let lookahead = input.lookahead1(); + if lookahead.peek(kw::Sync) || lookahead.peek(kw::Send) { + input.parse() + } else { + Err(lookahead.error()) + } +} + +fn parse_oibits(input: ParseStream) -> parse::Result<Punctuated<Ident, Token![+]>> { + let mut out = Punctuated::new(); + + if input.parse::<Option<Token![:]>>()?.is_some() { + loop { + out.push_value(input.call(parse_oibit)?); + if input.peek(token::Brace) { + break; + } + out.push_punct(input.parse()?); + if input.peek(token::Brace) { + break; + } + } + } + + Ok(out) +} + +fn parse_type<T>(input: ParseStream) -> parse::Result<Type> +where + T: Parse, +{ + input.parse::<Token![type]>()?; + input.parse::<T>()?; + input.parse::<Token![=]>()?; + let type_ = input.parse()?; + input.parse::<Token![;]>()?; + Ok(type_) +} + +fn 
parse_phantom_data(input: ParseStream) -> parse::Result<Option<Type>> { + if input.peek(Token![type]) && input.peek2(kw::PhantomData) { + input.call(parse_type::<kw::PhantomData>).map(Some) + } else { + Ok(None) + } +} + +fn parse_fn<T>(input: ParseStream) -> parse::Result<Expr> +where + T: Parse, +{ + input.parse::<Token![fn]>()?; + input.parse::<T>()?; + input.parse::<Token![=]>()?; + let path = input.parse()?; + input.parse::<Token![;]>()?; + Ok(path) +} + +fn parse_clone(input: ParseStream) -> parse::Result<Option<Expr>> { + if input.peek(Token![fn]) && input.peek2(kw::clone) { + input.call(parse_fn::<kw::clone>).map(Some) + } else { + Ok(None) + } +} diff --git a/third_party/rust/foreign-types-shared/.cargo-checksum.json b/third_party/rust/foreign-types-shared/.cargo-checksum.json index 2d6d8919ab62..c2aee3a118c3 100644 --- a/third_party/rust/foreign-types-shared/.cargo-checksum.json +++ b/third_party/rust/foreign-types-shared/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"6b91ae600fe7b521537096b2269ab7bfd606f8a1fcd97749634dd03ba18daf53","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"333ea3aaa3cadb819f4acd9f9153f9feee060a995ca8710f32bc5bd9a4b91734","src/lib.rs":"77aed289fd36258c273f033e766e572ee05330249083c017d045f0a75662f5df"},"package":"00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"} \ No newline at end of file +{"files":{"Cargo.toml":"096ac915be03aa1eb99b12ad4c5552a50bd550de4d1fb9a52e0799319f98f994","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"333ea3aaa3cadb819f4acd9f9153f9feee060a995ca8710f32bc5bd9a4b91734","src/lib.rs":"60aa483e41abdbdc3b5057e3ff75abd63d0e92263d93a50d8fd739910a0b19e8"},"package":"aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b"} \ No newline at end of file diff --git a/third_party/rust/foreign-types-shared/Cargo.toml b/third_party/rust/foreign-types-shared/Cargo.toml index e2cb783b98b1..e029e2c6d77f 100644 --- a/third_party/rust/foreign-types-shared/Cargo.toml +++ b/third_party/rust/foreign-types-shared/Cargo.toml @@ -3,16 +3,16 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] +edition = "2018" name = "foreign-types-shared" -version = "0.1.1" +version = "0.3.1" authors = ["Steven Fackler "] description = "An internal crate used by foreign-types" license = "MIT/Apache-2.0" diff --git a/third_party/rust/foreign-types-shared/src/lib.rs b/third_party/rust/foreign-types-shared/src/lib.rs index cbebc335348c..2f58b53f18a4 100644 --- a/third_party/rust/foreign-types-shared/src/lib.rs +++ b/third_party/rust/foreign-types-shared/src/lib.rs @@ -2,17 +2,29 @@ #![no_std] #![warn(missing_docs)] -#![doc(html_root_url="https://docs.rs/foreign-types-shared/0.1")] +#![doc(html_root_url = "https://docs.rs/foreign-types-shared/0.3")] use core::cell::UnsafeCell; +use core::marker::PhantomData; +use core::mem; /// An opaque type used to define `ForeignTypeRef` types. 
/// /// A type implementing `ForeignTypeRef` should simply be a newtype wrapper around this type. -pub struct Opaque(UnsafeCell<()>); +pub struct Opaque(PhantomData<UnsafeCell<*mut ()>>); /// A type implemented by wrappers over foreign types. -pub trait ForeignType: Sized { +/// +/// # Safety +/// +/// Implementations of `ForeignType` must guarantee the following: +/// - `Self::from_ptr(x).as_ptr() == x` +/// - `Self::from_ptr(x).into_ptr(x) == x` +/// - `Self::from_ptr(x).deref().as_ptr(x) == x` +/// - `Self::from_ptr(x).deref_mut().as_ptr(x) == x` +/// - `Self::from_ptr(x).as_ref().as_ptr(x) == x` +/// - `Self::from_ptr(x).as_mut().as_ptr(x) == x` +pub unsafe trait ForeignType: Sized { /// The raw C type. type CType; @@ -20,26 +32,55 @@ pub trait ForeignType: Sized { type Ref: ForeignTypeRef; /// Constructs an instance of this type from its raw type. + /// + /// # Safety + /// + /// `ptr` must be a valid, owned instance of the native type. unsafe fn from_ptr(ptr: *mut Self::CType) -> Self; /// Returns a raw pointer to the wrapped value. fn as_ptr(&self) -> *mut Self::CType; + + /// Consumes the wrapper and returns the raw pointer. + #[inline] + fn into_ptr(self) -> *mut Self::CType { + let ptr = self.as_ptr(); + mem::forget(self); + ptr + } } /// A trait implemented by types which reference borrowed foreign types. -pub trait ForeignTypeRef: Sized { +/// +/// # Safety +/// +/// Implementations of `ForeignTypeRef` must guarantee the following: +/// +/// - `Self::from_ptr(x).as_ptr() == x` +/// - `Self::from_mut_ptr(x).as_ptr() == x` +pub unsafe trait ForeignTypeRef: Sized { /// The raw C type. type CType; /// Constructs a shared instance of this type from its raw type. + /// + /// # Safety + /// + /// `ptr` must be a valid, immutable, instance of the type for the `'a` lifetime. #[inline] unsafe fn from_ptr<'a>(ptr: *mut Self::CType) -> &'a Self { + debug_assert!(!ptr.is_null()); &*(ptr as *mut _) } /// Constructs a mutable reference of this type from its raw type. + /// + /// # Safety + /// + /// `ptr` must be a valid, unique, instance of the type for the `'a` lifetime. 
#[inline] unsafe fn from_ptr_mut<'a>(ptr: *mut Self::CType) -> &'a mut Self { + debug_assert!(!ptr.is_null()); &mut *(ptr as *mut _) } diff --git a/third_party/rust/foreign-types/.cargo-checksum.json b/third_party/rust/foreign-types/.cargo-checksum.json index 7e74bd59b4e5..978653cf7c91 100644 --- a/third_party/rust/foreign-types/.cargo-checksum.json +++ b/third_party/rust/foreign-types/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"1a667c28f60115423b68fea369255c2dcb129b8dce10f30e0e1da73d05f9adab","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"333ea3aaa3cadb819f4acd9f9153f9feee060a995ca8710f32bc5bd9a4b91734","README.md":"6f3f1429f2724a481df811842f318d0d3b83160ada953fd869d4b685f7fd72e4","src/lib.rs":"4f0a33bf8ec94a57d1b71e50f1af8ca410985e447f69fdc9c680545b03ea4566"},"package":"f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"} \ No newline at end of file +{"files":{"Cargo.toml":"2e1b7838114ee31147555eb858dd837f1f8ac80d53f00c62b2c81c5a0121224d","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"333ea3aaa3cadb819f4acd9f9153f9feee060a995ca8710f32bc5bd9a4b91734","README.md":"6f3f1429f2724a481df811842f318d0d3b83160ada953fd869d4b685f7fd72e4","src/lib.rs":"a0039cfb11b235119b81ba8d31987c19d8fd6e1d23abbde57e67747fdd8f0ad0","tests/test.rs":"7913e5e040b75e5b63bf813d25a0491cb8e9b51857a039cf3b56d7f10fc787d3"},"package":"d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965"} \ No newline at end of file diff --git a/third_party/rust/foreign-types/Cargo.toml b/third_party/rust/foreign-types/Cargo.toml index 6ff5567de3b4..b3639ede10ed 100644 --- a/third_party/rust/foreign-types/Cargo.toml +++ b/third_party/rust/foreign-types/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,12 +11,20 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "foreign-types" -version = "0.3.2" +version = "0.5.0" authors = ["Steven Fackler "] description = "A framework for Rust wrappers over C APIs" readme = "README.md" license = "MIT/Apache-2.0" repository = "https://github.com/sfackler/foreign-types" +[dependencies.foreign-types-macros] +version = "0.2" + [dependencies.foreign-types-shared] -version = "0.1" +version = "0.3" + +[features] +default = ["std"] +std = ["foreign-types-macros/std"] diff --git a/third_party/rust/foreign-types/src/lib.rs b/third_party/rust/foreign-types/src/lib.rs index 6ce959207f42..00b3db950b2a 100644 --- a/third_party/rust/foreign-types/src/lib.rs +++ b/third_party/rust/foreign-types/src/lib.rs @@ -15,6 +15,7 @@ //! ``` //! use foreign_types::{ForeignType, ForeignTypeRef, Opaque}; //! use std::ops::{Deref, DerefMut}; +//! use std::ptr::NonNull; //! //! mod foo_sys { //! pub enum FOO {} @@ -30,7 +31,7 @@ //! // from raw C pointers. //! pub struct FooRef(Opaque); //! -//! impl ForeignTypeRef for FooRef { +//! unsafe impl ForeignTypeRef for FooRef { //! type CType = foo_sys::FOO; //! } //! @@ -38,24 +39,36 @@ //! // //! // It dereferences to `FooRef`, so methods that do not require ownership //! // should be defined there. -//! 
pub struct Foo(*mut foo_sys::FOO); +//! pub struct Foo(NonNull<foo_sys::FOO>); +//! +//! unsafe impl Sync for FooRef {} +//! unsafe impl Send for FooRef {} +//! +//! unsafe impl Sync for Foo {} +//! unsafe impl Send for Foo {} //! //! impl Drop for Foo { //! fn drop(&mut self) { -//! unsafe { foo_sys::FOO_free(self.0) } +//! unsafe { foo_sys::FOO_free(self.as_ptr()) } //! } //! } //! -//! impl ForeignType for Foo { +//! unsafe impl ForeignType for Foo { //! type CType = foo_sys::FOO; //! type Ref = FooRef; //! //! unsafe fn from_ptr(ptr: *mut foo_sys::FOO) -> Foo { -//! Foo(ptr) +//! Foo(NonNull::new_unchecked(ptr)) //! } //! //! fn as_ptr(&self) -> *mut foo_sys::FOO { -//! self.0 +//! self.0.as_ptr() +//! } +//! +//! fn into_ptr(self) -> *mut foo_sys::FOO { +//! let inner = self.as_ptr(); +//! ::core::mem::forget(self); +//! inner //! } //! } //! @@ -63,50 +76,60 @@ //! type Target = FooRef; //! //! fn deref(&self) -> &FooRef { -//! unsafe { FooRef::from_ptr(self.0) } +//! unsafe { FooRef::from_ptr(self.as_ptr()) } //! } //! } //! //! impl DerefMut for Foo { //! fn deref_mut(&mut self) -> &mut FooRef { -//! unsafe { FooRef::from_ptr_mut(self.0) } +//! unsafe { FooRef::from_ptr_mut(self.as_ptr()) } //! } //! } +//! +//! // add in Borrow, BorrowMut, AsRef, AsRefMut, Clone, ToOwned... //! ``` //! //! The `foreign_type!` macro can generate this boilerplate for you: //! //! ``` -//! #[macro_use] -//! extern crate foreign_types; +//! use foreign_types::foreign_type; //! //! mod foo_sys { //! pub enum FOO {} //! //! extern { //! pub fn FOO_free(foo: *mut FOO); -//! pub fn FOO_duplicate(foo: *mut FOO) -> *mut FOO; // Optional +//! pub fn FOO_duplicate(foo: *mut FOO) -> *mut FOO; // optional //! } //! } //! //! foreign_type! { -//! type CType = foo_sys::FOO; -//! fn drop = foo_sys::FOO_free; -//! fn clone = foo_sys::FOO_duplicate; // Optional //! /// A Foo. -//! pub struct Foo; -//! /// A borrowed Foo. -//! pub struct FooRef; +//! pub unsafe type Foo +//! : Sync + Send // optional +//! { +//! type CType = foo_sys::FOO; +//! fn drop = foo_sys::FOO_free; +//! fn clone = foo_sys::FOO_duplicate; // optional +//! } +//! +//! /// A Foo with generic parameters. +//! pub unsafe type GenericFoo<T> { +//! type CType = foo_sys::FOO; +//! // This type is added as a `PhantomData` field to handle variance +//! // of the parameters. However, it has no impact on trait impls: +//! // `GenericFoo<T>` is always `Clone`, even if `T` is not. +//! type PhantomData = T; +//! fn drop = foo_sys::FOO_free; +//! fn clone = foo_sys::FOO_duplicate; +//! } //! } //! //! # fn main() {} //! ``` //! //! If `fn clone` is specified, then it must take `CType` as an argument and return a copy of it as `CType`. -//! It will be used to implement `ToOwned` and `Clone`. -//! -//! `#[derive(…)] is permitted before the lines with `pub struct`. -//! `#[doc(hidden)]` before the `type CType` line will hide the `foreign_type!` implementations from documentation. +//! It will be used to implement `Clone`, and if the `std` Cargo feature is enabled, `ToOwned`. //! //! Say we then have a separate type in our C API that contains a `FOO`: //! @@ -128,10 +151,7 @@ //! modify the `FOO`, so we'll define a pair of accessor methods, one immutable and one mutable: //! //! ``` -//! #[macro_use] -//! extern crate foreign_types; -//! -//! use foreign_types::ForeignTypeRef; +//! use foreign_types::{ForeignTypeRef, foreign_type}; //! //! mod foo_sys { //! pub enum FOO {} @@ -145,22 +165,17 @@ //! } //! //! foreign_type! { -//! #[doc(hidden)] -//! type CType = foo_sys::FOO; -//! 
fn drop = foo_sys::FOO_free; //! /// A Foo. -//! pub struct Foo; -//! /// A borrowed Foo. -//! pub struct FooRef; -//! } +//! pub unsafe type Foo: Sync + Send { +//! type CType = foo_sys::FOO; +//! fn drop = foo_sys::FOO_free; +//! } //! -//! foreign_type! { -//! type CType = foo_sys::BAR; -//! fn drop = foo_sys::BAR_free; -//! /// A Foo. -//! pub struct Bar; -//! /// A borrowed Bar. -//! pub struct BarRef; +//! /// A Bar. +//! pub unsafe type Bar: Sync + Send { +//! type CType = foo_sys::BAR; +//! fn drop = foo_sys::BAR_free; +//! } //! } //! //! impl BarRef { @@ -177,130 +192,59 @@ //! ``` #![no_std] #![warn(missing_docs)] -#![doc(html_root_url="https://docs.rs/foreign-types/0.3")] -extern crate foreign_types_shared; +#![doc(html_root_url = "https://docs.rs/foreign-types/0.5")] +#[cfg(feature = "std")] +extern crate std; + +#[doc(hidden)] +pub use foreign_types_macros::foreign_type_impl; #[doc(inline)] -pub use foreign_types_shared::*; +pub use foreign_types_shared::{ForeignType, ForeignTypeRef, Opaque}; + +#[doc(hidden)] +pub mod export { + pub use core::borrow::{Borrow, BorrowMut}; + pub use core::clone::Clone; + pub use core::convert::{AsMut, AsRef}; + pub use core::marker::{PhantomData, Send, Sync}; + pub use core::ops::{Deref, DerefMut, Drop}; + pub use core::ptr::NonNull; + + #[cfg(feature = "std")] + pub use std::borrow::ToOwned; +} /// A macro to easily define wrappers for foreign types. /// /// # Examples /// /// ``` -/// #[macro_use] -/// extern crate foreign_types; +/// use foreign_types::foreign_type; /// /// # mod openssl_sys { pub type SSL = (); pub unsafe fn SSL_free(_: *mut SSL) {} pub unsafe fn SSL_dup(x: *mut SSL) -> *mut SSL {x} } +/// # mod foo_sys { pub type THING = (); pub unsafe fn THING_free(_: *mut THING) {} } /// foreign_type! { -/// type CType = openssl_sys::SSL; -/// fn drop = openssl_sys::SSL_free; -/// fn clone = openssl_sys::SSL_dup; /// /// Documentation for the owned type. -/// pub struct Ssl; -/// /// Documentation for the borrowed type. -/// pub struct SslRef; +/// pub unsafe type Ssl: Sync + Send { +/// type CType = openssl_sys::SSL; +/// fn drop = openssl_sys::SSL_free; +/// fn clone = openssl_sys::SSL_dup; +/// } +/// +/// /// This type immutably borrows other data and has a limited lifetime! +/// pub unsafe type Thing<'a>: Send { +/// type CType = foo_sys::THING; +/// type PhantomData = &'a (); +/// fn drop = foo_sys::THING_free; +/// } /// } /// /// # fn main() {} /// ``` -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! 
foreign_type { - ( - $(#[$impl_attr:meta])* - type CType = $ctype:ty; - fn drop = $drop:expr; - $(fn clone = $clone:expr;)* - $(#[$owned_attr:meta])* - pub struct $owned:ident; - $(#[$borrowed_attr:meta])* - pub struct $borrowed:ident; - ) => { - $(#[$owned_attr])* - pub struct $owned(*mut $ctype); - - $(#[$impl_attr])* - impl $crate::ForeignType for $owned { - type CType = $ctype; - type Ref = $borrowed; - - #[inline] - unsafe fn from_ptr(ptr: *mut $ctype) -> $owned { - $owned(ptr) - } - - #[inline] - fn as_ptr(&self) -> *mut $ctype { - self.0 - } - } - - impl Drop for $owned { - #[inline] - fn drop(&mut self) { - unsafe { $drop(self.0) } - } - } - - $( - impl Clone for $owned { - #[inline] - fn clone(&self) -> $owned { - unsafe { - let handle: *mut $ctype = $clone(self.0); - $crate::ForeignType::from_ptr(handle) - } - } - } - - impl ::std::borrow::ToOwned for $borrowed { - type Owned = $owned; - #[inline] - fn to_owned(&self) -> $owned { - unsafe { - let handle: *mut $ctype = $clone($crate::ForeignTypeRef::as_ptr(self)); - $crate::ForeignType::from_ptr(handle) - } - } - } - )* - - impl ::std::ops::Deref for $owned { - type Target = $borrowed; - - #[inline] - fn deref(&self) -> &$borrowed { - unsafe { $crate::ForeignTypeRef::from_ptr(self.0) } - } - } - - impl ::std::ops::DerefMut for $owned { - #[inline] - fn deref_mut(&mut self) -> &mut $borrowed { - unsafe { $crate::ForeignTypeRef::from_ptr_mut(self.0) } - } - } - - impl ::std::borrow::Borrow<$borrowed> for $owned { - #[inline] - fn borrow(&self) -> &$borrowed { - &**self - } - } - - impl ::std::convert::AsRef<$borrowed> for $owned { - #[inline] - fn as_ref(&self) -> &$borrowed { - &**self - } - } - - $(#[$borrowed_attr])* - pub struct $borrowed($crate::Opaque); - - $(#[$impl_attr])* - impl $crate::ForeignTypeRef for $borrowed { - type CType = $ctype; - } - } + ($($t:tt)*) => { + $crate::foreign_type_impl!($crate $($t)*); + }; } diff --git a/third_party/rust/foreign-types/tests/test.rs b/third_party/rust/foreign-types/tests/test.rs new file mode 100644 index 000000000000..a1df67e7a1de --- /dev/null +++ b/third_party/rust/foreign-types/tests/test.rs @@ -0,0 +1,22 @@ +use foreign_types::foreign_type; + +mod foo_sys { + pub enum FOO {} + + pub unsafe extern "C" fn foo_drop(_: *mut FOO) {} + pub unsafe extern "C" fn foo_clone(ptr: *mut FOO) -> *mut FOO { ptr } +} + +foreign_type! 
{ + pub unsafe type Foo<'a, T>: Sync + Send { + type CType = foo_sys::FOO; + type PhantomData = &'a T; + fn drop = foo_sys::foo_drop; + fn clone = foo_sys::foo_clone; + } + + pub unsafe type Foo2 { + type CType = foo_sys::FOO; + fn drop = foo_sys::foo_drop; + } +} diff --git a/third_party/rust/gpu-alloc-types/.cargo-checksum.json b/third_party/rust/gpu-alloc-types/.cargo-checksum.json index 4ce7fc40c333..9c6be37d5691 100644 --- a/third_party/rust/gpu-alloc-types/.cargo-checksum.json +++ b/third_party/rust/gpu-alloc-types/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"5ec80402fb950b3e70b291c64a49275dd951221b71ee74f8be5a16a74d42d296","src/device.rs":"1f44d08d50fc59d55bd3f92308e4965bde9134e7f5daa03594e122f7cdb1dfa2","src/lib.rs":"0f77c21a5770b54affa146e5f4c15abcb83599de551226d336ee48ec5185f866","src/types.rs":"8c6acec203faa11856076193835f6d9a1924469682e1fdf11a0325936a1255ee"},"package":"54804d0d6bc9d7f26db4eaec1ad10def69b599315f487d32c334a80d1efe67a5"} \ No newline at end of file +{"files":{"Cargo.toml":"ce92a2d32d4b2e50d617595e689b6bca736b7792280480c4057eaebef15bf325","README.md":"5a96b135d18e172f090bd6147830ab3a1b5cefac5f02be28f06cf88eea61b64f","src/device.rs":"8173609d5fb700f8b82d6335729c592ff62c306b12894a9bab9da45a47810e4a","src/lib.rs":"0f77c21a5770b54affa146e5f4c15abcb83599de551226d336ee48ec5185f866","src/types.rs":"aa861f891a63c232441c5a9fe5fed61cac2d62780108c01ebe6cb64a8547b4e2"},"package":"98ff03b468aa837d70984d55f5d3f846f6ec31fe34bbb97c4f85219caeee1ca4"} \ No newline at end of file diff --git a/third_party/rust/gpu-alloc-types/Cargo.toml b/third_party/rust/gpu-alloc-types/Cargo.toml index a4e0210d0b1c..f6fe81577ac9 100644 --- a/third_party/rust/gpu-alloc-types/Cargo.toml +++ b/third_party/rust/gpu-alloc-types/Cargo.toml @@ -3,24 +3,36 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
[package] edition = "2018" name = "gpu-alloc-types" -version = "0.2.0" +version = "0.3.0" authors = ["Zakarum "] description = "Core types of gpu-alloc crate" homepage = "https://github.com/zakarumych/gpu-alloc" -documentation = "https://docs.rs/gpu-alloc-types/0.2.0" -keywords = ["gpu", "vulkan", "allocation", "no-std"] +documentation = "https://docs.rs/gpu-alloc-types" +readme = "README.md" +keywords = [ + "gpu", + "vulkan", + "allocation", + "no-std", +] +categories = [ + "graphics", + "memory-management", + "no-std", + "game-development", +] license = "MIT OR Apache-2.0" repository = "https://github.com/zakarumych/gpu-alloc" + [dependencies.bitflags] -version = "1.2" +version = "2.0" default-features = false diff --git a/third_party/rust/gpu-alloc-types/README.md b/third_party/rust/gpu-alloc-types/README.md new file mode 100644 index 000000000000..8a93d685c1da --- /dev/null +++ b/third_party/rust/gpu-alloc-types/README.md @@ -0,0 +1,51 @@ +# gpu-alloc + +[![crates](https://img.shields.io/crates/v/gpu-alloc.svg?style=for-the-badge&label=gpu-alloc)](https://crates.io/crates/gpu-alloc) +[![docs](https://img.shields.io/badge/docs.rs-gpu--alloc-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white)](https://docs.rs/gpu-alloc) +[![actions](https://img.shields.io/github/workflow/status/zakarumych/gpu-alloc/Rust/main?style=for-the-badge)](https://github.com/zakarumych/gpu-alloc/actions?query=workflow%3ARust) +[![MIT/Apache](https://img.shields.io/badge/license-MIT%2FApache-blue.svg?style=for-the-badge)](COPYING) +![loc](https://img.shields.io/tokei/lines/github/zakarumych/gpu-alloc?style=for-the-badge) + + +Implementation agnostic memory allocator for Vulkan like APIs. + +This crate is intended to be used as part of safe API implementations.\ +Use with caution. There are unsafe functions all over the place. + +## Usage + +Start with fetching `DeviceProperties` from `gpu-alloc-` crate for the backend of choice.\ +Then create `GpuAllocator` instance and use it for all device memory allocations.\ +`GpuAllocator` will take care for all necessary bookkeeping like memory object count limit, +heap budget and memory mapping. + +#### Backends implementations + +Backend supporting crates should not depend on this crate.\ +Instead they should depend on `gpu-alloc-types` which is much more stable, +allowing to upgrade `gpu-alloc` version without `gpu-alloc-` upgrade. + + +Supported Rust Versions + +The minimum supported version is 1.40. +The current version is not guaranteed to build on Rust versions earlier than the minimum supported version. + +`gpu-alloc-erupt` crate requires version 1.48 or higher due to dependency on `erupt` crate. + +## License + +Licensed under either of + +* Apache License, Version 2.0, ([license/APACHE](license/APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +* MIT license ([license/MIT](license/MIT) or http://opensource.org/licenses/MIT) + +at your option. + +## Contributions + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
+ +## Donate + +[![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/zakarum) diff --git a/third_party/rust/gpu-alloc-types/src/device.rs b/third_party/rust/gpu-alloc-types/src/device.rs index d05f2e8fdeb3..eb73366799b9 100644 --- a/third_party/rust/gpu-alloc-types/src/device.rs +++ b/third_party/rust/gpu-alloc-types/src/device.rs @@ -67,6 +67,7 @@ pub struct DeviceProperties<'a> { bitflags::bitflags! { /// Allocation flags #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct AllocationFlags : u8 { /// Specifies that the memory can be used for buffers created /// with flag that allows fetching device address. @@ -84,7 +85,7 @@ pub trait MemoryDevice<M> { /// # Safety /// /// `memory_type` must be valid index for memory type associated with this device. - /// Retreiving this information is implementation specific. + /// Retrieving this information is implementation specific. /// /// `flags` must be supported by the device. unsafe fn allocate_memory( diff --git a/third_party/rust/gpu-alloc-types/src/types.rs b/third_party/rust/gpu-alloc-types/src/types.rs index 9c6fd3bd3089..ddda9fa6b9aa 100644 --- a/third_party/rust/gpu-alloc-types/src/types.rs +++ b/third_party/rust/gpu-alloc-types/src/types.rs @@ -1,6 +1,7 @@ bitflags::bitflags! { /// Memory properties type. #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct MemoryPropertyFlags: u8 { /// This flag is set for device-local memory types. /// Device-local memory is situated "close" to the GPU cores @@ -42,7 +43,7 @@ pub struct MemoryType { /// Heap index of the memory type. pub heap: u32, - /// Propety flags of the memory type. + /// Property flags of the memory type. 
pub props: MemoryPropertyFlags, } diff --git a/third_party/rust/gpu-alloc/.cargo-checksum.json b/third_party/rust/gpu-alloc/.cargo-checksum.json index 6f41510d581d..51a8d5b20119 100644 --- a/third_party/rust/gpu-alloc/.cargo-checksum.json +++ b/third_party/rust/gpu-alloc/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"d444caa50cc5b4e0ee201da5347df73c6e77d3eedaae9e69d007c24dd0bb7bfa","src/allocator.rs":"c06b84e062859e1dcd21fa5cd71c8faafcca325b53dcc7b2fee3df945163e820","src/block.rs":"6efc369712e3d880a0d16ab1ef18062a88fefae9bf0a4e626de301cbb1b962ee","src/buddy.rs":"f8eacc183f2ee66faba05dec6f33659afa797e765326c96be4a8bafa651c0e47","src/config.rs":"49bb14c27713c6b1c0c91c094b0f00be22bd26efa660ff979fb1ee1193e9d70e","src/error.rs":"11337a66fc32f3423a1355ffd597d2c7ea28c2b7ce07d707fd5905c330edba08","src/freelist.rs":"3fbbcb96e307e0e021b38df6fe04d129e5d954fcd5a0144e388371442ef20916","src/heap.rs":"03f7c623a0389263073ab48797a191ca06c5611006df7b79c9dc8615c130f495","src/lib.rs":"78800455337d231af24814d84f4e5eb61c5181b12daec06fdcee2d2a75ccb221","src/slab.rs":"2df136b7e0a85eec8a198c7f807678d3a2043c46233e7ec63cd2de72fe562477","src/usage.rs":"0e4788c437718807cdd0b88301228cc27a6b8398313a91e1b70c5f2ab04dcbfb","src/util.rs":"31a496ee3407eb6b0dea255e30e154d805a0d08d777be9c06bdf56d1135bf950"},"package":"7fc59e5f710e310e76e6707f86c561dd646f69a8876da9131703b2f717de818d"} \ No newline at end of file +{"files":{"Cargo.toml":"3647062a9589eedcc361f51f1e1f0852ae6be3c028d0dc8f8d3c0c944bbca61e","README.md":"5a96b135d18e172f090bd6147830ab3a1b5cefac5f02be28f06cf88eea61b64f","src/allocator.rs":"28f20bbfc3866b4eb94a025027925268178921393f5ab8eb5febad1acf94dce8","src/block.rs":"3553343eb171e7ef8b771a1582489cbbbe22b3a1aca4d54b9ff0174fd30bf0da","src/buddy.rs":"253df5f689b643cf97cfd27be6512ea84b84d3c2097a35f3f2d73f537ab353b3","src/config.rs":"1851264db7576f92f3b455e4667339e1c48b7f1b88f8fae7be95c6a9badde455","src/error.rs":"50e30bccc4ba3b23d99298a65155eec45f7d91b66773e828460907ebdcb8ee41","src/freelist.rs":"92c9241d899f3e92a70b0eb5af0b557a3a76aa14c4a6d23876c3ed7857b5f15b","src/heap.rs":"347c25a8560d39d1abb807fb5c34a6a51d4786c0be24a92293f9c8217bace340","src/lib.rs":"07077fb465fb471b39db99bb0311082133d1c2044c2e192d20f7bbb9e743dd0b","src/slab.rs":"6a2b818087850bd5615b085d912490dbd46bbca0df28daa86932505b3983c64a","src/usage.rs":"99009f2ff532904de3ef66520336cd25bd1b795afcb0158385e78a5908f8308f","src/util.rs":"ce3fd7a278eb4d4bd030d4db5495313f56dc91b0765c394f88d126f11c2b4e75"},"package":"fbcd2dba93594b227a1f57ee09b8b9da8892c34d55aa332e034a228d0fe6a171"} \ No newline at end of file diff --git a/third_party/rust/gpu-alloc/Cargo.toml b/third_party/rust/gpu-alloc/Cargo.toml index 51c65b43a3b3..6021c6cbf274 100644 --- a/third_party/rust/gpu-alloc/Cargo.toml +++ b/third_party/rust/gpu-alloc/Cargo.toml @@ -12,22 +12,33 @@ [package] edition = "2018" name = "gpu-alloc" -version = "0.5.3" +version = "0.6.0" authors = ["Zakarum "] description = "Implementation agnostic memory allocator for Vulkan like APIs" homepage = "https://github.com/zakarumych/gpu-alloc" documentation = "https://docs.rs/gpu-alloc-types" -readme = "../README.md" -keywords = ["gpu", "vulkan", "allocation", "no-std"] -categories = ["graphics", "memory-management", "no-std", "game-development"] +readme = "README.md" +keywords = [ + "gpu", + "vulkan", + "allocation", + "no-std", +] +categories = [ + "graphics", + "memory-management", + "no-std", + "game-development", +] license = "MIT OR Apache-2.0" repository = "https://github.com/zakarumych/gpu-alloc" + 
[dependencies.bitflags]
-version = "1.2"
+version = "2.0"
default-features = false

[dependencies.gpu-alloc-types]
-version = "0.2"
+version = "=0.3.0"

[dependencies.serde]
version = "1.0"
@@ -43,4 +54,8 @@ default-features = false

[features]
default = ["std"]
+serde = [
+    "dep:serde",
+    "bitflags/serde",
+]
std = []
diff --git a/third_party/rust/gpu-alloc/README.md b/third_party/rust/gpu-alloc/README.md
new file mode 100644
index 000000000000..8a93d685c1da
--- /dev/null
+++ b/third_party/rust/gpu-alloc/README.md
@@ -0,0 +1,51 @@
+# gpu-alloc
+
+[![crates](https://img.shields.io/crates/v/gpu-alloc.svg?style=for-the-badge&label=gpu-alloc)](https://crates.io/crates/gpu-alloc)
+[![docs](https://img.shields.io/badge/docs.rs-gpu--alloc-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white)](https://docs.rs/gpu-alloc)
+[![actions](https://img.shields.io/github/workflow/status/zakarumych/gpu-alloc/Rust/main?style=for-the-badge)](https://github.com/zakarumych/gpu-alloc/actions?query=workflow%3ARust)
+[![MIT/Apache](https://img.shields.io/badge/license-MIT%2FApache-blue.svg?style=for-the-badge)](COPYING)
+![loc](https://img.shields.io/tokei/lines/github/zakarumych/gpu-alloc?style=for-the-badge)
+
+Implementation-agnostic memory allocator for Vulkan-like APIs.
+
+This crate is intended to be used as part of safe API implementations.\
+Use with caution. There are unsafe functions all over the place.
+
+## Usage
+
+Start by fetching `DeviceProperties` from the `gpu-alloc-` crate for the backend of choice.\
+Then create a `GpuAllocator` instance and use it for all device memory allocations.\
+`GpuAllocator` will take care of all necessary bookkeeping, like the memory object count limit,
+heap budget, and memory mapping.
+
+#### Backend implementations
+
+Backend-supporting crates should not depend on this crate.\
+Instead, they should depend on `gpu-alloc-types`, which is much more stable,
+allowing `gpu-alloc` to be upgraded without a `gpu-alloc-` upgrade.
+
+## Supported Rust Versions
+
+The minimum supported version is 1.40.
+The current version is not guaranteed to build on Rust versions earlier than the minimum supported version.
+
+The `gpu-alloc-erupt` crate requires Rust 1.48 or higher due to its dependency on the `erupt` crate.
+
+## License
+
+Licensed under either of
+
+* Apache License, Version 2.0, ([license/APACHE](license/APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+* MIT license ([license/MIT](license/MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+## Contributions
+
+Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
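The usage flow the README describes can be sketched end to end. Everything below is a hedged illustration rather than vendored code: `device` stands in for a backend's `MemoryDevice` implementation, `props` for the `DeviceProperties` its companion crate reports, and `Config::i_am_prototyping()` is the crate's permissive starter configuration:

```rust
use gpu_alloc::{Config, GpuAllocator, MemoryBounds, Request, UsageFlags};
use gpu_alloc_types::{DeviceProperties, MemoryDevice};

// Assumption: `M` is the backend's device-memory handle type.
unsafe fn run<M: MemoryBounds + 'static>(
    device: &impl MemoryDevice<M>,
    props: DeviceProperties<'_>,
) {
    // One allocator per device; it does the bookkeeping the README lists:
    // allocation-count limits, heap budgets, and memory mapping.
    let mut allocator: GpuAllocator<M> =
        GpuAllocator::new(Config::i_am_prototyping(), props);

    let block = allocator
        .alloc(
            device,
            Request {
                size: 64 * 1024,
                align_mask: 0xFF,          // 256-byte alignment
                usage: UsageFlags::UPLOAD, // host-writable staging memory
                memory_types: !0,          // consider every compatible type
            },
        )
        .expect("out of memory");

    // Blocks must return to the same allocator, and `cleanup` should run
    // before the allocator is dropped.
    allocator.dealloc(device, block);
    allocator.cleanup(device);
}
```

In this update, wgpu-hal's Vulkan backend fills the companion-crate role itself, implementing `MemoryDevice` over its own handles rather than pulling in a published `gpu-alloc-` backend crate.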
+ +## Donate + +[![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/zakarum) diff --git a/third_party/rust/gpu-alloc/src/allocator.rs b/third_party/rust/gpu-alloc/src/allocator.rs index 80937c8d0644..3f862ce565b3 100644 --- a/third_party/rust/gpu-alloc/src/allocator.rs +++ b/third_party/rust/gpu-alloc/src/allocator.rs @@ -1,510 +1,611 @@ -use { - crate::{ - align_down, - block::{MemoryBlock, MemoryBlockFlavor}, - buddy::{BuddyAllocator, BuddyBlock}, - config::Config, - error::AllocationError, - freelist::{FreeListAllocator, FreeListBlock}, - heap::Heap, - usage::{MemoryForUsage, UsageFlags}, - MemoryBounds, Request, - }, - alloc::boxed::Box, - core::convert::TryFrom as _, - gpu_alloc_types::{ - AllocationFlags, DeviceProperties, MemoryDevice, MemoryPropertyFlags, MemoryType, - OutOfMemory, - }, -}; - -/// Memory allocator for Vulkan-like APIs. -#[derive(Debug)] -pub struct GpuAllocator { - dedicated_threshold: u64, - preferred_dedicated_threshold: u64, - transient_dedicated_threshold: u64, - max_memory_allocation_size: u64, - memory_for_usage: MemoryForUsage, - memory_types: Box<[MemoryType]>, - memory_heaps: Box<[Heap]>, - allocations_remains: u32, - non_coherent_atom_mask: u64, - starting_free_list_chunk: u64, - final_free_list_chunk: u64, - minimal_buddy_size: u64, - initial_buddy_dedicated_size: u64, - buffer_device_address: bool, - - buddy_allocators: Box<[Option>]>, - freelist_allocators: Box<[Option>]>, -} - -/// Hints for allocator to decide on allocation strategy. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum Dedicated { - /// Allocation directly from device.\ - /// Very slow. - /// Count of allocations is limited.\ - /// Use with caution.\ - /// Must be used if resource has to be bound to dedicated memory object. - Required, - - /// Hint for allocator that dedicated memory object is preferred.\ - /// Should be used if it is known that resource placed in dedicated memory object - /// would allow for better performance.\ - /// Implementation is allowed to return block to shared memory object. - Preferred, -} - -impl GpuAllocator -where - M: MemoryBounds + 'static, -{ - /// Creates new instance of `GpuAllocator`. - /// Provided `DeviceProperties` should match properties of `MemoryDevice` that will be used - /// with created `GpuAllocator` instance. 
- #[cfg_attr(feature = "tracing", tracing::instrument)] - pub fn new(config: Config, props: DeviceProperties<'_>) -> Self { - assert!( - props.non_coherent_atom_size.is_power_of_two(), - "`non_coherent_atom_size` must be power of two" - ); - - assert!( - isize::try_from(props.non_coherent_atom_size).is_ok(), - "`non_coherent_atom_size` must fit host address space" - ); - - GpuAllocator { - dedicated_threshold: config.dedicated_threshold, - preferred_dedicated_threshold: config - .preferred_dedicated_threshold - .min(config.dedicated_threshold), - - transient_dedicated_threshold: config - .transient_dedicated_threshold - .max(config.dedicated_threshold), - - max_memory_allocation_size: props.max_memory_allocation_size, - - memory_for_usage: MemoryForUsage::new(props.memory_types.as_ref()), - - memory_types: props.memory_types.as_ref().iter().copied().collect(), - memory_heaps: props - .memory_heaps - .as_ref() - .iter() - .map(|heap| Heap::new(heap.size)) - .collect(), - - buffer_device_address: props.buffer_device_address, - - allocations_remains: props.max_memory_allocation_count, - non_coherent_atom_mask: props.non_coherent_atom_size - 1, - - starting_free_list_chunk: config.starting_free_list_chunk, - final_free_list_chunk: config.final_free_list_chunk, - minimal_buddy_size: config.minimal_buddy_size, - initial_buddy_dedicated_size: config.initial_buddy_dedicated_size, - - buddy_allocators: props.memory_types.as_ref().iter().map(|_| None).collect(), - freelist_allocators: props.memory_types.as_ref().iter().map(|_| None).collect(), - } - } - - /// Allocates memory block from specified `device` according to the `request`. - /// - /// # Safety - /// - /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance. - /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance - /// and memory blocks allocated from it. - #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] - pub unsafe fn alloc( - &mut self, - device: &impl MemoryDevice, - request: Request, - ) -> Result, AllocationError> { - self.alloc_internal(device, request, None) - } - - /// Allocates memory block from specified `device` according to the `request`. - /// This function allows user to force specific allocation strategy. - /// Improper use can lead to suboptimal performance or too large overhead. - /// Prefer `GpuAllocator::alloc` if doubt. - /// - /// # Safety - /// - /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance. - /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance - /// and memory blocks allocated from it. 
- #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] - pub unsafe fn alloc_with_dedicated( - &mut self, - device: &impl MemoryDevice, - request: Request, - dedicated: Dedicated, - ) -> Result, AllocationError> { - self.alloc_internal(device, request, Some(dedicated)) - } - - unsafe fn alloc_internal( - &mut self, - device: &impl MemoryDevice, - mut request: Request, - dedicated: Option, - ) -> Result, AllocationError> { - enum Strategy { - Buddy, - Dedicated, - FreeList, - } - - request.usage = with_implicit_usage_flags(request.usage); - - if request.usage.contains(UsageFlags::DEVICE_ADDRESS) { - assert!(self.buffer_device_address, "`DEVICE_ADDRESS` cannot be requested when `DeviceProperties::buffer_device_address` is false"); - } - - if request.size > self.max_memory_allocation_size { - return Err(AllocationError::OutOfDeviceMemory); - } - - if let Some(Dedicated::Required) = dedicated { - if self.allocations_remains == 0 { - return Err(AllocationError::TooManyObjects); - } - } - - if 0 == self.memory_for_usage.mask(request.usage) & request.memory_types { - #[cfg(feature = "tracing")] - tracing::error!( - "Cannot serve request {:?}, no memory among bitset `{}` support usage {:?}", - request, - request.memory_types, - request.usage - ); - - return Err(AllocationError::NoCompatibleMemoryTypes); - } - - let transient = request.usage.contains(UsageFlags::TRANSIENT); - - for &index in self.memory_for_usage.types(request.usage) { - if 0 == request.memory_types & (1 << index) { - // Skip memory type incompatible with the request. - continue; - } - - let memory_type = &self.memory_types[index as usize]; - let heap = memory_type.heap; - let heap = &mut self.memory_heaps[heap as usize]; - - let atom_mask = if host_visible_non_coherent(memory_type.props) { - self.non_coherent_atom_mask - } else { - 0 - }; - - let flags = if self.buffer_device_address { - AllocationFlags::DEVICE_ADDRESS - } else { - AllocationFlags::empty() - }; - - let strategy = match (dedicated, transient) { - (Some(Dedicated::Required), _) => Strategy::Dedicated, - (Some(Dedicated::Preferred), _) - if request.size >= self.preferred_dedicated_threshold => - { - Strategy::Dedicated - } - (_, true) => { - let threshold = self.transient_dedicated_threshold.min(heap.size() / 32); - - if request.size < threshold { - Strategy::FreeList - } else { - Strategy::Dedicated - } - } - (_, false) => { - let threshold = self.dedicated_threshold.min(heap.size() / 32); - - if request.size < threshold { - Strategy::Buddy - } else { - Strategy::Dedicated - } - } - }; - - match strategy { - Strategy::Dedicated => { - #[cfg(feature = "tracing")] - tracing::debug!( - "Allocating memory object `{}@{:?}`", - request.size, - memory_type - ); - - match device.allocate_memory(request.size, index, flags) { - Ok(memory) => { - self.allocations_remains -= 1; - heap.alloc(request.size); - - return Ok(MemoryBlock::new( - index, - memory_type.props, - 0, - request.size, - atom_mask, - MemoryBlockFlavor::Dedicated { memory }, - )); - } - Err(OutOfMemory::OutOfDeviceMemory) => continue, - Err(OutOfMemory::OutOfHostMemory) => { - return Err(AllocationError::OutOfHostMemory) - } - } - } - Strategy::FreeList => { - let allocator = match &mut self.freelist_allocators[index as usize] { - Some(allocator) => allocator, - slot => { - let starting_free_list_chunk = match align_down( - self.starting_free_list_chunk.min(heap.size() / 32), - atom_mask, - ) { - 0 => atom_mask, - other => other, - }; - - let final_free_list_chunk = match align_down( - 
self.final_free_list_chunk - .max(self.starting_free_list_chunk) - .max(self.transient_dedicated_threshold) - .min(heap.size() / 32), - atom_mask, - ) { - 0 => atom_mask, - other => other, - }; - - slot.get_or_insert(FreeListAllocator::new( - starting_free_list_chunk, - final_free_list_chunk, - index, - memory_type.props, - if host_visible_non_coherent(memory_type.props) { - self.non_coherent_atom_mask - } else { - 0 - }, - )) - } - }; - let result = allocator.alloc( - device, - request.size, - request.align_mask, - flags, - heap, - &mut self.allocations_remains, - ); - - match result { - Ok(block) => { - return Ok(MemoryBlock::new( - index, - memory_type.props, - block.offset, - block.size, - atom_mask, - MemoryBlockFlavor::FreeList { - chunk: block.chunk, - ptr: block.ptr, - memory: block.memory, - }, - )) - } - Err(AllocationError::OutOfDeviceMemory) => continue, - Err(err) => return Err(err), - } - } - - Strategy::Buddy => { - let allocator = match &mut self.buddy_allocators[index as usize] { - Some(allocator) => allocator, - slot => { - let minimal_buddy_size = self - .minimal_buddy_size - .min(heap.size() / 1024) - .next_power_of_two(); - - let initial_buddy_dedicated_size = self - .initial_buddy_dedicated_size - .min(heap.size() / 32) - .next_power_of_two(); - - slot.get_or_insert(BuddyAllocator::new( - minimal_buddy_size, - initial_buddy_dedicated_size, - index, - memory_type.props, - if host_visible_non_coherent(memory_type.props) { - self.non_coherent_atom_mask - } else { - 0 - }, - )) - } - }; - let result = allocator.alloc( - device, - request.size, - request.align_mask, - flags, - heap, - &mut self.allocations_remains, - ); - - match result { - Ok(block) => { - return Ok(MemoryBlock::new( - index, - memory_type.props, - block.offset, - block.size, - atom_mask, - MemoryBlockFlavor::Buddy { - chunk: block.chunk, - ptr: block.ptr, - index: block.index, - memory: block.memory, - }, - )) - } - Err(AllocationError::OutOfDeviceMemory) => continue, - Err(err) => return Err(err), - } - } - } - } - - Err(AllocationError::OutOfDeviceMemory) - } - - /// Deallocates memory block previously allocated from this `GpuAllocator` instance. 
- /// - /// # Safety - /// - /// * Memory block must have been allocated by this `GpuAllocator` instance - /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance - /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance - /// and memory blocks allocated from it - #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] - pub unsafe fn dealloc(&mut self, device: &impl MemoryDevice, block: MemoryBlock) { - let memory_type = block.memory_type(); - let offset = block.offset(); - let size = block.size(); - let flavor = block.deallocate(); - match flavor { - MemoryBlockFlavor::Dedicated { memory } => { - let heap = self.memory_types[memory_type as usize].heap; - device.deallocate_memory(memory); - self.allocations_remains += 1; - self.memory_heaps[heap as usize].dealloc(size); - } - MemoryBlockFlavor::Buddy { - chunk, - ptr, - index, - memory, - } => { - let heap = self.memory_types[memory_type as usize].heap; - let heap = &mut self.memory_heaps[heap as usize]; - - let allocator = self.buddy_allocators[memory_type as usize] - .as_mut() - .expect("Allocator should exist"); - - allocator.dealloc( - device, - BuddyBlock { - memory, - ptr, - offset, - size, - chunk, - index, - }, - heap, - &mut self.allocations_remains, - ); - } - MemoryBlockFlavor::FreeList { chunk, ptr, memory } => { - let heap = self.memory_types[memory_type as usize].heap; - let heap = &mut self.memory_heaps[heap as usize]; - - let allocator = self.freelist_allocators[memory_type as usize] - .as_mut() - .expect("Allocator should exist"); - - allocator.dealloc( - device, - FreeListBlock { - memory, - ptr, - chunk, - offset, - size, - }, - heap, - &mut self.allocations_remains, - ); - } - } - } - - /// Deallocates leftover memory objects. - /// Should be used before dropping. 
- /// - /// # Safety - /// - /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance - /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance - /// and memory blocks allocated from it - #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] - pub unsafe fn cleanup(&mut self, device: &impl MemoryDevice) { - for (index, allocator) in self - .freelist_allocators - .iter_mut() - .enumerate() - .filter_map(|(index, allocator)| Some((index, allocator.as_mut()?))) - { - let memory_type = &self.memory_types[index]; - let heap = memory_type.heap; - let heap = &mut self.memory_heaps[heap as usize]; - - allocator.cleanup(device, heap, &mut self.allocations_remains); - } - } -} - -fn host_visible_non_coherent(props: MemoryPropertyFlags) -> bool { - (props & (MemoryPropertyFlags::HOST_COHERENT | MemoryPropertyFlags::HOST_VISIBLE)) - == MemoryPropertyFlags::HOST_VISIBLE -} - -fn with_implicit_usage_flags(usage: UsageFlags) -> UsageFlags { - if usage.is_empty() { - UsageFlags::FAST_DEVICE_ACCESS - } else if usage.intersects(UsageFlags::DOWNLOAD | UsageFlags::UPLOAD) { - usage | UsageFlags::HOST_ACCESS - } else { - usage - } -} +use { + crate::{ + align_down, + block::{MemoryBlock, MemoryBlockFlavor}, + buddy::{BuddyAllocator, BuddyBlock}, + config::Config, + error::AllocationError, + freelist::{FreeListAllocator, FreeListBlock}, + heap::Heap, + usage::{MemoryForUsage, UsageFlags}, + MemoryBounds, Request, + }, + alloc::boxed::Box, + core::convert::TryFrom as _, + gpu_alloc_types::{ + AllocationFlags, DeviceProperties, MemoryDevice, MemoryPropertyFlags, MemoryType, + OutOfMemory, + }, +}; + +/// Memory allocator for Vulkan-like APIs. +#[derive(Debug)] +pub struct GpuAllocator { + dedicated_threshold: u64, + preferred_dedicated_threshold: u64, + transient_dedicated_threshold: u64, + max_memory_allocation_size: u64, + memory_for_usage: MemoryForUsage, + memory_types: Box<[MemoryType]>, + memory_heaps: Box<[Heap]>, + allocations_remains: u32, + non_coherent_atom_mask: u64, + starting_free_list_chunk: u64, + final_free_list_chunk: u64, + minimal_buddy_size: u64, + initial_buddy_dedicated_size: u64, + buffer_device_address: bool, + + buddy_allocators: Box<[Option>]>, + freelist_allocators: Box<[Option>]>, +} + +/// Hints for allocator to decide on allocation strategy. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub enum Dedicated { + /// Allocation directly from device.\ + /// Very slow. + /// Count of allocations is limited.\ + /// Use with caution.\ + /// Must be used if resource has to be bound to dedicated memory object. + Required, + + /// Hint for allocator that dedicated memory object is preferred.\ + /// Should be used if it is known that resource placed in dedicated memory object + /// would allow for better performance.\ + /// Implementation is allowed to return block to shared memory object. + Preferred, +} + +impl GpuAllocator +where + M: MemoryBounds + 'static, +{ + /// Creates new instance of `GpuAllocator`. + /// Provided `DeviceProperties` should match properties of `MemoryDevice` that will be used + /// with created `GpuAllocator` instance. 
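For reference, the fields read by the constructor below can also be spelled out explicitly instead of going through `Config::i_am_prototyping()`. The values here are illustrative only, and this assumes `Config`'s fields are public as documented; note that `new` clamps inconsistent thresholds (`preferred_dedicated_threshold` down to `dedicated_threshold`, `transient_dedicated_threshold` up to it):

```rust
use gpu_alloc::Config;

// Illustrative numbers only; tune against real workloads.
fn example_config() -> Config {
    Config {
        // Requests at or above this size get their own memory object.
        dedicated_threshold: 32 * 1024 * 1024,
        // Clamped by `new` to at most `dedicated_threshold`.
        preferred_dedicated_threshold: 8 * 1024 * 1024,
        // Clamped by `new` to at least `dedicated_threshold`.
        transient_dedicated_threshold: 128 * 1024 * 1024,
        // Free-list chunks grow from the first value toward the second.
        starting_free_list_chunk: 8 * 1024 * 1024,
        final_free_list_chunk: 128 * 1024 * 1024,
        // Buddy allocator granularity and initial chunk size.
        minimal_buddy_size: 256,
        initial_buddy_dedicated_size: 8 * 1024 * 1024,
    }
}
```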
+    #[cfg_attr(feature = "tracing", tracing::instrument)]
+    pub fn new(config: Config, props: DeviceProperties<'_>) -> Self {
+        assert!(
+            props.non_coherent_atom_size.is_power_of_two(),
+            "`non_coherent_atom_size` must be power of two"
+        );
+
+        assert!(
+            isize::try_from(props.non_coherent_atom_size).is_ok(),
+            "`non_coherent_atom_size` must fit host address space"
+        );
+
+        GpuAllocator {
+            dedicated_threshold: config.dedicated_threshold,
+            preferred_dedicated_threshold: config
+                .preferred_dedicated_threshold
+                .min(config.dedicated_threshold),
+
+            transient_dedicated_threshold: config
+                .transient_dedicated_threshold
+                .max(config.dedicated_threshold),
+
+            max_memory_allocation_size: props.max_memory_allocation_size,
+
+            memory_for_usage: MemoryForUsage::new(props.memory_types.as_ref()),
+
+            memory_types: props.memory_types.as_ref().iter().copied().collect(),
+            memory_heaps: props
+                .memory_heaps
+                .as_ref()
+                .iter()
+                .map(|heap| Heap::new(heap.size))
+                .collect(),
+
+            buffer_device_address: props.buffer_device_address,
+
+            allocations_remains: props.max_memory_allocation_count,
+            non_coherent_atom_mask: props.non_coherent_atom_size - 1,
+
+            starting_free_list_chunk: config.starting_free_list_chunk,
+            final_free_list_chunk: config.final_free_list_chunk,
+            minimal_buddy_size: config.minimal_buddy_size,
+            initial_buddy_dedicated_size: config.initial_buddy_dedicated_size,
+
+            buddy_allocators: props.memory_types.as_ref().iter().map(|_| None).collect(),
+            freelist_allocators: props.memory_types.as_ref().iter().map(|_| None).collect(),
+        }
+    }
+
+    /// Allocates memory block from specified `device` according to the `request`.
+    ///
+    /// # Safety
+    ///
+    /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance.
+    /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance
+    /// and memory blocks allocated from it.
+    #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))]
+    pub unsafe fn alloc(
+        &mut self,
+        device: &impl MemoryDevice<M>,
+        request: Request,
+    ) -> Result<MemoryBlock<M>, AllocationError> {
+        self.alloc_internal(device, request, None)
+    }
+
+    /// Allocates memory block from specified `device` according to the `request`.
+    /// This function allows the user to force a specific allocation strategy.
+    /// Improper use can lead to suboptimal performance or excessive overhead.
+    /// Prefer `GpuAllocator::alloc` if in doubt.
+    ///
+    /// # Safety
+    ///
+    /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance.
+    /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance
+    /// and memory blocks allocated from it.
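A hedged sketch of driving the dedicated-allocation hint defined just below, for example when `VK_KHR_dedicated_allocation` reports that a resource requires its own memory object; `alloc_dedicated` is an illustrative helper, not crate API:

```rust
use gpu_alloc::{AllocationError, Dedicated, GpuAllocator, MemoryBlock, MemoryBounds, Request};
use gpu_alloc_types::MemoryDevice;

unsafe fn alloc_dedicated<M: MemoryBounds + 'static>(
    allocator: &mut GpuAllocator<M>,
    device: &impl MemoryDevice<M>,
    request: Request,
) -> Result<MemoryBlock<M>, AllocationError> {
    // `Required` always allocates a fresh memory object; `Preferred` only
    // takes effect for sizes at or above `preferred_dedicated_threshold`.
    allocator.alloc_with_dedicated(device, request, Dedicated::Required)
}
```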
+ #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] + pub unsafe fn alloc_with_dedicated( + &mut self, + device: &impl MemoryDevice, + request: Request, + dedicated: Dedicated, + ) -> Result, AllocationError> { + self.alloc_internal(device, request, Some(dedicated)) + } + + unsafe fn alloc_internal( + &mut self, + device: &impl MemoryDevice, + mut request: Request, + dedicated: Option, + ) -> Result, AllocationError> { + enum Strategy { + Buddy, + Dedicated, + FreeList, + } + + request.usage = with_implicit_usage_flags(request.usage); + + if request.usage.contains(UsageFlags::DEVICE_ADDRESS) { + assert!(self.buffer_device_address, "`DEVICE_ADDRESS` cannot be requested when `DeviceProperties::buffer_device_address` is false"); + } + + if request.size > self.max_memory_allocation_size { + return Err(AllocationError::OutOfDeviceMemory); + } + + if let Some(Dedicated::Required) = dedicated { + if self.allocations_remains == 0 { + return Err(AllocationError::TooManyObjects); + } + } + + if 0 == self.memory_for_usage.mask(request.usage) & request.memory_types { + #[cfg(feature = "tracing")] + tracing::error!( + "Cannot serve request {:?}, no memory among bitset `{}` support usage {:?}", + request, + request.memory_types, + request.usage + ); + + return Err(AllocationError::NoCompatibleMemoryTypes); + } + + let transient = request.usage.contains(UsageFlags::TRANSIENT); + + for &index in self.memory_for_usage.types(request.usage) { + if 0 == request.memory_types & (1 << index) { + // Skip memory type incompatible with the request. + continue; + } + + let memory_type = &self.memory_types[index as usize]; + let heap = memory_type.heap; + let heap = &mut self.memory_heaps[heap as usize]; + + if request.size > heap.size() { + // Impossible to use memory type from this heap. 
+ continue; + } + + let atom_mask = if host_visible_non_coherent(memory_type.props) { + self.non_coherent_atom_mask + } else { + 0 + }; + + let flags = if self.buffer_device_address { + AllocationFlags::DEVICE_ADDRESS + } else { + AllocationFlags::empty() + }; + + let strategy = match (dedicated, transient) { + (Some(Dedicated::Required), _) => Strategy::Dedicated, + (Some(Dedicated::Preferred), _) + if request.size >= self.preferred_dedicated_threshold => + { + Strategy::Dedicated + } + (_, true) => { + let threshold = self.transient_dedicated_threshold.min(heap.size() / 32); + + if request.size < threshold { + Strategy::FreeList + } else { + Strategy::Dedicated + } + } + (_, false) => { + let threshold = self.dedicated_threshold.min(heap.size() / 32); + + if request.size < threshold { + Strategy::Buddy + } else { + Strategy::Dedicated + } + } + }; + + match strategy { + Strategy::Dedicated => { + #[cfg(feature = "tracing")] + tracing::debug!( + "Allocating memory object `{}@{:?}`", + request.size, + memory_type + ); + + match device.allocate_memory(request.size, index, flags) { + Ok(memory) => { + self.allocations_remains -= 1; + heap.alloc(request.size); + + return Ok(MemoryBlock::new( + index, + memory_type.props, + 0, + request.size, + atom_mask, + MemoryBlockFlavor::Dedicated { memory }, + )); + } + Err(OutOfMemory::OutOfDeviceMemory) => continue, + Err(OutOfMemory::OutOfHostMemory) => { + return Err(AllocationError::OutOfHostMemory) + } + } + } + Strategy::FreeList => { + let allocator = match &mut self.freelist_allocators[index as usize] { + Some(allocator) => allocator, + slot => { + let starting_free_list_chunk = match align_down( + self.starting_free_list_chunk.min(heap.size() / 32), + atom_mask, + ) { + 0 => atom_mask, + other => other, + }; + + let final_free_list_chunk = match align_down( + self.final_free_list_chunk + .max(self.starting_free_list_chunk) + .max(self.transient_dedicated_threshold) + .min(heap.size() / 32), + atom_mask, + ) { + 0 => atom_mask, + other => other, + }; + + slot.get_or_insert(FreeListAllocator::new( + starting_free_list_chunk, + final_free_list_chunk, + index, + memory_type.props, + if host_visible_non_coherent(memory_type.props) { + self.non_coherent_atom_mask + } else { + 0 + }, + )) + } + }; + let result = allocator.alloc( + device, + request.size, + request.align_mask, + flags, + heap, + &mut self.allocations_remains, + ); + + match result { + Ok(block) => { + return Ok(MemoryBlock::new( + index, + memory_type.props, + block.offset, + block.size, + atom_mask, + MemoryBlockFlavor::FreeList { + chunk: block.chunk, + ptr: block.ptr, + memory: block.memory, + }, + )) + } + Err(AllocationError::OutOfDeviceMemory) => continue, + Err(err) => return Err(err), + } + } + + Strategy::Buddy => { + let allocator = match &mut self.buddy_allocators[index as usize] { + Some(allocator) => allocator, + slot => { + let minimal_buddy_size = self + .minimal_buddy_size + .min(heap.size() / 1024) + .next_power_of_two(); + + let initial_buddy_dedicated_size = self + .initial_buddy_dedicated_size + .min(heap.size() / 32) + .next_power_of_two(); + + slot.get_or_insert(BuddyAllocator::new( + minimal_buddy_size, + initial_buddy_dedicated_size, + index, + memory_type.props, + if host_visible_non_coherent(memory_type.props) { + self.non_coherent_atom_mask + } else { + 0 + }, + )) + } + }; + let result = allocator.alloc( + device, + request.size, + request.align_mask, + flags, + heap, + &mut self.allocations_remains, + ); + + match result { + Ok(block) => { + return 
Ok(MemoryBlock::new( + index, + memory_type.props, + block.offset, + block.size, + atom_mask, + MemoryBlockFlavor::Buddy { + chunk: block.chunk, + ptr: block.ptr, + index: block.index, + memory: block.memory, + }, + )) + } + Err(AllocationError::OutOfDeviceMemory) => continue, + Err(err) => return Err(err), + } + } + } + } + + Err(AllocationError::OutOfDeviceMemory) + } + + /// Creates a memory block from an existing memory allocation, transferring ownership to the allocator. + /// + /// This function allows the [`GpuAllocator`] to manage memory allocated outside of the typical + /// [`GpuAllocator::alloc`] family of functions. + /// + /// # Usage + /// + /// If you need to import external memory, such as a Win32 `HANDLE` or a Linux `dmabuf`, import the device + /// memory using the graphics api and platform dependent functions. Once that is done, call this function + /// to make the [`GpuAllocator`] take ownership of the imported memory. + /// + /// When calling this function, you **must** ensure there are [enough remaining allocations](GpuAllocator::remaining_allocations). + /// + /// # Safety + /// + /// - The `memory` must be allocated with the same device that was provided to create this [`GpuAllocator`] + /// instance. + /// - The `memory` must be valid. + /// - The `props`, `offset` and `size` must match the properties, offset and size of the memory allocation. + /// - The memory must have been allocated with the specified `memory_type`. + /// - There must be enough remaining allocations. + /// - The memory allocation must not come from an existing memory block created by this allocator. + /// - The underlying memory object must be deallocated using the returned [`MemoryBlock`] with + /// [`GpuAllocator::dealloc`]. + pub unsafe fn import_memory( + &mut self, + memory: M, + memory_type: u32, + props: MemoryPropertyFlags, + offset: u64, + size: u64, + ) -> MemoryBlock { + // Get the heap which the imported memory is from. + let heap = self + .memory_types + .get(memory_type as usize) + .expect("Invalid memory type specified when importing memory") + .heap; + let heap = &mut self.memory_heaps[heap as usize]; + + #[cfg(feature = "tracing")] + tracing::debug!( + "Importing memory object {:?} `{}@{:?}`", + memory, + size, + memory_type + ); + + assert_ne!( + self.allocations_remains, 0, + "Out of allocations when importing a memory block. Ensure you check GpuAllocator::remaining_allocations before import." + ); + self.allocations_remains -= 1; + + let atom_mask = if host_visible_non_coherent(props) { + self.non_coherent_atom_mask + } else { + 0 + }; + + heap.alloc(size); + + MemoryBlock::new( + memory_type, + props, + offset, + size, + atom_mask, + MemoryBlockFlavor::Dedicated { memory }, + ) + } + + /// Deallocates memory block previously allocated from this `GpuAllocator` instance. 
+    ///
+    /// # Safety
+    ///
+    /// * Memory block must have been allocated by this `GpuAllocator` instance
+    /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance
+    /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance
+    /// and memory blocks allocated from it
+    #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))]
+    pub unsafe fn dealloc(&mut self, device: &impl MemoryDevice<M>, block: MemoryBlock<M>) {
+        let memory_type = block.memory_type();
+        let offset = block.offset();
+        let size = block.size();
+        let flavor = block.deallocate();
+        match flavor {
+            MemoryBlockFlavor::Dedicated { memory } => {
+                let heap = self.memory_types[memory_type as usize].heap;
+                device.deallocate_memory(memory);
+                self.allocations_remains += 1;
+                self.memory_heaps[heap as usize].dealloc(size);
+            }
+            MemoryBlockFlavor::Buddy {
+                chunk,
+                ptr,
+                index,
+                memory,
+            } => {
+                let heap = self.memory_types[memory_type as usize].heap;
+                let heap = &mut self.memory_heaps[heap as usize];
+
+                let allocator = self.buddy_allocators[memory_type as usize]
+                    .as_mut()
+                    .expect("Allocator should exist");
+
+                allocator.dealloc(
+                    device,
+                    BuddyBlock {
+                        memory,
+                        ptr,
+                        offset,
+                        size,
+                        chunk,
+                        index,
+                    },
+                    heap,
+                    &mut self.allocations_remains,
+                );
+            }
+            MemoryBlockFlavor::FreeList { chunk, ptr, memory } => {
+                let heap = self.memory_types[memory_type as usize].heap;
+                let heap = &mut self.memory_heaps[heap as usize];
+
+                let allocator = self.freelist_allocators[memory_type as usize]
+                    .as_mut()
+                    .expect("Allocator should exist");
+
+                allocator.dealloc(
+                    device,
+                    FreeListBlock {
+                        memory,
+                        ptr,
+                        chunk,
+                        offset,
+                        size,
+                    },
+                    heap,
+                    &mut self.allocations_remains,
+                );
+            }
+        }
+    }
+
+    /// Returns the maximum allocation size supported.
+    pub fn max_allocation_size(&self) -> u64 {
+        self.max_memory_allocation_size
+    }
+
+    /// Returns the number of remaining available allocations.
+    ///
+    /// This may be useful if you need to know whether the allocator can serve a number of allocations ahead of
+    /// time. This function is also useful for ensuring you do not allocate too much memory outside the allocator
+    /// (such as external memory).
+    pub fn remaining_allocations(&self) -> u32 {
+        self.allocations_remains
+    }
+
+    /// Sets the number of remaining available allocations.
+    ///
+    /// # Safety
+    ///
+    /// The caller is responsible for ensuring the value does not exceed the number of allocations
+    /// actually remaining on the memory device.
+    pub unsafe fn set_remaining_allocations(&mut self, remaining: u32) {
+        self.allocations_remains = remaining;
+    }
+
+    /// Deallocates leftover memory objects.
+    /// Should be used before dropping.
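`import_memory`, `remaining_allocations`, and `set_remaining_allocations` above are the new 0.6.0 surface for externally allocated memory. Since `import_memory` panics when the allocation budget is exhausted, the documented pattern is to check the budget first. A hedged sketch; `adopt_imported` is an illustrative helper:

```rust
use gpu_alloc::{GpuAllocator, MemoryBlock, MemoryBounds};
use gpu_alloc_types::MemoryPropertyFlags;

// `memory` was created outside the allocator (e.g. an imported dmabuf or
// Win32 handle); `memory_type` and `size` must describe it truthfully.
unsafe fn adopt_imported<M: MemoryBounds + 'static>(
    allocator: &mut GpuAllocator<M>,
    memory: M,
    memory_type: u32,
    size: u64,
) -> Option<MemoryBlock<M>> {
    // Check the budget first, as the import_memory docs require.
    if allocator.remaining_allocations() == 0 {
        return None;
    }
    Some(allocator.import_memory(
        memory,
        memory_type,
        MemoryPropertyFlags::DEVICE_LOCAL,
        0, // offset into the memory object
        size,
    ))
}
```

The returned block behaves like any other dedicated block and must eventually go back through `GpuAllocator::dealloc`.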
+ /// + /// # Safety + /// + /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance + /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance + /// and memory blocks allocated from it + #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] + pub unsafe fn cleanup(&mut self, device: &impl MemoryDevice) { + for (index, allocator) in self + .freelist_allocators + .iter_mut() + .enumerate() + .filter_map(|(index, allocator)| Some((index, allocator.as_mut()?))) + { + let memory_type = &self.memory_types[index]; + let heap = memory_type.heap; + let heap = &mut self.memory_heaps[heap as usize]; + + allocator.cleanup(device, heap, &mut self.allocations_remains); + } + } +} + +fn host_visible_non_coherent(props: MemoryPropertyFlags) -> bool { + (props & (MemoryPropertyFlags::HOST_COHERENT | MemoryPropertyFlags::HOST_VISIBLE)) + == MemoryPropertyFlags::HOST_VISIBLE +} + +fn with_implicit_usage_flags(usage: UsageFlags) -> UsageFlags { + if usage.is_empty() { + UsageFlags::FAST_DEVICE_ACCESS + } else if usage.intersects(UsageFlags::DOWNLOAD | UsageFlags::UPLOAD) { + usage | UsageFlags::HOST_ACCESS + } else { + usage + } +} diff --git a/third_party/rust/gpu-alloc/src/block.rs b/third_party/rust/gpu-alloc/src/block.rs index 7cac633bf1f9..03e31d9d5205 100644 --- a/third_party/rust/gpu-alloc/src/block.rs +++ b/third_party/rust/gpu-alloc/src/block.rs @@ -1,327 +1,327 @@ -use { - crate::{align_down, align_up, error::MapError}, - alloc::sync::Arc, - core::{ - convert::TryFrom as _, - ptr::{copy_nonoverlapping, NonNull}, - // sync::atomic::{AtomicU8, Ordering::*}, - }, - gpu_alloc_types::{MappedMemoryRange, MemoryDevice, MemoryPropertyFlags}, -}; - -#[derive(Debug)] -struct Relevant; - -impl Drop for Relevant { - fn drop(&mut self) { - report_error_on_drop!("Memory block wasn't deallocated"); - } -} - -/// Memory block allocated by `GpuAllocator`. -#[derive(Debug)] -pub struct MemoryBlock { - memory_type: u32, - props: MemoryPropertyFlags, - offset: u64, - size: u64, - atom_mask: u64, - mapped: bool, - flavor: MemoryBlockFlavor, - relevant: Relevant, -} - -impl MemoryBlock { - pub(crate) fn new( - memory_type: u32, - props: MemoryPropertyFlags, - offset: u64, - size: u64, - atom_mask: u64, - flavor: MemoryBlockFlavor, - ) -> Self { - isize::try_from(atom_mask).expect("`atom_mask` is too large"); - MemoryBlock { - memory_type, - props, - offset, - size, - atom_mask, - flavor, - mapped: false, - relevant: Relevant, - } - } - - pub(crate) fn deallocate(self) -> MemoryBlockFlavor { - core::mem::forget(self.relevant); - self.flavor - } -} - -unsafe impl Sync for MemoryBlock where M: Sync {} -unsafe impl Send for MemoryBlock where M: Send {} - -#[derive(Debug)] -pub(crate) enum MemoryBlockFlavor { - Dedicated { - memory: M, - }, - Buddy { - chunk: usize, - index: usize, - ptr: Option>, - memory: Arc, - }, - FreeList { - chunk: u64, - ptr: Option>, - memory: Arc, - }, -} - -impl MemoryBlock { - /// Returns reference to parent memory object. - #[inline(always)] - pub fn memory(&self) -> &M { - match &self.flavor { - MemoryBlockFlavor::Dedicated { memory } => memory, - MemoryBlockFlavor::Buddy { memory, .. } => &**memory, - MemoryBlockFlavor::FreeList { memory, .. } => &**memory, - } - } - - /// Returns offset in bytes from start of memory object to start of this block. - #[inline(always)] - pub fn offset(&self) -> u64 { - self.offset - } - - /// Returns size of this memory block. 
- #[inline(always)] - pub fn size(&self) -> u64 { - self.size - } - - /// Returns memory property flags for parent memory object. - #[inline(always)] - pub fn props(&self) -> MemoryPropertyFlags { - self.props - } - - /// Returns index of type of parent memory object. - #[inline(always)] - pub fn memory_type(&self) -> u32 { - self.memory_type - } - - /// Returns pointer to mapped memory range of this block. - /// This blocks becomes mapped. - /// - /// The user of returned pointer must guarantee that any previously submitted command that writes to this range has completed - /// before the host reads from or writes to that range, - /// and that any previously submitted command that reads from that range has completed - /// before the host writes to that region. - /// If the device memory was allocated without the `HOST_COHERENT` property flag set, - /// these guarantees must be made for an extended range: - /// the user must round down the start of the range to the nearest multiple of `non_coherent_atom_size`, - /// and round the end of the range up to the nearest multiple of `non_coherent_atom_size`. - /// - /// # Panics - /// - /// This function panics if block is currently mapped. - /// - /// # Safety - /// - /// `block` must have been allocated from specified `device`. - #[inline(always)] - pub unsafe fn map( - &mut self, - device: &impl MemoryDevice, - offset: u64, - size: usize, - ) -> Result, MapError> { - let size_u64 = u64::try_from(size).expect("`size` doesn't fit device address space"); - assert!(offset < self.size, "`offset` is out of memory block bounds"); - assert!( - size_u64 <= self.size - offset, - "`offset + size` is out of memory block bounds" - ); - - let ptr = match &mut self.flavor { - MemoryBlockFlavor::Dedicated { memory } => { - let end = align_up(offset + size_u64, self.atom_mask) - .expect("mapping end doesn't fit device address space"); - let aligned_offset = align_down(offset, self.atom_mask); - - if !acquire_mapping(&mut self.mapped) { - return Err(MapError::AlreadyMapped); - } - let result = - device.map_memory(memory, self.offset + aligned_offset, end - aligned_offset); - - match result { - // the overflow is checked in `Self::new()` - Ok(ptr) => { - let ptr_offset = (offset - aligned_offset) as isize; - ptr.as_ptr().offset(ptr_offset) - } - Err(err) => { - release_mapping(&mut self.mapped); - return Err(err.into()); - } - } - } - MemoryBlockFlavor::FreeList { ptr: Some(ptr), .. } - | MemoryBlockFlavor::Buddy { ptr: Some(ptr), .. } => { - if !acquire_mapping(&mut self.mapped) { - return Err(MapError::AlreadyMapped); - } - let offset_isize = isize::try_from(offset) - .expect("Buddy and linear block should fit host address space"); - ptr.as_ptr().offset(offset_isize) - } - _ => return Err(MapError::NonHostVisible), - }; - - Ok(NonNull::new_unchecked(ptr)) - } - - /// Unmaps memory range of this block that was previously mapped with `Block::map`. - /// This block becomes unmapped. - /// - /// # Panics - /// - /// This function panics if this block is not currently mapped. - /// - /// # Safety - /// - /// `block` must have been allocated from specified `device`. - #[inline(always)] - pub unsafe fn unmap(&mut self, device: &impl MemoryDevice) -> bool { - if !release_mapping(&mut self.mapped) { - return false; - } - match &mut self.flavor { - MemoryBlockFlavor::Dedicated { memory } => { - device.unmap_memory(memory); - } - MemoryBlockFlavor::Buddy { .. } => {} - MemoryBlockFlavor::FreeList { .. 
} => {} - } - true - } - - /// Transiently maps block memory range and copies specified data - /// to the mapped memory range. - /// - /// # Panics - /// - /// This function panics if block is currently mapped. - /// - /// # Safety - /// - /// `block` must have been allocated from specified `device`. - /// The caller must guarantee that any previously submitted command that reads or writes to this range has completed. - #[inline(always)] - pub unsafe fn write_bytes( - &mut self, - device: &impl MemoryDevice, - offset: u64, - data: &[u8], - ) -> Result<(), MapError> { - let size = data.len(); - let ptr = self.map(device, offset, size)?; - - copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), size); - let result = if !self.coherent() { - let aligned_offset = align_down(offset, self.atom_mask); - let end = align_up(offset + data.len() as u64, self.atom_mask).unwrap(); - - device.flush_memory_ranges(&[MappedMemoryRange { - memory: self.memory(), - offset: self.offset + aligned_offset, - size: end - aligned_offset, - }]) - } else { - Ok(()) - }; - - self.unmap(device); - result.map_err(Into::into) - } - - /// Transiently maps block memory range and copies specified data - /// from the mapped memory range. - /// - /// # Panics - /// - /// This function panics if block is currently mapped. - /// - /// # Safety - /// - /// `block` must have been allocated from specified `device`. - /// The caller must guarantee that any previously submitted command that reads to this range has completed. - #[inline(always)] - pub unsafe fn read_bytes( - &mut self, - device: &impl MemoryDevice, - offset: u64, - data: &mut [u8], - ) -> Result<(), MapError> { - #[cfg(feature = "tracing")] - { - if !self.cached() { - tracing::warn!("Reading from non-cached memory may be slow. Consider allocating HOST_CACHED memory block for host reads.") - } - } - - let size = data.len(); - let ptr = self.map(device, offset, size)?; - let result = if !self.coherent() { - let aligned_offset = align_down(offset, self.atom_mask); - let end = align_up(offset + data.len() as u64, self.atom_mask).unwrap(); - - device.invalidate_memory_ranges(&[MappedMemoryRange { - memory: self.memory(), - offset: self.offset + aligned_offset, - size: end - aligned_offset, - }]) - } else { - Ok(()) - }; - if result.is_ok() { - copy_nonoverlapping(ptr.as_ptr(), data.as_mut_ptr(), size); - } - - self.unmap(device); - result.map_err(Into::into) - } - - fn coherent(&self) -> bool { - self.props.contains(MemoryPropertyFlags::HOST_COHERENT) - } - - #[cfg(feature = "tracing")] - fn cached(&self) -> bool { - self.props.contains(MemoryPropertyFlags::HOST_CACHED) - } -} - -fn acquire_mapping(mapped: &mut bool) -> bool { - if *mapped { - false - } else { - *mapped = true; - true - } -} - -fn release_mapping(mapped: &mut bool) -> bool { - if *mapped { - *mapped = false; - true - } else { - false - } -} +use { + crate::{align_down, align_up, error::MapError}, + alloc::sync::Arc, + core::{ + convert::TryFrom as _, + ptr::{copy_nonoverlapping, NonNull}, + // sync::atomic::{AtomicU8, Ordering::*}, + }, + gpu_alloc_types::{MappedMemoryRange, MemoryDevice, MemoryPropertyFlags}, +}; + +#[derive(Debug)] +struct Relevant; + +impl Drop for Relevant { + fn drop(&mut self) { + report_error_on_drop!("Memory block wasn't deallocated"); + } +} + +/// Memory block allocated by `GpuAllocator`. 
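The zero-sized `Relevant` guard above is how `MemoryBlock` detects leaks: dropping a block without returning it to the allocator trips `report_error_on_drop!`, while `deallocate` defuses the guard with `mem::forget`. The same pattern in miniature, with all names illustrative:

```rust
#[derive(Debug)]
struct MustDeallocate;

impl Drop for MustDeallocate {
    fn drop(&mut self) {
        // gpu-alloc routes this through report_error_on_drop!
        // (log, tracing, or panic depending on features).
        eprintln!("Memory block wasn't deallocated");
    }
}

struct Block {
    guard: MustDeallocate,
}

impl Block {
    // Consuming the block "defuses" the guard, so no error fires.
    fn deallocate(self) {
        core::mem::forget(self.guard);
    }
}

fn main() {
    let ok = Block { guard: MustDeallocate };
    ok.deallocate(); // silent

    let leaked = Block { guard: MustDeallocate };
    drop(leaked); // prints the drop-guard error
}
```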
+#[derive(Debug)]
+pub struct MemoryBlock<M> {
+    memory_type: u32,
+    props: MemoryPropertyFlags,
+    offset: u64,
+    size: u64,
+    atom_mask: u64,
+    mapped: bool,
+    flavor: MemoryBlockFlavor<M>,
+    relevant: Relevant,
+}
+
+impl<M> MemoryBlock<M> {
+    pub(crate) fn new(
+        memory_type: u32,
+        props: MemoryPropertyFlags,
+        offset: u64,
+        size: u64,
+        atom_mask: u64,
+        flavor: MemoryBlockFlavor<M>,
+    ) -> Self {
+        isize::try_from(atom_mask).expect("`atom_mask` is too large");
+        MemoryBlock {
+            memory_type,
+            props,
+            offset,
+            size,
+            atom_mask,
+            flavor,
+            mapped: false,
+            relevant: Relevant,
+        }
+    }
+
+    pub(crate) fn deallocate(self) -> MemoryBlockFlavor<M> {
+        core::mem::forget(self.relevant);
+        self.flavor
+    }
+}
+
+unsafe impl<M> Sync for MemoryBlock<M> where M: Sync {}
+unsafe impl<M> Send for MemoryBlock<M> where M: Send {}
+
+#[derive(Debug)]
+pub(crate) enum MemoryBlockFlavor<M> {
+    Dedicated {
+        memory: M,
+    },
+    Buddy {
+        chunk: usize,
+        index: usize,
+        ptr: Option<NonNull<u8>>,
+        memory: Arc<M>,
+    },
+    FreeList {
+        chunk: u64,
+        ptr: Option<NonNull<u8>>,
+        memory: Arc<M>,
+    },
+}
+
+impl<M> MemoryBlock<M> {
+    /// Returns reference to parent memory object.
+    #[inline(always)]
+    pub fn memory(&self) -> &M {
+        match &self.flavor {
+            MemoryBlockFlavor::Dedicated { memory } => memory,
+            MemoryBlockFlavor::Buddy { memory, .. } => memory,
+            MemoryBlockFlavor::FreeList { memory, .. } => memory,
+        }
+    }
+
+    /// Returns offset in bytes from start of memory object to start of this block.
+    #[inline(always)]
+    pub fn offset(&self) -> u64 {
+        self.offset
+    }
+
+    /// Returns size of this memory block.
+    #[inline(always)]
+    pub fn size(&self) -> u64 {
+        self.size
+    }
+
+    /// Returns memory property flags for parent memory object.
+    #[inline(always)]
+    pub fn props(&self) -> MemoryPropertyFlags {
+        self.props
+    }
+
+    /// Returns index of type of parent memory object.
+    #[inline(always)]
+    pub fn memory_type(&self) -> u32 {
+        self.memory_type
+    }
+
+    /// Returns pointer to mapped memory range of this block.
+    /// This block becomes mapped.
+    ///
+    /// The user of returned pointer must guarantee that any previously submitted command that writes to this range has completed
+    /// before the host reads from or writes to that range,
+    /// and that any previously submitted command that reads from that range has completed
+    /// before the host writes to that region.
+    /// If the device memory was allocated without the `HOST_COHERENT` property flag set,
+    /// these guarantees must be made for an extended range:
+    /// the user must round down the start of the range to the nearest multiple of `non_coherent_atom_size`,
+    /// and round the end of the range up to the nearest multiple of `non_coherent_atom_size`.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if block is currently mapped.
+    ///
+    /// # Safety
+    ///
+    /// `block` must have been allocated from specified `device`.
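Most one-shot host access does not need explicit `map`/`unmap` at all: the `write_bytes`/`read_bytes` helpers defined further below bundle the map, the copy, the flush or invalidate of non-coherent atoms, and the unmap. A hedged sketch of a staging upload; `upload` is an illustrative helper:

```rust
use gpu_alloc::{MapError, MemoryBlock};
use gpu_alloc_types::MemoryDevice;

// One-shot upload into a HOST_VISIBLE block; `write_bytes` handles the
// non_coherent_atom_size rounding described in the map() docs above.
unsafe fn upload<M>(
    device: &impl MemoryDevice<M>,
    block: &mut MemoryBlock<M>,
    payload: &[u8],
) -> Result<(), MapError> {
    block.write_bytes(device, 0, payload)
}
```

Explicit `map`/`unmap` remains the right tool for longer-lived mappings, at the cost of the caller owning the coherency rounding.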
+ #[inline(always)] + pub unsafe fn map( + &mut self, + device: &impl MemoryDevice, + offset: u64, + size: usize, + ) -> Result, MapError> { + let size_u64 = u64::try_from(size).expect("`size` doesn't fit device address space"); + assert!(offset < self.size, "`offset` is out of memory block bounds"); + assert!( + size_u64 <= self.size - offset, + "`offset + size` is out of memory block bounds" + ); + + let ptr = match &mut self.flavor { + MemoryBlockFlavor::Dedicated { memory } => { + let end = align_up(offset + size_u64, self.atom_mask) + .expect("mapping end doesn't fit device address space"); + let aligned_offset = align_down(offset, self.atom_mask); + + if !acquire_mapping(&mut self.mapped) { + return Err(MapError::AlreadyMapped); + } + let result = + device.map_memory(memory, self.offset + aligned_offset, end - aligned_offset); + + match result { + // the overflow is checked in `Self::new()` + Ok(ptr) => { + let ptr_offset = (offset - aligned_offset) as isize; + ptr.as_ptr().offset(ptr_offset) + } + Err(err) => { + release_mapping(&mut self.mapped); + return Err(err.into()); + } + } + } + MemoryBlockFlavor::FreeList { ptr: Some(ptr), .. } + | MemoryBlockFlavor::Buddy { ptr: Some(ptr), .. } => { + if !acquire_mapping(&mut self.mapped) { + return Err(MapError::AlreadyMapped); + } + let offset_isize = isize::try_from(offset) + .expect("Buddy and linear block should fit host address space"); + ptr.as_ptr().offset(offset_isize) + } + _ => return Err(MapError::NonHostVisible), + }; + + Ok(NonNull::new_unchecked(ptr)) + } + + /// Unmaps memory range of this block that was previously mapped with `Block::map`. + /// This block becomes unmapped. + /// + /// # Panics + /// + /// This function panics if this block is not currently mapped. + /// + /// # Safety + /// + /// `block` must have been allocated from specified `device`. + #[inline(always)] + pub unsafe fn unmap(&mut self, device: &impl MemoryDevice) -> bool { + if !release_mapping(&mut self.mapped) { + return false; + } + match &mut self.flavor { + MemoryBlockFlavor::Dedicated { memory } => { + device.unmap_memory(memory); + } + MemoryBlockFlavor::Buddy { .. } => {} + MemoryBlockFlavor::FreeList { .. } => {} + } + true + } + + /// Transiently maps block memory range and copies specified data + /// to the mapped memory range. + /// + /// # Panics + /// + /// This function panics if block is currently mapped. + /// + /// # Safety + /// + /// `block` must have been allocated from specified `device`. + /// The caller must guarantee that any previously submitted command that reads or writes to this range has completed. + #[inline(always)] + pub unsafe fn write_bytes( + &mut self, + device: &impl MemoryDevice, + offset: u64, + data: &[u8], + ) -> Result<(), MapError> { + let size = data.len(); + let ptr = self.map(device, offset, size)?; + + copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), size); + let result = if !self.coherent() { + let aligned_offset = align_down(offset, self.atom_mask); + let end = align_up(offset + data.len() as u64, self.atom_mask).unwrap(); + + device.flush_memory_ranges(&[MappedMemoryRange { + memory: self.memory(), + offset: self.offset + aligned_offset, + size: end - aligned_offset, + }]) + } else { + Ok(()) + }; + + self.unmap(device); + result.map_err(Into::into) + } + + /// Transiently maps block memory range and copies specified data + /// from the mapped memory range. + /// + /// # Panics + /// + /// This function panics if block is currently mapped. 
+ /// + /// # Safety + /// + /// `block` must have been allocated from specified `device`. + /// The caller must guarantee that any previously submitted command that reads to this range has completed. + #[inline(always)] + pub unsafe fn read_bytes( + &mut self, + device: &impl MemoryDevice, + offset: u64, + data: &mut [u8], + ) -> Result<(), MapError> { + #[cfg(feature = "tracing")] + { + if !self.cached() { + tracing::warn!("Reading from non-cached memory may be slow. Consider allocating HOST_CACHED memory block for host reads.") + } + } + + let size = data.len(); + let ptr = self.map(device, offset, size)?; + let result = if !self.coherent() { + let aligned_offset = align_down(offset, self.atom_mask); + let end = align_up(offset + data.len() as u64, self.atom_mask).unwrap(); + + device.invalidate_memory_ranges(&[MappedMemoryRange { + memory: self.memory(), + offset: self.offset + aligned_offset, + size: end - aligned_offset, + }]) + } else { + Ok(()) + }; + if result.is_ok() { + copy_nonoverlapping(ptr.as_ptr(), data.as_mut_ptr(), size); + } + + self.unmap(device); + result.map_err(Into::into) + } + + fn coherent(&self) -> bool { + self.props.contains(MemoryPropertyFlags::HOST_COHERENT) + } + + #[cfg(feature = "tracing")] + fn cached(&self) -> bool { + self.props.contains(MemoryPropertyFlags::HOST_CACHED) + } +} + +fn acquire_mapping(mapped: &mut bool) -> bool { + if *mapped { + false + } else { + *mapped = true; + true + } +} + +fn release_mapping(mapped: &mut bool) -> bool { + if *mapped { + *mapped = false; + true + } else { + false + } +} diff --git a/third_party/rust/gpu-alloc/src/buddy.rs b/third_party/rust/gpu-alloc/src/buddy.rs index 5691f4d96d75..dea303b948a8 100644 --- a/third_party/rust/gpu-alloc/src/buddy.rs +++ b/third_party/rust/gpu-alloc/src/buddy.rs @@ -1,458 +1,460 @@ -use { - crate::{ - align_up, error::AllocationError, heap::Heap, slab::Slab, unreachable_unchecked, - util::try_arc_unwrap, MemoryBounds, - }, - alloc::{sync::Arc, vec::Vec}, - core::{convert::TryFrom as _, mem::replace, ptr::NonNull}, - gpu_alloc_types::{AllocationFlags, DeviceMapError, MemoryDevice, MemoryPropertyFlags}, -}; - -#[derive(Debug)] -pub(crate) struct BuddyBlock { - pub memory: Arc, - pub ptr: Option>, - pub offset: u64, - pub size: u64, - pub chunk: usize, - pub index: usize, -} - -unsafe impl Sync for BuddyBlock where M: Sync {} -unsafe impl Send for BuddyBlock where M: Send {} - -#[derive(Clone, Copy, Debug)] -enum PairState { - Exhausted, - Ready { - ready: Side, - next: usize, - prev: usize, - }, -} - -impl PairState { - unsafe fn replace_next(&mut self, value: usize) -> usize { - match self { - PairState::Exhausted => unreachable_unchecked(), - PairState::Ready { next, .. } => replace(next, value), - } - } - - unsafe fn replace_prev(&mut self, value: usize) -> usize { - match self { - PairState::Exhausted => unreachable_unchecked(), - PairState::Ready { prev, .. 
} => replace(prev, value), - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -enum Side { - Left, - Right, -} -use Side::*; - -#[derive(Debug)] -struct PairEntry { - state: PairState, - chunk: usize, - offset: u64, - parent: Option, -} - -struct SizeBlockEntry { - chunk: usize, - offset: u64, - index: usize, -} - -#[derive(Debug)] -struct Size { - next_ready: usize, - pairs: Slab, -} -#[derive(Debug)] -enum Release { - None, - Parent(usize), - Chunk(usize), -} - -impl Size { - fn new() -> Self { - Size { - pairs: Slab::new(), - next_ready: 0, - } - } - - unsafe fn add_pair_and_acquire_left( - &mut self, - chunk: usize, - offset: u64, - parent: Option, - ) -> SizeBlockEntry { - if self.next_ready < self.pairs.len() { - unreachable_unchecked() - } - - let index = self.pairs.insert(PairEntry { - state: PairState::Exhausted, - chunk, - offset, - parent, - }); - - let entry = self.pairs.get_unchecked_mut(index); - entry.state = PairState::Ready { - next: index, - prev: index, - ready: Right, // Left is allocated. - }; - self.next_ready = index; - - SizeBlockEntry { - chunk, - offset, - index: index << 1, - } - } - - fn acquire(&mut self, size: u64) -> Option { - if self.next_ready >= self.pairs.len() { - return None; - } - - let ready = self.next_ready; - - let entry = unsafe { self.pairs.get_unchecked_mut(ready) }; - let chunk = entry.chunk; - let offset = entry.offset; - - let bit = match entry.state { - PairState::Exhausted => unsafe { unreachable_unchecked() }, - PairState::Ready { ready, next, prev } => { - entry.state = PairState::Exhausted; - - if prev == self.next_ready { - // The only ready entry. - debug_assert_eq!(next, self.next_ready); - self.next_ready = self.pairs.len(); - } else { - let prev_entry = unsafe { self.pairs.get_unchecked_mut(prev) }; - let prev_next = unsafe { prev_entry.state.replace_next(next) }; - debug_assert_eq!(prev_next, self.next_ready); - - let next_entry = unsafe { self.pairs.get_unchecked_mut(next) }; - let next_prev = unsafe { next_entry.state.replace_prev(prev) }; - debug_assert_eq!(next_prev, self.next_ready); - - self.next_ready = next; - } - - match ready { - Left => 0, - Right => 1, - } - } - }; - - Some(SizeBlockEntry { - chunk, - offset: offset + bit as u64 * size, - index: (ready << 1) | bit as usize, - }) - } - - fn release(&mut self, index: usize) -> Release { - let side = match index & 1 { - 0 => Side::Left, - 1 => Side::Right, - _ => unsafe { unreachable_unchecked() }, - }; - let entry_index = index >> 1; - - let len = self.pairs.len(); - - let entry = self.pairs.get_mut(entry_index); - - let chunk = entry.chunk; - let offset = entry.offset; - let parent = entry.parent; - - match entry.state { - PairState::Exhausted => { - if self.next_ready == len { - entry.state = PairState::Ready { - ready: side, - next: entry_index, - prev: entry_index, - }; - self.next_ready = entry_index; - } else { - debug_assert!(self.next_ready < len); - - let next = self.next_ready; - let next_entry = unsafe { self.pairs.get_unchecked_mut(next) }; - let prev = unsafe { next_entry.state.replace_prev(entry_index) }; - - let prev_entry = unsafe { self.pairs.get_unchecked_mut(prev) }; - let prev_next = unsafe { prev_entry.state.replace_next(entry_index) }; - debug_assert_eq!(prev_next, next); - - let entry = unsafe { self.pairs.get_unchecked_mut(entry_index) }; - entry.state = PairState::Ready { - ready: side, - next, - prev, - }; - } - Release::None - } - - PairState::Ready { ready, .. 
} if ready == side => { - panic!("Attempt to dealloate already free block") - } - - PairState::Ready { next, prev, .. } => { - unsafe { - self.pairs.remove_unchecked(entry_index); - } - - if prev == entry_index { - debug_assert_eq!(next, entry_index); - self.next_ready = self.pairs.len(); - } else { - let prev_entry = unsafe { self.pairs.get_unchecked_mut(prev) }; - let prev_next = unsafe { prev_entry.state.replace_next(next) }; - debug_assert_eq!(prev_next, entry_index); - - let next_entry = unsafe { self.pairs.get_unchecked_mut(next) }; - let next_prev = unsafe { next_entry.state.replace_prev(prev) }; - debug_assert_eq!(next_prev, entry_index); - - self.next_ready = next; - } - - match parent { - Some(parent) => Release::Parent(parent), - None => { - debug_assert_eq!(offset, 0); - Release::Chunk(chunk) - } - } - } - } - } -} - -#[derive(Debug)] -struct Chunk { - memory: Arc, - ptr: Option>, - size: u64, -} - -#[derive(Debug)] -pub(crate) struct BuddyAllocator { - minimal_size: u64, - chunks: Slab>, - sizes: Vec, - memory_type: u32, - props: MemoryPropertyFlags, - atom_mask: u64, -} - -unsafe impl Sync for BuddyAllocator where M: Sync {} -unsafe impl Send for BuddyAllocator where M: Send {} - -impl BuddyAllocator -where - M: MemoryBounds + 'static, -{ - pub fn new( - minimal_size: u64, - initial_dedicated_size: u64, - memory_type: u32, - props: MemoryPropertyFlags, - atom_mask: u64, - ) -> Self { - assert!( - minimal_size.is_power_of_two(), - "Minimal allocation size of buddy allocator must be power of two" - ); - assert!( - initial_dedicated_size.is_power_of_two(), - "Dedicated allocation size of buddy allocator must be power of two" - ); - - let initial_sizes = (initial_dedicated_size - .trailing_zeros() - .saturating_sub(minimal_size.trailing_zeros())) as usize; - - BuddyAllocator { - minimal_size, - chunks: Slab::new(), - sizes: (0..initial_sizes).map(|_| Size::new()).collect(), - memory_type, - props, - atom_mask: atom_mask | (minimal_size - 1), - } - } - - #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] - pub unsafe fn alloc( - &mut self, - device: &impl MemoryDevice, - size: u64, - align_mask: u64, - flags: AllocationFlags, - heap: &mut Heap, - allocations_remains: &mut u32, - ) -> Result, AllocationError> { - let align_mask = align_mask | self.atom_mask; - - let size = align_up(size, align_mask) - .and_then(|size| size.checked_next_power_of_two()) - .ok_or(AllocationError::OutOfDeviceMemory)?; - - let size_index = size.trailing_zeros() - self.minimal_size.trailing_zeros(); - let size_index = - usize::try_from(size_index).map_err(|_| AllocationError::OutOfDeviceMemory)?; - - while self.sizes.len() <= size_index { - self.sizes.push(Size::new()); - } - - let host_visible = self.host_visible(); - - let mut candidate_size_index = size_index; - - let (mut entry, entry_size_index) = loop { - let sizes_len = self.sizes.len(); - - let candidate_size_entry = &mut self.sizes[candidate_size_index]; - let candidate_size = self.minimal_size << candidate_size_index; - - if let Some(entry) = candidate_size_entry.acquire(candidate_size) { - break (entry, candidate_size_index); - } - - if sizes_len == candidate_size_index + 1 { - // That's size of device allocation. 
-                if *allocations_remains == 0 {
-                    return Err(AllocationError::TooManyObjects);
-                }
-
-                let chunk_size = self.minimal_size << (candidate_size_index + 1);
-                let mut memory = device.allocate_memory(chunk_size, self.memory_type, flags)?;
-                *allocations_remains -= 1;
-                heap.alloc(chunk_size);
-
-                let ptr = if host_visible {
-                    match device.map_memory(&mut memory, 0, chunk_size) {
-                        Ok(ptr) => Some(ptr),
-                        Err(DeviceMapError::OutOfDeviceMemory) => {
-                            return Err(AllocationError::OutOfDeviceMemory)
-                        }
-                        Err(DeviceMapError::MapFailed) | Err(DeviceMapError::OutOfHostMemory) => {
-                            return Err(AllocationError::OutOfHostMemory)
-                        }
-                    }
-                } else {
-                    None
-                };
-
-                let chunk = self.chunks.insert(Chunk {
-                    memory: Arc::new(memory),
-                    ptr,
-                    size: chunk_size,
-                });
-
-                let entry = candidate_size_entry.add_pair_and_acquire_left(chunk, 0, None);
-
-                break (entry, candidate_size_index);
-            }
-
-            candidate_size_index += 1;
-        };
-
-        for size_index in (size_index..entry_size_index).rev() {
-            let size_entry = &mut self.sizes[size_index];
-            entry =
-                size_entry.add_pair_and_acquire_left(entry.chunk, entry.offset, Some(entry.index));
-        }
-
-        let chunk_entry = self.chunks.get_unchecked(entry.chunk);
-
-        debug_assert!(
-            entry
-                .offset
-                .checked_add(size)
-                .map_or(false, |end| end <= chunk_entry.size),
-            "Offset + size is not in chunk bounds"
-        );
-
-        Ok(BuddyBlock {
-            memory: chunk_entry.memory.clone(),
-            ptr: chunk_entry
-                .ptr
-                .map(|ptr| NonNull::new_unchecked(ptr.as_ptr().add(entry.offset as usize))),
-            offset: entry.offset,
-            size,
-            chunk: entry.chunk,
-            index: entry.index,
-        })
-    }
-
-    #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))]
-    pub unsafe fn dealloc(
-        &mut self,
-        device: &impl MemoryDevice<M>,
-        block: BuddyBlock<M>,
-        heap: &mut Heap,
-        allocations_remains: &mut u32,
-    ) {
-        debug_assert!(block.size.is_power_of_two());
-
-        let size_index =
-            (block.size.trailing_zeros() - self.minimal_size.trailing_zeros()) as usize;
-
-        let mut release_index = block.index;
-        let mut release_size_index = size_index;
-
-        loop {
-            match self.sizes[release_size_index].release(release_index) {
-                Release::Parent(parent) => {
-                    release_size_index += 1;
-                    release_index = parent;
-                }
-                Release::Chunk(chunk) => {
-                    debug_assert_eq!(chunk, block.chunk);
-                    debug_assert_eq!(
-                        self.chunks.get(chunk).size,
-                        self.minimal_size << (release_size_index + 1)
-                    );
-                    let chunk = self.chunks.remove(chunk);
-                    drop(block);
-
-                    let memory = try_arc_unwrap(chunk.memory)
-                        .expect("Memory shared after last block deallocated");
-
-                    device.deallocate_memory(memory);
-                    *allocations_remains += 1;
-                    heap.dealloc(chunk.size);
-
-                    return;
-                }
-                Release::None => return,
-            }
-        }
-    }
-
-    fn host_visible(&self) -> bool {
-        self.props.contains(MemoryPropertyFlags::HOST_VISIBLE)
-    }
-}
+use {
+    crate::{
+        align_up, error::AllocationError, heap::Heap, slab::Slab, unreachable_unchecked,
+        util::try_arc_unwrap, MemoryBounds,
+    },
+    alloc::{sync::Arc, vec::Vec},
+    core::{convert::TryFrom as _, mem::replace, ptr::NonNull},
+    gpu_alloc_types::{AllocationFlags, DeviceMapError, MemoryDevice, MemoryPropertyFlags},
+};
+
+#[derive(Debug)]
+pub(crate) struct BuddyBlock<M> {
+    pub memory: Arc<M>,
+    pub ptr: Option<NonNull<u8>>,
+    pub offset: u64,
+    pub size: u64,
+    pub chunk: usize,
+    pub index: usize,
+}
+
+unsafe impl<M> Sync for BuddyBlock<M> where M: Sync {}
+unsafe impl<M> Send for BuddyBlock<M> where M: Send {}
+
+#[derive(Clone, Copy, Debug)]
+enum PairState {
+    Exhausted,
+    Ready {
+        ready: Side,
+        next: usize,
+        prev: usize,
+    },
+}
+
+impl PairState {
+    unsafe fn replace_next(&mut self, value: usize) -> usize {
+        match self {
+            PairState::Exhausted => unreachable_unchecked(),
+            PairState::Ready { next, .. } => replace(next, value),
+        }
+    }
+
+    unsafe fn replace_prev(&mut self, value: usize) -> usize {
+        match self {
+            PairState::Exhausted => unreachable_unchecked(),
+            PairState::Ready { prev, .. } => replace(prev, value),
+        }
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum Side {
+    Left,
+    Right,
+}
+use Side::*;
+
+#[derive(Debug)]
+struct PairEntry {
+    state: PairState,
+    chunk: usize,
+    offset: u64,
+    parent: Option<usize>,
+}
+
+struct SizeBlockEntry {
+    chunk: usize,
+    offset: u64,
+    index: usize,
+}
+
+#[derive(Debug)]
+struct Size {
+    next_ready: usize,
+    pairs: Slab<PairEntry>,
+}
+#[derive(Debug)]
+enum Release {
+    None,
+    Parent(usize),
+    Chunk(usize),
+}
+
+impl Size {
+    fn new() -> Self {
+        Size {
+            pairs: Slab::new(),
+            next_ready: 0,
+        }
+    }
+
+    unsafe fn add_pair_and_acquire_left(
+        &mut self,
+        chunk: usize,
+        offset: u64,
+        parent: Option<usize>,
+    ) -> SizeBlockEntry {
+        if self.next_ready < self.pairs.len() {
+            unreachable_unchecked()
+        }
+
+        let index = self.pairs.insert(PairEntry {
+            state: PairState::Exhausted,
+            chunk,
+            offset,
+            parent,
+        });
+
+        let entry = self.pairs.get_unchecked_mut(index);
+        entry.state = PairState::Ready {
+            next: index,
+            prev: index,
+            ready: Right, // Left is allocated.
+        };
+        self.next_ready = index;
+
+        SizeBlockEntry {
+            chunk,
+            offset,
+            index: index << 1,
+        }
+    }
+
+    fn acquire(&mut self, size: u64) -> Option<SizeBlockEntry> {
+        if self.next_ready >= self.pairs.len() {
+            return None;
+        }
+
+        let ready = self.next_ready;
+
+        let entry = unsafe { self.pairs.get_unchecked_mut(ready) };
+        let chunk = entry.chunk;
+        let offset = entry.offset;
+
+        let bit = match entry.state {
+            PairState::Exhausted => unsafe { unreachable_unchecked() },
+            PairState::Ready { ready, next, prev } => {
+                entry.state = PairState::Exhausted;
+
+                if prev == self.next_ready {
+                    // The only ready entry.
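+                    // The ready pairs of one size class form an intrusive
+                    // doubly-linked ring threaded through the `next`/`prev`
+                    // fields of `PairState::Ready`, with `self.next_ready`
+                    // pointing at one member. Here the ring has a single node
+                    // (`prev == next == next_ready`), so exhausting it empties
+                    // the ring and `next_ready` is parked at `pairs.len()` as
+                    // the "no ready pair" sentinel.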
+                    debug_assert_eq!(next, self.next_ready);
+                    self.next_ready = self.pairs.len();
+                } else {
+                    let prev_entry = unsafe { self.pairs.get_unchecked_mut(prev) };
+                    let prev_next = unsafe { prev_entry.state.replace_next(next) };
+                    debug_assert_eq!(prev_next, self.next_ready);
+
+                    let next_entry = unsafe { self.pairs.get_unchecked_mut(next) };
+                    let next_prev = unsafe { next_entry.state.replace_prev(prev) };
+                    debug_assert_eq!(next_prev, self.next_ready);
+
+                    self.next_ready = next;
+                }
+
+                match ready {
+                    Left => 0,
+                    Right => 1,
+                }
+            }
+        };
+
+        Some(SizeBlockEntry {
+            chunk,
+            offset: offset + bit as u64 * size,
+            index: (ready << 1) | bit as usize,
+        })
+    }
+
+    fn release(&mut self, index: usize) -> Release {
+        let side = match index & 1 {
+            0 => Side::Left,
+            1 => Side::Right,
+            _ => unsafe { unreachable_unchecked() },
+        };
+        let entry_index = index >> 1;
+
+        let len = self.pairs.len();
+
+        let entry = self.pairs.get_mut(entry_index);
+
+        let chunk = entry.chunk;
+        let offset = entry.offset;
+        let parent = entry.parent;
+
+        match entry.state {
+            PairState::Exhausted => {
+                if self.next_ready == len {
+                    entry.state = PairState::Ready {
+                        ready: side,
+                        next: entry_index,
+                        prev: entry_index,
+                    };
+                    self.next_ready = entry_index;
+                } else {
+                    debug_assert!(self.next_ready < len);
+
+                    let next = self.next_ready;
+                    let next_entry = unsafe { self.pairs.get_unchecked_mut(next) };
+                    let prev = unsafe { next_entry.state.replace_prev(entry_index) };
+
+                    let prev_entry = unsafe { self.pairs.get_unchecked_mut(prev) };
+                    let prev_next = unsafe { prev_entry.state.replace_next(entry_index) };
+                    debug_assert_eq!(prev_next, next);
+
+                    let entry = unsafe { self.pairs.get_unchecked_mut(entry_index) };
+                    entry.state = PairState::Ready {
+                        ready: side,
+                        next,
+                        prev,
+                    };
+                }
+                Release::None
+            }
+
+            PairState::Ready { ready, .. } if ready == side => {
+                panic!("Attempt to deallocate already free block")
+            }
+
+            PairState::Ready { next, prev, .. } => {
+                unsafe {
+                    self.pairs.remove_unchecked(entry_index);
+                }
+
+                if prev == entry_index {
+                    debug_assert_eq!(next, entry_index);
+                    self.next_ready = self.pairs.len();
+                } else {
+                    let prev_entry = unsafe { self.pairs.get_unchecked_mut(prev) };
+                    let prev_next = unsafe { prev_entry.state.replace_next(next) };
+                    debug_assert_eq!(prev_next, entry_index);
+
+                    let next_entry = unsafe { self.pairs.get_unchecked_mut(next) };
+                    let next_prev = unsafe { next_entry.state.replace_prev(prev) };
+                    debug_assert_eq!(next_prev, entry_index);
+
+                    self.next_ready = next;
+                }
+
+                match parent {
+                    Some(parent) => Release::Parent(parent),
+                    None => {
+                        debug_assert_eq!(offset, 0);
+                        Release::Chunk(chunk)
+                    }
+                }
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+struct Chunk<M> {
+    memory: Arc<M>,
+    ptr: Option<NonNull<u8>>,
+    size: u64,
+}
+
+#[derive(Debug)]
+pub(crate) struct BuddyAllocator<M> {
+    minimal_size: u64,
+    chunks: Slab<Chunk<M>>,
+    sizes: Vec<Size>,
+    memory_type: u32,
+    props: MemoryPropertyFlags,
+    atom_mask: u64,
+}
+
+unsafe impl<M> Sync for BuddyAllocator<M> where M: Sync {}
+unsafe impl<M> Send for BuddyAllocator<M> where M: Send {}
+
+impl<M> BuddyAllocator<M>
+where
+    M: MemoryBounds + 'static,
+{
+    pub fn new(
+        minimal_size: u64,
+        initial_dedicated_size: u64,
+        memory_type: u32,
+        props: MemoryPropertyFlags,
+        atom_mask: u64,
+    ) -> Self {
+        assert!(
+            minimal_size.is_power_of_two(),
+            "Minimal allocation size of buddy allocator must be power of two"
+        );
+        assert!(
+            initial_dedicated_size.is_power_of_two(),
+            "Dedicated allocation size of buddy allocator must be power of two"
+        );
+
+        let initial_sizes = (initial_dedicated_size
+            .trailing_zeros()
+            .saturating_sub(minimal_size.trailing_zeros())) as usize;
+
+        BuddyAllocator {
+            minimal_size,
+            chunks: Slab::new(),
+            sizes: (0..initial_sizes).map(|_| Size::new()).collect(),
+            memory_type,
+            props,
+            atom_mask: atom_mask | (minimal_size - 1),
+        }
+    }
+
+    #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))]
+    pub unsafe fn alloc(
+        &mut self,
+        device: &impl MemoryDevice<M>,
+        size: u64,
+        align_mask: u64,
+        flags: AllocationFlags,
+        heap: &mut Heap,
+        allocations_remains: &mut u32,
+    ) -> Result<BuddyBlock<M>, AllocationError> {
+        let align_mask = align_mask | self.atom_mask;
+
+        let size = align_up(size, align_mask)
+            .and_then(|size| size.checked_next_power_of_two())
+            .ok_or(AllocationError::OutOfDeviceMemory)?;
+
+        let size = size.max(self.minimal_size);
+
+        let size_index = size.trailing_zeros() - self.minimal_size.trailing_zeros();
+        let size_index =
+            usize::try_from(size_index).map_err(|_| AllocationError::OutOfDeviceMemory)?;
+
+        while self.sizes.len() <= size_index {
+            self.sizes.push(Size::new());
+        }
+
+        let host_visible = self.host_visible();
+
+        let mut candidate_size_index = size_index;
+
+        let (mut entry, entry_size_index) = loop {
+            let sizes_len = self.sizes.len();
+
+            let candidate_size_entry = &mut self.sizes[candidate_size_index];
+            let candidate_size = self.minimal_size << candidate_size_index;
+
+            if let Some(entry) = candidate_size_entry.acquire(candidate_size) {
+                break (entry, candidate_size_index);
+            }
+
+            if sizes_len == candidate_size_index + 1 {
+                // That's size of device allocation.
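+                // Illustrative numbers (not from the original source): with
+                // `minimal_size = 1 KiB`, a 3 KiB request is rounded up to the
+                // power of two 4 KiB, so `size_index = 12 - 10 = 2`. When no
+                // size class up to the largest has a ready buddy, the branch
+                // below allocates a fresh device chunk of
+                // `minimal_size << (index + 1)` bytes, and the `for` loop after
+                // this one splits it back down to `size_index`.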
+                if *allocations_remains == 0 {
+                    return Err(AllocationError::TooManyObjects);
+                }
+
+                let chunk_size = self.minimal_size << (candidate_size_index + 1);
+                let mut memory = device.allocate_memory(chunk_size, self.memory_type, flags)?;
+                *allocations_remains -= 1;
+                heap.alloc(chunk_size);
+
+                let ptr = if host_visible {
+                    match device.map_memory(&mut memory, 0, chunk_size) {
+                        Ok(ptr) => Some(ptr),
+                        Err(DeviceMapError::OutOfDeviceMemory) => {
+                            return Err(AllocationError::OutOfDeviceMemory)
+                        }
+                        Err(DeviceMapError::MapFailed) | Err(DeviceMapError::OutOfHostMemory) => {
+                            return Err(AllocationError::OutOfHostMemory)
+                        }
+                    }
+                } else {
+                    None
+                };
+
+                let chunk = self.chunks.insert(Chunk {
+                    memory: Arc::new(memory),
+                    ptr,
+                    size: chunk_size,
+                });
+
+                let entry = candidate_size_entry.add_pair_and_acquire_left(chunk, 0, None);
+
+                break (entry, candidate_size_index);
+            }
+
+            candidate_size_index += 1;
+        };
+
+        for size_index in (size_index..entry_size_index).rev() {
+            let size_entry = &mut self.sizes[size_index];
+            entry =
+                size_entry.add_pair_and_acquire_left(entry.chunk, entry.offset, Some(entry.index));
+        }
+
+        let chunk_entry = self.chunks.get_unchecked(entry.chunk);
+
+        debug_assert!(
+            entry
+                .offset
+                .checked_add(size)
+                .map_or(false, |end| end <= chunk_entry.size),
+            "Offset + size is not in chunk bounds"
+        );
+
+        Ok(BuddyBlock {
+            memory: chunk_entry.memory.clone(),
+            ptr: chunk_entry
+                .ptr
+                .map(|ptr| NonNull::new_unchecked(ptr.as_ptr().add(entry.offset as usize))),
+            offset: entry.offset,
+            size,
+            chunk: entry.chunk,
+            index: entry.index,
+        })
+    }
+
+    #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))]
+    pub unsafe fn dealloc(
+        &mut self,
+        device: &impl MemoryDevice<M>,
+        block: BuddyBlock<M>,
+        heap: &mut Heap,
+        allocations_remains: &mut u32,
+    ) {
+        debug_assert!(block.size.is_power_of_two());
+
+        let size_index =
+            (block.size.trailing_zeros() - self.minimal_size.trailing_zeros()) as usize;
+
+        let mut release_index = block.index;
+        let mut release_size_index = size_index;
+
+        loop {
+            match self.sizes[release_size_index].release(release_index) {
+                Release::Parent(parent) => {
+                    release_size_index += 1;
+                    release_index = parent;
+                }
+                Release::Chunk(chunk) => {
+                    debug_assert_eq!(chunk, block.chunk);
+                    debug_assert_eq!(
+                        self.chunks.get(chunk).size,
+                        self.minimal_size << (release_size_index + 1)
+                    );
+                    let chunk = self.chunks.remove(chunk);
+                    drop(block);
+
+                    let memory = try_arc_unwrap(chunk.memory)
+                        .expect("Memory shared after last block deallocated");
+
+                    device.deallocate_memory(memory);
+                    *allocations_remains += 1;
+                    heap.dealloc(chunk.size);
+
+                    return;
+                }
+                Release::None => return,
+            }
+        }
+    }
+
+    fn host_visible(&self) -> bool {
+        self.props.contains(MemoryPropertyFlags::HOST_VISIBLE)
+    }
+}
diff --git a/third_party/rust/gpu-alloc/src/config.rs b/third_party/rust/gpu-alloc/src/config.rs
index 0d9da8070ab9..20d53a3d758e 100644
--- a/third_party/rust/gpu-alloc/src/config.rs
+++ b/third_party/rust/gpu-alloc/src/config.rs
@@ -1,77 +1,77 @@
-/// Configuration for [`GpuAllocator`]
-///
-/// [`GpuAllocator`]: type.GpuAllocator
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
-pub struct Config {
-    /// Size in bytes of request that will be served by dedicated memory object.
-    /// This value should be large enough to not exhaust memory object limit
-    /// and not use slow memory object allocation when it is not necessary.
- pub dedicated_threshold: u64, - - /// Size in bytes of request that will be served by dedicated memory object if preferred. - /// This value should be large enough to not exhaust memory object limit - /// and not use slow memory object allocation when it is not necessary. - /// - /// This won't make much sense if this value is larger than `dedicated_threshold`. - pub preferred_dedicated_threshold: u64, - - /// Size in bytes of transient memory request that will be served by dedicated memory object. - /// This value should be large enough to not exhaust memory object limit - /// and not use slow memory object allocation when it is not necessary. - /// - /// This won't make much sense if this value is lesser than `dedicated_threshold`. - pub transient_dedicated_threshold: u64, - - /// Size in bytes of first chunk in free-list allocator. - pub starting_free_list_chunk: u64, - - /// Upper limit for size in bytes of chunks in free-list allocator. - pub final_free_list_chunk: u64, - - /// Minimal size for buddy allocator. - pub minimal_buddy_size: u64, - - /// Initial memory object size for buddy allocator. - /// If less than `minimal_buddy_size` then `minimal_buddy_size` is used instead. - pub initial_buddy_dedicated_size: u64, -} - -impl Config { - /// Returns default configuration. - /// - /// This is not `Default` implementation to discourage usage outside of - /// prototyping. - /// - /// Proper configuration should depend on hardware and intended usage.\ - /// But those values can be used as starting point.\ - /// Note that they can simply not work for some platforms with lesser - /// memory capacity than today's "modern" GPU (year 2020). - pub fn i_am_prototyping() -> Self { - // Assume that today's modern GPU is made of 1024 potatoes. - let potato = Config::i_am_potato(); - - Config { - dedicated_threshold: potato.dedicated_threshold * 1024, - preferred_dedicated_threshold: potato.preferred_dedicated_threshold * 1024, - transient_dedicated_threshold: potato.transient_dedicated_threshold * 1024, - starting_free_list_chunk: potato.starting_free_list_chunk * 1024, - final_free_list_chunk: potato.final_free_list_chunk * 1024, - minimal_buddy_size: potato.minimal_buddy_size * 1024, - initial_buddy_dedicated_size: potato.initial_buddy_dedicated_size * 1024, - } - } - - /// Returns default configuration for average sized potato. - pub fn i_am_potato() -> Self { - Config { - dedicated_threshold: 32 * 1024, - preferred_dedicated_threshold: 1024, - transient_dedicated_threshold: 128 * 1024, - starting_free_list_chunk: 8 * 1024, - final_free_list_chunk: 128 * 1024, - minimal_buddy_size: 1, - initial_buddy_dedicated_size: 8 * 1024, - } - } -} +/// Configuration for [`GpuAllocator`] +/// +/// [`GpuAllocator`]: type.GpuAllocator +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct Config { + /// Size in bytes of request that will be served by dedicated memory object. + /// This value should be large enough to not exhaust memory object limit + /// and not use slow memory object allocation when it is not necessary. + pub dedicated_threshold: u64, + + /// Size in bytes of request that will be served by dedicated memory object if preferred. + /// This value should be large enough to not exhaust memory object limit + /// and not use slow memory object allocation when it is not necessary. + /// + /// This won't make much sense if this value is larger than `dedicated_threshold`. 
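+    /// (Illustration: the `i_am_prototyping` configuration below pairs a
+    /// 1 MiB preferred threshold with a 32 MiB `dedicated_threshold`.)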
+ pub preferred_dedicated_threshold: u64, + + /// Size in bytes of transient memory request that will be served by dedicated memory object. + /// This value should be large enough to not exhaust memory object limit + /// and not use slow memory object allocation when it is not necessary. + /// + /// This won't make much sense if this value is lesser than `dedicated_threshold`. + pub transient_dedicated_threshold: u64, + + /// Size in bytes of first chunk in free-list allocator. + pub starting_free_list_chunk: u64, + + /// Upper limit for size in bytes of chunks in free-list allocator. + pub final_free_list_chunk: u64, + + /// Minimal size for buddy allocator. + pub minimal_buddy_size: u64, + + /// Initial memory object size for buddy allocator. + /// If less than `minimal_buddy_size` then `minimal_buddy_size` is used instead. + pub initial_buddy_dedicated_size: u64, +} + +impl Config { + /// Returns default configuration. + /// + /// This is not `Default` implementation to discourage usage outside of + /// prototyping. + /// + /// Proper configuration should depend on hardware and intended usage.\ + /// But those values can be used as starting point.\ + /// Note that they can simply not work for some platforms with lesser + /// memory capacity than today's "modern" GPU (year 2020). + pub fn i_am_prototyping() -> Self { + // Assume that today's modern GPU is made of 1024 potatoes. + let potato = Config::i_am_potato(); + + Config { + dedicated_threshold: potato.dedicated_threshold * 1024, + preferred_dedicated_threshold: potato.preferred_dedicated_threshold * 1024, + transient_dedicated_threshold: potato.transient_dedicated_threshold * 1024, + starting_free_list_chunk: potato.starting_free_list_chunk * 1024, + final_free_list_chunk: potato.final_free_list_chunk * 1024, + minimal_buddy_size: potato.minimal_buddy_size * 1024, + initial_buddy_dedicated_size: potato.initial_buddy_dedicated_size * 1024, + } + } + + /// Returns default configuration for average sized potato. + pub fn i_am_potato() -> Self { + Config { + dedicated_threshold: 32 * 1024, + preferred_dedicated_threshold: 1024, + transient_dedicated_threshold: 128 * 1024, + starting_free_list_chunk: 8 * 1024, + final_free_list_chunk: 128 * 1024, + minimal_buddy_size: 1, + initial_buddy_dedicated_size: 8 * 1024, + } + } +} diff --git a/third_party/rust/gpu-alloc/src/error.rs b/third_party/rust/gpu-alloc/src/error.rs index 95cde9bcaa77..98c396b1fea7 100644 --- a/third_party/rust/gpu-alloc/src/error.rs +++ b/third_party/rust/gpu-alloc/src/error.rs @@ -1,116 +1,116 @@ -use { - core::fmt::{self, Display}, - gpu_alloc_types::{DeviceMapError, OutOfMemory}, -}; - -/// Enumeration of possible errors that may occur during memory allocation. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub enum AllocationError { - /// Backend reported that device memory has been exhausted.\ - /// Deallocating device memory from the same heap may increase chance - /// that another allocation would succeed. - OutOfDeviceMemory, - - /// Backend reported that host memory has been exhausted.\ - /// Deallocating host memory may increase chance that another allocation would succeed. - OutOfHostMemory, - - /// Allocation request cannot be fulfilled as no available memory types allowed - /// by `Request.memory_types` mask is compatible with `request.usage`. - NoCompatibleMemoryTypes, - - /// Reached limit on allocated memory objects count.\ - /// Deallocating device memory may increase chance that another allocation would succeed. 
- /// Especially dedicated memory blocks. - /// - /// If this error is returned when memory heaps are far from exhausted - /// `Config` should be tweaked to allocate larger memory objects. - TooManyObjects, -} - -impl From for AllocationError { - fn from(err: OutOfMemory) -> Self { - match err { - OutOfMemory::OutOfDeviceMemory => AllocationError::OutOfDeviceMemory, - OutOfMemory::OutOfHostMemory => AllocationError::OutOfHostMemory, - } - } -} - -impl Display for AllocationError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - AllocationError::OutOfDeviceMemory => fmt.write_str("Device memory exhausted"), - AllocationError::OutOfHostMemory => fmt.write_str("Host memory exhausted"), - AllocationError::NoCompatibleMemoryTypes => fmt.write_str( - "No compatible memory types from requested types support requested usage", - ), - AllocationError::TooManyObjects => { - fmt.write_str("Reached limit on allocated memory objects count") - } - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for AllocationError {} - -/// Enumeration of possible errors that may occur during memory mapping. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub enum MapError { - /// Backend reported that device memory has been exhausted.\ - /// Deallocating device memory from the same heap may increase chance - /// that another mapping would succeed. - OutOfDeviceMemory, - - /// Backend reported that host memory has been exhausted.\ - /// Deallocating host memory may increase chance that another mapping would succeed. - OutOfHostMemory, - - /// Attempt to map memory block with non-host-visible memory type.\ - /// Ensure to include `UsageFlags::HOST_ACCESS` into allocation request - /// when memory mapping is intended. - NonHostVisible, - - /// Map failed for implementation specific reason.\ - /// For Vulkan backend this includes failed attempt - /// to allocate large enough virtual address space. - MapFailed, - - /// Mapping failed due to block being already mapped. - AlreadyMapped, -} - -impl From for MapError { - fn from(err: DeviceMapError) -> Self { - match err { - DeviceMapError::OutOfDeviceMemory => MapError::OutOfDeviceMemory, - DeviceMapError::OutOfHostMemory => MapError::OutOfHostMemory, - DeviceMapError::MapFailed => MapError::MapFailed, - } - } -} - -impl From for MapError { - fn from(err: OutOfMemory) -> Self { - match err { - OutOfMemory::OutOfDeviceMemory => MapError::OutOfDeviceMemory, - OutOfMemory::OutOfHostMemory => MapError::OutOfHostMemory, - } - } -} - -impl Display for MapError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - MapError::OutOfDeviceMemory => fmt.write_str("Device memory exhausted"), - MapError::OutOfHostMemory => fmt.write_str("Host memory exhausted"), - MapError::MapFailed => fmt.write_str("Failed to map memory object"), - MapError::NonHostVisible => fmt.write_str("Impossible to map non-host-visible memory"), - MapError::AlreadyMapped => fmt.write_str("Block is already mapped"), - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for MapError {} +use { + core::fmt::{self, Display}, + gpu_alloc_types::{DeviceMapError, OutOfMemory}, +}; + +/// Enumeration of possible errors that may occur during memory allocation. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub enum AllocationError { + /// Backend reported that device memory has been exhausted.\ + /// Deallocating device memory from the same heap may increase chance + /// that another allocation would succeed. 
+    OutOfDeviceMemory,
+
+    /// Backend reported that host memory has been exhausted.\
+    /// Deallocating host memory may increase chance that another allocation would succeed.
+    OutOfHostMemory,
+
+    /// Allocation request cannot be fulfilled as no available memory types allowed
+    /// by `Request.memory_types` mask is compatible with `request.usage`.
+    NoCompatibleMemoryTypes,
+
+    /// Reached limit on allocated memory objects count.\
+    /// Deallocating device memory may increase chance that another allocation would succeed.
+    /// Especially dedicated memory blocks.
+    ///
+    /// If this error is returned when memory heaps are far from exhausted
+    /// `Config` should be tweaked to allocate larger memory objects.
+    TooManyObjects,
+}
+
+impl From<OutOfMemory> for AllocationError {
+    fn from(err: OutOfMemory) -> Self {
+        match err {
+            OutOfMemory::OutOfDeviceMemory => AllocationError::OutOfDeviceMemory,
+            OutOfMemory::OutOfHostMemory => AllocationError::OutOfHostMemory,
+        }
+    }
+}
+
+impl Display for AllocationError {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            AllocationError::OutOfDeviceMemory => fmt.write_str("Device memory exhausted"),
+            AllocationError::OutOfHostMemory => fmt.write_str("Host memory exhausted"),
+            AllocationError::NoCompatibleMemoryTypes => fmt.write_str(
+                "No compatible memory types from requested types support requested usage",
+            ),
+            AllocationError::TooManyObjects => {
+                fmt.write_str("Reached limit on allocated memory objects count")
+            }
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for AllocationError {}
+
+/// Enumeration of possible errors that may occur during memory mapping.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum MapError {
+    /// Backend reported that device memory has been exhausted.\
+    /// Deallocating device memory from the same heap may increase chance
+    /// that another mapping would succeed.
+    OutOfDeviceMemory,
+
+    /// Backend reported that host memory has been exhausted.\
+    /// Deallocating host memory may increase chance that another mapping would succeed.
+    OutOfHostMemory,
+
+    /// Attempt to map memory block with non-host-visible memory type.\
+    /// Ensure to include `UsageFlags::HOST_ACCESS` into allocation request
+    /// when memory mapping is intended.
+    NonHostVisible,
+
+    /// Map failed for implementation specific reason.\
+    /// For Vulkan backend this includes failed attempt
+    /// to allocate large enough virtual address space.
+    MapFailed,
+
+    /// Mapping failed due to block being already mapped.
+    AlreadyMapped,
+}
+
+impl From<DeviceMapError> for MapError {
+    fn from(err: DeviceMapError) -> Self {
+        match err {
+            DeviceMapError::OutOfDeviceMemory => MapError::OutOfDeviceMemory,
+            DeviceMapError::OutOfHostMemory => MapError::OutOfHostMemory,
+            DeviceMapError::MapFailed => MapError::MapFailed,
+        }
+    }
+}
+
+impl From<OutOfMemory> for MapError {
+    fn from(err: OutOfMemory) -> Self {
+        match err {
+            OutOfMemory::OutOfDeviceMemory => MapError::OutOfDeviceMemory,
+            OutOfMemory::OutOfHostMemory => MapError::OutOfHostMemory,
+        }
+    }
+}
+
+impl Display for MapError {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MapError::OutOfDeviceMemory => fmt.write_str("Device memory exhausted"),
+            MapError::OutOfHostMemory => fmt.write_str("Host memory exhausted"),
+            MapError::MapFailed => fmt.write_str("Failed to map memory object"),
+            MapError::NonHostVisible => fmt.write_str("Impossible to map non-host-visible memory"),
+            MapError::AlreadyMapped => fmt.write_str("Block is already mapped"),
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for MapError {}
diff --git a/third_party/rust/gpu-alloc/src/freelist.rs b/third_party/rust/gpu-alloc/src/freelist.rs
index 448a961d4fac..a6ece27be382 100644
--- a/third_party/rust/gpu-alloc/src/freelist.rs
+++ b/third_party/rust/gpu-alloc/src/freelist.rs
@@ -1,528 +1,528 @@
-use {
-    crate::{
-        align_down, align_up,
-        error::AllocationError,
-        heap::Heap,
-        util::{arc_unwrap, is_arc_unique},
-        MemoryBounds,
-    },
-    alloc::{sync::Arc, vec::Vec},
-    core::{cmp::Ordering, ptr::NonNull},
-    gpu_alloc_types::{AllocationFlags, DeviceMapError, MemoryDevice, MemoryPropertyFlags},
-};
-
-unsafe fn opt_ptr_add(ptr: Option<NonNull<u8>>, size: u64) -> Option<NonNull<u8>> {
-    ptr.map(|ptr| {
-        // Size is within memory region started at `ptr`.
-        // size is within `chunk_size` that fits `isize`.
- NonNull::new_unchecked(ptr.as_ptr().offset(size as isize)) - }) -} - -#[derive(Debug)] -pub(super) struct FreeList { - array: Vec>, - counter: u64, -} - -impl FreeList { - pub fn new() -> Self { - FreeList { - array: Vec::new(), - counter: 0, - } - } - - pub fn get_block_from_new_memory( - &mut self, - memory: Arc, - memory_size: u64, - ptr: Option>, - align_mask: u64, - size: u64, - ) -> FreeListBlock { - debug_assert!(size <= memory_size); - - self.counter += 1; - self.array.push(FreeListRegion { - memory, - ptr, - chunk: self.counter, - start: 0, - end: memory_size, - }); - self.get_block_at(self.array.len() - 1, align_mask, size) - } - - pub fn get_block(&mut self, align_mask: u64, size: u64) -> Option> { - let (index, _) = self.array.iter().enumerate().rev().find(|(_, region)| { - match region.end.checked_sub(size) { - Some(start) => { - let aligned_start = align_down(start, align_mask); - aligned_start >= region.start - } - None => false, - } - })?; - - Some(self.get_block_at(index, align_mask, size)) - } - - fn get_block_at(&mut self, index: usize, align_mask: u64, size: u64) -> FreeListBlock { - let region = &mut self.array[index]; - - let start = region.end - size; - let aligned_start = align_down(start, align_mask); - - if aligned_start > region.start { - let block = FreeListBlock { - offset: aligned_start, - size: region.end - aligned_start, - chunk: region.chunk, - ptr: unsafe { opt_ptr_add(region.ptr, aligned_start - region.start) }, - memory: region.memory.clone(), - }; - - region.end = aligned_start; - - block - } else { - debug_assert_eq!(aligned_start, region.start); - let region = self.array.remove(index); - region.into_block() - } - } - - pub fn insert_block(&mut self, block: FreeListBlock) { - match self.array.binary_search_by(|b| b.cmp(&block)) { - Ok(_) => { - panic!("Overlapping block found in free list"); - } - Err(index) if self.array.len() > index => match &mut self.array[..=index] { - [] => unreachable!(), - [next] => { - debug_assert!(!next.is_suffix_block(&block)); - - if next.is_prefix_block(&block) { - next.merge_prefix_block(block); - } else { - self.array.insert(0, FreeListRegion::from_block(block)); - } - } - [.., prev, next] => { - debug_assert!(!prev.is_prefix_block(&block)); - debug_assert!(!next.is_suffix_block(&block)); - - if next.is_prefix_block(&block) { - next.merge_prefix_block(block); - - if prev.consecutive(&*next) { - let next = self.array.remove(index); - let prev = &mut self.array[index - 1]; - prev.merge(next); - } - } else if prev.is_suffix_block(&block) { - prev.merge_suffix_block(block); - } else { - self.array.insert(index, FreeListRegion::from_block(block)); - } - } - }, - Err(_) => match &mut self.array[..] 
{ - [] => self.array.push(FreeListRegion::from_block(block)), - [.., prev] => { - debug_assert!(!prev.is_prefix_block(&block)); - if prev.is_suffix_block(&block) { - prev.merge_suffix_block(block); - } else { - self.array.push(FreeListRegion::from_block(block)); - } - } - }, - } - } - - pub fn drain(&mut self, keep_last: bool) -> Option + '_> { - // Time to deallocate - - let len = self.array.len(); - - let mut del = 0; - { - let regions = &mut self.array[..]; - - for i in 0..len { - if (i < len - 1 || !keep_last) && is_arc_unique(&mut regions[i].memory) { - del += 1; - } else if del > 0 { - regions.swap(i - del, i); - } - } - } - - if del > 0 { - Some(self.array.drain(len - del..).map(move |region| { - debug_assert_eq!(region.start, 0); - (unsafe { arc_unwrap(region.memory) }, region.end) - })) - } else { - None - } - } -} - -#[derive(Debug)] -struct FreeListRegion { - memory: Arc, - ptr: Option>, - chunk: u64, - start: u64, - end: u64, -} - -unsafe impl Sync for FreeListRegion where M: Sync {} -unsafe impl Send for FreeListRegion where M: Send {} - -impl FreeListRegion { - pub fn cmp(&self, block: &FreeListBlock) -> Ordering { - debug_assert_eq!( - Arc::ptr_eq(&self.memory, &block.memory), - self.chunk == block.chunk - ); - - if self.chunk == block.chunk { - debug_assert_eq!( - Ord::cmp(&self.start, &block.offset), - Ord::cmp(&self.end, &(block.offset + block.size)), - "Free region {{ start: {}, end: {} }} overlaps with block {{ offset: {}, size: {} }}", - self.start, - self.end, - block.offset, - block.size, - ); - } - - Ord::cmp(&self.chunk, &block.chunk).then(Ord::cmp(&self.start, &block.offset)) - } - - fn from_block(block: FreeListBlock) -> Self { - FreeListRegion { - memory: block.memory, - chunk: block.chunk, - ptr: block.ptr, - start: block.offset, - end: block.offset + block.size, - } - } - - fn into_block(self) -> FreeListBlock { - FreeListBlock { - memory: self.memory, - chunk: self.chunk, - ptr: self.ptr, - offset: self.start, - size: self.end - self.start, - } - } - - fn consecutive(&self, other: &Self) -> bool { - if self.chunk != other.chunk { - return false; - } - - debug_assert!(Arc::ptr_eq(&self.memory, &other.memory)); - - debug_assert_eq!( - Ord::cmp(&self.start, &other.start), - Ord::cmp(&self.end, &other.end) - ); - - self.end == other.start - } - - fn merge(&mut self, next: FreeListRegion) { - debug_assert!(self.consecutive(&next)); - self.end = next.end; - } - - fn is_prefix_block(&self, block: &FreeListBlock) -> bool { - if self.chunk != block.chunk { - return false; - } - - debug_assert!(Arc::ptr_eq(&self.memory, &block.memory)); - - debug_assert_eq!( - Ord::cmp(&self.start, &block.offset), - Ord::cmp(&self.end, &(block.offset + block.size)) - ); - - self.start == (block.offset + block.size) - } - - fn merge_prefix_block(&mut self, block: FreeListBlock) { - debug_assert!(self.is_prefix_block(&block)); - self.start = block.offset; - self.ptr = block.ptr; - } - - fn is_suffix_block(&self, block: &FreeListBlock) -> bool { - if self.chunk != block.chunk { - return false; - } - - debug_assert!(Arc::ptr_eq(&self.memory, &block.memory)); - - debug_assert_eq!( - Ord::cmp(&self.start, &block.offset), - Ord::cmp(&self.end, &(block.offset + block.size)) - ); - - self.end == block.offset - } - - fn merge_suffix_block(&mut self, block: FreeListBlock) { - debug_assert!(self.is_suffix_block(&block)); - self.end += block.size; - } -} - -#[derive(Debug)] -pub struct FreeListBlock { - pub memory: Arc, - pub ptr: Option>, - pub chunk: u64, - pub offset: u64, - pub size: u64, -} - 
-unsafe impl Sync for FreeListBlock where M: Sync {} -unsafe impl Send for FreeListBlock where M: Send {} - -#[derive(Debug)] -pub(crate) struct FreeListAllocator { - freelist: FreeList, - chunk_size: u64, - final_chunk_size: u64, - memory_type: u32, - props: MemoryPropertyFlags, - atom_mask: u64, - - total_allocations: u64, - total_deallocations: u64, -} - -impl Drop for FreeListAllocator { - fn drop(&mut self) { - match Ord::cmp(&self.total_allocations, &self.total_deallocations) { - Ordering::Equal => {} - Ordering::Greater => { - report_error_on_drop!("Not all blocks were deallocated") - } - Ordering::Less => { - report_error_on_drop!("More blocks deallocated than allocated") - } - } - - if !self.freelist.array.is_empty() { - report_error_on_drop!( - "FreeListAllocator has free blocks on drop. Allocator should be cleaned" - ); - } - } -} - -impl FreeListAllocator -where - M: MemoryBounds + 'static, -{ - pub fn new( - starting_chunk_size: u64, - final_chunk_size: u64, - memory_type: u32, - props: MemoryPropertyFlags, - atom_mask: u64, - ) -> Self { - debug_assert_eq!( - align_down(starting_chunk_size, atom_mask), - starting_chunk_size - ); - - let starting_chunk_size = min(starting_chunk_size, isize::max_value()); - - debug_assert_eq!(align_down(final_chunk_size, atom_mask), final_chunk_size); - let final_chunk_size = min(final_chunk_size, isize::max_value()); - - FreeListAllocator { - freelist: FreeList::new(), - chunk_size: starting_chunk_size, - final_chunk_size, - memory_type, - props, - atom_mask, - - total_allocations: 0, - total_deallocations: 0, - } - } - - #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] - pub unsafe fn alloc( - &mut self, - device: &impl MemoryDevice, - size: u64, - align_mask: u64, - flags: AllocationFlags, - heap: &mut Heap, - allocations_remains: &mut u32, - ) -> Result, AllocationError> { - debug_assert!( - self.final_chunk_size >= size, - "GpuAllocator must not request allocations equal or greater to chunks size" - ); - - let size = align_up(size, self.atom_mask).expect( - "Any value not greater than final chunk size (which is aligned) has to fit for alignment", - ); - - let align_mask = align_mask | self.atom_mask; - let host_visible = self.host_visible(); - - if size <= self.chunk_size { - // Otherwise there can't be any sufficiently large free blocks - if let Some(block) = self.freelist.get_block(align_mask, size) { - self.total_allocations += 1; - return Ok(block); - } - } - - // New allocation is required. 
- if *allocations_remains == 0 { - return Err(AllocationError::TooManyObjects); - } - - if size > self.chunk_size { - let multiple = (size - 1) / self.chunk_size + 1; - let multiple = multiple.next_power_of_two(); - - self.chunk_size = (self.chunk_size * multiple).min(self.final_chunk_size); - } - - let mut memory = device.allocate_memory(self.chunk_size, self.memory_type, flags)?; - *allocations_remains -= 1; - heap.alloc(self.chunk_size); - - // Map host visible allocations - let ptr = if host_visible { - match device.map_memory(&mut memory, 0, self.chunk_size) { - Ok(ptr) => Some(ptr), - Err(DeviceMapError::MapFailed) => { - #[cfg(feature = "tracing")] - tracing::error!("Failed to map host-visible memory in linear allocator"); - device.deallocate_memory(memory); - *allocations_remains += 1; - heap.dealloc(self.chunk_size); - - return Err(AllocationError::OutOfHostMemory); - } - Err(DeviceMapError::OutOfDeviceMemory) => { - return Err(AllocationError::OutOfDeviceMemory); - } - Err(DeviceMapError::OutOfHostMemory) => { - return Err(AllocationError::OutOfHostMemory); - } - } - } else { - None - }; - - let memory = Arc::new(memory); - let block = - self.freelist - .get_block_from_new_memory(memory, self.chunk_size, ptr, align_mask, size); - - if self.chunk_size < self.final_chunk_size { - // Double next chunk size - // Limit to final value. - self.chunk_size = (self.chunk_size * 2).min(self.final_chunk_size); - } - - self.total_allocations += 1; - Ok(block) - } - - #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] - pub unsafe fn dealloc( - &mut self, - device: &impl MemoryDevice, - block: FreeListBlock, - heap: &mut Heap, - allocations_remains: &mut u32, - ) { - debug_assert!(block.size < self.chunk_size); - debug_assert_ne!(block.size, 0); - self.freelist.insert_block(block); - self.total_deallocations += 1; - - if let Some(memory) = self.freelist.drain(true) { - memory.for_each(|(memory, size)| { - device.deallocate_memory(memory); - *allocations_remains += 1; - heap.dealloc(size); - }); - } - } - - /// Deallocates leftover memory objects. - /// Should be used before dropping. - /// - /// # Safety - /// - /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance - /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance - /// and memory blocks allocated from it - #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] - pub unsafe fn cleanup( - &mut self, - device: &impl MemoryDevice, - heap: &mut Heap, - allocations_remains: &mut u32, - ) { - if let Some(memory) = self.freelist.drain(false) { - memory.for_each(|(memory, size)| { - device.deallocate_memory(memory); - *allocations_remains += 1; - heap.dealloc(size); - }); - } - - #[cfg(feature = "tracing")] - { - if self.total_allocations == self.total_deallocations && !self.freelist.array.is_empty() - { - tracing::error!( - "Some regions were not deallocated on cleanup, although all blocks are free. - This is a bug in `FreeBlockAllocator`. 
- See array of free blocks left: - {:#?}", - self.freelist.array, - ); - } - } - } - - fn host_visible(&self) -> bool { - self.props.contains(MemoryPropertyFlags::HOST_VISIBLE) - } -} - -fn min(l: L, r: R) -> L -where - R: core::convert::TryInto, - L: Ord, -{ - match r.try_into() { - Ok(r) => core::cmp::min(l, r), - Err(_) => l, - } -} +use { + crate::{ + align_down, align_up, + error::AllocationError, + heap::Heap, + util::{arc_unwrap, is_arc_unique}, + MemoryBounds, + }, + alloc::{sync::Arc, vec::Vec}, + core::{cmp::Ordering, ptr::NonNull}, + gpu_alloc_types::{AllocationFlags, DeviceMapError, MemoryDevice, MemoryPropertyFlags}, +}; + +unsafe fn opt_ptr_add(ptr: Option>, size: u64) -> Option> { + ptr.map(|ptr| { + // Size is within memory region started at `ptr`. + // size is within `chunk_size` that fits `isize`. + NonNull::new_unchecked(ptr.as_ptr().offset(size as isize)) + }) +} + +#[derive(Debug)] +pub(super) struct FreeList { + array: Vec>, + counter: u64, +} + +impl FreeList { + pub fn new() -> Self { + FreeList { + array: Vec::new(), + counter: 0, + } + } + + pub fn get_block_from_new_memory( + &mut self, + memory: Arc, + memory_size: u64, + ptr: Option>, + align_mask: u64, + size: u64, + ) -> FreeListBlock { + debug_assert!(size <= memory_size); + + self.counter += 1; + self.array.push(FreeListRegion { + memory, + ptr, + chunk: self.counter, + start: 0, + end: memory_size, + }); + self.get_block_at(self.array.len() - 1, align_mask, size) + } + + pub fn get_block(&mut self, align_mask: u64, size: u64) -> Option> { + let (index, _) = self.array.iter().enumerate().rev().find(|(_, region)| { + match region.end.checked_sub(size) { + Some(start) => { + let aligned_start = align_down(start, align_mask); + aligned_start >= region.start + } + None => false, + } + })?; + + Some(self.get_block_at(index, align_mask, size)) + } + + fn get_block_at(&mut self, index: usize, align_mask: u64, size: u64) -> FreeListBlock { + let region = &mut self.array[index]; + + let start = region.end - size; + let aligned_start = align_down(start, align_mask); + + if aligned_start > region.start { + let block = FreeListBlock { + offset: aligned_start, + size: region.end - aligned_start, + chunk: region.chunk, + ptr: unsafe { opt_ptr_add(region.ptr, aligned_start - region.start) }, + memory: region.memory.clone(), + }; + + region.end = aligned_start; + + block + } else { + debug_assert_eq!(aligned_start, region.start); + let region = self.array.remove(index); + region.into_block() + } + } + + pub fn insert_block(&mut self, block: FreeListBlock) { + match self.array.binary_search_by(|b| b.cmp(&block)) { + Ok(_) => { + panic!("Overlapping block found in free list"); + } + Err(index) if self.array.len() > index => match &mut self.array[..=index] { + [] => unreachable!(), + [next] => { + debug_assert!(!next.is_suffix_block(&block)); + + if next.is_prefix_block(&block) { + next.merge_prefix_block(block); + } else { + self.array.insert(0, FreeListRegion::from_block(block)); + } + } + [.., prev, next] => { + debug_assert!(!prev.is_prefix_block(&block)); + debug_assert!(!next.is_suffix_block(&block)); + + if next.is_prefix_block(&block) { + next.merge_prefix_block(block); + + if prev.consecutive(&*next) { + let next = self.array.remove(index); + let prev = &mut self.array[index - 1]; + prev.merge(next); + } + } else if prev.is_suffix_block(&block) { + prev.merge_suffix_block(block); + } else { + self.array.insert(index, FreeListRegion::from_block(block)); + } + } + }, + Err(_) => match &mut self.array[..] 
{ + [] => self.array.push(FreeListRegion::from_block(block)), + [.., prev] => { + debug_assert!(!prev.is_prefix_block(&block)); + if prev.is_suffix_block(&block) { + prev.merge_suffix_block(block); + } else { + self.array.push(FreeListRegion::from_block(block)); + } + } + }, + } + } + + pub fn drain(&mut self, keep_last: bool) -> Option + '_> { + // Time to deallocate + + let len = self.array.len(); + + let mut del = 0; + { + let regions = &mut self.array[..]; + + for i in 0..len { + if (i < len - 1 || !keep_last) && is_arc_unique(&mut regions[i].memory) { + del += 1; + } else if del > 0 { + regions.swap(i - del, i); + } + } + } + + if del > 0 { + Some(self.array.drain(len - del..).map(move |region| { + debug_assert_eq!(region.start, 0); + (unsafe { arc_unwrap(region.memory) }, region.end) + })) + } else { + None + } + } +} + +#[derive(Debug)] +struct FreeListRegion { + memory: Arc, + ptr: Option>, + chunk: u64, + start: u64, + end: u64, +} + +unsafe impl Sync for FreeListRegion where M: Sync {} +unsafe impl Send for FreeListRegion where M: Send {} + +impl FreeListRegion { + pub fn cmp(&self, block: &FreeListBlock) -> Ordering { + debug_assert_eq!( + Arc::ptr_eq(&self.memory, &block.memory), + self.chunk == block.chunk + ); + + if self.chunk == block.chunk { + debug_assert_eq!( + Ord::cmp(&self.start, &block.offset), + Ord::cmp(&self.end, &(block.offset + block.size)), + "Free region {{ start: {}, end: {} }} overlaps with block {{ offset: {}, size: {} }}", + self.start, + self.end, + block.offset, + block.size, + ); + } + + Ord::cmp(&self.chunk, &block.chunk).then(Ord::cmp(&self.start, &block.offset)) + } + + fn from_block(block: FreeListBlock) -> Self { + FreeListRegion { + memory: block.memory, + chunk: block.chunk, + ptr: block.ptr, + start: block.offset, + end: block.offset + block.size, + } + } + + fn into_block(self) -> FreeListBlock { + FreeListBlock { + memory: self.memory, + chunk: self.chunk, + ptr: self.ptr, + offset: self.start, + size: self.end - self.start, + } + } + + fn consecutive(&self, other: &Self) -> bool { + if self.chunk != other.chunk { + return false; + } + + debug_assert!(Arc::ptr_eq(&self.memory, &other.memory)); + + debug_assert_eq!( + Ord::cmp(&self.start, &other.start), + Ord::cmp(&self.end, &other.end) + ); + + self.end == other.start + } + + fn merge(&mut self, next: FreeListRegion) { + debug_assert!(self.consecutive(&next)); + self.end = next.end; + } + + fn is_prefix_block(&self, block: &FreeListBlock) -> bool { + if self.chunk != block.chunk { + return false; + } + + debug_assert!(Arc::ptr_eq(&self.memory, &block.memory)); + + debug_assert_eq!( + Ord::cmp(&self.start, &block.offset), + Ord::cmp(&self.end, &(block.offset + block.size)) + ); + + self.start == (block.offset + block.size) + } + + fn merge_prefix_block(&mut self, block: FreeListBlock) { + debug_assert!(self.is_prefix_block(&block)); + self.start = block.offset; + self.ptr = block.ptr; + } + + fn is_suffix_block(&self, block: &FreeListBlock) -> bool { + if self.chunk != block.chunk { + return false; + } + + debug_assert!(Arc::ptr_eq(&self.memory, &block.memory)); + + debug_assert_eq!( + Ord::cmp(&self.start, &block.offset), + Ord::cmp(&self.end, &(block.offset + block.size)) + ); + + self.end == block.offset + } + + fn merge_suffix_block(&mut self, block: FreeListBlock) { + debug_assert!(self.is_suffix_block(&block)); + self.end += block.size; + } +} + +#[derive(Debug)] +pub struct FreeListBlock { + pub memory: Arc, + pub ptr: Option>, + pub chunk: u64, + pub offset: u64, + pub size: u64, +} + 
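+// `ptr` above is a raw `NonNull` into the host mapping, which suppresses the
+// auto `Send`/`Sync` impls; they are reinstated manually below, conditional
+// only on the memory handle `M`, mirroring the impls for `FreeListRegion`.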
+unsafe impl Sync for FreeListBlock where M: Sync {} +unsafe impl Send for FreeListBlock where M: Send {} + +#[derive(Debug)] +pub(crate) struct FreeListAllocator { + freelist: FreeList, + chunk_size: u64, + final_chunk_size: u64, + memory_type: u32, + props: MemoryPropertyFlags, + atom_mask: u64, + + total_allocations: u64, + total_deallocations: u64, +} + +impl Drop for FreeListAllocator { + fn drop(&mut self) { + match Ord::cmp(&self.total_allocations, &self.total_deallocations) { + Ordering::Equal => {} + Ordering::Greater => { + report_error_on_drop!("Not all blocks were deallocated") + } + Ordering::Less => { + report_error_on_drop!("More blocks deallocated than allocated") + } + } + + if !self.freelist.array.is_empty() { + report_error_on_drop!( + "FreeListAllocator has free blocks on drop. Allocator should be cleaned" + ); + } + } +} + +impl FreeListAllocator +where + M: MemoryBounds + 'static, +{ + pub fn new( + starting_chunk_size: u64, + final_chunk_size: u64, + memory_type: u32, + props: MemoryPropertyFlags, + atom_mask: u64, + ) -> Self { + debug_assert_eq!( + align_down(starting_chunk_size, atom_mask), + starting_chunk_size + ); + + let starting_chunk_size = min(starting_chunk_size, isize::max_value()); + + debug_assert_eq!(align_down(final_chunk_size, atom_mask), final_chunk_size); + let final_chunk_size = min(final_chunk_size, isize::max_value()); + + FreeListAllocator { + freelist: FreeList::new(), + chunk_size: starting_chunk_size, + final_chunk_size, + memory_type, + props, + atom_mask, + + total_allocations: 0, + total_deallocations: 0, + } + } + + #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] + pub unsafe fn alloc( + &mut self, + device: &impl MemoryDevice, + size: u64, + align_mask: u64, + flags: AllocationFlags, + heap: &mut Heap, + allocations_remains: &mut u32, + ) -> Result, AllocationError> { + debug_assert!( + self.final_chunk_size >= size, + "GpuAllocator must not request allocations equal or greater to chunks size" + ); + + let size = align_up(size, self.atom_mask).expect( + "Any value not greater than final chunk size (which is aligned) has to fit for alignment", + ); + + let align_mask = align_mask | self.atom_mask; + let host_visible = self.host_visible(); + + if size <= self.chunk_size { + // Otherwise there can't be any sufficiently large free blocks + if let Some(block) = self.freelist.get_block(align_mask, size) { + self.total_allocations += 1; + return Ok(block); + } + } + + // New allocation is required. 
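+        // Illustrative numbers (not from the original source): starting from
+        // an 8 KiB chunk, a 20 KiB request gives
+        // `multiple = (20 - 1) / 8 + 1 = 3`, rounded up to the power of two 4,
+        // so the next chunk grows to 32 KiB (capped at `final_chunk_size`).
+        // Successful allocations below also double `chunk_size`, so chunks
+        // grow geometrically toward `final_chunk_size`.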
+ if *allocations_remains == 0 { + return Err(AllocationError::TooManyObjects); + } + + if size > self.chunk_size { + let multiple = (size - 1) / self.chunk_size + 1; + let multiple = multiple.next_power_of_two(); + + self.chunk_size = (self.chunk_size * multiple).min(self.final_chunk_size); + } + + let mut memory = device.allocate_memory(self.chunk_size, self.memory_type, flags)?; + *allocations_remains -= 1; + heap.alloc(self.chunk_size); + + // Map host visible allocations + let ptr = if host_visible { + match device.map_memory(&mut memory, 0, self.chunk_size) { + Ok(ptr) => Some(ptr), + Err(DeviceMapError::MapFailed) => { + #[cfg(feature = "tracing")] + tracing::error!("Failed to map host-visible memory in linear allocator"); + device.deallocate_memory(memory); + *allocations_remains += 1; + heap.dealloc(self.chunk_size); + + return Err(AllocationError::OutOfHostMemory); + } + Err(DeviceMapError::OutOfDeviceMemory) => { + return Err(AllocationError::OutOfDeviceMemory); + } + Err(DeviceMapError::OutOfHostMemory) => { + return Err(AllocationError::OutOfHostMemory); + } + } + } else { + None + }; + + let memory = Arc::new(memory); + let block = + self.freelist + .get_block_from_new_memory(memory, self.chunk_size, ptr, align_mask, size); + + if self.chunk_size < self.final_chunk_size { + // Double next chunk size + // Limit to final value. + self.chunk_size = (self.chunk_size * 2).min(self.final_chunk_size); + } + + self.total_allocations += 1; + Ok(block) + } + + #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] + pub unsafe fn dealloc( + &mut self, + device: &impl MemoryDevice, + block: FreeListBlock, + heap: &mut Heap, + allocations_remains: &mut u32, + ) { + debug_assert!(block.size < self.chunk_size); + debug_assert_ne!(block.size, 0); + self.freelist.insert_block(block); + self.total_deallocations += 1; + + if let Some(memory) = self.freelist.drain(true) { + memory.for_each(|(memory, size)| { + device.deallocate_memory(memory); + *allocations_remains += 1; + heap.dealloc(size); + }); + } + } + + /// Deallocates leftover memory objects. + /// Should be used before dropping. + /// + /// # Safety + /// + /// * `device` must be one with `DeviceProperties` that were provided to create this `GpuAllocator` instance + /// * Same `device` instance must be used for all interactions with one `GpuAllocator` instance + /// and memory blocks allocated from it + #[cfg_attr(feature = "tracing", tracing::instrument(skip(self, device)))] + pub unsafe fn cleanup( + &mut self, + device: &impl MemoryDevice, + heap: &mut Heap, + allocations_remains: &mut u32, + ) { + if let Some(memory) = self.freelist.drain(false) { + memory.for_each(|(memory, size)| { + device.deallocate_memory(memory); + *allocations_remains += 1; + heap.dealloc(size); + }); + } + + #[cfg(feature = "tracing")] + { + if self.total_allocations == self.total_deallocations && !self.freelist.array.is_empty() + { + tracing::error!( + "Some regions were not deallocated on cleanup, although all blocks are free. + This is a bug in `FreeBlockAllocator`. 
+                    See array of free blocks left:
+                    {:#?}",
+                    self.freelist.array,
+                );
+            }
+        }
+    }
+
+    fn host_visible(&self) -> bool {
+        self.props.contains(MemoryPropertyFlags::HOST_VISIBLE)
+    }
+}
+
+fn min<L, R>(l: L, r: R) -> L
+where
+    R: core::convert::TryInto<L>,
+    L: Ord,
+{
+    match r.try_into() {
+        Ok(r) => core::cmp::min(l, r),
+        Err(_) => l,
+    }
+}
diff --git a/third_party/rust/gpu-alloc/src/heap.rs b/third_party/rust/gpu-alloc/src/heap.rs
index fca7538919fc..f049f14768a5 100644
--- a/third_party/rust/gpu-alloc/src/heap.rs
+++ b/third_party/rust/gpu-alloc/src/heap.rs
@@ -1,32 +1,32 @@
-#[derive(Debug)]
-pub(crate) struct Heap {
-    size: u64,
-    used: u64,
-    allocated: u128,
-    deallocated: u128,
-}
-
-impl Heap {
-    pub(crate) fn new(size: u64) -> Self {
-        Heap {
-            size,
-            used: 0,
-            allocated: 0,
-            deallocated: 0,
-        }
-    }
-
-    pub(crate) fn size(&mut self) -> u64 {
-        self.size
-    }
-
-    pub(crate) fn alloc(&mut self, size: u64) {
-        self.used += size;
-        self.allocated += u128::from(size);
-    }
-
-    pub(crate) fn dealloc(&mut self, size: u64) {
-        self.used -= size;
-        self.deallocated += u128::from(size);
-    }
-}
+#[derive(Debug)]
+pub(crate) struct Heap {
+    size: u64,
+    used: u64,
+    allocated: u128,
+    deallocated: u128,
+}
+
+impl Heap {
+    pub(crate) fn new(size: u64) -> Self {
+        Heap {
+            size,
+            used: 0,
+            allocated: 0,
+            deallocated: 0,
+        }
+    }
+
+    pub(crate) fn size(&mut self) -> u64 {
+        self.size
+    }
+
+    pub(crate) fn alloc(&mut self, size: u64) {
+        self.used += size;
+        self.allocated += u128::from(size);
+    }
+
+    pub(crate) fn dealloc(&mut self, size: u64) {
+        self.used -= size;
+        self.deallocated += u128::from(size);
+    }
+}
diff --git a/third_party/rust/gpu-alloc/src/lib.rs b/third_party/rust/gpu-alloc/src/lib.rs
index 97d04fac7a7e..3533f45124f0 100644
--- a/third_party/rust/gpu-alloc/src/lib.rs
+++ b/third_party/rust/gpu-alloc/src/lib.rs
@@ -1,124 +1,124 @@
-//!
-//! Implementation agnostic memory allocator for Vulkan like APIs.
-//!
-//! This crate is intended to be used as part of safe API implementations.\
-//! Use with caution. There are unsafe functions all over the place.
-//!
-//! # Usage
-//!
-//! Start with fetching `DeviceProperties` from `gpu-alloc-<backend>` crate for the backend of choice.\
-//! Then create `GpuAllocator` instance and use it for all device memory allocations.\
-//! `GpuAllocator` will take care for all necessary bookkeeping like memory object count limit,
-//! heap budget and memory mapping.
-//!
-//! ### Backends implementations
-//!
-//! Backend supporting crates should not depend on this crate.\
-//! Instead they should depend on `gpu-alloc-types` which is much more stable,
-//! allowing to upgrade `gpu-alloc` version without `gpu-alloc-<backend>` upgrade.
-//!
-
-#![cfg_attr(not(feature = "std"), no_std)]
-
-extern crate alloc;
-
-#[cfg(feature = "tracing")]
-macro_rules! report_error_on_drop {
-    ($($tokens:tt)*) => {{
-        #[cfg(feature = "std")]
-        {
-            if std::thread::panicking() {
-                return;
-            }
-        }
-
-        tracing::error!($($tokens)*)
-    }};
-}
-
-#[cfg(all(not(feature = "tracing"), feature = "std"))]
-macro_rules! report_error_on_drop {
-    ($($tokens:tt)*) => {{
-        if std::thread::panicking() {
-            return;
-        }
-        eprintln!($($tokens)*)
-    }};
-}
-
-#[cfg(all(not(feature = "tracing"), not(feature = "std")))]
-macro_rules! report_error_on_drop {
-    ($($tokens:tt)*) => {{
-        panic!($($tokens)*)
-    }};
-}
-
-mod allocator;
-mod block;
-mod buddy;
-mod config;
-mod error;
-mod freelist;
-mod heap;
-mod slab;
-mod usage;
-mod util;
-
-pub use {
-    self::{allocator::*, block::MemoryBlock, config::*, error::*, usage::*},
-    gpu_alloc_types::*,
-};
-
-/// Memory request for allocator.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-pub struct Request {
-    /// Minimal size of memory block required.
-    /// Returned block may have larger size,
-    /// use `MemoryBlock::size` to learn actual size of returned block.
-    pub size: u64,
-
-    /// Minimal alignment mask required.
-    /// Returned block may have larger alignment,
-    /// use `MemoryBlock::align` to learn actual alignment of returned block.
-    pub align_mask: u64,
-
-    /// Intended memory usage.
-    /// Returned block may support additional usages,
-    /// use `MemoryBlock::props` to learn memory properties of returned block.
-    pub usage: UsageFlags,
-
-    /// Bitset for memory types.
-    /// Returned block will be from memory type corresponding to one of set bits,
-    /// use `MemoryBlock::memory_type` to learn memory type index of returned block.
-    pub memory_types: u32,
-}
-
-/// Aligns `value` up to `align_mask`
-/// Returns smallest integer not lesser than `value` aligned by `align_mask`.
-/// Returns `None` on overflow.
-pub(crate) fn align_up(value: u64, align_mask: u64) -> Option<u64> {
-    Some(value.checked_add(align_mask)? & !align_mask)
-}
-
-/// Align `value` down to `align_mask`
-/// Returns largest integer not bigger than `value` aligned by `align_mask`.
-pub(crate) fn align_down(value: u64, align_mask: u64) -> u64 {
-    value & !align_mask
-}
-
-#[cfg(debug_assertions)]
-#[allow(unused_unsafe)]
-unsafe fn unreachable_unchecked() -> ! {
-    unreachable!()
-}
-
-#[cfg(not(debug_assertions))]
-unsafe fn unreachable_unchecked() -> ! {
-    core::hint::unreachable_unchecked()
-}
-
-// #[cfg(feature = "tracing")]
-use core::fmt::Debug as MemoryBounds;
-
-// #[cfg(not(feature = "tracing"))]
-// use core::any::Any as MemoryBounds;
+//!
+//! Implementation agnostic memory allocator for Vulkan like APIs.
+//!
+//! This crate is intended to be used as part of safe API implementations.\
+//! Use with caution. There are unsafe functions all over the place.
+//!
+//! # Usage
+//!
+//! Start with fetching `DeviceProperties` from `gpu-alloc-<backend>` crate for the backend of choice.\
+//! Then create `GpuAllocator` instance and use it for all device memory allocations.\
+//! `GpuAllocator` will take care for all necessary bookkeeping like memory object count limit,
+//! heap budget and memory mapping.
+//!
+//! ### Backends implementations
+//!
+//! Backend supporting crates should not depend on this crate.\
+//! Instead they should depend on `gpu-alloc-types` which is much more stable,
+//! allowing to upgrade `gpu-alloc` version without `gpu-alloc-<backend>` upgrade.
+//!
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+#[cfg(feature = "tracing")]
+macro_rules! report_error_on_drop {
+    ($($tokens:tt)*) => {{
+        #[cfg(feature = "std")]
+        {
+            if std::thread::panicking() {
+                return;
+            }
+        }
+
+        tracing::error!($($tokens)*)
+    }};
+}
+
+#[cfg(all(not(feature = "tracing"), feature = "std"))]
+macro_rules! report_error_on_drop {
+    ($($tokens:tt)*) => {{
+        if std::thread::panicking() {
+            return;
+        }
+        eprintln!($($tokens)*)
+    }};
+}
+
+#[cfg(all(not(feature = "tracing"), not(feature = "std")))]
+macro_rules! report_error_on_drop {
+    ($($tokens:tt)*) => {{
+        panic!($($tokens)*)
+    }};
+}
+
+mod allocator;
+mod block;
+mod buddy;
+mod config;
+mod error;
+mod freelist;
+mod heap;
+mod slab;
+mod usage;
+mod util;
+
+pub use {
+    self::{allocator::*, block::MemoryBlock, config::*, error::*, usage::*},
+    gpu_alloc_types::*,
+};
+
+/// Memory request for allocator.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct Request {
+    /// Minimal size of memory block required.
+    /// Returned block may have larger size,
+    /// use `MemoryBlock::size` to learn actual size of returned block.
+    pub size: u64,
+
+    /// Minimal alignment mask required.
+    /// Returned block may have larger alignment,
+    /// use `MemoryBlock::align` to learn actual alignment of returned block.
+    pub align_mask: u64,
+
+    /// Intended memory usage.
+    /// Returned block may support additional usages,
+    /// use `MemoryBlock::props` to learn memory properties of returned block.
+    pub usage: UsageFlags,
+
+    /// Bitset for memory types.
+    /// Returned block will be from memory type corresponding to one of set bits,
+    /// use `MemoryBlock::memory_type` to learn memory type index of returned block.
+    pub memory_types: u32,
+}
+
+/// Aligns `value` up to `align_mask`
+/// Returns smallest integer not lesser than `value` aligned by `align_mask`.
+/// Returns `None` on overflow.
+pub(crate) fn align_up(value: u64, align_mask: u64) -> Option<u64> {
+    Some(value.checked_add(align_mask)? & !align_mask)
+}
+
+/// Align `value` down to `align_mask`
+/// Returns largest integer not bigger than `value` aligned by `align_mask`.
+pub(crate) fn align_down(value: u64, align_mask: u64) -> u64 {
+    value & !align_mask
+}
+
+#[cfg(debug_assertions)]
+#[allow(unused_unsafe)]
+unsafe fn unreachable_unchecked() -> ! {
+    unreachable!()
+}
+
+#[cfg(not(debug_assertions))]
+unsafe fn unreachable_unchecked() -> ! {
+    core::hint::unreachable_unchecked()
+}
+
+// #[cfg(feature = "tracing")]
+use core::fmt::Debug as MemoryBounds;
+
+// #[cfg(not(feature = "tracing"))]
+// use core::any::Any as MemoryBounds;
diff --git a/third_party/rust/gpu-alloc/src/slab.rs b/third_party/rust/gpu-alloc/src/slab.rs
index b65c73e656af..de413435fe44 100644
--- a/third_party/rust/gpu-alloc/src/slab.rs
+++ b/third_party/rust/gpu-alloc/src/slab.rs
@@ -1,97 +1,97 @@
-use {crate::unreachable_unchecked, alloc::vec::Vec, core::mem::replace};
-
-#[derive(Debug)]
-enum Entry<T> {
-    Vacant(usize),
-    Occupied(T),
-}
-#[derive(Debug)]
-pub(crate) struct Slab<T> {
-    next_vacant: usize,
-    entries: Vec<Entry<T>>,
-}
-
-impl<T> Slab<T> {
-    pub fn new() -> Self {
-        Slab {
-            next_vacant: !0,
-            entries: Vec::new(),
-        }
-    }
-
-    /// Inserts value into this linked vec and returns index
-    /// at which value can be accessed in constant time.
-    pub fn insert(&mut self, value: T) -> usize {
-        if self.next_vacant >= self.entries.len() {
-            self.entries.push(Entry::Occupied(value));
-            self.entries.len() - 1
-        } else {
-            match *unsafe { self.entries.get_unchecked(self.next_vacant) } {
-                Entry::Vacant(next_vacant) => {
-                    unsafe {
-                        *self.entries.get_unchecked_mut(self.next_vacant) = Entry::Occupied(value);
-                    }
-                    replace(&mut self.next_vacant, next_vacant)
-                }
-                _ => unsafe { unreachable_unchecked() },
-            }
-        }
-    }
-
-    pub fn len(&self) -> usize {
-        self.entries.len()
-    }
-
-    pub unsafe fn get_unchecked(&self, index: usize) -> &T {
-        debug_assert!(index < self.len());
-
-        match self.entries.get_unchecked(index) {
-            Entry::Occupied(value) => value,
-            _ => unreachable_unchecked(),
-        }
-    }
-
-    pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T {
-        debug_assert!(index < self.len());
-
-        match self.entries.get_unchecked_mut(index) {
-            Entry::Occupied(value) => value,
-            _ => unreachable_unchecked(),
-        }
-    }
-
-    pub fn get(&self, index: usize) -> &T {
-        match self.entries.get(index) {
-            Some(Entry::Occupied(value)) => value,
-            _ => panic!("Invalid index"),
-        }
-    }
-
-    pub fn get_mut(&mut self, index: usize) -> &mut T {
-        match self.entries.get_mut(index) {
-            Some(Entry::Occupied(value)) => value,
-            _ => panic!("Invalid index"),
-        }
-    }
-
-    pub unsafe fn remove_unchecked(&mut self, index: usize) -> T {
-        let entry = replace(
-            self.entries.get_unchecked_mut(index),
-            Entry::Vacant(self.next_vacant),
-        );
-
-        self.next_vacant = index;
-
-        match entry {
-            Entry::Occupied(value) => value,
-            _ => unreachable_unchecked(),
-        }
-    }
-
-    pub fn remove(&mut self, index: usize) -> T {
-        match self.entries.get_mut(index) {
-            Some(Entry::Occupied(_)) => unsafe { self.remove_unchecked(index) },
-            _ => panic!("Invalid index"),
-        }
-    }
-}
+use {crate::unreachable_unchecked, alloc::vec::Vec, core::mem::replace};
+
+#[derive(Debug)]
+enum Entry<T> {
+    Vacant(usize),
+    Occupied(T),
+}
+#[derive(Debug)]
+pub(crate) struct Slab<T> {
+    next_vacant: usize,
+    entries: Vec<Entry<T>>,
+}
+
+impl<T> Slab<T> {
+    pub fn new() -> Self {
+        Slab {
+            next_vacant: !0,
+            entries: Vec::new(),
+        }
+    }
+
+    /// Inserts value into this linked vec and returns index
+    /// at which value can be accessed in constant time.
+    pub fn insert(&mut self, value: T) -> usize {
+        if self.next_vacant >= self.entries.len() {
+            self.entries.push(Entry::Occupied(value));
+            self.entries.len() - 1
+        } else {
+            match *unsafe { self.entries.get_unchecked(self.next_vacant) } {
+                Entry::Vacant(next_vacant) => {
+                    unsafe {
+                        *self.entries.get_unchecked_mut(self.next_vacant) = Entry::Occupied(value);
+                    }
+                    replace(&mut self.next_vacant, next_vacant)
+                }
+                _ => unsafe { unreachable_unchecked() },
+            }
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        self.entries.len()
+    }
+
+    pub unsafe fn get_unchecked(&self, index: usize) -> &T {
+        debug_assert!(index < self.len());
+
+        match self.entries.get_unchecked(index) {
+            Entry::Occupied(value) => value,
+            _ => unreachable_unchecked(),
+        }
+    }
+
+    pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T {
+        debug_assert!(index < self.len());
+
+        match self.entries.get_unchecked_mut(index) {
+            Entry::Occupied(value) => value,
+            _ => unreachable_unchecked(),
+        }
+    }
+
+    pub fn get(&self, index: usize) -> &T {
+        match self.entries.get(index) {
+            Some(Entry::Occupied(value)) => value,
+            _ => panic!("Invalid index"),
+        }
+    }
+
+    pub fn get_mut(&mut self, index: usize) -> &mut T {
+        match self.entries.get_mut(index) {
+            Some(Entry::Occupied(value)) => value,
+            _ => panic!("Invalid index"),
+        }
+    }
+
+    pub unsafe fn remove_unchecked(&mut self, index: usize) -> T {
+        let entry = replace(
+            self.entries.get_unchecked_mut(index),
+            Entry::Vacant(self.next_vacant),
+        );
+
+        self.next_vacant = index;
+
+        match entry {
+            Entry::Occupied(value) => value,
+            _ => unreachable_unchecked(),
+        }
+    }
+
+    pub fn remove(&mut self, index: usize) -> T {
+        match self.entries.get_mut(index) {
+            Some(Entry::Occupied(_)) => unsafe { self.remove_unchecked(index) },
+            _ => panic!("Invalid index"),
+        }
+    }
+}
diff --git a/third_party/rust/gpu-alloc/src/usage.rs b/third_party/rust/gpu-alloc/src/usage.rs
index 9834ef41c2c7..683c79d2faf7 100644
--- a/third_party/rust/gpu-alloc/src/usage.rs
+++ b/third_party/rust/gpu-alloc/src/usage.rs
@@ -1,167 +1,176 @@
-use {
-    core::fmt::{self, Debug},
-    gpu_alloc_types::{MemoryPropertyFlags, MemoryType},
-};
-
-bitflags::bitflags! {
-    /// Memory usage type.
-    /// Bits set define intended usage for requested memory.
-    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
-    pub struct UsageFlags: u8 {
-        /// Hints for allocator to find memory with faster device access.
-        /// If no flags is specified than `FAST_DEVICE_ACCESS` is implied.
-        const FAST_DEVICE_ACCESS = 0x01;
-
-        /// Memory will be accessed from host.
-        /// This flags guarantees that host memory operations will be available.
-        /// Otherwise implementation is encouraged to use non-host-accessible memory.
-        const HOST_ACCESS = 0x02;
-
-        /// Hints allocator that memory will be used for data downloading.
-        /// Allocator will strongly prefer host-cached memory.
-        /// Implies `HOST_ACCESS` flag.
-        const DOWNLOAD = 0x04;
-
-        /// Hints allocator that memory will be used for data uploading.
-        /// If `DOWNLOAD` flag is not set then allocator will assume that
-        /// host will access memory in write-only manner and may
-        /// pick not host-cached.
-        /// Implies `HOST_ACCESS` flag.
-        const UPLOAD = 0x08;
-
-        /// Hints allocator that memory will be used for short duration
-        /// allowing to use faster algorithm with less memory overhead.
-        /// If use holds returned memory block for too long then
-        /// effective memory overhead increases instead.
-        /// Best use case is for staging buffer for single batch of operations.
-        const TRANSIENT = 0x10;
-
-        /// Requests memory that can be addressed with `u64`.
-        /// Allows fetching device address for resources bound to that memory.
-        const DEVICE_ADDRESS = 0x20;
-    }
-}
-
-#[derive(Clone, Copy, Debug)]
-struct MemoryForOneUsage {
-    mask: u32,
-    types: [u32; 32],
-    types_count: u32,
-}
-
-pub(crate) struct MemoryForUsage {
-    usages: [MemoryForOneUsage; 64],
-}
-
-impl Debug for MemoryForUsage {
-    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt.debug_struct("MemoryForUsage")
-            .field("usages", &&self.usages[..])
-            .finish()
-    }
-}
-
-impl MemoryForUsage {
-    pub fn new(memory_types: &[MemoryType]) -> Self {
-        assert!(
-            memory_types.len() <= 32,
-            "Only up to 32 memory types supported"
-        );
-
-        let mut mfu = MemoryForUsage {
-            usages: [MemoryForOneUsage {
-                mask: 0,
-                types: [0; 32],
-                types_count: 0,
-            }; 64],
-        };
-
-        for usage in 0..64 {
-            mfu.usages[usage as usize] =
-                one_usage(UsageFlags::from_bits_truncate(usage), memory_types);
-        }
-
-        mfu
-    }
-
-    /// Returns mask with bits set for memory type indices that support the
-    /// usage.
-    pub fn mask(&self, usage: UsageFlags) -> u32 {
-        self.usages[usage.bits() as usize].mask
-    }
-
-    /// Returns slice of memory type indices that support the usage.
-    /// Earlier memory type has priority over later.
-    pub fn types(&self, usage: UsageFlags) -> &[u32] {
-        let usage = &self.usages[usage.bits() as usize];
-        &usage.types[..usage.types_count as usize]
-    }
-}
-
-fn one_usage(usage: UsageFlags, memory_types: &[MemoryType]) -> MemoryForOneUsage {
-    let mut types = [0; 32];
-    let mut types_count = 0;
-
-    for (index, mt) in memory_types.iter().enumerate() {
-        if compatible(usage, mt.props) {
-            types[types_count as usize] = index as u32;
-            types_count += 1;
-        }
-    }
-
-    types[..types_count as usize]
-        .sort_unstable_by_key(|&index| priority(usage, memory_types[index as usize].props));
-
-    let mask = types[..types_count as usize]
-        .iter()
-        .fold(0u32, |mask, index| mask | 1u32 << index);
-
-    MemoryForOneUsage {
-        mask,
-        types,
-        types_count,
-    }
-}
-
-fn compatible(usage: UsageFlags, flags: MemoryPropertyFlags) -> bool {
-    type Flags = MemoryPropertyFlags;
-    if flags.contains(Flags::LAZILY_ALLOCATED) || flags.contains(Flags::PROTECTED) {
-        // Unsupported
-        false
-    } else if usage.intersects(UsageFlags::HOST_ACCESS | UsageFlags::UPLOAD | UsageFlags::DOWNLOAD)
-    {
-        // Requires HOST_VISIBLE
-        flags.contains(Flags::HOST_VISIBLE)
-    } else {
-        true
-    }
-}
-
-/// Returns priority of memory with specified flags for specified usage.
-/// Lesser value returned = more prioritized.
-fn priority(usage: UsageFlags, flags: MemoryPropertyFlags) -> u32 {
-    type Flags = MemoryPropertyFlags;
-
-    // Highly prefer device local memory when `FAST_DEVICE_ACCESS` usage is specified
-    // or usage is empty.
-    let device_local: bool = flags.contains(Flags::DEVICE_LOCAL)
-        ^ (usage.is_empty() || usage.contains(UsageFlags::FAST_DEVICE_ACCESS));
-
-    assert!(
-        flags.contains(Flags::HOST_VISIBLE)
-            || !usage
-                .intersects(UsageFlags::HOST_ACCESS | UsageFlags::UPLOAD | UsageFlags::DOWNLOAD)
-    );
-
-    // Prefer cached memory for downloads.
-    // Or non-cached if downloads are not expected.
-    let cached: bool = flags.contains(Flags::HOST_CACHED) ^ usage.contains(UsageFlags::DOWNLOAD);
-
-    // Prefer coherent for both uploads and downloads.
-    // Prefer non-coherent if neither flags is set.
-    let coherent: bool = flags.contains(Flags::HOST_COHERENT)
-        ^ (usage.intersects(UsageFlags::UPLOAD | UsageFlags::DOWNLOAD));
-
-    // Each boolean is false if flags are preferred.
-    device_local as u32 * 4 + cached as u32 * 2 + coherent as u32
-}
+use {
+    core::fmt::{self, Debug},
+    gpu_alloc_types::{MemoryPropertyFlags, MemoryType},
+};
+
+bitflags::bitflags! {
+    /// Memory usage type.
+    /// Bits set define intended usage for requested memory.
+    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+    #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+    pub struct UsageFlags: u8 {
+        /// Hints for allocator to find memory with faster device access.
+        /// If no flags is specified than `FAST_DEVICE_ACCESS` is implied.
+        const FAST_DEVICE_ACCESS = 0x01;
+
+        /// Memory will be accessed from host.
+        /// This flags guarantees that host memory operations will be available.
+        /// Otherwise implementation is encouraged to use non-host-accessible memory.
+        const HOST_ACCESS = 0x02;
+
+        /// Hints allocator that memory will be used for data downloading.
+        /// Allocator will strongly prefer host-cached memory.
+        /// Implies `HOST_ACCESS` flag.
+        const DOWNLOAD = 0x04;
+
+        /// Hints allocator that memory will be used for data uploading.
+        /// If `DOWNLOAD` flag is not set then allocator will assume that
+        /// host will access memory in write-only manner and may
+        /// pick not host-cached.
+        /// Implies `HOST_ACCESS` flag.
+        const UPLOAD = 0x08;
+
+        /// Hints allocator that memory will be used for short duration
+        /// allowing to use faster algorithm with less memory overhead.
+        /// If use holds returned memory block for too long then
+        /// effective memory overhead increases instead.
+        /// Best use case is for staging buffer for single batch of operations.
+        const TRANSIENT = 0x10;
+
+        /// Requests memory that can be addressed with `u64`.
+        /// Allows fetching device address for resources bound to that memory.
+        const DEVICE_ADDRESS = 0x20;
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+struct MemoryForOneUsage {
+    mask: u32,
+    types: [u32; 32],
+    types_count: u32,
+}
+
+pub(crate) struct MemoryForUsage {
+    usages: [MemoryForOneUsage; 64],
+}
+
+impl Debug for MemoryForUsage {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("MemoryForUsage")
+            .field("usages", &&self.usages[..])
+            .finish()
+    }
+}
+
+impl MemoryForUsage {
+    pub fn new(memory_types: &[MemoryType]) -> Self {
+        assert!(
+            memory_types.len() <= 32,
+            "Only up to 32 memory types supported"
+        );
+
+        let mut mfu = MemoryForUsage {
+            usages: [MemoryForOneUsage {
+                mask: 0,
+                types: [0; 32],
+                types_count: 0,
+            }; 64],
+        };
+
+        for usage in 0..64 {
+            mfu.usages[usage as usize] =
+                one_usage(UsageFlags::from_bits_truncate(usage), memory_types);
+        }
+
+        mfu
+    }
+
+    /// Returns mask with bits set for memory type indices that support the
+    /// usage.
+    pub fn mask(&self, usage: UsageFlags) -> u32 {
+        self.usages[usage.bits() as usize].mask
+    }
+
+    /// Returns slice of memory type indices that support the usage.
+    /// Earlier memory type has priority over later.
+    pub fn types(&self, usage: UsageFlags) -> &[u32] {
+        let usage = &self.usages[usage.bits() as usize];
+        &usage.types[..usage.types_count as usize]
+    }
+}
+
+fn one_usage(usage: UsageFlags, memory_types: &[MemoryType]) -> MemoryForOneUsage {
+    let mut types = [0; 32];
+    let mut types_count = 0;
+
+    for (index, mt) in memory_types.iter().enumerate() {
+        if compatible(usage, mt.props) {
+            types[types_count as usize] = index as u32;
+            types_count += 1;
+        }
+    }
+
+    types[..types_count as usize]
+        .sort_unstable_by_key(|&index| reverse_priority(usage, memory_types[index as usize].props));
+
+    let mask = types[..types_count as usize]
+        .iter()
+        .fold(0u32, |mask, index| mask | 1u32 << index);
+
+    MemoryForOneUsage {
+        mask,
+        types,
+        types_count,
+    }
+}
+
+fn compatible(usage: UsageFlags, flags: MemoryPropertyFlags) -> bool {
+    type Flags = MemoryPropertyFlags;
+    if flags.contains(Flags::LAZILY_ALLOCATED) || flags.contains(Flags::PROTECTED) {
+        // Unsupported
+        false
+    } else if usage.intersects(UsageFlags::HOST_ACCESS | UsageFlags::UPLOAD | UsageFlags::DOWNLOAD)
+    {
+        // Requires HOST_VISIBLE
+        flags.contains(Flags::HOST_VISIBLE)
+    } else {
+        true
+    }
+}
+
+/// Returns reversed priority of memory with specified flags for specified usage.
+/// Lesser value returned = more prioritized.
+fn reverse_priority(usage: UsageFlags, flags: MemoryPropertyFlags) -> u32 {
+    type Flags = MemoryPropertyFlags;
+
+    // Highly prefer device local memory when `FAST_DEVICE_ACCESS` usage is specified
+    // or usage is empty.
+    let device_local: bool = flags.contains(Flags::DEVICE_LOCAL)
+        ^ (usage.is_empty() || usage.contains(UsageFlags::FAST_DEVICE_ACCESS));
+
+    assert!(
+        flags.contains(Flags::HOST_VISIBLE)
+            || !usage
+                .intersects(UsageFlags::HOST_ACCESS | UsageFlags::UPLOAD | UsageFlags::DOWNLOAD)
+    );
+
+    // Prefer non-host-visible memory when host access is not required.
+    let host_visible: bool = flags.contains(Flags::HOST_VISIBLE)
+        ^ usage.intersects(UsageFlags::HOST_ACCESS | UsageFlags::UPLOAD | UsageFlags::DOWNLOAD);
+
+    // Prefer cached memory for downloads.
+    // Or non-cached if downloads are not expected.
+    let host_cached: bool =
+        flags.contains(Flags::HOST_CACHED) ^ usage.contains(UsageFlags::DOWNLOAD);
+
+    // Prefer coherent for both uploads and downloads.
+    // Prefer non-coherent if neither flags is set.
+    let host_coherent: bool = flags.contains(Flags::HOST_COHERENT)
+        ^ (usage.intersects(UsageFlags::UPLOAD | UsageFlags::DOWNLOAD));
+
+    // Each boolean is false if flags are preferred.
+    device_local as u32 * 8
+        + host_visible as u32 * 4
+        + host_cached as u32 * 2
+        + host_coherent as u32
+}
diff --git a/third_party/rust/gpu-alloc/src/util.rs b/third_party/rust/gpu-alloc/src/util.rs
index f02259d958d6..c22d30dbbd7c 100644
--- a/third_party/rust/gpu-alloc/src/util.rs
+++ b/third_party/rust/gpu-alloc/src/util.rs
@@ -1,44 +1,44 @@
-use alloc::sync::Arc;
-
-/// Guarantees uniqueness only if `Weak` pointers are never created
-/// from this `Arc` or clones.
-pub(crate) fn is_arc_unique<M>(arc: &mut Arc<M>) -> bool {
-    let strong_count = Arc::strong_count(&*arc);
-    debug_assert_ne!(strong_count, 0, "This Arc should exist");
-
-    debug_assert!(
-        strong_count > 1 || Arc::get_mut(arc).is_some(),
-        "`Weak` pointer exists"
-    );
-
-    strong_count == 1
-}
-
-/// Can be used instead of `Arc::try_unwrap(arc).unwrap()`
-/// when it is guaranteed to succeed.
-pub(crate) unsafe fn arc_unwrap<M>(mut arc: Arc<M>) -> M {
-    use core::{mem::ManuallyDrop, ptr::read};
-    debug_assert!(is_arc_unique(&mut arc));
-
-    // Get raw pointer to inner value.
-    let raw = Arc::into_raw(arc);
-
-    // As `Arc` is unique and no Weak pointers exist
-    // it won't be dereferenced elsewhere.
-    let inner = read(raw);
-
-    // Cast to `ManuallyDrop` which guarantees to have same layout
-    // and will skip dropping.
-    drop(Arc::from_raw(raw as *const ManuallyDrop<M>));
-    inner
-}
-
-/// Can be used instead of `Arc::try_unwrap`
-/// only if `Weak` pointers are never created from this `Arc` or clones.
-pub(crate) unsafe fn try_arc_unwrap<M>(mut arc: Arc<M>) -> Option<M> {
-    if is_arc_unique(&mut arc) {
-        Some(arc_unwrap(arc))
-    } else {
-        None
-    }
-}
+use alloc::sync::Arc;
+
+/// Guarantees uniqueness only if `Weak` pointers are never created
+/// from this `Arc` or clones.
+pub(crate) fn is_arc_unique<M>(arc: &mut Arc<M>) -> bool {
+    let strong_count = Arc::strong_count(&*arc);
+    debug_assert_ne!(strong_count, 0, "This Arc should exist");
+
+    debug_assert!(
+        strong_count > 1 || Arc::get_mut(arc).is_some(),
+        "`Weak` pointer exists"
+    );
+
+    strong_count == 1
+}
+
+/// Can be used instead of `Arc::try_unwrap(arc).unwrap()`
+/// when it is guaranteed to succeed.
+pub(crate) unsafe fn arc_unwrap<M>(mut arc: Arc<M>) -> M {
+    use core::{mem::ManuallyDrop, ptr::read};
+    debug_assert!(is_arc_unique(&mut arc));
+
+    // Get raw pointer to inner value.
+    let raw = Arc::into_raw(arc);
+
+    // As `Arc` is unique and no Weak pointers exist
+    // it won't be dereferenced elsewhere.
+    let inner = read(raw);
+
+    // Cast to `ManuallyDrop` which guarantees to have same layout
+    // and will skip dropping.
+    drop(Arc::from_raw(raw as *const ManuallyDrop<M>));
+    inner
+}
+
+/// Can be used instead of `Arc::try_unwrap`
+/// only if `Weak` pointers are never created from this `Arc` or clones.
+pub(crate) unsafe fn try_arc_unwrap<M>(mut arc: Arc<M>) -> Option<M> {
+    if is_arc_unique(&mut arc) {
+        Some(arc_unwrap(arc))
+    } else {
+        None
+    }
+}
diff --git a/third_party/rust/metal/.cargo-checksum.json b/third_party/rust/metal/.cargo-checksum.json
index 9bc06c85d0c6..e2acac27fb5d 100644
--- a/third_party/rust/metal/.cargo-checksum.json
+++ b/third_party/rust/metal/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.lock":"52f1dbe7e210e54f2e5a580f0119bf73357efd9ae2cfe13ecc5d97d10e383072","Cargo.toml":"90d4cc28d3a0aaae093de4a76b09c83cdac808f626e74dcd2b59a68785becff1","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","Makefile":"6fddc61a94f5b31a65b11c1bef8b19c92bff738716998076c5d49c2834223c75","README.md":"6817e12da257b43352f71f595dcc713adf117c734ebf656e6af2d7d04be27cd6","bors.toml":"c2733ec512a08bf76b6a1ed77270810a0edeb888ba24c2a75679e1eb1bd563a5","examples/argument-buffer/main.rs":"a087db6648a4b092520de29616521c704d892e0d3ace935d16f3f339415c4361","examples/bind/main.rs":"a0c85aad05f08666f9b380a7146a8473a6a6fe0db5d523760373093a0af20e5f","examples/bindless/main.rs":"0aaad9e42634c1ea204342a76c1bfe80c8fcf9d3ebe8247846cf63ecb08fb0d5","examples/caps/main.rs":"b7be00c1cc9042140d34ea05051152a7035f316f0bdcd31ac5a660a97e0c4f70","examples/circle/README.md":"e1c97cf5252f0d1f2934ace78b5d839c5f45911f3007dbd2925eeceefb8f0af6","examples/circle/main.rs":"91d80c85a358400ceeddf1ab108a7e1052dd208c4ec72cde925d02284d3cf9d4","examples/circle/screenshot.png":"97bf07c85bf02367447b9c8a81707c124e4a3b420fa386b95ba08b21938f4f2a","examples/circle/shaders.metal":"5e4f40efca5bb386204a09e1b983cc6c634fdf1ca9dd4974227313adbf50e8b5","examples/circle/shaders.metallib":"666a9491d795ef9c0b9c122c7ada571cc2c0e8774d2d89e5b4b996f3dc47962b","examples/compute/compute-argument-buffer.metal":"6530bbd6a0101d9db2893805436f5dc877959e81ea97a27014c0fc52fc9fa77b","examples/compute/compute-argument-buffer.rs":"e3de61fd7cc2f14d9d52300e4878601dbc072bc26d9dafc66115df58f94e0470","examples/compute/embedded-lib.rs":"55f701810fa5270c27ca771e713f9f8cf09e124a997b0b03790b38435593a7ea","examples/compute/main.rs":"f16cbf57cd27dc948ff651251ce26e6bd616cb5d989b8dadb4256c73a9bfba4b","examples/compute/shaders.metal":"f2b15551bb5247b88a3029c3d8ef37c6fa04a4a6cca9f90f069894ed6822b4bf","examples/compute/shaders.metallib":"fef91643e60c0ec99ad2bd2f3916299bcc3e6a80038ea27bed59681badfea7d1","examples/events/main.rs":"9cb35381b0a3918bd7d530171de8f7cceafe3d4851c0f430b4aff1f5c2aae749","examples/fence/main.rs":"47741327e62db1d8bd344b6a9ec26ef13ffb0b56b0dd7077c5d926d43faaeff7","examples/headless-render/README.md":"b1c97b52701cfb41fc0b9e269ba7a7a454d9161746198e2f5789f2636f60842d","examples/headless-render/main.rs":"cf0180839e8d09d4bf403ae947365ac18fa17782172986311bfa04b84f88169e","examples/headless-render/screenshot.png":"01d6ea5791b63b0f01190198756446cf313fc25dc64d0138c1b4f62c9f862dd1","examples/library/main.rs":"a1420ec28a471f28a789b75b3ecf5abb699ed352b337747169914812fb98045a","examples/mps/main.rs":"51f34582bf118f171bbb81d22c11407c7a35f381dbbff2d75c6f8e90d22a2aa1","examples/mps/shaders.metal":"155922d6a4184078ae7ee29504a268e1218f07d908f921eef60e5bfa8a793bda","examples/mps/shaders.metallib":"b62451223549b1e7eb90ec3d3534c0ed4cdfdc581c7df3ffcdc4786a5fcacde4","examples/reflection/main.rs":"01b0cd95558b8c3e547b239a1b2ae7e882ab0f52346f96bcefd2e9616693a98c","examples/shader-dylib/main.rs":"d29ab2105131b8c56a6af6453f1d973e2bda5564f84b652c01e4a4e44b7a70f2","examples/shad
er-dylib/test_dylib.metal":"3469de785c2c0da784e84758fc0da5a81e474ca15588485d9e04398690641cc8","examples/shader-dylib/test_shader.metal":"1a04ff8ab3288b09d14cd35440b2557e92ddedbff9d07c4144a22e9062e6e1e4","examples/window/README.md":"69655cff298e07887fe70e8a13e27d8a87efcd0cc0da4e15485134e064e1aceb","examples/window/main.rs":"09c508013223de859f33fb69410bde30e8d7f04952850504d8b1f8faf7049b1b","examples/window/screenshot.png":"da7369d7cc297c7f7f6bd773c93e7d708d72442c9c5ff5a20c2f2ee6852552dc","examples/window/shaders.metal":"90dee6c752add5c617dfdca93064b2824b44ff8c01ef63986826f6a1531e95d6","examples/window/shaders.metallib":"16fa82beb70bf16c3501970cae0d5028a747a08164337161dc9c2e8965d4c366","src/argument.rs":"578ca587dfb034a7e8e4551fd93164a86595a4637e5656fc6cc429ae2de6cda2","src/buffer.rs":"80c55c8703340bf0d4d1b5eface518fdf82355ccb897883639cbf7e4933a4344","src/capturedescriptor.rs":"7d90b1e7b87fa9da1e38bba9637cd8d7a18a81f8c3f408149058ed4ea20a6894","src/capturemanager.rs":"bdee9e170da371778452513a9ac363a738ea8bfd3f0870d86c6013459c5af010","src/commandbuffer.rs":"8636043739fb9ea44f443ef315fadfa13675d04827e5fac42abcde0b2c612a0c","src/commandqueue.rs":"5b87621ae4495ae04a5de5ce980d0cde31b4bb0d1e31e932d34c49252240c9d9","src/constants.rs":"d9bea9cb01892f40fb635cef5ca247023c1998b47b33cc5ce39696fac634383b","src/depthstencil.rs":"5bfa4f49940fdc9d12b2af08f7fe710c41f0b7e26bdb6f8709fe17b9a9d7d5eb","src/device.rs":"1af6bc1c60981c60d102338bb9618c3a6bb215994501a64c31997dd1e7976377","src/drawable.rs":"aea0759dc9de002c660e938319dfdfd3e4e82add30ed626ea31e3270698f4b85","src/encoder.rs":"2d0f402f1b286459a7c3b0dbbc640a81a7e3f6698f7ca7a78f5bfa09e9b633e9","src/heap.rs":"fcbb7dfaafb1008fadd75fb2b394776c9843fe98a266e237d7a7b4d80e046b06","src/indirect_encoder.rs":"c61c610ac4607a6b76c551becb88e054c9754b5cc31b144e4b6f26202e6294f7","src/lib.rs":"a297e4d0f831d82bc6fe53ed0692b8d06ac43420e257b26f5ea69a03a459c505","src/library.rs":"985bb5833b55f054b932df425932574d6c65ee2ed11b5e1ccd5a2e279c52d54c","src/mps.rs":"b415be3221754d836fd535f4b5b45ed484d4cc926bd26145e82a9e61d337da4c","src/pipeline/compute.rs":"e4bed01671a466ed460fbc9059ac72dab7cb123a964de63058f0e5aafdc5e2a0","src/pipeline/mod.rs":"5ec3cb524cc03202a3394dad5a7d0b733474f664935353253e76a49aed5513ad","src/pipeline/render.rs":"6738931629f1944d69815622a9f3281e55df0e56dc87d3cdb885cc8c7e2c8820","src/renderpass.rs":"666ca2a35d2bb96564f105a87f03cfbf87d3fd10e6915e473e8ff4c0db49fdef","src/resource.rs":"9753cc15c5d0064e954717c61feb902c218141b1b0e77bce4f9d0653b6d4589b","src/sampler.rs":"adabef3520a18c829bfd9b705897873d9adc37ebe88cfa6864154483dd858b9d","src/sync.rs":"a2c081984630634134b17007a6ff7a016e2aa9070bc46754d3e82500fc25eb53","src/texture.rs":"773bb005cb17c17b4e0138e328fcbeef37d437032406521f874527782d842ef6","src/types.rs":"5a754c8036ff4ab1ec41d01af5fba64f3085f2a9dd1d15d00e907e40b85cf163","src/vertexdescriptor.rs":"5874f54bcc5613adff613ed3e2bb08870b1115ef6d369b21612ce848796b005e"},"package":"de11355d1f6781482d027a3b4d4de7825dcedb197bf573e0596d00008402d060"} \ No newline at end of file 
+{"files":{"Cargo.lock":"184f7d96e94c01f18ce7fafeaae5bad38d6c1084a0f11196710273dbab02d70a","Cargo.toml":"ff4ac6941b706e0baed1fb4ccc0e6993bd4a1d716a6a85d0997a1de01786870a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","Makefile":"99c4c592467981921d1257a4e9e841dce53624448738d0ed52a75431a406f7bc","README.md":"b61a350d23e7c88c34b1cd703083978e17c28a286e4231f1dc0a2383a1f85902","assets/metal.svg":"3a295bd130785bc2fea2f3a91a4f6c7b4b3c40682c3fcb7a75f9887b66b06264","bors.toml":"c2733ec512a08bf76b6a1ed77270810a0edeb888ba24c2a75679e1eb1bd563a5","examples/argument-buffer/main.rs":"a087db6648a4b092520de29616521c704d892e0d3ace935d16f3f339415c4361","examples/bind/main.rs":"a0c85aad05f08666f9b380a7146a8473a6a6fe0db5d523760373093a0af20e5f","examples/bindless/main.rs":"0aaad9e42634c1ea204342a76c1bfe80c8fcf9d3ebe8247846cf63ecb08fb0d5","examples/caps/main.rs":"b7be00c1cc9042140d34ea05051152a7035f316f0bdcd31ac5a660a97e0c4f70","examples/circle/README.md":"e1c97cf5252f0d1f2934ace78b5d839c5f45911f3007dbd2925eeceefb8f0af6","examples/circle/main.rs":"22bd52ccba85debc1ccb3f277750e4759283d3c9b135b9a0da496c747e4cf637","examples/circle/screenshot.png":"97bf07c85bf02367447b9c8a81707c124e4a3b420fa386b95ba08b21938f4f2a","examples/circle/shaders.metal":"5e4f40efca5bb386204a09e1b983cc6c634fdf1ca9dd4974227313adbf50e8b5","examples/circle/shaders.metallib":"666a9491d795ef9c0b9c122c7ada571cc2c0e8774d2d89e5b4b996f3dc47962b","examples/compute/compute-argument-buffer.metal":"6530bbd6a0101d9db2893805436f5dc877959e81ea97a27014c0fc52fc9fa77b","examples/compute/compute-argument-buffer.rs":"e3de61fd7cc2f14d9d52300e4878601dbc072bc26d9dafc66115df58f94e0470","examples/compute/embedded-lib.rs":"55f701810fa5270c27ca771e713f9f8cf09e124a997b0b03790b38435593a7ea","examples/compute/main.rs":"155bfcb6b6289642e4a97582ff8993742fac69dc443cfb23a0b1f170b1e44d4f","examples/compute/shaders.metal":"f2b15551bb5247b88a3029c3d8ef37c6fa04a4a6cca9f90f069894ed6822b4bf","examples/compute/shaders.metallib":"fef91643e60c0ec99ad2bd2f3916299bcc3e6a80038ea27bed59681badfea7d1","examples/events/main.rs":"9cb35381b0a3918bd7d530171de8f7cceafe3d4851c0f430b4aff1f5c2aae749","examples/fence/main.rs":"47741327e62db1d8bd344b6a9ec26ef13ffb0b56b0dd7077c5d926d43faaeff7","examples/headless-render/README.md":"b1c97b52701cfb41fc0b9e269ba7a7a454d9161746198e2f5789f2636f60842d","examples/headless-render/main.rs":"2e6eb5db66f28833d1b53df3f134b4b53907a00036eeb6cdd9e428478e164aae","examples/headless-render/screenshot.png":"01d6ea5791b63b0f01190198756446cf313fc25dc64d0138c1b4f62c9f862dd1","examples/library/main.rs":"a1420ec28a471f28a789b75b3ecf5abb699ed352b337747169914812fb98045a","examples/mesh-shader/main.rs":"49899300f80f2d1e64366176313f69b597b69f3728a52a6a6f56ff7cd54c3c23","examples/mesh-shader/shaders.metal":"6ba934c3edd3ba0b8f6c4ac37be0fd0fe35eeef004e371521b7bf5a2fae9a223","examples/mesh-shader/shaders.metallib":"0af3b7ab0cd6186a90163550b76fab5bd2ef6ba97e791354d4281ca92d4887ff","examples/mps/main.rs":"2cbb532635270bc55cdfa3ee231cc2ee20dd8b3a5e6bf76062cca89ef1e3498f","examples/mps/shaders.metal":"155922d6a4184078ae7ee29504a268e1218f07d908f921eef60e5bfa8a793bda","examples/mps/shaders.metallib":"b62451223549b1e7eb90ec3d3534c0ed4cdfdc581c7df3ffcdc4786a5fcacde4","examples/raytracing/README.md":"6f0d683efac74572099c317ce9f65c3e6ff3c5252c6870c0c38c67f08b37bb01","examples/raytracing/camera.rs":"bed7a1787e1a52060dd0d64ff630a6fb769f15098d0a9d3ea68d4b57aee78b53","examples/raytr
acing/geometry.rs":"d3db29b4ab2d3d39de74718e0a7133a4e576dc26dcc6b6728c047865fb78952a","examples/raytracing/main.rs":"c3571854cbaaaeea386d7eb8af1fe9ea0492eae9af62b60203f4b6937dc4999a","examples/raytracing/renderer.rs":"d4e704b8f8e61919882aafc009b3a20928902d5b7edb9122d05f3e468d32f613","examples/raytracing/scene.rs":"fc2f214e0ad90e916fdbe2a339567a5dd323ef45b916fa8432c1156b4e94b998","examples/raytracing/screenshot.png":"400bb138f5adb69e4db8626681fb17667e5e112c94864879d9282d5348d970db","examples/raytracing/shaders.metal":"696f6a0ba79d82e2fa0e03eadbff2f6cdeac87acc805c2b7df657b85c1173174","examples/raytracing/shaders.metallib":"249b71998f58ddf8b3de37d79e9cc1f4a3494fba4bd7ba3f5411fb603de9dd5a","examples/reflection/main.rs":"563b49f5c057f1f8d17f3480cbc466e073ea575bfafaf84018a331a984d90a62","examples/shader-dylib/main.rs":"f18f4eb01420e12c196848c42657d41390cf10a3e47e8870025c20a1b29ddc71","examples/shader-dylib/test_dylib.metal":"3469de785c2c0da784e84758fc0da5a81e474ca15588485d9e04398690641cc8","examples/shader-dylib/test_shader.metal":"1a04ff8ab3288b09d14cd35440b2557e92ddedbff9d07c4144a22e9062e6e1e4","examples/window/README.md":"69655cff298e07887fe70e8a13e27d8a87efcd0cc0da4e15485134e064e1aceb","examples/window/main.rs":"09c508013223de859f33fb69410bde30e8d7f04952850504d8b1f8faf7049b1b","examples/window/screenshot.png":"da7369d7cc297c7f7f6bd773c93e7d708d72442c9c5ff5a20c2f2ee6852552dc","examples/window/shaders.metal":"90dee6c752add5c617dfdca93064b2824b44ff8c01ef63986826f6a1531e95d6","examples/window/shaders.metallib":"16fa82beb70bf16c3501970cae0d5028a747a08164337161dc9c2e8965d4c366","src/accelerator_structure.rs":"adb3c65111da0365825b6f7dc686e0c5aad6e3b1c1b5834c480ad4de886475bf","src/argument.rs":"3fa716bdb17bba6fd29289576a13a65689e1f63995eabb26e6c8d00f179ef7d4","src/buffer.rs":"78d9021ab75ef0dad09ff92d126f1ceea241cca606cd7b05553c9351458babed","src/capturedescriptor.rs":"c687c4db298fb83ef640eb34929758c2d7955174a68725e986838e367291e302","src/capturemanager.rs":"c8a42854eebcfb6a7b777d931e368e04a5e35dff2e46c38c481d266dba9b792d","src/commandbuffer.rs":"6c38e9738bb86a095eb4de0dc64ea94277360bf4a35b65ad15a8b69f2ae3a13c","src/commandqueue.rs":"a7d6dee5d064093521465e1da30bded851aa9d59f078942d88926d0f38de82fd","src/computepass.rs":"7c209dc60abc3cb45ed4b1648fd01faaeb561ef980d2b37c3d69a7e5fed9129d","src/constants.rs":"bbfeecf910d4f9ed149a7988d8a79df6e9ad479de81f7fc1294d3434a721b7fd","src/counters.rs":"d36d88f95015eef3655fc114eba1ef680d6a5bf74849389da2d09178244a01c4","src/depthstencil.rs":"d6687878c6e89e1c01b605c3c4e62e32a7d9bb48a8ebd31bf4dcf895e6838588","src/device.rs":"0e8b45d2f2a355485f1e6edafc28a92352f38f0f23aa25d3a0bc30a6e425cb3a","src/drawable.rs":"2006151031fced10a8b347d9cf465ef15f62551f134d3ff1715e3f3831155f23","src/encoder.rs":"aabf819459aa44bba16addfefde979d35c8f34f33e856714a244addc5e28dda7","src/heap.rs":"843536c73cb19643bf7d286433daebf703c28238775d5e006865247c50dabfbf","src/indirect_encoder.rs":"72897df3dd38efd58d8f34f78c3a50012177f725ade9e154071d238206ae7415","src/lib.rs":"56827ece927e6bc72feeb669f030224954215d85c3abba2a9b46f55b95e67d5f","src/library.rs":"e13dae9c74ca3709d1f4e4b5a93c2d2df6a00f1af9c6a65bebd646e426989c97","src/mps.rs":"2db71223faee22ae285ccc6913e79e7a212475ac3e1cc99aff5b335db2499b25","src/pipeline/compute.rs":"6391a19ca5bc5c2d43891a24456087e3945b093d74583cb3ea0adae15ffad98b","src/pipeline/mod.rs":"280916e71b6f3c5fffda6ffee31c616bf4aba6e5a186401ea2febe71c3264145","src/pipeline/render.rs":"bfbbee1d444342e043023dfa8a3f1fde753ed239c93ad026cb9b22a52235eca5","src/renderpass.rs":"01d4bd8001b6b22
4f8d7ac5fe5cde45bc34712308e2c1ef05c2e1a4b50397c41","src/resource.rs":"319b3f4645f418e2364eb135af992124994ea8c28744ac6f1c589ad1eeca2aad","src/sampler.rs":"780b75b22ab1f6a6675d6e1bd04bd7457660c3325519bffb13a08b1abc64a79c","src/sync.rs":"2cb568bbb81e5f4d2f7ca3d31d6ab8ab493439065d180de4e97ad166f166f7f8","src/texture.rs":"dc26effafd6e46b97a8f6b9ce69870d1de3fd05f290e17808c858c74ab2b0a11","src/types.rs":"d255f9c1b449acdb971616255e1c98d35b3b1ac54d9c388f7cdff6cfc3a8b944","src/vertexdescriptor.rs":"6a1378f270f7adf631319bcc8c8d6831c9f9be55e7b39a7ccfe151af9a9363c4"},"package":"550b24b0cd4cf923f36bae78eca457b3a10d8a6a14a9c84cb2687b527e6a84af"} \ No newline at end of file diff --git a/third_party/rust/metal/Cargo.lock b/third_party/rust/metal/Cargo.lock index 829c3a2b728a..171b3c7b968e 100644 --- a/third_party/rust/metal/Cargo.lock +++ b/third_party/rust/metal/Cargo.lock @@ -3,29 +3,22 @@ version = 3 [[package]] -name = "ab_glyph_rasterizer" -version = "0.1.5" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13739d7177fbd22bb0ed28badfff9f372f8bef46c863db4e1c6248f6b223b6e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] -name = "adler32" -version = "1.2.0" +name = "arrayref" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] -name = "andrew" -version = "0.3.1" +name = "arrayvec" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c4afb09dd642feec8408e33f92f3ffc4052946f6b20f32fb99c1f58cd4fa7cf" -dependencies = [ - "bitflags", - "rusttype", - "walkdir", - "xdg", - "xml-rs", -] +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "autocfg" @@ -46,19 +39,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" [[package]] -name = "byteorder" -version = "1.4.3" +name = "bumpalo" +version = "3.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" + +[[package]] +name = "bytemuck" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" [[package]] name = "calloop" -version = "0.6.5" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b036167e76041694579972c28cf4877b4f92da222560ddb49008937b6a6727c" +checksum = "1a59225be45a478d772ce015d9743e49e92798ece9e34eda9a6aa2a6a7f40192" dependencies = [ "log", - "nix 0.18.0", + "nix 0.25.1", + "slotmap", + "thiserror", + "vec_map", ] [[package]] @@ -67,18 +69,21 @@ version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cmake" +version = "0.1.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +dependencies = [ + "cc", +] + [[package]] name = "cocoa" version = "0.24.0" @@ -88,10 +93,10 @@ dependencies = [ "bitflags", "block", "cocoa-foundation", - "core-foundation 0.9.3", - "core-graphics 0.22.3", - "foreign-types", - "libc 0.2.126", + "core-foundation", + "core-graphics", + "foreign-types 0.3.2", + "libc 0.2.144", "objc", ] @@ -103,57 +108,29 @@ checksum = "7ade49b65d560ca58c403a479bb396592b155c0185eada742ee323d1d68d6318" dependencies = [ "bitflags", "block", - "core-foundation 0.9.3", + "core-foundation", "core-graphics-types", - "foreign-types", - "libc 0.2.126", + "foreign-types 0.3.2", + "libc 0.2.144", "objc", ] -[[package]] -name = "core-foundation" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" -dependencies = [ - "core-foundation-sys 0.7.0", - "libc 0.2.126", -] - [[package]] name = "core-foundation" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ - "core-foundation-sys 0.8.3", - "libc 0.2.126", + "core-foundation-sys", + "libc 0.2.144", ] -[[package]] -name = "core-foundation-sys" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" - [[package]] name = "core-foundation-sys" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" -[[package]] -name = "core-graphics" -version = "0.19.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3889374e6ea6ab25dba90bb5d96202f61108058361f6dc72e8b03e6f8bbe923" -dependencies = [ - "bitflags", - "core-foundation 0.7.0", - "foreign-types", - "libc 0.2.126", -] - [[package]] name = "core-graphics" version = "0.22.3" @@ -161,10 +138,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2581bbab3b8ffc6fcbd550bf46c355135d16e9ff2a6ea032ad6b9bf1d7efe4fb" dependencies = [ "bitflags", - "core-foundation 0.9.3", + "core-foundation", "core-graphics-types", - "foreign-types", - "libc 0.2.126", + "foreign-types 0.3.2", + "libc 0.2.144", ] [[package]] @@ -174,22 +151,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a68b68b3446082644c91ac778bf50cd4104bfb002b5a6a7c44cca5a2c70788b" dependencies = [ "bitflags", - "core-foundation 0.9.3", - "foreign-types", - "libc 0.2.126", + "core-foundation", + "foreign-types 0.3.2", + "libc 0.2.144", ] [[package]] -name = "core-video-sys" -version = "0.1.4" +name = "core-text" +version = "19.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ecad23610ad9757664d644e369246edde1803fcb43ed72876565098a5d3828" +checksum = "99d74ada66e07c1cefa18f8abfba765b486f250de2e4a999e5727fc0dd4b4a25" dependencies = [ - "cfg-if 0.1.10", - "core-foundation-sys 0.7.0", - "core-graphics 0.19.2", - "libc 0.2.126", - "objc", + "core-foundation", + "core-graphics", + "foreign-types 0.3.2", + "libc 0.2.144", ] [[package]] @@ -198,7 +174,30 @@ version = "1.3.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", +] + +[[package]] +name = "crossfont" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21fd3add36ea31aba1520aa5288714dd63be506106753226d0eb387a93bc9c45" +dependencies = [ + "cocoa", + "core-foundation", + "core-foundation-sys", + "core-graphics", + "core-text", + "dwrote", + "foreign-types 0.5.0", + "freetype-rs", + "libc 0.2.144", + "log", + "objc", + "once_cell", + "pkg-config", + "servo-fontconfig", + "winapi", ] [[package]] @@ -209,9 +208,9 @@ checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" [[package]] name = "darling" -version = "0.10.2" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" +checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ "darling_core", "darling_macro", @@ -219,68 +218,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.10.2" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" +checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn", + "syn 1.0.98", ] [[package]] name = "darling_macro" -version = "0.10.2" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" +checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", "quote", - "syn", -] - -[[package]] -name = "deflate" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73770f8e1fe7d64df17ca66ad28994a0a623ea497fa69486e14984e715c5d174" -dependencies = [ - "adler32", - "byteorder", -] - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "dirs" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" -dependencies = [ - "libc 0.2.126", - "redox_users", - "winapi 0.3.9", + "syn 1.0.98", ] [[package]] @@ -289,22 +247,13 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b" -[[package]] -name = "dlib" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b11f15d1e3268f140f68d390637d5e76d849782d971ae7063e0da69fe9709a76" -dependencies = [ - "libloading 0.6.7", -] - [[package]] name = "dlib" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac1b7517328c04c2aa68422fc60a41b92208182142ed04a25879c26c8f878794" dependencies = [ - 
"libloading 0.7.3", + "libloading", ] [[package]] @@ -313,6 +262,49 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +[[package]] +name = "dwrote" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439a1c2ba5611ad3ed731280541d36d2e9c4ac5e7fb818a27b604bdc5a6aa65b" +dependencies = [ + "lazy_static", + "libc 0.2.144", + "serde", + "serde_derive", + "winapi", + "wio", +] + +[[package]] +name = "expat-sys" +version = "2.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658f19728920138342f68408b7cf7644d90d4784353d8ebc32e7e8663dbe45fa" +dependencies = [ + "cmake", + "pkg-config", +] + +[[package]] +name = "fdeflate" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10" +dependencies = [ + "simd-adler32", +] + +[[package]] +name = "flate2" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + [[package]] name = "fnv" version = "1.0.7" @@ -325,7 +317,28 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ - "foreign-types-shared", + "foreign-types-shared 0.1.1", +] + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared 0.3.1", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", ] [[package]] @@ -334,61 +347,89 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "freetype-rs" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74eadec9d0a5c28c54bb9882e54787275152a4e36ce206b45d7451384e5bf5fb" +dependencies = [ + "bitflags", + "freetype-sys", + "libc 0.2.144", +] + +[[package]] +name = "freetype-sys" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a37d4011c0cc628dfa766fcc195454f4b068d7afdc2adfd28861191d866e731a" +dependencies = [ + "cmake", + "libc 0.2.144", + "pkg-config", +] + [[package]] name = "fuchsia-cprng" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = 
"fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "getrandom" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ - "cfg-if 1.0.0", - "libc 0.2.126", + "cfg-if", + "libc 0.2.144", "wasi 0.11.0+wasi-snapshot-preview1", ] +[[package]] +name = "glam" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f597d56c1bd55a811a1be189459e8fad2bbc272616375602443bdfb37fa774" + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + [[package]] name = "ident_case" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown", +] + [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc 0.2.126", + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -398,13 +439,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] -name = "kernel32-sys" -version = "0.2.2" +name = "js-sys" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ - "winapi 0.2.8", - "winapi-build", + "wasm-bindgen", ] [[package]] @@ -413,12 +453,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.1.12" @@ -427,19 +461,9 @@ checksum = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" - -[[package]] -name = "libloading" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883" -dependencies = [ - "cfg-if 1.0.0", - "winapi 0.3.9", -] +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libloading" @@ -447,8 +471,8 @@ 
version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ - "cfg-if 1.0.0", - "winapi 0.3.9", + "cfg-if", + "winapi", ] [[package]] @@ -467,7 +491,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -476,7 +500,7 @@ version = "0.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" dependencies = [ - "libc 0.2.126", + "libc 0.1.12", ] [[package]] @@ -487,16 +511,25 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.1.0" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b70ca2a6103ac8b665dc150b142ef0e4e89df640c9e6cf295d189c3caebe5a" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" dependencies = [ - "libc 0.2.126", + "libc 0.2.144", +] + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", ] [[package]] name = "metal" -version = "0.24.0" +version = "0.25.0" dependencies = [ "bitflags", "block", @@ -504,10 +537,13 @@ dependencies = [ "core-graphics-types", "cty", "dispatch", - "foreign-types", + "foreign-types 0.5.0", + "glam", "log", "objc", + "paste", "png", + "rand 0.8.5", "sema", "winit", ] @@ -520,134 +556,107 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.3.7" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ - "adler32", + "adler", + "simd-adler32", ] [[package]] name = "mio" -version = "0.6.23" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc 0.2.126", + "libc 0.2.144", "log", - "miow", - "net2", - "slab", - "winapi 0.2.8", -] - -[[package]] -name = "mio-extras" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" -dependencies = [ - "lazycell", - "log", - "mio", - "slab", -] - -[[package]] -name = "miow" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.45.0", ] [[package]] name = "ndk" -version = "0.2.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eb167c1febed0a496639034d0c76b3b74263636045db5489eee52143c246e73" +checksum = 
"451422b7e4718271c8b5b3aadf5adedba43dc76312454b387e98fae0fc951aa0" dependencies = [ + "bitflags", "jni-sys", "ndk-sys", "num_enum", + "raw-window-handle 0.5.2", "thiserror", ] [[package]] -name = "ndk-glue" -version = "0.2.1" +name = "ndk-context" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf399b8b7a39c6fb153c4ec32c72fd5fe789df24a647f229c239aa7adb15241" +checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" + +[[package]] +name = "ndk-glue" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0434fabdd2c15e0aab768ca31d5b7b333717f03cf02037d5a0a3ff3c278ed67f" dependencies = [ - "lazy_static", - "libc 0.2.126", + "libc 0.2.144", "log", "ndk", + "ndk-context", "ndk-macro", "ndk-sys", + "once_cell", + "parking_lot", ] [[package]] name = "ndk-macro" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d1c6307dc424d0f65b9b06e94f88248e6305726b14729fd67a5e47b2dc481d" +checksum = "0df7ac00c4672f9d5aece54ee3347520b7e20f158656c7db2e6de01902eb7a6c" dependencies = [ "darling", "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.98", ] [[package]] name = "ndk-sys" -version = "0.2.2" +version = "0.4.1+23.1.7779620" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1bcdd74c20ad5d95aacd60ef9ba40fdf77f767051040541df557b7a9b2a2121" - -[[package]] -name = "net2" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "3cf2aae958bd232cac5069850591667ad422d263686d75b52a065f9badeee5a3" dependencies = [ - "cfg-if 0.1.10", - "libc 0.2.126", - "winapi 0.3.9", + "jni-sys", ] [[package]] name = "nix" -version = "0.18.0" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83450fe6a6142ddd95fb064b746083fc4ef1705fe81f64a64e1d4b39f54a1055" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags", - "cc", - "cfg-if 0.1.10", - "libc 0.2.126", + "cfg-if", + "libc 0.2.144", + "memoffset", ] [[package]] name = "nix" -version = "0.20.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa9b4819da1bc61c0ea48b63b7bc8604064dd43013e7cc325df098d49cd7c18a" +checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4" dependencies = [ + "autocfg", "bitflags", - "cc", - "cfg-if 1.0.0", - "libc 0.2.126", + "cfg-if", + "libc 0.2.144", + "memoffset", ] [[package]] @@ -662,24 +671,23 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.4.3" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca565a7df06f3d4b485494f25ba05da1435950f4dc263440eda7a6fa9b8e36e4" +checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" dependencies = [ - "derivative", "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.4.3" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffa5a33ddddfee04c0283a7653987d634e880347e96b5b2ed64de07efb59db9d" +checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.98", ] [[package]] @@ -703,44 +711,39 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.12.0" +version = 
"1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" - -[[package]] -name = "owned_ttf_parser" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f923fb806c46266c02ab4a5b239735c144bdeda724a50ed058e5226f594cde3" -dependencies = [ - "ttf-parser", -] +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "parking_lot" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc 0.2.126", + "cfg-if", + "libc 0.2.144", "redox_syscall", "smallvec", - "winapi 0.3.9", + "windows-sys 0.45.0", ] +[[package]] +name = "paste" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" + [[package]] name = "percent-encoding" version = "2.1.0" @@ -755,39 +758,47 @@ checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "png" -version = "0.16.8" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" +checksum = "aaeebc51f9e7d2c150d3f3bfeb667f2aa985db5ef1e3d212847bdedb488beeaa" dependencies = [ "bitflags", "crc32fast", - "deflate", + "fdeflate", + "flate2", "miniz_oxide", ] [[package]] -name = "proc-macro-crate" -version = "0.1.5" +name = "ppv-lite86" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ - "toml", + "once_cell", + "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.40" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" +checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.20" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -798,7 +809,7 @@ version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" dependencies = [ - "libc 0.2.126", + "libc 0.2.144", "rand 0.4.6", ] @@ -809,10 +820,31 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" dependencies = [ "fuchsia-cprng", - "libc 0.2.126", + "libc 0.2.144", "rand_core 0.3.1", "rdrand", - "winapi 0.3.9", + "winapi", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc 0.2.144", + "rand_chacha", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", ] [[package]] @@ -831,13 +863,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" [[package]] -name = "raw-window-handle" -version = "0.3.4" +name = "rand_core" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28f55143d0548dad60bb4fbdc835a3d7ac6acc3324506450c5fdd6e42903a76" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "libc 0.2.126", - "raw-window-handle 0.4.3", + "getrandom", ] [[package]] @@ -849,6 +880,12 @@ dependencies = [ "cty", ] +[[package]] +name = "raw-window-handle" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" + [[package]] name = "rdrand" version = "0.4.0" @@ -868,33 +905,12 @@ dependencies = [ ] [[package]] -name = "redox_users" -version = "0.4.3" +name = "safe_arch" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "c1ff3d6d9696af502cc3110dacce942840fb06ff4514cad92236ecc455f2ce05" dependencies = [ - "getrandom", - "redox_syscall", - "thiserror", -] - -[[package]] -name = "rusttype" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc7c727aded0be18c5b80c1640eae0ac8e396abf6fa8477d96cb37d18ee5ec59" -dependencies = [ - "ab_glyph_rasterizer", - "owned_ttf_parser", -] - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", + "bytemuck", ] [[package]] @@ -909,6 +925,18 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "sctk-adwaita" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61270629cc6b4d77ec1907db1033d5c2e1a404c412743621981a871dc9c12339" +dependencies = [ + "crossfont", + "log", + "smithay-client-toolkit", + "tiny-skia", +] + [[package]] name = "sema" version = "0.1.4" @@ -927,10 +955,51 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" [[package]] -name = "slab" -version = "0.4.6" +name = "serde_derive" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "servo-fontconfig" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e3e22fe5fd73d04ebf0daa049d3efe3eae55369ce38ab16d07ddd9ac5c217c" +dependencies = [ + "libc 0.2.144", + "servo-fontconfig-sys", +] + +[[package]] +name = "servo-fontconfig-sys" +version = "5.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36b879db9892dfa40f95da1c38a835d41634b825fbd8c4c418093d53c24b388" +dependencies = [ + "expat-sys", + "freetype-sys", + "pkg-config", +] + +[[package]] +name = "simd-adler32" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "238abfbb77c1915110ad968465608b68e869e0772622c9656714e73e5a1a522f" + +[[package]] +name = "slotmap" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342" +dependencies = [ + "version_check", +] [[package]] name = "smallvec" @@ -940,18 +1009,18 @@ checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] name = "smithay-client-toolkit" -version = "0.12.3" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4750c76fd5d3ac95fa3ed80fe667d6a3d8590a960e5b575b98eea93339a80b80" +checksum = "f307c47d32d2715eb2e0ece5589057820e0e5e70d07c247d1063e844e107f454" dependencies = [ - "andrew", "bitflags", "calloop", - "dlib 0.4.2", + "dlib", "lazy_static", "log", "memmap2", - "nix 0.18.0", + "nix 0.24.3", + "pkg-config", "wayland-client", "wayland-cursor", "wayland-protocols", @@ -959,9 +1028,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.9.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" @@ -974,6 +1043,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "thiserror" version = "1.0.31" @@ -991,7 +1071,7 @@ checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.98", ] [[package]] @@ -1000,25 +1080,52 @@ version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ - "libc 0.2.126", + "libc 0.2.144", "wasi 0.10.0+wasi-snapshot-preview1", - "winapi 0.3.9", + "winapi", ] [[package]] -name = "toml" -version = "0.5.9" +name = "tiny-skia" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "642680569bb895b16e4b9d181c60be1ed136fa0c9c7f11d004daf053ba89bf82" dependencies = [ - "serde", + "arrayref", + "arrayvec", + "bytemuck", + "cfg-if", + "png", + "safe_arch", + "tiny-skia-path", ] [[package]] -name = "ttf-parser" +name = 
"tiny-skia-path" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c114d32f0c2ee43d585367cb013dfaba967ab9f62b90d9af0d696e955e70fa6c" +dependencies = [ + "arrayref", + "bytemuck", +] + +[[package]] +name = "toml_datetime" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5d7cd7ab3e47dda6e56542f4bbf3824c15234958c6e1bd6aaa347e93499fdc" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" + +[[package]] +name = "toml_edit" +version = "0.19.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] [[package]] name = "unicode-ident" @@ -1027,15 +1134,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] -name = "walkdir" -version = "2.3.2" +name = "vec_map" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" -dependencies = [ - "same-file", - "winapi 0.3.9", - "winapi-util", -] +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "wasi" @@ -1050,15 +1158,69 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] -name = "wayland-client" -version = "0.28.6" +name = "wasm-bindgen" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3ab332350e502f159382201394a78e3cc12d0f04db863429260164ea40e0355" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.16", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" + +[[package]] +name = "wayland-client" +version = "0.29.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f3b068c05a039c9f755f881dc50f01732214f5685e379829759088967c46715" dependencies = [ "bitflags", "downcast-rs", - "libc 0.2.126", 
- "nix 0.20.0", + "libc 0.2.144", + "nix 0.24.3", "scoped-tls", "wayland-commons", "wayland-scanner", @@ -1067,11 +1229,11 @@ dependencies = [ [[package]] name = "wayland-commons" -version = "0.28.6" +version = "0.29.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21817947c7011bbd0a27e11b17b337bfd022e8544b071a2641232047966fbda" +checksum = "8691f134d584a33a6606d9d717b95c4fa20065605f798a3f350d78dced02a902" dependencies = [ - "nix 0.20.0", + "nix 0.24.3", "once_cell", "smallvec", "wayland-sys", @@ -1079,20 +1241,20 @@ dependencies = [ [[package]] name = "wayland-cursor" -version = "0.28.6" +version = "0.29.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be610084edd1586d45e7bdd275fe345c7c1873598caa464c4fb835dee70fa65a" +checksum = "6865c6b66f13d6257bef1cd40cbfe8ef2f150fb8ebbdb1e8e873455931377661" dependencies = [ - "nix 0.20.0", + "nix 0.24.3", "wayland-client", "xcursor", ] [[package]] name = "wayland-protocols" -version = "0.28.6" +version = "0.29.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "286620ea4d803bacf61fa087a4242ee316693099ee5a140796aaba02b29f861f" +checksum = "b950621f9354b322ee817a23474e479b34be96c2e909c14f7bc0100e9a970bc6" dependencies = [ "bitflags", "wayland-client", @@ -1102,9 +1264,9 @@ dependencies = [ [[package]] name = "wayland-scanner" -version = "0.28.6" +version = "0.29.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce923eb2deb61de332d1f356ec7b6bf37094dc5573952e1c8936db03b54c03f1" +checksum = "8f4303d8fa22ab852f789e75a967f0a2cdc430a607751c0499bada3e451cbd53" dependencies = [ "proc-macro2", "quote", @@ -1113,20 +1275,24 @@ dependencies = [ [[package]] name = "wayland-sys" -version = "0.28.6" +version = "0.29.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d841fca9aed7febf9bed2e9796c49bf58d4152ceda8ac949ebe00868d8f0feb8" +checksum = "be12ce1a3c39ec7dba25594b97b42cb3195d54953ddb9d3d95a7c3902bc6e9d4" dependencies = [ - "dlib 0.5.0", + "dlib", "lazy_static", "pkg-config", ] [[package]] -name = "winapi" -version = "0.2.8" +name = "web-sys" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] [[package]] name = "winapi" @@ -1138,27 +1304,12 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -1166,44 +1317,163 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "winit" -version = "0.24.0" +name = "windows-sys" +version = "0.36.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "da4eda6fce0eb84bd0a33e3c8794eb902e1033d0a1d5a31bc4f19b1b4bbff597" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "winit" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb796d6fbd86b2fd896c9471e6f04d39d750076ebe5680a3958f00f5ab97657c" dependencies = [ "bitflags", "cocoa", - "core-foundation 0.9.3", - "core-graphics 0.22.3", - "core-video-sys", + "core-foundation", + "core-graphics", "dispatch", "instant", - "lazy_static", - "libc 0.2.126", + "libc 0.2.144", "log", "mio", - "mio-extras", "ndk", "ndk-glue", - "ndk-sys", "objc", + "once_cell", "parking_lot", "percent-encoding", - "raw-window-handle 0.3.4", + "raw-window-handle 0.4.3", + "raw-window-handle 0.5.2", + "sctk-adwaita", "smithay-client-toolkit", + "wasm-bindgen", "wayland-client", - "winapi 0.3.9", + "wayland-protocols", + "web-sys", + "windows-sys 0.36.1", "x11-dl", ] [[package]] -name = "ws2_32-sys" -version = "0.2.1" +name = "winnow" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" dependencies = [ - "winapi 0.2.8", - "winapi-build", + "memchr", +] + +[[package]] +name = "wio" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5" +dependencies = [ + "winapi", ] [[package]] @@ -1213,7 +1483,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea26926b4ce81a6f5d9d0f3a0bc401e5a37c6ae14a1bfaa8ff6099ca80038c59" dependencies = [ "lazy_static", - "libc 0.2.126", + "libc 0.2.144", "pkg-config", ] @@ -1226,15 +1496,6 @@ dependencies = [ "nom", ] -[[package]] -name = "xdg" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4583db5cbd4c4c0303df2d15af80f0539db703fa1c68802d4cbbd2dd0f88f6" -dependencies = [ - "dirs", -] - [[package]] name = "xml-rs" version = "0.8.4" diff --git a/third_party/rust/metal/Cargo.toml b/third_party/rust/metal/Cargo.toml index 9b7c52f8bfc4..abd300f93c09 100644 --- a/third_party/rust/metal/Cargo.toml +++ b/third_party/rust/metal/Cargo.toml @@ -10,9 +10,9 @@ # See Cargo.toml.orig for the original contents. 
[package] -edition = "2018" +edition = "2021" name = "metal" -version = "0.24.0" +version = "0.25.0" authors = ["GFX Developers"] exclude = [ "guide/**/*", @@ -45,6 +45,9 @@ name = "headless-render" [[example]] name = "library" +[[example]] +name = "raytracing" + [[example]] name = "reflection" @@ -101,7 +104,7 @@ version = "0.2" optional = true [dependencies.foreign-types] -version = "0.3.2" +version = "0.5" [dependencies.log] version = "0.4" @@ -110,20 +113,29 @@ version = "0.4" version = "0.2.4" features = ["objc_exception"] +[dependencies.paste] +version = "1" + [dev-dependencies.cocoa] version = "0.24.0" [dev-dependencies.cty] version = "0.2.1" +[dev-dependencies.glam] +version = "0.22" + [dev-dependencies.png] -version = "0.16" +version = "0.17" + +[dev-dependencies.rand] +version = "0.8" [dev-dependencies.sema] version = "0.1.4" [dev-dependencies.winit] -version = "0.24" +version = "0.27" [features] default = [] diff --git a/third_party/rust/metal/Makefile b/third_party/rust/metal/Makefile index bc3587694534..ee7870c4e35b 100644 --- a/third_party/rust/metal/Makefile +++ b/third_party/rust/metal/Makefile @@ -10,3 +10,7 @@ window: circle: xcrun -sdk macosx metal -c examples/circle/shaders.metal -o examples/circle/shaders.air xcrun -sdk macosx metallib examples/circle/shaders.air -o examples/circle/shaders.metallib + +raytracing: + xcrun -sdk macosx metal -c -g examples/raytracing/shaders.metal -o examples/raytracing/shaders.air + xcrun -sdk macosx metallib examples/raytracing/shaders.air -o examples/raytracing/shaders.metallib diff --git a/third_party/rust/metal/README.md b/third_party/rust/metal/README.md index 7fac036e5f62..ef94a72da852 100644 --- a/third_party/rust/metal/README.md +++ b/third_party/rust/metal/README.md @@ -2,7 +2,18 @@ [![Actions Status](https://github.com/gfx-rs/metal-rs/workflows/ci/badge.svg)](https://github.com/gfx-rs/metal-rs/actions) [![Crates.io](https://img.shields.io/crates/v/metal.svg?label=metal)](https://crates.io/crates/metal) -Unsafe Rust bindings for the Metal 3D Graphics API. +

+Unsafe Rust bindings for the Metal 3D Graphics API.

+
+## Documentation
+
+Note that [docs.rs](https://docs.rs) will fail to build the (albeit limited) documentation for this crate!
+They build in a Linux container, but of course this crate will only compile on macOS.
+
+Please build the documentation yourself with `cargo doc`.

 ## Examples

diff --git a/third_party/rust/metal/assets/metal.svg b/third_party/rust/metal/assets/metal.svg
new file mode 100644
index 000000000000..9f0c87edfbe4
--- /dev/null
+++ b/third_party/rust/metal/assets/metal.svg
@@ -0,0 +1,15 @@
+[15 lines of SVG markup: the metal-rs logo]
\ No newline at end of file
diff --git a/third_party/rust/metal/examples/circle/main.rs b/third_party/rust/metal/examples/circle/main.rs
index 18da704421a7..ed17a734024b 100644
--- a/third_party/rust/metal/examples/circle/main.rs
+++ b/third_party/rust/metal/examples/circle/main.rs
@@ -34,19 +34,30 @@ pub struct AAPLVertex {
 fn main() {
     // Create a window for viewing the content
     let event_loop = EventLoop::new();
-    let events_loop = winit::event_loop::EventLoop::new();
     let size = winit::dpi::LogicalSize::new(800, 600);

     let window = winit::window::WindowBuilder::new()
         .with_inner_size(size)
         .with_title("Metal".to_string())
-        .build(&events_loop)
+        .build(&event_loop)
         .unwrap();

     // Set up the GPU device found in the system
     let device = Device::system_default().expect("no device found");
     println!("Your device is: {}", device.name(),);

+    // Scaffold required to sample the GPU and CPU timestamps
+    let mut cpu_start = 0;
+    let mut gpu_start = 0;
+    device.sample_timestamps(&mut cpu_start, &mut gpu_start);
+    let counter_sample_buffer = create_counter_sample_buffer(&device);
+    let destination_buffer = device.new_buffer(
+        (std::mem::size_of::<u64>() * 4) as u64,
+        MTLResourceOptions::StorageModeShared,
+    );
+    let counter_sampling_point = MTLCounterSamplingPoint::AtStageBoundary;
+    assert!(device.supports_counter_sampling(counter_sampling_point));
+
     let binary_archive_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
         .join("examples/circle/binary_archive.metallib");
@@ -141,7 +152,14 @@ fn main() {
                     // Obtain a renderPassDescriptor generated from the view's drawable textures.
                     let render_pass_descriptor = RenderPassDescriptor::new();
-                    prepare_render_pass_descriptor(&render_pass_descriptor, drawable.texture());
+                    handle_render_pass_color_attachment(
+                        &render_pass_descriptor,
+                        drawable.texture(),
+                    );
+                    handle_render_pass_sample_buffer_attachment(
+                        &render_pass_descriptor,
+                        &counter_sample_buffer,
+                    );

                     // Create a render command encoder.
                     let encoder =
                         command_buffer.new_render_command_encoder(&render_pass_descriptor);
                     encoder.set_render_pipeline_state(&pipeline_state);
                     encoder.draw_primitives(MTLPrimitiveType::TriangleStrip, 0, 1080);
                     encoder.end_encoding();

+                    resolve_samples_into_buffer(
+                        &command_buffer,
+                        &counter_sample_buffer,
+                        &destination_buffer,
+                    );
+
                     // Schedule a present once the framebuffer is complete using the current drawable.
                     command_buffer.present_drawable(&drawable);

                     // Finalize rendering here & push the command buffer to the GPU.
                     command_buffer.commit();
+                    command_buffer.wait_until_completed();
+
+                    let mut cpu_end = 0;
+                    let mut gpu_end = 0;
+                    device.sample_timestamps(&mut cpu_end, &mut gpu_end);
+                    handle_timestamps(&destination_buffer, cpu_start, cpu_end, gpu_start, gpu_end);
                 }
                 _ => (),
             }
@@ -211,7 +241,20 @@ fn create_vertex_points_for_circle() -> Vec<AAPLVertex> {
     v
 }

-fn prepare_render_pass_descriptor(descriptor: &RenderPassDescriptorRef, texture: &TextureRef) {
+fn handle_render_pass_sample_buffer_attachment(
+    descriptor: &RenderPassDescriptorRef,
+    counter_sample_buffer: &CounterSampleBufferRef,
+) {
+    let sample_buffer_attachment_descriptor =
+        descriptor.sample_buffer_attachments().object_at(0).unwrap();
+    sample_buffer_attachment_descriptor.set_sample_buffer(&counter_sample_buffer);
+    sample_buffer_attachment_descriptor.set_start_of_vertex_sample_index(0 as NSUInteger);
+    sample_buffer_attachment_descriptor.set_end_of_vertex_sample_index(1 as NSUInteger);
+    sample_buffer_attachment_descriptor.set_start_of_fragment_sample_index(2 as NSUInteger);
+    sample_buffer_attachment_descriptor.set_end_of_fragment_sample_index(3 as NSUInteger);
+}
+
+fn handle_render_pass_color_attachment(descriptor: &RenderPassDescriptorRef, texture: &TextureRef) {
     let color_attachment = descriptor.color_attachments().object_at(0).unwrap();

     color_attachment.set_texture(Some(texture));
@@ -249,3 +292,86 @@ fn prepare_pipeline_state(
         .new_render_pipeline_state(&pipeline_state_descriptor)
         .unwrap()
 }
+
+fn resolve_samples_into_buffer(
+    command_buffer: &CommandBufferRef,
+    counter_sample_buffer: &CounterSampleBufferRef,
+    destination_buffer: &BufferRef,
+) {
+    let blit_encoder = command_buffer.new_blit_command_encoder();
+    blit_encoder.resolve_counters(
+        &counter_sample_buffer,
+        crate::NSRange::new(0_u64, 4),
+        &destination_buffer,
+        0_u64,
+    );
+    blit_encoder.end_encoding();
+}
+
+fn handle_timestamps(
+    resolved_sample_buffer: &BufferRef,
+    cpu_start: u64,
+    cpu_end: u64,
+    gpu_start: u64,
+    gpu_end: u64,
+) {
+    let samples = unsafe {
+        std::slice::from_raw_parts(resolved_sample_buffer.contents() as *const u64, 4 as usize)
+    };
+    let vertex_pass_start = samples[0];
+    let vertex_pass_end = samples[1];
+    let fragment_pass_start = samples[2];
+    let fragment_pass_end = samples[3];
+
+    let cpu_time_span = cpu_end - cpu_start;
+    let gpu_time_span = gpu_end - gpu_start;
+
+    let vertex_micros = microseconds_between_begin(
+        vertex_pass_start,
+        vertex_pass_end,
+        gpu_time_span,
+        cpu_time_span,
+    );
+    let fragment_micros = microseconds_between_begin(
+        fragment_pass_start,
+        fragment_pass_end,
+        gpu_time_span,
+        cpu_time_span,
+    );
+
+    println!("Vertex pass duration: {:.2} µs", vertex_micros);
+    println!("Fragment pass duration: {:.2} µs\n", fragment_micros);
+}
+
+fn create_counter_sample_buffer(device: &Device) -> CounterSampleBuffer {
+    let counter_sample_buffer_desc = metal::CounterSampleBufferDescriptor::new();
+    counter_sample_buffer_desc.set_storage_mode(metal::MTLStorageMode::Shared);
+    counter_sample_buffer_desc.set_sample_count(4_u64);
+    counter_sample_buffer_desc.set_counter_set(&fetch_timestamp_counter_set(device));
+
+    device
+        .new_counter_sample_buffer_with_descriptor(&counter_sample_buffer_desc)
+        .unwrap()
+}
+
+fn fetch_timestamp_counter_set(device: &Device) -> metal::CounterSet {
+    let counter_sets = device.counter_sets();
+    let mut timestamp_counter = None;
+    for cs in counter_sets.iter() {
+        if cs.name() == "timestamp" {
+            timestamp_counter = Some(cs);
+            break;
+        }
+    }
+    timestamp_counter
+        .expect("No timestamp counter found")
+        .clone()
+}
+
+/// Converts a span of GPU timestamps into microseconds on the CPU timeline; see
+/// Apple's "Converting GPU Timestamps into CPU Time".
+fn microseconds_between_begin(begin: u64, end: u64, gpu_time_span: u64, cpu_time_span: u64) -> f64 {
+    // Scale the GPU tick delta by the ratio of CPU ticks to GPU ticks observed
+    // over the same wall-clock interval, yielding CPU-timeline nanoseconds.
+    let time_span = (end as f64) - (begin as f64);
+    let nanoseconds = time_span / (gpu_time_span as f64) * (cpu_time_span as f64);
+    let microseconds = nanoseconds / 1000.0;
+    return microseconds;
+}
diff --git a/third_party/rust/metal/examples/compute/main.rs b/third_party/rust/metal/examples/compute/main.rs
index 6497c790ae67..066997d9d700 100644
--- a/third_party/rust/metal/examples/compute/main.rs
+++ b/third_party/rust/metal/examples/compute/main.rs
@@ -1,66 +1,41 @@
-// Copyright 2017 GFX developers
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
-// copied, modified, or distributed except according to those terms.
-
 use metal::*;
 use objc::rc::autoreleasepool;
-use std::mem;
+use std::path::PathBuf;
+
+const NUM_SAMPLES: u64 = 2;
+const NUM_ELEMENTS: u64 = 64 * 64;

 fn main() {
     autoreleasepool(|| {
-        let device = Device::system_default().expect("no device found");
-        let command_queue = device.new_command_queue();
+        let device = Device::system_default().expect("No device found");
+        let mut cpu_start = 0;
+        let mut gpu_start = 0;
+        device.sample_timestamps(&mut cpu_start, &mut gpu_start);

-        let data = [
-            1u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
-            24, 25, 26, 27, 28, 29, 30,
-        ];
-
-        let buffer = device.new_buffer_with_data(
-            unsafe { mem::transmute(data.as_ptr()) },
-            (data.len() * mem::size_of::<u32>()) as u64,
-            MTLResourceOptions::CPUCacheModeDefaultCache,
+        let counter_sample_buffer = create_counter_sample_buffer(&device);
+        let destination_buffer = device.new_buffer(
+            (std::mem::size_of::<u64>() * NUM_SAMPLES as usize) as u64,
+            MTLResourceOptions::StorageModeShared,
         );
-        let sum = {
-            let data = [0u32];
-            device.new_buffer_with_data(
-                unsafe { mem::transmute(data.as_ptr()) },
-                (data.len() * mem::size_of::<u32>()) as u64,
-                MTLResourceOptions::CPUCacheModeDefaultCache,
-            )
-        };
+        let counter_sampling_point = MTLCounterSamplingPoint::AtStageBoundary;
+        assert!(device.supports_counter_sampling(counter_sampling_point));

+        let command_queue = device.new_command_queue();
         let command_buffer = command_queue.new_command_buffer();
-        command_buffer.set_label("label");
-        let block = block::ConcreteBlock::new(move |buffer: &metal::CommandBufferRef| {
-            println!("{}", buffer.label());
-        })
-        .copy();
-
-        command_buffer.add_completed_handler(&block);
-
-        let encoder = command_buffer.new_compute_command_encoder();
-        let library_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
-            .join("examples/compute/shaders.metallib");
-
-        let library = device.new_library_with_file(library_path).unwrap();
-        let kernel = library.get_function("sum", None).unwrap();
-
-        let pipeline_state_descriptor = ComputePipelineDescriptor::new();
-        pipeline_state_descriptor.set_compute_function(Some(&kernel));
-
-        let pipeline_state = device
-            .new_compute_pipeline_state_with_function(
-                pipeline_state_descriptor.compute_function().unwrap(),
-            )
-            .unwrap();
+        let compute_pass_descriptor = ComputePassDescriptor::new();
+        handle_compute_pass_sample_buffer_attachment(
+            &compute_pass_descriptor,
+            &counter_sample_buffer,
+        );
+        let encoder =
+            command_buffer.compute_command_encoder_with_descriptor(&compute_pass_descriptor);
+        let pipeline_state = create_pipeline_state(&device);
         encoder.set_compute_pipeline_state(&pipeline_state);
+
+        let (buffer, sum) = create_input_and_output_buffers(&device);
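+        // Buffer indices 0 and 1 are the argument-table slots the "sum" kernel reads:
+        // index 0 holds the input elements, index 1 the single u32 running total.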
         encoder.set_buffer(0, Some(&buffer), 0);
         encoder.set_buffer(1, Some(&sum), 0);
@@ -73,19 +48,141 @@ fn main() {
         };

         let thread_group_size = MTLSize {
-            width: (data.len() as u64 + width) / width,
+            width: (NUM_ELEMENTS + width) / width,
             height: 1,
             depth: 1,
         };

         encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
         encoder.end_encoding();
+
+        resolve_samples_into_buffer(&command_buffer, &counter_sample_buffer, &destination_buffer);
+
         command_buffer.commit();
         command_buffer.wait_until_completed();
+        let mut cpu_end = 0;
+        let mut gpu_end = 0;
+        device.sample_timestamps(&mut cpu_end, &mut gpu_end);

         let ptr = sum.contents() as *mut u32;
+        println!("Compute shader sum: {}", unsafe { *ptr });
+
         unsafe {
-            assert_eq!(465, *ptr);
+            assert_eq!(NUM_ELEMENTS as u32, *ptr);
         }
+
+        handle_timestamps(&destination_buffer, cpu_start, cpu_end, gpu_start, gpu_end);
     });
 }
+
+fn create_pipeline_state(device: &Device) -> ComputePipelineState {
+    let library_path =
+        PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("examples/compute/shaders.metallib");
+    let library = device.new_library_with_file(library_path).unwrap();
+    let kernel = library.get_function("sum", None).unwrap();
+
+    let pipeline_state_descriptor = ComputePipelineDescriptor::new();
+    pipeline_state_descriptor.set_compute_function(Some(&kernel));
+
+    device
+        .new_compute_pipeline_state_with_function(
+            pipeline_state_descriptor.compute_function().unwrap(),
+        )
+        .unwrap()
+}
+
+fn handle_compute_pass_sample_buffer_attachment(
+    compute_pass_descriptor: &ComputePassDescriptorRef,
+    counter_sample_buffer: &CounterSampleBufferRef,
+) {
+    let sample_buffer_attachment_descriptor = compute_pass_descriptor
+        .sample_buffer_attachments()
+        .object_at(0)
+        .unwrap();
+
+    sample_buffer_attachment_descriptor.set_sample_buffer(&counter_sample_buffer);
+    sample_buffer_attachment_descriptor.set_start_of_encoder_sample_index(0);
+    sample_buffer_attachment_descriptor.set_end_of_encoder_sample_index(1);
+}
+
+fn resolve_samples_into_buffer(
+    command_buffer: &CommandBufferRef,
+    counter_sample_buffer: &CounterSampleBufferRef,
+    destination_buffer: &BufferRef,
+) {
+    let blit_encoder = command_buffer.new_blit_command_encoder();
+    blit_encoder.resolve_counters(
+        &counter_sample_buffer,
+        crate::NSRange::new(0_u64, NUM_SAMPLES),
+        &destination_buffer,
+        0_u64,
+    );
+    blit_encoder.end_encoding();
+}
+
+fn handle_timestamps(
+    resolved_sample_buffer: &BufferRef,
+    cpu_start: u64,
+    cpu_end: u64,
+    gpu_start: u64,
+    gpu_end: u64,
+) {
+    let samples = unsafe {
+        std::slice::from_raw_parts(
+            resolved_sample_buffer.contents() as *const u64,
+            NUM_SAMPLES as usize,
+        )
+    };
+    let pass_start = samples[0];
+    let pass_end = samples[1];
+
+    let cpu_time_span = cpu_end - cpu_start;
+    let gpu_time_span = gpu_end - gpu_start;
+
+    let micros = microseconds_between_begin(pass_start, pass_end, gpu_time_span, cpu_time_span);
+    println!("Compute pass duration: {} µs", micros);
+}
+
+fn create_counter_sample_buffer(device: &Device) -> CounterSampleBuffer {
+    let counter_sample_buffer_desc = metal::CounterSampleBufferDescriptor::new();
+    counter_sample_buffer_desc.set_storage_mode(metal::MTLStorageMode::Shared);
+    counter_sample_buffer_desc.set_sample_count(NUM_SAMPLES);
+    let counter_sets = device.counter_sets();
+
+    let timestamp_counter = counter_sets.iter().find(|cs| cs.name() == "timestamp");
+
+    counter_sample_buffer_desc
+        .set_counter_set(&timestamp_counter.expect("No timestamp counter found"));
+
+    device
+        .new_counter_sample_buffer_with_descriptor(&counter_sample_buffer_desc)
+        .unwrap()
+}
+
+fn create_input_and_output_buffers(device: &Device) -> (metal::Buffer, metal::Buffer) {
+    let data = [1u32; 64 * 64];
+
+    let buffer = device.new_buffer_with_data(
+        unsafe { std::mem::transmute(data.as_ptr()) },
+        (data.len() * std::mem::size_of::<u32>()) as u64,
+        MTLResourceOptions::CPUCacheModeDefaultCache,
+    );
+
+    let sum = {
+        let data = [0u32];
+        device.new_buffer_with_data(
+            unsafe { std::mem::transmute(data.as_ptr()) },
+            (data.len() * std::mem::size_of::<u32>()) as u64,
+            MTLResourceOptions::CPUCacheModeDefaultCache,
+        )
+    };
+    (buffer, sum)
+}
+
+/// Converts a span of GPU timestamps into microseconds on the CPU timeline; see
+/// Apple's "Converting GPU Timestamps into CPU Time".
+fn microseconds_between_begin(begin: u64, end: u64, gpu_time_span: u64, cpu_time_span: u64) -> f64 {
+    let time_span = (end as f64) - (begin as f64);
+    let nanoseconds = time_span / (gpu_time_span as f64) * (cpu_time_span as f64);
+    let microseconds = nanoseconds / 1000.0;
+    return microseconds;
+}
diff --git a/third_party/rust/metal/examples/headless-render/main.rs b/third_party/rust/metal/examples/headless-render/main.rs
index ed68da1a538e..449170251f00 100644
--- a/third_party/rust/metal/examples/headless-render/main.rs
+++ b/third_party/rust/metal/examples/headless-render/main.rs
@@ -103,7 +103,7 @@ fn save_image(texture: &TextureRef) {
     let ref mut w = BufWriter::new(file);

     let mut encoder = png::Encoder::new(w, VIEW_WIDTH as u32, VIEW_HEIGHT as u32);
-    encoder.set_color(ColorType::RGBA);
+    encoder.set_color(ColorType::Rgba);
     encoder.set_depth(png::BitDepth::Eight);

     let mut writer = encoder.write_header().unwrap();
diff --git a/third_party/rust/metal/examples/mesh-shader/main.rs b/third_party/rust/metal/examples/mesh-shader/main.rs
new file mode 100644
index 000000000000..8edb30ce1f5e
--- /dev/null
+++ b/third_party/rust/metal/examples/mesh-shader/main.rs
@@ -0,0 +1,118 @@
+extern crate objc;
+
+use cocoa::{appkit::NSView, base::id as cocoa_id};
+use core_graphics_types::geometry::CGSize;
+
+use metal::*;
+use objc::{rc::autoreleasepool, runtime::YES};
+use std::mem;
+use winit::platform::macos::WindowExtMacOS;
+
+use winit::{
+    event::{Event, WindowEvent},
+    event_loop::ControlFlow,
+};
+
+fn prepare_render_pass_descriptor(descriptor: &RenderPassDescriptorRef, texture: &TextureRef) {
+    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
+
+    color_attachment.set_texture(Some(texture));
+    color_attachment.set_load_action(MTLLoadAction::Clear);
+    color_attachment.set_clear_color(MTLClearColor::new(0.2, 0.2, 0.25, 1.0));
+    color_attachment.set_store_action(MTLStoreAction::Store);
+}
+
+fn main() {
+    let events_loop = winit::event_loop::EventLoop::new();
+    let size = winit::dpi::LogicalSize::new(800, 600);
+
+    let window = winit::window::WindowBuilder::new()
+        .with_inner_size(size)
+        .with_title("Metal Mesh Shader Example".to_string())
+        .build(&events_loop)
+        .unwrap();
+
+    let device = Device::system_default().expect("no device found");
+
+    let layer = MetalLayer::new();
+    layer.set_device(&device);
+    layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
+    layer.set_presents_with_transaction(false);
+
+    unsafe {
+        let view = window.ns_view() as cocoa_id;
+        view.setWantsLayer(YES);
+        view.setLayer(mem::transmute(layer.as_ref()));
+    }
+
+    let draw_size = window.inner_size();
+    layer.set_drawable_size(CGSize::new(draw_size.width as f64, draw_size.height as f64));
+
+    let library_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
+        .join("examples/mesh-shader/shaders.metallib");
+    let library =
+        device.new_library_with_file(library_path).unwrap();
+
+    let mesh = library.get_function("mesh_function", None).unwrap();
+    let frag = library.get_function("fragment_function", None).unwrap();
+
+    let pipeline_state_desc = MeshRenderPipelineDescriptor::new();
+    pipeline_state_desc
+        .color_attachments()
+        .object_at(0)
+        .unwrap()
+        .set_pixel_format(MTLPixelFormat::BGRA8Unorm);
+    pipeline_state_desc.set_mesh_function(Some(&mesh));
+    pipeline_state_desc.set_fragment_function(Some(&frag));
+
+    let pipeline_state = device
+        .new_mesh_render_pipeline_state(&pipeline_state_desc)
+        .unwrap();
+
+    let command_queue = device.new_command_queue();
+
+    events_loop.run(move |event, _, control_flow| {
+        autoreleasepool(|| {
+            *control_flow = ControlFlow::Poll;
+
+            match event {
+                Event::WindowEvent { event, .. } => match event {
+                    WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
+                    WindowEvent::Resized(size) => {
+                        layer.set_drawable_size(CGSize::new(size.width as f64, size.height as f64));
+                    }
+                    _ => (),
+                },
+                Event::MainEventsCleared => {
+                    window.request_redraw();
+                }
+                Event::RedrawRequested(_) => {
+                    let drawable = match layer.next_drawable() {
+                        Some(drawable) => drawable,
+                        None => return,
+                    };
+
+                    let render_pass_descriptor = RenderPassDescriptor::new();
+
+                    prepare_render_pass_descriptor(&render_pass_descriptor, drawable.texture());
+
+                    let command_buffer = command_queue.new_command_buffer();
+                    let encoder =
+                        command_buffer.new_render_command_encoder(&render_pass_descriptor);
+
+                    encoder.set_render_pipeline_state(&pipeline_state);
+                    encoder.draw_mesh_threads(
+                        MTLSize::new(1, 1, 1),
+                        MTLSize::new(1, 1, 1),
+                        MTLSize::new(1, 1, 1),
+                    );
+
+                    encoder.end_encoding();
+
+                    command_buffer.present_drawable(&drawable);
+                    command_buffer.commit();
+                }
+                _ => {}
+            }
+        });
+    });
+}
diff --git a/third_party/rust/metal/examples/mesh-shader/shaders.metal b/third_party/rust/metal/examples/mesh-shader/shaders.metal
new file mode 100644
index 000000000000..1a8253074281
--- /dev/null
+++ b/third_party/rust/metal/examples/mesh-shader/shaders.metal
@@ -0,0 +1,30 @@
+#include <metal_stdlib>
+
+using namespace metal;
+
+struct VertexOut {
+    float4 position [[position]];
+};
+
+using mesh_t = mesh<VertexOut, void, 3, 1, topology::triangle>;
+
+[[mesh]] void mesh_function(mesh_t m) {
+    VertexOut v;
+    v.position = float4(-1.0, -1.0, 0.0, 1.0);
+
+    m.set_primitive_count(1);
+
+    m.set_vertex(0, v);
+    v.position = float4(0.0, 1.0, 0.0, 1.0);
+    m.set_vertex(1, v);
+    v.position = float4(1.0, -1.0, 0.0, 1.0);
+    m.set_vertex(2, v);
+
+    m.set_index(0, 0);
+    m.set_index(1, 1);
+    m.set_index(2, 2);
+}
+
+fragment half4 fragment_function() {
+    return half4(0.1, 1.0, 0.1, 1.0);
+}
\ No newline at end of file
diff --git a/third_party/rust/metal/examples/mesh-shader/shaders.metallib b/third_party/rust/metal/examples/mesh-shader/shaders.metallib
new file mode 100644
index 000000000000..4af8d60ddcf2
Binary files /dev/null and b/third_party/rust/metal/examples/mesh-shader/shaders.metallib differ
diff --git a/third_party/rust/metal/examples/mps/main.rs b/third_party/rust/metal/examples/mps/main.rs
index efff9f4eba13..cc01b7a40da1 100644
--- a/third_party/rust/metal/examples/mps/main.rs
+++ b/third_party/rust/metal/examples/mps/main.rs
@@ -7,8 +7,8 @@ struct Vertex {
     xyz: [f32; 3],
 }

-type Ray = MPSRayOriginMinDistanceDirectionMaxDistance;
-type Intersection = MPSIntersectionDistancePrimitiveIndexCoordinates;
+type Ray = mps::MPSRayOriginMinDistanceDirectionMaxDistance;
+type Intersection = mps::MPSIntersectionDistancePrimitiveIndexCoordinates;

 // Original example taken from https://sergeyreznik.github.io/metal-ray-tracer/part-1/index.html
 fn main() {
@@ -61,25 +61,26 @@ fn main() {
     );

     // Build an acceleration structure using our vertex and index buffers containing the single triangle.
-    let acceleration_structure = TriangleAccelerationStructure::from_device(&device)
+    let acceleration_structure = mps::TriangleAccelerationStructure::from_device(&device)
         .expect("Failed to create acceleration structure");
     acceleration_structure.set_vertex_buffer(Some(&vertex_buffer));
     acceleration_structure.set_vertex_stride(vertex_stride as u64);
     acceleration_structure.set_index_buffer(Some(&index_buffer));
-    acceleration_structure.set_index_type(MPSDataType::UInt32);
+    acceleration_structure.set_index_type(mps::MPSDataType::UInt32);
     acceleration_structure.set_triangle_count(1);
-    acceleration_structure.set_usage(MPSAccelerationStructureUsage::None);
+    acceleration_structure.set_usage(mps::MPSAccelerationStructureUsage::None);
     acceleration_structure.rebuild();

     let ray_intersector =
-        RayIntersector::from_device(&device).expect("Failed to create ray intersector");
+        mps::RayIntersector::from_device(&device).expect("Failed to create ray intersector");

     ray_intersector.set_ray_stride(mem::size_of::<Ray>() as u64);
-    ray_intersector.set_ray_data_type(MPSRayDataType::OriginMinDistanceDirectionMaxDistance);
+    ray_intersector.set_ray_data_type(mps::MPSRayDataType::OriginMinDistanceDirectionMaxDistance);

     ray_intersector.set_intersection_stride(mem::size_of::<Intersection>() as u64);
-    ray_intersector
-        .set_intersection_data_type(MPSIntersectionDataType::DistancePrimitiveIndexCoordinates);
+    ray_intersector.set_intersection_data_type(
+        mps::MPSIntersectionDataType::DistancePrimitiveIndexCoordinates,
+    );

     // Create a buffer to hold generated rays and intersection results
     let ray_count = 1024;
@@ -114,7 +115,7 @@ fn main() {
     // Intersect rays with triangles inside acceleration structure
     ray_intersector.encode_intersection_to_command_buffer(
         &command_buffer,
-        MPSIntersectionType::Nearest,
+        mps::MPSIntersectionType::Nearest,
         &ray_buffer,
         0,
         &intersection_buffer,
diff --git a/third_party/rust/metal/examples/raytracing/README.md b/third_party/rust/metal/examples/raytracing/README.md
new file mode 100644
index 000000000000..0071e59747c5
--- /dev/null
+++ b/third_party/rust/metal/examples/raytracing/README.md
@@ -0,0 +1,11 @@
+## Raytracing
+
+A good showcase of Metal 3 raytracing features.
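+It builds both triangle and bounding-box (procedural sphere) acceleration structures, resolving the spheres with a custom intersection function.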
+
+![Screenshot of the final render](./screenshot.png)
+
+## To Run
+
+```
+cargo run --example raytracing
+```
diff --git a/third_party/rust/metal/examples/raytracing/camera.rs b/third_party/rust/metal/examples/raytracing/camera.rs
new file mode 100644
index 000000000000..5548445c0667
--- /dev/null
+++ b/third_party/rust/metal/examples/raytracing/camera.rs
@@ -0,0 +1,20 @@
+use glam::f32::Vec4;
+
+#[repr(C)]
+pub struct Camera {
+    pub position: Vec4,
+    pub right: Vec4,
+    pub up: Vec4,
+    pub forward: Vec4,
+}
+
+impl Camera {
+    pub fn new() -> Self {
+        Self {
+            position: Vec4::new(0.0, 3.0, 10.0, 0.0),
+            right: Vec4::new(1.0, 0.0, 0.0, 0.0),
+            up: Vec4::new(0.0, 1.0, 0.0, 0.0),
+            forward: Vec4::new(0.0, 0.0, -1.0, 0.0),
+        }
+    }
+}
diff --git a/third_party/rust/metal/examples/raytracing/geometry.rs b/third_party/rust/metal/examples/raytracing/geometry.rs
new file mode 100644
index 000000000000..93fdc196d1e1
--- /dev/null
+++ b/third_party/rust/metal/examples/raytracing/geometry.rs
@@ -0,0 +1,448 @@
+use std::{
+    mem::{size_of, transmute},
+    sync::Arc,
+};
+
+use glam::{
+    f32::{Mat4, Vec3, Vec4},
+    Vec4Swizzles,
+};
+
+use metal::*;
+
+pub const GEOMETRY_MASK_TRIANGLE: u32 = 1;
+pub const GEOMETRY_MASK_SPHERE: u32 = 2;
+pub const GEOMETRY_MASK_LIGHT: u32 = 4;
+
+pub const FACE_MASK_NONE: u16 = 0;
+pub const FACE_MASK_NEGATIVE_X: u16 = 1 << 0;
+pub const FACE_MASK_POSITIVE_X: u16 = 1 << 1;
+pub const FACE_MASK_NEGATIVE_Y: u16 = 1 << 2;
+pub const FACE_MASK_POSITIVE_Y: u16 = 1 << 3;
+pub const FACE_MASK_NEGATIVE_Z: u16 = 1 << 4;
+pub const FACE_MASK_POSITIVE_Z: u16 = 1 << 5;
+pub const FACE_MASK_ALL: u16 = (1 << 6) - 1;
+
+pub trait Geometry {
+    fn upload_to_buffers(&mut self) {
+        todo!()
+    }
+    fn clear(&mut self) {
+        todo!()
+    }
+    fn get_geometry_descriptor(&self) -> AccelerationStructureGeometryDescriptor {
+        todo!()
+    }
+    fn get_resources(&self) -> Vec<Resource> {
+        todo!()
+    }
+    fn get_intersection_function_name(&self) -> Option<&str> {
+        None
+    }
+}
+
+pub fn compute_triangle_normal(v0: &Vec3, v1: &Vec3, v2: &Vec3) -> Vec3 {
+    let e1 = Vec3::normalize(*v1 - *v0);
+    let e2 = Vec3::normalize(*v2 - *v0);
+    return Vec3::cross(e1, e2);
+}
+
+#[derive(Default)]
+#[repr(C)]
+pub struct Triangle {
+    pub normals: [Vec4; 3],
+    pub colours: [Vec4; 3],
+}
+
+pub fn get_managed_buffer_storage_mode() -> MTLResourceOptions {
+    return MTLResourceOptions::StorageModeManaged;
+}
+
+pub struct TriangleGeometry {
+    pub device: Device,
+    pub name: String,
+    pub index_buffer: Option<Buffer>,
+    pub vertex_position_buffer: Option<Buffer>,
+    pub vertex_normal_buffer: Option<Buffer>,
+    pub vertex_colour_buffer: Option<Buffer>,
+    pub per_primitive_data_buffer: Option<Buffer>,
+    pub indices: Vec<u16>,
+    pub vertices: Vec<Vec4>,
+    pub normals: Vec<Vec4>,
+    pub colours: Vec<Vec4>,
+    pub triangles: Vec<Triangle>,
+}
+
+impl TriangleGeometry {
+    pub fn new(device: Device, name: String) -> Self {
+        Self {
+            device,
+            name,
+            index_buffer: None,
+            vertex_position_buffer: None,
+            vertex_normal_buffer: None,
+            vertex_colour_buffer: None,
+            per_primitive_data_buffer: None,
+            indices: Vec::new(),
+            vertices: Vec::new(),
+            normals: Vec::new(),
+            colours: Vec::new(),
+            triangles: Vec::new(),
+        }
+    }
+
+    pub fn add_cube_face_with_cube_vertices(
+        &mut self,
+        cube_vertices: &[Vec3],
+        colour: Vec3,
+        i0: u16,
+        i1: u16,
+        i2: u16,
+        i3: u16,
+        inward_normals: bool,
+    ) {
+        let v0 = cube_vertices[i0 as usize];
+        let v1 = cube_vertices[i1 as usize];
+        let v2 = cube_vertices[i2 as usize];
+        let v3 = cube_vertices[i3 as usize];
+
+        let n0 = compute_triangle_normal(&v0, &v1, &v2) * if inward_normals { -1f32 } else { 1f32 };
+        let n1 = compute_triangle_normal(&v0, &v2, &v3) * if inward_normals { -1f32 } else { 1f32 };
+
+        let first_index = self.indices.len();
+        let base_index = self.vertices.len() as u16;
+
+        self.indices.push(base_index + 0);
+        self.indices.push(base_index + 1);
+        self.indices.push(base_index + 2);
+        self.indices.push(base_index + 0);
+        self.indices.push(base_index + 2);
+        self.indices.push(base_index + 3);
+
+        self.vertices.push(From::from((v0, 0.0)));
+        self.vertices.push(From::from((v1, 0.0)));
+        self.vertices.push(From::from((v2, 0.0)));
+        self.vertices.push(From::from((v3, 0.0)));
+
+        self.normals
+            .push(From::from((Vec3::normalize(n0 + n1), 0.0)));
+        self.normals.push(From::from((n0, 0.0)));
+        self.normals
+            .push(From::from((Vec3::normalize(n0 + n1), 0.0)));
+        self.normals.push(From::from((n1, 0.0)));
+
+        for _ in 0..4 {
+            self.colours.push(From::from((colour, 0.0)));
+        }
+
+        for triangle_index in 0..2 {
+            let mut triangle = Triangle::default();
+            for i in 0..3 {
+                let index = self.indices[first_index + triangle_index * 3 + i];
+                triangle.normals[i] = self.normals[index as usize];
+                triangle.colours[i] = self.colours[index as usize];
+            }
+            self.triangles.push(triangle);
+        }
+    }
+
+    pub fn add_cube_with_faces(
+        &mut self,
+        face_mask: u16,
+        colour: Vec3,
+        transform: Mat4,
+        inward_normals: bool,
+    ) {
+        let mut cube_vertices = [
+            Vec3::new(-0.5, -0.5, -0.5),
+            Vec3::new(0.5, -0.5, -0.5),
+            Vec3::new(-0.5, 0.5, -0.5),
+            Vec3::new(0.5, 0.5, -0.5),
+            Vec3::new(-0.5, -0.5, 0.5),
+            Vec3::new(0.5, -0.5, 0.5),
+            Vec3::new(-0.5, 0.5, 0.5),
+            Vec3::new(0.5, 0.5, 0.5),
+        ];
+
+        for i in 0..8 {
+            let transformed_vertex = Vec4::from((cube_vertices[i], 1.0));
+            let transformed_vertex = transform * transformed_vertex;
+            cube_vertices[i] = transformed_vertex.xyz();
+        }
+
+        const CUBE_INDICES: [[u16; 4]; 6] = [
+            [0, 4, 6, 2],
+            [1, 3, 7, 5],
+            [0, 1, 5, 4],
+            [2, 6, 7, 3],
+            [0, 2, 3, 1],
+            [4, 5, 7, 6],
+        ];
+
+        for face in 0..6 {
+            if face_mask & (1 << face) != 0 {
+                self.add_cube_face_with_cube_vertices(
+                    &cube_vertices,
+                    colour,
+                    CUBE_INDICES[face][0],
+                    CUBE_INDICES[face][1],
+                    CUBE_INDICES[face][2],
+                    CUBE_INDICES[face][3],
+                    inward_normals,
+                );
+            }
+        }
+    }
+}
+
+impl Geometry for TriangleGeometry {
+    fn upload_to_buffers(&mut self) {
+        self.index_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.indices.as_ptr()),
+                (self.indices.len() * size_of::<u16>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.vertex_position_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.vertices.as_ptr()),
+                (self.vertices.len() * size_of::<Vec4>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.vertex_normal_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.normals.as_ptr()),
+                (self.normals.len() * size_of::<Vec4>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.vertex_colour_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.colours.as_ptr()),
+                (self.colours.len() * size_of::<Vec4>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.per_primitive_data_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.triangles.as_ptr()),
+                (self.triangles.len() * size_of::<Triangle>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.index_buffer
+            .as_ref()
+            .unwrap()
+            .did_modify_range(NSRange::new(
+                0,
+                self.index_buffer.as_ref().unwrap().length(),
+            ));
+        self.vertex_position_buffer
+            .as_ref()
+impl Geometry for TriangleGeometry {
+    fn upload_to_buffers(&mut self) {
+        self.index_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.indices.as_ptr()),
+                (self.indices.len() * size_of::<u16>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.vertex_position_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.vertices.as_ptr()),
+                (self.vertices.len() * size_of::<Vec4>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.vertex_normal_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.normals.as_ptr()),
+                (self.normals.len() * size_of::<Vec4>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.vertex_colour_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.colours.as_ptr()),
+                (self.colours.len() * size_of::<Vec4>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.per_primitive_data_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.triangles.as_ptr()),
+                (self.triangles.len() * size_of::<Triangle>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.index_buffer
+            .as_ref()
+            .unwrap()
+            .did_modify_range(NSRange::new(
+                0,
+                self.index_buffer.as_ref().unwrap().length(),
+            ));
+        self.vertex_position_buffer
+            .as_ref()
+            .unwrap()
+            .did_modify_range(NSRange::new(
+                0,
+                self.vertex_position_buffer.as_ref().unwrap().length(),
+            ));
+        self.vertex_normal_buffer
+            .as_ref()
+            .unwrap()
+            .did_modify_range(NSRange::new(
+                0,
+                self.vertex_normal_buffer.as_ref().unwrap().length(),
+            ));
+        self.vertex_colour_buffer
+            .as_ref()
+            .unwrap()
+            .did_modify_range(NSRange::new(
+                0,
+                self.vertex_colour_buffer.as_ref().unwrap().length(),
+            ));
+        self.per_primitive_data_buffer
+            .as_ref()
+            .unwrap()
+            .did_modify_range(NSRange::new(
+                0,
+                self.per_primitive_data_buffer.as_ref().unwrap().length(),
+            ));
+
+        self.index_buffer
+            .as_ref()
+            .unwrap()
+            .set_label(&format!("index buffer of {}", self.name));
+        self.vertex_position_buffer
+            .as_ref()
+            .unwrap()
+            .set_label(&format!("vertex position buffer of {}", self.name));
+        self.vertex_normal_buffer
+            .as_ref()
+            .unwrap()
+            .set_label(&format!("vertex normal buffer of {}", self.name));
+        self.vertex_colour_buffer
+            .as_ref()
+            .unwrap()
+            .set_label(&format!("vertex colour buffer of {}", self.name));
+        self.per_primitive_data_buffer
+            .as_ref()
+            .unwrap()
+            .set_label(&format!("per primitive data buffer of {}", self.name));
+    }
+
+    fn clear(&mut self) {
+        self.indices.clear();
+        self.vertices.clear();
+        self.normals.clear();
+        self.colours.clear();
+        self.triangles.clear();
+    }
+
+    fn get_geometry_descriptor(&self) -> AccelerationStructureGeometryDescriptor {
+        let descriptor = AccelerationStructureTriangleGeometryDescriptor::descriptor();
+
+        descriptor.set_index_buffer(Some(self.index_buffer.as_ref().unwrap()));
+        descriptor.set_index_type(MTLIndexType::UInt16);
+        descriptor.set_vertex_buffer(Some(self.vertex_position_buffer.as_ref().unwrap()));
+        descriptor.set_vertex_stride(size_of::<Vec4>() as NSUInteger);
+        descriptor.set_triangle_count((self.indices.len() / 3) as NSUInteger);
+        descriptor
+            .set_primitive_data_buffer(Some(self.per_primitive_data_buffer.as_ref().unwrap()));
+        descriptor.set_primitive_data_stride(size_of::<Triangle>() as NSUInteger);
+        descriptor.set_primitive_data_element_size(size_of::<Triangle>() as NSUInteger);
+        From::from(descriptor)
+    }
+
+    fn get_resources(&self) -> Vec<Resource> {
+        vec![
+            From::from(self.index_buffer.as_ref().unwrap().clone()),
+            From::from(self.vertex_normal_buffer.as_ref().unwrap().clone()),
+            From::from(self.vertex_colour_buffer.as_ref().unwrap().clone()),
+        ]
+    }
+}
+
+#[repr(C)]
+pub struct BoundingBox {
+    pub min: Vec3,
+    pub max: Vec3,
+}
+
+#[repr(C)]
+pub struct Sphere {
+    pub origin_radius_squared: Vec4,
+    pub colour_radius: Vec4,
+}
+
+pub struct SphereGeometry {
+    pub device: Device,
+    pub sphere_buffer: Option<Buffer>,
+    pub bounding_box_buffer: Option<Buffer>,
+    pub per_primitive_data_buffer: Option<Buffer>,
+    pub spheres: Vec<Sphere>,
+}
+
+impl SphereGeometry {
+    pub fn new(device: Device) -> Self {
+        Self {
+            device,
+            sphere_buffer: None,
+            bounding_box_buffer: None,
+            per_primitive_data_buffer: None,
+            spheres: Vec::new(),
+        }
+    }
+
+    pub fn add_sphere_with_origin(&mut self, origin: Vec3, radius: f32, colour: Vec3) {
+        self.spheres.push(Sphere {
+            origin_radius_squared: Vec4::from((origin, radius * radius)),
+            colour_radius: Vec4::from((colour, radius)),
+        });
+    }
+}
+
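The two `Vec4` fields pack origin with $r^2$ and colour with $r$, which should line up byte-for-byte with the `Sphere` struct the intersection function reads in `shaders.metal` below (`packed_float3 origin; float radiusSquared; packed_float3 color; float radius;`), 32 bytes on both sides. A small sanity check, under that layout assumption:

```rust
// Hypothetical check: with #[repr(C)], Sphere is two 16-byte Vec4s.
use std::mem::size_of;
assert_eq!(size_of::<Sphere>(), 32);

let s = Sphere {
    origin_radius_squared: Vec4::new(0.0, 0.5, 0.0, 0.25), // origin, then r^2
    colour_radius: Vec4::new(0.725, 0.71, 0.68, 0.5),      // colour, then r
};
// The .w lanes are redundant encodings of the same radius (0.5 and 0.25 are
// exact in f32, so the equality holds without a tolerance).
assert_eq!(s.origin_radius_squared.w, s.colour_radius.w * s.colour_radius.w);
```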
+impl Geometry for SphereGeometry {
+    fn upload_to_buffers(&mut self) {
+        self.sphere_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(self.spheres.as_ptr()),
+                (self.spheres.len() * size_of::<Sphere>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.sphere_buffer
+            .as_ref()
+            .unwrap()
+            .set_label("sphere buffer");
+        let mut bounding_boxes = Vec::new();
+        for sphere in &self.spheres {
+            bounding_boxes.push(BoundingBox {
+                min: sphere.origin_radius_squared.xyz() - sphere.colour_radius.w,
+                max: sphere.origin_radius_squared.xyz() + sphere.colour_radius.w,
+            });
+        }
+        self.bounding_box_buffer = Some(unsafe {
+            self.device.new_buffer_with_data(
+                transmute(bounding_boxes.as_ptr()),
+                (bounding_boxes.len() * size_of::<BoundingBox>()) as NSUInteger,
+                get_managed_buffer_storage_mode(),
+            )
+        });
+        self.bounding_box_buffer
+            .as_ref()
+            .unwrap()
+            .set_label("bounding box buffer");
+        self.sphere_buffer
+            .as_ref()
+            .unwrap()
+            .did_modify_range(NSRange::new(
+                0,
+                self.sphere_buffer.as_ref().unwrap().length(),
+            ));
+        self.bounding_box_buffer
+            .as_ref()
+            .unwrap()
+            .did_modify_range(NSRange::new(
+                0,
+                self.bounding_box_buffer.as_ref().unwrap().length(),
+            ));
+    }
+
+    fn clear(&mut self) {
+        self.spheres.clear();
+    }
+
+    fn get_geometry_descriptor(&self) -> AccelerationStructureGeometryDescriptor {
+        let descriptor = AccelerationStructureBoundingBoxGeometryDescriptor::descriptor();
+        descriptor.set_bounding_box_buffer(Some(self.bounding_box_buffer.as_ref().unwrap()));
+        descriptor.set_bounding_box_count(self.spheres.len() as NSUInteger);
+        descriptor.set_primitive_data_buffer(Some(&self.sphere_buffer.as_ref().unwrap()));
+        descriptor.set_primitive_data_stride(size_of::<Sphere>() as NSUInteger);
+        descriptor.set_primitive_data_element_size(size_of::<Sphere>() as NSUInteger);
+        From::from(descriptor)
+    }
+
+    fn get_resources(&self) -> Vec<Resource> {
+        return vec![From::from(self.sphere_buffer.as_ref().unwrap().clone())];
+    }
+
+    fn get_intersection_function_name(&self) -> Option<&str> {
+        Some("sphereIntersectionFunction")
+    }
+}
+
+pub struct GeometryInstance {
+    pub geometry: Arc<dyn Geometry>,
+    pub transform: Mat4,
+    pub mask: u32,
+    pub index_in_scene: NSUInteger,
+}
+
+#[repr(C)]
+pub struct AreaLight {
+    pub position: Vec4,
+    pub forward: Vec4,
+    pub right: Vec4,
+    pub up: Vec4,
+    pub colour: Vec4,
+}
diff --git a/third_party/rust/metal/examples/raytracing/main.rs b/third_party/rust/metal/examples/raytracing/main.rs
new file mode 100644
index 000000000000..68eaf3df5927
--- /dev/null
+++ b/third_party/rust/metal/examples/raytracing/main.rs
@@ -0,0 +1,87 @@
+extern crate objc;
+
+use cocoa::{appkit::NSView, base::id as cocoa_id};
+use core_graphics_types::geometry::CGSize;
+use metal::*;
+use objc::{rc::autoreleasepool, runtime::YES};
+use std::mem;
+use winit::{
+    event::{Event, WindowEvent},
+    event_loop::ControlFlow,
+    platform::macos::WindowExtMacOS,
+};
+
+pub mod camera;
+pub mod geometry;
+pub mod renderer;
+pub mod scene;
+
+fn find_raytracing_supporting_device() -> Device {
+    for device in Device::all() {
+        if !device.supports_raytracing() {
+            continue;
+        }
+        if device.is_low_power() {
+            continue;
+        }
+        return device;
+    }
+
+    panic!("No device in this machine supports raytracing!")
+}
+
+fn main() {
+    let events_loop = winit::event_loop::EventLoop::new();
+    let size = winit::dpi::LogicalSize::new(800, 600);
+
+    let window = winit::window::WindowBuilder::new()
+        .with_inner_size(size)
+        .with_title("Metal Raytracing Example".to_string())
+        .build(&events_loop)
+        .unwrap();
+
+    let device = find_raytracing_supporting_device();
+
+    let layer = MetalLayer::new();
+    layer.set_device(&device);
+    layer.set_pixel_format(MTLPixelFormat::RGBA16Float);
+    layer.set_presents_with_transaction(false);
+
+    unsafe {
+        let view = window.ns_view() as cocoa_id;
+        view.setWantsLayer(YES);
+        view.setLayer(mem::transmute(layer.as_ref()));
+    }
+
+    let draw_size = window.inner_size();
+    let cg_size = CGSize::new(draw_size.width as f64, draw_size.height as f64);
+    layer.set_drawable_size(cg_size);
+
+    let mut renderer = renderer::Renderer::new(device);
+    renderer.window_resized(cg_size);
+
+    events_loop.run(move |event, _, control_flow| {
+        autoreleasepool(|| {
+            *control_flow = ControlFlow::Poll;
+
+            match event {
+                Event::WindowEvent { event, .. } => match event {
+                    WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
+                    WindowEvent::Resized(size) => {
+                        let size = CGSize::new(size.width as f64, size.height as f64);
+                        layer.set_drawable_size(size);
+                        renderer.window_resized(size);
+                    }
+                    _ => (),
+                },
+                Event::MainEventsCleared => {
+                    window.request_redraw();
+                }
+                Event::RedrawRequested(_) => {
+                    renderer.draw(&layer);
+                }
+                _ => {}
+            }
+        });
+    });
+}
diff --git a/third_party/rust/metal/examples/raytracing/renderer.rs b/third_party/rust/metal/examples/raytracing/renderer.rs
new file mode 100644
index 000000000000..f483d3e0a81e
--- /dev/null
+++ b/third_party/rust/metal/examples/raytracing/renderer.rs
@@ -0,0 +1,512 @@
+use core_graphics_types::{base::CGFloat, geometry::CGSize};
+use std::{
+    collections::BTreeMap,
+    ffi::c_void,
+    mem::{size_of, transmute},
+    ops::Index,
+    sync::{Arc, Condvar, Mutex},
+};
+
+use glam::{Vec3, Vec4, Vec4Swizzles};
+use rand::{thread_rng, RngCore};
+
+use metal::{foreign_types::ForeignType, *};
+
+use crate::{camera::Camera, geometry::get_managed_buffer_storage_mode, scene::Scene};
+
+#[repr(C)]
+struct Uniforms {
+    pub width: u32,
+    pub height: u32,
+    pub frame_index: u32,
+    pub light_count: u32,
+    pub camera: Camera,
+}
+
+pub const MAX_FRAMES_IN_FLIGHT: NSUInteger = 3;
+pub const ALIGNED_UNIFORMS_SIZE: NSUInteger = (size_of::<Uniforms>() as NSUInteger + 255) & !255;
+pub const UNIFORM_BUFFER_SIZE: NSUInteger = MAX_FRAMES_IN_FLIGHT * ALIGNED_UNIFORMS_SIZE;
+
+#[derive(Clone)]
+struct Semaphore {
+    data: Arc<(Mutex<usize>, Condvar)>,
+}
+
+impl Semaphore {
+    fn new(capacity: usize) -> Self {
+        Self {
+            data: Arc::new((Mutex::new(capacity), Condvar::new())),
+        }
+    }
+
+    fn acquire(&self) {
+        let mut value = self.data.0.lock().unwrap();
+        while *value == 0 {
+            value = self.data.1.wait(value).unwrap();
+        }
+        *value -= 1;
+    }
+
+    fn release(&self) {
+        let mut value = self.data.0.lock().unwrap();
+        *value += 1;
+        self.data.1.notify_one();
+    }
+}
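This hand-rolled counting semaphore is what keeps the CPU from running ahead of the GPU: `draw` below calls `acquire` before encoding a frame and `release` from the command buffer's completed handler (note that `Renderer::new` constructs it with capacity `MAX_FRAMES_IN_FLIGHT - 2 = 1`). A minimal standalone sketch of the same acquire/release pattern, using only `std`:

```rust
// Hypothetical usage of the Semaphore type defined above.
let frames = Semaphore::new(1); // one frame in flight at a time
for _frame in 0..3 {
    frames.acquire(); // blocks until the previous frame has been released
    let done = frames.clone();
    std::thread::spawn(move || {
        // stand-in for Metal's add_completed_handler callback
        done.release();
    });
}
```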
+
+pub struct Renderer {
+    pub device: Device,
+    pub scene: Scene,
+    pub uniform_buffer: Buffer,
+    pub resource_buffer: Buffer,
+    pub instance_acceleration_structure: AccelerationStructure,
+    pub accumulation_targets: [Texture; 2],
+    pub random_texture: Texture,
+    pub frame_index: NSUInteger,
+    pub uniform_buffer_index: NSUInteger,
+    pub uniform_buffer_offset: NSUInteger,
+    pub size: CGSize,
+    semaphore: Semaphore,
+    pub queue: CommandQueue,
+    instance_buffer: Buffer,
+    intersection_function_table: IntersectionFunctionTable,
+    primitive_acceleration_structures: Vec<AccelerationStructure>,
+    raytracing_pipeline: ComputePipelineState,
+    copy_pipeline: RenderPipelineState,
+}
+
+impl Renderer {
+    pub fn new(device: Device) -> Self {
+        let scene = Scene::new(device.clone());
+
+        let library_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
+            .join("examples/raytracing/shaders.metallib");
+        let library = device.new_library_with_file(library_path).unwrap();
+        let queue = device.new_command_queue();
+
+        let buffer_data = [0u8; UNIFORM_BUFFER_SIZE as usize];
+        let uniform_buffer = device.new_buffer_with_data(
+            buffer_data.as_ptr() as *const c_void,
+            UNIFORM_BUFFER_SIZE,
+            get_managed_buffer_storage_mode(),
+        );
+        uniform_buffer.set_label("uniform buffer");
+        let resources_stride = {
+            let mut max = 0;
+            for geometry in &scene.geometries {
+                let s = geometry.get_resources().len();
+                if s > max {
+                    max = s;
+                }
+            }
+            max
+        };
+        let mut resource_buffer_data = vec![0u64; resources_stride * scene.geometries.len()];
+        for geometry_index in 0..scene.geometries.len() {
+            let geometry = scene.geometries[geometry_index].as_ref();
+            let resource_buffer_begin_index = resources_stride * geometry_index;
+            let resources = geometry.get_resources();
+
+            for argument_index in 0..resources.len() {
+                let resource_buffer_index = resource_buffer_begin_index + argument_index;
+                let resource = resources[argument_index].clone();
+                resource_buffer_data[resource_buffer_index] =
+                    if resource.conforms_to_protocol::<MTLBuffer>().unwrap() {
+                        let buffer = unsafe { Buffer::from_ptr(transmute(resource.into_ptr())) };
+                        buffer.gpu_address()
+                    } else if resource.conforms_to_protocol::<MTLTexture>().unwrap() {
+                        let texture = unsafe { Texture::from_ptr(transmute(resource.into_ptr())) };
+                        texture.gpu_resource_id()._impl
+                    } else {
+                        panic!("Unexpected resource!")
+                    }
+            }
+        }
+        let resource_buffer = device.new_buffer_with_data(
+            resource_buffer_data.as_ptr() as *const c_void,
+            (resource_buffer_data.len() * size_of::<u64>()) as NSUInteger,
+            get_managed_buffer_storage_mode(),
+        );
+        resource_buffer.set_label("resource buffer");
+        resource_buffer.did_modify_range(NSRange::new(0, resource_buffer.length()));
+
+        let mut primitive_acceleration_structures = Vec::new();
+        for i in 0..scene.geometries.len() {
+            let mesh = scene.geometries[i].as_ref();
+            let geometry_descriptor = mesh.get_geometry_descriptor();
+            geometry_descriptor.set_intersection_function_table_offset(i as NSUInteger);
+            let geometry_descriptors = Array::from_owned_slice(&[geometry_descriptor]);
+            let accel_descriptor = PrimitiveAccelerationStructureDescriptor::descriptor();
+            accel_descriptor.set_geometry_descriptors(&geometry_descriptors);
+            let accel_descriptor: AccelerationStructureDescriptor = From::from(accel_descriptor);
+            primitive_acceleration_structures.push(
+                Self::new_acceleration_structure_with_descriptor(
+                    &device,
+                    &queue,
+                    &accel_descriptor,
+                ),
+            );
+        }
+
+        let mut instance_descriptors = vec![
+            MTLAccelerationStructureInstanceDescriptor::default();
+            scene.geometry_instances.len()
+        ];
+        for instance_index in 0..scene.geometry_instances.len() {
+            let instance = scene.geometry_instances[instance_index].as_ref();
+            let geometry_index = instance.index_in_scene;
+            instance_descriptors[instance_index].acceleration_structure_index =
+                geometry_index as u32;
+            instance_descriptors[instance_index].options =
+                if instance.geometry.get_intersection_function_name().is_none() {
+                    MTLAccelerationStructureInstanceOptions::Opaque
+                } else {
+                    MTLAccelerationStructureInstanceOptions::None
+                };
+            instance_descriptors[instance_index].intersection_function_table_offset = 0;
+            instance_descriptors[instance_index].mask = instance.mask as u32;
+            for column in 0..4 {
+                for row in 0..3 {
+                    instance_descriptors[instance_index].transformation_matrix[column][row] =
+                        *instance.transform.col(column).index(row);
+                }
+            }
+        }
+        let instance_buffer = device.new_buffer_with_data(
+            instance_descriptors.as_ptr() as *const c_void,
+            (size_of::<MTLAccelerationStructureInstanceDescriptor>()
+                * scene.geometry_instances.len()) as NSUInteger,
+            get_managed_buffer_storage_mode(),
+        );
+        instance_buffer.set_label("instance buffer");
+        instance_buffer.did_modify_range(NSRange::new(0, instance_buffer.length()));
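The nested `column`/`row` copy above flattens each glam `Mat4` into the descriptor's `transformation_matrix: [[f32; 3]; 4]`, i.e. four columns of three rows; the projective fourth row is implied as $(0,0,0,1)$. For a pure translation by $t$ that means

$$\begin{pmatrix}1&0&0&t_x\\0&1&0&t_y\\0&0&1&t_z\end{pmatrix}\;\longmapsto\;\texttt{transformation\_matrix} = \big[\,(1,0,0),\,(0,1,0),\,(0,0,1),\,(t_x,t_y,t_z)\,\big],$$

so column 3 carries the translation, exactly the part `Mat4::from_translation` stores in `col(3)`.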
+
+        let accel_descriptor = InstanceAccelerationStructureDescriptor::descriptor();
+        accel_descriptor.set_instanced_acceleration_structures(&Array::from_owned_slice(
+            &primitive_acceleration_structures,
+        ));
+        accel_descriptor.set_instance_count(scene.geometry_instances.len() as NSUInteger);
+        accel_descriptor.set_instance_descriptor_buffer(&instance_buffer);
+        let accel_descriptor: AccelerationStructureDescriptor = From::from(accel_descriptor);
+        let instance_acceleration_structure =
+            Self::new_acceleration_structure_with_descriptor(&device, &queue, &accel_descriptor);
+
+        let mut intersection_functions = BTreeMap::<String, Function>::new();
+        for geometry in &scene.geometries {
+            if let Some(name) = geometry.get_intersection_function_name() {
+                if !intersection_functions.contains_key(name) {
+                    let intersection_function = Self::new_specialised_function_with_name(
+                        &library,
+                        resources_stride as u32,
+                        name,
+                    );
+                    intersection_functions.insert(name.to_string(), intersection_function);
+                }
+            }
+        }
+        let raytracing_function = Self::new_specialised_function_with_name(
+            &library,
+            resources_stride as u32,
+            "raytracingKernel",
+        );
+        let intersection_function_array: Vec<&FunctionRef> = intersection_functions
+            .values()
+            .map(|f| -> &FunctionRef { f })
+            .collect();
+        let raytracing_pipeline = Self::new_compute_pipeline_state_with_function(
+            &device,
+            &raytracing_function,
+            &intersection_function_array,
+        );
+        let intersection_function_table_descriptor = IntersectionFunctionTableDescriptor::new();
+        intersection_function_table_descriptor
+            .set_function_count(scene.geometries.len() as NSUInteger);
+        let intersection_function_table = raytracing_pipeline
+            .new_intersection_function_table_with_descriptor(
+                &intersection_function_table_descriptor,
+            );
+        for geometry_index in 0..scene.geometries.len() {
+            let geometry = scene.geometries[geometry_index].as_ref();
+            if let Some(intersection_function_name) = geometry.get_intersection_function_name() {
+                let intersection_function = &intersection_functions[intersection_function_name];
+                let handle = raytracing_pipeline
+                    .function_handle_with_function(intersection_function)
+                    .unwrap();
+                intersection_function_table.set_function(handle, geometry_index as NSUInteger);
+            }
+        }
+        let render_descriptor = RenderPipelineDescriptor::new();
+        render_descriptor
+            .set_vertex_function(Some(&library.get_function("copyVertex", None).unwrap()));
+        render_descriptor
+            .set_fragment_function(Some(&library.get_function("copyFragment", None).unwrap()));
+        render_descriptor
+            .color_attachments()
+            .object_at(0)
+            .unwrap()
+            .set_pixel_format(MTLPixelFormat::RGBA16Float);
+        let copy_pipeline = device
+            .new_render_pipeline_state(&render_descriptor)
+            .unwrap();
+
+        let texture_descriptor = Self::create_target_descriptor(1024, 1024);
+        let accumulation_targets = [
+            device.new_texture(&texture_descriptor),
+            device.new_texture(&texture_descriptor),
+        ];
+        let random_texture = device.new_texture(&texture_descriptor);
+
+        Self {
+            device,
+            scene,
+            uniform_buffer,
+            resource_buffer,
+            instance_acceleration_structure,
+            accumulation_targets,
+            random_texture,
+            frame_index: 0,
+            uniform_buffer_index: 0,
+            uniform_buffer_offset: 0,
+            size: CGSize::new(1024 as CGFloat, 1024 as CGFloat),
+            semaphore: Semaphore::new((MAX_FRAMES_IN_FLIGHT - 2) as usize),
+            instance_buffer,
+            queue,
+            intersection_function_table,
+            primitive_acceleration_structures,
+            raytracing_pipeline,
+            copy_pipeline,
+        }
+    }
+
+    fn create_target_descriptor(width: NSUInteger, height: NSUInteger) -> TextureDescriptor {
+        let texture_descriptor = TextureDescriptor::new();
+        texture_descriptor.set_pixel_format(MTLPixelFormat::RGBA32Float);
+        texture_descriptor.set_texture_type(MTLTextureType::D2);
+        texture_descriptor.set_width(width);
+        texture_descriptor.set_height(height);
+        texture_descriptor.set_storage_mode(MTLStorageMode::Private);
+        texture_descriptor.set_usage(MTLTextureUsage::ShaderRead | MTLTextureUsage::ShaderWrite);
+        texture_descriptor
+    }
+
+    pub fn window_resized(&mut self, size: CGSize) {
+        self.size = size;
+        let texture_descriptor =
+            Self::create_target_descriptor(size.width as NSUInteger, size.height as NSUInteger);
+        self.accumulation_targets[0] = self.device.new_texture(&texture_descriptor);
+        self.accumulation_targets[1] = self.device.new_texture(&texture_descriptor);
+        texture_descriptor.set_pixel_format(MTLPixelFormat::R32Uint);
+        texture_descriptor.set_usage(MTLTextureUsage::ShaderRead);
+        texture_descriptor.set_storage_mode(MTLStorageMode::Managed);
+        self.random_texture = self.device.new_texture(&texture_descriptor);
+        let mut rng = thread_rng();
+        let mut random_values = vec![0u32; (size.width * size.height) as usize];
+        for v in &mut random_values {
+            *v = rng.next_u32();
+        }
+        self.random_texture.replace_region(
+            MTLRegion::new_2d(0, 0, size.width as NSUInteger, size.height as NSUInteger),
+            0,
+            random_values.as_ptr() as *const c_void,
+            size_of::<u32>() as NSUInteger * size.width as NSUInteger,
+        );
+        self.frame_index = 0;
+    }
+
+    fn update_uniforms(&mut self) {
+        self.uniform_buffer_offset = ALIGNED_UNIFORMS_SIZE * self.uniform_buffer_index;
+
+        let uniforms = unsafe {
+            &mut *((self.uniform_buffer.contents() as *mut u8)
+                .add(self.uniform_buffer_offset as usize) as *mut Uniforms)
+        };
+
+        let position = self.scene.camera.position;
+        let target = self.scene.camera.forward;
+        let up = self.scene.camera.up;
+
+        let forward = Vec3::normalize(target.xyz() - position.xyz());
+        let right = Vec3::normalize(Vec3::cross(forward, up.xyz()));
+        let up = Vec3::normalize(Vec3::cross(right, forward));
+
+        uniforms.camera.position = position;
+        uniforms.camera.forward = Vec4::from((forward, 0.0));
+        uniforms.camera.right = Vec4::from((right, 0.0));
+        uniforms.camera.up = Vec4::from((up, 0.0));
+
+        let field_of_view = 45.0 * (std::f32::consts::PI / 180.0);
+        let aspect_ratio = self.size.width as f32 / self.size.height as f32;
+        let image_plane_height = f32::tan(field_of_view / 2.0);
+        let image_plane_width = aspect_ratio * image_plane_height;
+
+        uniforms.camera.right *= image_plane_width;
+        uniforms.camera.up *= image_plane_height;
+
+        uniforms.width = self.size.width as u32;
+        uniforms.height = self.size.height as u32;
+
+        uniforms.frame_index = self.frame_index as u32;
+        self.frame_index += 1;
+
+        uniforms.light_count = self.scene.lights.len() as u32;
+
+        self.uniform_buffer.did_modify_range(NSRange {
+            location: self.uniform_buffer_offset,
+            length: ALIGNED_UNIFORMS_SIZE,
+        });
+
+        self.uniform_buffer_index = (self.uniform_buffer_index + 1) % MAX_FRAMES_IN_FLIGHT;
+    }
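A worked example of the `(x + 255) & !255` rounding in `ALIGNED_UNIFORMS_SIZE`: with the `#[repr(C)]` layout above, `Uniforms` is four `u32`s plus the 64-byte `Camera`, i.e. 80 bytes assuming no trailing padding, so

$$(80 + 255)\;\&\;\lnot 255 = 335\;\&\;\lnot 255 = 256,$$

and the three in-flight copies sit at offsets 0, 256 and 512 of the 768-byte uniform buffer — each on a 256-byte boundary, a conservative alignment for Metal's buffer-offset requirements.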
+
+    pub fn draw(&mut self, layer: &MetalLayer) {
+        self.semaphore.acquire();
+        self.update_uniforms();
+        let command_buffer = self.queue.new_command_buffer();
+        let sem = self.semaphore.clone();
+        let block = block::ConcreteBlock::new(move |_| {
+            sem.release();
+        })
+        .copy();
+        command_buffer.add_completed_handler(&block);
+        let width = self.size.width as NSUInteger;
+        let height = self.size.height as NSUInteger;
+        let threads_per_thread_group = MTLSize::new(8, 8, 1);
+        let thread_groups = MTLSize::new(
+            (width + threads_per_thread_group.width - 1) / threads_per_thread_group.width,
+            (height + threads_per_thread_group.height - 1) / threads_per_thread_group.height,
+            1,
+        );
+        let compute_encoder = command_buffer.new_compute_command_encoder();
+        compute_encoder.set_buffer(0, Some(&self.uniform_buffer), self.uniform_buffer_offset);
+        compute_encoder.set_buffer(2, Some(&self.instance_buffer), 0);
+        compute_encoder.set_buffer(3, Some(&self.scene.lights_buffer), 0);
+        compute_encoder.set_acceleration_structure(4, Some(&self.instance_acceleration_structure));
+        compute_encoder.set_intersection_function_table(5, Some(&self.intersection_function_table));
+        compute_encoder.set_texture(0, Some(&self.random_texture));
+        compute_encoder.set_texture(1, Some(&self.accumulation_targets[0]));
+        compute_encoder.set_texture(2, Some(&self.accumulation_targets[1]));
+        for geometry in &self.scene.geometries {
+            for resource in geometry.get_resources() {
+                compute_encoder.use_resource(&resource, MTLResourceUsage::Read);
+            }
+        }
+        for primitive_acceleration_structure in &self.primitive_acceleration_structures {
+            let resource: Resource = From::from(primitive_acceleration_structure.clone());
+            compute_encoder.use_resource(&resource, MTLResourceUsage::Read);
+        }
+        compute_encoder.set_compute_pipeline_state(&self.raytracing_pipeline);
+        compute_encoder.dispatch_thread_groups(thread_groups, threads_per_thread_group);
+        compute_encoder.end_encoding();
+        (self.accumulation_targets[0], self.accumulation_targets[1]) = (
+            self.accumulation_targets[1].clone(),
+            self.accumulation_targets[0].clone(),
+        );
+        if let Some(drawable) = layer.next_drawable() {
+            let render_pass_descriptor = RenderPassDescriptor::new();
+            let colour_attachment = render_pass_descriptor
+                .color_attachments()
+                .object_at(0)
+                .unwrap();
+            colour_attachment.set_texture(Some(drawable.texture()));
+            colour_attachment.set_load_action(MTLLoadAction::Clear);
+            colour_attachment.set_clear_color(MTLClearColor::new(0.0, 0.0, 0.0, 1.0));
+            let render_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
+            render_encoder.set_render_pipeline_state(&self.copy_pipeline);
+            render_encoder.set_fragment_texture(0, Some(&self.accumulation_targets[0]));
+            render_encoder.draw_primitives(MTLPrimitiveType::Triangle, 0, 6);
+            render_encoder.end_encoding();
+            command_buffer.present_drawable(&drawable);
+        }
+        command_buffer.commit();
+    }
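The `thread_groups` computation is the standard ceiling division $\lceil n/8 \rceil = \lfloor (n+7)/8 \rfloor$, so the grid always covers the whole target and the kernel's early `return` discards the overhang threads. For an 800×600 drawable, for instance:

$$\left\lceil \frac{800}{8} \right\rceil = 100, \qquad \left\lceil \frac{600}{8} \right\rceil = 75,$$

i.e. 100 × 75 threadgroups of 8 × 8 threads — 480,000 threads for 480,000 pixels, an exact fit in this case.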
+
+    fn new_acceleration_structure_with_descriptor(
+        device: &Device,
+        queue: &CommandQueue,
+        descriptor: &AccelerationStructureDescriptorRef,
+    ) -> AccelerationStructure {
+        let accel_sizes = device.acceleration_structure_sizes_with_descriptor(descriptor);
+        let acceleration_structure =
+            device.new_acceleration_structure_with_size(accel_sizes.acceleration_structure_size);
+        let scratch_buffer = device.new_buffer(
+            accel_sizes.build_scratch_buffer_size,
+            MTLResourceOptions::StorageModePrivate,
+        );
+        let command_buffer = queue.new_command_buffer();
+        let command_encoder = command_buffer.new_acceleration_structure_command_encoder();
+        let compacted_size_buffer = device.new_buffer(
+            size_of::<u32>() as NSUInteger,
+            MTLResourceOptions::StorageModeShared,
+        );
+        command_encoder.build_acceleration_structure(
+            &acceleration_structure,
+            &descriptor,
+            &scratch_buffer,
+            0,
+        );
+        command_encoder.write_compacted_acceleration_structure_size(
+            &acceleration_structure,
+            &compacted_size_buffer,
+            0,
+        );
+        command_encoder.end_encoding();
+        command_buffer.commit();
+        command_buffer.wait_until_completed();
+        let compacted_size: *const u32 = unsafe { transmute(compacted_size_buffer.contents()) };
+        let compacted_size = unsafe { *compacted_size } as NSUInteger;
+        let compacted_acceleration_structure =
+            device.new_acceleration_structure_with_size(compacted_size);
+        let command_buffer = queue.new_command_buffer();
+        let command_encoder = command_buffer.new_acceleration_structure_command_encoder();
+        command_encoder.copy_and_compact_acceleration_structure(
+            &acceleration_structure,
+            &compacted_acceleration_structure,
+        );
+        command_encoder.end_encoding();
+        command_buffer.commit();
+        compacted_acceleration_structure
+    }
+
+    fn new_specialised_function_with_name(
+        library: &Library,
+        resources_stride: u32,
+        name: &str,
+    ) -> Function {
+        let constants = FunctionConstantValues::new();
+        let resources_stride = resources_stride * size_of::<u64>() as u32;
+        constants.set_constant_value_at_index(
+            &resources_stride as *const u32 as *const c_void,
+            MTLDataType::UInt,
+            0,
+        );
+        let v = true;
+        constants.set_constant_value_at_index(
+            &v as *const bool as *const c_void,
+            MTLDataType::Bool,
+            1,
+        );
+        constants.set_constant_value_at_index(
+            &v as *const bool as *const c_void,
+            MTLDataType::Bool,
+            2,
+        );
+        library.get_function(name, Some(constants)).unwrap()
+    }
+
+    fn new_compute_pipeline_state_with_function(
+        device: &Device,
+        function: &Function,
+        linked_functions: &[&FunctionRef],
+    ) -> ComputePipelineState {
+        let linked_functions = {
+            let lf = LinkedFunctions::new();
+            lf.set_functions(linked_functions);
+            lf
+        };
+        let descriptor = ComputePipelineDescriptor::new();
+        descriptor.set_compute_function(Some(function));
+        descriptor.set_linked_functions(linked_functions.as_ref());
+        descriptor.set_thread_group_size_is_multiple_of_thread_execution_width(true);
+        device.new_compute_pipeline_state(&descriptor).unwrap()
+    }
+}
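For cross-reference, the three constant slots filled by `new_specialised_function_with_name` correspond to the `[[function_constant(n)]]` declarations in `shaders.metal` below; the slot names here are hypothetical, but the indices and values are the ones this renderer sets:

```rust
// Slot -> shader declaration (see shaders.metal):
const RESOURCES_STRIDE_SLOT: u64 = 0; // constant uint resourcesStride
                                      // (converted to *bytes*: entries * size_of::<u64>())
const USE_INTERSECTION_FUNCTIONS_SLOT: u64 = 1; // constant bool useIntersectionFunctions = true
const USE_PER_PRIMITIVE_DATA_SLOT: u64 = 2;     // constant bool usePerPrimitiveData = true
// With usePerPrimitiveData = true the shader reads intersection.primitive_data,
// and useResourcesBuffer (= !usePerPrimitiveData) compiles the resources-buffer
// parameter out of the kernel and intersection-function signatures.
```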
diff --git a/third_party/rust/metal/examples/raytracing/scene.rs b/third_party/rust/metal/examples/raytracing/scene.rs
new file mode 100644
index 000000000000..8ecf8568de15
--- /dev/null
+++ b/third_party/rust/metal/examples/raytracing/scene.rs
@@ -0,0 +1,135 @@
+use std::{ffi::c_void, mem::size_of, sync::Arc};
+
+use glam::{Mat4, Vec3, Vec4};
+use rand::{thread_rng, Rng};
+
+use metal::{Buffer, Device, NSRange, NSUInteger};
+
+use super::{camera::Camera, geometry::*};
+
+pub struct Scene {
+    pub device: Device,
+    pub camera: Camera,
+    pub geometries: Vec<Arc<dyn Geometry>>,
+    pub geometry_instances: Vec<Arc<GeometryInstance>>,
+    pub lights: Vec<AreaLight>,
+    pub lights_buffer: Buffer,
+}
+
+impl Scene {
+    pub fn new(device: Device) -> Self {
+        let mut geometries = Vec::<Arc<dyn Geometry>>::new();
+        let mut light_mesh = TriangleGeometry::new(device.clone(), "light".to_string());
+        let transform = Mat4::from_translation(Vec3::new(0.0, 1.0, 0.0))
+            * Mat4::from_scale(Vec3::new(0.5, 1.98, 0.5));
+        light_mesh.add_cube_with_faces(
+            FACE_MASK_POSITIVE_Y,
+            Vec3::new(1.0, 1.0, 1.0),
+            transform,
+            true,
+        );
+        light_mesh.upload_to_buffers();
+        let light_mesh = Arc::new(light_mesh);
+        geometries.push(light_mesh.clone());
+
+        let mut geometry_mesh = TriangleGeometry::new(device.clone(), "geometry".to_string());
+        let transform = Mat4::from_translation(Vec3::new(0.0, 1.0, 0.0))
+            * Mat4::from_scale(Vec3::new(2.0, 2.0, 2.0));
+        geometry_mesh.add_cube_with_faces(
+            FACE_MASK_NEGATIVE_Y | FACE_MASK_POSITIVE_Y | FACE_MASK_NEGATIVE_Z,
+            Vec3::new(0.725, 0.71, 0.68),
+            transform,
+            true,
+        );
+        geometry_mesh.add_cube_with_faces(
+            FACE_MASK_NEGATIVE_X,
+            Vec3::new(0.63, 0.065, 0.05),
+            transform,
+            true,
+        );
+        geometry_mesh.add_cube_with_faces(
+            FACE_MASK_POSITIVE_X,
+            Vec3::new(0.14, 0.45, 0.091),
+            transform,
+            true,
+        );
+        let transform = Mat4::from_translation(Vec3::new(-0.335, 0.6, -0.29))
+            * Mat4::from_rotation_y(0.3)
+            * Mat4::from_scale(Vec3::new(0.6, 1.2, 0.6));
+        geometry_mesh.add_cube_with_faces(
+            FACE_MASK_ALL,
+            Vec3::new(0.725, 0.71, 0.68),
+            transform,
+            false,
+        );
+        geometry_mesh.upload_to_buffers();
+        let geometry_mesh = Arc::new(geometry_mesh);
+        geometries.push(geometry_mesh.clone());
+
+        let mut sphere_geometry = SphereGeometry::new(device.clone());
+        sphere_geometry.add_sphere_with_origin(
+            Vec3::new(0.3275, 0.3, 0.3725),
+            0.3,
+            Vec3::new(0.725, 0.71, 0.68),
+        );
+        sphere_geometry.upload_to_buffers();
+        let sphere_geometry = Arc::new(sphere_geometry);
+        geometries.push(sphere_geometry.clone());
+
+        let mut rng = thread_rng();
+        let mut geometry_instances = Vec::new();
+        let mut lights = Vec::new();
+        for y in -1..2 {
+            for x in -1..2 {
+                let transform =
+                    Mat4::from_translation(Vec3::new(x as f32 * 2.5, y as f32 * 2.5, 0.0));
+                geometry_instances.push(Arc::new(GeometryInstance {
+                    geometry: light_mesh.clone(),
+                    transform,
+                    mask: GEOMETRY_MASK_LIGHT,
+                    index_in_scene: 0,
+                }));
+                geometry_instances.push(Arc::new(GeometryInstance {
+                    geometry: geometry_mesh.clone(),
+                    transform,
+                    mask: GEOMETRY_MASK_TRIANGLE,
+                    index_in_scene: 1,
+                }));
+                geometry_instances.push(Arc::new(GeometryInstance {
+                    geometry: sphere_geometry.clone(),
+                    transform,
+                    mask: GEOMETRY_MASK_SPHERE,
+                    index_in_scene: 2,
+                }));
+                lights.push(AreaLight {
+                    position: Vec4::new(x as f32 * 2.5, y as f32 * 2.5 + 1.98, 0.0, 0.0),
+                    forward: Vec4::new(0.0, -1.0, 0.0, 0.0),
+                    right: Vec4::new(0.25, 0.0, 0.0, 0.0),
+                    up: Vec4::new(0.0, 0.0, 0.25, 0.0),
+                    colour: Vec4::new(
+                        rng.gen_range(0f32..=1.0),
+                        rng.gen_range(0f32..=1.0),
+                        rng.gen_range(0f32..=1.0),
+                        0.0,
+                    ),
+                });
+            }
+        }
+        let lights_buffer = device.new_buffer_with_data(
+            lights.as_ptr() as *const c_void,
+            (lights.len() * size_of::<AreaLight>()) as NSUInteger,
+            get_managed_buffer_storage_mode(),
+        );
+        lights_buffer.did_modify_range(NSRange::new(0, lights_buffer.length()));
+        lights_buffer.set_label("lights buffer");
+
+        Self {
+            device,
+            camera: Camera::new(),
+            geometries,
+            geometry_instances,
+            lights,
+            lights_buffer,
+        }
+    }
+}
diff --git a/third_party/rust/metal/examples/raytracing/screenshot.png b/third_party/rust/metal/examples/raytracing/screenshot.png
new file mode 100644
index 000000000000..417a1d746dea
Binary files /dev/null and b/third_party/rust/metal/examples/raytracing/screenshot.png differ
diff --git a/third_party/rust/metal/examples/raytracing/shaders.metal b/third_party/rust/metal/examples/raytracing/shaders.metal
new file mode 100644
index 000000000000..54aa2a4f4798
--- /dev/null
+++ b/third_party/rust/metal/examples/raytracing/shaders.metal
@@ -0,0 +1,598 @@
+/*
+See LICENSE folder for this sample’s licensing information.
+
+Abstract:
+The Metal shaders used for this sample.
+*/
+
+#include <metal_stdlib>
+#include <simd/simd.h>
+
+using namespace metal;
+
+using namespace raytracing;
+
+
+#define GEOMETRY_MASK_TRIANGLE 1
+#define GEOMETRY_MASK_SPHERE   2
+#define GEOMETRY_MASK_LIGHT    4
+
+#define GEOMETRY_MASK_GEOMETRY (GEOMETRY_MASK_TRIANGLE | GEOMETRY_MASK_SPHERE)
+
+#define RAY_MASK_PRIMARY   (GEOMETRY_MASK_GEOMETRY | GEOMETRY_MASK_LIGHT)
+#define RAY_MASK_SHADOW    GEOMETRY_MASK_GEOMETRY
+#define RAY_MASK_SECONDARY GEOMETRY_MASK_GEOMETRY
+
+struct Camera {
+    vector_float3 position;
+    vector_float3 right;
+    vector_float3 up;
+    vector_float3 forward;
+};
+
+struct AreaLight {
+    vector_float3 position;
+    vector_float3 forward;
+    vector_float3 right;
+    vector_float3 up;
+    vector_float3 color;
+};
+
+struct Uniforms {
+    unsigned int width;
+    unsigned int height;
+    unsigned int frameIndex;
+    unsigned int lightCount;
+    Camera camera;
+};
+
+struct Sphere {
+    packed_float3 origin;
+    float radiusSquared;
+    packed_float3 color;
+    float radius;
+};
+
+struct Triangle {
+    vector_float3 normals[3];
+    vector_float3 colors[3];
+};
+
+constant unsigned int resourcesStride [[function_constant(0)]];
+constant bool useIntersectionFunctions [[function_constant(1)]];
+constant bool usePerPrimitiveData [[function_constant(2)]];
+constant bool useResourcesBuffer = !usePerPrimitiveData;
+
+constant unsigned int primes[] = {
+    2,  3,  5,  7,
+    11, 13, 17, 19,
+    23, 29, 31, 37,
+    41, 43, 47, 53,
+    59, 61, 67, 71,
+    73, 79, 83, 89
+};
+
+// Returns the i'th element of the Halton sequence using the d'th prime number as a
+// base. The Halton sequence is a low discrepancy sequence: the values appear
+// random, but are more evenly distributed than a purely random sequence. Each random
+// value used to render the image uses a different independent dimension, `d`,
+// and each sample (frame) uses a different index `i`. To decorrelate each pixel,
+// you can apply a random offset to `i`.
+float halton(unsigned int i, unsigned int d) {
+    unsigned int b = primes[d];
+
+    float f = 1.0f;
+    float invB = 1.0f / b;
+
+    float r = 0;
+
+    while (i > 0) {
+        f = f * invB;
+        r = r + f * (i % b);
+        i = i / b;
+    }
+
+    return r;
+}
+
+// Interpolates the vertex attribute of an arbitrary type across the surface of a triangle
+// given the barycentric coordinates and triangle index in an intersection structure.
+template<typename T, typename IndexType>
+inline T interpolateVertexAttribute(device T *attributes,
+                                    IndexType i0,
+                                    IndexType i1,
+                                    IndexType i2,
+                                    float2 uv) {
+    // Look up value for each vertex.
+    const T T0 = attributes[i0];
+    const T T1 = attributes[i1];
+    const T T2 = attributes[i2];
+
+    // Compute the sum of the vertex attributes weighted by the barycentric coordinates.
+    // The barycentric coordinates sum to one.
+    return (1.0f - uv.x - uv.y) * T0 + uv.x * T1 + uv.y * T2;
+}
+
+template<typename T>
+inline T interpolateVertexAttribute(thread T *attributes, float2 uv) {
+    // Look up the value for each vertex.
+    const T T0 = attributes[0];
+    const T T1 = attributes[1];
+    const T T2 = attributes[2];
+
+    // Compute the sum of the vertex attributes weighted by the barycentric coordinates.
+    // The barycentric coordinates sum to one.
+    return (1.0f - uv.x - uv.y) * T0 + uv.x * T1 + uv.y * T2;
+}
+
+// Uses the inversion method to map two uniformly random numbers to a 3D
+// unit hemisphere, where the probability of a given sample is proportional to the cosine
+// of the angle between the sample direction and the "up" direction (0, 1, 0).
+inline float3 sampleCosineWeightedHemisphere(float2 u) {
+    float phi = 2.0f * M_PI_F * u.x;
+
+    float cos_phi;
+    float sin_phi = sincos(phi, cos_phi);
+
+    float cos_theta = sqrt(u.y);
+    float sin_theta = sqrt(1.0f - cos_theta * cos_theta);
+
+    return float3(sin_theta * cos_phi, cos_theta, sin_theta * sin_phi);
+}
+
+// Maps two uniformly random numbers to the surface of a 2D area light
+// source and returns the direction to this point, the amount of light that travels
+// between the intersection point and the sample point on the light source, as well
+// as the distance between these two points.
+
+inline void sampleAreaLight(constant AreaLight & light,
+                            float2 u,
+                            float3 position,
+                            thread float3 & lightDirection,
+                            thread float3 & lightColor,
+                            thread float & lightDistance)
+{
+    // Map to -1..1
+    u = u * 2.0f - 1.0f;
+
+    // Transform into the light's coordinate system.
+    float3 samplePosition = light.position +
+                            light.right * u.x +
+                            light.up * u.y;
+
+    // Compute the vector from sample point on the light source to intersection point.
+    lightDirection = samplePosition - position;
+
+    lightDistance = length(lightDirection);
+
+    float inverseLightDistance = 1.0f / max(lightDistance, 1e-3f);
+
+    // Normalize the light direction.
+    lightDirection *= inverseLightDistance;
+
+    // Start with the light's color.
+    lightColor = light.color;
+
+    // Light falls off with the inverse square of the distance to the intersection point.
+    lightColor *= (inverseLightDistance * inverseLightDistance);
+
+    // Light also falls off with the cosine of the angle between the intersection point
+    // and the light source.
+    lightColor *= saturate(dot(-lightDirection, light.forward));
+}
+
+// Aligns a direction on the unit hemisphere such that the hemisphere's "up" direction
+// (0, 1, 0) maps to the given surface normal direction.
+inline float3 alignHemisphereWithNormal(float3 sample, float3 normal) {
+    // Set the "up" vector to the normal
+    float3 up = normal;
+
+    // Find an arbitrary direction perpendicular to the normal, which becomes the
+    // "right" vector.
+    float3 right = normalize(cross(normal, float3(0.0072f, 1.0f, 0.0034f)));
+
+    // Find a third vector perpendicular to the previous two, which becomes the
+    // "forward" vector.
+    float3 forward = cross(right, up);
+
+    // Map the direction on the unit hemisphere to the coordinate system aligned
+    // with the normal.
+    return sample.x * right + sample.y * up + sample.z * forward;
+}
+
+// Return the type for a bounding box intersection function.
+struct BoundingBoxIntersection {
+    bool accept    [[accept_intersection]]; // Whether to accept or reject the intersection.
+    float distance [[distance]];            // Distance from the ray origin to the intersection point.
+};
+
+// Resources for a piece of triangle geometry.
+struct TriangleResources {
+    device uint16_t *indices;
+    device float3 *vertexNormals;
+    device float3 *vertexColors;
+};
+
+// Resources for a piece of sphere geometry.
+struct SphereResources {
+    device Sphere *spheres;
+};
+
+/*
+ Custom sphere intersection function. The [[intersection]] keyword marks this as an intersection
+ function. The [[bounding_box]] keyword means that this intersection function handles intersecting rays
+ with bounding box primitives. To create sphere primitives, the sample creates bounding boxes that
+ enclose the sphere primitives.
+
+ The [[triangle_data]] and [[instancing]] keywords indicate that the intersector that calls this
+ intersection function returns barycentric coordinates for triangle intersections and traverses
+ an instance acceleration structure. These keywords must match between the intersection functions,
+ intersection function table, intersector, and intersection result to ensure that Metal propagates
+ data correctly between stages. Using fewer tags when possible may result in better performance,
+ as Metal may need to store less data and pass less data between stages. For example, if you do not
+ need barycentric coordinates, omitting [[triangle_data]] means Metal can avoid computing and storing
+ them.
+
+ The arguments to the intersection function contain information about the ray, the primitive to be
+ tested, and so on. The ray intersector provides this data when it calls the intersection function.
+ Metal provides other built-in arguments, but this sample doesn't use them.
+ */
+[[intersection(bounding_box, triangle_data, instancing)]]
+BoundingBoxIntersection sphereIntersectionFunction(// Ray parameters passed to the ray intersector below
+                                                   float3 origin [[origin]],
+                                                   float3 direction [[direction]],
+                                                   float minDistance [[min_distance]],
+                                                   float maxDistance [[max_distance]],
+                                                   // Information about the primitive.
+                                                   unsigned int primitiveIndex [[primitive_id]],
+                                                   unsigned int geometryIndex [[geometry_intersection_function_table_offset]],
+                                                   // Custom resources bound to the intersection function table.
+                                                   device void *resources [[buffer(0), function_constant(useResourcesBuffer)]]
+                                                   ,const device void* perPrimitiveData [[primitive_data]])
+{
+    Sphere sphere;
+    // Look up the resources for this piece of sphere geometry.
+    if (usePerPrimitiveData) {
+        // Per-primitive data points to data from the specified buffer as was configured in the MTLAccelerationStructureBoundingBoxGeometryDescriptor.
+        sphere = *(const device Sphere*)perPrimitiveData;
+    } else
+    {
+        device SphereResources& sphereResources = *(device SphereResources *)((device char *)resources + resourcesStride * geometryIndex);
+        // Get the actual sphere enclosed in this bounding box.
+        sphere = sphereResources.spheres[primitiveIndex];
+    }
+
+    // Check for intersection between the ray and sphere mathematically.
+    float3 oc = origin - sphere.origin;
+
+    float a = dot(direction, direction);
+    float b = 2 * dot(oc, direction);
+    float c = dot(oc, oc) - sphere.radiusSquared;
+
+    float disc = b * b - 4 * a * c;
+
+    BoundingBoxIntersection ret;
+
+    if (disc <= 0.0f) {
+        // If the ray missed the sphere, return false.
+        ret.accept = false;
+    }
+    else {
+        // Otherwise, compute the intersection distance.
+        ret.distance = (-b - sqrt(disc)) / (2 * a);
+
+        // The intersection function must also check whether the intersection distance is
+        // within the acceptable range. Intersection functions do not run in any particular order,
+        // so the maximum distance may be different from the one passed into the ray intersector.
+        ret.accept = ret.distance >= minDistance && ret.distance <= maxDistance;
+    }
+
+    return ret;
+}
+
+__attribute__((always_inline))
+float3 transformPoint(float3 p, float4x4 transform) {
+    return (transform * float4(p.x, p.y, p.z, 1.0f)).xyz;
+}
+
+__attribute__((always_inline))
+float3 transformDirection(float3 p, float4x4 transform) {
+    return (transform * float4(p.x, p.y, p.z, 0.0f)).xyz;
+}
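The quadratic in `sphereIntersectionFunction` is the standard ray–sphere test: substituting the ray $p(t) = o + t\,d$ into $\lVert p - c \rVert^2 = r^2$ with $\vec{oc} = o - c$ gives

$$\underbrace{(d \cdot d)}_{a}\,t^2 \;+\; \underbrace{2\,(\vec{oc} \cdot d)}_{b}\,t \;+\; \underbrace{\vec{oc} \cdot \vec{oc} - r^2}_{c} \;=\; 0, \qquad t = \frac{-b - \sqrt{b^2 - 4ac}}{2a},$$

where the code keeps the smaller root (the near side of the sphere) and treats a non-positive discriminant as a miss.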
+
+// Main ray tracing kernel.
+kernel void raytracingKernel(
+    uint2 tid [[thread_position_in_grid]],
+    constant Uniforms & uniforms [[buffer(0)]],
+    texture2d<unsigned int> randomTex [[texture(0)]],
+    texture2d<float> prevTex [[texture(1)]],
+    texture2d<float, access::write> dstTex [[texture(2)]],
+    device void *resources [[buffer(1), function_constant(useResourcesBuffer)]],
+    constant MTLAccelerationStructureInstanceDescriptor *instances [[buffer(2)]],
+    constant AreaLight *areaLights [[buffer(3)]],
+    instance_acceleration_structure accelerationStructure [[buffer(4)]],
+    intersection_function_table<triangle_data, instancing> intersectionFunctionTable [[buffer(5)]]
+)
+{
+    // The sample aligns the thread count to the threadgroup size, which means the thread count
+    // may be different than the bounds of the texture. Test to make sure this thread
+    // is referencing a pixel within the bounds of the texture.
+    if (tid.x >= uniforms.width || tid.y >= uniforms.height) return;
+
+    // The ray to cast.
+    ray ray;
+
+    // Pixel coordinates for this thread.
+    float2 pixel = (float2)tid;
+
+    // Apply a random offset to the random number index to decorrelate pixels.
+    unsigned int offset = randomTex.read(tid).x;
+
+    // Add a random offset to the pixel coordinates for antialiasing.
+    float2 r = float2(halton(offset + uniforms.frameIndex, 0),
+                      halton(offset + uniforms.frameIndex, 1));
+
+    pixel += r;
+
+    // Map pixel coordinates to -1..1.
+    float2 uv = (float2)pixel / float2(uniforms.width, uniforms.height);
+    uv = uv * 2.0f - 1.0f;
+
+    constant Camera & camera = uniforms.camera;
+
+    // Rays start at the camera position.
+    ray.origin = camera.position;
+
+    // Map normalized pixel coordinates into camera's coordinate system.
+    ray.direction = normalize(uv.x * camera.right +
+                              uv.y * camera.up +
+                              camera.forward);
+
+    // Don't limit intersection distance.
+    ray.max_distance = INFINITY;
+
+    // Start with a fully white color. The kernel scales the light each time the
+    // ray bounces off of a surface, based on how much of each light component
+    // the surface absorbs.
+    float3 color = float3(1.0f, 1.0f, 1.0f);
+
+    float3 accumulatedColor = float3(0.0f, 0.0f, 0.0f);
+
+    // Create an intersector to test for intersection between the ray and the geometry in the scene.
+    intersector<triangle_data, instancing> i;
+
+    // If the sample isn't using intersection functions, provide some hints to Metal for
+    // better performance.
+    if (!useIntersectionFunctions) {
+        i.assume_geometry_type(geometry_type::triangle);
+        i.force_opacity(forced_opacity::opaque);
+    }
+
+    typename intersector<triangle_data, instancing>::result_type intersection;
+
+    // Simulate up to three ray bounces. Each bounce propagates light backward along the
+    // ray's path toward the camera.
+    for (int bounce = 0; bounce < 3; bounce++) {
+        // Get the closest intersection, not the first intersection. This is the default, but
+        // the sample adjusts this property below when it casts shadow rays.
+        i.accept_any_intersection(false);
+
+        // Check for intersection between the ray and the acceleration structure. If the sample
+        // isn't using intersection functions, it doesn't need to include one.
+        if (useIntersectionFunctions)
+            intersection = i.intersect(ray, accelerationStructure, bounce == 0 ? RAY_MASK_PRIMARY : RAY_MASK_SECONDARY, intersectionFunctionTable);
+        else
+            intersection = i.intersect(ray, accelerationStructure, bounce == 0 ? RAY_MASK_PRIMARY : RAY_MASK_SECONDARY);
+
+        // Stop if the ray didn't hit anything and has bounced out of the scene.
+        if (intersection.type == intersection_type::none)
+            break;
+
+        unsigned int instanceIndex = intersection.instance_id;
+
+        // Look up the mask for this instance, which indicates what type of geometry the ray hit.
+        unsigned int mask = instances[instanceIndex].mask;
+
+        // If the ray hit a light source, set the color to white, and stop immediately.
+        if (mask == GEOMETRY_MASK_LIGHT) {
+            accumulatedColor = float3(1.0f, 1.0f, 1.0f);
+            break;
+        }
+
+        // The ray hit something. Look up the transformation matrix for this instance.
+        float4x4 objectToWorldSpaceTransform(1.0f);
+
+        for (int column = 0; column < 4; column++)
+            for (int row = 0; row < 3; row++)
+                objectToWorldSpaceTransform[column][row] = instances[instanceIndex].transformationMatrix[column][row];
+
+        // Compute the intersection point in world space.
+        float3 worldSpaceIntersectionPoint = ray.origin + ray.direction * intersection.distance;
+
+        unsigned primitiveIndex = intersection.primitive_id;
+        unsigned int geometryIndex = instances[instanceIndex].accelerationStructureIndex;
+        float2 barycentric_coords = intersection.triangle_barycentric_coord;
+
+        float3 worldSpaceSurfaceNormal = 0.0f;
+        float3 surfaceColor = 0.0f;
+
+        if (mask & GEOMETRY_MASK_TRIANGLE) {
+            Triangle triangle;
+
+            float3 objectSpaceSurfaceNormal;
+            if (usePerPrimitiveData) {
+                // Per-primitive data points to data from the specified buffer as was configured in the MTLAccelerationStructureTriangleGeometryDescriptor.
+                triangle = *(const device Triangle*)intersection.primitive_data;
+            } else
+            {
+                // The ray hit a triangle. Look up the corresponding geometry's normal and UV buffers.
+                device TriangleResources & triangleResources = *(device TriangleResources *)((device char *)resources + resourcesStride * geometryIndex);
+
+                triangle.normals[0] = triangleResources.vertexNormals[triangleResources.indices[primitiveIndex * 3 + 0]];
+                triangle.normals[1] = triangleResources.vertexNormals[triangleResources.indices[primitiveIndex * 3 + 1]];
+                triangle.normals[2] = triangleResources.vertexNormals[triangleResources.indices[primitiveIndex * 3 + 2]];
+
+                triangle.colors[0] = triangleResources.vertexColors[triangleResources.indices[primitiveIndex * 3 + 0]];
+                triangle.colors[1] = triangleResources.vertexColors[triangleResources.indices[primitiveIndex * 3 + 1]];
+                triangle.colors[2] = triangleResources.vertexColors[triangleResources.indices[primitiveIndex * 3 + 2]];
+            }
+
+            // Interpolate the vertex normal at the intersection point.
+            objectSpaceSurfaceNormal = interpolateVertexAttribute(triangle.normals, barycentric_coords);
+
+            // Interpolate the vertex color at the intersection point.
+            surfaceColor = interpolateVertexAttribute(triangle.colors, barycentric_coords);
+
+            // Transform the normal from object to world space.
+            worldSpaceSurfaceNormal = normalize(transformDirection(objectSpaceSurfaceNormal, objectToWorldSpaceTransform));
+        }
+        else if (mask & GEOMETRY_MASK_SPHERE) {
+            Sphere sphere;
+            if (usePerPrimitiveData) {
+                // Per-primitive data points to data from the specified buffer as was configured in the MTLAccelerationStructureBoundingBoxGeometryDescriptor.
+                sphere = *(const device Sphere*)intersection.primitive_data;
+            } else
+            {
+                // The ray hit a sphere. Look up the corresponding sphere buffer.
+                device SphereResources & sphereResources = *(device SphereResources *)((device char *)resources + resourcesStride * geometryIndex);
+                sphere = sphereResources.spheres[primitiveIndex];
+            }
+
+            // Transform the sphere's origin from object space to world space.
+            float3 worldSpaceOrigin = transformPoint(sphere.origin, objectToWorldSpaceTransform);
+
+            // Compute the surface normal directly in world space.
+            worldSpaceSurfaceNormal = normalize(worldSpaceIntersectionPoint - worldSpaceOrigin);
+
+            // The sphere is a uniform color, so you don't need to interpolate the color across the surface.
+            surfaceColor = sphere.color;
+        }
+
+        dstTex.write(float4(accumulatedColor, 1.0f), tid);
+
+        // Choose a random light source to sample.
+        float lightSample = halton(offset + uniforms.frameIndex, 2 + bounce * 5 + 0);
+        unsigned int lightIndex = min((unsigned int)(lightSample * uniforms.lightCount), uniforms.lightCount - 1);
+
+        // Choose a random point to sample on the light source.
+        float2 r = float2(halton(offset + uniforms.frameIndex, 2 + bounce * 5 + 1),
+                          halton(offset + uniforms.frameIndex, 2 + bounce * 5 + 2));
+
+        float3 worldSpaceLightDirection;
+        float3 lightColor;
+        float lightDistance;
+
+        // Sample the lighting between the intersection point and the point on the area light.
+        sampleAreaLight(areaLights[lightIndex], r, worldSpaceIntersectionPoint, worldSpaceLightDirection,
+                        lightColor, lightDistance);
+
+        // Scale the light color by the cosine of the angle between the light direction and
+        // surface normal.
+        lightColor *= saturate(dot(worldSpaceSurfaceNormal, worldSpaceLightDirection));
+
+        // Scale the light color by the number of lights to compensate for the fact that
+        // the sample samples only one light source at random.
+        lightColor *= uniforms.lightCount;
+
+        // Scale the ray color by the color of the surface to simulate the surface absorbing light.
+        color *= surfaceColor;
+
+        // Compute the shadow ray. The shadow ray checks whether the sample position on the
+        // light source is visible from the current intersection point.
+        // If it is, the kernel adds lighting to the output image.
+        struct ray shadowRay;
+
+        // Add a small offset to the intersection point to avoid intersecting the same
+        // triangle again.
+        shadowRay.origin = worldSpaceIntersectionPoint + worldSpaceSurfaceNormal * 1e-3f;
+
+        // Travel toward the light source.
+        shadowRay.direction = worldSpaceLightDirection;
+
+        // Don't overshoot the light source.
+        shadowRay.max_distance = lightDistance - 1e-3f;
+
+        // Shadow rays check only whether there is an object between the intersection point
+        // and the light source. Tell Metal to return after finding any intersection.
+        i.accept_any_intersection(true);
+
+        if (useIntersectionFunctions)
+            intersection = i.intersect(shadowRay, accelerationStructure, RAY_MASK_SHADOW, intersectionFunctionTable);
+        else
+            intersection = i.intersect(shadowRay, accelerationStructure, RAY_MASK_SHADOW);
+
+        // If there was no intersection, then the light source is visible from the original
+        // intersection point. Add the light's contribution to the image.
+        if (intersection.type == intersection_type::none)
+            accumulatedColor += lightColor * color;
+
+        // Choose a random direction to continue the path of the ray. This causes light to
+        // bounce between surfaces. An app might evaluate a more complicated equation to
+        // calculate the amount of light that reflects between intersection points. However,
+        // all the math in this kernel cancels out because this app assumes a simple diffuse
+        // BRDF and samples the rays with a cosine distribution over the hemisphere (importance
+        // sampling). This requires that the kernel only multiply the colors together. This
+        // sampling strategy also reduces the amount of noise in the output image.
+        r = float2(halton(offset + uniforms.frameIndex, 2 + bounce * 5 + 3),
+                   halton(offset + uniforms.frameIndex, 2 + bounce * 5 + 4));
+
+        float3 worldSpaceSampleDirection = sampleCosineWeightedHemisphere(r);
+        worldSpaceSampleDirection = alignHemisphereWithNormal(worldSpaceSampleDirection, worldSpaceSurfaceNormal);
+
+        ray.origin = worldSpaceIntersectionPoint + worldSpaceSurfaceNormal * 1e-3f;
+        ray.direction = worldSpaceSampleDirection;
+    }
+
+    // Average this frame's sample with all of the previous frames.
+    if (uniforms.frameIndex > 0) {
+        float3 prevColor = prevTex.read(tid).xyz;
+        prevColor *= uniforms.frameIndex;
+
+        accumulatedColor += prevColor;
+        accumulatedColor /= (uniforms.frameIndex + 1);
+    }
+
+    dstTex.write(float4(accumulatedColor, 1.0f), tid);
+}
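The multiply-then-divide at the end is an incremental mean: if $c_{N-1}$ is the average of frames $0..N-1$ and $s_N$ is the new frame's estimate (with `frameIndex` $= N$), then

$$c_N = \frac{N\,c_{N-1} + s_N}{N + 1}$$

is the average over all $N+1$ frames. The image therefore converges progressively without storing per-frame history, and `window_resized` resetting `frame_index` to 0 on the CPU side restarts the accumulation.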
+
+// Screen filling quad in normalized device coordinates.
+constant float2 quadVertices[] = {
+    float2(-1, -1),
+    float2(-1,  1),
+    float2( 1,  1),
+    float2(-1, -1),
+    float2( 1,  1),
+    float2( 1, -1)
+};
+
+struct CopyVertexOut {
+    float4 position [[position]];
+    float2 uv;
+};
+
+// Simple vertex shader that passes through NDC quad positions.
+vertex CopyVertexOut copyVertex(unsigned short vid [[vertex_id]]) {
+    float2 position = quadVertices[vid];
+
+    CopyVertexOut out;
+
+    out.position = float4(position, 0, 1);
+    out.uv = position * 0.5f + 0.5f;
+
+    return out;
+}
+
+// Simple fragment shader that copies a texture and applies a simple tonemapping function.
+fragment float4 copyFragment(CopyVertexOut in [[stage_in]],
+                             texture2d<float> tex)
+{
+    constexpr sampler sam(min_filter::nearest, mag_filter::nearest, mip_filter::none);
+
+    float3 color = tex.sample(sam, in.uv).xyz;
+
+    // Apply a simple tonemapping function to reduce the dynamic range of the
+    // input image into a range which the screen can display.
+    color = color / (1.0f + color);
+
+    return float4(color, 1.0f);
+}
diff --git a/third_party/rust/metal/examples/raytracing/shaders.metallib b/third_party/rust/metal/examples/raytracing/shaders.metallib
new file mode 100644
index 000000000000..0965a64ff59c
Binary files /dev/null and b/third_party/rust/metal/examples/raytracing/shaders.metallib differ
diff --git a/third_party/rust/metal/examples/reflection/main.rs b/third_party/rust/metal/examples/reflection/main.rs
index efe9fc2994e0..058199cc35f7 100644
--- a/third_party/rust/metal/examples/reflection/main.rs
+++ b/third_party/rust/metal/examples/reflection/main.rs
@@ -8,36 +8,36 @@
 use metal::*;
 use objc::rc::autoreleasepool;
 
-const PROGRAM: &'static str = "
-    #include <metal_stdlib>\n\
+const PROGRAM: &'static str = r"
+    #include <metal_stdlib>
 
-    using namespace metal;\n\
+    using namespace metal;
 
-    typedef struct {\n\
-        float2 position;\n\
-        float3 color;\n\
-    } vertex_t;\n\
+    typedef struct {
+        float2 position;
+        float3 color;
+    } vertex_t;
 
-    struct ColorInOut {\n\
-        float4 position [[position]];\n\
-        float4 color;\n\
-    };\n\
+    struct ColorInOut {
+        float4 position [[position]];
+        float4 color;
+    };
 
-    vertex ColorInOut vs(device vertex_t* vertex_array [[ buffer(0) ]],\n\
-                         unsigned int vid [[ vertex_id ]])\n\
-    {\n\
-        ColorInOut out;\n\
+    vertex ColorInOut vs(device vertex_t* vertex_array [[ buffer(0) ]],
+                         unsigned int vid [[ vertex_id ]])
+    {
+        ColorInOut out;
 
-        out.position = float4(float2(vertex_array[vid].position), 0.0, 1.0);\n\
-        out.color = float4(float3(vertex_array[vid].color), 1.0);\n\
+        out.position = float4(float2(vertex_array[vid].position), 0.0, 1.0);
+        out.color = float4(float3(vertex_array[vid].color), 1.0);
 
-        return out;\n\
-    }\n\
+        return out;
+    }
 
-    fragment float4 ps(ColorInOut in [[stage_in]])\n\
-    {\n\
-        return in.color;\n\
-    };\n\
+    fragment float4 ps(ColorInOut in [[stage_in]])
+    {
+        return in.color;
+    };
 ";
 
 fn main() {
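The `reflection` hunk above only changes string syntax: the escaped literal with `\n\` line continuations becomes a raw string, which in Rust keeps newlines and backslashes verbatim, so the embedded MSL needs no escaping. A minimal illustration of the equivalence:

```rust
// Raw strings (r"...") keep line breaks literally; the two forms are equal.
let escaped = "line one\nline two";
let raw = r"line one
line two";
assert_eq!(escaped, raw);
```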
diff --git a/third_party/rust/metal/examples/shader-dylib/main.rs b/third_party/rust/metal/examples/shader-dylib/main.rs
index b713e20e0672..60b6aa0d0c90 100644
--- a/third_party/rust/metal/examples/shader-dylib/main.rs
+++ b/third_party/rust/metal/examples/shader-dylib/main.rs
@@ -13,7 +13,7 @@ use winit::{
 use std::mem;
 
 struct App {
-    pub device: Device,
+    pub _device: Device,
     pub command_queue: CommandQueue,
     pub layer: MetalLayer,
     pub image_fill_cps: ComputePipelineState,
@@ -98,7 +98,7 @@ impl App {
             .unwrap();
 
         Self {
-            device,
+            _device: device,
             command_queue,
             layer,
             image_fill_cps,
diff --git a/third_party/rust/metal/src/accelerator_structure.rs b/third_party/rust/metal/src/accelerator_structure.rs
new file mode 100644
index 000000000000..219a69e9efc7
--- /dev/null
+++ b/third_party/rust/metal/src/accelerator_structure.rs
@@ -0,0 +1,348 @@
+// Copyright 2023 GFX developers
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use super::*;
+
+bitflags! {
+    #[derive(Default)]
+    pub struct MTLAccelerationStructureInstanceOptions: u32 {
+        const None = 0;
+        const DisableTriangleCulling = (1 << 0);
+        const TriangleFrontFacingWindingCounterClockwise = (1 << 1);
+        const Opaque = (1 << 2);
+        const NonOpaque = (1 << 3);
+    }
+}
+
+/// See <https://developer.apple.com/documentation/metal/mtlaccelerationstructureinstancedescriptortype>
+#[repr(u64)]
+#[allow(non_camel_case_types)]
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub enum MTLAccelerationStructureInstanceDescriptorType {
+    Default = 0,
+    UserID = 1,
+    Motion = 2,
+}
+
+#[derive(Clone, Copy, PartialEq, Debug, Default)]
+#[repr(C)]
+pub struct MTLAccelerationStructureInstanceDescriptor {
+    pub transformation_matrix: [[f32; 3]; 4],
+    pub options: MTLAccelerationStructureInstanceOptions,
+    pub mask: u32,
+    pub intersection_function_table_offset: u32,
+    pub acceleration_structure_index: u32,
+}
+
+#[derive(Clone, Copy, PartialEq, Debug, Default)]
+#[repr(C)]
+pub struct MTLAccelerationStructureUserIDInstanceDescriptor {
+    pub transformation_matrix: [[f32; 3]; 4],
+    pub options: MTLAccelerationStructureInstanceOptions,
+    pub mask: u32,
+    pub intersection_function_table_offset: u32,
+    pub acceleration_structure_index: u32,
+    pub user_id: u32,
+}
+
+pub enum MTLAccelerationStructureDescriptor {}
+
+foreign_obj_type! {
+    type CType = MTLAccelerationStructureDescriptor;
+    pub struct AccelerationStructureDescriptor;
+    type ParentType = NsObject;
+}
+
+pub enum MTLPrimitiveAccelerationStructureDescriptor {}
+
+foreign_obj_type! {
+    type CType = MTLPrimitiveAccelerationStructureDescriptor;
+    pub struct PrimitiveAccelerationStructureDescriptor;
+    type ParentType = AccelerationStructureDescriptor;
+}
+
+impl PrimitiveAccelerationStructureDescriptor {
+    pub fn descriptor() -> Self {
+        unsafe {
+            let class = class!(MTLPrimitiveAccelerationStructureDescriptor);
+            msg_send![class, descriptor]
+        }
+    }
+}
+
+impl PrimitiveAccelerationStructureDescriptorRef {
+    pub fn set_geometry_descriptors(
+        &self,
+        descriptors: &ArrayRef<AccelerationStructureGeometryDescriptor>,
+    ) {
+        unsafe { msg_send![self, setGeometryDescriptors: descriptors] }
+    }
+}
+
+pub enum MTLAccelerationStructure {}
+
+foreign_obj_type! {
+    type CType = MTLAccelerationStructure;
+    pub struct AccelerationStructure;
+    type ParentType = Resource;
+}
+
+pub enum MTLAccelerationStructureGeometryDescriptor {}
+
+foreign_obj_type! {
+    type CType = MTLAccelerationStructureGeometryDescriptor;
+    pub struct AccelerationStructureGeometryDescriptor;
+    type ParentType = NsObject;
+}
+
+impl AccelerationStructureGeometryDescriptorRef {
+    pub fn set_opaque(&self, opaque: bool) {
+        unsafe { msg_send![self, setOpaque: opaque] }
+    }
+
+    pub fn set_primitive_data_buffer(&self, buffer: Option<&BufferRef>) {
+        unsafe { msg_send![self, setPrimitiveDataBuffer: buffer] }
+    }
+
+    pub fn set_primitive_data_stride(&self, stride: NSUInteger) {
+        unsafe { msg_send![self, setPrimitiveDataStride: stride] }
+    }
+
+    pub fn set_primitive_data_element_size(&self, size: NSUInteger) {
+        unsafe { msg_send![self, setPrimitiveDataElementSize: size] }
+    }
+
+    pub fn set_intersection_function_table_offset(&self, offset: NSUInteger) {
+        unsafe { msg_send![self, setIntersectionFunctionTableOffset: offset] }
+    }
+}
+
+pub enum MTLAccelerationStructureTriangleGeometryDescriptor {}
+
{ + type CType = MTLAccelerationStructureTriangleGeometryDescriptor; + pub struct AccelerationStructureTriangleGeometryDescriptor; + type ParentType = AccelerationStructureGeometryDescriptor; +} + +impl AccelerationStructureTriangleGeometryDescriptor { + pub fn descriptor() -> Self { + unsafe { + let class = class!(MTLAccelerationStructureTriangleGeometryDescriptor); + msg_send![class, descriptor] + } + } +} + +impl AccelerationStructureTriangleGeometryDescriptorRef { + pub fn set_index_buffer(&self, buffer: Option<&BufferRef>) { + unsafe { msg_send![self, setIndexBuffer: buffer] } + } + + pub fn set_index_buffer_offset(&self, offset: NSUInteger) { + unsafe { msg_send![self, setIndexBufferOffset: offset] } + } + + pub fn set_index_type(&self, t: MTLIndexType) { + unsafe { msg_send![self, setIndexType: t] } + } + + pub fn set_vertex_buffer(&self, buffer: Option<&BufferRef>) { + unsafe { msg_send![self, setVertexBuffer: buffer] } + } + + pub fn set_vertex_buffer_offset(&self, offset: NSUInteger) { + unsafe { msg_send![self, setVertexBufferOffset: offset] } + } + + pub fn set_vertex_stride(&self, stride: NSUInteger) { + unsafe { msg_send![self, setVertexStride: stride] } + } + + pub fn set_triangle_count(&self, count: NSUInteger) { + unsafe { msg_send![self, setTriangleCount: count] } + } + + pub fn set_vertex_format(&self, format: MTLAttributeFormat) { + unsafe { msg_send![self, setVertexFormat: format] } + } + + pub fn set_transformation_matrix_buffer(&self, buffer: Option<&BufferRef>) { + unsafe { msg_send![self, setTransformationMatrixBuffer: buffer] } + } + + pub fn set_transformation_matrix_buffer_offset(&self, offset: NSUInteger) { + unsafe { msg_send![self, setTransformationMatrixBufferOffset: offset] } + } +} + +pub enum MTLAccelerationStructureBoundingBoxGeometryDescriptor {} + +foreign_obj_type! { + type CType = MTLAccelerationStructureBoundingBoxGeometryDescriptor; + pub struct AccelerationStructureBoundingBoxGeometryDescriptor; + type ParentType = AccelerationStructureGeometryDescriptor; +} + +impl AccelerationStructureBoundingBoxGeometryDescriptor { + pub fn descriptor() -> Self { + unsafe { + let class = class!(MTLAccelerationStructureBoundingBoxGeometryDescriptor); + msg_send![class, descriptor] + } + } +} + +impl AccelerationStructureBoundingBoxGeometryDescriptorRef { + pub fn set_bounding_box_buffer(&self, buffer: Option<&BufferRef>) { + unsafe { msg_send![self, setBoundingBoxBuffer: buffer] } + } + + pub fn set_bounding_box_count(&self, count: NSUInteger) { + unsafe { msg_send![self, setBoundingBoxCount: count] } + } +} + +pub enum MTLInstanceAccelerationStructureDescriptor {} + +foreign_obj_type! 
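A sketch of how the triangle-geometry setters above compose; `vertex_buffer`, `index_buffer`, and `triangle_count` are assumed to exist, holding packed `f32` positions and `u16` indices:

```rust
let geometry = AccelerationStructureTriangleGeometryDescriptor::descriptor();
geometry.set_vertex_buffer(Some(&vertex_buffer));
geometry.set_vertex_stride(12); // three f32 components per vertex
geometry.set_index_buffer(Some(&index_buffer));
geometry.set_index_type(MTLIndexType::UInt16);
geometry.set_triangle_count(triangle_count);
geometry.set_opaque(true); // inherited from AccelerationStructureGeometryDescriptorRef
```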
{
+    type CType = MTLInstanceAccelerationStructureDescriptor;
+    pub struct InstanceAccelerationStructureDescriptor;
+    type ParentType = AccelerationStructureDescriptor;
+}
+
+impl InstanceAccelerationStructureDescriptor {
+    pub fn descriptor() -> Self {
+        unsafe {
+            let class = class!(MTLInstanceAccelerationStructureDescriptor);
+            msg_send![class, descriptor]
+        }
+    }
+}
+
+impl InstanceAccelerationStructureDescriptorRef {
+    pub fn set_instance_descriptor_type(&self, ty: MTLAccelerationStructureInstanceDescriptorType) {
+        unsafe { msg_send![self, setInstanceDescriptorType: ty] }
+    }
+
+    pub fn set_instanced_acceleration_structures(
+        &self,
+        instances: &ArrayRef<AccelerationStructure>,
+    ) {
+        unsafe { msg_send![self, setInstancedAccelerationStructures: instances] }
+    }
+
+    pub fn set_instance_count(&self, count: NSUInteger) {
+        unsafe { msg_send![self, setInstanceCount: count] }
+    }
+
+    pub fn set_instance_descriptor_buffer(&self, buffer: &BufferRef) {
+        unsafe { msg_send![self, setInstanceDescriptorBuffer: buffer] }
+    }
+
+    pub fn set_instance_descriptor_buffer_offset(&self, offset: NSUInteger) {
+        unsafe { msg_send![self, setInstanceDescriptorBufferOffset: offset] }
+    }
+
+    pub fn set_instance_descriptor_stride(&self, stride: NSUInteger) {
+        unsafe { msg_send![self, setInstanceDescriptorStride: stride] }
+    }
+}
+
+pub enum MTLAccelerationStructureCommandEncoder {}
+
+foreign_obj_type! {
+    type CType = MTLAccelerationStructureCommandEncoder;
+    pub struct AccelerationStructureCommandEncoder;
+    type ParentType = CommandEncoder;
+}
+
+impl AccelerationStructureCommandEncoderRef {
+    pub fn build_acceleration_structure(
+        &self,
+        acceleration_structure: &self::AccelerationStructureRef,
+        descriptor: &self::AccelerationStructureDescriptorRef,
+        scratch_buffer: &BufferRef,
+        scratch_buffer_offset: NSUInteger,
+    ) {
+        unsafe {
+            msg_send![
+                self,
+                buildAccelerationStructure: acceleration_structure
+                descriptor: descriptor
+                scratchBuffer: scratch_buffer
+                scratchBufferOffset: scratch_buffer_offset]
+        }
+    }
+
+    pub fn write_compacted_acceleration_structure_size(
+        &self,
+        acceleration_structure: &AccelerationStructureRef,
+        to_buffer: &BufferRef,
+        offset: NSUInteger,
+    ) {
+        unsafe {
+            msg_send![
+                self,
+                writeCompactedAccelerationStructureSize: acceleration_structure
+                toBuffer: to_buffer
+                offset: offset
+            ]
+        }
+    }
+
+    pub fn copy_and_compact_acceleration_structure(
+        &self,
+        source: &AccelerationStructureRef,
+        destination: &AccelerationStructureRef,
+    ) {
+        unsafe {
+            msg_send![
+                self,
+                copyAndCompactAccelerationStructure: source
+                toAccelerationStructure: destination
+            ]
+        }
+    }
+}
+
+pub enum MTLIntersectionFunctionTableDescriptor {}
+
+foreign_obj_type! {
+    type CType = MTLIntersectionFunctionTableDescriptor;
+    pub struct IntersectionFunctionTableDescriptor;
+    type ParentType = NsObject;
+}
+
+impl IntersectionFunctionTableDescriptor {
+    pub fn new() -> Self {
+        unsafe {
+            let class = class!(MTLIntersectionFunctionTableDescriptor);
+            let this: *mut <Self as ForeignType>::CType = msg_send![class, alloc];
+            msg_send![this, init]
+        }
+    }
+}
+
+impl IntersectionFunctionTableDescriptorRef {
+    pub fn set_function_count(&self, count: NSUInteger) {
+        unsafe { msg_send![self, setFunctionCount: count] }
+    }
+}
+
+pub enum MTLIntersectionFunctionTable {}
+
+foreign_obj_type!
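Together with the `DeviceRef` additions further down in this patch (`acceleration_structure_sizes_with_descriptor`, `new_acceleration_structure_with_size`) and the new command-buffer hook, this encoder supports the usual build flow. A hedged sketch, assuming `device` and `queue` exist, `desc` is a populated `PrimitiveAccelerationStructureDescriptor`, and `MTLAccelerationStructureSizes` exposes `acceleration_structure_size`/`build_scratch_buffer_size` fields as in the Metal API (the fields are elided by the hunk above):

```rust
// Query sizes, allocate the structure and scratch space, then encode the build.
let sizes = device.acceleration_structure_sizes_with_descriptor(&desc);
let accel = device.new_acceleration_structure_with_size(sizes.acceleration_structure_size);
let scratch = device.new_buffer(
    sizes.build_scratch_buffer_size,
    MTLResourceOptions::StorageModePrivate,
);

let command_buffer = queue.new_command_buffer();
let encoder = command_buffer.new_acceleration_structure_command_encoder();
encoder.build_acceleration_structure(&accel, &desc, &scratch, 0);
encoder.end_encoding();
command_buffer.commit();
```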
{ + type CType = MTLIntersectionFunctionTable; + pub struct IntersectionFunctionTable; + type ParentType = Resource; +} + +impl IntersectionFunctionTableRef { + pub fn set_function(&self, function: &FunctionHandleRef, index: NSUInteger) { + unsafe { msg_send![self, setFunction: function atIndex: index] } + } +} diff --git a/third_party/rust/metal/src/argument.rs b/third_party/rust/metal/src/argument.rs index 1bf556597742..25f7fb20915b 100644 --- a/third_party/rust/metal/src/argument.rs +++ b/third_party/rust/metal/src/argument.rs @@ -8,6 +8,7 @@ use super::{MTLTextureType, NSUInteger}; use objc::runtime::{NO, YES}; +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] @@ -107,7 +108,11 @@ pub enum MTLDataType { RGB9E5Float = 77, } +/// See #[repr(u64)] +#[deprecated( + note = "Since: iOS 8.0–16.0, iPadOS 8.0–16.0, macOS 10.11–13.0, Mac Catalyst 13.1–16.0, tvOS 9.0–16.0" +)] #[allow(non_camel_case_types)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLArgumentType { @@ -119,6 +124,7 @@ pub enum MTLArgumentType { Imageblock = 17, } +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] @@ -128,12 +134,12 @@ pub enum MTLArgumentAccess { WriteOnly = 2, } +/// See pub enum MTLStructMember {} foreign_obj_type! { type CType = MTLStructMember; pub struct StructMember; - pub struct StructMemberRef; } impl StructMemberRef { @@ -166,7 +172,6 @@ pub enum MTLStructMemberArray {} foreign_obj_type! { type CType = MTLStructMemberArray; pub struct StructMemberArray; - pub struct StructMemberArrayRef; } impl StructMemberArrayRef { @@ -179,12 +184,12 @@ impl StructMemberArrayRef { } } +/// See pub enum MTLStructType {} foreign_obj_type! { type CType = MTLStructType; pub struct StructType; - pub struct StructTypeRef; } impl StructTypeRef { @@ -199,12 +204,12 @@ impl StructTypeRef { } } +/// See pub enum MTLArrayType {} foreign_obj_type! { type CType = MTLArrayType; pub struct ArrayType; - pub struct ArrayTypeRef; } impl ArrayTypeRef { @@ -229,12 +234,15 @@ impl ArrayTypeRef { } } +/// +#[deprecated( + note = "Since iOS 8.0–16.0, iPadOS 8.0–16.0, macOS 10.11–13.0, Mac Catalyst 13.1–16.0, tvOS 9.0–16.0" +)] pub enum MTLArgument {} foreign_obj_type! { type CType = MTLArgument; pub struct Argument; - pub struct ArgumentRef; } impl ArgumentRef { @@ -300,12 +308,12 @@ impl ArgumentRef { } } +/// See pub enum MTLArgumentDescriptor {} foreign_obj_type! { type CType = MTLArgumentDescriptor; pub struct ArgumentDescriptor; - pub struct ArgumentDescriptorRef; } impl ArgumentDescriptor { diff --git a/third_party/rust/metal/src/buffer.rs b/third_party/rust/metal/src/buffer.rs index e334c54e501f..8f3108af96dc 100644 --- a/third_party/rust/metal/src/buffer.rs +++ b/third_party/rust/metal/src/buffer.rs @@ -7,13 +7,13 @@ use super::*; +/// See pub enum MTLBuffer {} foreign_obj_type! 
{
     type CType = MTLBuffer;
     pub struct Buffer;
-    pub struct BufferRef;
-    type ParentType = ResourceRef;
+    type ParentType = Resource;
 }
 
 impl BufferRef {
@@ -64,4 +64,8 @@ impl BufferRef {
     pub fn remove_all_debug_markers(&self) {
         unsafe { msg_send![self, removeAllDebugMarkers] }
     }
+
+    pub fn gpu_address(&self) -> u64 {
+        unsafe { msg_send![self, gpuAddress] }
+    }
 }
diff --git a/third_party/rust/metal/src/capturedescriptor.rs b/third_party/rust/metal/src/capturedescriptor.rs
index 6f6e369f1a71..1e620cc9a0e8 100644
--- a/third_party/rust/metal/src/capturedescriptor.rs
+++ b/third_party/rust/metal/src/capturedescriptor.rs
@@ -9,7 +9,7 @@ use super::*;
 
 use std::path::Path;
 
-/// https://developer.apple.com/documentation/metal/mtlcapturedestination?language=objc
+/// See <https://developer.apple.com/documentation/metal/mtlcapturedestination>
 #[repr(u64)]
 #[allow(non_camel_case_types)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
@@ -18,13 +18,12 @@ pub enum MTLCaptureDestination {
     GpuTraceDocument = 2,
 }
 
-/// https://developer.apple.com/documentation/metal/mtlcapturedescriptor
+/// See <https://developer.apple.com/documentation/metal/mtlcapturedescriptor>
 pub enum MTLCaptureDescriptor {}
 
 foreign_obj_type! {
     type CType = MTLCaptureDescriptor;
     pub struct CaptureDescriptor;
-    pub struct CaptureDescriptorRef;
 }
 
 impl CaptureDescriptor {
@@ -37,42 +36,40 @@ impl CaptureDescriptor {
 }
 
 impl CaptureDescriptorRef {
-    /// https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237248-captureobject
+    /// See <https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237248-captureobject>
     pub fn set_capture_device(&self, device: &DeviceRef) {
         unsafe { msg_send![self, setCaptureObject: device] }
     }
 
-    /// https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237248-captureobject
+    /// See <https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237248-captureobject>
     pub fn set_capture_scope(&self, scope: &CaptureScopeRef) {
         unsafe { msg_send![self, setCaptureObject: scope] }
     }
 
-    /// https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237248-captureobject
+    /// See <https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237248-captureobject>
     pub fn set_capture_command_queue(&self, command_queue: &CommandQueueRef) {
         unsafe { msg_send![self, setCaptureObject: command_queue] }
     }
 
-    /// https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237250-outputurl
+    /// See <https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237250-outputurl>
     pub fn output_url(&self) -> &Path {
-        let output_url = unsafe { msg_send![self, outputURL] };
-        let output_url = nsstring_as_str(output_url);
-
-        Path::new(output_url)
+        let url: &URLRef = unsafe { msg_send![self, outputURL] };
+        Path::new(url.path())
     }
 
-    /// https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237250-outputurl
+    /// See <https://developer.apple.com/documentation/metal/mtlcapturedescriptor/3237250-outputurl>
     pub fn set_output_url<P: AsRef<Path>>(&self, output_url: P) {
-        let output_url = nsstring_from_str(output_url.as_ref().to_str().unwrap());
-
+        let output_url_string = String::from("file://") + output_url.as_ref().to_str().unwrap();
+        let output_url = URL::new_with_string(&output_url_string);
         unsafe { msg_send![self, setOutputURL: output_url] }
     }
 
-    /// https://developer.apple.com/documentation/metal/mtlcapturedescriptor?language=objc
+    /// See <https://developer.apple.com/documentation/metal/mtlcapturedescriptor>
     pub fn destination(&self) -> MTLCaptureDestination {
         unsafe { msg_send![self, destination] }
     }
 
-    /// https://developer.apple.com/documentation/metal/mtlcapturedescriptor?language=objc
+    /// See <https://developer.apple.com/documentation/metal/mtlcapturedescriptor>
     pub fn set_destination(&self, destination: MTLCaptureDestination) {
         unsafe { msg_send![self, setDestination: destination] }
     }
diff --git a/third_party/rust/metal/src/capturemanager.rs b/third_party/rust/metal/src/capturemanager.rs
index 95180c4239cb..7c9c407cb152 100644
--- a/third_party/rust/metal/src/capturemanager.rs
+++ b/third_party/rust/metal/src/capturemanager.rs
@@ -8,12 +8,12 @@ use super::*;
 
 use std::ffi::CStr;
 
+/// See <https://developer.apple.com/documentation/metal/mtlcapturescope>
 pub enum MTLCaptureScope {}
 
 foreign_obj_type!
{ type CType = MTLCaptureScope; pub struct CaptureScope; - pub struct CaptureScopeRef; } impl CaptureScopeRef { @@ -33,12 +33,12 @@ impl CaptureScopeRef { } } +/// See pub enum MTLCaptureManager {} foreign_obj_type! { type CType = MTLCaptureManager; pub struct CaptureManager; - pub struct CaptureManagerRef; } impl CaptureManager { @@ -70,13 +70,19 @@ impl CaptureManagerRef { unsafe { msg_send![self, setDefaultCaptureScope: scope] } } - /// https://developer.apple.com/documentation/metal/mtlcapturemanager/3237259-startcapture + /// Starts capturing with the capture session defined by a descriptor object. + /// + /// This function will panic if Metal capture is not enabled. Capture can be enabled by + /// either: + /// 1. Running from Xcode + /// 2. Setting the environment variable `METAL_CAPTURE_ENABLED=1` + /// 3. Adding an info.plist file containing the `MetalCaptureEnabled` key set to `YES` pub fn start_capture(&self, descriptor: &CaptureDescriptorRef) -> Result<(), String> { unsafe { - try_objc! { err => + Ok(try_objc! { err => msg_send![self, startCaptureWithDescriptor: descriptor error: &mut err] - } + }) } } @@ -100,7 +106,7 @@ impl CaptureManagerRef { unsafe { msg_send![self, isCapturing] } } - /// https://developer.apple.com/documentation/metal/mtlcapturemanager/3237260-supportsdestination?language=objc + /// See pub fn supports_destination(&self, destination: MTLCaptureDestination) -> bool { unsafe { msg_send![self, supportsDestination: destination] } } diff --git a/third_party/rust/metal/src/commandbuffer.rs b/third_party/rust/metal/src/commandbuffer.rs index 32a1d8fb9474..ec808f80b687 100644 --- a/third_party/rust/metal/src/commandbuffer.rs +++ b/third_party/rust/metal/src/commandbuffer.rs @@ -9,6 +9,7 @@ use super::*; use block::Block; +/// See #[repr(u32)] #[allow(non_camel_case_types)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] @@ -21,6 +22,7 @@ pub enum MTLCommandBufferStatus { Error = 5, } +/// See #[repr(u32)] #[allow(non_camel_case_types)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] @@ -37,6 +39,7 @@ pub enum MTLCommandBufferError { DeviceRemoved = 11, } +/// See #[repr(u32)] #[allow(non_camel_case_types)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] @@ -47,12 +50,12 @@ pub enum MTLDispatchType { type CommandBufferHandler<'a> = Block<(&'a CommandBufferRef,), ()>; +/// See . pub enum MTLCommandBuffer {} foreign_obj_type! 
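Given the panic conditions now documented on `start_capture`, the programmatic flow is worth spelling out. A sketch using the crate's existing `CaptureManager::shared()`/`stop_capture()` entry points; the output path is illustrative:

```rust
let descriptor = CaptureDescriptor::new();
descriptor.set_capture_device(&device);
descriptor.set_destination(MTLCaptureDestination::GpuTraceDocument);
descriptor.set_output_url("/tmp/frame.gputrace");

let capture_manager = CaptureManager::shared();
capture_manager
    .start_capture(&descriptor)
    .expect("capture refused; is METAL_CAPTURE_ENABLED=1 set?");
// ... encode and commit the work to be captured ...
capture_manager.stop_capture();
```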
{ type CType = MTLCommandBuffer; pub struct CommandBuffer; - pub struct CommandBufferRef; } impl CommandBufferRef { @@ -98,6 +101,10 @@ impl CommandBufferRef { unsafe { msg_send![self, addCompletedHandler: block] } } + pub fn add_scheduled_handler(&self, block: &CommandBufferHandler) { + unsafe { msg_send![self, addScheduledHandler: block] } + } + pub fn new_blit_command_encoder(&self) -> &BlitCommandEncoderRef { unsafe { msg_send![self, blitCommandEncoder] } } @@ -120,6 +127,12 @@ impl CommandBufferRef { unsafe { msg_send![self, parallelRenderCommandEncoderWithDescriptor: descriptor] } } + pub fn new_acceleration_structure_command_encoder( + &self, + ) -> &AccelerationStructureCommandEncoderRef { + unsafe { msg_send![self, accelerationStructureCommandEncoder] } + } + pub fn compute_command_encoder_with_dispatch_type( &self, ty: MTLDispatchType, @@ -127,6 +140,13 @@ impl CommandBufferRef { unsafe { msg_send![self, computeCommandEncoderWithDispatchType: ty] } } + pub fn compute_command_encoder_with_descriptor( + &self, + descriptor: &ComputePassDescriptorRef, + ) -> &ComputeCommandEncoderRef { + unsafe { msg_send![self, computeCommandEncoderWithDescriptor: descriptor] } + } + pub fn encode_signal_event(&self, event: &EventRef, new_value: u64) { unsafe { msg_send![self, diff --git a/third_party/rust/metal/src/commandqueue.rs b/third_party/rust/metal/src/commandqueue.rs index 470d8dbe93b9..7c2dda29c949 100644 --- a/third_party/rust/metal/src/commandqueue.rs +++ b/third_party/rust/metal/src/commandqueue.rs @@ -7,12 +7,12 @@ use super::*; +/// See . pub enum MTLCommandQueue {} foreign_obj_type! { type CType = MTLCommandQueue; pub struct CommandQueue; - pub struct CommandQueueRef; } impl CommandQueueRef { diff --git a/third_party/rust/metal/src/computepass.rs b/third_party/rust/metal/src/computepass.rs new file mode 100644 index 000000000000..50774d6871e0 --- /dev/null +++ b/third_party/rust/metal/src/computepass.rs @@ -0,0 +1,103 @@ +use super::*; + +/// See +pub enum MTLComputePassDescriptor {} + +foreign_obj_type! { + type CType = MTLComputePassDescriptor; + pub struct ComputePassDescriptor; +} + +impl ComputePassDescriptor { + /// Creates a default compute pass descriptor with no attachments. + pub fn new<'a>() -> &'a ComputePassDescriptorRef { + unsafe { msg_send![class!(MTLComputePassDescriptor), computePassDescriptor] } + } +} + +impl ComputePassDescriptorRef { + pub fn sample_buffer_attachments( + &self, + ) -> &ComputePassSampleBufferAttachmentDescriptorArrayRef { + unsafe { msg_send![self, sampleBufferAttachments] } + } +} + +/// See +pub enum MTLComputePassSampleBufferAttachmentDescriptorArray {} + +foreign_obj_type! { + type CType = MTLComputePassSampleBufferAttachmentDescriptorArray; + pub struct ComputePassSampleBufferAttachmentDescriptorArray; +} + +impl ComputePassSampleBufferAttachmentDescriptorArrayRef { + pub fn object_at( + &self, + index: NSUInteger, + ) -> Option<&ComputePassSampleBufferAttachmentDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at( + &self, + index: NSUInteger, + attachment: Option<&ComputePassSampleBufferAttachmentDescriptorRef>, + ) { + unsafe { + msg_send![self, setObject:attachment + atIndexedSubscript:index] + } + } +} + +/// See +pub enum MTLComputePassSampleBufferAttachmentDescriptor {} + +foreign_obj_type! 
{ + type CType = MTLComputePassSampleBufferAttachmentDescriptor; + pub struct ComputePassSampleBufferAttachmentDescriptor; +} + +impl ComputePassSampleBufferAttachmentDescriptor { + pub fn new() -> Self { + let class = class!(MTLComputePassSampleBufferAttachmentDescriptor); + unsafe { msg_send![class, new] } + } +} + +impl ComputePassSampleBufferAttachmentDescriptorRef { + pub fn sample_buffer(&self) -> &CounterSampleBufferRef { + unsafe { msg_send![self, sampleBuffer] } + } + + pub fn set_sample_buffer(&self, sample_buffer: &CounterSampleBufferRef) { + unsafe { msg_send![self, setSampleBuffer: sample_buffer] } + } + + pub fn start_of_encoder_sample_index(&self) -> NSUInteger { + unsafe { msg_send![self, startOfEncoderSampleIndex] } + } + + pub fn set_start_of_encoder_sample_index(&self, start_of_encoder_sample_index: NSUInteger) { + unsafe { + msg_send![ + self, + setStartOfEncoderSampleIndex: start_of_encoder_sample_index + ] + } + } + + pub fn end_of_encoder_sample_index(&self) -> NSUInteger { + unsafe { msg_send![self, endOfEncoderSampleIndex] } + } + + pub fn set_end_of_encoder_sample_index(&self, end_of_encoder_sample_index: NSUInteger) { + unsafe { + msg_send![ + self, + setEndOfEncoderSampleIndex: end_of_encoder_sample_index + ] + } + } +} diff --git a/third_party/rust/metal/src/constants.rs b/third_party/rust/metal/src/constants.rs index 0afbdd7e2920..4e0e7a95953d 100644 --- a/third_party/rust/metal/src/constants.rs +++ b/third_party/rust/metal/src/constants.rs @@ -5,6 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] diff --git a/third_party/rust/metal/src/counters.rs b/third_party/rust/metal/src/counters.rs new file mode 100644 index 000000000000..0a39752d2d61 --- /dev/null +++ b/third_party/rust/metal/src/counters.rs @@ -0,0 +1,102 @@ +use crate::MTLStorageMode; + +/// See +pub enum MTLCounterSampleBufferDescriptor {} + +foreign_obj_type! { + type CType = MTLCounterSampleBufferDescriptor; + pub struct CounterSampleBufferDescriptor; +} + +impl CounterSampleBufferDescriptor { + pub fn new() -> Self { + let class = class!(MTLCounterSampleBufferDescriptor); + unsafe { msg_send![class, new] } + } +} + +impl CounterSampleBufferDescriptorRef { + pub fn counter_set(&self) -> &CounterSetRef { + unsafe { msg_send![self, counterSet] } + } + + pub fn set_counter_set(&self, counter_set: &CounterSetRef) { + unsafe { msg_send![self, setCounterSet: counter_set] } + } + + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn sample_count(&self) -> u64 { + unsafe { msg_send![self, sampleCount] } + } + + pub fn set_sample_count(&self, sample_count: u64) { + unsafe { msg_send![self, setSampleCount: sample_count] } + } + + pub fn storage_mode(&self) -> MTLStorageMode { + unsafe { msg_send![self, storageMode] } + } + + pub fn set_storage_mode(&self, storage_mode: MTLStorageMode) { + unsafe { msg_send![self, setStorageMode: storage_mode] } + } +} + +/// See +pub enum MTLCounterSampleBuffer {} + +foreign_obj_type! { + type CType = MTLCounterSampleBuffer; + pub struct CounterSampleBuffer; +} + +/// See +pub enum MTLCounter {} + +foreign_obj_type! 
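How the new compute-pass descriptor plugs into `compute_command_encoder_with_descriptor` from the command-buffer hunk above; `counter_sample_buffer` is assumed to already exist (see the counters sketch further down):

```rust
// Attach a counter sample buffer so the encoder records timestamps
// at its start (sample 0) and end (sample 1).
let pass_descriptor = ComputePassDescriptor::new();
let attachment = pass_descriptor
    .sample_buffer_attachments()
    .object_at(0)
    .unwrap();
attachment.set_sample_buffer(&counter_sample_buffer);
attachment.set_start_of_encoder_sample_index(0);
attachment.set_end_of_encoder_sample_index(1);

let encoder = command_buffer.compute_command_encoder_with_descriptor(pass_descriptor);
```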
{ + type CType = MTLCounter; + pub struct Counter; +} + +impl CounterRef {} + +/// See +pub enum MTLCounterSet {} + +foreign_obj_type! { + type CType = MTLCounterSet; + pub struct CounterSet; +} + +impl CounterSetRef { + pub fn name(&self) -> &str { + unsafe { + let name = msg_send![self, name]; + crate::nsstring_as_str(name) + } + } +} + +/// See +pub enum MTLCommonCounterSet {} + +/// See +pub enum MTLCommonCounter {} + +foreign_obj_type! { + type CType = MTLCommonCounter; + pub struct CommonCounter; +} diff --git a/third_party/rust/metal/src/depthstencil.rs b/third_party/rust/metal/src/depthstencil.rs index 5b578e6d4618..36588a9d6f5c 100644 --- a/third_party/rust/metal/src/depthstencil.rs +++ b/third_party/rust/metal/src/depthstencil.rs @@ -8,6 +8,7 @@ use crate::DeviceRef; use objc::runtime::{NO, YES}; +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLCompareFunction { @@ -21,6 +22,7 @@ pub enum MTLCompareFunction { Always = 7, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLStencilOperation { @@ -34,12 +36,12 @@ pub enum MTLStencilOperation { DecrementWrap = 7, } +/// See pub enum MTLStencilDescriptor {} foreign_obj_type! { type CType = MTLStencilDescriptor; pub struct StencilDescriptor; - pub struct StencilDescriptorRef; } impl StencilDescriptor { @@ -101,12 +103,12 @@ impl StencilDescriptorRef { } } +/// See pub enum MTLDepthStencilDescriptor {} foreign_obj_type! { type CType = MTLDepthStencilDescriptor; pub struct DepthStencilDescriptor; - pub struct DepthStencilDescriptorRef; } impl DepthStencilDescriptor { @@ -172,12 +174,12 @@ impl DepthStencilDescriptorRef { } } +/// See pub enum MTLDepthStencilState {} foreign_obj_type! { type CType = MTLDepthStencilState; pub struct DepthStencilState; - pub struct DepthStencilStateRef; } impl DepthStencilStateRef { diff --git a/third_party/rust/metal/src/device.rs b/third_party/rust/metal/src/device.rs index 1cb0a40783dc..96f1e61433f1 100644 --- a/third_party/rust/metal/src/device.rs +++ b/third_party/rust/metal/src/device.rs @@ -13,8 +13,13 @@ use objc::runtime::{Object, NO, YES}; use std::{ffi::CStr, os::raw::c_char, path::Path, ptr}; -// Available on macOS 10.11+, iOS 8.0+, tvOS 9.0+ +/// Available on macOS 10.11+, iOS 8.0+, tvOS 9.0+ +/// +/// See #[allow(non_camel_case_types)] +#[deprecated( + note = "Since iOS 8.0–16.0 iPadOS 8.0–16.0 macOS 10.11–13.0 Mac Catalyst 13.1–16.0 tvOS 9.0–16.0" +)] #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLFeatureSet { @@ -52,7 +57,9 @@ pub enum MTLFeatureSet { macOS_GPUFamily2_v1 = 10005, } -// Available on macOS 10.15+, iOS 13.0+ +/// Available on macOS 10.15+, iOS 13.0+ +/// +/// See #[repr(i64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] #[non_exhaustive] @@ -73,8 +80,10 @@ pub enum MTLGPUFamily { Mac2 = 2002, MacCatalyst1 = 4001, MacCatalyst2 = 4002, + Metal3 = 5001, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLDeviceLocation { @@ -1380,6 +1389,7 @@ impl MTLFeatureSet { } } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLArgumentBuffersTier { @@ -1387,6 +1397,7 @@ pub enum MTLArgumentBuffersTier { Tier2 = 1, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLReadWriteTextureTier { @@ -1396,6 +1407,8 @@ pub enum MTLReadWriteTextureTier { } /// Only available on (macos(11.0), ios(14.0)) +/// +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum 
MTLCounterSamplingPoint {
@@ -1407,6 +1420,9 @@ pub enum MTLCounterSamplingPoint {
 }
 
 /// Only available on (macos(11.0), macCatalyst(14.0), ios(13.0))
+/// Kinda a long name!
+///
+/// See <https://developer.apple.com/documentation/metal/mtlsparsetextureregionalignmentmode>
 #[repr(u64)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
 pub enum MTLSparseTextureRegionAlignmentMode {
@@ -1431,6 +1447,7 @@ bitflags! {
     }
 }
 
+/// See <https://developer.apple.com/documentation/metal/mtlaccelerationstructuresizes>
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 #[repr(C)]
 pub struct MTLAccelerationStructureSizes {
@@ -1480,18 +1497,22 @@
 type MTLNewRenderPipelineStateWithReflectionCompletionHandler = extern fn(renderPipelineState: id, reflection: id, error: id);
 type MTLNewComputePipelineStateCompletionHandler = extern fn(computePipelineState: id, error: id);
 type MTLNewComputePipelineStateWithReflectionCompletionHandler = extern fn(computePipelineState: id, reflection: id, error: id);*/
 
+/// See <https://developer.apple.com/documentation/metal/mtldevice>
 pub enum MTLDevice {}
 
 foreign_obj_type! {
     type CType = MTLDevice;
     pub struct Device;
-    pub struct DeviceRef;
 }
 
 impl Device {
     pub fn system_default() -> Option<Self> {
         // `MTLCreateSystemDefaultDevice` may return null if Metal is not supported
-        unsafe { MTLCreateSystemDefaultDevice().as_mut().map(|x| Self(x)) }
+        unsafe {
+            MTLCreateSystemDefaultDevice()
+                .as_mut()
+                .map(|x| Self(x.into()))
+        }
     }
 
     pub fn all() -> Vec<Device> {
@@ -1878,6 +1899,45 @@ impl DeviceRef {
         }
     }
 
+    /// Only available on (macos(13.0), ios(16.0))
+    pub fn new_mesh_render_pipeline_state_with_reflection(
+        &self,
+        descriptor: &MeshRenderPipelineDescriptorRef,
+        reflection_options: MTLPipelineOption,
+    ) -> Result<(RenderPipelineState, RenderPipelineReflection), String> {
+        unsafe {
+            let mut reflection: *mut Object = ptr::null_mut();
+            let pipeline_state: *mut MTLRenderPipelineState = try_objc! { err =>
+                msg_send![self, newRenderPipelineStateWithMeshDescriptor:descriptor
+                                options:reflection_options
+                                reflection:&mut reflection
+                                error:&mut err]
+            };
+
+            let state = RenderPipelineState::from_ptr(pipeline_state);
+
+            let () = msg_send![reflection, retain];
+            let reflection = RenderPipelineReflection::from_ptr(reflection as _);
+
+            Ok((state, reflection))
+        }
+    }
+
+    /// Only available on (macos(13.0), ios(16.0))
+    pub fn new_mesh_render_pipeline_state(
+        &self,
+        descriptor: &MeshRenderPipelineDescriptorRef,
+    ) -> Result<RenderPipelineState, String> {
+        unsafe {
+            let pipeline_state: *mut MTLRenderPipelineState = try_objc! { err =>
+                msg_send![self, newRenderPipelineStateWithMeshDescriptor:descriptor
+                                error:&mut err]
+            };
+
+            Ok(RenderPipelineState::from_ptr(pipeline_state))
+        }
+    }
+
     pub fn new_compute_pipeline_state_with_function(
         &self,
         function: &FunctionRef,
@@ -1966,6 +2026,20 @@ impl DeviceRef {
         }
     }
 
+    pub fn new_counter_sample_buffer_with_descriptor(
+        &self,
+        descriptor: &CounterSampleBufferDescriptorRef,
+    ) -> Result<CounterSampleBuffer, String> {
+        unsafe {
+            let counter_sample_buffer: *mut MTLCounterSampleBuffer = try_objc! { err =>
+                msg_send![self, newCounterSampleBufferWithDescriptor: descriptor error:&mut err]
+            };
+
+            assert!(!counter_sample_buffer.is_null());
+            Ok(CounterSampleBuffer::from_ptr(counter_sample_buffer))
+        }
+    }
+
     pub fn new_texture(&self, descriptor: &TextureDescriptorRef) -> Texture {
         unsafe { msg_send![self, newTextureWithDescriptor: descriptor] }
     }
@@ -2114,4 +2188,37 @@ impl DeviceRef {
     pub fn max_buffer_length(&self) -> NSUInteger {
         unsafe { msg_send![self, maxBufferLength] }
     }
+
+    pub fn acceleration_structure_sizes_with_descriptor(
+        &self,
+        desc: &AccelerationStructureDescriptorRef,
+    ) -> MTLAccelerationStructureSizes {
+        unsafe { msg_send![self, accelerationStructureSizesWithDescriptor: desc] }
+    }
+
+    pub fn new_acceleration_structure_with_size(
+        &self,
+        size: NSUInteger,
+    ) -> accelerator_structure::AccelerationStructure {
+        unsafe { msg_send![self, newAccelerationStructureWithSize: size] }
+    }
+
+    pub fn sample_timestamps(&self, cpu_timestamp: &mut u64, gpu_timestamp: &mut u64) {
+        unsafe { msg_send![self, sampleTimestamps: cpu_timestamp gpuTimestamp: gpu_timestamp] }
+    }
+
+    pub fn counter_sets(&self) -> Vec<CounterSet> {
+        unsafe {
+            let counter_sets: *mut Object = msg_send![self, counterSets];
+            let count: NSUInteger = msg_send![counter_sets, count];
+            let ret = (0..count)
+                .map(|i| {
+                    let csp: *mut MTLCounterSet = msg_send![counter_sets, objectAtIndex: i];
+                    let () = msg_send![csp, retain];
+                    CounterSet::from_ptr(csp)
+                })
+                .collect();
+            ret
+        }
+    }
 }
diff --git a/third_party/rust/metal/src/drawable.rs b/third_party/rust/metal/src/drawable.rs
index a1ab789b7f0e..5c0358498501 100644
--- a/third_party/rust/metal/src/drawable.rs
+++ b/third_party/rust/metal/src/drawable.rs
@@ -7,12 +7,12 @@
 
 use super::NSUInteger;
 
+/// See <https://developer.apple.com/documentation/metal/mtldrawable>
 pub enum MTLDrawable {}
 
 foreign_obj_type! {
     type CType = MTLDrawable;
     pub struct Drawable;
-    pub struct DrawableRef;
 }
 
 impl DrawableRef {
diff --git a/third_party/rust/metal/src/encoder.rs b/third_party/rust/metal/src/encoder.rs
index 36c44c28bafd..b55bf3073ed0 100644
--- a/third_party/rust/metal/src/encoder.rs
+++ b/third_party/rust/metal/src/encoder.rs
@@ -9,6 +9,7 @@ use super::*;
 
 use std::ops::Range;
 
+/// See <https://developer.apple.com/documentation/metal/mtlprimitivetype>
 #[repr(u64)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
 pub enum MTLPrimitiveType {
@@ -19,6 +20,7 @@ pub enum MTLPrimitiveType {
     TriangleStrip = 4,
 }
 
+/// See <https://developer.apple.com/documentation/metal/mtlindextype>
 #[repr(u64)]
 #[allow(non_camel_case_types)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
@@ -27,6 +29,7 @@ pub enum MTLIndexType {
     UInt32 = 1,
 }
 
+/// See <https://developer.apple.com/documentation/metal/mtlvisibilityresultmode>
 #[repr(u64)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
 pub enum MTLVisibilityResultMode {
@@ -35,6 +38,7 @@ pub enum MTLVisibilityResultMode {
     Counting = 2,
 }
 
+/// See <https://developer.apple.com/documentation/metal/mtlcullmode>
 #[repr(u64)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
 pub enum MTLCullMode {
@@ -43,6 +47,7 @@ pub enum MTLCullMode {
     Back = 2,
 }
 
+/// See <https://developer.apple.com/documentation/metal/mtlwinding>
 #[repr(u64)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
 pub enum MTLWinding {
@@ -50,6 +55,7 @@ pub enum MTLWinding {
     CounterClockwise = 1,
 }
 
+/// See <https://developer.apple.com/documentation/metal/mtldepthclipmode>
 #[repr(u64)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
 pub enum MTLDepthClipMode {
@@ -57,6 +63,7 @@ pub enum MTLDepthClipMode {
     Clamp = 1,
 }
 
+/// See <https://developer.apple.com/documentation/metal/mtltrianglefillmode>
 #[repr(u64)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
 pub enum MTLTriangleFillMode {
@@ -79,6 +86,7 @@ bitflags!
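Putting `counter_sets`, the descriptor from counters.rs, and the new constructor together; treating `"timestamp"` as the counter-set name Apple devices report is an assumption, not something this patch guarantees:

```rust
let timestamp_set = device
    .counter_sets()
    .into_iter()
    .find(|set| set.name() == "timestamp")
    .expect("device reports no timestamp counter set");

let descriptor = CounterSampleBufferDescriptor::new();
descriptor.set_counter_set(&timestamp_set);
descriptor.set_storage_mode(MTLStorageMode::Shared);
descriptor.set_sample_count(2);

let counter_sample_buffer = device
    .new_counter_sample_buffer_with_descriptor(&descriptor)
    .expect("failed to allocate counter sample buffer");
```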
{ } } +/// See #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct MTLScissorRect { @@ -88,6 +96,7 @@ pub struct MTLScissorRect { pub height: NSUInteger, } +/// See #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct MTLViewport { @@ -99,6 +108,7 @@ pub struct MTLViewport { pub zfar: f64, } +/// See #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct MTLDrawPrimitivesIndirectArguments { @@ -108,6 +118,7 @@ pub struct MTLDrawPrimitivesIndirectArguments { pub baseInstance: u32, } +/// See #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct MTLDrawIndexedPrimitivesIndirectArguments { @@ -118,6 +129,7 @@ pub struct MTLDrawIndexedPrimitivesIndirectArguments { pub baseInstance: u32, } +/// See #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct VertexAmplificationViewMapping { @@ -125,12 +137,15 @@ pub struct VertexAmplificationViewMapping { pub viewportArrayIndexOffset: u32, } +#[allow(dead_code)] +type MTLVertexAmplicationViewMapping = VertexAmplificationViewMapping; + +/// See pub enum MTLCommandEncoder {} foreign_obj_type! { type CType = MTLCommandEncoder; pub struct CommandEncoder; - pub struct CommandEncoderRef; } impl CommandEncoderRef { @@ -171,13 +186,13 @@ impl CommandEncoderRef { } } +/// See pub enum MTLParallelRenderCommandEncoder {} foreign_obj_type! { type CType = MTLParallelRenderCommandEncoder; pub struct ParallelRenderCommandEncoder; - pub struct ParallelRenderCommandEncoderRef; - type ParentType = CommandEncoderRef; + type ParentType = CommandEncoder; } impl ParallelRenderCommandEncoderRef { @@ -186,13 +201,13 @@ impl ParallelRenderCommandEncoderRef { } } +/// See pub enum MTLRenderCommandEncoder {} foreign_obj_type! { type CType = MTLRenderCommandEncoder; pub struct RenderCommandEncoder; - pub struct RenderCommandEncoderRef; - type ParentType = CommandEncoderRef; + type ParentType = CommandEncoder; } impl RenderCommandEncoderRef { @@ -395,6 +410,308 @@ impl RenderCommandEncoderRef { } } + /// Only available in (macos(12.0), ios(15.0)) + pub fn set_vertex_acceleration_structure( + &self, + index: NSUInteger, + accel: Option<&accelerator_structure::AccelerationStructureRef>, + ) { + unsafe { + msg_send![ + self, + setVertexAccelerationStructure: accel + atBufferIndex: index + ] + } + } + + /// Only available in (macos(12.0), ios(15.0)) + pub fn set_vertex_intersection_function_table( + &self, + index: NSUInteger, + table: Option<&IntersectionFunctionTableRef>, + ) { + unsafe { + msg_send![ + self, + setVertexIntersectionFunctionTable: table + atBufferIndex: index + ] + } + } + + // Specifying Resources for a Object Shader Function + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_buffer( + &self, + index: NSUInteger, + buffer: Option<&BufferRef>, + offset: NSUInteger, + ) { + unsafe { + msg_send![self, + setObjectBuffer:buffer + offset:offset + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_buffer_offset(&self, index: NSUInteger, offset: NSUInteger) { + unsafe { + msg_send![self, + setObjectBufferOffset:offset + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_bytes( + &self, + index: NSUInteger, + length: NSUInteger, + bytes: *const std::ffi::c_void, + ) { + unsafe { + msg_send![self, + setObjectBytes:bytes + length:length + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_sampler_state(&self, index: NSUInteger, sampler: Option<&SamplerStateRef>) { + unsafe { + msg_send![self, + setObjectSamplerState:sampler + 
atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_sampler_state_with_lod( + &self, + index: NSUInteger, + sampler: Option<&SamplerStateRef>, + lod_clamp: Range, + ) { + unsafe { + msg_send![self, + setObjectSamplerState:sampler + lodMinClamp:lod_clamp.start + lodMaxClamp:lod_clamp.end + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_texture(&self, index: NSUInteger, texture: Option<&TextureRef>) { + unsafe { + msg_send![self, + setObjectTexture:texture + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_threadgroup_memory_length(&self, index: NSUInteger, length: NSUInteger) { + unsafe { + msg_send![self, + setObjectThreadgroupMemoryLength: length + atIndex: index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_buffers( + &self, + start_index: NSUInteger, + data: &[Option<&BufferRef>], + offsets: &[NSUInteger], + ) { + debug_assert_eq!(offsets.len(), data.len()); + unsafe { + msg_send![self, + setObjectBuffers: data.as_ptr() + offsets: offsets.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_sampler_states( + &self, + start_index: NSUInteger, + data: &[Option<&SamplerStateRef>], + ) { + unsafe { + msg_send![self, + setObjectSamplerStates: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_object_textures(&self, start_index: NSUInteger, data: &[Option<&TextureRef>]) { + unsafe { + msg_send![self, + setObjectTextures: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + // Specifying Resources for a Mesh Shader + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_buffer( + &self, + index: NSUInteger, + buffer: Option<&BufferRef>, + offset: NSUInteger, + ) { + unsafe { + msg_send![self, + setMeshBuffer:buffer + offset:offset + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_buffer_offset(&self, index: NSUInteger, offset: NSUInteger) { + unsafe { + msg_send![self, + setMeshBufferOffset:offset + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_bytes( + &self, + index: NSUInteger, + length: NSUInteger, + bytes: *const std::ffi::c_void, + ) { + unsafe { + msg_send![self, + setMeshBytes:bytes + length:length + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_sampler_state(&self, index: NSUInteger, sampler: Option<&SamplerStateRef>) { + unsafe { + msg_send![self, + setMeshSamplerState:sampler + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_sampler_state_with_lod( + &self, + index: NSUInteger, + sampler: Option<&SamplerStateRef>, + lod_clamp: Range, + ) { + unsafe { + msg_send![self, + setMeshSamplerState:sampler + lodMinClamp:lod_clamp.start + lodMaxClamp:lod_clamp.end + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_texture(&self, index: NSUInteger, texture: Option<&TextureRef>) { + unsafe { + msg_send![self, + setMeshTexture:texture + atIndex:index + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_buffers( + &self, + start_index: NSUInteger, + data: 
&[Option<&BufferRef>], + offsets: &[NSUInteger], + ) { + debug_assert_eq!(offsets.len(), data.len()); + unsafe { + msg_send![self, + setMeshBuffers: data.as_ptr() + offsets: offsets.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_sampler_states( + &self, + start_index: NSUInteger, + data: &[Option<&SamplerStateRef>], + ) { + unsafe { + msg_send![self, + setMeshSamplerStates: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn set_mesh_textures(&self, start_index: NSUInteger, data: &[Option<&TextureRef>]) { + unsafe { + msg_send![self, + setMeshTextures: data.as_ptr() + withRange: NSRange { + location: start_index, + length: data.len() as _, + } + ] + } + } + // Specifying Resources for a Fragment Shader Function pub fn set_fragment_bytes( @@ -515,6 +832,36 @@ impl RenderCommandEncoderRef { } } + /// Only available in (macos(12.0), ios(15.0)) + pub fn set_fragment_acceleration_structure( + &self, + index: NSUInteger, + accel: Option<&accelerator_structure::AccelerationStructureRef>, + ) { + unsafe { + msg_send![ + self, + setFragmentAccelerationStructure: accel + atBufferIndex: index + ] + } + } + + /// Only available in (macos(12.0), ios(15.0)) + pub fn set_fragment_intersection_function_table( + &self, + index: NSUInteger, + table: Option<&IntersectionFunctionTableRef>, + ) { + unsafe { + msg_send![ + self, + setFragmentIntersectionFunctionTable: table + atBufferIndex: index + ] + } + } + // Drawing Geometric Primitives pub fn draw_primitives( @@ -674,6 +1021,56 @@ impl RenderCommandEncoderRef { // fn setVertexBuffers_offsets_withRange(self, buffers: *const id, offsets: *const NSUInteger, range: NSRange); // fn setVertexSamplerStates_lodMinClamps_lodMaxClamps_withRange(self, samplers: *const id, lodMinClamps: *const f32, lodMaxClamps: *const f32, range: NSRange); + /// Only available in (macos(13.0), ios(16.0)) + pub fn draw_mesh_threadgroups( + &self, + threadgroups_per_grid: MTLSize, + threads_per_object_threadgroup: MTLSize, + threads_per_mesh_threadgroup: MTLSize, + ) { + unsafe { + msg_send![self, + drawMeshThreadgroups: threadgroups_per_grid + threadsPerObjectThreadgroup: threads_per_object_threadgroup + threadsPerMeshThreadgroup: threads_per_mesh_threadgroup + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn draw_mesh_threadgroups_with_indirect_buffer( + &self, + indirect_buffer: &BufferRef, + indirect_buffer_offset: NSUInteger, + threads_per_object_threadgroup: MTLSize, + threads_per_mesh_threadgroup: MTLSize, + ) { + unsafe { + msg_send![self, + drawMeshThreadgroupsWithIndirectBuffer: indirect_buffer + indirectBufferOffset: indirect_buffer_offset + threadsPerObjectThreadgroup: threads_per_object_threadgroup + threadsPerMeshThreadgroup: threads_per_mesh_threadgroup + ] + } + } + + /// Only available in (macos(13.0), ios(16.0)) + pub fn draw_mesh_threads( + &self, + threads_per_grid: MTLSize, + threads_per_object_threadgroup: MTLSize, + threads_per_mesh_threadgroup: MTLSize, + ) { + unsafe { + msg_send![self, + drawMeshThreads: threads_per_grid + threadsPerObjectThreadgroup: threads_per_object_threadgroup + threadsPerMeshThreadgroup: threads_per_mesh_threadgroup + ] + } + } + /// Adds an untracked resource to the render pass. 
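A sketch of a complete mesh-shader dispatch combining the `set_mesh_*` binds with `draw_mesh_threadgroups` (macOS 13.0+/iOS 16.0+); `encoder` is assumed to have a mesh render pipeline bound, and the buffer and sizes are illustrative:

```rust
encoder.set_mesh_buffer(0, Some(&mesh_data_buffer), 0);
encoder.draw_mesh_threadgroups(
    MTLSize::new(8, 1, 1),  // threadgroups per grid
    MTLSize::new(32, 1, 1), // threads per object threadgroup
    MTLSize::new(32, 1, 1), // threads per mesh threadgroup
);
```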
/// /// Availability: iOS 11.0+, macOS 10.13+ @@ -819,13 +1216,13 @@ impl RenderCommandEncoderRef { } } +/// See pub enum MTLBlitCommandEncoder {} foreign_obj_type! { type CType = MTLBlitCommandEncoder; pub struct BlitCommandEncoder; - pub struct BlitCommandEncoderRef; - type ParentType = CommandEncoderRef; + type ParentType = CommandEncoder; } impl BlitCommandEncoderRef { @@ -922,7 +1319,7 @@ impl BlitCommandEncoderRef { } } - /// https://developer.apple.com/documentation/metal/mtlblitcommandencoder/1400756-copy + /// See pub fn copy_from_texture_to_buffer( &self, source_texture: &TextureRef, @@ -997,15 +1394,49 @@ impl BlitCommandEncoderRef { pub fn wait_for_fence(&self, fence: &FenceRef) { unsafe { msg_send![self, waitForFence: fence] } } + + /// See: + pub fn sample_counters_in_buffer( + &self, + sample_buffer: &CounterSampleBufferRef, + sample_index: NSUInteger, + with_barrier: bool, + ) { + unsafe { + msg_send![self, + sampleCountersInBuffer: sample_buffer + atSampleIndex: sample_index + withBarrier: with_barrier + ] + } + } + + /// See: + pub fn resolve_counters( + &self, + sample_buffer: &CounterSampleBufferRef, + range: crate::NSRange, + destination_buffer: &BufferRef, + destination_offset: NSUInteger, + ) { + unsafe { + msg_send![self, + resolveCounters: sample_buffer + inRange: range + destinationBuffer: destination_buffer + destinationOffset: destination_offset + ] + } + } } +/// See pub enum MTLComputeCommandEncoder {} foreign_obj_type! { type CType = MTLComputeCommandEncoder; pub struct ComputeCommandEncoder; - pub struct ComputeCommandEncoderRef; - type ParentType = CommandEncoderRef; + type ParentType = CommandEncoder; } impl ComputeCommandEncoderRef { @@ -1150,6 +1581,22 @@ impl ComputeCommandEncoderRef { } } + /// Encodes a barrier so that changes to a set of resources made by commands encoded before the + /// barrier are completed before further commands are executed. + /// + /// Availability: iOS 12.0+, macOS 10.14+ + /// + /// # Arguments + /// * `resources`: A slice of resources. + pub fn memory_barrier_with_resources(&self, resources: &[&ResourceRef]) { + unsafe { + msg_send![self, + memoryBarrierWithResources: resources.as_ptr() + count: resources.len() as NSUInteger + ] + } + } + /// Specifies that a resource in an argument buffer can be safely used by a compute pass. /// /// Availability: iOS 11.0+, macOS 10.13+ @@ -1220,14 +1667,60 @@ impl ComputeCommandEncoderRef { pub fn wait_for_fence(&self, fence: &FenceRef) { unsafe { msg_send![self, waitForFence: fence] } } + + /// Only available in (macos(11.0), ios(14.0)) + pub fn set_acceleration_structure( + &self, + index: NSUInteger, + accel: Option<&accelerator_structure::AccelerationStructureRef>, + ) { + unsafe { + msg_send![ + self, + setAccelerationStructure: accel + atBufferIndex: index + ] + } + } + + /// Only available in (macos(11.0), ios(14.0)) + pub fn set_intersection_function_table( + &self, + index: NSUInteger, + table: Option<&IntersectionFunctionTableRef>, + ) { + unsafe { + msg_send![ + self, + setIntersectionFunctionTable: table + atBufferIndex: index + ] + } + } + + /// See: + pub fn sample_counters_in_buffer( + &self, + sample_buffer: &CounterSampleBufferRef, + sample_index: NSUInteger, + with_barrier: bool, + ) { + unsafe { + msg_send![self, + sampleCountersInBuffer: sample_buffer + atSampleIndex: sample_index + withBarrier: with_barrier + ] + } + } } +/// See pub enum MTLArgumentEncoder {} foreign_obj_type! 
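The counter workflow ends with a resolve on the blit encoder: `sample_counters_in_buffer` records, `resolve_counters` copies the samples into an ordinary buffer the CPU can read. A sketch assuming `counter_sample_buffer` holds two samples and that `NSRange::new` is available as the crate's range helper:

```rust
let resolved = device.new_buffer(
    2 * std::mem::size_of::<u64>() as u64,
    MTLResourceOptions::StorageModeShared,
);
let blit = command_buffer.new_blit_command_encoder();
blit.resolve_counters(&counter_sample_buffer, NSRange::new(0, 2), &resolved, 0);
blit.end_encoding();
```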
{ type CType = MTLArgumentEncoder; pub struct ArgumentEncoder; - pub struct ArgumentEncoderRef; } impl ArgumentEncoderRef { diff --git a/third_party/rust/metal/src/heap.rs b/third_party/rust/metal/src/heap.rs index 9d60142a7bfc..d2c11bbcf63e 100644 --- a/third_party/rust/metal/src/heap.rs +++ b/third_party/rust/metal/src/heap.rs @@ -8,6 +8,8 @@ use super::*; /// Only available on macos(10.15), ios(13.0) +/// +/// See #[repr(u64)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum MTLHeapType { @@ -17,12 +19,12 @@ pub enum MTLHeapType { Sparse = 2, } +/// See pub enum MTLHeap {} foreign_obj_type! { type CType = MTLHeap; pub struct Heap; - pub struct HeapRef; } impl HeapRef { @@ -148,12 +150,12 @@ impl HeapRef { } } +/// See pub enum MTLHeapDescriptor {} foreign_obj_type! { type CType = MTLHeapDescriptor; pub struct HeapDescriptor; - pub struct HeapDescriptorRef; } impl HeapDescriptor { diff --git a/third_party/rust/metal/src/indirect_encoder.rs b/third_party/rust/metal/src/indirect_encoder.rs index 88ac41d37727..dddaabf6b88a 100644 --- a/third_party/rust/metal/src/indirect_encoder.rs +++ b/third_party/rust/metal/src/indirect_encoder.rs @@ -1,6 +1,7 @@ use super::*; bitflags! { + /// See #[allow(non_upper_case_globals)] pub struct MTLIndirectCommandType: NSUInteger { const Draw = 1 << 0; @@ -12,12 +13,12 @@ bitflags! { } } +/// See pub enum MTLIndirectCommandBufferDescriptor {} foreign_obj_type! { type CType = MTLIndirectCommandBufferDescriptor; pub struct IndirectCommandBufferDescriptor; - pub struct IndirectCommandBufferDescriptorRef; } impl IndirectCommandBufferDescriptorRef { @@ -82,13 +83,13 @@ impl IndirectCommandBufferDescriptorRef { } } +/// See pub enum MTLIndirectCommandBuffer {} foreign_obj_type! { type CType = MTLIndirectCommandBuffer; pub struct IndirectCommandBuffer; - pub struct IndirectCommandBufferRef; - type ParentType = ResourceRef; + type ParentType = Resource; } impl IndirectCommandBufferRef { @@ -112,12 +113,12 @@ impl IndirectCommandBufferRef { } } +/// See pub enum MTLIndirectRenderCommand {} foreign_obj_type! { type CType = MTLIndirectRenderCommand; pub struct IndirectRenderCommand; - pub struct IndirectRenderCommandRef; } impl IndirectRenderCommandRef { @@ -266,12 +267,12 @@ impl IndirectRenderCommandRef { } } +/// See pub enum MTLIndirectComputeCommand {} foreign_obj_type! { type CType = MTLIndirectComputeCommand; pub struct IndirectComputeCommand; - pub struct IndirectComputeCommandRef; } impl IndirectComputeCommandRef { diff --git a/third_party/rust/metal/src/lib.rs b/third_party/rust/metal/src/lib.rs index bc8427922cd0..308b0e138181 100644 --- a/third_party/rust/metal/src/lib.rs +++ b/third_party/rust/metal/src/lib.rs @@ -1,21 +1,24 @@ -// Copyright 2017 GFX developers +// Copyright 2023 GFX developers // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
+#![allow(deprecated)] #![allow(non_snake_case)] #![allow(non_upper_case_globals)] #[macro_use] -extern crate bitflags; +pub extern crate bitflags; #[macro_use] -extern crate log; +pub extern crate log; #[macro_use] -extern crate objc; +pub extern crate objc; #[macro_use] -extern crate foreign_types; +pub extern crate foreign_types; +#[macro_use] +pub extern crate paste; use std::{ borrow::{Borrow, ToOwned}, @@ -29,15 +32,23 @@ use core_graphics_types::{base::CGFloat, geometry::CGSize}; use foreign_types::ForeignType; use objc::runtime::{Object, NO, YES}; +/// See #[cfg(target_pointer_width = "64")] pub type NSInteger = i64; + +/// See #[cfg(not(target_pointer_width = "64"))] pub type NSInteger = i32; + +/// See #[cfg(target_pointer_width = "64")] pub type NSUInteger = u64; + +/// See #[cfg(target_pointer_width = "32")] pub type NSUInteger = u32; +/// See #[repr(C)] #[derive(Copy, Clone)] pub struct NSRange { @@ -82,45 +93,93 @@ fn nsstring_from_str(string: &str) -> *mut objc::runtime::Object { } } +/// Define a Rust wrapper for an Objective-C opaque type. +/// +/// This macro adapts the `foreign-types` crate's [`foreign_type!`] +/// macro to Objective-C, defining Rust types that represent owned and +/// borrowed forms of some underlying Objective-C type, using +/// Objective-C's reference counting to manage its lifetime. +/// +/// Given a use of the form: +/// +/// ```ignore +/// foreign_obj_type! { +/// type CType = MTLBuffer; // underlying Objective-C type +/// pub struct Buffer; // owned Rust type +/// pub struct BufferRef; // borrowed Rust type +/// type ParentType = ResourceRef; // borrowed parent class +/// } +/// ``` +/// +/// This defines the types `Buffer` and `BufferRef` as owning and +/// non-owning types, analogous to `String` and `str`, that manage +/// some underlying `*mut MTLBuffer`: +/// +/// - Both `Buffer` and `BufferRef` implement [`obj::Message`], indicating +/// that they can be sent Objective-C messages. +/// +/// - Dropping a `Buffer` sends the underlying `MTLBuffer` a `release` +/// message, and cloning a `BufferRef` sends a `retain` message and +/// returns a new `Buffer`. +/// +/// - `Buffer` dereferences to `BufferRef`. +/// +/// - `BufferRef` dereferences to its parent type `ResourceRef`. The +/// `ParentType` component is optional; if omitted, the `Ref` type +/// doesn't implement `Deref` or `DerefMut`. +/// +/// - Both `Buffer` and `BufferRef` implement `std::fmt::Debug`, +/// sending an Objective-C `debugDescription` message to the +/// underlying `MTLBuffer`. +/// +/// Following the `foreign_types` crate's nomenclature, the `Ref` +/// suffix indicates that `BufferRef` and `ResourceRef` are non-owning +/// types, used *by reference*, like `&BufferRef` or `&ResourceRef`. +/// These types are not, themselves, references. macro_rules! foreign_obj_type { - {type CType = $raw_ident:ident; - pub struct $owned_ident:ident; - pub struct $ref_ident:ident; - type ParentType = $parent_ref:ident; + { + type CType = $raw_ident:ident; + pub struct $owned_ident:ident; + type ParentType = $parent_ident:ident; } => { foreign_obj_type! 
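A concrete consequence of the macro rework documented above: with `ParentType` wired through `paste!`, owned wrappers now upcast by value via the generated `From` impl, in addition to the borrowed `Deref` chain. A sketch (`device` assumed):

```rust
let buffer: Buffer = device.new_buffer(1024, MTLResourceOptions::StorageModeShared);
let resource: Resource = buffer.into(); // consumes the Buffer; same retained MTLBuffer underneath
```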
{
             type CType = $raw_ident;
             pub struct $owned_ident;
-            pub struct $ref_ident;
         }
 
-        impl ::std::ops::Deref for $ref_ident {
-            type Target = $parent_ref;
+        impl ::std::ops::Deref for paste!{[<$owned_ident Ref>]} {
+            type Target = paste!{[<$parent_ident Ref>]};
 
             #[inline]
-            fn deref(&self) -> &$parent_ref {
-                unsafe { &*(self as *const $ref_ident as *const $parent_ref) }
+            fn deref(&self) -> &Self::Target {
+                unsafe { &*(self as *const Self as *const Self::Target) }
+            }
+        }
+
+        impl ::std::convert::From<$owned_ident> for $parent_ident {
+            fn from(item: $owned_ident) -> Self {
+                unsafe { Self::from_ptr(::std::mem::transmute(item.into_ptr())) }
             }
         }
     };
-    {type CType = $raw_ident:ident;
-    pub struct $owned_ident:ident;
-    pub struct $ref_ident:ident;
+    {
+        type CType = $raw_ident:ident;
+        pub struct $owned_ident:ident;
     } => {
         foreign_type! {
-            type CType = $raw_ident;
-            fn drop = crate::obj_drop;
-            fn clone = crate::obj_clone;
-            pub struct $owned_ident;
-            pub struct $ref_ident;
+            pub unsafe type $owned_ident: Sync + Send {
+                type CType = $raw_ident;
+                fn drop = crate::obj_drop;
+                fn clone = crate::obj_clone;
+            }
         }
 
         unsafe impl ::objc::Message for $raw_ident {
         }
-        unsafe impl ::objc::Message for $ref_ident {
+        unsafe impl ::objc::Message for paste!{[<$owned_ident Ref>]} {
         }
 
-        impl ::std::fmt::Debug for $ref_ident {
+        impl ::std::fmt::Debug for paste!{[<$owned_ident Ref>]} {
             fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                 unsafe {
                     let string: *mut ::objc::runtime::Object = msg_send![self, debugDescription];
@@ -142,13 +201,12 @@ macro_rules! try_objc {
     $err_name: ident => $body:expr
     } => {
         {
-            let mut $err_name: *mut ::objc::runtime::Object = ::std::ptr::null_mut();
+            let mut $err_name: *mut Object = ::std::ptr::null_mut();
             let value = $body;
             if !$err_name.is_null() {
                 let desc: *mut Object = msg_send![$err_name, localizedDescription];
                 let compile_error: *const std::os::raw::c_char = msg_send![desc, UTF8String];
                 let message = CStr::from_ptr(compile_error).to_string_lossy().into_owned();
-                let () = msg_send![$err_name, release];
                 return Err(message);
             }
             value
@@ -156,6 +214,7 @@ macro_rules! try_objc {
     };
 }
 
+/// See <https://developer.apple.com/documentation/foundation/nsarray>
 pub struct NSArray<T> {
     _phantom: PhantomData<T>,
 }
 
 pub struct Array<T>(*mut NSArray<T>)
 where
     T: ForeignType + 'static,
     T::Ref: objc::Message + 'static;
+
 pub struct ArrayRef<T>(foreign_types::Opaque, PhantomData<T>)
 where
     T: ForeignType + 'static,
@@ -197,6 +257,7 @@ where
     T::Ref: objc::Message + 'static,
 {
 }
+
 unsafe impl<T> objc::Message for ArrayRef<T>
 where
     T: ForeignType + 'static,
@@ -224,7 +285,7 @@ where
     }
 }
 
-impl<T> foreign_types::ForeignType for Array<T>
+unsafe impl<T> foreign_types::ForeignType for Array<T>
 where
     T: ForeignType + 'static,
     T::Ref: objc::Message + 'static,
@@ -241,7 +302,7 @@ where
     }
 }
 
-impl<T> foreign_types::ForeignTypeRef for ArrayRef<T>
+unsafe impl<T> foreign_types::ForeignTypeRef for ArrayRef<T>
 where
     T: ForeignType + 'static,
     T::Ref: objc::Message + 'static,
@@ -284,13 +345,13 @@ where
     }
 }
 
+/// See <https://developer.apple.com/documentation/quartzcore/cametaldrawable>
 pub enum CAMetalDrawable {}
 
 foreign_obj_type! {
     type CType = CAMetalDrawable;
     pub struct MetalDrawable;
-    pub struct MetalDrawableRef;
-    type ParentType = DrawableRef;
+    type ParentType = Drawable;
 }
 
 impl MetalDrawableRef {
@@ -299,12 +360,34 @@ impl MetalDrawableRef {
     }
 }
 
+pub enum NSObject {}
+
+foreign_obj_type! {
+    type CType = NSObject;
+    pub struct NsObject;
+}
+
+impl NsObjectRef {
+    pub fn conforms_to_protocol<T>(&self) -> Result<bool, String> {
+        let name = ::std::any::type_name::<T>();
+        if let Some(name) = name.split("::").last() {
+            if let Some(protocol) = objc::runtime::Protocol::get(name) {
+                Ok(unsafe { msg_send![self, conformsToProtocol: protocol] })
+            } else {
+                Err(format!("Can not find the protocol for type: {}.", name))
+            }
+        } else {
+            Err(format!("Unexpected type name: {}.", name))
+        }
+    }
+}
+
+// See <https://developer.apple.com/documentation/quartzcore/cametallayer>
 pub enum CAMetalLayer {}
 
 foreign_obj_type! {
     type CType = CAMetalLayer;
     pub struct MetalLayer;
-    pub struct MetalLayerRef;
 }
 
 impl MetalLayer {
@@ -453,13 +536,16 @@ impl MetalLayerRef {
     }
 }
 
+mod accelerator_structure;
 mod argument;
 mod buffer;
 mod capturedescriptor;
 mod capturemanager;
 mod commandbuffer;
 mod commandqueue;
+mod computepass;
 mod constants;
+mod counters;
 mod depthstencil;
 mod device;
 mod drawable;
@@ -468,7 +554,7 @@ mod heap;
 mod indirect_encoder;
 mod library;
 #[cfg(feature = "mps")]
-mod mps;
+pub mod mps;
 mod pipeline;
 mod renderpass;
 mod resource;
@@ -480,8 +566,11 @@
 
 #[rustfmt::skip]
 pub use {
+    accelerator_structure::*,
     argument::*,
     buffer::*,
+    counters::*,
+    computepass::*,
     capturedescriptor::*,
     capturemanager::*,
     commandbuffer::*,
@@ -504,9 +593,6 @@ pub use {
     sync::*,
 };
 
-#[cfg(feature = "mps")]
-pub use mps::*;
-
 #[inline]
 unsafe fn obj_drop<T>(p: *mut T) {
     msg_send![(p as *mut Object), release]
@@ -521,12 +607,12 @@ unsafe fn obj_clone<T>(p: *mut T) -> *mut T {
 
 type c_size_t = usize;
 
 // TODO: expand supported interface
+/// See <https://developer.apple.com/documentation/foundation/nsurl>
 pub enum NSURL {}
 
 foreign_obj_type! {
     type CType = NSURL;
     pub struct URL;
-    pub struct URLRef;
 }
 
 impl URL {
@@ -546,4 +632,11 @@ impl URLRef {
             crate::nsstring_as_str(absolute_string)
         }
     }
+
+    pub fn path(&self) -> &str {
+        unsafe {
+            let path = msg_send![self, path];
+            crate::nsstring_as_str(path)
+        }
+    }
 }
diff --git a/third_party/rust/metal/src/library.rs b/third_party/rust/metal/src/library.rs
index 9a5a36b62e74..8c7757681e4b 100644
--- a/third_party/rust/metal/src/library.rs
+++ b/third_party/rust/metal/src/library.rs
@@ -15,6 +15,8 @@
 use std::os::raw::{c_char, c_void};
 use std::ptr;
 
 /// Only available on (macos(10.12), ios(10.0)
+///
+/// See <https://developer.apple.com/documentation/metal/mtlpatchtype>
 #[repr(u64)]
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 pub enum MTLPatchType {
     None = 0,
     Triangle = 1,
     Quad = 2,
 }
 
+/// See <https://developer.apple.com/documentation/metal/mtlvertexattribute>
 pub enum MTLVertexAttribute {}
 
 foreign_obj_type!
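The new protocol check uses the type parameter's last path segment as the Objective-C protocol name, so passing `MTLBuffer` looks up the `MTLBuffer` protocol. A sketch:

```rust
// Returns false both when the object does not conform and when the
// protocol cannot be found at runtime.
fn is_metal_buffer(object: &NsObjectRef) -> bool {
    object.conforms_to_protocol::<MTLBuffer>().unwrap_or(false)
}
```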
{ } /// Only available on (macos(11.0), ios(14.0)) +/// +/// See pub enum MTLFunctionDescriptor {} foreign_obj_type! { type CType = MTLFunctionDescriptor; pub struct FunctionDescriptor; - pub struct FunctionDescriptorRef; } impl FunctionDescriptor { @@ -259,22 +267,24 @@ impl FunctionDescriptorRef { } /// Only available on (macos(11.0), ios(14.0)) +/// +/// See pub enum MTLIntersectionFunctionDescriptor {} foreign_obj_type! { type CType = MTLIntersectionFunctionDescriptor; pub struct IntersectionFunctionDescriptor; - pub struct IntersectionFunctionDescriptorRef; - type ParentType = FunctionDescriptorRef; + type ParentType = FunctionDescriptor; } /// Only available on (macos(11.0), ios(14.0)) +/// +/// See pub enum MTLFunctionHandle {} foreign_obj_type! { type CType = MTLFunctionHandle; pub struct FunctionHandle; - pub struct FunctionHandleRef; } impl FunctionHandleRef { @@ -301,12 +311,12 @@ impl FunctionHandleRef { // MTLIntersectionFunctionTableDescriptor // MTLIntersectionFunctionTable +/// See pub enum MTLFunction {} foreign_obj_type! { type CType = MTLFunction; pub struct Function; - pub struct FunctionRef; } impl FunctionRef { @@ -378,6 +388,7 @@ impl FunctionRef { } } +/// See #[repr(u64)] #[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] pub enum MTLLanguageVersion { @@ -393,12 +404,12 @@ pub enum MTLLanguageVersion { V2_4 = 0x20004, } +/// See pub enum MTLFunctionConstantValues {} foreign_obj_type! { type CType = MTLFunctionConstantValues; pub struct FunctionConstantValues; - pub struct FunctionConstantValuesRef; } impl FunctionConstantValues { @@ -438,6 +449,8 @@ impl FunctionConstantValuesRef { } /// Only available on (macos(11.0), ios(14.0)) +/// +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLLibraryType { @@ -445,12 +458,12 @@ pub enum MTLLibraryType { Dynamic = 1, } +/// See pub enum MTLCompileOptions {} foreign_obj_type! { type CType = MTLCompileOptions; pub struct CompileOptions; - pub struct CompileOptionsRef; } impl CompileOptions { @@ -577,6 +590,7 @@ impl CompileOptionsRef { } } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLLibraryError { @@ -590,12 +604,12 @@ pub enum MTLLibraryError { FileNotFound = 6, } +/// See pub enum MTLLibrary {} foreign_obj_type! { type CType = MTLLibrary; pub struct Library; - pub struct LibraryRef; } impl LibraryRef { @@ -719,6 +733,8 @@ impl LibraryRef { } /// Only available on (macos(11.0), ios(14.0)) +/// +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLDynamicLibraryError { @@ -730,12 +746,12 @@ pub enum MTLDynamicLibraryError { Unsupported = 5, } +/// See pub enum MTLDynamicLibrary {} foreign_obj_type! { type CType = MTLDynamicLibrary; pub struct DynamicLibrary; - pub struct DynamicLibraryRef; } impl DynamicLibraryRef { @@ -787,12 +803,13 @@ impl DynamicLibraryRef { } /// macOS 11.0+ iOS 14.0+ +/// +/// See pub enum MTLBinaryArchiveDescriptor {} foreign_obj_type! { type CType = MTLBinaryArchiveDescriptor; pub struct BinaryArchiveDescriptor; - pub struct BinaryArchiveDescriptorRef; } impl BinaryArchiveDescriptor { @@ -814,12 +831,13 @@ impl BinaryArchiveDescriptorRef { } /// macOS 11.0+ iOS 14.0+ +/// +/// See pub enum MTLBinaryArchive {} foreign_obj_type! { type CType = MTLBinaryArchive; pub struct BinaryArchive; - pub struct BinaryArchiveRef; } impl BinaryArchiveRef { @@ -916,12 +934,13 @@ impl BinaryArchiveRef { } /// macOS 11.0+ iOS 14.0+ +/// +/// See pub enum MTLLinkedFunctions {} foreign_obj_type! 
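// A minimal sketch of how the compile-options and library types above fit together,
// assuming metal-rs's usual Device::new_library_with_source entry point (not part of
// this hunk) and a hypothetical MSL_SOURCE string:
//
//     let options = CompileOptions::new();
//     options.set_language_version(MTLLanguageVersion::V2_4);
//     let library = device.new_library_with_source(MSL_SOURCE, &options)?;
//     let function = library.get_function("vertex_main", None)?;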
{ type CType = MTLLinkedFunctions; pub struct LinkedFunctions; - pub struct LinkedFunctionsRef; } impl LinkedFunctions { diff --git a/third_party/rust/metal/src/mps.rs b/third_party/rust/metal/src/mps.rs index 0d4da2eed5dd..bac00346ac5b 100644 --- a/third_party/rust/metal/src/mps.rs +++ b/third_party/rust/metal/src/mps.rs @@ -22,14 +22,15 @@ pub fn mps_supports_device(device: &DeviceRef) -> bool { b == YES } +/// See pub enum MPSKernel {} foreign_obj_type! { type CType = MPSKernel; pub struct Kernel; - pub struct KernelRef; } +/// See pub enum MPSRayDataType { OriginDirection = 0, OriginMinDistanceDirectionMaxDistance = 1, @@ -37,6 +38,7 @@ pub enum MPSRayDataType { } bitflags! { + /// See #[allow(non_upper_case_globals)] pub struct MPSRayMaskOptions: NSUInteger { /// Enable primitive masks @@ -47,6 +49,8 @@ bitflags! { } /// Options that determine the data contained in an intersection result. +/// +/// See pub enum MPSIntersectionDataType { Distance = 0, DistancePrimitiveIndex = 1, @@ -55,6 +59,7 @@ pub enum MPSIntersectionDataType { DistancePrimitiveIndexInstanceIndexCoordinates = 4, } +/// See pub enum MPSIntersectionType { /// Find the closest intersection to the ray's origin along the ray direction. /// This is potentially slower than `Any` but is well suited to primary visibility rays. @@ -64,6 +69,7 @@ pub enum MPSIntersectionType { Any = 1, } +/// See pub enum MPSRayMaskOperator { /// Accept the intersection if `(primitive mask & ray mask) != 0`. And = 0, @@ -89,6 +95,7 @@ pub enum MPSRayMaskOperator { GreaterThanOrEqualTo = 9, } +/// See pub enum MPSTriangleIntersectionTestType { /// Use the default ray/triangle intersection test Default = 0, @@ -98,6 +105,7 @@ pub enum MPSTriangleIntersectionTestType { Watertight = 1, } +/// See #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MPSAccelerationStructureStatus { Unbuilt = 0, @@ -105,6 +113,7 @@ pub enum MPSAccelerationStructureStatus { } bitflags! { + /// See #[allow(non_upper_case_globals)] pub struct MPSAccelerationStructureUsage: NSUInteger { /// No usage options specified @@ -123,6 +132,7 @@ const MPSDataTypeFloatBit: isize = 0x10000000; const MPSDataTypeSignedBit: isize = 0x20000000; const MPSDataTypeNormalizedBit: isize = 0x40000000; +/// See pub enum MPSDataType { Invalid = 0, @@ -145,13 +155,14 @@ pub enum MPSDataType { } /// A kernel that performs intersection tests between rays and geometry. +/// +/// See pub enum MPSRayIntersector {} foreign_obj_type! { type CType = MPSRayIntersector; pub struct RayIntersector; - pub struct RayIntersectorRef; - type ParentType = KernelRef; + type ParentType = Kernel; } impl RayIntersector { @@ -247,13 +258,14 @@ impl RayIntersectorRef { } } -/// A group of acceleration structures which may be used together in an instance acceleration structure +/// A group of acceleration structures which may be used together in an instance acceleration structure. +/// +/// See pub enum MPSAccelerationStructureGroup {} foreign_obj_type! { type CType = MPSAccelerationStructureGroup; pub struct AccelerationStructureGroup; - pub struct AccelerationStructureGroupRef; } impl AccelerationStructureGroup { @@ -278,12 +290,13 @@ impl AccelerationStructureGroupRef { } /// The base class for data structures that are built over geometry and used to accelerate ray tracing. +/// +/// See pub enum MPSAccelerationStructure {} foreign_obj_type! 
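// Because this patch changes `mod mps` to `pub mod mps` and drops the blanket
// `pub use mps::*`, MPS items are now reached through the module path. A sketch,
// assuming the RayIntersector::from_device constructor defined later in this file:
//
//     if metal::mps::mps_supports_device(&device) {
//         let intersector = metal::mps::RayIntersector::from_device(&device)
//             .expect("device supports MPS but intersector creation failed");
//     }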
{ type CType = MPSAccelerationStructure; pub struct AccelerationStructure; - pub struct AccelerationStructureRef; } impl AccelerationStructureRef { @@ -312,13 +325,13 @@ impl AccelerationStructureRef { } } +/// See pub enum MPSPolygonAccelerationStructure {} foreign_obj_type! { type CType = MPSPolygonAccelerationStructure; pub struct PolygonAccelerationStructure; - pub struct PolygonAccelerationStructureRef; - type ParentType = AccelerationStructureRef; + type ParentType = AccelerationStructure; } impl PolygonAccelerationStructureRef { @@ -356,13 +369,14 @@ impl PolygonAccelerationStructureRef { } /// An acceleration structure built over triangles. +/// +/// See pub enum MPSTriangleAccelerationStructure {} foreign_obj_type! { type CType = MPSTriangleAccelerationStructure; pub struct TriangleAccelerationStructure; - pub struct TriangleAccelerationStructureRef; - type ParentType = PolygonAccelerationStructureRef; + type ParentType = PolygonAccelerationStructure; } impl TriangleAccelerationStructure { @@ -390,6 +404,7 @@ impl TriangleAccelerationStructureRef { } } +/// See #[repr(u64)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum MPSTransformType { @@ -398,13 +413,14 @@ pub enum MPSTransformType { } /// An acceleration structure built over instances of other acceleration structures +/// +/// See pub enum MPSInstanceAccelerationStructure {} foreign_obj_type! { type CType = MPSInstanceAccelerationStructure; pub struct InstanceAccelerationStructure; - pub struct InstanceAccelerationStructureRef; - type ParentType = AccelerationStructureRef; + type ParentType = AccelerationStructure; } impl InstanceAccelerationStructure { @@ -515,6 +531,8 @@ pub struct MPSPackedFloat3 { } /// Represents a 3D ray with an origin, a direction, and an intersection distance range from the origin. +/// +/// See #[repr(C)] pub struct MPSRayOriginMinDistanceDirectionMaxDistance { /// Ray origin. The intersection test will be skipped if the origin contains NaNs or infinities. @@ -533,6 +551,8 @@ pub struct MPSRayOriginMinDistanceDirectionMaxDistance { /// Intersection result which contains the distance from the ray origin to the intersection point, /// the index of the intersected primitive, and the first two barycentric coordinates of the intersection point. +/// +/// See #[repr(C)] pub struct MPSIntersectionDistancePrimitiveIndexCoordinates { /// Distance from the ray origin to the intersection point along the ray direction vector such diff --git a/third_party/rust/metal/src/pipeline/compute.rs b/third_party/rust/metal/src/pipeline/compute.rs index 138b561cb32e..366c8dc01dc7 100644 --- a/third_party/rust/metal/src/pipeline/compute.rs +++ b/third_party/rust/metal/src/pipeline/compute.rs @@ -9,6 +9,7 @@ use super::*; use objc::runtime::{NO, YES}; +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ -67,6 +68,7 @@ pub enum MTLAttributeFormat { Half = 53, } +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ -82,12 +84,12 @@ pub enum MTLStepFunction { ThreadPositionInGridYIndexed = 8, } +/// See pub enum MTLComputePipelineDescriptor {} foreign_obj_type! { type CType = MTLComputePipelineDescriptor; pub struct ComputePipelineDescriptor; - pub struct ComputePipelineDescriptorRef; } impl ComputePipelineDescriptor { @@ -268,12 +270,12 @@ impl ComputePipelineDescriptorRef { } } +/// See pub enum MTLComputePipelineState {} foreign_obj_type! 
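// The ParentType rewrites above preserve the deref chain, so the most derived handle
// still coerces to its ancestors' reference types, e.g. a TriangleAccelerationStructure
// derefs through PolygonAccelerationStructureRef down to AccelerationStructureRef:
//
//     let base: &AccelerationStructureRef = &triangle_accel; // via the generated Deref impls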
{ type CType = MTLComputePipelineState; pub struct ComputePipelineState; - pub struct ComputePipelineStateRef; } impl ComputePipelineStateRef { @@ -327,17 +329,22 @@ impl ComputePipelineStateRef { // API_AVAILABLE(macos(11.0), ios(14.0)); // TODO: newVisibleFunctionTableWithDescriptor // - (nullable id)newVisibleFunctionTableWithDescriptor:(MTLVisibleFunctionTableDescriptor * __nonnull)descriptor - // API_AVAILABLE(macos(11.0), ios(14.0)); - // TODO: newIntersectionFunctionTableWithDescriptor - // - (nullable id )newIntersectionFunctionTableWithDescriptor:(MTLIntersectionFunctionTableDescriptor * _Nonnull)descriptor + + /// Only available on (macos(11.0), ios(14.0)) + pub fn new_intersection_function_table_with_descriptor( + &self, + descriptor: &IntersectionFunctionTableDescriptorRef, + ) -> IntersectionFunctionTable { + unsafe { msg_send![self, newIntersectionFunctionTableWithDescriptor: descriptor] } + } } +/// See pub enum MTLStageInputOutputDescriptor {} foreign_obj_type! { type CType = MTLStageInputOutputDescriptor; pub struct StageInputOutputDescriptor; - pub struct StageInputOutputDescriptorRef; } impl StageInputOutputDescriptor { @@ -379,12 +386,12 @@ impl StageInputOutputDescriptorRef { } } +/// See pub enum MTLAttributeDescriptorArray {} foreign_obj_type! { type CType = MTLAttributeDescriptorArray; pub struct AttributeDescriptorArray; - pub struct AttributeDescriptorArrayRef; } impl AttributeDescriptorArrayRef { @@ -397,12 +404,12 @@ impl AttributeDescriptorArrayRef { } } +/// See pub enum MTLAttributeDescriptor {} foreign_obj_type! { type CType = MTLAttributeDescriptor; pub struct AttributeDescriptor; - pub struct AttributeDescriptorRef; } impl AttributeDescriptorRef { @@ -431,12 +438,12 @@ impl AttributeDescriptorRef { } } +/// See pub enum MTLBufferLayoutDescriptorArray {} foreign_obj_type! { type CType = MTLBufferLayoutDescriptorArray; pub struct BufferLayoutDescriptorArray; - pub struct BufferLayoutDescriptorArrayRef; } impl BufferLayoutDescriptorArrayRef { @@ -453,12 +460,12 @@ impl BufferLayoutDescriptorArrayRef { } } +/// See pub enum MTLBufferLayoutDescriptor {} foreign_obj_type! { type CType = MTLBufferLayoutDescriptor; pub struct BufferLayoutDescriptor; - pub struct BufferLayoutDescriptorRef; } impl BufferLayoutDescriptorRef { diff --git a/third_party/rust/metal/src/pipeline/mod.rs b/third_party/rust/metal/src/pipeline/mod.rs index e65d28d6ca2b..fe46bcad4385 100644 --- a/third_party/rust/metal/src/pipeline/mod.rs +++ b/third_party/rust/metal/src/pipeline/mod.rs @@ -13,6 +13,7 @@ mod render; pub use self::compute::*; pub use self::render::*; +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ -29,12 +30,12 @@ impl Default for MTLMutability { } } +/// See pub enum MTLPipelineBufferDescriptorArray {} foreign_obj_type! { type CType = MTLPipelineBufferDescriptorArray; pub struct PipelineBufferDescriptorArray; - pub struct PipelineBufferDescriptorArrayRef; } impl PipelineBufferDescriptorArrayRef { @@ -51,12 +52,12 @@ impl PipelineBufferDescriptorArrayRef { } } +/// See pub enum MTLPipelineBufferDescriptor {} foreign_obj_type! 
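// Sketch of the newly exposed intersection-function-table entry point (macOS 11+,
// iOS 14+). IntersectionFunctionTableDescriptor and its set_function_count setter are
// assumed from the new accelerator_structure module, which this hunk does not show:
//
//     let ift_desc = IntersectionFunctionTableDescriptor::new();
//     ift_desc.set_function_count(1);
//     let table = pipeline_state.new_intersection_function_table_with_descriptor(&ift_desc);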
{ type CType = MTLPipelineBufferDescriptor; pub struct PipelineBufferDescriptor; - pub struct PipelineBufferDescriptorRef; } impl PipelineBufferDescriptorRef { diff --git a/third_party/rust/metal/src/pipeline/render.rs b/third_party/rust/metal/src/pipeline/render.rs index a4b3a629e19e..6731c15a24b5 100644 --- a/third_party/rust/metal/src/pipeline/render.rs +++ b/third_party/rust/metal/src/pipeline/render.rs @@ -9,6 +9,7 @@ use super::*; use objc::runtime::{NO, YES}; +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ -34,6 +35,7 @@ pub enum MTLBlendFactor { OneMinusSource1Alpha = 18, } +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ -46,6 +48,7 @@ pub enum MTLBlendOperation { } bitflags! { + /// See pub struct MTLColorWriteMask: NSUInteger { const None = 0; const Red = 0x1 << 3; @@ -56,6 +59,7 @@ bitflags! { } } +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ -71,12 +75,12 @@ pub enum MTLPrimitiveTopologyClass { // TODO: MTLTessellationFactorFormat // TODO: MTLTessellationControlPointIndexType +/// See pub enum MTLRenderPipelineColorAttachmentDescriptor {} foreign_obj_type! { type CType = MTLRenderPipelineColorAttachmentDescriptor; pub struct RenderPipelineColorAttachmentDescriptor; - pub struct RenderPipelineColorAttachmentDescriptorRef; } impl RenderPipelineColorAttachmentDescriptorRef { @@ -159,12 +163,12 @@ impl RenderPipelineColorAttachmentDescriptorRef { } } +/// See pub enum MTLRenderPipelineReflection {} foreign_obj_type! { type CType = MTLRenderPipelineReflection; pub struct RenderPipelineReflection; - pub struct RenderPipelineReflectionRef; } impl RenderPipelineReflection { @@ -209,12 +213,12 @@ impl RenderPipelineReflectionRef { } } +/// TODO: Find documentation link. pub enum MTLArgumentArray {} foreign_obj_type! { type CType = MTLArgumentArray; pub struct ArgumentArray; - pub struct ArgumentArrayRef; } impl ArgumentArrayRef { @@ -227,12 +231,12 @@ impl ArgumentArrayRef { } } +/// See pub enum MTLComputePipelineReflection {} foreign_obj_type! { type CType = MTLComputePipelineReflection; pub struct ComputePipelineReflection; - pub struct ComputePipelineReflectionRef; } impl ComputePipelineReflectionRef { @@ -242,12 +246,274 @@ impl ComputePipelineReflectionRef { } } +/// See +/// Only available in (macos(13.0), ios(16.0)) +pub enum MTLMeshRenderPipelineDescriptor {} + +foreign_obj_type! 
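// The blend enums above feed RenderPipelineColorAttachmentDescriptor; a typical
// alpha-blending setup, using setter names from the existing metal-rs API (a sketch):
//
//     let att = pipeline_desc.color_attachments().object_at(0).unwrap();
//     att.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
//     att.set_blending_enabled(true);
//     att.set_source_rgb_blend_factor(MTLBlendFactor::SourceAlpha);
//     att.set_destination_rgb_blend_factor(MTLBlendFactor::OneMinusSourceAlpha);
//     att.set_rgb_blend_operation(MTLBlendOperation::Add);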
{ + type CType = MTLMeshRenderPipelineDescriptor; + pub struct MeshRenderPipelineDescriptor; +} + +impl MeshRenderPipelineDescriptor { + pub fn new() -> Self { + unsafe { + let class = class!(MTLMeshRenderPipelineDescriptor); + msg_send![class, new] + } + } +} + +impl MeshRenderPipelineDescriptorRef { + pub fn color_attachments(&self) -> &RenderPipelineColorAttachmentDescriptorArrayRef { + unsafe { msg_send![self, colorAttachments] } + } + + pub fn depth_attachment_pixel_format(&self) -> MTLPixelFormat { + unsafe { msg_send![self, depthAttachmentPixelFormat] } + } + + pub fn set_depth_attachment_pixel_format(&self, pixel_format: MTLPixelFormat) { + unsafe { msg_send![self, setDepthAttachmentPixelFormat: pixel_format] } + } + + pub fn fragment_buffers(&self) -> Option<&PipelineBufferDescriptorArrayRef> { + unsafe { msg_send![self, fragmentBuffers] } + } + + pub fn fragment_function(&self) -> Option<&FunctionRef> { + unsafe { msg_send![self, fragmentFunction] } + } + + pub fn set_fragment_function(&self, function: Option<&FunctionRef>) { + unsafe { msg_send![self, setFragmentFunction: function] } + } + + pub fn is_alpha_to_coverage_enabled(&self) -> bool { + unsafe { + match msg_send![self, isAlphaToCoverageEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_alpha_to_coverage_enabled(&self, enabled: bool) { + unsafe { msg_send![self, setAlphaToCoverageEnabled: enabled] } + } + + pub fn is_alpha_to_one_enabled(&self) -> bool { + unsafe { + match msg_send![self, isAlphaToOneEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_alpha_to_one_enabled(&self, enabled: bool) { + unsafe { msg_send![self, setAlphaToOneEnabled: enabled] } + } + + pub fn is_rasterization_enabled(&self) -> bool { + unsafe { + match msg_send![self, isRasterizationEnabled] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_rasterization_enabled(&self, enabled: bool) { + unsafe { msg_send![self, setRasterizationEnabled: enabled] } + } + + pub fn label(&self) -> &str { + unsafe { + let label = msg_send![self, label]; + crate::nsstring_as_str(label) + } + } + + pub fn set_label(&self, label: &str) { + unsafe { + let nslabel = crate::nsstring_from_str(label); + let () = msg_send![self, setLabel: nslabel]; + } + } + + pub fn max_total_threadgroups_per_mesh_grid(&self) -> NSUInteger { + unsafe { msg_send![self, maxTotalThreadgroupsPerMeshGrid] } + } + + pub fn set_max_total_threadgroups_per_mesh_grid( + &self, + max_total_threadgroups_per_mesh_grid: NSUInteger, + ) { + unsafe { + msg_send![ + self, + setMaxTotalThreadgroupsPerMeshGrid: max_total_threadgroups_per_mesh_grid + ] + } + } + + pub fn max_total_threads_per_mesh_threadgroup(&self) -> NSUInteger { + unsafe { msg_send![self, maxTotalThreadsPerMeshThreadgroup] } + } + + pub fn set_max_total_threads_per_mesh_threadgroup( + &self, + max_total_threads_per_mesh_threadgroup: NSUInteger, + ) { + unsafe { + msg_send![ + self, + setMaxTotalThreadsPerMeshThreadgroup: max_total_threads_per_mesh_threadgroup + ] + } + } + + pub fn max_total_threads_per_object_threadgroup(&self) -> NSUInteger { + unsafe { msg_send![self, maxTotalThreadsPerObjectThreadgroup] } + } + + pub fn set_max_total_threads_per_object_threadgroup( + &self, + max_total_threads_per_object_threadgroup: NSUInteger, + ) { + unsafe { + msg_send![ + self, + setMaxTotalThreadsPerObjectThreadgroup: max_total_threads_per_object_threadgroup + ] + } + } + + pub fn max_vertex_amplification_count(&self) -> NSUInteger { + 
unsafe { msg_send![self, maxVertexAmplificationCount] } + } + + pub fn set_max_vertex_amplification_count(&self, max_vertex_amplification_count: NSUInteger) { + unsafe { + msg_send![ + self, + setMaxVertexAmplificationCount: max_vertex_amplification_count + ] + } + } + + pub fn mesh_buffers(&self) -> Option<&PipelineBufferDescriptorArrayRef> { + unsafe { msg_send![self, meshBuffers] } + } + + pub fn mesh_function(&self) -> Option<&FunctionRef> { + unsafe { msg_send![self, meshFunction] } + } + + pub fn set_mesh_function(&self, function: Option<&FunctionRef>) { + unsafe { msg_send![self, setMeshFunction: function] } + } + + pub fn mesh_threadgroup_size_is_multiple_of_thread_execution_width(&self) -> bool { + unsafe { + match msg_send![self, isMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_mesh_threadgroup_size_is_multiple_of_thread_execution_width( + &self, + mesh_threadgroup_size_is_multiple_of_thread_execution_width: bool, + ) { + unsafe { + msg_send![ + self, + setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth: + mesh_threadgroup_size_is_multiple_of_thread_execution_width + ] + } + } + + pub fn object_buffers(&self) -> Option<&PipelineBufferDescriptorArrayRef> { + unsafe { msg_send![self, objectBuffers] } + } + + pub fn object_function(&self) -> Option<&FunctionRef> { + unsafe { msg_send![self, objectFunction] } + } + + pub fn set_object_function(&self, function: Option<&FunctionRef>) { + unsafe { msg_send![self, setObjectFunction: function] } + } + + pub fn object_threadgroup_size_is_multiple_of_thread_execution_width(&self) -> bool { + unsafe { + match msg_send![ + self, + isObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth + ] { + YES => true, + NO => false, + _ => unreachable!(), + } + } + } + + pub fn set_object_threadgroup_size_is_multiple_of_thread_execution_width( + &self, + object_threadgroup_size_is_multiple_of_thread_execution_width: bool, + ) { + unsafe { + msg_send![ + self, + setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth: + object_threadgroup_size_is_multiple_of_thread_execution_width + ] + } + } + + pub fn payload_memory_length(&self) -> NSUInteger { + unsafe { msg_send![self, payloadMemoryLength] } + } + + pub fn set_payload_memory_length(&self, payload_memory_length: NSUInteger) { + unsafe { msg_send![self, setPayloadMemoryLength: payload_memory_length] } + } + + pub fn raster_sample_count(&self) -> NSUInteger { + unsafe { msg_send![self, rasterSampleCount] } + } + + pub fn set_raster_sample_count(&self, raster_sample_count: NSUInteger) { + unsafe { msg_send![self, setRasterSampleCount: raster_sample_count] } + } + + pub fn stencil_attachment_pixel_format(&self) -> MTLPixelFormat { + unsafe { msg_send![self, stencilAttachmentPixelFormat] } + } + + pub fn set_stencil_attachment_pixel_format(&self, pixel_format: MTLPixelFormat) { + unsafe { msg_send![self, setStencilAttachmentPixelFormat: pixel_format] } + } + + pub fn reset(&self) { + unsafe { msg_send![self, reset] } + } +} + +/// See pub enum MTLRenderPipelineDescriptor {} foreign_obj_type! { type CType = MTLRenderPipelineDescriptor; pub struct RenderPipelineDescriptor; - pub struct RenderPipelineDescriptorRef; } impl RenderPipelineDescriptor { @@ -460,12 +726,12 @@ impl RenderPipelineDescriptorRef { } } +/// See pub enum MTLRenderPipelineState {} foreign_obj_type! 
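// Pulling the new mesh-shading descriptor methods together (all added above; creating
// the pipeline state from the descriptor is a Device-side call outside this hunk):
//
//     let desc = MeshRenderPipelineDescriptor::new();
//     desc.set_object_function(Some(&object_fn));
//     desc.set_mesh_function(Some(&mesh_fn));
//     desc.set_fragment_function(Some(&fragment_fn));
//     desc.set_max_total_threads_per_mesh_threadgroup(64);
//     desc.set_payload_memory_length(16 * 1024);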
{ type CType = MTLRenderPipelineState; pub struct RenderPipelineState; - pub struct RenderPipelineStateRef; } impl RenderPipelineStateRef { @@ -481,12 +747,12 @@ impl RenderPipelineStateRef { } } +/// See pub enum MTLRenderPipelineColorAttachmentDescriptorArray {} foreign_obj_type! { type CType = MTLRenderPipelineColorAttachmentDescriptorArray; pub struct RenderPipelineColorAttachmentDescriptorArray; - pub struct RenderPipelineColorAttachmentDescriptorArrayRef; } impl RenderPipelineColorAttachmentDescriptorArrayRef { diff --git a/third_party/rust/metal/src/renderpass.rs b/third_party/rust/metal/src/renderpass.rs index ed4f60a43da2..f9aa50b5aaaf 100644 --- a/third_party/rust/metal/src/renderpass.rs +++ b/third_party/rust/metal/src/renderpass.rs @@ -7,6 +7,7 @@ use super::*; +/// See #[repr(u64)] #[derive(Copy, Clone, Debug)] pub enum MTLLoadAction { @@ -15,6 +16,7 @@ pub enum MTLLoadAction { Clear = 2, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug)] pub enum MTLStoreAction { @@ -26,6 +28,7 @@ pub enum MTLStoreAction { CustomSampleDepthStore = 5, } +/// See #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct MTLClearColor { @@ -47,6 +50,7 @@ impl MTLClearColor { } } +/// See #[repr(u32)] #[allow(non_camel_case_types)] pub enum MTLMultisampleStencilResolveFilter { @@ -54,12 +58,12 @@ pub enum MTLMultisampleStencilResolveFilter { DepthResolvedSample = 1, } +/// See pub enum MTLRenderPassAttachmentDescriptor {} foreign_obj_type! { type CType = MTLRenderPassAttachmentDescriptor; pub struct RenderPassAttachmentDescriptor; - pub struct RenderPassAttachmentDescriptorRef; } impl RenderPassAttachmentDescriptorRef { @@ -144,13 +148,13 @@ impl RenderPassAttachmentDescriptorRef { } } +/// See pub enum MTLRenderPassColorAttachmentDescriptor {} foreign_obj_type! { type CType = MTLRenderPassColorAttachmentDescriptor; pub struct RenderPassColorAttachmentDescriptor; - pub struct RenderPassColorAttachmentDescriptorRef; - type ParentType = RenderPassAttachmentDescriptorRef; + type ParentType = RenderPassAttachmentDescriptor; } impl RenderPassColorAttachmentDescriptor { @@ -172,13 +176,13 @@ impl RenderPassColorAttachmentDescriptorRef { } } +/// See pub enum MTLRenderPassDepthAttachmentDescriptor {} foreign_obj_type! { type CType = MTLRenderPassDepthAttachmentDescriptor; pub struct RenderPassDepthAttachmentDescriptor; - pub struct RenderPassDepthAttachmentDescriptorRef; - type ParentType = RenderPassAttachmentDescriptorRef; + type ParentType = RenderPassAttachmentDescriptor; } impl RenderPassDepthAttachmentDescriptorRef { @@ -191,13 +195,13 @@ impl RenderPassDepthAttachmentDescriptorRef { } } +/// See pub enum MTLRenderPassStencilAttachmentDescriptor {} foreign_obj_type! { type CType = MTLRenderPassStencilAttachmentDescriptor; pub struct RenderPassStencilAttachmentDescriptor; - pub struct RenderPassStencilAttachmentDescriptorRef; - type ParentType = RenderPassAttachmentDescriptorRef; + type ParentType = RenderPassAttachmentDescriptor; } impl RenderPassStencilAttachmentDescriptorRef { @@ -221,12 +225,12 @@ impl RenderPassStencilAttachmentDescriptorRef { } } +/// See pub enum MTLRenderPassColorAttachmentDescriptorArray {} foreign_obj_type! 
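// Typical render-pass construction with the attachment descriptors above, using
// setter names from the existing metal-rs API (a sketch):
//
//     let pass = RenderPassDescriptor::new();
//     let color = pass.color_attachments().object_at(0).unwrap();
//     color.set_texture(Some(&target_texture));
//     color.set_load_action(MTLLoadAction::Clear);
//     color.set_clear_color(MTLClearColor::new(0.0, 0.0, 0.0, 1.0));
//     color.set_store_action(MTLStoreAction::Store);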
{ type CType = MTLRenderPassColorAttachmentDescriptorArray; pub struct RenderPassColorAttachmentDescriptorArray; - pub struct RenderPassColorAttachmentDescriptorArrayRef; } impl RenderPassColorAttachmentDescriptorArrayRef { @@ -246,12 +250,117 @@ impl RenderPassColorAttachmentDescriptorArrayRef { } } +/// See +pub enum MTLRenderPassSampleBufferAttachmentDescriptor {} + +foreign_obj_type! { + type CType = MTLRenderPassSampleBufferAttachmentDescriptor; + pub struct RenderPassSampleBufferAttachmentDescriptor; +} + +impl RenderPassSampleBufferAttachmentDescriptor { + pub fn new() -> Self { + let class = class!(MTLRenderPassSampleBufferAttachmentDescriptor); + unsafe { msg_send![class, new] } + } +} + +impl RenderPassSampleBufferAttachmentDescriptorRef { + pub fn sample_buffer(&self) -> &CounterSampleBufferRef { + unsafe { msg_send![self, sampleBuffer] } + } + + pub fn set_sample_buffer(&self, sample_buffer: &CounterSampleBufferRef) { + unsafe { msg_send![self, setSampleBuffer: sample_buffer] } + } + + pub fn start_of_vertex_sample_index(&self) -> NSUInteger { + unsafe { msg_send![self, startOfVertexSampleIndex] } + } + + pub fn set_start_of_vertex_sample_index(&self, start_of_vertex_sample_index: NSUInteger) { + unsafe { + msg_send![ + self, + setStartOfVertexSampleIndex: start_of_vertex_sample_index + ] + } + } + + pub fn end_of_vertex_sample_index(&self) -> NSUInteger { + unsafe { msg_send![self, endOfVertexSampleIndex] } + } + + pub fn set_end_of_vertex_sample_index(&self, end_of_vertex_sample_index: NSUInteger) { + unsafe { msg_send![self, setEndOfVertexSampleIndex: end_of_vertex_sample_index] } + } + + pub fn start_of_fragment_sample_index(&self) -> NSUInteger { + unsafe { msg_send![self, startOfFragmentSampleIndex] } + } + + pub fn set_start_of_fragment_sample_index(&self, start_of_fragment_sample_index: NSUInteger) { + unsafe { + msg_send![ + self, + setStartOfFragmentSampleIndex: start_of_fragment_sample_index + ] + } + } + + pub fn end_of_fragment_sample_index(&self) -> NSUInteger { + unsafe { msg_send![self, endOfFragmentSampleIndex] } + } + + pub fn set_end_of_fragment_sample_index(&self, end_of_fragment_sample_index: NSUInteger) { + unsafe { + msg_send![ + self, + setEndOfFragmentSampleIndex: end_of_fragment_sample_index + ] + } + } +} + +/// See +pub enum MTLRenderPassSampleBufferAttachmentDescriptorArray {} + +foreign_obj_type! { + type CType = MTLRenderPassSampleBufferAttachmentDescriptorArray; + pub struct RenderPassSampleBufferAttachmentDescriptorArray; +} + +impl RenderPassSampleBufferAttachmentDescriptorArrayRef { + pub fn object_at( + &self, + index: NSUInteger, + ) -> Option<&RenderPassSampleBufferAttachmentDescriptorRef> { + unsafe { msg_send![self, objectAtIndexedSubscript: index] } + } + + pub fn set_object_at( + &self, + index: NSUInteger, + attachment: Option<&RenderPassSampleBufferAttachmentDescriptorRef>, + ) { + unsafe { + msg_send![self, setObject:attachment + atIndexedSubscript:index] + } + } +} + +/// ## Important! +/// When configuring a [`MTLTextureDescriptor`] object for use with an attachment, set its usage +/// value to renderTarget if you already know that you intend to use the resulting MTLTexture object in +/// an attachment. This may significantly improve your app’s performance with certain hardware. +/// +/// See pub enum MTLRenderPassDescriptor {} foreign_obj_type! 
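// The "Important!" note above, as code: request render-target usage up front when a
// texture will back an attachment (TextureDescriptor setters per the existing
// metal-rs API; a sketch):
//
//     let td = TextureDescriptor::new();
//     td.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
//     td.set_width(1024);
//     td.set_height(1024);
//     td.set_usage(MTLTextureUsage::RenderTarget); // hint that this texture is an attachment
//     let target = device.new_texture(&td);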
{ type CType = MTLRenderPassDescriptor; pub struct RenderPassDescriptor; - pub struct RenderPassDescriptorRef; } impl RenderPassDescriptor { @@ -327,4 +436,8 @@ impl RenderPassDescriptorRef { pub fn set_default_raster_sample_count(&self, count: NSUInteger) { unsafe { msg_send![self, setDefaultRasterSampleCount: count] } } + + pub fn sample_buffer_attachments(&self) -> &RenderPassSampleBufferAttachmentDescriptorArrayRef { + unsafe { msg_send![self, sampleBufferAttachments] } + } } diff --git a/third_party/rust/metal/src/resource.rs b/third_party/rust/metal/src/resource.rs index 8986a9c83894..56c12da8e947 100644 --- a/third_party/rust/metal/src/resource.rs +++ b/third_party/rust/metal/src/resource.rs @@ -5,9 +5,10 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. -use super::{DeviceRef, HeapRef, NSUInteger}; +use super::*; use objc::runtime::{NO, YES}; +/// See #[repr(u64)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum MTLPurgeableState { @@ -17,6 +18,7 @@ pub enum MTLPurgeableState { Empty = 4, } +/// See #[repr(u64)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum MTLCPUCacheMode { @@ -24,6 +26,7 @@ pub enum MTLCPUCacheMode { WriteCombined = 1, } +/// See #[repr(u64)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum MTLStorageMode { @@ -35,6 +38,8 @@ pub enum MTLStorageMode { } /// Only available on macos(10.15), ios(13.0) +/// +/// See #[repr(u64)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum MTLHazardTrackingMode { @@ -51,6 +56,7 @@ pub const MTLResourceHazardTrackingModeShift: NSUInteger = 8; pub const MTLResourceHazardTrackingModeMask: NSUInteger = 0x3 << MTLResourceHazardTrackingModeShift; bitflags! { + /// See #[allow(non_upper_case_globals)] pub struct MTLResourceOptions: NSUInteger { const CPUCacheModeDefaultCache = (MTLCPUCacheMode::DefaultCache as NSUInteger) << MTLResourceCPUCacheModeShift; @@ -75,6 +81,8 @@ bitflags! { /// /// Enabling certain options for certain resources determines whether the Metal driver should /// convert the resource to another format (for example, whether to decompress a color render target). + /// + /// See pub struct MTLResourceUsage: NSUInteger { /// An option that enables reading from the resource. const Read = 1 << 0; @@ -87,6 +95,7 @@ bitflags! { } } +/// See #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(C)] pub struct MTLSizeAndAlign { @@ -94,12 +103,13 @@ pub struct MTLSizeAndAlign { pub align: NSUInteger, } +/// See pub enum MTLResource {} foreign_obj_type! 
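// The counter-sampling plumbing added in this hunk, end to end. Creating the
// CounterSampleBuffer itself lives in the new counters module and is not shown here;
// `sample_buffer` stands in for one:
//
//     let pass = RenderPassDescriptor::new();
//     let sba = pass.sample_buffer_attachments().object_at(0).unwrap();
//     sba.set_sample_buffer(&sample_buffer);
//     sba.set_start_of_vertex_sample_index(0);
//     sba.set_end_of_vertex_sample_index(1);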
{ type CType = MTLResource; pub struct Resource; - pub struct ResourceRef; + type ParentType = NsObject; } impl ResourceRef { diff --git a/third_party/rust/metal/src/sampler.rs b/third_party/rust/metal/src/sampler.rs index 3dd871a3b48b..e9f6416e2ab2 100644 --- a/third_party/rust/metal/src/sampler.rs +++ b/third_party/rust/metal/src/sampler.rs @@ -7,6 +7,7 @@ use super::{depthstencil::MTLCompareFunction, DeviceRef, NSUInteger}; +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLSamplerMinMagFilter { @@ -14,6 +15,7 @@ pub enum MTLSamplerMinMagFilter { Linear = 1, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLSamplerMipFilter { @@ -22,6 +24,7 @@ pub enum MTLSamplerMipFilter { Linear = 2, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLSamplerAddressMode { @@ -33,6 +36,7 @@ pub enum MTLSamplerAddressMode { ClampToBorderColor = 5, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLSamplerBorderColor { @@ -41,12 +45,12 @@ pub enum MTLSamplerBorderColor { OpaqueWhite = 2, } +/// See pub enum MTLSamplerDescriptor {} foreign_obj_type! { type CType = MTLSamplerDescriptor; pub struct SamplerDescriptor; - pub struct SamplerDescriptorRef; } impl SamplerDescriptor { @@ -135,12 +139,12 @@ impl SamplerDescriptorRef { } } +/// See pub enum MTLSamplerState {} foreign_obj_type! { type CType = MTLSamplerState; pub struct SamplerState; - pub struct SamplerStateRef; } impl SamplerStateRef { diff --git a/third_party/rust/metal/src/sync.rs b/third_party/rust/metal/src/sync.rs index e4b3d8aa671a..bfdaadc8b5c4 100644 --- a/third_party/rust/metal/src/sync.rs +++ b/third_party/rust/metal/src/sync.rs @@ -12,14 +12,15 @@ use std::mem; #[cfg(feature = "dispatch_queue")] use dispatch; +/// See type MTLSharedEventNotificationBlock<'a> = RcBlock<(&'a SharedEventRef, u64), ()>; +/// See pub enum MTLEvent {} foreign_obj_type! { type CType = MTLEvent; pub struct Event; - pub struct EventRef; } impl EventRef { @@ -28,13 +29,13 @@ impl EventRef { } } +/// See pub enum MTLSharedEvent {} foreign_obj_type! { type CType = MTLSharedEvent; pub struct SharedEvent; - pub struct SharedEventRef; - type ParentType = EventRef; + type ParentType = Event; } impl SharedEventRef { @@ -81,12 +82,12 @@ impl SharedEventRef { } } +/// See pub enum MTLSharedEventListener {} foreign_obj_type! { type CType = MTLSharedEventListener; pub struct SharedEventListener; - pub struct SharedEventListenerRef; } impl SharedEventListener { @@ -108,12 +109,12 @@ impl SharedEventListener { } } +/// See pub enum MTLFence {} foreign_obj_type! { type CType = MTLFence; pub struct Fence; - pub struct FenceRef; } impl FenceRef { diff --git a/third_party/rust/metal/src/texture.rs b/third_party/rust/metal/src/texture.rs index 5e9f1b3d52b1..bf506704c673 100644 --- a/third_party/rust/metal/src/texture.rs +++ b/third_party/rust/metal/src/texture.rs @@ -9,6 +9,7 @@ use super::*; use objc::runtime::{NO, YES}; +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] @@ -21,8 +22,10 @@ pub enum MTLTextureType { Cube = 5, CubeArray = 6, D3 = 7, + D2MultisampleArray = 8, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum MTLTextureCompressionType { @@ -31,6 +34,7 @@ pub enum MTLTextureCompressionType { } bitflags! { + /// See pub struct MTLTextureUsage: NSUInteger { const Unknown = 0x0000; const ShaderRead = 0x0001; @@ -40,12 +44,12 @@ bitflags! 
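// Sampler setup with the descriptor types above, assuming metal-rs's existing
// Device::new_sampler entry point (a sketch):
//
//     let sd = SamplerDescriptor::new();
//     sd.set_min_filter(MTLSamplerMinMagFilter::Linear);
//     sd.set_mag_filter(MTLSamplerMinMagFilter::Linear);
//     sd.set_address_mode_s(MTLSamplerAddressMode::ClampToEdge);
//     let sampler = device.new_sampler(&sd);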
{ } } +/// See pub enum MTLTextureDescriptor {} foreign_obj_type! { type CType = MTLTextureDescriptor; pub struct TextureDescriptor; - pub struct TextureDescriptorRef; } impl TextureDescriptor { @@ -173,13 +177,13 @@ impl TextureDescriptorRef { } } +/// See pub enum MTLTexture {} foreign_obj_type! { type CType = MTLTexture; pub struct Texture; - pub struct TextureRef; - type ParentType = ResourceRef; + type ParentType = Resource; } impl TextureRef { @@ -345,4 +349,8 @@ impl TextureRef { slices:slices] } } + + pub fn gpu_resource_id(&self) -> MTLResourceID { + unsafe { msg_send![self, gpuResourceID] } + } } diff --git a/third_party/rust/metal/src/types.rs b/third_party/rust/metal/src/types.rs index 3ea937f06154..92f9daf30540 100644 --- a/third_party/rust/metal/src/types.rs +++ b/third_party/rust/metal/src/types.rs @@ -8,6 +8,7 @@ use super::NSUInteger; use std::default::Default; +/// See #[repr(C)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Default)] pub struct MTLOrigin { @@ -16,6 +17,7 @@ pub struct MTLOrigin { pub z: NSUInteger, } +/// See #[repr(C)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Default)] pub struct MTLSize { @@ -34,6 +36,7 @@ impl MTLSize { } } +/// See #[repr(C)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Default)] pub struct MTLRegion { @@ -72,9 +75,16 @@ impl MTLRegion { } } +/// See #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq, Default)] pub struct MTLSamplePosition { pub x: f32, pub y: f32, } + +#[repr(C)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Default)] +pub struct MTLResourceID { + pub _impl: u64, +} diff --git a/third_party/rust/metal/src/vertexdescriptor.rs b/third_party/rust/metal/src/vertexdescriptor.rs index 201e1e30ad31..5e01be43593e 100644 --- a/third_party/rust/metal/src/vertexdescriptor.rs +++ b/third_party/rust/metal/src/vertexdescriptor.rs @@ -7,6 +7,7 @@ use super::NSUInteger; +/// See #[repr(u64)] #[allow(non_camel_case_types)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] @@ -65,6 +66,7 @@ pub enum MTLVertexFormat { Half = 53, } +/// See #[repr(u64)] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum MTLVertexStepFunction { @@ -75,12 +77,12 @@ pub enum MTLVertexStepFunction { PerPatchControlPoint = 4, } +/// See pub enum MTLVertexBufferLayoutDescriptor {} foreign_obj_type! { type CType = MTLVertexBufferLayoutDescriptor; pub struct VertexBufferLayoutDescriptor; - pub struct VertexBufferLayoutDescriptorRef; } impl VertexBufferLayoutDescriptor { @@ -118,12 +120,12 @@ impl VertexBufferLayoutDescriptorRef { } } +/// See pub enum MTLVertexBufferLayoutDescriptorArray {} foreign_obj_type! { type CType = MTLVertexBufferLayoutDescriptorArray; pub struct VertexBufferLayoutDescriptorArray; - pub struct VertexBufferLayoutDescriptorArrayRef; } impl VertexBufferLayoutDescriptorArrayRef { @@ -143,12 +145,12 @@ impl VertexBufferLayoutDescriptorArrayRef { } } +/// See pub enum MTLVertexAttributeDescriptor {} foreign_obj_type! { type CType = MTLVertexAttributeDescriptor; pub struct VertexAttributeDescriptor; - pub struct VertexAttributeDescriptorRef; } impl VertexAttributeDescriptor { @@ -186,12 +188,12 @@ impl VertexAttributeDescriptorRef { } } +/// See pub enum MTLVertexAttributeDescriptorArray {} foreign_obj_type! 
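// The new bindless handle surfaced above: MTLResourceID is a #[repr(C)] wrapper around
// a u64, so the value returned by the new gpu_resource_id accessor can be written into
// argument buffers directly (a sketch):
//
//     let id: MTLResourceID = texture.gpu_resource_id(); // stable GPU handle for bindless use
//     let raw: u64 = id._impl;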
{ type CType = MTLVertexAttributeDescriptorArray; pub struct VertexAttributeDescriptorArray; - pub struct VertexAttributeDescriptorArrayRef; } impl VertexAttributeDescriptorArrayRef { @@ -211,12 +213,12 @@ impl VertexAttributeDescriptorArrayRef { } } +/// See pub enum MTLVertexDescriptor {} foreign_obj_type! { type CType = MTLVertexDescriptor; pub struct VertexDescriptor; - pub struct VertexDescriptorRef; } impl VertexDescriptor { diff --git a/third_party/rust/naga/.cargo-checksum.json b/third_party/rust/naga/.cargo-checksum.json index 661acb70f0f3..f165f28a41f7 100644 --- a/third_party/rust/naga/.cargo-checksum.json +++ b/third_party/rust/naga/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".github/workflows/ci.yml":"697899467a40e31c57c4511b237cf4f471fc2d96dc6c9abd846bb55033b6f875",".github/workflows/lazy.yml":"3777882e946c4c60c497bc91d4caf82564d49be646391bc735039ee9a7857fa7",".github/workflows/validation-linux.yml":"1b2b1dcaa18cb8833598d35cdcfdddd960ca7720291462275e8f7c5475a355bf",".github/workflows/validation-macos.yml":"8bc6927b151ab897f4ce5fb780ec36772be682f050e34710fb2a67a982a017d0",".github/workflows/validation-windows.yml":"64451900fac68071f79a2c6f074aad0d014fbf8dff517bfd03a614aa13d67e46","CHANGELOG.md":"6094497c5e4e09bb9ae27c2e19acf71e20175f11f8eb982ff7c21d1645ccecbf","Cargo.toml":"9a1bf8775875039196a6077cc92a07faa4b4db2a9113409f7b7bbdca8a93c22d","LICENSE-APACHE":"c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4","LICENSE-MIT":"ca3be8518f5ef097669cea882643fc532025f29972def4fda49df885565a0480","Makefile":"8e623ab7b0b47944db04dd012a22b36b0c0b0b0dec81876a58fe14189b44e3a1","README.md":"4bef75cd21fdd79a6743ec79c3db6839b9d0f88627310f288c45b15baa4fc5d6","benches/criterion.rs":"2356a6c8ea3579ad1d1fd98348b7181ad669880c120e3065711b47c3af5e6ecf","codecov.yml":"729a58d729345908460c9cba0ea95bda5f5173256c407a601d870c68476de9d9","src/arena.rs":"068989016d473ccc35f0c829dfec83d3fd7eb8da05b4b107eadc8dd108376937","src/back/dot/mod.rs":"441b830635f0b1f9479b274cbfe310765a1d89ed6ec28c1f570be3b63d6c516f","src/back/glsl/features.rs":"448eb8fe82b9232e7b37df4318ae84ea78e69d77b14d7cd3274f30eb0d866f30","src/back/glsl/keywords.rs":"3f23b1e63e99a7056c3b223524d5d37000ef7316ae9df25532a726a1157d1dcd","src/back/glsl/mod.rs":"2500068ee57e8b9d526a6f706f3b7a1908ee4ed5bb938a7dcdcf8af3e6d0ca81","src/back/hlsl/conv.rs":"d7e084228eadb729f48a68e4cbface09237974a8ca0ce9608bda8627ca56630e","src/back/hlsl/help.rs":"1846019a9f6d2df68f60061c723bf2ca2bef1b89f885d19529f3b8fc466c86bf","src/back/hlsl/keywords.rs":"a158c52bf9a05514b5a6e29658c2872c1cf2a6f295e8094a6ac4a37f76a761ad","src/back/hlsl/mod.rs":"ba17c2ee42a5943b96c4a2e2357fb795eccb2d885e0b37bfa654d16ba0c01584","src/back/hlsl/storage.rs":"281dcd055eb3ebdda5f0268933dd8a53b18303463c77299f6157e66ee2292dba","src/back/hlsl/writer.rs":"96292432c9dab1e1a15884c53ef6a817d50876bbd1be98074c7af4d23f5a64fa","src/back/mod.rs":"84fa79a967e5509a58c1bb2c9b141d535ea94d01606b4284c3e9a17c6d595a34","src/back/msl/keywords.rs":"e1c2bf804b1a3a56639bedb9c1b3b78fbf920724ef65f91fe2e9fe476ee3fbbf","src/back/msl/mod.rs":"15f94e98fb3dfc743fc0c29e8bb54aed6d7290ffd772d439624ed1f98790711f","src/back/msl/sampler.rs":"210cf28143ac17d6c56a9adf4fc0e3b85b1b16bb8b94771f9304f2a43b7ce78e","src/back/msl/writer.rs":"eeb1191a04655eda6d486877246a331010e76376c3fb0a4411351ca8d85f50bf","src/back/spv/block.rs":"334247c1d41fee5d7d4b37a507f44fb4a85b037dcb32efa6c94f1946a52a1d1d","src/back/spv/helpers.rs":"0ec3381f3ba5856a14d876559f77e21fd5d2f342cb005343a3df2ff58443e4fb","src/back/spv/image.rs":"b7990e409288f4f327be
5a9b99a3053caf7ad753f83a280d87ddc05f10f28df5","src/back/spv/index.rs":"7a9815bdb1f323a780d41092371582f6d4210f305bec5c74e882dbb6d2d49bc3","src/back/spv/instructions.rs":"52165d9204e54d1b9be366435e88a069d31fa87050a815721d305884081cc5a9","src/back/spv/layout.rs":"e263de53cd2f9a03ad94b82b434ce636609bc1ed435a2d1132951663bfaa8ebd","src/back/spv/mod.rs":"990a7a83b43e0a7d0c68a5764b2ea090ae300da7163dc2b34acf0afca093dc7e","src/back/spv/ray.rs":"3fd4b49b34127b694a5751020bdc4e6ae378116d1cb75ac71e49dbaa72fc57be","src/back/spv/recyclable.rs":"9448eedf9766ae2b1916282b5945b218017c6067dceac2da1415580ee5796c73","src/back/spv/selection.rs":"81e404abfa0a977f7c1f76ccb37a78d13ccadbda229048dad53cc67687cc39db","src/back/spv/writer.rs":"1f40c7c553a17bcc42de8ae16f9a4dec1d91e6dc648cae09cc071987c6d25113","src/back/wgsl/mod.rs":"2dd12bbea9ace835850192bb68c5760953da6bac6a636073d1eca19381c0c0b6","src/back/wgsl/writer.rs":"0457d7f5c5ce8a0da1ead4c3412bc00ef0999ecf4163c697be4734d88b14180f","src/block.rs":"89c061206f608365ef3ce03c0d205d7205ef9d892035dd3dfb8d252003159f14","src/front/glsl/ast.rs":"1edfa460812783b18c15eae10d3994ec653cefd094f29e0a14f469f2361f139d","src/front/glsl/builtins.rs":"fe5d381ebe07dc13db6a8debaf0145138aa1b119d0ba2d7a4e08643f0014cec1","src/front/glsl/constants.rs":"18af1457493dc2dd2172b1037fb3f7bbfedab1abdd07c2c31f69a9b2c0922d35","src/front/glsl/context.rs":"1e771471699e7dc8102d84aa0c1d0a72fb07efb18a4ce7bd1eca5256c4eb15d4","src/front/glsl/error.rs":"8617dc35fb3b595b897449076a26696c4556cfd77533c9f957feca48989a553c","src/front/glsl/functions.rs":"253bfa82e4b4233d2fc378ee18685c36a82310cba0a92c6e3e1132a29b374f5e","src/front/glsl/lex.rs":"08736ae8beb955da5b0e6e3e0f45995a824995f7096d516a2910417e9c7afa32","src/front/glsl/mod.rs":"50e5687bc6dc04162fe3b0901691122bc26dc95283ace429b5f6b633073cd016","src/front/glsl/offset.rs":"84ed5a0add0ded719036aba862a3161310f9e7165ecb667bb201789ae15726fd","src/front/glsl/parser.rs":"1af2600fb6817cada8f68ad95f97f91dafa8225d4857b28d001194274b63557a","src/front/glsl/parser/declarations.rs":"c86d4170612449b02585a5fe1e746c717dfbe9b9304aa50491ce434190431412","src/front/glsl/parser/expressions.rs":"aaa1049ee444a65b7be6fa090b05709e7637431b3914af82ee683ead60f038e0","src/front/glsl/parser/functions.rs":"f604cdbdd07ba1b0d57f5b1bb47e2cae878be0d7f44a25007330088566f6d14f","src/front/glsl/parser/types.rs":"932d95c60e6abf1e69bbeb670d045571d6afd89d320d426e18ced21325fb4663","src/front/glsl/parser_tests.rs":"c9c81bf985764584f92a08f2f8e1b029681b40fe89768181dec729e11aea6ba1","src/front/glsl/token.rs":"42325adbef5bfc9e6f9e40e7ed3cbfa7ef498c05799fdb7694b123e6450410e1","src/front/glsl/types.rs":"fc57723f04b6c1abd9a667d779be78cca2736805a5a77ae7735f2aa6eec985fc","src/front/glsl/variables.rs":"f9b1f0a72148250954e3c0396ab5d294526aec4018a87c9f448b0f5b130fcbac","src/front/interpolator.rs":"1f19f8634c639981d83aa8aa132ca5634bdc99830212eec3eac74c856bd25274","src/front/mod.rs":"11a271a356909b706c1a8869f0fd7c9533a0624e90c5de989bc839e04cd95728","src/front/spv/convert.rs":"80cf575de903d69c158b067706bfcda85c3ada006516d01e6e3a0678396199e0","src/front/spv/error.rs":"22c1489517b56463090599072ac4cb4f7fee0f25340d6497b23b5d7afffbda08","src/front/spv/function.rs":"724b794f366c7e51420e5717d8263e4ccbad0a8a79cc84c4be453fb4538ef75f","src/front/spv/image.rs":"4932300bf1f879601e6bed45427ba4645400fb57d97f578b49edd4e0bb096b0e","src/front/spv/mod.rs":"b8592156f6cc2d698c970ccbb9aa9a13865455507382b9fdbc30006ea523cb03","src/front/spv/null.rs":"93e364cd1db48e7a6344e8c7a9aa14a18812352c9011c471f305b6657acf9b7c","src/front/type_gen.rs":"c92
93aee27effe07a16977c28cf1109d0fdb3a3200db37039ad1793f308ba91e","src/front/wgsl/error.rs":"7bc6a39cf62e2bb7bf6f60ae38511b398a0223dee8ed839efd72852052242e59","src/front/wgsl/index.rs":"2b9a4929a46bd822d3ed6f9a150e24d437e5bdca8293eb748aebe80ce7e74153","src/front/wgsl/lower/construction.rs":"0bb1f32179ca150114aad23943181c0bc255ea616c91553088f0758680daaf5c","src/front/wgsl/lower/mod.rs":"d7d9ca557f5bfe8a1928eef0b2b86bd96eb0dba198a1b3f009377267f1c7850c","src/front/wgsl/mod.rs":"76d721a68369fdaf543397d7c2ef0fa48ed9eeeb1735825c28de8a13477a77a9","src/front/wgsl/parse/ast.rs":"5f86c50980015459505bfcc14f55e3c9d6704a02ed49679441d17fdc61364ef3","src/front/wgsl/parse/conv.rs":"25d7f636af48d361931f9cf8f46b70a07ff82391ebfc6ea434d097e417f314c5","src/front/wgsl/parse/lexer.rs":"15e77c8b45de0b19e75c09b4e3b991f092ccff1df4aadf5cb89f9ce131e97b20","src/front/wgsl/parse/mod.rs":"f875a97dd6fcbc0c4e88786b7a319c08f3d4348f907160984241efaad19281e9","src/front/wgsl/parse/number.rs":"64fe5d5f6b1d6a6acf15f4cad3cda8045ad5d82efda179627f848aba9c47e605","src/front/wgsl/tests.rs":"59b9b4ae86f2b0e2724783e0b0a1674399d03d3af54bc1d03165bc5986f44630","src/keywords/mod.rs":"0138f3931f8af0b0a05174549d0fd2152945b027dc3febefc1bbd676581d2e45","src/keywords/wgsl.rs":"7c3b364b60ca29cb8a68ef781de9ecd28b76b74bed18bf18a35d2ebffaa855ab","src/lib.rs":"3eb9212f6c2f6993fd7eca69aa9fdcf68b20bf66a23ec449b779b764c484f84f","src/proc/index.rs":"9f1be774ac7e47b899066116d292604faed0acae5751628869c7767b919b71aa","src/proc/layouter.rs":"a374b50ecfc1d22504afd0d0c9f4f0e2df3eba3652587dffcf56a5455285bd1a","src/proc/mod.rs":"ec544c74bfae821a6edcfc5b5ffd41b6b4e52e31ba9a04b3ad4d0be983f74b67","src/proc/namer.rs":"098fecb1c41fb32a56d3aa9887833cb07198d4ff4b9ff54e8be7df17cecccdf6","src/proc/terminator.rs":"87c2b316db75f9f4d197e6a2a9372dfd3311c5701785e98a787a4fa90f9d1d84","src/proc/typifier.rs":"2189e973df4c2a7129c10d6640dedfa07c632283b2247cc39846b24dcac4d6e2","src/span.rs":"67cb6293827f1909b0fce5400df8d39e19a377a16256c61f0a3301aa4248a4bd","src/valid/analyzer.rs":"421be289a79d31952ee05b5a15cfb74dc7abaa5e49d3c637bafaff5b39ca8485","src/valid/compose.rs":"6fcc752bd4241e37c8795dcd9e1ce077a3bd5509a36277dfbb756762d6415367","src/valid/expression.rs":"61e0238003144684ab7253a418031af48f7feb6f33edb82efa5332e870ca061a","src/valid/function.rs":"240b7b71e9ad08ee66c50d9350b38d00cfa8df9c45c0d44d0a5b556330ffbe00","src/valid/handles.rs":"1bf09cee6b9ec31dca338da3ef0fda81d4cee4950fca274f295056f5827dcbaf","src/valid/interface.rs":"c200628cf20b8236956e97fbd6a95a9a2eb8547be51bc83cc647b6996f0205b3","src/valid/mod.rs":"239c9139689240c1028c335411f4adc5897691ddf37ce217f63f567463a3ef0e","src/valid/type.rs":"3c22f4efb90c4e4d9adb5881aced462868af0736674181ee3c798f12220ec025"},"package":null} \ No newline at end of file 
+{"files":{".cargo/config.toml":"d7389d2a0c08ec72b79e83a3c76980903e3f9123625c32e69c798721193e2e74",".github/workflows/ci.yml":"697899467a40e31c57c4511b237cf4f471fc2d96dc6c9abd846bb55033b6f875",".github/workflows/lazy.yml":"3777882e946c4c60c497bc91d4caf82564d49be646391bc735039ee9a7857fa7",".github/workflows/validation-linux.yml":"133affe3d4b539846d5502af73d0ba1cab8ab6246222a072326324e01fd11216",".github/workflows/validation-macos.yml":"b0431e3abc99313587c4245757416850e54fdd71c15c1c646b192b82b95e3c14",".github/workflows/validation-windows.yml":"fce9dcaf24bfdb437f7202779d41fbf8bd68a0c2f10aabf0564255702a59f6f3","CHANGELOG.md":"58dbdd34df49249dd2d3cd52229483e2dd70cf60f9fa55301d97d7ad073f43ba","Cargo.toml":"8b678ff9b8f422a3b96b35b5f9e20e386aa6e7f9e53be4143d62fb98736712c0","LICENSE-APACHE":"c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4","LICENSE-MIT":"ca3be8518f5ef097669cea882643fc532025f29972def4fda49df885565a0480","README.md":"4bef75cd21fdd79a6743ec79c3db6839b9d0f88627310f288c45b15baa4fc5d6","benches/criterion.rs":"2356a6c8ea3579ad1d1fd98348b7181ad669880c120e3065711b47c3af5e6ecf","codecov.yml":"729a58d729345908460c9cba0ea95bda5f5173256c407a601d870c68476de9d9","src/arena.rs":"068989016d473ccc35f0c829dfec83d3fd7eb8da05b4b107eadc8dd108376937","src/back/dot/mod.rs":"688b0d18c16e6440c496a5156229186caeca06b4cab57bf2126c3823b5012c49","src/back/glsl/features.rs":"5de724dbfb630191fdbdd25674b379ab9408432a8faff9363618e9e4cdfa7f10","src/back/glsl/keywords.rs":"3f23b1e63e99a7056c3b223524d5d37000ef7316ae9df25532a726a1157d1dcd","src/back/glsl/mod.rs":"ea437b02300b480fc3bdd27fac3b7a314640348848adaa4cf1fd1fc50241ad67","src/back/hlsl/conv.rs":"f4ab12d3d5bacfc4d2ddc3f9e4145888a8c589a4f8bba30032055d6f06de1f3d","src/back/hlsl/help.rs":"b2556ce20fb481ddfb3ea47c03bbb8874588a7c01eb85813c4fc1831a89a051b","src/back/hlsl/keywords.rs":"e9d7771c48420b73ad109d23a68711a3307f12d4fb0d25da72f49b426610679a","src/back/hlsl/mod.rs":"ba17c2ee42a5943b96c4a2e2357fb795eccb2d885e0b37bfa654d16ba0c01584","src/back/hlsl/storage.rs":"3d3a17e7c28d7d9c3365413a849f010358c93f13317f5a475ec15036e0d42f6c","src/back/hlsl/writer.rs":"60c2525e13b7cb0042e7a615634f3cc7d1996eef21ce38078bd6cf1ea345aece","src/back/mod.rs":"46ff21541925b092bafa5b1b891d50879195aa97723ead5496e56ec3ef4823a1","src/back/msl/keywords.rs":"e1c2bf804b1a3a56639bedb9c1b3b78fbf920724ef65f91fe2e9fe476ee3fbbf","src/back/msl/mod.rs":"15f94e98fb3dfc743fc0c29e8bb54aed6d7290ffd772d439624ed1f98790711f","src/back/msl/sampler.rs":"210cf28143ac17d6c56a9adf4fc0e3b85b1b16bb8b94771f9304f2a43b7ce78e","src/back/msl/writer.rs":"4a10cd14cbbb9aed1a1de19d80ec1f20b8dd7234f08ac86c3cb254c7d6679a5e","src/back/spv/block.rs":"b2ba0dc59857d47b59bc179e2375ed658a9c28e1f0f362310bf2e95820fb6ee5","src/back/spv/helpers.rs":"a4e260130f39c7345decec40dadf1e94419c8f6d236ce7a53b5300aa72952a1b","src/back/spv/image.rs":"ef7eea543c2cdc5c76b8f3ed397dc08d527bc7890edf3e68b8933df6933c03fb","src/back/spv/index.rs":"7a9815bdb1f323a780d41092371582f6d4210f305bec5c74e882dbb6d2d49bc3","src/back/spv/instructions.rs":"88b3b8c6b2f9ebe1a9adcfeacbf7bac422077332cd902baf6678cf56eaf31d32","src/back/spv/layout.rs":"e263de53cd2f9a03ad94b82b434ce636609bc1ed435a2d1132951663bfaa8ebd","src/back/spv/mod.rs":"259f3a0070156d94907d0c104a1562f275c4f83f01e697a319e87481d19c5f39","src/back/spv/ray.rs":"fc33992ef61c4eeb19d213b218c31ef27bc73a7a23dde3fe9cf232a5bad0a878","src/back/spv/recyclable.rs":"9448eedf9766ae2b1916282b5945b218017c6067dceac2da1415580ee5796c73","src/back/spv/selection.rs":"81e404abfa0a977f7c1f76ccb37a78d13ccadbda229048
dad53cc67687cc39db","src/back/spv/writer.rs":"30a4ee2e575f8cb11c59072344ebc4787fda78e95f7efc41037842e7d9da09da","src/back/wgsl/mod.rs":"2dd12bbea9ace835850192bb68c5760953da6bac6a636073d1eca19381c0c0b6","src/back/wgsl/writer.rs":"63cf0100f09e381013dbe702e2d2fddf036e74a3106719dc1b433706cf9afc75","src/block.rs":"89c061206f608365ef3ce03c0d205d7205ef9d892035dd3dfb8d252003159f14","src/front/glsl/ast.rs":"3644350edabd0f83c476873ec22715a4b1a12a6cb7c99c98d7e41a6173be4ca5","src/front/glsl/builtins.rs":"fe5d381ebe07dc13db6a8debaf0145138aa1b119d0ba2d7a4e08643f0014cec1","src/front/glsl/constants.rs":"0d91323dc50192f453f8453b088dd7911c7b3eab3e2041d98ff46bbef06e5f5a","src/front/glsl/context.rs":"57cd60f5f2099a4cc47b7e2a7ad368d2e1ee2af319a59415f8219405625b56e2","src/front/glsl/error.rs":"8617dc35fb3b595b897449076a26696c4556cfd77533c9f957feca48989a553c","src/front/glsl/functions.rs":"e1f427be8bd123b09574b1b1ba8adc0fd600c00c0ba81032a9e7bdd2df0343a1","src/front/glsl/lex.rs":"08736ae8beb955da5b0e6e3e0f45995a824995f7096d516a2910417e9c7afa32","src/front/glsl/mod.rs":"50e5687bc6dc04162fe3b0901691122bc26dc95283ace429b5f6b633073cd016","src/front/glsl/offset.rs":"f755eff29868c61452fb8c9f124a6af2070076b8166801968f86f6d105d38298","src/front/glsl/parser.rs":"1af2600fb6817cada8f68ad95f97f91dafa8225d4857b28d001194274b63557a","src/front/glsl/parser/declarations.rs":"1c1c31bca95d36e1b07044052fa7d565dfe3830af38c16ad1d6b1f01c80027ad","src/front/glsl/parser/expressions.rs":"c0b7c1d1e609de8230efde5e8d4c8c3f5f48be50042afc321cfeebfe6e6fbfe3","src/front/glsl/parser/functions.rs":"f604cdbdd07ba1b0d57f5b1bb47e2cae878be0d7f44a25007330088566f6d14f","src/front/glsl/parser/types.rs":"2360e9eaada6f10338e0b832350f2f3de3193575724b4d554f2f0e77f3128afd","src/front/glsl/parser_tests.rs":"c9c81bf985764584f92a08f2f8e1b029681b40fe89768181dec729e11aea6ba1","src/front/glsl/token.rs":"42325adbef5bfc9e6f9e40e7ed3cbfa7ef498c05799fdb7694b123e6450410e1","src/front/glsl/types.rs":"fc57723f04b6c1abd9a667d779be78cca2736805a5a77ae7735f2aa6eec985fc","src/front/glsl/variables.rs":"f9b1f0a72148250954e3c0396ab5d294526aec4018a87c9f448b0f5b130fcbac","src/front/interpolator.rs":"1f19f8634c639981d83aa8aa132ca5634bdc99830212eec3eac74c856bd25274","src/front/mod.rs":"11a271a356909b706c1a8869f0fd7c9533a0624e90c5de989bc839e04cd95728","src/front/spv/convert.rs":"80cf575de903d69c158b067706bfcda85c3ada006516d01e6e3a0678396199e0","src/front/spv/error.rs":"3129fd1fe346441d61d0f641734df7e919919db15df706788d16402ebf480607","src/front/spv/function.rs":"2e31e615c9653bdf8bf56bbeb84923f3e42c05cb2005e5802d2649ba50ad6bd2","src/front/spv/image.rs":"4129b9d1434b2ffa3c9257d99b78993d1b5ecca2880e516f050213023ca476e7","src/front/spv/mod.rs":"db8bcd743468881ba29ad4e4b522724e43bf636ff641549c0b007b6278bf5cda","src/front/spv/null.rs":"fa1772875821559274e1b53d38a02033ee4ee9b738a4466173d400f84319b8f5","src/front/type_gen.rs":"c9293aee27effe07a16977c28cf1109d0fdb3a3200db37039ad1793f308ba91e","src/front/wgsl/error.rs":"62eca11d86964b9c5bc2b04f07bcc110cf96cc80f86f52b9cf69ec5bd17f0ea4","src/front/wgsl/index.rs":"2b9a4929a46bd822d3ed6f9a150e24d437e5bdca8293eb748aebe80ce7e74153","src/front/wgsl/lower/construction.rs":"7683dd9eb8598bbdad99d48209c76d70537e72333ac1ae74c9b39802163cbc89","src/front/wgsl/lower/mod.rs":"3b815ae892b77367c0c349558ad3d68137f02f97a9cf527c33d14f348225c6a8","src/front/wgsl/mod.rs":"30616d1bbc469e8936766de8677f3ab26289f0e7c793a99e04f2060b5adfe573","src/front/wgsl/parse/ast.rs":"5f86c50980015459505bfcc14f55e3c9d6704a02ed49679441d17fdc61364ef3","src/front/wgsl/parse/conv.rs":"25d7
f636af48d361931f9cf8f46b70a07ff82391ebfc6ea434d097e417f314c5","src/front/wgsl/parse/lexer.rs":"15e77c8b45de0b19e75c09b4e3b991f092ccff1df4aadf5cb89f9ce131e97b20","src/front/wgsl/parse/mod.rs":"9452d5f94c9718b9d8fae0cf73f3795bec5ada933e887b79e91af98c509dc5df","src/front/wgsl/parse/number.rs":"64fe5d5f6b1d6a6acf15f4cad3cda8045ad5d82efda179627f848aba9c47e605","src/front/wgsl/tests.rs":"59b9b4ae86f2b0e2724783e0b0a1674399d03d3af54bc1d03165bc5986f44630","src/keywords/mod.rs":"0138f3931f8af0b0a05174549d0fd2152945b027dc3febefc1bbd676581d2e45","src/keywords/wgsl.rs":"7c3b364b60ca29cb8a68ef781de9ecd28b76b74bed18bf18a35d2ebffaa855ab","src/lib.rs":"869e03df50f47c8e3d760bd16a5f3315e107352c353609b41d16c3088babd959","src/proc/index.rs":"a70745ef1be8a87c512ba047e30e0cb0a74008df59739b22460d8980e2ea1065","src/proc/layouter.rs":"0512547d57920c83b2dfd62595aacf8f437fb5b07e40e790e2dca19c71f5eecc","src/proc/mod.rs":"56560cc61addc3969fb368aafc3beb00a365755e344ab8e8948e4e9954235347","src/proc/namer.rs":"9a5c95d5e57ab5397d52eb1b091d4f3c8b818420d3b0729b31ae331392b8b648","src/proc/terminator.rs":"13c59bf00f5b26171d971effc421091f5e00dedddd246c2daa44fe65aeda060a","src/proc/typifier.rs":"8f12db5631c32c87b07ec28baf79b06650559ba0a3d8f335bf5e125518db83e5","src/span.rs":"67cb6293827f1909b0fce5400df8d39e19a377a16256c61f0a3301aa4248a4bd","src/valid/analyzer.rs":"ab7c3f3cc4eafa3e9c50f57f1cd57b1fb713a39704b20df7b512bd32dc5c2e65","src/valid/compose.rs":"eebaa461a3a76526361eb5b16dfae42384dfdfde5afa9b1ddca266a2149c68ad","src/valid/expression.rs":"fd6f4fd4a737a1b4bb9590e32307efd9384be3acfae302ea9827a92b93a3e1ff","src/valid/function.rs":"820355bf1e6c3014ae43907e6bd91544c7d56135b13653a8a899bed6ca2fc9f4","src/valid/handles.rs":"35ba7816c5f3864731b08a56dcb00e95ce284845e655e69a5c12b63c923da67a","src/valid/interface.rs":"a92177f34cd4bafb5a8d80c0aef84715b75a59e44e5c31ffb30ddd429c20ec60","src/valid/mod.rs":"c61cd62478e15b678a32118cbbcc92f29f5fd15038c11b07a001977079349a0d","src/valid/type.rs":"8dc7b9833a59e029f6bdb46d879d03be90fedcddc4c37d8d2a6915d990d58070"},"package":null} \ No newline at end of file diff --git a/third_party/rust/naga/.cargo/config.toml b/third_party/rust/naga/.cargo/config.toml new file mode 100644 index 000000000000..4b0140061716 --- /dev/null +++ b/third_party/rust/naga/.cargo/config.toml @@ -0,0 +1,2 @@ +[alias] +xtask = "run --manifest-path xtask/Cargo.toml --" diff --git a/third_party/rust/naga/.github/workflows/validation-linux.yml b/third_party/rust/naga/.github/workflows/validation-linux.yml index e253e0a42ea3..0c1525e7dbac 100644 --- a/third_party/rust/naga/.github/workflows/validation-linux.yml +++ b/third_party/rust/naga/.github/workflows/validation-linux.yml @@ -8,6 +8,7 @@ on: - 'tests/out/dot/*.dot' - 'tests/out/wgsl/*.wgsl' - 'src/front/wgsl/*' + - 'xtask/**' jobs: validate-linux: @@ -19,10 +20,15 @@ jobs: - name: Install tools run: sudo apt-get install spirv-tools glslang-tools graphviz - - run: make validate-spv + - uses: Swatinem/rust-cache@v2 + with: + workspaces: | + xtask -> target - - run: make validate-glsl + - run: cargo xtask validate spv - - run: make validate-dot + - run: cargo xtask validate glsl - - run: make validate-wgsl + - run: cargo xtask validate dot + + - run: cargo xtask validate wgsl diff --git a/third_party/rust/naga/.github/workflows/validation-macos.yml b/third_party/rust/naga/.github/workflows/validation-macos.yml index 535a0622b3fd..c06563dc12a1 100644 --- a/third_party/rust/naga/.github/workflows/validation-macos.yml +++ 
b/third_party/rust/naga/.github/workflows/validation-macos.yml
@@ -4,6 +4,7 @@ on:
     paths:
       - '.github/workflows/validation-macos.yml'
       - 'tests/out/msl/*.msl'
+      - 'xtask/**'
 
 jobs:
   validate-macos:
@@ -12,4 +13,9 @@
     steps:
       - uses: actions/checkout@v3
 
-      - run: make validate-msl
+      - uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: |
+            xtask -> target
+
+      - run: cargo xtask validate msl
diff --git a/third_party/rust/naga/.github/workflows/validation-windows.yml b/third_party/rust/naga/.github/workflows/validation-windows.yml
index 03942a6782d8..d84b44f62e16 100644
--- a/third_party/rust/naga/.github/workflows/validation-windows.yml
+++ b/third_party/rust/naga/.github/workflows/validation-windows.yml
@@ -4,6 +4,8 @@ on:
     paths:
       - '.github/workflows/validation-windows.yml'
      - 'tests/out/hlsl/*.hlsl'
+      - 'tests/out/hlsl/*.ron'
+      - 'xtask/**'
 
 jobs:
   validate-windows-dxc:
@@ -15,8 +17,12 @@
       - name: Add DirectXShaderCompiler
        uses: napokue/setup-dxc@v1.1.0
 
-      - run: make validate-hlsl-dxc
-        shell: sh
+      - uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: |
+            xtask -> target
+
+      - run: cargo xtask validate hlsl dxc
 
   validate-windows-fxc:
     name: HLSL via FXC
@@ -33,5 +39,9 @@
           | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf8 -Append
         shell: powershell
 
-      - run: make validate-hlsl-fxc
-        shell: sh
+      - uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: |
+            xtask -> target
+
+      - run: cargo xtask validate hlsl fxc
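The workflow hunks above swap the deleted Makefile targets (`make validate-*`) for `cargo xtask validate ...`, dispatched through the `[alias]` entry added to `.cargo/config.toml` earlier in this patch. As a rough illustration of the xtask pattern, a minimal dispatcher could look like the sketch below; the subcommand names mirror the workflows, but everything else here is hypothetical, not the real xtask crate:

    // Hypothetical sketch of an xtask-style dispatcher.
    // `cargo xtask validate msl` reaches main() here via the [alias] entry.
    use std::env;
    use std::process::ExitCode;

    fn main() -> ExitCode {
        let args: Vec<String> = env::args().skip(1).collect();
        match args.first().map(String::as_str) {
            Some("validate") => validate(args.get(1).map(String::as_str)),
            _ => {
                eprintln!("usage: cargo xtask validate <spv|msl|glsl|dot|wgsl|hlsl>");
                ExitCode::FAILURE
            }
        }
    }

    fn validate(target: Option<&str>) -> ExitCode {
        match target {
            // A real implementation would shell out to spirv-val,
            // glslangValidator, dxc/fxc, etc. for the chosen backend.
            Some(t) => {
                println!("validating {t} snapshots");
                ExitCode::SUCCESS
            }
            None => ExitCode::FAILURE,
        }
    }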
diff --git a/third_party/rust/naga/CHANGELOG.md b/third_party/rust/naga/CHANGELOG.md
index 51f0231a90bc..376bbf8989e5 100644
--- a/third_party/rust/naga/CHANGELOG.md
+++ b/third_party/rust/naga/CHANGELOG.md
@@ -1,5 +1,76 @@
 # Change Log
 
+## v0.12 (2023-04-19)
+
+#### GENERAL
+
+- Allow `array_index` to be unsigned. ([#2298](https://github.com/gfx-rs/naga/pull/2298)) **@daxpedda**
+- Add ray query support. ([#2256](https://github.com/gfx-rs/naga/pull/2256)) **@kvark**
+- Add partial derivative builtins. ([#2277](https://github.com/gfx-rs/naga/pull/2277)) **@evahop**
+- Skip `gl_PerVertex` unused builtins in the SPIR-V frontend. ([#2272](https://github.com/gfx-rs/naga/pull/2272)) **@teoxoy**
+- Differentiate between `i32` and `u32` in switch statement cases. ([#2269](https://github.com/gfx-rs/naga/pull/2269)) **@evahop**
+- Fix zero initialization of workgroup memory. ([#2259](https://github.com/gfx-rs/naga/pull/2259)) **@teoxoy**
+- Add `countTrailingZeros`. ([#2243](https://github.com/gfx-rs/naga/pull/2243)) **@gents83**
+- Fix texture built-ins where u32 was expected. ([#2245](https://github.com/gfx-rs/naga/pull/2245)) **@evahop**
+- Add `countLeadingZeros`. ([#2226](https://github.com/gfx-rs/naga/pull/2226)) **@evahop**
+- [glsl/hlsl-out] Write sizes of arrays behind pointers in function arguments. ([#2250](https://github.com/gfx-rs/naga/pull/2250)) **@pluiedev**
+
+#### VALIDATOR
+
+- Validate vertex stage returns the position built-in. ([#2264](https://github.com/gfx-rs/naga/pull/2264)) **@teoxoy**
+- Enforce discard is only used in the fragment stage. ([#2262](https://github.com/gfx-rs/naga/pull/2262)) **@Uriopass**
+- Add `Capabilities::MULTISAMPLED_SHADING`. ([#2255](https://github.com/gfx-rs/naga/pull/2255)) **@teoxoy**
+- Add `Capabilities::EARLY_DEPTH_TEST`. ([#2255](https://github.com/gfx-rs/naga/pull/2255)) **@teoxoy**
+- Add `Capabilities::MULTIVIEW`. ([#2255](https://github.com/gfx-rs/naga/pull/2255)) **@teoxoy**
+- Improve forward declaration validation. ([#2232](https://github.com/gfx-rs/naga/pull/2232)) **@JCapucho**
+
+#### WGSL-IN
+
+- Use `alias` instead of `type` for type aliases. ([#2299](https://github.com/gfx-rs/naga/pull/2299)) **@FL33TW00D**
+- Add predeclared vector and matrix type aliases. ([#2251](https://github.com/gfx-rs/naga/pull/2251)) **@evahop**
+- Improve invalid assignment diagnostic. ([#2233](https://github.com/gfx-rs/naga/pull/2233)) **@SparkyPotato**
+- Expect semicolons wherever required. ([#2233](https://github.com/gfx-rs/naga/pull/2233)) **@SparkyPotato**
+- Fix panic on invalid zero array size. ([#2233](https://github.com/gfx-rs/naga/pull/2233)) **@SparkyPotato**
+- Check for leading `{` while parsing a block. ([#2233](https://github.com/gfx-rs/naga/pull/2233)) **@SparkyPotato**
+
+#### SPV-IN
+
+- Don't apply interpolation to fragment shaders outputs. ([#2239](https://github.com/gfx-rs/naga/pull/2239)) **@JCapucho**
+
+#### GLSL-IN
+
+- Add switch implicit type conversion. ([#2273](https://github.com/gfx-rs/naga/pull/2273)) **@evahop**
+- Document some fields of `naga::front::glsl::context::Context`. ([#2244](https://github.com/gfx-rs/naga/pull/2244)) **@jimblandy**
+- Perform output parameters implicit casts. ([#2063](https://github.com/gfx-rs/naga/pull/2063)) **@JCapucho**
+- Add `not` vector relational builtin. ([#2227](https://github.com/gfx-rs/naga/pull/2227)) **@JCapucho**
+- Add double overloads for relational vector builtins. ([#2227](https://github.com/gfx-rs/naga/pull/2227)) **@JCapucho**
+- Add bool overloads for relational vector builtins. ([#2227](https://github.com/gfx-rs/naga/pull/2227)) **@JCapucho**
+
+#### SPV-OUT
+
+- Fix invalid spirv being generated from integer dot products. ([#2291](https://github.com/gfx-rs/naga/pull/2291)) **@PyryM**
+- Fix adding illegal decorators on fragment outputs. ([#2286](https://github.com/gfx-rs/naga/pull/2286)) **@Wumpf**
+- Fix `countLeadingZeros` impl. ([#2258](https://github.com/gfx-rs/naga/pull/2258)) **@teoxoy**
+- Cache constant composites. ([#2257](https://github.com/gfx-rs/naga/pull/2257)) **@evahop**
+- Support SPIR-V version 1.4. ([#2230](https://github.com/gfx-rs/naga/pull/2230)) **@kvark**
+
+#### MSL-OUT
+
+- Replace `per_stage_map` with `per_entry_point_map` ([#2237](https://github.com/gfx-rs/naga/pull/2237)) **@armansito**
+- Update `firstLeadingBit` for signed integers ([#2235](https://github.com/gfx-rs/naga/pull/2235)) **@evahop**
+
+#### HLSL-OUT
+
+- Use `Interlocked<op>` intrinsic for atomic integers (#2294) ([#2294](https://github.com/gfx-rs/naga/pull/2294)) **@ErichDonGubler**
+- Document storage access generation. ([#2295](https://github.com/gfx-rs/naga/pull/2295)) **@jimblandy**
+- Emit constructor functions for arrays. ([#2281](https://github.com/gfx-rs/naga/pull/2281)) **@ErichDonGubler**
+- Clear `named_expressions` inserted by duplicated blocks. ([#2116](https://github.com/gfx-rs/naga/pull/2116)) **@teoxoy**
+
+#### GLSL-OUT
+
+- Skip `invariant` for `gl_FragCoord` on WebGL2. ([#2254](https://github.com/gfx-rs/naga/pull/2254)) **@grovesNL**
+- Inject default `gl_PointSize = 1.0` in vertex shaders if `FORCE_POINT_SIZE` option was set. ([#2223](https://github.com/gfx-rs/naga/pull/2223)) **@REASY**
+
 ## v0.11 (2023-01-25)
 
 - Move to the Rust 2021 edition ([#2085](https://github.com/gfx-rs/naga/pull/2085)) **@ErichDonGubler**
diff --git a/third_party/rust/naga/Cargo.toml b/third_party/rust/naga/Cargo.toml
index a5cf96609de0..b4c9fbc4aecd 100644
--- a/third_party/rust/naga/Cargo.toml
+++ b/third_party/rust/naga/Cargo.toml
@@ -49,7 +49,7 @@ harness = false
 
 [dependencies]
 bit-set = "0.5"
-bitflags = "1.0.5"
+bitflags = "2"
 log = "0.4"
 num-traits = "0.2"
 rustc-hash = "1.1.0"
@@ -107,6 +107,9 @@ ron = "~0.7.1"
 version = "0.3"
 features = []
 
+[dev-dependencies.hlsl-snapshots]
+path = "./xtask/hlsl-snapshots"
+
 [dev-dependencies.rspirv]
 version = "0.11"
 git = "https://github.com/gfx-rs/rspirv"
@@ -123,12 +126,14 @@ features = ["deserialize"]
 [features]
 arbitrary = [
     "dep:arbitrary",
+    "bitflags/arbitrary",
     "indexmap/arbitrary",
 ]
 clone = []
 default = []
 deserialize = [
     "serde",
+    "bitflags/serde",
     "indexmap/serde-1",
 ]
 dot-out = []
@@ -138,6 +143,7 @@ hlsl-out = []
 msl-out = []
 serialize = [
     "serde",
+    "bitflags/serde",
     "indexmap/serde-1",
 ]
 span = [
diff --git a/third_party/rust/naga/Makefile b/third_party/rust/naga/Makefile
deleted file mode 100644
index 744d8a2254e0..000000000000
--- a/third_party/rust/naga/Makefile
+++ /dev/null
@@ -1,131 +0,0 @@
-.PHONY: all clean validate-spv validate-msl validate-glsl validate-dot validate-wgsl validate-hlsl-dxc validate-hlsl-fxc
-.SECONDARY: boids.metal quad.metal
-SNAPSHOTS_BASE_IN=tests/in
-SNAPSHOTS_BASE_OUT=tests/out
-
-all:
-	cargo fmt
-	cargo test --all-features --workspace
-	cargo clippy --all-features --workspace -- -D warnings
-
-clean:
-	rm *.metal *.air *.metallib *.vert *.frag *.comp *.spv
-
-bench:
-	#rm -Rf target/criterion
-	cargo bench
-
-%.metal: $(SNAPSHOTS_BASE_IN)/%.wgsl $(wildcard src/*.rs src/**/*.rs examples/*.rs)
-	cargo run --features wgsl-in,msl-out -- $< $@
-
-%.air: %.metal
-	xcrun -sdk macosx metal -c $< -mmacosx-version-min=10.11
-
-%.metallib: %.air
-	xcrun -sdk macosx metallib $< -o $@
-
-%.dot: $(SNAPSHOTS_BASE_IN)/%.wgsl $(wildcard src/*.rs src/front/wgsl/*.rs src/back/dot/*.rs bin/naga.rs)
-	cargo run --features wgsl-in,dot-out -- $< $@
-
-%.png: %.dot
-	dot -Tpng $< -o $@
-
-validate-spv: $(SNAPSHOTS_BASE_OUT)/spv/*.spvasm
-	@set -e && for file in $^ ; do \
-		echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"}; \
-		version_line=$$(head -2 $${file} | tail -1); \
-		version=$${version_line#"; Version: "};\
-		cat $${file} | spirv-as --target-env spv$${version} -o - | spirv-val; \
-	done
-
-validate-msl: $(SNAPSHOTS_BASE_OUT)/msl/*.msl
-	@set -e && for file in $^ ; do \
-		echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"}; \
-		header=$$(head -n1 $${file}); \
-		cat $${file} | xcrun -sdk macosx metal -mmacosx-version-min=10.11 -std=macos-$${header:13:8} -x metal - -o /dev/null; \
-	done
-
-validate-glsl: $(SNAPSHOTS_BASE_OUT)/glsl/*.glsl
-	@set -e && for file in $(SNAPSHOTS_BASE_OUT)/glsl/*.Vertex.glsl ; do \
-		echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"};\
-		cat $${file} | glslangValidator --stdin -S vert; \
-	done
-	@set -e && for file in $(SNAPSHOTS_BASE_OUT)/glsl/*.Fragment.glsl ; do \
-		echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"};\
-		cat $${file} | glslangValidator --stdin -S frag; \
-	done
-	@set -e && for file in $(SNAPSHOTS_BASE_OUT)/glsl/*.Compute.glsl ; do \
-		echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"};\
-		cat $${file} | glslangValidator --stdin -S comp; \
-	done
-
-validate-dot: $(SNAPSHOTS_BASE_OUT)/dot/*.dot
-	@set -e && for
file in $^ ; do \ - echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"}; \ - cat $${file} | dot -o /dev/null; \ - done - -validate-wgsl: $(SNAPSHOTS_BASE_OUT)/wgsl/*.wgsl - @set -e && for file in $^ ; do \ - echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"}; \ - cargo run $${file}; \ - done - -validate-hlsl-dxc: SHELL:=/usr/bin/env bash # required because config files uses arrays -validate-hlsl-dxc: $(SNAPSHOTS_BASE_OUT)/hlsl/*.hlsl - @set -e && for file in $^ ; do \ - DXC_PARAMS="-Wno-parentheses-equality -Zi -Qembed_debug -Od"; \ - echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"}; \ - config="$$(dirname $${file})/$$(basename $${file}).config"; \ - . $${config}; \ - for (( i=0; i<$${#vertex[@]}; i++ )); do \ - name=`echo $${vertex[i]} | cut -d \: -f 1`; \ - profile=`echo $${vertex[i]} | cut -d \: -f 2`; \ - (set -x; dxc $${file} -T $${profile} -E $${name} $${DXC_PARAMS} > /dev/null); \ - done; \ - for (( i=0; i<$${#fragment[@]}; i++ )); do \ - name=`echo $${fragment[i]} | cut -d \: -f 1`; \ - profile=`echo $${fragment[i]} | cut -d \: -f 2`; \ - (set -x; dxc $${file} -T $${profile} -E $${name} $${DXC_PARAMS} > /dev/null); \ - done; \ - for (( i=0; i<$${#compute[@]}; i++ )); do \ - name=`echo $${compute[i]} | cut -d \: -f 1`; \ - profile=`echo $${compute[i]} | cut -d \: -f 2`; \ - (set -x; dxc $${file} -T $${profile} -E $${name} $${DXC_PARAMS} > /dev/null); \ - done; \ - echo "======================"; \ - done - -validate-hlsl-fxc: SHELL:=/usr/bin/env bash # required because config files uses arrays -validate-hlsl-fxc: $(SNAPSHOTS_BASE_OUT)/hlsl/*.hlsl - @set -e && for file in $^ ; do \ - FXC_PARAMS="-Zi -Od"; \ - echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"}; \ - config="$$(dirname $${file})/$$(basename $${file}).config"; \ - . $${config}; \ - for (( i=0; i<$${#vertex[@]}; i++ )); do \ - name=`echo $${vertex[i]} | cut -d \: -f 1`; \ - profile=`echo $${vertex[i]} | cut -d \: -f 2`; \ - sm=`echo $${profile} | cut -d \_ -f 2`; \ - if (( sm < 6 )); then \ - (set -x; fxc $${file} -T $${profile} -E $${name} $${FXC_PARAMS} > /dev/null); \ - fi \ - done; \ - for (( i=0; i<$${#fragment[@]}; i++ )); do \ - name=`echo $${fragment[i]} | cut -d \: -f 1`; \ - profile=`echo $${fragment[i]} | cut -d \: -f 2`; \ - sm=`echo $${profile} | cut -d \_ -f 2`; \ - if (( sm < 6 )); then \ - (set -x; fxc $${file} -T $${profile} -E $${name} $${FXC_PARAMS} > /dev/null); \ - fi \ - done; \ - for (( i=0; i<$${#compute[@]}; i++ )); do \ - name=`echo $${compute[i]} | cut -d \: -f 1`; \ - profile=`echo $${compute[i]} | cut -d \: -f 2`; \ - sm=`echo $${profile} | cut -d \_ -f 2`; \ - if (( sm < 6 )); then \ - (set -x; fxc $${file} -T $${profile} -E $${name} $${FXC_PARAMS} > /dev/null); \ - fi \ - done; \ - echo "======================"; \ - done diff --git a/third_party/rust/naga/src/back/dot/mod.rs b/third_party/rust/naga/src/back/dot/mod.rs index 1eebbee06769..85f382d26e36 100644 --- a/third_party/rust/naga/src/back/dot/mod.rs +++ b/third_party/rust/naga/src/back/dot/mod.rs @@ -252,6 +252,11 @@ impl StatementGraph { } "Atomic" } + S::WorkGroupUniformLoad { pointer, result } => { + self.emits.push((id, result)); + self.dependencies.push((id, pointer, "pointer")); + "WorkGroupUniformLoad" + } S::RayQuery { query, ref fun } => { self.dependencies.push((id, query, "query")); match *fun { @@ -397,6 +402,9 @@ fn write_function_expressions( for (handle, expression) in fun.expressions.iter() { use crate::Expression as E; let (label, color_id) = match *expression { + E::Literal(_) => ("Literal".into(), 2), + 
E::Constant(_) => ("Constant".into(), 2), + E::ZeroValue(_) => ("ZeroValue".into(), 2), E::Access { base, index } => { edges.insert("base", base); edges.insert("index", index); @@ -406,7 +414,6 @@ fn write_function_expressions( edges.insert("base", base); (format!("AccessIndex[{index}]").into(), 1) } - E::Constant(_) => ("Constant".into(), 2), E::Splat { size, value } => { edges.insert("value", value); (format!("Splat{size:?}").into(), 3) @@ -568,6 +575,7 @@ fn write_function_expressions( } E::CallResult(_function) => ("CallResult".into(), 4), E::AtomicResult { .. } => ("AtomicResult".into(), 4), + E::WorkGroupUniformLoadResult { .. } => ("WorkGroupUniformLoadResult".into(), 4), E::ArrayLength(expr) => { edges.insert("", expr); ("ArrayLength".into(), 7) diff --git a/third_party/rust/naga/src/back/glsl/features.rs b/third_party/rust/naga/src/back/glsl/features.rs index 8392969b56e2..c175d9a23cb5 100644 --- a/third_party/rust/naga/src/back/glsl/features.rs +++ b/third_party/rust/naga/src/back/glsl/features.rs @@ -7,6 +7,7 @@ use std::fmt::Write; bitflags::bitflags! { /// Structure used to encode additions to GLSL that aren't supported by all versions. + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct Features: u32 { /// Buffer address space support. const BUFFER_STORAGE = 1; diff --git a/third_party/rust/naga/src/back/glsl/mod.rs b/third_party/rust/naga/src/back/glsl/mod.rs index 9195b968370d..d5fc89bcaac1 100644 --- a/third_party/rust/naga/src/back/glsl/mod.rs +++ b/third_party/rust/naga/src/back/glsl/mod.rs @@ -216,6 +216,7 @@ bitflags::bitflags! { /// Configuration flags for the [`Writer`]. #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct WriterFlags: u32 { /// Flip output Y and extend Z from (0, 1) to (-1, 1). 
diff --git a/third_party/rust/naga/src/back/glsl/mod.rs b/third_party/rust/naga/src/back/glsl/mod.rs
index 9195b968370d..d5fc89bcaac1 100644
--- a/third_party/rust/naga/src/back/glsl/mod.rs
+++ b/third_party/rust/naga/src/back/glsl/mod.rs
@@ -216,6 +216,7 @@ bitflags::bitflags! {
     /// Configuration flags for the [`Writer`].
     #[cfg_attr(feature = "serialize", derive(serde::Serialize))]
     #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
+    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
     pub struct WriterFlags: u32 {
         /// Flip output Y and extend Z from (0, 1) to (-1, 1).
         const ADJUST_COORDINATE_SPACE = 0x1;
@@ -484,7 +485,14 @@ impl<'a, W: Write> Writer<'a, W> {
         // Generate a map with names required to write the module
         let mut names = crate::FastHashMap::default();
         let mut namer = proc::Namer::default();
-        namer.reset(module, keywords::RESERVED_KEYWORDS, &["gl_"], &mut names);
+        namer.reset(
+            module,
+            keywords::RESERVED_KEYWORDS,
+            &[],
+            &[],
+            &["gl_"],
+            &mut names,
+        );
 
         // Build the instance
         let mut this = Self {
@@ -785,20 +793,9 @@ impl<'a, W: Write> Writer<'a, W> {
 
         // Write the array size
         // Writes nothing if `ArraySize::Dynamic`
-        // Panics if `ArraySize::Constant` has a constant that isn't an sint or uint
         match size {
-            crate::ArraySize::Constant(const_handle) => {
-                match self.module.constants[const_handle].inner {
-                    crate::ConstantInner::Scalar {
-                        width: _,
-                        value: crate::ScalarValue::Uint(size),
-                    } => write!(self.out, "{size}")?,
-                    crate::ConstantInner::Scalar {
-                        width: _,
-                        value: crate::ScalarValue::Sint(size),
-                    } => write!(self.out, "{size}")?,
-                    _ => unreachable!(),
-                }
+            crate::ArraySize::Constant(size) => {
+                write!(self.out, "{size}")?;
             }
             crate::ArraySize::Dynamic => (),
         }
@@ -1806,8 +1803,6 @@ impl<'a, W: Write> Writer<'a, W> {
                     Some(self.namer.call(name))
                 } else if self.need_bake_expressions.contains(&handle) {
                     Some(format!("{}{}", back::BAKE_PREFIX, handle.index()))
-                } else if info.ref_count == 0 {
-                    Some(self.namer.call(""))
                 } else {
                     None
                 };
@@ -1835,7 +1830,7 @@
 
                 if let Some(name) = expr_name {
                     write!(self.out, "{level}")?;
-                    self.write_named_expr(handle, name, ctx)?;
+                    self.write_named_expr(handle, name, handle, ctx)?;
                 }
             }
         }
@@ -2125,6 +2120,19 @@ impl<'a, W: Write> Writer<'a, W> {
                 self.write_expr(value, ctx)?;
                 writeln!(self.out, ";")?
             }
+            Statement::WorkGroupUniformLoad { pointer, result } => {
+                // GLSL doesn't have pointers, which means that this backend needs to ensure that
+                // the actual "loading" is happening between the two barriers.
+                // This is done in `Emit` by never emitting a variable name for pointer variables
+                self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
+
+                let result_name = format!("{}{}", back::BAKE_PREFIX, result.index());
+                write!(self.out, "{level}")?;
+                // Expressions cannot have side effects, so just writing the expression here is fine.
+                self.write_named_expr(pointer, result_name, result, ctx)?;
+
+                self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
+            }
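As the comment in the new `Statement::WorkGroupUniformLoad` arm above notes, GLSL has no pointers, so the load is materialized as a named value sandwiched between two workgroup barriers. The sketch below illustrates only the emission order; the real writer goes through `write_barrier`/`write_named_expr`, and the actual names come from `BAKE_PREFIX` and the `Namer`:

    use std::fmt::Write;

    // Barrier, named load, barrier: the shape the arm above writes out.
    fn emit_workgroup_uniform_load(out: &mut String) -> std::fmt::Result {
        writeln!(out, "barrier();")?;                 // Barrier::WORK_GROUP
        writeln!(out, "float _e12 = shared_value;")?; // the load, given a name
        writeln!(out, "barrier();")                   // Barrier::WORK_GROUP again
    }

    fn main() {
        let mut glsl = String::new();
        emit_workgroup_uniform_load(&mut glsl).unwrap();
        print!("{glsl}");
    }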
            // Stores a value into an image.
            Statement::ImageStore {
                image,
@@ -2267,6 +2275,24 @@ impl<'a, W: Write> Writer<'a, W> {
            }
            // Constants are delegated to `write_constant`
            Expression::Constant(constant) => self.write_constant(constant)?,
+            Expression::ZeroValue(ty) => {
+                self.write_zero_init_value(ty)?;
+            }
+            Expression::Literal(literal) => {
+                match literal {
+                    // Floats are written using `Debug` instead of `Display` because it always appends the
+                    // decimal part even if it's zero, which is needed for a valid glsl float constant
+                    crate::Literal::F64(value) => write!(self.out, "{:?}LF", value)?,
+                    crate::Literal::F32(value) => write!(self.out, "{:?}", value)?,
+                    // Unsigned integers need a `u` at the end
+                    //
+                    // While `core` doesn't necessarily need it, it's allowed and since `es` needs it we
+                    // always write it as the extra branch wouldn't have any benefit in readability
+                    crate::Literal::U32(value) => write!(self.out, "{}u", value)?,
+                    crate::Literal::I32(value) => write!(self.out, "{}", value)?,
+                    crate::Literal::Bool(value) => write!(self.out, "{}", value)?,
+                }
+            }
            // `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
            Expression::Splat { size: _, value } => {
                let resolved = ctx.info[expr].ty.inner_with(&self.module.types);
@@ -3282,7 +3308,8 @@ impl<'a, W: Write> Writer<'a, W> {
            // These expressions never show up in `Emit`.
            Expression::CallResult(_)
            | Expression::AtomicResult { .. }
-            | Expression::RayQueryProceedResult => unreachable!(),
+            | Expression::RayQueryProceedResult
+            | Expression::WorkGroupUniformLoadResult { .. } => unreachable!(),
            // `ArrayLength` is written as `expr.length()` and we convert it to a uint
            Expression::ArrayLength(expr) => {
                write!(self.out, "uint(")?;
@@ -3718,9 +3745,12 @@ impl<'a, W: Write> Writer<'a, W> {
        &mut self,
        handle: Handle<crate::Expression>,
        name: String,
+        // The expression which is being named.
+        // Generally, this is the same as handle, except in WorkGroupUniformLoad
+        named: Handle<crate::Expression>,
        ctx: &back::FunctionCtx,
    ) -> BackendResult {
-        match ctx.info[handle].ty {
+        match ctx.info[named].ty {
            proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
                TypeInner::Struct { ..
} => { let ty_name = &self.names[&NameKey::Type(ty_handle)]; @@ -3735,7 +3765,7 @@ impl<'a, W: Write> Writer<'a, W> { } } - let base_ty_res = &ctx.info[handle].ty; + let base_ty_res = &ctx.info[named].ty; let resolved = base_ty_res.inner_with(&self.module.types); write!(self.out, " {name}")?; @@ -3745,7 +3775,7 @@ impl<'a, W: Write> Writer<'a, W> { write!(self.out, " = ")?; self.write_expr(handle, ctx)?; writeln!(self.out, ";")?; - self.named_expressions.insert(handle, name); + self.named_expressions.insert(named, name); Ok(()) } diff --git a/third_party/rust/naga/src/back/hlsl/conv.rs b/third_party/rust/naga/src/back/hlsl/conv.rs index 5eb24962f6fc..aed2dc0ed0f3 100644 --- a/third_party/rust/naga/src/back/hlsl/conv.rs +++ b/third_party/rust/naga/src/back/hlsl/conv.rs @@ -40,11 +40,7 @@ impl crate::TypeInner { } } - pub(super) fn size_hlsl( - &self, - types: &crate::UniqueArena, - constants: &crate::Arena, - ) -> u32 { + pub(super) fn size_hlsl(&self, gctx: crate::proc::GlobalCtx) -> u32 { match *self { Self::Matrix { columns, @@ -57,27 +53,24 @@ impl crate::TypeInner { } Self::Array { base, size, stride } => { let count = match size { - crate::ArraySize::Constant(handle) => { - constants[handle].to_array_length().unwrap_or(1) - } + crate::ArraySize::Constant(size) => size.get(), // A dynamically-sized array has to have at least one element crate::ArraySize::Dynamic => 1, }; - let last_el_size = types[base].inner.size_hlsl(types, constants); + let last_el_size = gctx.types[base].inner.size_hlsl(gctx); ((count - 1) * stride) + last_el_size } - _ => self.size(constants), + _ => self.size(gctx), } } /// Used to generate the name of the wrapped type constructor pub(super) fn hlsl_type_id<'a>( base: crate::Handle, - types: &crate::UniqueArena, - constants: &crate::Arena, + gctx: crate::proc::GlobalCtx, names: &'a crate::FastHashMap, ) -> Result, Error> { - Ok(match types[base].inner { + Ok(match gctx.types[base].inner { crate::TypeInner::Scalar { kind, width } => Cow::Borrowed(kind.to_hlsl_str(width)?), crate::TypeInner::Vector { size, kind, width } => Cow::Owned(format!( "{}{}", @@ -99,9 +92,8 @@ impl crate::TypeInner { size: crate::ArraySize::Constant(size), .. } => Cow::Owned(format!( - "array{}_{}_", - constants[size].to_array_length().unwrap(), - Self::hlsl_type_id(base, types, constants, names)? + "array{size}_{}_", + Self::hlsl_type_id(base, gctx, names)? )), crate::TypeInner::Struct { .. } => { Cow::Borrowed(&names[&crate::proc::NameKey::Type(base)]) diff --git a/third_party/rust/naga/src/back/hlsl/help.rs b/third_party/rust/naga/src/back/hlsl/help.rs index 8ae9baf62dee..c59816550c2d 100644 --- a/third_party/rust/naga/src/back/hlsl/help.rs +++ b/third_party/rust/naga/src/back/hlsl/help.rs @@ -347,12 +347,7 @@ impl<'a, W: Write> super::Writer<'a, W> { module: &crate::Module, constructor: WrappedConstructor, ) -> BackendResult { - let name = crate::TypeInner::hlsl_type_id( - constructor.ty, - &module.types, - &module.constants, - &self.names, - )?; + let name = crate::TypeInner::hlsl_type_id(constructor.ty, module.to_ctx(), &self.names)?; write!(self.out, "Construct{name}")?; Ok(()) } @@ -411,8 +406,7 @@ impl<'a, W: Write> super::Writer<'a, W> { size: crate::ArraySize::Constant(size), .. 
} => { - let count = module.constants[size].to_array_length().unwrap(); - for i in 0..count as usize { + for i in 0..size.get() as usize { write_arg(i, base)?; } } @@ -486,8 +480,7 @@ impl<'a, W: Write> super::Writer<'a, W> { write!(self.out, " {RETURN_VARIABLE_NAME}")?; self.write_array_size(module, base, crate::ArraySize::Constant(size))?; write!(self.out, " = {{ ")?; - let count = module.constants[size].to_array_length().unwrap(); - for i in 0..count { + for i in 0..size.get() { if i != 0 { write!(self.out, ", ")?; } diff --git a/third_party/rust/naga/src/back/hlsl/keywords.rs b/third_party/rust/naga/src/back/hlsl/keywords.rs index 7519b767a10e..81b797bbf5ad 100644 --- a/third_party/rust/naga/src/back/hlsl/keywords.rs +++ b/third_party/rust/naga/src/back/hlsl/keywords.rs @@ -1,10 +1,20 @@ -/*! -HLSL Reserved Words -- -- -*/ +// When compiling with FXC without strict mode, these keywords are actually case insensitive. +// If you compile with strict mode and specify a different casing like "Pass" instead in an identifier, FXC will give this error: +// "error X3086: alternate cases for 'pass' are deprecated in strict mode" +// This behavior is not documented anywhere, but as far as I can tell this is the full list. +pub const RESERVED_CASE_INSENSITIVE: &[&str] = &[ + "asm", + "decl", + "pass", + "technique", + "Texture1D", + "Texture2D", + "Texture3D", + "TextureCube", +]; pub const RESERVED: &[&str] = &[ + // FXC keywords, from https://github.com/MicrosoftDocs/win32/blob/c885cb0c63b0e9be80c6a0e6512473ac6f4e771e/desktop-src/direct3dhlsl/dx-graphics-hlsl-appendix-keywords.md?plain=1#L99-L118 "AppendStructuredBuffer", "asm", "asm_fragment", @@ -129,6 +139,7 @@ pub const RESERVED: &[&str] = &[ "void", "volatile", "while", + // FXC reserved keywords, from https://github.com/MicrosoftDocs/win32/blob/c885cb0c63b0e9be80c6a0e6512473ac6f4e771e/desktop-src/direct3dhlsl/dx-graphics-hlsl-appendix-reserved-words.md?plain=1#L19-L38 "auto", "case", "catch", @@ -163,4 +174,728 @@ pub const RESERVED: &[&str] = &[ "unsigned", "using", "virtual", + // FXC intrinsics, from https://github.com/MicrosoftDocs/win32/blob/1682b99e203708f6f5eda972d966e30f3c1588de/desktop-src/direct3dhlsl/dx-graphics-hlsl-intrinsic-functions.md?plain=1#L26-L165 + "abort", + "abs", + "acos", + "all", + "AllMemoryBarrier", + "AllMemoryBarrierWithGroupSync", + "any", + "asdouble", + "asfloat", + "asin", + "asint", + "asuint", + "atan", + "atan2", + "ceil", + "CheckAccessFullyMapped", + "clamp", + "clip", + "cos", + "cosh", + "countbits", + "cross", + "D3DCOLORtoUBYTE4", + "ddx", + "ddx_coarse", + "ddx_fine", + "ddy", + "ddy_coarse", + "ddy_fine", + "degrees", + "determinant", + "DeviceMemoryBarrier", + "DeviceMemoryBarrierWithGroupSync", + "distance", + "dot", + "dst", + "errorf", + "EvaluateAttributeCentroid", + "EvaluateAttributeAtSample", + "EvaluateAttributeSnapped", + "exp", + "exp2", + "f16tof32", + "f32tof16", + "faceforward", + "firstbithigh", + "firstbitlow", + "floor", + "fma", + "fmod", + "frac", + "frexp", + "fwidth", + "GetRenderTargetSampleCount", + "GetRenderTargetSamplePosition", + "GroupMemoryBarrier", + "GroupMemoryBarrierWithGroupSync", + "InterlockedAdd", + "InterlockedAnd", + "InterlockedCompareExchange", + "InterlockedCompareStore", + "InterlockedExchange", + "InterlockedMax", + "InterlockedMin", + "InterlockedOr", + "InterlockedXor", + "isfinite", + "isinf", + "isnan", + "ldexp", + "length", + "lerp", + "lit", + "log", + "log10", + "log2", + "mad", + "max", + "min", + "modf", + "msad4", + "mul", + "noise", + 
"normalize", + "pow", + "printf", + "Process2DQuadTessFactorsAvg", + "Process2DQuadTessFactorsMax", + "Process2DQuadTessFactorsMin", + "ProcessIsolineTessFactors", + "ProcessQuadTessFactorsAvg", + "ProcessQuadTessFactorsMax", + "ProcessQuadTessFactorsMin", + "ProcessTriTessFactorsAvg", + "ProcessTriTessFactorsMax", + "ProcessTriTessFactorsMin", + "radians", + "rcp", + "reflect", + "refract", + "reversebits", + "round", + "rsqrt", + "saturate", + "sign", + "sin", + "sincos", + "sinh", + "smoothstep", + "sqrt", + "step", + "tan", + "tanh", + "tex1D", + "tex1Dbias", + "tex1Dgrad", + "tex1Dlod", + "tex1Dproj", + "tex2D", + "tex2Dbias", + "tex2Dgrad", + "tex2Dlod", + "tex2Dproj", + "tex3D", + "tex3Dbias", + "tex3Dgrad", + "tex3Dlod", + "tex3Dproj", + "texCUBE", + "texCUBEbias", + "texCUBEgrad", + "texCUBElod", + "texCUBEproj", + "transpose", + "trunc", + // DXC (reserved) keywords, from https://github.com/microsoft/DirectXShaderCompiler/blob/d5d478470d3020a438d3cb810b8d3fe0992e6709/tools/clang/include/clang/Basic/TokenKinds.def#L222-L648 + // with the KEYALL, KEYCXX, BOOLSUPPORT, WCHARSUPPORT, KEYHLSL options enabled (see https://github.com/microsoft/DirectXShaderCompiler/blob/d5d478470d3020a438d3cb810b8d3fe0992e6709/tools/clang/lib/Frontend/CompilerInvocation.cpp#L1199) + "auto", + "break", + "case", + "char", + "const", + "continue", + "default", + "do", + "double", + "else", + "enum", + "extern", + "float", + "for", + "goto", + "if", + "inline", + "int", + "long", + "register", + "return", + "short", + "signed", + "sizeof", + "static", + "struct", + "switch", + "typedef", + "union", + "unsigned", + "void", + "volatile", + "while", + "_Alignas", + "_Alignof", + "_Atomic", + "_Complex", + "_Generic", + "_Imaginary", + "_Noreturn", + "_Static_assert", + "_Thread_local", + "__func__", + "__objc_yes", + "__objc_no", + "asm", + "bool", + "catch", + "class", + "const_cast", + "delete", + "dynamic_cast", + "explicit", + "export", + "false", + "friend", + "mutable", + "namespace", + "new", + "operator", + "private", + "protected", + "public", + "reinterpret_cast", + "static_cast", + "template", + "this", + "throw", + "true", + "try", + "typename", + "typeid", + "using", + "virtual", + "wchar_t", + "_Decimal32", + "_Decimal64", + "_Decimal128", + "__null", + "__alignof", + "__attribute", + "__builtin_choose_expr", + "__builtin_offsetof", + "__builtin_va_arg", + "__extension__", + "__imag", + "__int128", + "__label__", + "__real", + "__thread", + "__FUNCTION__", + "__PRETTY_FUNCTION__", + "__is_nothrow_assignable", + "__is_constructible", + "__is_nothrow_constructible", + "__has_nothrow_assign", + "__has_nothrow_move_assign", + "__has_nothrow_copy", + "__has_nothrow_constructor", + "__has_trivial_assign", + "__has_trivial_move_assign", + "__has_trivial_copy", + "__has_trivial_constructor", + "__has_trivial_move_constructor", + "__has_trivial_destructor", + "__has_virtual_destructor", + "__is_abstract", + "__is_base_of", + "__is_class", + "__is_convertible_to", + "__is_empty", + "__is_enum", + "__is_final", + "__is_literal", + "__is_literal_type", + "__is_pod", + "__is_polymorphic", + "__is_trivial", + "__is_union", + "__is_trivially_constructible", + "__is_trivially_copyable", + "__is_trivially_assignable", + "__underlying_type", + "__is_lvalue_expr", + "__is_rvalue_expr", + "__is_arithmetic", + "__is_floating_point", + "__is_integral", + "__is_complete_type", + "__is_void", + "__is_array", + "__is_function", + "__is_reference", + "__is_lvalue_reference", + "__is_rvalue_reference", + 
"__is_fundamental", + "__is_object", + "__is_scalar", + "__is_compound", + "__is_pointer", + "__is_member_object_pointer", + "__is_member_function_pointer", + "__is_member_pointer", + "__is_const", + "__is_volatile", + "__is_standard_layout", + "__is_signed", + "__is_unsigned", + "__is_same", + "__is_convertible", + "__array_rank", + "__array_extent", + "__private_extern__", + "__module_private__", + "__declspec", + "__cdecl", + "__stdcall", + "__fastcall", + "__thiscall", + "__vectorcall", + "cbuffer", + "tbuffer", + "packoffset", + "linear", + "centroid", + "nointerpolation", + "noperspective", + "sample", + "column_major", + "row_major", + "in", + "out", + "inout", + "uniform", + "precise", + "center", + "shared", + "groupshared", + "discard", + "snorm", + "unorm", + "point", + "line", + "lineadj", + "triangle", + "triangleadj", + "globallycoherent", + "interface", + "sampler_state", + "technique", + "indices", + "vertices", + "primitives", + "payload", + "Technique", + "technique10", + "technique11", + "__builtin_omp_required_simd_align", + "__pascal", + "__fp16", + "__alignof__", + "__asm", + "__asm__", + "__attribute__", + "__complex", + "__complex__", + "__const", + "__const__", + "__decltype", + "__imag__", + "__inline", + "__inline__", + "__nullptr", + "__real__", + "__restrict", + "__restrict__", + "__signed", + "__signed__", + "__typeof", + "__typeof__", + "__volatile", + "__volatile__", + "_Nonnull", + "_Nullable", + "_Null_unspecified", + "__builtin_convertvector", + "__char16_t", + "__char32_t", + // DXC intrinsics, from https://github.com/microsoft/DirectXShaderCompiler/blob/18c9e114f9c314f93e68fbc72ce207d4ed2e65ae/utils/hct/gen_intrin_main.txt#L86-L376 + "D3DCOLORtoUBYTE4", + "GetRenderTargetSampleCount", + "GetRenderTargetSamplePosition", + "abort", + "abs", + "acos", + "all", + "AllMemoryBarrier", + "AllMemoryBarrierWithGroupSync", + "any", + "asdouble", + "asfloat", + "asfloat16", + "asint16", + "asin", + "asint", + "asuint", + "asuint16", + "atan", + "atan2", + "ceil", + "clamp", + "clip", + "cos", + "cosh", + "countbits", + "cross", + "ddx", + "ddx_coarse", + "ddx_fine", + "ddy", + "ddy_coarse", + "ddy_fine", + "degrees", + "determinant", + "DeviceMemoryBarrier", + "DeviceMemoryBarrierWithGroupSync", + "distance", + "dot", + "dst", + "EvaluateAttributeAtSample", + "EvaluateAttributeCentroid", + "EvaluateAttributeSnapped", + "GetAttributeAtVertex", + "exp", + "exp2", + "f16tof32", + "f32tof16", + "faceforward", + "firstbithigh", + "firstbitlow", + "floor", + "fma", + "fmod", + "frac", + "frexp", + "fwidth", + "GroupMemoryBarrier", + "GroupMemoryBarrierWithGroupSync", + "InterlockedAdd", + "InterlockedMin", + "InterlockedMax", + "InterlockedAnd", + "InterlockedOr", + "InterlockedXor", + "InterlockedCompareStore", + "InterlockedExchange", + "InterlockedCompareExchange", + "InterlockedCompareStoreFloatBitwise", + "InterlockedCompareExchangeFloatBitwise", + "isfinite", + "isinf", + "isnan", + "ldexp", + "length", + "lerp", + "lit", + "log", + "log10", + "log2", + "mad", + "max", + "min", + "modf", + "msad4", + "mul", + "normalize", + "pow", + "printf", + "Process2DQuadTessFactorsAvg", + "Process2DQuadTessFactorsMax", + "Process2DQuadTessFactorsMin", + "ProcessIsolineTessFactors", + "ProcessQuadTessFactorsAvg", + "ProcessQuadTessFactorsMax", + "ProcessQuadTessFactorsMin", + "ProcessTriTessFactorsAvg", + "ProcessTriTessFactorsMax", + "ProcessTriTessFactorsMin", + "radians", + "rcp", + "reflect", + "refract", + "reversebits", + "round", + "rsqrt", + "saturate", + "sign", + 
"sin", + "sincos", + "sinh", + "smoothstep", + "source_mark", + "sqrt", + "step", + "tan", + "tanh", + "tex1D", + "tex1Dbias", + "tex1Dgrad", + "tex1Dlod", + "tex1Dproj", + "tex2D", + "tex2Dbias", + "tex2Dgrad", + "tex2Dlod", + "tex2Dproj", + "tex3D", + "tex3Dbias", + "tex3Dgrad", + "tex3Dlod", + "tex3Dproj", + "texCUBE", + "texCUBEbias", + "texCUBEgrad", + "texCUBElod", + "texCUBEproj", + "transpose", + "trunc", + "CheckAccessFullyMapped", + "AddUint64", + "NonUniformResourceIndex", + "WaveIsFirstLane", + "WaveGetLaneIndex", + "WaveGetLaneCount", + "WaveActiveAnyTrue", + "WaveActiveAllTrue", + "WaveActiveAllEqual", + "WaveActiveBallot", + "WaveReadLaneAt", + "WaveReadLaneFirst", + "WaveActiveCountBits", + "WaveActiveSum", + "WaveActiveProduct", + "WaveActiveBitAnd", + "WaveActiveBitOr", + "WaveActiveBitXor", + "WaveActiveMin", + "WaveActiveMax", + "WavePrefixCountBits", + "WavePrefixSum", + "WavePrefixProduct", + "WaveMatch", + "WaveMultiPrefixBitAnd", + "WaveMultiPrefixBitOr", + "WaveMultiPrefixBitXor", + "WaveMultiPrefixCountBits", + "WaveMultiPrefixProduct", + "WaveMultiPrefixSum", + "QuadReadLaneAt", + "QuadReadAcrossX", + "QuadReadAcrossY", + "QuadReadAcrossDiagonal", + "QuadAny", + "QuadAll", + "TraceRay", + "ReportHit", + "CallShader", + "IgnoreHit", + "AcceptHitAndEndSearch", + "DispatchRaysIndex", + "DispatchRaysDimensions", + "WorldRayOrigin", + "WorldRayDirection", + "ObjectRayOrigin", + "ObjectRayDirection", + "RayTMin", + "RayTCurrent", + "PrimitiveIndex", + "InstanceID", + "InstanceIndex", + "GeometryIndex", + "HitKind", + "RayFlags", + "ObjectToWorld", + "WorldToObject", + "ObjectToWorld3x4", + "WorldToObject3x4", + "ObjectToWorld4x3", + "WorldToObject4x3", + "dot4add_u8packed", + "dot4add_i8packed", + "dot2add", + "unpack_s8s16", + "unpack_u8u16", + "unpack_s8s32", + "unpack_u8u32", + "pack_s8", + "pack_u8", + "pack_clamp_s8", + "pack_clamp_u8", + "SetMeshOutputCounts", + "DispatchMesh", + "IsHelperLane", + "AllocateRayQuery", + "CreateResourceFromHeap", + "and", + "or", + "select", + // DXC resource and other types, from https://github.com/microsoft/DirectXShaderCompiler/blob/18c9e114f9c314f93e68fbc72ce207d4ed2e65ae/tools/clang/lib/AST/HlslTypes.cpp#L441-#L572 + "InputPatch", + "OutputPatch", + "PointStream", + "LineStream", + "TriangleStream", + "Texture1D", + "RWTexture1D", + "Texture2D", + "RWTexture2D", + "Texture2DMS", + "RWTexture2DMS", + "Texture3D", + "RWTexture3D", + "TextureCube", + "RWTextureCube", + "Texture1DArray", + "RWTexture1DArray", + "Texture2DArray", + "RWTexture2DArray", + "Texture2DMSArray", + "RWTexture2DMSArray", + "TextureCubeArray", + "RWTextureCubeArray", + "FeedbackTexture2D", + "FeedbackTexture2DArray", + "RasterizerOrderedTexture1D", + "RasterizerOrderedTexture2D", + "RasterizerOrderedTexture3D", + "RasterizerOrderedTexture1DArray", + "RasterizerOrderedTexture2DArray", + "RasterizerOrderedBuffer", + "RasterizerOrderedByteAddressBuffer", + "RasterizerOrderedStructuredBuffer", + "ByteAddressBuffer", + "RWByteAddressBuffer", + "StructuredBuffer", + "RWStructuredBuffer", + "AppendStructuredBuffer", + "ConsumeStructuredBuffer", + "Buffer", + "RWBuffer", + "SamplerState", + "SamplerComparisonState", + "ConstantBuffer", + "TextureBuffer", + "RaytracingAccelerationStructure", + // DXC templated types, from https://github.com/microsoft/DirectXShaderCompiler/blob/18c9e114f9c314f93e68fbc72ce207d4ed2e65ae/tools/clang/lib/AST/ASTContextHLSL.cpp + // look for `BuiltinTypeDeclBuilder` + "matrix", + "vector", + "TextureBuffer", + "ConstantBuffer", + 
"RayQuery", ]; + +// DXC scalar types, from https://github.com/microsoft/DirectXShaderCompiler/blob/18c9e114f9c314f93e68fbc72ce207d4ed2e65ae/tools/clang/lib/AST/ASTContextHLSL.cpp#L48-L254 +// + vector and matrix shorthands +pub const TYPES: &[&str] = &{ + const L: usize = 23 * (1 + 4 + 4 * 4); + let mut res = [""; L]; + let mut c = 0; + + /// For each scalar type, it will additionally generate vector and matrix shorthands + macro_rules! generate { + ([$($roots:literal),*], $x:tt) => { + $( + generate!(@inner push $roots); + generate!(@inner $roots, $x); + )* + }; + + (@inner $root:literal, [$($x:literal),*]) => { + generate!(@inner vector $root, $($x)*); + generate!(@inner matrix $root, $($x)*); + }; + + (@inner vector $root:literal, $($x:literal)*) => { + $( + generate!(@inner push concat!($root, $x)); + )* + }; + + (@inner matrix $root:literal, $($x:literal)*) => { + // Duplicate the list + generate!(@inner matrix $root, $($x)*; $($x)*); + }; + + // The head/tail recursion: pick the first element of the first list and recursively do it for the tail. + (@inner matrix $root:literal, $head:literal $($tail:literal)*; $($x:literal)*) => { + $( + generate!(@inner push concat!($root, $head, "x", $x)); + )* + generate!(@inner matrix $root, $($tail)*; $($x)*); + + }; + + // The end of iteration: we exhausted the list + (@inner matrix $root:literal, ; $($x:literal)*) => {}; + + (@inner push $v:expr) => { + res[c] = $v; + c += 1; + }; + } + + generate!( + [ + "bool", + "int", + "uint", + "dword", + "half", + "float", + "double", + "min10float", + "min16float", + "min12int", + "min16int", + "min16uint", + "int16_t", + "int32_t", + "int64_t", + "uint16_t", + "uint32_t", + "uint64_t", + "float16_t", + "float32_t", + "float64_t", + "int8_t4_packed", + "uint8_t4_packed" + ], + ["1", "2", "3", "4"] + ); + + debug_assert!(c == L); + + res +}; diff --git a/third_party/rust/naga/src/back/hlsl/storage.rs b/third_party/rust/naga/src/back/hlsl/storage.rs index 813dd73649e3..d4eeefe3e15c 100644 --- a/third_party/rust/naga/src/back/hlsl/storage.rs +++ b/third_party/rust/naga/src/back/hlsl/storage.rs @@ -209,17 +209,15 @@ impl super::Writer<'_, W> { } crate::TypeInner::Array { base, - size: crate::ArraySize::Constant(const_handle), - .. + size: crate::ArraySize::Constant(size), + stride, } => { let constructor = super::help::WrappedConstructor { ty: result_ty.handle().unwrap(), }; self.write_wrapped_constructor_function_name(module, constructor)?; write!(self.out, "(")?; - let count = module.constants[const_handle].to_array_length().unwrap(); - let stride = module.types[base].inner.size(&module.constants); - let iter = (0..count).map(|i| (TypeResolution::Handle(base), stride * i)); + let iter = (0..size.get()).map(|i| (TypeResolution::Handle(base), stride * i)); self.write_storage_load_sequence(module, var_handle, iter, func_ctx)?; write!(self.out, ")")?; } @@ -366,8 +364,8 @@ impl super::Writer<'_, W> { } crate::TypeInner::Array { base, - size: crate::ArraySize::Constant(const_handle), - .. 
+ size: crate::ArraySize::Constant(size), + stride, } => { // first, assign the value to a temporary writeln!(self.out, "{level}{{")?; @@ -375,14 +373,12 @@ impl super::Writer<'_, W> { self.write_value_type(module, &module.types[base].inner)?; let depth = level.next().0; write!(self.out, " {STORE_TEMP_NAME}{depth}")?; - self.write_array_size(module, base, crate::ArraySize::Constant(const_handle))?; + self.write_array_size(module, base, crate::ArraySize::Constant(size))?; write!(self.out, " = ")?; self.write_store_value(module, &value, func_ctx)?; writeln!(self.out, ";")?; // then iterate the stores - let count = module.constants[const_handle].to_array_length().unwrap(); - let stride = module.types[base].inner.size(&module.constants); - for i in 0..count { + for i in 0..size.get() { self.temp_access_chain.push(SubAccess::Offset(i * stride)); let sv = StoreValue::TempIndex { depth, diff --git a/third_party/rust/naga/src/back/hlsl/writer.rs b/third_party/rust/naga/src/back/hlsl/writer.rs index a3810c5dab35..45c0a464abe3 100644 --- a/third_party/rust/naga/src/back/hlsl/writer.rs +++ b/third_party/rust/naga/src/back/hlsl/writer.rs @@ -89,8 +89,14 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { fn reset(&mut self, module: &Module) { self.names.clear(); - self.namer - .reset(module, super::keywords::RESERVED, &[], &mut self.names); + self.namer.reset( + module, + super::keywords::RESERVED, + super::keywords::TYPES, + super::keywords::RESERVED_CASE_INSENSITIVE, + &[], + &mut self.names, + ); self.entry_point_io.clear(); self.named_expressions.clear(); self.wrapped.clear(); @@ -824,15 +830,11 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { ) -> BackendResult { write!(self.out, "[")?; - // Write the array size - // Writes nothing if `ArraySize::Dynamic` - // Panics if `ArraySize::Constant` has a constant that isn't an sint or uint match size { - crate::ArraySize::Constant(const_handle) => { - let size = module.constants[const_handle].to_array_length().unwrap(); + crate::ArraySize::Constant(size) => { write!(self.out, "{size}")?; } - crate::ArraySize::Dynamic => {} + crate::ArraySize::Dynamic => unreachable!(), } write!(self.out, "]")?; @@ -877,7 +879,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { } } let ty_inner = &module.types[member.ty].inner; - last_offset = member.offset + ty_inner.size_hlsl(&module.types, &module.constants); + last_offset = member.offset + ty_inner.size_hlsl(module.to_ctx()); // The indentation is only for readability write!(self.out, "{}", back::INDENT)?; @@ -1334,15 +1336,13 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { Some(self.namer.call(name)) } else if self.need_bake_expressions.contains(&handle) { Some(format!("_expr{}", handle.index())) - } else if info.ref_count == 0 { - Some(self.namer.call("")) } else { None }; if let Some(name) = expr_name { write!(self.out, "{level}")?; - self.write_named_expr(module, handle, name, func_ctx)?; + self.write_named_expr(module, handle, name, handle, func_ctx)?; } } } @@ -1903,6 +1903,14 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { writeln!(self.out, ", {res_name});")?; self.named_expressions.insert(result, res_name); } + Statement::WorkGroupUniformLoad { pointer, result } => { + self.write_barrier(crate::Barrier::WORK_GROUP, level)?; + write!(self.out, "{level}")?; + let name = format!("_expr{}", result.index()); + self.write_named_expr(module, pointer, name, result, func_ctx)?; + + self.write_barrier(crate::Barrier::WORK_GROUP, level)?; + } Statement::Switch { selector, ref cases, @@ -2057,6 +2065,16 @@ impl<'a, 
W: fmt::Write> super::Writer<'a, W> {
         match *expression {
             Expression::Constant(constant) => self.write_constant(module, constant)?,
+            Expression::ZeroValue(ty) => self.write_default_init(module, ty)?,
+            Expression::Literal(literal) => match literal {
+                // Floats are written using `Debug` instead of `Display` because it always appends the
+                // decimal part even if it's zero
+                crate::Literal::F64(value) => write!(self.out, "{value:?}L")?,
+                crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
+                crate::Literal::U32(value) => write!(self.out, "{}u", value)?,
+                crate::Literal::I32(value) => write!(self.out, "{}", value)?,
+                crate::Literal::Bool(value) => write!(self.out, "{}", value)?,
+            },
             Expression::Compose { ty, ref components } => {
                 match module.types[ty].inner {
                     TypeInner::Struct { .. } | TypeInner::Array { .. } => {
@@ -2574,6 +2592,7 @@
             Unpack2x16float,
             Regular(&'static str),
             MissingIntOverload(&'static str),
+            MissingIntReturnType(&'static str),
             CountTrailingZeros,
             CountLeadingZeros,
         }
@@ -2642,8 +2661,8 @@
             Mf::CountLeadingZeros => Function::CountLeadingZeros,
             Mf::CountOneBits => Function::MissingIntOverload("countbits"),
             Mf::ReverseBits => Function::MissingIntOverload("reversebits"),
-            Mf::FindLsb => Function::Regular("firstbitlow"),
-            Mf::FindMsb => Function::Regular("firstbithigh"),
+            Mf::FindLsb => Function::MissingIntReturnType("firstbitlow"),
+            Mf::FindMsb => Function::MissingIntReturnType("firstbithigh"),
             Mf::Unpack2x16float => Function::Unpack2x16float,
             _ => return Err(Error::Unimplemented(format!("write_expr_math {fun:?}"))),
         };
@@ -2707,6 +2726,21 @@
                     write!(self.out, ")")?;
                 }
             }
+            Function::MissingIntReturnType(fun_name) => {
+                let scalar_kind = &func_ctx.info[arg]
+                    .ty
+                    .inner_with(&module.types)
+                    .scalar_kind();
+                if let Some(ScalarKind::Sint) = *scalar_kind {
+                    write!(self.out, "asint({fun_name}(")?;
+                    self.write_expr(module, arg, func_ctx)?;
+                    write!(self.out, "))")?;
+                } else {
+                    write!(self.out, "{fun_name}(")?;
+                    self.write_expr(module, arg, func_ctx)?;
+                    write!(self.out, ")")?;
+                }
+            }
             Function::CountTrailingZeros => {
                 match *func_ctx.info[arg].ty.inner_with(&module.types) {
                     TypeInner::Vector { size, kind, .. } => {
@@ -2721,9 +2755,9 @@
                             self.write_expr(module, arg, func_ctx)?;
                             write!(self.out, "))")?;
                         } else {
-                            write!(self.out, "asint(min((32u){s}, asuint(firstbitlow(")?;
+                            write!(self.out, "asint(min((32u){s}, firstbitlow(")?;
                             self.write_expr(module, arg, func_ctx)?;
-                            write!(self.out, "))))")?;
+                            write!(self.out, ")))")?;
                         }
                     }
                     TypeInner::Scalar { kind, .. } => {
@@ -2732,9 +2766,9 @@
                             self.write_expr(module, arg, func_ctx)?;
                             write!(self.out, "))")?;
                         } else {
-                            write!(self.out, "asint(min(32u, asuint(firstbitlow(")?;
+                            write!(self.out, "asint(min(32u, firstbitlow(")?;
                             self.write_expr(module, arg, func_ctx)?;
-                            write!(self.out, "))))")?;
+                            write!(self.out, ")))")?;
                         }
                     }
                     _ => unreachable!(),
@@ -2752,31 +2786,36 @@
                 };
 
                 if let ScalarKind::Uint = kind {
-                    write!(self.out, "asuint((31){s} - firstbithigh(")?;
+                    write!(self.out, "((31u){s} - firstbithigh(")?;
+                    self.write_expr(module, arg, func_ctx)?;
+                    write!(self.out, "))")?;
                 } else {
                     write!(self.out, "(")?;
                     self.write_expr(module, arg, func_ctx)?;
                     write!(
                         self.out,
-                        " < (0){s} ? (0){s} : (31){s} - firstbithigh("
+                        " < (0){s} ? 
(0){s} : (31){s} - asint(firstbithigh(" )?; + self.write_expr(module, arg, func_ctx)?; + write!(self.out, ")))")?; } } TypeInner::Scalar { kind, .. } => { if let ScalarKind::Uint = kind { - write!(self.out, "asuint(31 - firstbithigh(")?; + write!(self.out, "(31u - firstbithigh(")?; + self.write_expr(module, arg, func_ctx)?; + write!(self.out, "))")?; } else { write!(self.out, "(")?; self.write_expr(module, arg, func_ctx)?; - write!(self.out, " < 0 ? 0 : 31 - firstbithigh(")?; + write!(self.out, " < 0 ? 0 : 31 - asint(firstbithigh(")?; + self.write_expr(module, arg, func_ctx)?; + write!(self.out, ")))")?; } } _ => unreachable!(), } - self.write_expr(module, arg, func_ctx)?; - write!(self.out, "))")?; - return Ok(()); } } @@ -2906,6 +2945,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { // Nothing to do here, since call expression already cached Expression::CallResult(_) | Expression::AtomicResult { .. } + | Expression::WorkGroupUniformLoadResult { .. } | Expression::RayQueryProceedResult => {} } @@ -2996,9 +3036,12 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { module: &Module, handle: Handle, name: String, + // The expression which is being named. + // Generally, this is the same as handle, except in WorkGroupUniformLoad + named: Handle, ctx: &back::FunctionCtx, ) -> BackendResult { - match ctx.info[handle].ty { + match ctx.info[named].ty { proc::TypeResolution::Handle(ty_handle) => match module.types[ty_handle].inner { TypeInner::Struct { .. } => { let ty_name = &self.names[&NameKey::Type(ty_handle)]; @@ -3013,7 +3056,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { } } - let base_ty_res = &ctx.info[handle].ty; + let base_ty_res = &ctx.info[named].ty; let resolved = base_ty_res.inner_with(&module.types); write!(self.out, " {name}")?; @@ -3024,7 +3067,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { write!(self.out, " = ")?; self.write_expr(module, handle, ctx)?; writeln!(self.out, ";")?; - self.named_expressions.insert(handle, name); + self.named_expressions.insert(named, name); Ok(()) } diff --git a/third_party/rust/naga/src/back/mod.rs b/third_party/rust/naga/src/back/mod.rs index 8467ee787b3a..76fbaf7644be 100644 --- a/third_party/rust/naga/src/back/mod.rs +++ b/third_party/rust/naga/src/back/mod.rs @@ -228,7 +228,7 @@ bitflags::bitflags! { /// we might as well make one back end's life easier.) 
/// /// [`RayDesc`]: crate::Module::generate_ray_desc_type - #[derive(Default)] + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub struct RayFlag: u32 { const OPAQUE = 0x01; const NO_OPAQUE = 0x02; diff --git a/third_party/rust/naga/src/back/msl/writer.rs b/third_party/rust/naga/src/back/msl/writer.rs index 88600a8f11be..22a5f56b72ed 100644 --- a/third_party/rust/naga/src/back/msl/writer.rs +++ b/third_party/rust/naga/src/back/msl/writer.rs @@ -77,7 +77,7 @@ const CLAMPED_LOD_LOAD_PREFIX: &str = "clamped_lod_e"; struct TypeContext<'a> { handle: Handle, - module: &'a crate::Module, + gctx: proc::GlobalCtx<'a>, names: &'a FastHashMap, access: crate::StorageAccess, binding: Option<&'a super::ResolvedBinding>, @@ -86,7 +86,7 @@ struct TypeContext<'a> { impl<'a> Display for TypeContext<'a> { fn fmt(&self, out: &mut Formatter<'_>) -> Result<(), FmtError> { - let ty = &self.module.types[self.handle]; + let ty = &self.gctx.types[self.handle]; if ty.needs_alias() && !self.first_time { let name = &self.names[&NameKey::Type(self.handle)]; return write!(out, "{name}"); @@ -221,13 +221,7 @@ impl<'a> Display for TypeContext<'a> { { write!(out, "{NAMESPACE}::array<{base_tyname}, {override_size}>") } else if let crate::ArraySize::Constant(size) = size { - let constant_ctx = ConstantContext { - handle: size, - arena: &self.module.constants, - names: self.names, - first_time: false, - }; - write!(out, "{NAMESPACE}::array<{base_tyname}, {constant_ctx}>") + write!(out, "{NAMESPACE}::array<{base_tyname}, {size}>") } else { unreachable!("metal requires all arrays be constant sized"); } @@ -271,7 +265,7 @@ impl<'a> TypedGlobalVariable<'a> { }; let ty_name = TypeContext { handle: var.ty, - module: self.module, + gctx: self.module.to_ctx(), names: self.names, access: storage_access, binding: self.binding, @@ -399,7 +393,7 @@ fn should_pack_struct_member( } let ty_inner = &module.types[member.ty].inner; - let last_offset = member.offset + ty_inner.size(&module.constants); + let last_offset = member.offset + ty_inner.size(module.to_ctx()); let next_offset = match members.get(index + 1) { Some(next) => next.offset, None => span, @@ -1153,7 +1147,7 @@ impl Writer { crate::TypeInner::Array { base, stride, .. 
} => ( context.module.types[base] .inner - .size(&context.module.constants), + .size(context.module.to_ctx()), stride, ), _ => return Err(Error::Validation), @@ -1333,6 +1327,42 @@ impl Writer { }; write!(self.out, "{coco}")?; } + crate::Expression::ZeroValue(ty) => { + let ty_name = TypeContext { + handle: ty, + gctx: context.module.to_ctx(), + names: &self.names, + access: crate::StorageAccess::empty(), + binding: None, + first_time: false, + }; + write!(self.out, "{ty_name} {{}}")?; + } + crate::Expression::Literal(literal) => match literal { + crate::Literal::F64(_) => { + return Err(Error::CapabilityNotSupported(valid::Capabilities::FLOAT64)) + } + crate::Literal::F32(value) => { + if value.is_infinite() { + let sign = if value.is_sign_negative() { "-" } else { "" }; + write!(self.out, "{sign}INFINITY")?; + } else if value.is_nan() { + write!(self.out, "NAN")?; + } else { + let suffix = if value.fract() == 0.0 { ".0" } else { "" }; + write!(self.out, "{value}{suffix}")?; + } + } + crate::Literal::U32(value) => { + write!(self.out, "{value}u")?; + } + crate::Literal::I32(value) => { + write!(self.out, "{value}")?; + } + crate::Literal::Bool(value) => { + write!(self.out, "{value}")?; + } + }, crate::Expression::Splat { size, value } => { let scalar_kind = match *context.resolve_type(value) { crate::TypeInner::Scalar { kind, .. } => kind, @@ -1850,6 +1880,7 @@ impl Writer { // has to be a named expression crate::Expression::CallResult(_) | crate::Expression::AtomicResult { .. } + | crate::Expression::WorkGroupUniformLoadResult { .. } | crate::Expression::RayQueryProceedResult => { unreachable!() } @@ -2302,15 +2333,12 @@ impl Writer { // to convert from a wrapped struct to a raw array, e.g. // `float gl_ClipDistance1 [[clip_distance]] [1];`. if let crate::TypeInner::Array { - size: crate::ArraySize::Constant(const_handle), + size: crate::ArraySize::Constant(size), .. } = context.module.types[member.ty].inner { - let size = context.module.constants[const_handle] - .to_array_length() - .unwrap(); write!(self.out, "{comma} {{")?; - for j in 0..size { + for j in 0..size.get() { if j != 0 { write!(self.out, ",")?; } @@ -2423,7 +2451,7 @@ impl Writer { TypeResolution::Handle(ty_handle) => { let ty_name = TypeContext { handle: ty_handle, - module: context.module, + gctx: context.module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -2553,8 +2581,6 @@ impl Writer { // Don't assume the names in `named_expressions` are unique, // or even valid. Use the `Namer`. 
Some(self.namer.call(name)) - } else if info.ref_count == 0 { - Some(self.namer.call("")) } else { // If this expression is an index that we're going to first compare // against a limit, and then actually use as an index, then we may @@ -2815,6 +2841,18 @@ impl Writer { // done writeln!(self.out, ";")?; } + crate::Statement::WorkGroupUniformLoad { pointer, result } => { + self.write_barrier(crate::Barrier::WORK_GROUP, level)?; + + write!(self.out, "{level}")?; + let name = self.namer.call(""); + self.start_baking_expression(result, &context.expression, &name)?; + self.put_load(pointer, &context.expression, true)?; + self.named_expressions.insert(result, name); + + writeln!(self.out, ";")?; + self.write_barrier(crate::Barrier::WORK_GROUP, level)?; + } crate::Statement::RayQuery { query, ref fun } => { match *fun { crate::RayQueryFunction::Initialize { @@ -2954,31 +2992,17 @@ impl Writer { context: &StatementContext, ) -> BackendResult { let pointer_inner = context.expression.resolve_type(pointer); - let (array_size, is_atomic) = match *pointer_inner { + let is_atomic = match *pointer_inner { crate::TypeInner::Pointer { base, .. } => { match context.expression.module.types[base].inner { - crate::TypeInner::Array { - size: crate::ArraySize::Constant(ch), - .. - } => (Some(ch), false), - crate::TypeInner::Atomic { .. } => (None, true), - _ => (None, false), + crate::TypeInner::Atomic { .. } => true, + _ => false, } } - _ => (None, false), + _ => false, }; - // we can't assign fixed-size arrays - if let Some(const_handle) = array_size { - let size = context.expression.module.constants[const_handle] - .to_array_length() - .unwrap(); - write!(self.out, "{level}for(int _i=0; _i<{size}; ++_i) ")?; - self.put_access_chain(pointer, policy, &context.expression)?; - write!(self.out, ".{WRAPPED_ARRAY_FIELD}[_i] = ")?; - self.put_expression(value, &context.expression, true)?; - writeln!(self.out, ".{WRAPPED_ARRAY_FIELD}[_i];")?; - } else if is_atomic { + if is_atomic { write!( self.out, "{level}{NAMESPACE}::atomic_store_explicit({ATOMIC_REFERENCE}" @@ -3006,8 +3030,14 @@ impl Writer { pipeline_options: &PipelineOptions, ) -> Result { self.names.clear(); - self.namer - .reset(module, super::keywords::RESERVED, &[], &mut self.names); + self.namer.reset( + module, + super::keywords::RESERVED, + &[], + &[], + &[], + &mut self.names, + ); self.struct_member_pads.clear(); writeln!( @@ -3133,7 +3163,7 @@ impl Writer { } => { let base_name = TypeContext { handle: base, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3141,14 +3171,7 @@ impl Writer { }; match size { - crate::ArraySize::Constant(const_handle) => { - let coco = ConstantContext { - handle: const_handle, - arena: &module.constants, - names: &self.names, - first_time: false, - }; - + crate::ArraySize::Constant(size) => { writeln!(self.out, "struct {name} {{")?; writeln!( self.out, @@ -3156,7 +3179,7 @@ impl Writer { back::INDENT, base_name, WRAPPED_ARRAY_FIELD, - coco + size )?; writeln!(self.out, "}};")?; } @@ -3178,7 +3201,7 @@ impl Writer { writeln!(self.out, "{}char _pad{}[{}];", back::INDENT, index, pad)?; } let ty_inner = &module.types[member.ty].inner; - last_offset = member.offset + ty_inner.size(&module.constants); + last_offset = member.offset + ty_inner.size(module.to_ctx()); let member_name = &self.names[&NameKey::StructMember(handle, index as u32)]; @@ -3197,7 +3220,7 @@ impl Writer { None => { let base_name = TypeContext { handle: member.ty, - module, + gctx: 
module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3228,7 +3251,7 @@ impl Writer { _ => { let ty_name = TypeContext { handle, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3288,7 +3311,7 @@ impl Writer { let name = &self.names[&NameKey::Constant(handle)]; let ty_name = TypeContext { handle: ty, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3428,7 +3451,7 @@ impl Writer { Some(ref result) => { let ty_name = TypeContext { handle: result.ty, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3446,7 +3469,7 @@ impl Writer { let name = &self.names[&NameKey::FunctionArgument(fun_handle, index as u32)]; let param_type_name = TypeContext { handle: arg.ty, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3495,7 +3518,7 @@ impl Writer { for (local_handle, local) in fun.local_variables.iter() { let ty_name = TypeContext { handle: local.ty, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3692,7 +3715,7 @@ impl Writer { let name = &self.names[name_key]; let ty_name = TypeContext { handle: ty, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3736,7 +3759,7 @@ impl Writer { for (name, ty, binding) in result_members { let ty_name = TypeContext { handle: ty, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -3753,9 +3776,9 @@ impl Writer { let array_len = match module.types[ty].inner { crate::TypeInner::Array { - size: crate::ArraySize::Constant(handle), + size: crate::ArraySize::Constant(size), .. - } => module.constants[handle].to_array_length(), + } => Some(size), _ => None, }; let resolved = options.resolve_local_binding(binding, out_mode)?; @@ -3831,7 +3854,7 @@ impl Writer { let ty_name = TypeContext { handle: ty, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, @@ -4068,7 +4091,7 @@ impl Writer { let name = &self.names[&NameKey::EntryPointLocal(ep_index as _, local_handle)]; let ty_name = TypeContext { handle: local.ty, - module, + gctx: module.to_ctx(), names: &self.names, access: crate::StorageAccess::empty(), binding: None, diff --git a/third_party/rust/naga/src/back/spv/block.rs b/third_party/rust/naga/src/back/spv/block.rs index 8366df415a79..fa9596655207 100644 --- a/third_party/rust/naga/src/back/spv/block.rs +++ b/third_party/rust/naga/src/back/spv/block.rs @@ -3,8 +3,9 @@ Implementations for `BlockContext` methods. 
*/ use super::{ - index::BoundsCheckResult, make_local, selection::Selection, Block, BlockContext, Dimension, - Error, Instruction, LocalType, LookupType, LoopContext, ResultMember, Writer, WriterFlags, + helpers, index::BoundsCheckResult, make_local, selection::Selection, Block, BlockContext, + Dimension, Error, Instruction, LocalType, LookupType, LoopContext, ResultMember, Writer, + WriterFlags, }; use crate::{arena::Handle, proc::TypeResolution}; use spirv::Word; @@ -117,8 +118,8 @@ impl Writer { width: 4, pointer_space: None, })); - let value0_id = self.get_constant_scalar(crate::ScalarValue::Float(0.0), 4); - let value1_id = self.get_constant_scalar(crate::ScalarValue::Float(1.0), 4); + let zero_scalar_id = self.get_constant_scalar(crate::Literal::F32(0.0)); + let one_scalar_id = self.get_constant_scalar(crate::Literal::F32(1.0)); let original_id = self.id_gen.next(); body.push(Instruction::load( @@ -134,7 +135,7 @@ impl Writer { spirv::GLOp::FClamp, float_type_id, clamp_id, - &[original_id, value0_id, value1_id], + &[original_id, zero_scalar_id, one_scalar_id], )); body.push(Instruction::store(frag_depth_id, clamp_id, None)); @@ -196,9 +197,8 @@ impl<'w> BlockContext<'w> { fn is_intermediate(&self, expr_handle: Handle) -> bool { match self.ir_function.expressions[expr_handle] { crate::Expression::GlobalVariable(handle) => { - let ty = self.ir_module.global_variables[handle].ty; - match self.ir_module.types[ty].inner { - crate::TypeInner::BindingArray { .. } => false, + match self.ir_module.global_variables[handle].space { + crate::AddressSpace::Handle => false, _ => true, } } @@ -220,8 +220,16 @@ impl<'w> BlockContext<'w> { expr_handle: Handle, block: &mut Block, ) -> Result<(), Error> { - let result_type_id = self.get_expression_type_id(&self.fun_info[expr_handle].ty); + let is_named_expression = self + .ir_function + .named_expressions + .contains_key(&expr_handle); + if self.fun_info[expr_handle].ref_count == 0 && !is_named_expression { + return Ok(()); + } + + let result_type_id = self.get_expression_type_id(&self.fun_info[expr_handle].ty); let id = match self.ir_function.expressions[expr_handle] { crate::Expression::Access { base, index: _ } if self.is_intermediate(base) => { // See `is_intermediate`; we'll handle this later in @@ -237,9 +245,15 @@ impl<'w> BlockContext<'w> { crate::TypeInner::BindingArray { base: binding_type, .. } => { + let space = match self.ir_function.expressions[base] { + crate::Expression::GlobalVariable(gvar) => { + self.ir_module.global_variables[gvar].space + } + _ => unreachable!(), + }; let binding_array_false_pointer = LookupType::Local(LocalType::Pointer { base: binding_type, - class: spirv::StorageClass::UniformConstant, + class: helpers::map_storage_class(space), }); let result_id = match self.write_expression_pointer( @@ -265,15 +279,6 @@ impl<'w> BlockContext<'w> { None, )); - if self.fun_info[index].uniformity.non_uniform_result.is_some() { - self.writer.require_any( - "NonUniformEXT", - &[spirv::Capability::ShaderNonUniform], - )?; - self.writer.use_extension("SPV_EXT_descriptor_indexing"); - self.writer - .decorate(load_id, spirv::Decoration::NonUniform, &[]); - } load_id } ref other => { @@ -316,9 +321,15 @@ impl<'w> BlockContext<'w> { crate::TypeInner::BindingArray { base: binding_type, .. 
} => { + let space = match self.ir_function.expressions[base] { + crate::Expression::GlobalVariable(gvar) => { + self.ir_module.global_variables[gvar].space + } + _ => unreachable!(), + }; let binding_array_false_pointer = LookupType::Local(LocalType::Pointer { base: binding_type, - class: spirv::StorageClass::UniformConstant, + class: helpers::map_storage_class(space), }); let result_id = match self.write_expression_pointer( @@ -356,6 +367,8 @@ impl<'w> BlockContext<'w> { self.writer.global_variables[handle.index()].access_id } crate::Expression::Constant(handle) => self.writer.constant_ids[handle.index()], + crate::Expression::ZeroValue(_) => self.writer.write_constant_null(result_type_id), + crate::Expression::Literal(literal) => self.writer.get_constant_scalar(literal), crate::Expression::Splat { size, value } => { let value_id = self.cached[value]; let components = [value_id; 4]; @@ -702,18 +715,14 @@ impl<'w> BlockContext<'w> { crate::TypeInner::Scalar { width, .. } => (None, width), ref other => unimplemented!("Unexpected saturate({:?})", other), }; - - let mut arg1_id = self - .writer - .get_constant_scalar(crate::ScalarValue::Float(0.0), width); - let mut arg2_id = self - .writer - .get_constant_scalar(crate::ScalarValue::Float(1.0), width); + let kind = crate::ScalarKind::Float; + let mut arg1_id = self.writer.get_constant_scalar_with(0, kind, width)?; + let mut arg2_id = self.writer.get_constant_scalar_with(1, kind, width)?; if let Some(size) = maybe_size { let ty = LocalType::Value { vector_size: Some(size), - kind: crate::ScalarKind::Float, + kind, width, pointer_space: None, } @@ -875,12 +884,13 @@ impl<'w> BlockContext<'w> { arg0_id, )), Mf::CountTrailingZeros => { - let uint = crate::ScalarValue::Uint(32); + let kind = crate::ScalarKind::Uint; + let uint_id = match *arg_ty { crate::TypeInner::Vector { size, width, .. } => { let ty = LocalType::Value { vector_size: Some(size), - kind: crate::ScalarKind::Uint, + kind, width, pointer_space: None, } @@ -889,13 +899,13 @@ impl<'w> BlockContext<'w> { self.temp_list.clear(); self.temp_list.resize( size as _, - self.writer.get_constant_scalar(uint, width), + self.writer.get_constant_scalar_with(32, kind, width)?, ); self.writer.get_constant_composite(ty, &self.temp_list) } crate::TypeInner::Scalar { width, .. } => { - self.writer.get_constant_scalar(uint, width) + self.writer.get_constant_scalar_with(32, kind, width)? } _ => unreachable!(), }; @@ -918,21 +928,23 @@ impl<'w> BlockContext<'w> { )) } Mf::CountLeadingZeros => { - let int = crate::ScalarValue::Sint(31); + let kind = crate::ScalarKind::Sint; let (int_type_id, int_id) = match *arg_ty { crate::TypeInner::Vector { size, width, .. } => { let ty = LocalType::Value { vector_size: Some(size), - kind: crate::ScalarKind::Sint, + kind, width, pointer_space: None, } .into(); self.temp_list.clear(); - self.temp_list - .resize(size as _, self.writer.get_constant_scalar(int, width)); + self.temp_list.resize( + size as _, + self.writer.get_constant_scalar_with(31, kind, width)?, + ); ( self.get_type_id(ty), @@ -942,11 +954,11 @@ impl<'w> BlockContext<'w> { crate::TypeInner::Scalar { width, .. 
} => ( self.get_type_id(LookupType::Local(LocalType::Value { vector_size: None, - kind: crate::ScalarKind::Sint, + kind, width, pointer_space: None, })), - self.writer.get_constant_scalar(int, width), + self.writer.get_constant_scalar_with(31, kind, width)?, ), _ => unreachable!(), }; @@ -1086,6 +1098,7 @@ impl<'w> BlockContext<'w> { crate::Expression::FunctionArgument(index) => self.function.parameter_id(index), crate::Expression::CallResult(_) | crate::Expression::AtomicResult { .. } + | crate::Expression::WorkGroupUniformLoadResult { .. } | crate::Expression::RayQueryProceedResult => self.cached[expr_handle], crate::Expression::As { expr, @@ -1131,15 +1144,14 @@ impl<'w> BlockContext<'w> { (_, _, None) => Cast::Unary(spirv::Op::Bitcast), // casting to a bool - generate `OpXxxNotEqual` (_, Sk::Bool, Some(_)) => { - let (op, value) = match src_kind { - Sk::Sint => (spirv::Op::INotEqual, crate::ScalarValue::Sint(0)), - Sk::Uint => (spirv::Op::INotEqual, crate::ScalarValue::Uint(0)), - Sk::Float => { - (spirv::Op::FUnordNotEqual, crate::ScalarValue::Float(0.0)) - } + let op = match src_kind { + Sk::Sint | Sk::Uint => spirv::Op::INotEqual, + Sk::Float => spirv::Op::FUnordNotEqual, Sk::Bool => unreachable!(), }; - let zero_scalar_id = self.writer.get_constant_scalar(value, src_width); + let zero_scalar_id = self + .writer + .get_constant_scalar_with(0, src_kind, src_width)?; let zero_id = match src_size { Some(size) => { let ty = LocalType::Value { @@ -1162,21 +1174,10 @@ impl<'w> BlockContext<'w> { } // casting from a bool - generate `OpSelect` (Sk::Bool, _, Some(dst_width)) => { - let (val0, val1) = match kind { - Sk::Sint => { - (crate::ScalarValue::Sint(0), crate::ScalarValue::Sint(1)) - } - Sk::Uint => { - (crate::ScalarValue::Uint(0), crate::ScalarValue::Uint(1)) - } - Sk::Float => ( - crate::ScalarValue::Float(0.0), - crate::ScalarValue::Float(1.0), - ), - Sk::Bool => unreachable!(), - }; - let scalar0_id = self.writer.get_constant_scalar(val0, dst_width); - let scalar1_id = self.writer.get_constant_scalar(val1, dst_width); + let zero_scalar_id = + self.writer.get_constant_scalar_with(0, kind, dst_width)?; + let one_scalar_id = + self.writer.get_constant_scalar_with(1, kind, dst_width)?; let (accept_id, reject_id) = match src_size { Some(size) => { let ty = LocalType::Value { @@ -1188,19 +1189,19 @@ impl<'w> BlockContext<'w> { .into(); self.temp_list.clear(); - self.temp_list.resize(size as _, scalar0_id); + self.temp_list.resize(size as _, zero_scalar_id); let vec0_id = self.writer.get_constant_composite(ty, &self.temp_list); - self.temp_list.fill(scalar1_id); + self.temp_list.fill(one_scalar_id); let vec1_id = self.writer.get_constant_composite(ty, &self.temp_list); (vec1_id, vec0_id) } - None => (scalar1_id, scalar0_id), + None => (one_scalar_id, zero_scalar_id), }; Cast::Ternary(spirv::Op::Select, accept_id, reject_id) @@ -1403,8 +1404,8 @@ impl<'w> BlockContext<'w> { /// Emit any needed bounds-checking expressions to `block`. /// /// Some cases we need to generate a different return type than what the IR gives us. - /// This is because pointers to binding arrays don't exist in the IR, but we need to - /// create them to create an access chain in SPIRV. + /// This is because pointers to binding arrays of handles (such as images or samplers) + /// don't exist in the IR, but we need to create them to create an access chain in SPIRV. /// /// On success, the return value is an [`ExpressionPointer`] value; see the /// documentation for that type. 
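The cast arms above never re-emit their zero and one scalars: `get_constant_scalar_with` builds a `Literal` from the requested value, kind, and width, and `get_constant_scalar` hands back one id per distinct literal via `cached_constants`. A minimal sketch of that caching, with stand-in types rather than naga's own:

```rust
use std::collections::HashMap;

// One SPIR-V id per distinct literal. `Literal` here is a stand-in for
// `crate::Literal`; the f32 payload is kept as raw bits so the key can
// derive `Eq + Hash`.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum Literal {
    F32Bits(u32),
    U32(u32),
}

#[derive(Default)]
struct Writer {
    next_id: u32,
    cached_constants: HashMap<Literal, u32>,
}

impl Writer {
    fn get_constant_scalar(&mut self, value: Literal) -> u32 {
        if let Some(&id) = self.cached_constants.get(&value) {
            return id; // already emitted once; reuse its id
        }
        self.next_id += 1;
        let id = self.next_id;
        // (the real writer emits an `OpConstant` instruction here)
        self.cached_constants.insert(value, id);
        id
    }
}

fn main() {
    let mut w = Writer::default();
    let a = w.get_constant_scalar(Literal::F32Bits(0.0f32.to_bits()));
    let b = w.get_constant_scalar(Literal::F32Bits(0.0f32.to_bits()));
    let c = w.get_constant_scalar(Literal::U32(1));
    assert_eq!(a, b); // the zero constant is deduplicated
    assert_ne!(a, c);
}
```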
@@ -1434,17 +1435,31 @@ impl<'w> BlockContext<'w> { // but we expect these checks to almost always succeed, and keeping branches to a // minimum is essential. let mut accumulated_checks = None; + // Is true if we are accessing into a binding array of buffers with a non-uniform index. + let mut is_non_uniform_binding_array = false; self.temp_list.clear(); let root_id = loop { expr_handle = match self.ir_function.expressions[expr_handle] { crate::Expression::Access { base, index } => { + if let crate::Expression::GlobalVariable(var_handle) = + self.ir_function.expressions[base] + { + let gvar = &self.ir_module.global_variables[var_handle]; + if let crate::TypeInner::BindingArray { .. } = + self.ir_module.types[gvar.ty].inner + { + is_non_uniform_binding_array |= + self.fun_info[index].uniformity.non_uniform_result.is_some(); + } + } + let index_id = match self.write_bounds_check(base, index, block)? { BoundsCheckResult::KnownInBounds(known_index) => { // Even if the index is known, `OpAccessIndex` // requires expression operands, not literals. - let scalar = crate::ScalarValue::Uint(known_index as u64); - self.writer.get_constant_scalar(scalar, 4) + let scalar = crate::Literal::U32(known_index); + self.writer.get_constant_scalar(scalar) } BoundsCheckResult::Computed(computed_index_id) => computed_index_id, BoundsCheckResult::Conditional(comparison_id) => { @@ -1471,7 +1486,6 @@ impl<'w> BlockContext<'w> { } }; self.temp_list.push(index_id); - base } crate::Expression::AccessIndex { base, index } => { @@ -1494,10 +1508,13 @@ impl<'w> BlockContext<'w> { } }; - let pointer = if self.temp_list.is_empty() { - ExpressionPointer::Ready { - pointer_id: root_id, - } + let (pointer_id, expr_pointer) = if self.temp_list.is_empty() { + ( + root_id, + ExpressionPointer::Ready { + pointer_id: root_id, + }, + ) } else { self.temp_list.reverse(); let pointer_id = self.gen_id(); @@ -1508,16 +1525,21 @@ impl<'w> BlockContext<'w> { // caller to generate the branch, the access, the load or store, and // the zero value (for loads). Otherwise, we can emit the access // ourselves, and just hand them the id of the pointer. - match accumulated_checks { + let expr_pointer = match accumulated_checks { Some(condition) => ExpressionPointer::Conditional { condition, access }, None => { block.body.push(access); ExpressionPointer::Ready { pointer_id } } - } + }; + (pointer_id, expr_pointer) }; + if is_non_uniform_binding_array { + self.writer + .decorate_non_uniform_binding_array_access(pointer_id)?; + } - Ok(pointer) + Ok(expr_pointer) } /// Build the instructions for matrix - matrix column operations @@ -2197,6 +2219,46 @@ impl<'w> BlockContext<'w> { block.body.push(instruction); } + crate::Statement::WorkGroupUniformLoad { pointer, result } => { + self.writer + .write_barrier(crate::Barrier::WORK_GROUP, &mut block); + let result_type_id = self.get_expression_type_id(&self.fun_info[result].ty); + // Embed the body of + match self.write_expression_pointer(pointer, &mut block, None)? { + ExpressionPointer::Ready { pointer_id } => { + let id = self.gen_id(); + block.body.push(Instruction::load( + result_type_id, + id, + pointer_id, + None, + )); + self.cached[result] = id; + } + ExpressionPointer::Conditional { condition, access } => { + self.cached[result] = self.write_conditional_indexed_load( + result_type_id, + condition, + &mut block, + move |id_gen, block| { + // The in-bounds path. Perform the access and the load. 
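The `Statement::WorkGroupUniformLoad` lowering that begins above brackets the load with two workgroup barriers, so every invocation observes the same value. The shape of that lowering, sketched against a hypothetical `Backend` trait rather than the real writers:

```rust
// The barrier/load/barrier bracket used for `WorkGroupUniformLoad`.
// `Backend` is a stand-in, not naga API.
trait Backend {
    fn workgroup_barrier(&mut self);
    fn load(&mut self, pointer: u32) -> u32;
}

fn lower_workgroup_uniform_load<B: Backend>(b: &mut B, pointer: u32) -> u32 {
    b.workgroup_barrier(); // every invocation reaches the load together
    let result = b.load(pointer);
    b.workgroup_barrier(); // no invocation races ahead and overwrites the slot
    result
}

// A recorder impl makes the emitted order visible.
struct Recorder(Vec<String>);

impl Backend for Recorder {
    fn workgroup_barrier(&mut self) {
        self.0.push("barrier".into());
    }
    fn load(&mut self, pointer: u32) -> u32 {
        self.0.push(format!("load %{pointer}"));
        0
    }
}

fn main() {
    let mut r = Recorder(Vec::new());
    lower_workgroup_uniform_load(&mut r, 7);
    assert_eq!(r.0, ["barrier", "load %7", "barrier"]);
}
```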
+ let pointer_id = access.result_id.unwrap(); + let value_id = id_gen.next(); + block.body.push(access); + block.body.push(Instruction::load( + result_type_id, + value_id, + pointer_id, + None, + )); + value_id + }, + ) + } + } + self.writer + .write_barrier(crate::Barrier::WORK_GROUP, &mut block); + } crate::Statement::RayQuery { query, ref fun } => { self.write_ray_query_function(query, fun, &mut block); } diff --git a/third_party/rust/naga/src/back/spv/helpers.rs b/third_party/rust/naga/src/back/spv/helpers.rs index 1ef0db1912b1..5b6226db8517 100644 --- a/third_party/rust/naga/src/back/spv/helpers.rs +++ b/third_party/rust/naga/src/back/spv/helpers.rs @@ -102,7 +102,8 @@ pub fn global_needs_wrapper(ir_module: &crate::Module, var: &crate::GlobalVariab }, None => false, }, - // if it's not a structure, let's wrap it to be able to put "Block" + crate::TypeInner::BindingArray { .. } => false, + // if it's not a structure or a binding array, let's wrap it to be able to put "Block" _ => true, } } diff --git a/third_party/rust/naga/src/back/spv/image.rs b/third_party/rust/naga/src/back/spv/image.rs index 27f352050289..81c9de3755e6 100644 --- a/third_party/rust/naga/src/back/spv/image.rs +++ b/third_party/rust/naga/src/back/spv/image.rs @@ -901,9 +901,7 @@ impl<'w> BlockContext<'w> { depth_id, ); - let zero_id = self - .writer - .get_constant_scalar(crate::ScalarValue::Float(0.0), 4); + let zero_id = self.writer.get_constant_scalar(crate::Literal::F32(0.0)); mask |= spirv::ImageOperands::LOD; inst.add_operand(mask.bits()); diff --git a/third_party/rust/naga/src/back/spv/instructions.rs b/third_party/rust/naga/src/back/spv/instructions.rs index 96d0278285b6..31ed6e231d86 100644 --- a/third_party/rust/naga/src/back/spv/instructions.rs +++ b/third_party/rust/naga/src/back/spv/instructions.rs @@ -343,6 +343,14 @@ impl super::Instruction { instruction } + pub(super) fn constant_32bit(result_type_id: Word, id: Word, value: Word) -> Self { + Self::constant(result_type_id, id, &[value]) + } + + pub(super) fn constant_64bit(result_type_id: Word, id: Word, low: Word, high: Word) -> Self { + Self::constant(result_type_id, id, &[low, high]) + } + pub(super) fn constant(result_type_id: Word, id: Word, values: &[Word]) -> Self { let mut instruction = Self::new(Op::Constant); instruction.set_type(result_type_id); diff --git a/third_party/rust/naga/src/back/spv/mod.rs b/third_party/rust/naga/src/back/spv/mod.rs index 9b084911b137..1e10d2e9c616 100644 --- a/third_party/rust/naga/src/back/spv/mod.rs +++ b/third_party/rust/naga/src/back/spv/mod.rs @@ -176,6 +176,7 @@ struct LocalImageType { bitflags::bitflags! { /// Flags corresponding to the boolean(-ish) parameters to OpTypeImage. + #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct ImageTypeFlags: u8 { const DEPTH = 0x1; const ARRAYED = 0x2; @@ -288,13 +289,19 @@ enum LocalType { image_type_id: Word, }, Sampler, + /// Equivalent to a [`LocalType::Pointer`] whose `base` is a Naga IR [`BindingArray`]. SPIR-V + /// permits duplicated `OpTypePointer` ids, so it's fine to have two different [`LocalType`] + /// representations for pointer types. 
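The new `constant_64bit` helper earlier in this blob packs a doubleword literal the way SPIR-V expects: two 32-bit words, low word first. A round-trip check of that split in plain Rust:

```rust
// Word encoding used by `constant_64bit`: [low, high].
fn f64_to_words(value: f64) -> [u32; 2] {
    let bits = value.to_bits();
    [bits as u32, (bits >> 32) as u32]
}

fn main() {
    let [low, high] = f64_to_words(1.0);
    let bits = ((high as u64) << 32) | low as u64;
    assert_eq!(f64::from_bits(bits), 1.0);
}
```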
+ /// + /// [`BindingArray`]: crate::TypeInner::BindingArray PointerToBindingArray { base: Handle, - size: u64, + size: u32, + space: crate::AddressSpace, }, BindingArray { base: Handle, - size: u64, + size: u32, }, AccelerationStructure, RayQuery, @@ -448,10 +455,7 @@ impl recyclable::Recyclable for CachedExpressions { #[derive(Eq, Hash, PartialEq)] enum CachedConstant { - Scalar { - value: crate::ScalarValue, - width: crate::Bytes, - }, + Literal(crate::Literal), Composite { ty: LookupType, constituent_ids: Vec, @@ -562,13 +566,12 @@ impl BlockContext<'_> { } fn get_index_constant(&mut self, index: Word) -> Word { - self.writer - .get_constant_scalar(crate::ScalarValue::Uint(index as _), 4) + self.writer.get_constant_scalar(crate::Literal::U32(index)) } fn get_scope_constant(&mut self, scope: Word) -> Word { self.writer - .get_constant_scalar(crate::ScalarValue::Sint(scope as _), 4) + .get_constant_scalar(crate::Literal::I32(scope as _)) } } @@ -622,6 +625,7 @@ pub struct Writer { } bitflags::bitflags! { + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct WriterFlags: u32 { /// Include debug labels for everything. const DEBUG = 0x1; diff --git a/third_party/rust/naga/src/back/spv/ray.rs b/third_party/rust/naga/src/back/spv/ray.rs index 79eb2ff9711f..0b53b9cc5265 100644 --- a/third_party/rust/naga/src/back/spv/ray.rs +++ b/third_party/rust/naga/src/back/spv/ray.rs @@ -117,12 +117,9 @@ impl<'w> BlockContext<'w> { ) -> spirv::Word { let width = 4; let query_id = self.cached[query]; - let intersection_id = self.writer.get_constant_scalar( - crate::ScalarValue::Uint( - spirv::RayQueryIntersection::RayQueryCommittedIntersectionKHR as _, - ), - width, - ); + let intersection_id = self.writer.get_constant_scalar(crate::Literal::U32( + spirv::RayQueryIntersection::RayQueryCommittedIntersectionKHR as _, + )); let flag_type_id = self.get_type_id(LookupType::Local(LocalType::Value { vector_size: None, diff --git a/third_party/rust/naga/src/back/spv/writer.rs b/third_party/rust/naga/src/back/spv/writer.rs index ba235e6d03d0..64e27fae13a2 100644 --- a/third_party/rust/naga/src/back/spv/writer.rs +++ b/third_party/rust/naga/src/back/spv/writer.rs @@ -540,8 +540,7 @@ impl Writer { ); iface.varying_ids.push(varying_id); - let default_value_id = - self.get_constant_scalar(crate::ScalarValue::Float(1.0), 4); + let default_value_id = self.get_constant_scalar(crate::Literal::F32(1.0)); prelude .body .push(Instruction::store(varying_id, default_value_id, None)); @@ -937,13 +936,14 @@ impl Writer { } LocalType::BindingArray { base, size } => { let inner_ty = self.get_type_id(LookupType::Handle(base)); - let scalar_id = self.get_constant_scalar(crate::ScalarValue::Uint(size), 4); + let scalar_id = self.get_constant_scalar(crate::Literal::U32(size)); Instruction::type_array(id, inner_ty, scalar_id) } - LocalType::PointerToBindingArray { base, size } => { + LocalType::PointerToBindingArray { base, size, space } => { let inner_ty = self.get_type_id(LookupType::Local(LocalType::BindingArray { base, size })); - Instruction::type_pointer(id, spirv::StorageClass::UniformConstant, inner_ty) + let class = map_storage_class(space); + Instruction::type_pointer(id, class, inner_ty) } LocalType::AccelerationStructure => Instruction::type_acceleration_structure(id), LocalType::RayQuery => Instruction::type_ray_query(id), @@ -990,8 +990,8 @@ impl Writer { let type_id = self.get_type_id(LookupType::Handle(base)); match size { - crate::ArraySize::Constant(const_handle) => { - let length_id = 
self.constant_ids[const_handle.index()]; + crate::ArraySize::Constant(length) => { + let length_id = self.get_index_constant(length.get()); Instruction::type_array(id, type_id, length_id) } crate::ArraySize::Dynamic => Instruction::type_runtime_array(id, type_id), @@ -1000,8 +1000,8 @@ impl Writer { crate::TypeInner::BindingArray { base, size } => { let type_id = self.get_type_id(LookupType::Handle(base)); match size { - crate::ArraySize::Constant(const_handle) => { - let length_id = self.constant_ids[const_handle.index()]; + crate::ArraySize::Constant(length) => { + let length_id = self.get_index_constant(length.get()); Instruction::type_array(id, type_id, length_id) } crate::ArraySize::Dynamic => Instruction::type_runtime_array(id, type_id), @@ -1107,20 +1107,29 @@ impl Writer { } pub(super) fn get_index_constant(&mut self, index: Word) -> Word { - self.get_constant_scalar(crate::ScalarValue::Uint(index as _), 4) + self.get_constant_scalar(crate::Literal::U32(index)) } - pub(super) fn get_constant_scalar( + pub(super) fn get_constant_scalar_with( &mut self, - value: crate::ScalarValue, + value: u8, + kind: crate::ScalarKind, width: crate::Bytes, - ) -> Word { - let scalar = CachedConstant::Scalar { value, width }; + ) -> Result { + Ok( + self.get_constant_scalar(crate::Literal::new(value, kind, width).ok_or( + Error::Validation("Unexpected kind and/or width for Literal"), + )?), + ) + } + + pub(super) fn get_constant_scalar(&mut self, value: crate::Literal) -> Word { + let scalar = CachedConstant::Literal(value); if let Some(&id) = self.cached_constants.get(&scalar) { return id; } let id = self.id_gen.next(); - self.write_constant_scalar(id, &value, width, None); + self.write_constant_scalar(id, &value, None); self.cached_constants.insert(scalar, id); id } @@ -1128,8 +1137,7 @@ impl Writer { fn write_constant_scalar( &mut self, id: Word, - value: &crate::ScalarValue, - width: crate::Bytes, + value: &crate::Literal, debug_name: Option<&String>, ) { if self.flags.contains(WriterFlags::DEBUG) { @@ -1140,56 +1148,19 @@ impl Writer { let type_id = self.get_type_id(LookupType::Local(LocalType::Value { vector_size: None, kind: value.scalar_kind(), - width, + width: value.width(), pointer_space: None, })); - let (solo, pair); let instruction = match *value { - crate::ScalarValue::Sint(val) => { - let words = match width { - 4 => { - solo = [val as u32]; - &solo[..] - } - 8 => { - pair = [val as u32, (val >> 32) as u32]; - &pair - } - _ => unreachable!(), - }; - Instruction::constant(type_id, id, words) + crate::Literal::F64(value) => { + let bits = value.to_bits(); + Instruction::constant_64bit(type_id, id, bits as u32, (bits >> 32) as u32) } - crate::ScalarValue::Uint(val) => { - let words = match width { - 4 => { - solo = [val as u32]; - &solo[..] - } - 8 => { - pair = [val as u32, (val >> 32) as u32]; - &pair - } - _ => unreachable!(), - }; - Instruction::constant(type_id, id, words) - } - crate::ScalarValue::Float(val) => { - let words = match width { - 4 => { - solo = [(val as f32).to_bits()]; - &solo[..] 
- } - 8 => { - let bits = f64::to_bits(val); - pair = [bits as u32, (bits >> 32) as u32]; - &pair - } - _ => unreachable!(), - }; - Instruction::constant(type_id, id, words) - } - crate::ScalarValue::Bool(true) => Instruction::constant_true(type_id, id), - crate::ScalarValue::Bool(false) => Instruction::constant_false(type_id, id), + crate::Literal::F32(value) => Instruction::constant_32bit(type_id, id, value.to_bits()), + crate::Literal::U32(value) => Instruction::constant_32bit(type_id, id, value), + crate::Literal::I32(value) => Instruction::constant_32bit(type_id, id, value as u32), + crate::Literal::Bool(true) => Instruction::constant_true(type_id, id), + crate::Literal::Bool(false) => Instruction::constant_false(type_id, id), }; instruction.to_words(&mut self.logical_layout.declarations); @@ -1579,6 +1550,9 @@ impl Writer { } } + // Note: we should be able to substitute `binding_array`, + // but there is still code that tries to register the pre-substituted type, + // and it is failing on 0. let mut substitute_inner_type_lookup = None; if let Some(ref res_binding) = global_variable.binding { self.decorate(id, Decoration::DescriptorSet, &[res_binding.group]); @@ -1594,7 +1568,8 @@ impl Writer { substitute_inner_type_lookup = Some(LookupType::Local(LocalType::PointerToBindingArray { base, - size: remapped_binding_array_size as u64, + size: remapped_binding_array_size, + space: global_variable.space, })) } } else { @@ -1635,7 +1610,13 @@ impl Writer { // a runtime-sized array. In this case, we need to decorate it with // Block. if let crate::AddressSpace::Storage { .. } = global_variable.space { - self.decorate(inner_type_id, Decoration::Block, &[]); + let decorated_id = match ir_module.types[global_variable.ty].inner { + crate::TypeInner::BindingArray { base, .. } => { + self.get_type_id(LookupType::Handle(base)) + } + _ => inner_type_id, + }; + self.decorate(decorated_id, Decoration::Block, &[]); } if substitute_inner_type_lookup.is_some() { inner_type_id @@ -1801,13 +1782,16 @@ impl Writer { match constant.inner { crate::ConstantInner::Composite { .. } => continue, crate::ConstantInner::Scalar { width, ref value } => { + let literal = crate::Literal::from_scalar(*value, width).ok_or( + Error::Validation("Unexpected kind and/or width for Literal"), + )?; self.constant_ids[handle.index()] = match constant.name { Some(ref name) => { let id = self.id_gen.next(); - self.write_constant_scalar(id, value, width, Some(name)); + self.write_constant_scalar(id, &literal, Some(name)); id } - None => self.get_constant_scalar(*value, width), + None => self.get_constant_scalar(literal), }; } } @@ -1955,6 +1939,13 @@ impl Writer { pub const fn get_capabilities_used(&self) -> &crate::FastHashSet { &self.capabilities_used } + + pub fn decorate_non_uniform_binding_array_access(&mut self, id: Word) -> Result<(), Error> { + self.require_any("NonUniformEXT", &[spirv::Capability::ShaderNonUniform])?; + self.use_extension("SPV_EXT_descriptor_indexing"); + self.decorate(id, spirv::Decoration::NonUniform, &[]); + Ok(()) + } } #[test] diff --git a/third_party/rust/naga/src/back/wgsl/writer.rs b/third_party/rust/naga/src/back/wgsl/writer.rs index 92086c94a8d7..225df0b898aa 100644 --- a/third_party/rust/naga/src/back/wgsl/writer.rs +++ b/third_party/rust/naga/src/back/wgsl/writer.rs @@ -53,6 +53,7 @@ enum Indirection { bitflags::bitflags! 
{ #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct WriterFlags: u32 { /// Always annotate the type information instead of inferring. const EXPLICIT_TYPES = 0x1; @@ -86,6 +87,8 @@ impl Writer { module, crate::keywords::wgsl::RESERVED, // an identifier must not start with two underscore + &[], + &[], &["__"], &mut self.names, ); @@ -244,10 +247,7 @@ impl Writer { for (index, arg) in func.arguments.iter().enumerate() { // Write argument attribute if a binding is present if let Some(ref binding) = arg.binding { - self.write_attributes(&map_binding_to_attribute( - binding, - module.types[arg.ty].inner.scalar_kind(), - ))?; + self.write_attributes(&map_binding_to_attribute(binding))?; } // Write argument name let argument_name = match func_ctx.ty { @@ -274,10 +274,7 @@ impl Writer { if let Some(ref result) = func.result { write!(self.out, " -> ")?; if let Some(ref binding) = result.binding { - self.write_attributes(&map_binding_to_attribute( - binding, - module.types[result.ty].inner.scalar_kind(), - ))?; + self.write_attributes(&map_binding_to_attribute(binding))?; } self.write_type(module, result.ty)?; } @@ -401,10 +398,7 @@ impl Writer { // The indentation is only for readability write!(self.out, "{}", back::INDENT)?; if let Some(ref binding) = member.binding { - self.write_attributes(&map_binding_to_attribute( - binding, - module.types[member.ty].inner.scalar_kind(), - ))?; + self.write_attributes(&map_binding_to_attribute(binding))?; } // Write struct member name and type let member_name = &self.names[&NameKey::StructMember(handle, index as u32)]; @@ -512,10 +506,9 @@ impl Writer { // array -- Dynamic array write!(self.out, "array<")?; match size { - crate::ArraySize::Constant(handle) => { + crate::ArraySize::Constant(len) => { self.write_type(module, base)?; - write!(self.out, ",")?; - self.write_constant(module, handle)?; + write!(self.out, ", {len}")?; } crate::ArraySize::Dynamic => { self.write_type(module, base)?; @@ -527,10 +520,9 @@ impl Writer { // More info https://github.com/gpuweb/gpuweb/issues/2105 write!(self.out, "binding_array<")?; match size { - crate::ArraySize::Constant(handle) => { + crate::ArraySize::Constant(len) => { self.write_type(module, base)?; - write!(self.out, ",")?; - self.write_constant(module, handle)?; + write!(self.out, ", {len}")?; } crate::ArraySize::Dynamic => { self.write_type(module, base)?; @@ -642,11 +634,6 @@ impl Writer { // Otherwise, we could accidentally write variable name instead of full expression. // Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords. Some(self.namer.call(name)) - } else if info.ref_count == 0 { - write!(self.out, "{level}_ = ")?; - self.write_expr(module, handle, func_ctx)?; - writeln!(self.out, ";")?; - continue; } else { let expr = &func_ctx.expressions[handle]; let min_ref_count = expr.bake_ref_count(); @@ -791,6 +778,16 @@ impl Writer { self.write_expr(module, value, func_ctx)?; writeln!(self.out, ");")? } + Statement::WorkGroupUniformLoad { pointer, result } => { + write!(self.out, "{level}")?; + // TODO: Obey named expressions here. 
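Two details of the WGSL text being produced here can be checked standalone: the load result below is baked into a `let` binding named from the backend's bake prefix (assumed here to be `_e`, matching names like `_e7` in naga's output), and the literal writer in a later hunk prints floats with `{:?}` because `Display` drops a zero fractional part, which WGSL would then lex as an integer:

```rust
const BAKE_PREFIX: &str = "_e"; // assumption: naga's `back::BAKE_PREFIX`

fn baked_name(expr_index: usize) -> String {
    format!("{BAKE_PREFIX}{expr_index}")
}

fn main() {
    // e.g. `let _e7 = workgroupUniformLoad(&wg_var);`
    assert_eq!(baked_name(7), "_e7");
    // Why `{:?}` for f32 literals: `Display` loses the decimal part.
    assert_eq!(format!("{}", 1.0f32), "1");
    assert_eq!(format!("{:?}", 1.0f32), "1.0");
}
```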
+ let res_name = format!("{}{}", back::BAKE_PREFIX, result.index()); + self.start_named_expr(module, result, func_ctx, &res_name)?; + self.named_expressions.insert(result, res_name); + write!(self.out, "workgroupUniformLoad(")?; + self.write_expr(module, pointer, func_ctx)?; + writeln!(self.out, ");")?; + } Statement::ImageStore { image, coordinate, @@ -1112,7 +1109,24 @@ impl Writer { // `postfix_expression` forms for member/component access and // subscripting. match *expression { + Expression::Literal(literal) => { + match literal { + // Floats are written using `Debug` instead of `Display` because it always appends the + // decimal part even it's zero + crate::Literal::F64(_) => { + return Err(Error::Custom("unsupported f64 literal".to_string())); + } + crate::Literal::F32(value) => write!(self.out, "{:?}", value)?, + crate::Literal::U32(value) => write!(self.out, "{}u", value)?, + crate::Literal::I32(value) => write!(self.out, "{}", value)?, + crate::Literal::Bool(value) => write!(self.out, "{}", value)?, + } + } Expression::Constant(constant) => self.write_constant(module, constant)?, + Expression::ZeroValue(ty) => { + self.write_type(module, ty)?; + write!(self.out, "()")?; + } Expression::Compose { ty, ref components } => { self.write_type(module, ty)?; write!(self.out, "(")?; @@ -1627,7 +1641,8 @@ impl Writer { // Nothing to do here, since call expression already cached Expression::CallResult(_) | Expression::AtomicResult { .. } - | Expression::RayQueryProceedResult => {} + | Expression::RayQueryProceedResult + | Expression::WorkGroupUniformLoadResult { .. } => {} } Ok(()) @@ -1941,10 +1956,7 @@ const fn address_space_str( ) } -fn map_binding_to_attribute( - binding: &crate::Binding, - scalar_kind: Option, -) -> Vec { +fn map_binding_to_attribute(binding: &crate::Binding) -> Vec { match *binding { crate::Binding::BuiltIn(built_in) => { if let crate::BuiltIn::Position { invariant: true } = built_in { @@ -1957,12 +1969,9 @@ fn map_binding_to_attribute( location, interpolation, sampling, - } => match scalar_kind { - Some(crate::ScalarKind::Float) => vec![ - Attribute::Location(location), - Attribute::Interpolate(interpolation, sampling), - ], - _ => vec![Attribute::Location(location)], - }, + } => vec![ + Attribute::Location(location), + Attribute::Interpolate(interpolation, sampling), + ], } } diff --git a/third_party/rust/naga/src/front/glsl/ast.rs b/third_party/rust/naga/src/front/glsl/ast.rs index cbb21f6de947..138a2276760e 100644 --- a/third_party/rust/naga/src/front/glsl/ast.rs +++ b/third_party/rust/naga/src/front/glsl/ast.rs @@ -3,7 +3,7 @@ use std::{borrow::Cow, fmt}; use super::{builtins::MacroCall, context::ExprPos, Span}; use crate::{ AddressSpace, BinaryOperator, Binding, Constant, Expression, Function, GlobalVariable, Handle, - Interpolation, Sampling, StorageAccess, Type, UnaryOperator, + Interpolation, Literal, Sampling, StorageAccess, Type, UnaryOperator, }; #[derive(Debug, Clone, Copy)] @@ -67,6 +67,7 @@ bitflags::bitflags! 
{ /// builtins overloads can't be generated unless explicitly used, since they might cause /// unneeded capabilities to be requested #[derive(Default)] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct BuiltinVariations: u32 { /// Request the standard overloads const STANDARD = 1 << 0; @@ -121,7 +122,7 @@ pub enum HirExprKind { base: Handle, field: String, }, - Constant(Handle), + Literal(Literal), Binary { left: Handle, op: BinaryOperator, diff --git a/third_party/rust/naga/src/front/glsl/constants.rs b/third_party/rust/naga/src/front/glsl/constants.rs index 045a9c6ffb51..35569926ce62 100644 --- a/third_party/rust/naga/src/front/glsl/constants.rs +++ b/third_party/rust/naga/src/front/glsl/constants.rs @@ -71,6 +71,8 @@ impl<'a> ConstantSolver<'a> { let span = self.expressions.get_span(expr); match self.expressions[expr] { Expression::Constant(constant) => Ok(constant), + Expression::ZeroValue(ty) => self.register_zero_constant(ty, span), + Expression::Literal(literal) => Ok(self.register_literal(literal, span)), Expression::AccessIndex { base, index } => self.access(base, index as usize), Expression::Access { base, index } => { let index = self.solve(index)?; @@ -275,7 +277,13 @@ impl<'a> ConstantSolver<'a> { } ConstantInner::Composite { ty, .. } => match self.types[ty].inner { TypeInner::Array { size, .. } => match size { - crate::ArraySize::Constant(constant) => Ok(constant), + crate::ArraySize::Constant(size) => Ok(self.register_constant( + ConstantInner::Scalar { + width: 4, + value: ScalarValue::Uint(size.get() as u64), + }, + span, + )), crate::ArraySize::Dynamic => { Err(ConstantSolvingError::ArrayLengthDynamic) } @@ -291,6 +299,7 @@ impl<'a> ConstantSolver<'a> { Expression::Derivative { .. } => Err(ConstantSolvingError::Derivative), Expression::Relational { .. } => Err(ConstantSolvingError::Relational), Expression::CallResult { .. } => Err(ConstantSolvingError::Call), + Expression::WorkGroupUniformLoadResult { .. } => unreachable!(), Expression::AtomicResult { .. 
} => Err(ConstantSolvingError::Atomic),
            Expression::FunctionArgument(_) => Err(ConstantSolvingError::FunctionArg),
            Expression::GlobalVariable(_) => Err(ConstantSolvingError::GlobalVariable),
@@ -547,6 +556,101 @@ impl<'a> ConstantSolver<'a> {
         Ok(self.register_constant(inner, span))
     }

+    fn register_zero_constant(
+        &mut self,
+        ty: Handle<Type>,
+        span: crate::Span,
+    ) -> Result<Handle<Constant>, ConstantSolvingError> {
+        let inner = match self.types[ty].inner {
+            TypeInner::Scalar { kind, width } => {
+                let value = match kind {
+                    ScalarKind::Sint => ScalarValue::Sint(0),
+                    ScalarKind::Uint => ScalarValue::Uint(0),
+                    ScalarKind::Float => ScalarValue::Float(0.0),
+                    ScalarKind::Bool => ScalarValue::Bool(false),
+                };
+                ConstantInner::Scalar { width, value }
+            }
+            TypeInner::Vector { size, kind, width } => {
+                let element_type = self.types.insert(
+                    Type {
+                        name: None,
+                        inner: TypeInner::Scalar { kind, width },
+                    },
+                    span,
+                );
+                let element = self.register_zero_constant(element_type, span)?;
+                let components = std::iter::repeat(element)
+                    .take(size as u8 as usize)
+                    .collect();
+                ConstantInner::Composite { ty, components }
+            }
+            TypeInner::Matrix {
+                columns,
+                rows,
+                width,
+            } => {
+                let column_type = self.types.insert(
+                    Type {
+                        name: None,
+                        inner: TypeInner::Vector {
+                            size: rows,
+                            kind: ScalarKind::Float,
+                            width,
+                        },
+                    },
+                    span,
+                );
+                let column = self.register_zero_constant(column_type, span)?;
+                let components = std::iter::repeat(column)
+                    .take(columns as u8 as usize)
+                    .collect();
+                ConstantInner::Composite { ty, components }
+            }
+            TypeInner::Array { base, size, .. } => {
+                let length = match size {
+                    crate::ArraySize::Constant(size) => size.get(),
+                    crate::ArraySize::Dynamic => {
+                        return Err(ConstantSolvingError::ArrayLengthDynamic)
+                    }
+                };
+                let element = self.register_zero_constant(base, span)?;
+                let components = std::iter::repeat(element).take(length as usize).collect();
+                ConstantInner::Composite { ty, components }
+            }
+            TypeInner::Struct { ref members, .. } => {
+                // Make a copy of the member types, for the borrow checker.
+ let types: Vec> = members.iter().map(|member| member.ty).collect(); + let mut components = vec![]; + for member_ty in types { + let value = self.register_zero_constant(member_ty, span)?; + components.push(value); + } + ConstantInner::Composite { ty, components } + } + ref inner => { + return Err(ConstantSolvingError::NotImplemented(format!( + "zero-value construction for types: {:?}", + inner + ))); + } + }; + + Ok(self.register_constant(inner, span)) + } + + fn register_literal(&mut self, literal: crate::Literal, span: crate::Span) -> Handle { + let (width, value) = match literal { + crate::Literal::F64(n) => (8, ScalarValue::Float(n)), + crate::Literal::F32(n) => (4, ScalarValue::Float(n as f64)), + crate::Literal::U32(n) => (4, ScalarValue::Uint(n as u64)), + crate::Literal::I32(n) => (4, ScalarValue::Sint(n as i64)), + crate::Literal::Bool(b) => (1, ScalarValue::Bool(b)), + }; + + self.register_constant(ConstantInner::Scalar { width, value }, span) + } + fn register_constant(&mut self, inner: ConstantInner, span: crate::Span) -> Handle { self.constants.fetch_or_append( Constant { diff --git a/third_party/rust/naga/src/front/glsl/context.rs b/third_party/rust/naga/src/front/glsl/context.rs index 6df4850efae5..93b7e03b6472 100644 --- a/third_party/rust/naga/src/front/glsl/context.rs +++ b/third_party/rust/naga/src/front/glsl/context.rs @@ -9,9 +9,9 @@ use super::{ }; use crate::{ front::{Emitter, Typifier}, - AddressSpace, Arena, BinaryOperator, Block, Constant, Expression, FastHashMap, - FunctionArgument, Handle, LocalVariable, RelationalFunction, ScalarKind, ScalarValue, Span, - Statement, Type, TypeInner, VectorSize, + AddressSpace, Arena, BinaryOperator, Block, Expression, FastHashMap, FunctionArgument, Handle, + Literal, LocalVariable, RelationalFunction, ScalarKind, ScalarValue, Span, Statement, Type, + TypeInner, VectorSize, }; use std::{convert::TryFrom, ops::Index}; @@ -560,8 +560,8 @@ impl Context { frontend.field_selection(self, pos, body, base, field, meta)? } - HirExprKind::Constant(constant) if pos != ExprPos::Lhs => { - self.add_expression(Expression::Constant(constant), meta, body) + HirExprKind::Literal(literal) if pos != ExprPos::Lhs => { + self.add_expression(Expression::Literal(literal), meta, body) } HirExprKind::Binary { left, op, right } if pos != ExprPos::Lhs => { let (mut left, left_meta) = @@ -1253,24 +1253,14 @@ impl Context { self.add_expression(Expression::Load { pointer }, meta, body) }; - let make_constant_inner = |kind, width| { - let value = match kind { - ScalarKind::Sint => crate::ScalarValue::Sint(1), - ScalarKind::Uint => crate::ScalarValue::Uint(1), - ScalarKind::Float => crate::ScalarValue::Float(1.0), - ScalarKind::Bool => return None, - }; - - Some(crate::ConstantInner::Scalar { width, value }) - }; let res = match *frontend.resolve_type(self, left, meta)? 
{ TypeInner::Scalar { kind, width } => { let ty = TypeInner::Scalar { kind, width }; - make_constant_inner(kind, width).map(|i| (ty, i, None, None)) + Literal::one(kind, width).map(|i| (ty, i, None, None)) } TypeInner::Vector { size, kind, width } => { let ty = TypeInner::Vector { size, kind, width }; - make_constant_inner(kind, width).map(|i| (ty, i, Some(size), None)) + Literal::one(kind, width).map(|i| (ty, i, Some(size), None)) } TypeInner::Matrix { columns, @@ -1282,12 +1272,12 @@ impl Context { rows, width, }; - make_constant_inner(ScalarKind::Float, width) + Literal::one(ScalarKind::Float, width) .map(|i| (ty, i, Some(rows), Some(columns))) } _ => None, }; - let (ty_inner, inner, rows, columns) = match res { + let (ty_inner, literal, rows, columns) = match res { Some(res) => res, None => { frontend.errors.push(Error { @@ -1300,15 +1290,7 @@ impl Context { } }; - let constant_1 = frontend.module.constants.append( - Constant { - name: None, - specialization: None, - inner, - }, - Default::default(), - ); - let mut right = self.add_expression(Expression::Constant(constant_1), meta, body); + let mut right = self.add_expression(Expression::Literal(literal), meta, body); // Glsl allows pre/postfixes operations on vectors and matrices, so if the // target is either of them change the right side of the addition to be splatted @@ -1377,8 +1359,11 @@ impl Context { size: crate::ArraySize::Constant(size), .. } => { - let mut array_length = - self.add_expression(Expression::Constant(size), meta, body); + let mut array_length = self.add_expression( + Expression::Literal(Literal::U32(size.get())), + meta, + body, + ); self.forced_conversion( frontend, &mut array_length, diff --git a/third_party/rust/naga/src/front/glsl/functions.rs b/third_party/rust/naga/src/front/glsl/functions.rs index 10c964b5e0bb..b7b4be8a61ac 100644 --- a/third_party/rust/naga/src/front/glsl/functions.rs +++ b/third_party/rust/naga/src/front/glsl/functions.rs @@ -1316,9 +1316,12 @@ impl Frontend { ), ) { match self.module.types[ty].inner { + // TODO: Better error reporting + // right now we just don't walk the array if the size isn't known at + // compile time and let validation catch it TypeInner::Array { base, - size: crate::ArraySize::Constant(constant), + size: crate::ArraySize::Constant(size), .. 
} => { let mut location = match binding { @@ -1326,14 +1329,6 @@ impl Frontend { crate::Binding::BuiltIn(_) => return, }; - // TODO: Better error reporting - // right now we just don't walk the array if the size isn't known at - // compile time and let validation catch it - let size = match self.module.constants[constant].to_array_length() { - Some(val) => val, - None => return f(name, pointer, ty, binding, expressions), - }; - let interpolation = self.module.types[base] .inner @@ -1343,7 +1338,7 @@ impl Frontend { _ => crate::Interpolation::Flat, }); - for index in 0..size { + for index in 0..size.get() { let member_pointer = expressions.append( Expression::AccessIndex { base: pointer, @@ -1501,7 +1496,7 @@ impl Frontend { offset: span, }); - span += self.module.types[ty].inner.size(&self.module.constants); + span += self.module.types[ty].inner.size(self.module.to_ctx()); let len = expressions.len(); let load = expressions.append(Expression::Load { pointer }, Default::default()); diff --git a/third_party/rust/naga/src/front/glsl/offset.rs b/third_party/rust/naga/src/front/glsl/offset.rs index d7950489cb84..21481372b5c6 100644 --- a/third_party/rust/naga/src/front/glsl/offset.rs +++ b/third_party/rust/naga/src/front/glsl/offset.rs @@ -16,7 +16,7 @@ use super::{ error::{Error, ErrorKind}, Span, }; -use crate::{proc::Alignment, Arena, Constant, Handle, Type, TypeInner, UniqueArena}; +use crate::{proc::Alignment, Handle, Type, TypeInner, UniqueArena}; /// Struct with information needed for defining a struct member. /// @@ -43,7 +43,6 @@ pub fn calculate_offset( meta: Span, layout: StructLayout, types: &mut UniqueArena, - constants: &Arena, errors: &mut Vec, ) -> TypeAlignSpan { // When using the std430 storage layout, shader storage blocks will be laid out in buffer storage @@ -68,7 +67,7 @@ pub fn calculate_offset( // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. // TODO: Matrices array TypeInner::Array { base, size, .. 
} => { - let info = calculate_offset(base, meta, layout, types, constants, errors); + let info = calculate_offset(base, meta, layout, types, errors); let name = types[ty].name.clone(); @@ -81,9 +80,7 @@ pub fn calculate_offset( }; let span = match size { - crate::ArraySize::Constant(s) => { - constants[s].to_array_length().unwrap_or(1) * stride - } + crate::ArraySize::Constant(size) => size.get() * stride, crate::ArraySize::Dynamic => stride, }; @@ -135,7 +132,7 @@ pub fn calculate_offset( let name = types[ty].name.clone(); for member in members.iter_mut() { - let info = calculate_offset(member.ty, meta, layout, types, constants, errors); + let info = calculate_offset(member.ty, meta, layout, types, errors); let member_alignment = info.align; span = member_alignment.round_up(span); diff --git a/third_party/rust/naga/src/front/glsl/parser/declarations.rs b/third_party/rust/naga/src/front/glsl/parser/declarations.rs index 9085469dda16..79bf1bc8cb0b 100644 --- a/third_party/rust/naga/src/front/glsl/parser/declarations.rs +++ b/third_party/rust/naga/src/front/glsl/parser/declarations.rs @@ -642,7 +642,6 @@ impl<'source> ParsingContext<'source> { meta, layout, &mut frontend.module.types, - &frontend.module.constants, &mut frontend.errors, ); diff --git a/third_party/rust/naga/src/front/glsl/parser/expressions.rs b/third_party/rust/naga/src/front/glsl/parser/expressions.rs index f09e58b6f6d9..7e47b2eea710 100644 --- a/third_party/rust/naga/src/front/glsl/parser/expressions.rs +++ b/third_party/rust/naga/src/front/glsl/parser/expressions.rs @@ -1,3 +1,5 @@ +use std::num::NonZeroU32; + use crate::{ front::glsl::{ ast::{FunctionCall, FunctionCallKind, HirExpr, HirExprKind}, @@ -7,8 +9,7 @@ use crate::{ token::{Token, TokenValue}, Error, Frontend, Result, Span, }, - ArraySize, BinaryOperator, Block, Constant, ConstantInner, Handle, ScalarValue, Type, - TypeInner, UnaryOperator, + ArraySize, BinaryOperator, Block, Handle, Literal, Type, TypeInner, UnaryOperator, }; impl<'source> ParsingContext<'source> { @@ -21,20 +22,30 @@ impl<'source> ParsingContext<'source> { ) -> Result> { let mut token = self.bump(frontend)?; - let (width, value) = match token.value { - TokenValue::IntConstant(int) => ( - (int.width / 8) as u8, + let literal = match token.value { + TokenValue::IntConstant(int) => { + if int.width != 32 { + frontend.errors.push(Error { + kind: ErrorKind::SemanticError("Unsupported non-32bit integer".into()), + meta: token.meta, + }); + } if int.signed { - ScalarValue::Sint(int.value as i64) + Literal::I32(int.value as i32) } else { - ScalarValue::Uint(int.value) - }, - ), - TokenValue::FloatConstant(float) => ( - (float.width / 8) as u8, - ScalarValue::Float(float.value as f64), - ), - TokenValue::BoolConstant(value) => (1, ScalarValue::Bool(value)), + Literal::U32(int.value as u32) + } + } + TokenValue::FloatConstant(float) => { + if float.width != 32 { + frontend.errors.push(Error { + kind: ErrorKind::SemanticError("Unsupported floating-point value (expected single-precision floating-point number)".into()), + meta: token.meta, + }); + } + Literal::F32(float.value) + } + TokenValue::BoolConstant(value) => Literal::Bool(value), TokenValue::LeftParen => { let expr = self.parse_expression(frontend, ctx, stmt, body)?; let meta = self.expect(frontend, TokenValue::RightParen)?.meta; @@ -59,18 +70,9 @@ impl<'source> ParsingContext<'source> { } }; - let handle = frontend.module.constants.fetch_or_append( - Constant { - name: None, - specialization: None, - inner: ConstantInner::Scalar { width, value }, 
- }, - token.meta, - ); - Ok(stmt.hir_exprs.append( HirExpr { - kind: HirExprKind::Constant(handle), + kind: HirExprKind::Literal(literal), meta: token.meta, }, Default::default(), @@ -136,24 +138,23 @@ impl<'source> ParsingContext<'source> { { let span = frontend.module.types.get_span(handle); - let constant = frontend.module.constants.fetch_or_append( - Constant { - name: None, - specialization: None, - inner: ConstantInner::Scalar { - width: 4, - value: ScalarValue::Uint(args.len() as u64), - }, - }, - Span::default(), - ); + let size = u32::try_from(args.len()) + .ok() + .and_then(NonZeroU32::new) + .ok_or(Error { + kind: ErrorKind::SemanticError( + "There must be at least one argument".into(), + ), + meta, + })?; + handle = frontend.module.types.insert( Type { name: None, inner: TypeInner::Array { stride, base, - size: ArraySize::Constant(constant), + size: ArraySize::Constant(size), }, }, span, diff --git a/third_party/rust/naga/src/front/glsl/parser/types.rs b/third_party/rust/naga/src/front/glsl/parser/types.rs index 08a70669a03e..4761e8bc5305 100644 --- a/third_party/rust/naga/src/front/glsl/parser/types.rs +++ b/third_party/rust/naga/src/front/glsl/parser/types.rs @@ -1,3 +1,5 @@ +use std::num::NonZeroU32; + use crate::{ front::glsl::{ ast::{QualifierKey, QualifierValue, StorageQualifier, StructLayout, TypeQualifiers}, @@ -37,26 +39,16 @@ impl<'source> ParsingContext<'source> { ArraySize::Dynamic } else { let (value, constant_span) = self.parse_uint_constant(frontend)?; - let constant = frontend.module.constants.fetch_or_append( - crate::Constant { - name: None, - specialization: None, - inner: crate::ConstantInner::Scalar { - width: 4, - value: crate::ScalarValue::Uint(value as u64), - }, - }, - constant_span, - ); + let size = NonZeroU32::new(value).ok_or(Error { + kind: ErrorKind::SemanticError("Array size must be greater than zero".into()), + meta: constant_span, + })?; let end_span = self.expect(frontend, TokenValue::RightBracket)?.meta; span.subsume(end_span); - ArraySize::Constant(constant) + ArraySize::Constant(size) }; - frontend - .layouter - .update(&frontend.module.types, &frontend.module.constants) - .unwrap(); + frontend.layouter.update(frontend.module.to_ctx()).unwrap(); let stride = frontend.layouter[*ty].to_stride(); *ty = frontend.module.types.insert( Type { diff --git a/third_party/rust/naga/src/front/spv/error.rs b/third_party/rust/naga/src/front/spv/error.rs index 6c9cf384f19d..2f9bf2d1bcac 100644 --- a/third_party/rust/naga/src/front/spv/error.rs +++ b/third_party/rust/naga/src/front/spv/error.rs @@ -78,6 +78,8 @@ pub enum Error { InvalidAccess(crate::Expression), #[error("invalid access index %{0}")] InvalidAccessIndex(spirv::Word), + #[error("invalid index type %{0}")] + InvalidIndexType(spirv::Word), #[error("invalid binding %{0}")] InvalidBinding(spirv::Word), #[error("invalid global var {0:?}")] diff --git a/third_party/rust/naga/src/front/spv/function.rs b/third_party/rust/naga/src/front/spv/function.rs index 5dc781504ed2..97a197497460 100644 --- a/third_party/rust/naga/src/front/spv/function.rs +++ b/third_party/rust/naga/src/front/spv/function.rs @@ -568,6 +568,13 @@ impl> super::Frontend { } impl<'function> BlockContext<'function> { + pub(super) fn gctx(&self) -> crate::proc::GlobalCtx { + crate::proc::GlobalCtx { + types: self.type_arena, + constants: self.const_arena, + } + } + /// Consumes the `BlockContext` producing a Ir [`Block`](crate::Block) fn lower(mut self) -> crate::Block { fn lower_impl( @@ -597,7 +604,11 @@ impl<'function> 
BlockContext<'function> { crate::Span::default(), ) } - super::BodyFragment::Loop { body, continuing } => { + super::BodyFragment::Loop { + body, + continuing, + break_if, + } => { let body = lower_impl(blocks, bodies, body); let continuing = lower_impl(blocks, bodies, continuing); @@ -605,7 +616,7 @@ impl<'function> BlockContext<'function> { crate::Statement::Loop { body, continuing, - break_if: None, + break_if, }, crate::Span::default(), ) diff --git a/third_party/rust/naga/src/front/spv/image.rs b/third_party/rust/naga/src/front/spv/image.rs index 757843f78962..ddbc8c19b75c 100644 --- a/third_party/rust/naga/src/front/spv/image.rs +++ b/third_party/rust/naga/src/front/spv/image.rs @@ -10,6 +10,7 @@ pub(super) struct LookupSampledImage { bitflags::bitflags! { /// Flags describing sampling method. + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct SamplingFlags: u32 { /// Regular sampling. const REGULAR = 0x1; diff --git a/third_party/rust/naga/src/front/spv/mod.rs b/third_party/rust/naga/src/front/spv/mod.rs index c69a230cb09e..a2124bfa8701 100644 --- a/third_party/rust/naga/src/front/spv/mod.rs +++ b/third_party/rust/naga/src/front/spv/mod.rs @@ -168,7 +168,7 @@ impl crate::ImageDimension { type MemberIndex = u32; bitflags::bitflags! { - #[derive(Default)] + #[derive(Clone, Copy, Debug, Default)] struct DecorationFlags: u32 { const NON_READABLE = 0x1; const NON_WRITABLE = 0x2; @@ -386,8 +386,18 @@ enum BodyFragment { reject: BodyIndex, }, Loop { + /// The body of the loop. Its [`Body::parent`] is the block containing + /// this `Loop` fragment. body: BodyIndex, + + /// The loop's continuing block. This is a grandchild: its + /// [`Body::parent`] is the loop body block, whose index is above. continuing: BodyIndex, + + /// If the SPIR-V loop's back-edge branch is conditional, this is the + /// expression that must be `false` for the back-edge to be taken, with + /// `true` being for the "loop merge" (which breaks out of the loop). + break_if: Option>, }, Switch { selector: Handle, @@ -429,7 +439,7 @@ struct PhiExpression { expressions: Vec<(spirv::Word, spirv::Word)>, } -#[derive(Debug)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] enum MergeBlockInformation { LoopMerge, LoopContinue, @@ -440,9 +450,17 @@ enum MergeBlockInformation { /// Fragments of Naga IR, to be assembled into `Statements` once data flow is /// resolved. /// -/// We can't build a Naga `Statement` tree directly from SPIR-V blocks for two +/// We can't build a Naga `Statement` tree directly from SPIR-V blocks for three /// main reasons: /// +/// - We parse a function's SPIR-V blocks in the order they appear in the file. +/// Within a function, SPIR-V requires that a block must precede any blocks it +/// structurally dominates, but doesn't say much else about the order in which +/// they must appear. So while we know we'll see control flow header blocks +/// before their child constructs and merge blocks, those children and the +/// merge blocks may appear in any order - perhaps even intermingled with +/// children of other constructs. +/// /// - A SPIR-V expression can be used in any SPIR-V block dominated by its /// definition, whereas Naga expressions are scoped to the rest of their /// subtree. This means that discovering an expression use later in the @@ -452,7 +470,7 @@ enum MergeBlockInformation { /// - We translate SPIR-V OpPhi expressions as Naga local variables in which we /// store the appropriate value before jumping to the OpPhi's block. 
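The `break_if` field documented above encodes a conditional back-edge without synthesizing an explicit `If`. Its intended semantics, sketched in plain Rust under the assumption that the condition is evaluated at the end of the continuing block:

```rust
use std::cell::Cell;

// Semantics sketch of `Loop { body, continuing, break_if }`: run the body,
// run the continuing block, then leave the loop if `break_if` is true.
fn run_loop(body: impl Fn(), continuing_with_break_if: impl Fn() -> bool) {
    loop {
        body();
        if continuing_with_break_if() {
            break; // `true` takes the loop merge, i.e. exits the loop
        }
    }
}

fn main() {
    let n = Cell::new(0);
    run_loop(|| n.set(n.get() + 1), || n.get() == 3);
    assert_eq!(n.get(), 3);
}
```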
/// -/// Both cases require us to go back and amend previously generated Naga IR +/// All these cases require us to go back and amend previously generated Naga IR /// based on things we discover later. But modifying old blocks in arbitrary /// spots in a `Statement` tree is awkward. /// @@ -477,18 +495,37 @@ struct BlockContext<'function> { /// Fragments of control-flow-free Naga IR. /// - /// These will be stitched together into a proper `Statement` tree according + /// These will be stitched together into a proper [`Statement`] tree according /// to `bodies`, once parsing is complete. + /// + /// [`Statement`]: crate::Statement blocks: FastHashMap, - /// Map from block label ids to the index of the corresponding `Body` in - /// `bodies`. + /// Map from each SPIR-V block's label id to the index of the [`Body`] in + /// [`bodies`] the block should append its contents to. + /// + /// Since each statement in a Naga [`Block`] dominates the next, we are sure + /// to encounter their SPIR-V blocks in order. Thus, by having this table + /// map a SPIR-V structured control flow construct's merge block to the same + /// body index as its header block, when we encounter the merge block, we + /// will simply pick up building the [`Body`] where the header left off. + /// + /// A function's first block is special: it is the only block we encounter + /// without having seen its label mentioned in advance. (It's simply the + /// first `OpLabel` after the `OpFunction`.) We thus assume that any block + /// missing an entry here must be the first block, which always has body + /// index zero. + /// + /// [`bodies`]: BlockContext::bodies + /// [`Block`]: crate::Block body_for_label: FastHashMap, /// SPIR-V metadata about merge/continue blocks. mergers: FastHashMap, /// A table of `Body` values, each representing a block in the final IR. + /// + /// The first element is always the function's top-level block. bodies: Vec, /// Id of the function currently being processed @@ -1164,7 +1201,6 @@ impl> Frontend { selections: &[spirv::Word], type_arena: &UniqueArena, expressions: &mut Arena, - constants: &Arena, span: crate::Span, ) -> Result, Error> { let selection = match selections.first() { @@ -1173,6 +1209,7 @@ impl> Frontend { }; let root_span = expressions.get_span(root_expr); let root_lookup = self.lookup_type.lookup(root_type_id)?; + let (count, child_type_id) = match type_arena[root_lookup.handle].inner { crate::TypeInner::Struct { ref members, .. } => { let child_member = self @@ -1183,15 +1220,7 @@ impl> Frontend { } crate::TypeInner::Array { size, .. } => { let size = match size { - crate::ArraySize::Constant(handle) => match constants[handle] { - crate::Constant { - specialization: Some(_), - .. - } => return Err(Error::UnsupportedType(root_lookup.handle)), - ref unspecialized => unspecialized - .to_array_length() - .ok_or(Error::InvalidArraySize(handle))?, - }, + crate::ArraySize::Constant(size) => size.get(), // A runtime sized array is not a composite type crate::ArraySize::Dynamic => { return Err(Error::InvalidAccessType(root_type_id)) @@ -1232,7 +1261,6 @@ impl> Frontend { &selections[1..], type_arena, expressions, - constants, span, )?; @@ -1268,10 +1296,26 @@ impl> Frontend { let mut emitter = super::Emitter::default(); emitter.start(ctx.expressions); - // Find the `Body` that this block belongs to. Index zero is the - // function's root `Body`, corresponding to `Function::body`. + // Find the `Body` to which this block contributes. 
+ // + // If this is some SPIR-V structured control flow construct's merge + // block, then `body_idx` will refer to the same `Body` as the header, + // so that we simply pick up accumulating the `Body` where the header + // left off. Each of the statements in a block dominates the next, so + // we're sure to encounter their SPIR-V blocks in order, ensuring that + // the `Body` will be assembled in the proper order. + // + // Note that, unlike every other kind of SPIR-V block, we don't know the + // function's first block's label in advance. Thus, we assume that if + // this block has no entry in `ctx.body_for_label`, it must be the + // function's first block. This always has body index zero. let mut body_idx = *ctx.body_for_label.entry(block_id).or_default(); + + // The Naga IR block this call builds. This will end up as + // `ctx.blocks[&block_id]`, and `ctx.bodies[body_idx]` will refer to it + // via a `BodyFragment::BlockId`. let mut block = crate::Block::new(); + // Stores the merge block as defined by a `OpSelectionMerge` otherwise is `None` // // This is used in `OpSwitch` to promote the `MergeBlockInformation` from @@ -1324,14 +1368,17 @@ impl> Frontend { Op::NoLine => inst.expect(1)?, Op::Undef => { inst.expect(3)?; - let (type_id, id, handle) = - self.parse_null_constant(inst, ctx.type_arena, ctx.const_arena)?; + let type_id = self.next()?; + let id = self.next()?; + let type_lookup = self.lookup_type.lookup(type_id)?; + let ty = type_lookup.handle; + self.lookup_expression.insert( id, LookupExpression { handle: ctx .expressions - .append(crate::Expression::Constant(handle), span), + .append(crate::Expression::ZeroValue(ty), span), type_id, block_id, }, @@ -1675,20 +1722,35 @@ impl> Frontend { let root_type_lookup = self.lookup_type.lookup(root_lexp.type_id)?; let index_lexp = self.lookup_expression.lookup(index_id)?; let index_handle = get_expr_handle!(index_id, index_lexp); + let index_type = self.lookup_type.lookup(index_lexp.type_id)?.handle; let num_components = match ctx.type_arena[root_type_lookup.handle].inner { - crate::TypeInner::Vector { size, .. } => size as usize, + crate::TypeInner::Vector { size, .. } => size as u32, _ => return Err(Error::InvalidVectorType(root_type_lookup.handle)), }; + let mut make_index = |ctx: &mut BlockContext, index: u32| { + make_index_literal( + ctx, + index, + &mut block, + &mut emitter, + index_type, + index_lexp.type_id, + span, + ) + }; + + let index_expr = make_index(ctx, 0)?; let mut handle = ctx.expressions.append( crate::Expression::Access { base: root_handle, - index: self.index_constant_expressions[0], + index: index_expr, }, span, ); - for &index_expr in self.index_constant_expressions[1..num_components].iter() { + for index in 1..num_components { + let index_expr = make_index(ctx, index)?; let access_expr = ctx.expressions.append( crate::Expression::Access { base: root_handle, @@ -1738,31 +1800,25 @@ impl> Frontend { let root_handle = get_expr_handle!(composite_id, root_lexp); let root_type_lookup = self.lookup_type.lookup(root_lexp.type_id)?; let index_lexp = self.lookup_expression.lookup(index_id)?; - let mut index_handle = get_expr_handle!(index_id, index_lexp); + let index_handle = get_expr_handle!(index_id, index_lexp); let index_type = self.lookup_type.lookup(index_lexp.type_id)?.handle; - // SPIR-V allows signed and unsigned indices but naga's is strict about - // types and since the `index_constants` are all signed integers, we need - // to cast the index to a signed integer if it's unsigned. 
- if let Some(crate::ScalarKind::Uint) = - ctx.type_arena[index_type].inner.scalar_kind() - { - index_handle = ctx.expressions.append( - crate::Expression::As { - expr: index_handle, - kind: crate::ScalarKind::Sint, - convert: None, - }, - span, - ) - } - let num_components = match ctx.type_arena[root_type_lookup.handle].inner { - crate::TypeInner::Vector { size, .. } => size as usize, + crate::TypeInner::Vector { size, .. } => size as u32, _ => return Err(Error::InvalidVectorType(root_type_lookup.handle)), }; - let mut components = Vec::with_capacity(num_components); - for &index_expr in self.index_constant_expressions[..num_components].iter() { + + let mut components = Vec::with_capacity(num_components as usize); + for index in 0..num_components { + let index_expr = make_index_literal( + ctx, + index, + &mut block, + &mut emitter, + index_type, + index_lexp.type_id, + span, + )?; let access_expr = ctx.expressions.append( crate::Expression::Access { base: root_handle, @@ -1880,7 +1936,6 @@ impl> Frontend { &selections, ctx.type_arena, ctx.expressions, - ctx.const_arena, span, )?; @@ -2089,7 +2144,7 @@ impl> Frontend { let result_ty = self.lookup_type.lookup(result_type_id)?; let inner = &ctx.type_arena[result_ty.handle].inner; let kind = inner.scalar_kind().unwrap(); - let size = inner.size(ctx.const_arena) as u8; + let size = inner.size(ctx.gctx()) as u8; let left_cast = ctx.expressions.append( crate::Expression::As { @@ -3079,8 +3134,14 @@ impl> Frontend { inst.expect(2)?; let target_id = self.next()?; - // If this is a branch to a merge or continue block, - // then that ends the current body. + // If this is a branch to a merge or continue block, then + // that ends the current body. + // + // Why can we count on finding an entry here when it's + // needed? SPIR-V requires dominators to appear before + // blocks they dominate, so we will have visited a + // structured control construct's header block before + // anything that could exit it. if let Some(info) = ctx.mergers.get(&target_id) { block.extend(emitter.finish(ctx.expressions)); ctx.blocks.insert(block_id, block); @@ -3092,15 +3153,22 @@ impl> Frontend { return Ok(()); } - // Since the target of the branch has no merge information, - // this must be the only branch to that block. This means - // we can treat it as an extension of the current `Body`. + // If `target_id` has no entry in `ctx.body_for_label`, then + // this must be the only branch to it: // - // NOTE: it's possible that another branch was already made to this block - // setting the body index in which case it SHOULD NOT be overridden. - // For example a switch with falltrough, the OpSwitch will set the body to - // the respective case and the case may branch to another case in which case - // the body index shouldn't be changed + // - We've already established that it's not anybody's merge + // block. + // + // - It can't be a switch case. Only switch header blocks + // and other switch cases can branch to a switch case. + // Switch header blocks must dominate all their cases, so + // they must appear in the file before them, and when we + // see `Op::Switch` we populate `ctx.body_for_label` for + // every switch case. + // + // Thus, `target_id` must be a simple extension of the + // current block, which we dominate, so we know we'll + // encounter it later in the file. 
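// Editor's sketch (illustration only, not part of the vendored patch): the
// `body_for_label` bookkeeping described above boils down to a
// first-claim-wins map. A target label already claimed by an earlier
// construct (for example, a switch case registered while parsing
// `Op::Switch`) keeps its body index; an unclaimed target is folded into
// the current body. Assuming only the standard library:

use std::collections::HashMap;

/// Returns the body index the block labeled `target_id` should append to.
fn claim_body(
    body_for_label: &mut HashMap<u32, usize>,
    target_id: u32,
    body_idx: usize,
) -> usize {
    // `or_insert` only writes when the label is unclaimed, so an earlier
    // claim is never overridden.
    *body_for_label.entry(target_id).or_insert(body_idx)
}

#[test]
fn first_claim_wins() {
    let mut map = HashMap::new();
    assert_eq!(claim_body(&mut map, 42, 7), 7); // unclaimed: joins body 7
    assert_eq!(claim_body(&mut map, 42, 9), 7); // already claimed: 9 is ignored
}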
ctx.body_for_label.entry(target_id).or_insert(body_idx); break None; @@ -3114,35 +3182,121 @@ impl> Frontend { get_expr_handle!(condition_id, lexp) }; + // HACK(eddyb) Naga doesn't seem to have this helper, + // so it's declared on the fly here for convenience. + #[derive(Copy, Clone)] + struct BranchTarget { + label_id: spirv::Word, + merge_info: Option, + } + let branch_target = |label_id| BranchTarget { + label_id, + merge_info: ctx.mergers.get(&label_id).copied(), + }; + + let true_target = branch_target(self.next()?); + let false_target = branch_target(self.next()?); + + // Consume branch weights + for _ in 4..inst.wc { + let _ = self.next()?; + } + + // Handle `OpBranchConditional`s used at the end of a loop + // body's "continuing" section as a "conditional backedge", + // i.e. a `do`-`while` condition, or `break if` in WGSL. + + // HACK(eddyb) this has to go to the parent *twice*, because + // `OpLoopMerge` left the "continuing" section nested in the + // loop body in terms of `parent`, but not `BodyFragment`. + let parent_body_idx = ctx.bodies[body_idx].parent; + let parent_parent_body_idx = ctx.bodies[parent_body_idx].parent; + match ctx.bodies[parent_parent_body_idx].data[..] { + // The `OpLoopMerge`'s `continuing` block and the loop's + // backedge block may not be the same, but they'll both + // belong to the same body. + [.., BodyFragment::Loop { + body: loop_body_idx, + continuing: loop_continuing_idx, + break_if: ref mut break_if_slot @ None, + }] if body_idx == loop_continuing_idx => { + // Try both orderings of break-vs-backedge, because + // SPIR-V is symmetrical here, unlike WGSL `break if`. + let break_if_cond = [true, false].into_iter().find_map(|true_breaks| { + let (break_candidate, backedge_candidate) = if true_breaks { + (true_target, false_target) + } else { + (false_target, true_target) + }; + + if break_candidate.merge_info + != Some(MergeBlockInformation::LoopMerge) + { + return None; + } + + // HACK(eddyb) since Naga doesn't explicitly track + // backedges, this is checking for the outcome of + // `OpLoopMerge` below (even if it looks weird). + let backedge_candidate_is_backedge = + backedge_candidate.merge_info.is_none() + && ctx.body_for_label.get(&backedge_candidate.label_id) + == Some(&loop_body_idx); + if !backedge_candidate_is_backedge { + return None; + } + + Some(if true_breaks { + condition + } else { + ctx.expressions.append( + crate::Expression::Unary { + op: crate::UnaryOperator::Not, + expr: condition, + }, + span, + ) + }) + }); + + if let Some(break_if_cond) = break_if_cond { + *break_if_slot = Some(break_if_cond); + + // This `OpBranchConditional` ends the "continuing" + // section of the loop body as normal, with the + // `break if` condition having been stashed above. + break None; + } + } + _ => {} + } + block.extend(emitter.finish(ctx.expressions)); ctx.blocks.insert(block_id, block); let body = &mut ctx.bodies[body_idx]; body.data.push(BodyFragment::BlockId(block_id)); - let true_id = self.next()?; - let false_id = self.next()?; - - let same_target = true_id == false_id; + let same_target = true_target.label_id == false_target.label_id; // Start a body block for the `accept` branch. let accept = ctx.bodies.len(); let mut accept_block = Body::with_parent(body_idx); - // If the `OpBranchConditional`target is somebody else's + // If the `OpBranchConditional` target is somebody else's // merge or continue block, then put a `Break` or `Continue` // statement in this new body block. 
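// Editor's sketch (illustration only, not the vendored code): the arm
// orientation decided in the `break_if` hunk above, reduced to booleans. In
// the patch itself, `cond` is an expression handle, the negation is emitted
// as a `UnaryOperator::Not` expression, and the code additionally verifies
// that the non-merge arm really is the loop back-edge before committing.
// WGSL's `break if` breaks when the condition is true, so when the *false*
// arm is the one that reaches the loop merge, the condition must be negated.

fn orient_break_if(cond: bool, true_is_merge: bool, false_is_merge: bool) -> Option<bool> {
    if true_is_merge {
        Some(cond) // `true` already means "leave the loop"
    } else if false_is_merge {
        Some(!cond) // negate so `true` still means "break"
    } else {
        None // neither arm targets the merge block: not a conditional back-edge
    }
}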
- if let Some(info) = ctx.mergers.get(&true_id) { + if let Some(info) = true_target.merge_info { merger( match same_target { true => &mut ctx.bodies[body_idx], false => &mut accept_block, }, - info, + &info, ) } else { // Note the body index for the block we're branching to. let prev = ctx.body_for_label.insert( - true_id, + true_target.label_id, match same_target { true => body_idx, false => accept, @@ -3161,10 +3315,10 @@ impl> Frontend { let reject = ctx.bodies.len(); let mut reject_block = Body::with_parent(body_idx); - if let Some(info) = ctx.mergers.get(&false_id) { - merger(&mut reject_block, info) + if let Some(info) = false_target.merge_info { + merger(&mut reject_block, &info) } else { - let prev = ctx.body_for_label.insert(false_id, reject); + let prev = ctx.body_for_label.insert(false_target.label_id, reject); debug_assert!(prev.is_none()); } @@ -3177,11 +3331,6 @@ impl> Frontend { reject, }); - // Consume branch weights - for _ in 4..inst.wc { - let _ = self.next()?; - } - return Ok(()); } Op::Switch => { @@ -3351,6 +3500,7 @@ impl> Frontend { parent_body.data.push(BodyFragment::Loop { body: loop_body_idx, continuing: continue_idx, + break_if: None, }); body_idx = loop_body_idx; } @@ -3495,20 +3645,12 @@ impl> Frontend { let semantics_id = self.next()?; let exec_scope_const = self.lookup_constant.lookup(exec_scope_id)?; let semantics_const = self.lookup_constant.lookup(semantics_id)?; - let exec_scope = match ctx.const_arena[exec_scope_const.handle].inner { - crate::ConstantInner::Scalar { - value: crate::ScalarValue::Uint(raw), - width: _, - } => raw as u32, - _ => return Err(Error::InvalidBarrierScope(exec_scope_id)), - }; - let semantics = match ctx.const_arena[semantics_const.handle].inner { - crate::ConstantInner::Scalar { - value: crate::ScalarValue::Uint(raw), - width: _, - } => raw as u32, - _ => return Err(Error::InvalidBarrierMemorySemantics(semantics_id)), - }; + + let exec_scope = resolve_constant(ctx.gctx(), exec_scope_const.handle) + .ok_or(Error::InvalidBarrierScope(exec_scope_id))?; + let semantics = resolve_constant(ctx.gctx(), semantics_const.handle) + .ok_or(Error::InvalidBarrierMemorySemantics(semantics_id))?; + if exec_scope == spirv::Scope::Workgroup as u32 { let mut flags = crate::Barrier::empty(); flags.set( @@ -3703,6 +3845,7 @@ impl> Frontend { } } } + S::WorkGroupUniformLoad { .. 
} => unreachable!(), } i += 1; } @@ -4384,12 +4527,14 @@ impl> Frontend { let length_id = self.next()?; let length_const = self.lookup_constant.lookup(length_id)?; + let size = resolve_constant(module.to_ctx(), length_const.handle) + .and_then(NonZeroU32::new) + .ok_or(Error::InvalidArraySize(length_const.handle))?; + let decor = self.future_decor.remove(&id).unwrap_or_default(); let base = self.lookup_type.lookup(type_id)?.handle; - self.layouter - .update(&module.types, &module.constants) - .unwrap(); + self.layouter.update(module.to_ctx()).unwrap(); // HACK if the underlying type is an image or a sampler, let's assume // that we're dealing with a binding-array @@ -4427,12 +4572,12 @@ impl> Frontend { { crate::TypeInner::BindingArray { base, - size: crate::ArraySize::Constant(length_const.handle), + size: crate::ArraySize::Constant(size), } } else { crate::TypeInner::Array { base, - size: crate::ArraySize::Constant(length_const.handle), + size: crate::ArraySize::Constant(size), stride: match decor.array_stride { Some(stride) => stride.get(), None => self.layouter[base].to_stride(), @@ -4470,9 +4615,7 @@ impl> Frontend { let decor = self.future_decor.remove(&id).unwrap_or_default(); let base = self.lookup_type.lookup(type_id)?.handle; - self.layouter - .update(&module.types, &module.constants) - .unwrap(); + self.layouter.update(module.to_ctx()).unwrap(); // HACK same case as in `parse_type_array()` let inner = if let crate::TypeInner::Image { .. } | crate::TypeInner::Sampler { .. } = @@ -4523,9 +4666,7 @@ impl> Frontend { .as_ref() .map_or(false, |decor| decor.storage_buffer); - self.layouter - .update(&module.types, &module.constants) - .unwrap(); + self.layouter.update(module.to_ctx()).unwrap(); let mut members = Vec::::with_capacity(inst.wc as usize - 2); let mut member_lookups = Vec::with_capacity(members.capacity()); @@ -4629,7 +4770,7 @@ impl> Frontend { let id = self.next()?; let sample_type_id = self.next()?; let dim = self.next()?; - let _is_depth = self.next()?; + let is_depth = self.next()?; let is_array = self.next()? != 0; let is_msaa = self.next()? 
!= 0; let _is_sampled = self.next()?; @@ -4661,7 +4802,9 @@ impl> Frontend { .ok_or(Error::InvalidImageBaseType(base_handle))?; let inner = crate::TypeInner::Image { - class: if format != 0 { + class: if is_depth == 1 { + crate::ImageClass::Depth { multi: is_msaa } + } else if format != 0 { crate::ImageClass::Storage { format: map_image_format(format)?, access: crate::StorageAccess::default(), @@ -5151,6 +5294,37 @@ impl> Frontend { } } +fn make_index_literal( + ctx: &mut BlockContext, + index: u32, + block: &mut crate::Block, + emitter: &mut super::Emitter, + index_type: Handle, + index_type_id: spirv::Word, + span: crate::Span, +) -> Result, Error> { + block.extend(emitter.finish(ctx.expressions)); + + let literal = match ctx.type_arena[index_type].inner.scalar_kind() { + Some(crate::ScalarKind::Uint) => crate::Literal::U32(index), + Some(crate::ScalarKind::Sint) => crate::Literal::I32(index as i32), + _ => return Err(Error::InvalidIndexType(index_type_id)), + }; + let expr = ctx + .expressions + .append(crate::Expression::Literal(literal), span); + + emitter.start(ctx.expressions); + Ok(expr) +} + +fn resolve_constant( + gctx: crate::proc::GlobalCtx, + constant: Handle, +) -> Option { + gctx.constants[constant].to_array_length() +} + pub fn parse_u8_slice(data: &[u8], options: &Options) -> Result { if data.len() % 4 != 0 { return Err(Error::IncompleteData); diff --git a/third_party/rust/naga/src/front/spv/null.rs b/third_party/rust/naga/src/front/spv/null.rs index 85350c563ed8..eb0b418aaa9c 100644 --- a/third_party/rust/naga/src/front/spv/null.rs +++ b/third_party/rust/naga/src/front/spv/null.rs @@ -84,12 +84,9 @@ pub fn generate_null_constant( } crate::TypeInner::Array { base, - size: crate::ArraySize::Constant(handle), + size: crate::ArraySize::Constant(size), .. 
} => { - let size = constant_arena[handle] - .to_array_length() - .ok_or(Error::InvalidArraySize(handle))?; let inner = generate_null_constant(base, type_arena, constant_arena, span)?; let value = constant_arena.fetch_or_append( crate::Constant { @@ -101,7 +98,7 @@ pub fn generate_null_constant( ); crate::ConstantInner::Composite { ty, - components: vec![value; size as usize], + components: vec![value; size.get() as usize], } } ref other => { diff --git a/third_party/rust/naga/src/front/wgsl/error.rs b/third_party/rust/naga/src/front/wgsl/error.rs index 82f7c609eef3..dc9a3b178dd4 100644 --- a/third_party/rust/naga/src/front/wgsl/error.rs +++ b/third_party/rust/naga/src/front/wgsl/error.rs @@ -239,7 +239,10 @@ pub enum Error<'a> { found: u32, }, FunctionReturnsVoid(Span), + InvalidWorkGroupUniformLoad(Span), Other, + ExpectedArraySize(Span), + NonPositiveArrayLength(Span), } impl<'a> Error<'a> { @@ -680,11 +683,27 @@ impl<'a> Error<'a> { "perhaps you meant to call the function in a separate statement?".into(), ], }, + Error::InvalidWorkGroupUniformLoad(span) => ParseError { + message: "incorrect type passed to workgroupUniformLoad".into(), + labels: vec![(span, "".into())], + notes: vec!["passed type must be a workgroup pointer".into()], + }, Error::Other => ParseError { message: "other error".to_string(), labels: vec![], notes: vec![], }, + Error::ExpectedArraySize(span) => ParseError { + message: "array element count must resolve to an integer scalar (u32 or i32)" + .to_string(), + labels: vec![(span, "must resolve to u32/i32".into())], + notes: vec![], + }, + Error::NonPositiveArrayLength(span) => ParseError { + message: "array element count must be greater than zero".to_string(), + labels: vec![(span, "must be positive".into())], + notes: vec![], + }, } } } diff --git a/third_party/rust/naga/src/front/wgsl/lower/construction.rs b/third_party/rust/naga/src/front/wgsl/lower/construction.rs index 723d4441f53c..8d7a5cdc2e72 100644 --- a/third_party/rust/naga/src/front/wgsl/lower/construction.rs +++ b/third_party/rust/naga/src/front/wgsl/lower/construction.rs @@ -1,3 +1,5 @@ +use std::num::NonZeroU32; + use crate::front::wgsl::parse::ast; use crate::{Handle, Span}; @@ -199,12 +201,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { _ => return Err(Error::TypeNotInferrable(ty_span)), }; - return match ctx.create_zero_value_constant(ty) { - Some(constant) => { - Ok(ctx.interrupt_emitter(crate::Expression::Constant(constant), span)) - } - None => Err(Error::TypeNotConstructible(ty_span)), - }; + return Ok(ctx.interrupt_emitter(crate::Expression::ZeroValue(ty), span)); } // Scalar constructor & conversion (scalar -> scalar) @@ -461,24 +458,13 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let base = ctx.register_type(components[0])?; - let size = crate::Constant { - name: None, - specialization: None, - inner: crate::ConstantInner::Scalar { - width: 4, - value: crate::ScalarValue::Uint(components.len() as _), - }, - }; - let inner = crate::TypeInner::Array { base, size: crate::ArraySize::Constant( - ctx.module.constants.fetch_or_append(size, Span::UNDEFINED), + NonZeroU32::new(u32::try_from(components.len()).unwrap()).unwrap(), ), stride: { - self.layouter - .update(&ctx.module.types, &ctx.module.constants) - .unwrap(); + self.layouter.update(ctx.module.to_ctx()).unwrap(); self.layouter[base].to_stride() }, }; @@ -645,14 +631,19 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let base = self.resolve_ast_type(base, ctx.reborrow())?; let size = match size { ast::ArraySize::Constant(expr) => { 
- crate::ArraySize::Constant(self.constant(expr, ctx.reborrow())?) + let span = ctx.ast_expressions.get_span(expr); + let constant = self.constant(expr, ctx.reborrow())?; + let size = ctx.module.constants[constant] + .to_array_length() + .ok_or(Error::ExpectedArraySize(span))?; + let size = + NonZeroU32::new(size).ok_or(Error::NonPositiveArrayLength(span))?; + crate::ArraySize::Constant(size) } ast::ArraySize::Dynamic => crate::ArraySize::Dynamic, }; - self.layouter - .update(&ctx.module.types, &ctx.module.constants) - .unwrap(); + self.layouter.update(ctx.module.to_ctx()).unwrap(); let ty = ctx.ensure_type_exists(crate::TypeInner::Array { base, size, diff --git a/third_party/rust/naga/src/front/wgsl/lower/mod.rs b/third_party/rust/naga/src/front/wgsl/lower/mod.rs index bc9cce1bee67..0de078353bc1 100644 --- a/third_party/rust/naga/src/front/wgsl/lower/mod.rs +++ b/third_party/rust/naga/src/front/wgsl/lower/mod.rs @@ -1,3 +1,5 @@ +use std::num::NonZeroU32; + use crate::front::wgsl::error::{Error, ExpectedToken, InvalidAssignmentType}; use crate::front::wgsl::index::Index; use crate::front::wgsl::parse::number::Number; @@ -342,85 +344,8 @@ impl<'a> ExpressionContext<'a, '_, '_> { } } - /// Creates a zero value constant of type `ty` - /// - /// Returns `None` if the given `ty` is not a constructible type - fn create_zero_value_constant( - &mut self, - ty: Handle, - ) -> Option> { - let inner = match self.module.types[ty].inner { - crate::TypeInner::Scalar { kind, width } => { - let value = match kind { - crate::ScalarKind::Sint => crate::ScalarValue::Sint(0), - crate::ScalarKind::Uint => crate::ScalarValue::Uint(0), - crate::ScalarKind::Float => crate::ScalarValue::Float(0.), - crate::ScalarKind::Bool => crate::ScalarValue::Bool(false), - }; - crate::ConstantInner::Scalar { width, value } - } - crate::TypeInner::Vector { size, kind, width } => { - let scalar_ty = self.ensure_type_exists(crate::TypeInner::Scalar { width, kind }); - let component = self.create_zero_value_constant(scalar_ty)?; - crate::ConstantInner::Composite { - ty, - components: (0..size as u8).map(|_| component).collect(), - } - } - crate::TypeInner::Matrix { - columns, - rows, - width, - } => { - let vec_ty = self.ensure_type_exists(crate::TypeInner::Vector { - width, - kind: crate::ScalarKind::Float, - size: rows, - }); - let component = self.create_zero_value_constant(vec_ty)?; - crate::ConstantInner::Composite { - ty, - components: (0..columns as u8).map(|_| component).collect(), - } - } - crate::TypeInner::Array { - base, - size: crate::ArraySize::Constant(size), - .. - } => { - let size = self.module.constants[size].to_array_length()?; - let component = self.create_zero_value_constant(base)?; - crate::ConstantInner::Composite { - ty, - components: (0..size).map(|_| component).collect(), - } - } - crate::TypeInner::Struct { ref members, .. 
} => { - let members = members.clone(); - crate::ConstantInner::Composite { - ty, - components: members - .iter() - .map(|member| self.create_zero_value_constant(member.ty)) - .collect::>()?, - } - } - _ => return None, - }; - - let constant = self.module.constants.fetch_or_append( - crate::Constant { - name: None, - specialization: None, - inner, - }, - Span::UNDEFINED, - ); - Some(constant) - } - fn format_typeinner(&self, inner: &crate::TypeInner) -> String { - inner.to_wgsl(&self.module.types, &self.module.constants) + inner.to_wgsl(self.module.to_ctx()) } fn format_type(&self, handle: Handle) -> String { @@ -701,14 +626,16 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { if let Some(explicit) = explicit_ty { if explicit != inferred_type { let ty = &ctx.module.types[explicit]; - let explicit = ty.name.clone().unwrap_or_else(|| { - ty.inner.to_wgsl(&ctx.module.types, &ctx.module.constants) - }); + let explicit = ty + .name + .clone() + .unwrap_or_else(|| ty.inner.to_wgsl(ctx.module.to_ctx())); let ty = &ctx.module.types[inferred_type]; - let inferred = ty.name.clone().unwrap_or_else(|| { - ty.inner.to_wgsl(&ctx.module.types, &ctx.module.constants) - }); + let inferred = ty + .name + .clone() + .unwrap_or_else(|| ty.inner.to_wgsl(ctx.module.to_ctx())); return Err(Error::InitializationTypeMismatch( c.name.span, @@ -1210,36 +1137,16 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let (expr, is_reference) = match *expr { ast::Expression::Literal(literal) => { - let inner = match literal { - ast::Literal::Number(Number::F32(f)) => crate::ConstantInner::Scalar { - width: 4, - value: crate::ScalarValue::Float(f as _), - }, - ast::Literal::Number(Number::I32(i)) => crate::ConstantInner::Scalar { - width: 4, - value: crate::ScalarValue::Sint(i as _), - }, - ast::Literal::Number(Number::U32(u)) => crate::ConstantInner::Scalar { - width: 4, - value: crate::ScalarValue::Uint(u as _), - }, + let literal = match literal { + ast::Literal::Number(Number::F32(f)) => crate::Literal::F32(f), + ast::Literal::Number(Number::I32(i)) => crate::Literal::I32(i), + ast::Literal::Number(Number::U32(u)) => crate::Literal::U32(u), ast::Literal::Number(_) => { unreachable!("got abstract numeric type when not expected"); } - ast::Literal::Bool(b) => crate::ConstantInner::Scalar { - width: 1, - value: crate::ScalarValue::Bool(b), - }, + ast::Literal::Bool(b) => crate::Literal::Bool(b), }; - let handle = ctx.module.constants.fetch_or_append( - crate::Constant { - name: None, - specialization: None, - inner, - }, - Span::UNDEFINED, - ); - let handle = ctx.interrupt_emitter(crate::Expression::Constant(handle), span); + let handle = ctx.interrupt_emitter(crate::Expression::Literal(literal), span); return Ok(TypedExpression::non_reference(handle)); } ast::Expression::Ident(ast::IdentExpr::Local(local)) => { @@ -1341,35 +1248,54 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { )); } - if let crate::Expression::Constant(constant) = ctx.naga_expressions[index] { - let span = ctx.naga_expressions.get_span(index); - let index = match ctx.module.constants[constant].inner { - crate::ConstantInner::Scalar { - value: crate::ScalarValue::Uint(int), - .. - } => u32::try_from(int).map_err(|_| Error::BadU32Constant(span)), - crate::ConstantInner::Scalar { - value: crate::ScalarValue::Sint(int), - .. 
- } => u32::try_from(int).map_err(|_| Error::BadU32Constant(span)), - _ => Err(Error::BadU32Constant(span)), - }?; + match ctx.naga_expressions[index] { + crate::Expression::Constant(constant) => { + let span = ctx.naga_expressions.get_span(index); + let index = match ctx.module.constants[constant].inner { + crate::ConstantInner::Scalar { + value: crate::ScalarValue::Uint(int), + .. + } => u32::try_from(int).map_err(|_| Error::BadU32Constant(span)), + crate::ConstantInner::Scalar { + value: crate::ScalarValue::Sint(int), + .. + } => u32::try_from(int).map_err(|_| Error::BadU32Constant(span)), + _ => Err(Error::BadU32Constant(span)), + }?; - ( - crate::Expression::AccessIndex { - base: expr.handle, - index, - }, - expr.is_reference, - ) - } else { - ( + ( + crate::Expression::AccessIndex { + base: expr.handle, + index, + }, + expr.is_reference, + ) + } + crate::Expression::Literal(literal) => { + let span = ctx.naga_expressions.get_span(index); + let index = match literal { + crate::Literal::U32(n) => n, + crate::Literal::I32(n) => { + u32::try_from(n).map_err(|_| Error::BadU32Constant(span))? + } + _ => return Err(Error::BadU32Constant(span)), + }; + + ( + crate::Expression::AccessIndex { + base: expr.handle, + index, + }, + expr.is_reference, + ) + } + _ => ( crate::Expression::Access { base: expr.handle, index, }, expr.is_reference, - ) + ), } } ast::Expression::Member { base, ref field } => { @@ -1765,6 +1691,36 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { .push(crate::Statement::Barrier(crate::Barrier::WORK_GROUP), span); return Ok(None); } + "workgroupUniformLoad" => { + let mut args = ctx.prepare_args(arguments, 1, span); + let expr = args.next()?; + args.finish()?; + + let pointer = self.expression(expr, ctx.reborrow())?; + ctx.grow_types(pointer)?; + let result_ty = match *ctx.resolved_inner(pointer) { + crate::TypeInner::Pointer { + base, + space: crate::AddressSpace::WorkGroup, + } => base, + ref other => { + log::error!("Type {other:?} passed to workgroupUniformLoad"); + let span = ctx.ast_expressions.get_span(expr); + return Err(Error::InvalidWorkGroupUniformLoad(span)); + } + }; + let result = ctx.interrupt_emitter( + crate::Expression::WorkGroupUniformLoadResult { ty: result_ty }, + span, + ); + ctx.block.push( + crate::Statement::WorkGroupUniformLoad { pointer, result }, + span, + ); + + ctx.grow_types(pointer)?; + return Ok(Some(result)); + } "textureStore" => { let mut args = ctx.prepare_args(arguments, 3, span); @@ -2143,9 +2099,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { for member in s.members.iter() { let ty = self.resolve_ast_type(member.ty, ctx.reborrow())?; - self.layouter - .update(&ctx.module.types, &ctx.module.constants) - .unwrap(); + self.layouter.update(ctx.module.to_ctx()).unwrap(); let member_min_size = self.layouter[ty].size; let member_min_alignment = self.layouter[ty].alignment; @@ -2232,16 +2186,20 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { } ast::Type::Array { base, size } => { let base = self.resolve_ast_type(base, ctx.reborrow())?; - self.layouter - .update(&ctx.module.types, &ctx.module.constants) - .unwrap(); + self.layouter.update(ctx.module.to_ctx()).unwrap(); crate::TypeInner::Array { base, size: match size { ast::ArraySize::Constant(constant) => { + let span = ctx.ast_expressions.get_span(constant); let constant = self.constant(constant, ctx.reborrow())?; - crate::ArraySize::Constant(constant) + let size = ctx.module.constants[constant] + .to_array_length() + .ok_or(Error::ExpectedArraySize(span))?; + let size = + 
NonZeroU32::new(size).ok_or(Error::NonPositiveArrayLength(span))?; + crate::ArraySize::Constant(size) } ast::ArraySize::Dynamic => crate::ArraySize::Dynamic, }, @@ -2267,8 +2225,14 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { base, size: match size { ast::ArraySize::Constant(constant) => { + let span = ctx.ast_expressions.get_span(constant); let constant = self.constant(constant, ctx.reborrow())?; - crate::ArraySize::Constant(constant) + let size = ctx.module.constants[constant] + .to_array_length() + .ok_or(Error::ExpectedArraySize(span))?; + let size = + NonZeroU32::new(size).ok_or(Error::NonPositiveArrayLength(span))?; + crate::ArraySize::Constant(size) } ast::ArraySize::Dynamic => crate::ArraySize::Dynamic, }, diff --git a/third_party/rust/naga/src/front/wgsl/mod.rs b/third_party/rust/naga/src/front/wgsl/mod.rs index eb21fae6c975..b60f6fe6557b 100644 --- a/third_party/rust/naga/src/front/wgsl/mod.rs +++ b/third_party/rust/naga/src/front/wgsl/mod.rs @@ -11,8 +11,6 @@ mod parse; #[cfg(test)] mod tests; -use crate::arena::{Arena, UniqueArena}; - use crate::front::wgsl::error::Error; use crate::front::wgsl::parse::Parser; use thiserror::Error; @@ -100,11 +98,7 @@ impl crate::TypeInner { /// For example `vec3`. /// /// Note: The names of a `TypeInner::Struct` is not known. Therefore this method will simply return "struct" for them. - fn to_wgsl( - &self, - types: &UniqueArena, - constants: &Arena, - ) -> String { + fn to_wgsl(&self, gctx: crate::proc::GlobalCtx) -> String { use crate::TypeInner as Ti; match *self { @@ -128,7 +122,7 @@ impl crate::TypeInner { format!("atomic<{}>", kind.to_wgsl(width)) } Ti::Pointer { base, .. } => { - let base = &types[base]; + let base = &gctx.types[base]; let name = base.name.as_deref().unwrap_or("unknown"); format!("ptr<{name}>") } @@ -136,27 +130,10 @@ impl crate::TypeInner { format!("ptr<{}>", kind.to_wgsl(width)) } Ti::Array { base, size, .. } => { - let member_type = &types[base]; + let member_type = &gctx.types[base]; let base = member_type.name.as_deref().unwrap_or("unknown"); match size { - crate::ArraySize::Constant(size) => { - let constant = &constants[size]; - let size = constant - .name - .clone() - .unwrap_or_else(|| match constant.inner { - crate::ConstantInner::Scalar { - value: crate::ScalarValue::Uint(size), - .. - } => size.to_string(), - crate::ConstantInner::Scalar { - value: crate::ScalarValue::Sint(size), - .. - } => size.to_string(), - _ => "?".to_string(), - }); - format!("array<{base}, {size}>") - } + crate::ArraySize::Constant(size) => format!("array<{base}, {size}>"), crate::ArraySize::Dynamic => format!("array<{base}>"), } } @@ -209,13 +186,10 @@ impl crate::TypeInner { Ti::AccelerationStructure => "acceleration_structure".to_string(), Ti::RayQuery => "ray_query".to_string(), Ti::BindingArray { base, size, .. 
} => { - let member_type = &types[base]; + let member_type = &gctx.types[base]; let base = member_type.name.as_deref().unwrap_or("unknown"); match size { - crate::ArraySize::Constant(size) => { - let size = constants[size].name.as_deref().unwrap_or("unknown"); - format!("binding_array<{base}, {size}>") - } + crate::ArraySize::Constant(size) => format!("binding_array<{base}, {size}>"), crate::ArraySize::Dynamic => format!("binding_array<{base}>"), } } @@ -226,19 +200,9 @@ impl crate::TypeInner { mod type_inner_tests { #[test] fn to_wgsl() { + use std::num::NonZeroU32; + let mut types = crate::UniqueArena::new(); - let mut constants = crate::Arena::new(); - let c = constants.append( - crate::Constant { - name: Some("C".to_string()), - specialization: None, - inner: crate::ConstantInner::Scalar { - width: 4, - value: crate::ScalarValue::Uint(32), - }, - }, - Default::default(), - ); let mytype1 = types.insert( crate::Type { @@ -261,19 +225,23 @@ mod type_inner_tests { Default::default(), ); + let gctx = crate::proc::GlobalCtx { + types: &types, + constants: &crate::Arena::new(), + }; let array = crate::TypeInner::Array { base: mytype1, stride: 4, - size: crate::ArraySize::Constant(c), + size: crate::ArraySize::Constant(unsafe { NonZeroU32::new_unchecked(32) }), }; - assert_eq!(array.to_wgsl(&types, &constants), "array"); + assert_eq!(array.to_wgsl(gctx), "array"); let mat = crate::TypeInner::Matrix { rows: crate::VectorSize::Quad, columns: crate::VectorSize::Bi, width: 8, }; - assert_eq!(mat.to_wgsl(&types, &constants), "mat2x4"); + assert_eq!(mat.to_wgsl(gctx), "mat2x4"); let ptr = crate::TypeInner::Pointer { base: mytype2, @@ -281,7 +249,7 @@ mod type_inner_tests { access: crate::StorageAccess::default(), }, }; - assert_eq!(ptr.to_wgsl(&types, &constants), "ptr"); + assert_eq!(ptr.to_wgsl(gctx), "ptr"); let img1 = crate::TypeInner::Image { dim: crate::ImageDimension::D2, @@ -291,36 +259,27 @@ mod type_inner_tests { multi: true, }, }; - assert_eq!( - img1.to_wgsl(&types, &constants), - "texture_multisampled_2d" - ); + assert_eq!(img1.to_wgsl(gctx), "texture_multisampled_2d"); let img2 = crate::TypeInner::Image { dim: crate::ImageDimension::Cube, arrayed: true, class: crate::ImageClass::Depth { multi: false }, }; - assert_eq!(img2.to_wgsl(&types, &constants), "texture_depth_cube_array"); + assert_eq!(img2.to_wgsl(gctx), "texture_depth_cube_array"); let img3 = crate::TypeInner::Image { dim: crate::ImageDimension::D2, arrayed: false, class: crate::ImageClass::Depth { multi: true }, }; - assert_eq!( - img3.to_wgsl(&types, &constants), - "texture_depth_multisampled_2d" - ); + assert_eq!(img3.to_wgsl(gctx), "texture_depth_multisampled_2d"); let array = crate::TypeInner::BindingArray { base: mytype1, - size: crate::ArraySize::Constant(c), + size: crate::ArraySize::Constant(unsafe { NonZeroU32::new_unchecked(32) }), }; - assert_eq!( - array.to_wgsl(&types, &constants), - "binding_array" - ); + assert_eq!(array.to_wgsl(gctx), "binding_array"); } } diff --git a/third_party/rust/naga/src/front/wgsl/parse/mod.rs b/third_party/rust/naga/src/front/wgsl/parse/mod.rs index cf9892a1f378..8e7abed704c0 100644 --- a/third_party/rust/naga/src/front/wgsl/parse/mod.rs +++ b/third_party/rust/naga/src/front/wgsl/parse/mod.rs @@ -84,6 +84,18 @@ impl<'a> ExpressionContext<'a, '_, '_> { } Ok(accumulator) } + + fn declare_local(&mut self, name: ast::Ident<'a>) -> Result, Error<'a>> { + let handle = self.locals.append(ast::Local, name.span); + if let Some(old) = self.local_table.add(name.name, handle) { + 
Err(Error::Redefinition { + previous: self.locals.get_span(old), + current: name.span, + }) + } else { + Ok(handle) + } + } } /// Which grammar rule we are in the midst of parsing. @@ -1612,14 +1624,7 @@ impl Parser { let expr_id = self.general_expression(lexer, ctx.reborrow())?; lexer.expect(Token::Separator(';'))?; - let handle = ctx.locals.append(ast::Local, name.span); - if let Some(old) = ctx.local_table.add(name.name, handle) { - return Err(Error::Redefinition { - previous: ctx.locals.get_span(old), - current: name.span, - }); - } - + let handle = ctx.declare_local(name)?; ast::StatementKind::LocalDecl(ast::LocalDecl::Let(ast::Let { name, ty: given_ty, @@ -1647,14 +1652,7 @@ impl Parser { lexer.expect(Token::Separator(';'))?; - let handle = ctx.locals.append(ast::Local, name.span); - if let Some(old) = ctx.local_table.add(name.name, handle) { - return Err(Error::Redefinition { - previous: ctx.locals.get_span(old), - current: name.span, - }); - } - + let handle = ctx.declare_local(name)?; ast::StatementKind::LocalDecl(ast::LocalDecl::Var(ast::LocalVariable { name, ty, @@ -2013,15 +2011,15 @@ impl Parser { ctx.local_table.push_scope(); lexer.expect(Token::Paren('{'))?; - let mut statements = ast::Block::default(); + let mut block = ast::Block::default(); while !lexer.skip(Token::Paren('}')) { - self.statement(lexer, ctx.reborrow(), &mut statements)?; + self.statement(lexer, ctx.reborrow(), &mut block)?; } ctx.local_table.pop_scope(); let span = self.pop_rule_span(lexer); - Ok((statements, span)) + Ok((block, span)) } fn varying_binding<'a>( @@ -2060,6 +2058,9 @@ impl Parser { unresolved: dependencies, }; + // start a scope that contains arguments as well as the function body + ctx.local_table.push_scope(); + // read parameter list let mut arguments = Vec::new(); lexer.expect(Token::Paren('('))?; @@ -2078,8 +2079,7 @@ impl Parser { lexer.expect(Token::Separator(':'))?; let param_type = self.type_decl(lexer, ctx.reborrow())?; - let handle = ctx.locals.append(ast::Local, param_name.span); - ctx.local_table.add(param_name.name, handle); + let handle = ctx.declare_local(param_name)?; arguments.push(ast::FunctionArgument { name: param_name, ty: param_type, @@ -2097,8 +2097,14 @@ impl Parser { None }; - // read body - let body = self.block(lexer, ctx)?.0; + // do not use `self.block` here, since we must not push a new scope + lexer.expect(Token::Paren('{'))?; + let mut body = ast::Block::default(); + while !lexer.skip(Token::Paren('}')) { + self.statement(lexer, ctx.reborrow(), &mut body)?; + } + + ctx.local_table.pop_scope(); let fun = ast::Function { entry_point: None, diff --git a/third_party/rust/naga/src/lib.rs b/third_party/rust/naga/src/lib.rs index a70015d16d6f..7ac59be50e0d 100644 --- a/third_party/rust/naga/src/lib.rs +++ b/third_party/rust/naga/src/lib.rs @@ -75,8 +75,8 @@ of `Statement`s and other `Expression`s. Naga's rules for when `Expression`s are evaluated are as follows: -- [`Constant`](Expression::Constant) expressions are considered to be - implicitly evaluated before execution begins. +- [`Literal`], [`Constant`], and [`ZeroValue`] expressions are + considered to be implicitly evaluated before execution begins. - [`FunctionArgument`] and [`LocalVariable`] expressions are considered implicitly evaluated upon entry to the function to which they belong. @@ -174,6 +174,8 @@ tree. 
[`RayQueryProceedResult`]: Expression::RayQueryProceedResult [`CallResult`]: Expression::CallResult [`Constant`]: Expression::Constant +[`ZeroValue`]: Expression::ZeroValue +[`Literal`]: Expression::Literal [`Derivative`]: Expression::Derivative [`FunctionArgument`]: Expression::FunctionArgument [`GlobalVariable`]: Expression::GlobalVariable @@ -200,7 +202,8 @@ tree. clippy::match_like_matches_macro, clippy::collapsible_if, clippy::derive_partial_eq_without_eq, - clippy::needless_borrowed_reference + clippy::needless_borrowed_reference, + clippy::single_match )] #![warn( trivial_casts, @@ -212,7 +215,17 @@ tree. clippy::rest_pat_in_fully_bound_structs, clippy::match_wildcard_for_single_variants )] -#![cfg_attr(not(test), deny(clippy::panic))] +#![deny(clippy::exit)] +#![cfg_attr( + not(test), + warn( + clippy::dbg_macro, + clippy::panic, + clippy::print_stderr, + clippy::print_stdout, + clippy::todo + ) +)] mod arena; pub mod back; @@ -410,7 +423,7 @@ pub enum ScalarKind { #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] pub enum ArraySize { /// The array size is constant. - Constant(Handle), + Constant(std::num::NonZeroU32), /// The array size can change at runtime. Dynamic, } @@ -488,7 +501,7 @@ bitflags::bitflags! { #[cfg_attr(feature = "serialize", derive(Serialize))] #[cfg_attr(feature = "deserialize", derive(Deserialize))] #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] - #[derive(Default)] + #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct StorageAccess: u32 { /// Storage can be used as a source for load ops. const LOAD = 0x1; @@ -752,13 +765,12 @@ pub enum TypeInner { /// buffers could have elements that are dynamically sized arrays, each with /// a different length. /// - /// Binding arrays are not [`DATA`]. This means that all binding array - /// globals must be placed in the [`Handle`] address space. Referring to - /// such a global produces a `BindingArray` value directly; there are never - /// pointers to binding arrays. The only operation permitted on - /// `BindingArray` values is indexing, which yields the element by value, - /// not a pointer to the element. (This means that buffer array contents - /// cannot be stored to; [naga#1864] covers lifting this restriction.) + /// Binding arrays are in the same address spaces as their underlying type. + /// As such, referring to an array of images produces an [`Image`] value + /// directly (as opposed to a pointer). The only operation permitted on + /// `BindingArray` values is indexing, which works transparently: indexing + /// a binding array of samplers yields a [`Sampler`], indexing a pointer to the + /// binding array of storage buffers produces a pointer to the storage struct. /// /// Unlike textures and samplers, binding arrays are not [`ARGUMENT`], so /// they cannot be passed as arguments to functions. 
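// Editor's sketch (illustration only): the conversion this patch applies at
// every site that previously stored a `Handle<Constant>` array size, now
// that `ArraySize::Constant` holds a plain `std::num::NonZeroU32` (see the
// `ArraySize` hunk below). Lengths that are zero or do not fit in `u32` are
// rejected up front, so no arena lookup is needed later. `SizeError` is a
// hypothetical stand-in for the per-frontend error types used in the hunks
// above.

use std::num::NonZeroU32;

#[derive(Debug)]
struct SizeError(&'static str);

fn array_size(len: usize) -> Result<NonZeroU32, SizeError> {
    let n = u32::try_from(len)
        .map_err(|_| SizeError("array element count does not fit in u32"))?;
    NonZeroU32::new(n).ok_or(SizeError("array element count must be greater than zero"))
}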
@@ -774,7 +786,6 @@ pub enum TypeInner { /// [`SamplerArray`]: https://docs.rs/wgpu/latest/wgpu/enum.BindingResource.html#variant.SamplerArray /// [`BufferArray`]: https://docs.rs/wgpu/latest/wgpu/enum.BindingResource.html#variant.BufferArray /// [`DATA`]: crate::valid::TypeFlags::DATA - /// [`Handle`]: AddressSpace::Handle /// [`ARGUMENT`]: crate::valid::TypeFlags::ARGUMENT /// [naga#1864]: https://github.com/gfx-rs/naga/issues/1864 BindingArray { base: Handle, size: ArraySize }, @@ -792,6 +803,18 @@ pub struct Constant { pub inner: ConstantInner, } +#[derive(Debug, Clone, Copy, PartialOrd)] +#[cfg_attr(feature = "serialize", derive(Serialize))] +#[cfg_attr(feature = "deserialize", derive(Deserialize))] +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +pub enum Literal { + F64(f64), + F32(f32), + U32(u32), + I32(i32), + Bool(bool), +} + /// A literal scalar value, used in constants. #[derive(Debug, Clone, Copy, PartialOrd)] #[cfg_attr(feature = "serialize", derive(Serialize))] @@ -1172,7 +1195,7 @@ bitflags::bitflags! { #[cfg_attr(feature = "serialize", derive(Serialize))] #[cfg_attr(feature = "deserialize", derive(Deserialize))] #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] - #[derive(Default)] + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub struct Barrier: u32 { /// Barrier affects all `AddressSpace::Storage` accesses. const STORAGE = 0x1; @@ -1190,6 +1213,13 @@ bitflags::bitflags! { #[cfg_attr(feature = "deserialize", derive(Deserialize))] #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] pub enum Expression { + /// Literal. + Literal(Literal), + /// Constant value. + Constant(Handle), + /// Zero value of a type. + ZeroValue(Handle), + /// Array access with a computed index. /// /// ## Typing rules @@ -1248,8 +1278,6 @@ pub enum Expression { base: Handle, index: u32, }, - /// Constant value. - Constant(Handle), /// Splat scalar into a vector. Splat { size: VectorSize, @@ -1445,6 +1473,13 @@ pub enum Expression { CallResult(Handle), /// Result of an atomic operation. AtomicResult { ty: Handle, comparison: bool }, + /// Result of a [`WorkGroupUniformLoad`] statement. + /// + /// [`WorkGroupUniformLoad`]: Statement::WorkGroupUniformLoad + WorkGroupUniformLoadResult { + /// The type of the result + ty: Handle, + }, /// Get the length of an array. /// The expression must resolve to a pointer to an array with a dynamic size. /// @@ -1598,14 +1633,16 @@ pub enum Statement { /// The `continuing` block and its substatements must not contain `Return` /// or `Kill` statements, or any `Break` or `Continue` statements targeting /// this loop. (It may have `Break` and `Continue` statements targeting - /// loops or switches nested within the `continuing` block.) + /// loops or switches nested within the `continuing` block.) Expressions + /// emitted in `body` are in scope in `continuing`. /// /// If present, `break_if` is an expression which is evaluated after the - /// continuing block. If its value is true, control continues after the - /// `Loop` statement, rather than branching back to the top of body as - /// usual. The `break_if` expression corresponds to a "break if" statement - /// in WGSL, or a loop whose back edge is an `OpBranchConditional` - /// instruction in SPIR-V. + /// continuing block. Expressions emitted in `body` or `continuing` are + /// considered to be in scope. If the expression's value is true, control + /// continues after the `Loop` statement, rather than branching back to the + /// top of body as usual. 
The `break_if` expression corresponds to a "break + /// if" statement in WGSL, or a loop whose back edge is an + /// `OpBranchConditional` instruction in SPIR-V. /// /// [`Break`]: Statement::Break /// [`Continue`]: Statement::Continue @@ -1702,6 +1739,21 @@ pub enum Statement { /// [`AtomicResult`]: crate::Expression::AtomicResult result: Handle, }, + /// Load uniformly from a uniform pointer in the workgroup address space. + /// + /// Corresponds to the [`workgroupUniformLoad`](https://www.w3.org/TR/WGSL/#workgroupUniformLoad-builtin) + /// built-in function of wgsl, and has the same barrier semantics + WorkGroupUniformLoad { + /// This must be of type [`Pointer`] in the [`WorkGroup`] address space + /// + /// [`Pointer`]: TypeInner::Pointer + /// [`WorkGroup`]: AddressSpace::WorkGroup + pointer: Handle, + /// The [`WorkGroupUniformLoadResult`] expression representing this load's result. + /// + /// [`WorkGroupUniformLoadResult`]: Expression::WorkGroupUniformLoadResult + result: Handle, + }, /// Calls a function. /// /// If the `result` is `Some`, the corresponding expression has to be diff --git a/third_party/rust/naga/src/proc/index.rs b/third_party/rust/naga/src/proc/index.rs index 3fea79ec0197..0ccdfdca5495 100644 --- a/third_party/rust/naga/src/proc/index.rs +++ b/third_party/rust/naga/src/proc/index.rs @@ -390,7 +390,9 @@ impl crate::TypeInner { match *base_inner { Ti::Vector { size, .. } => size as _, Ti::Matrix { columns, .. } => columns as _, - Ti::Array { size, .. } => return size.to_indexable_length(module), + Ti::Array { size, .. } | Ti::BindingArray { size, .. } => { + return size.to_indexable_length(module) + } _ => return Err(IndexableLengthError::TypeNotIndexable), } } @@ -414,23 +416,12 @@ pub enum IndexableLength { } impl crate::ArraySize { - pub fn to_indexable_length( + pub const fn to_indexable_length( self, - module: &crate::Module, + _module: &crate::Module, ) -> Result { Ok(match self { - Self::Constant(k) => { - let constant = &module.constants[k]; - if constant.specialization.is_some() { - // Specializable constants are not supported as array lengths. - // See valid::TypeError::UnsupportedSpecializedArrayLength. - return Err(IndexableLengthError::InvalidArrayLength(k)); - } - let length = constant - .to_array_length() - .ok_or(IndexableLengthError::InvalidArrayLength(k))?; - IndexableLength::Known(length) - } + Self::Constant(length) => IndexableLength::Known(length.get()), Self::Dynamic => IndexableLength::Dynamic, }) } diff --git a/third_party/rust/naga/src/proc/layouter.rs b/third_party/rust/naga/src/proc/layouter.rs index 65369d1cc8fa..11b2250e938f 100644 --- a/third_party/rust/naga/src/proc/layouter.rs +++ b/third_party/rust/naga/src/proc/layouter.rs @@ -1,4 +1,4 @@ -use crate::arena::{Arena, Handle, UniqueArena}; +use crate::arena::Handle; use std::{fmt::Display, num::NonZeroU32, ops}; /// A newtype struct where its only valid values are powers of 2 @@ -165,15 +165,11 @@ impl Layouter { /// constant arenas, and then assume that layouts are available for all /// types. 
#[allow(clippy::or_fun_call)] - pub fn update( - &mut self, - types: &UniqueArena, - constants: &Arena, - ) -> Result<(), LayoutError> { + pub fn update(&mut self, gctx: super::GlobalCtx) -> Result<(), LayoutError> { use crate::TypeInner as Ti; - for (ty_handle, ty) in types.iter().skip(self.layouts.len()) { - let size = ty.inner.size(constants); + for (ty_handle, ty) in gctx.types.iter().skip(self.layouts.len()) { + let size = ty.inner.size(gctx); let layout = match ty.inner { Ti::Scalar { width, .. } | Ti::Atomic { width, .. } => { let alignment = Alignment::new(width as u32) diff --git a/third_party/rust/naga/src/proc/mod.rs b/third_party/rust/naga/src/proc/mod.rs index a775272a1930..ddcca263c8ca 100644 --- a/third_party/rust/naga/src/proc/mod.rs +++ b/third_party/rust/naga/src/proc/mod.rs @@ -82,6 +82,101 @@ impl super::ScalarKind { } } +impl PartialEq for crate::Literal { + fn eq(&self, other: &Self) -> bool { + match (*self, *other) { + (Self::F64(a), Self::F64(b)) => a.to_bits() == b.to_bits(), + (Self::F32(a), Self::F32(b)) => a.to_bits() == b.to_bits(), + (Self::U32(a), Self::U32(b)) => a == b, + (Self::I32(a), Self::I32(b)) => a == b, + (Self::Bool(a), Self::Bool(b)) => a == b, + _ => false, + } + } +} +impl Eq for crate::Literal {} +impl std::hash::Hash for crate::Literal { + fn hash(&self, hasher: &mut H) { + match *self { + Self::F64(v) => { + hasher.write_u8(0); + v.to_bits().hash(hasher); + } + Self::F32(v) => { + hasher.write_u8(1); + v.to_bits().hash(hasher); + } + Self::U32(v) => { + hasher.write_u8(2); + v.hash(hasher); + } + Self::I32(v) => { + hasher.write_u8(3); + v.hash(hasher); + } + Self::Bool(v) => { + hasher.write_u8(4); + v.hash(hasher); + } + } + } +} + +impl crate::Literal { + pub const fn new(value: u8, kind: crate::ScalarKind, width: crate::Bytes) -> Option { + match (value, kind, width) { + (value, crate::ScalarKind::Float, 8) => Some(Self::F64(value as _)), + (value, crate::ScalarKind::Float, 4) => Some(Self::F32(value as _)), + (value, crate::ScalarKind::Uint, 4) => Some(Self::U32(value as _)), + (value, crate::ScalarKind::Sint, 4) => Some(Self::I32(value as _)), + (1, crate::ScalarKind::Bool, 4) => Some(Self::Bool(true)), + (0, crate::ScalarKind::Bool, 4) => Some(Self::Bool(false)), + _ => None, + } + } + + pub const fn from_scalar(scalar: crate::ScalarValue, width: crate::Bytes) -> Option { + match (scalar, width) { + (crate::ScalarValue::Sint(n), 4) => Some(Self::I32(n as _)), + (crate::ScalarValue::Uint(n), 4) => Some(Self::U32(n as _)), + (crate::ScalarValue::Float(n), 4) => Some(Self::F32(n as _)), + (crate::ScalarValue::Float(n), 8) => Some(Self::F64(n)), + (crate::ScalarValue::Bool(b), _) => Some(Self::Bool(b)), + _ => None, + } + } + + pub const fn zero(kind: crate::ScalarKind, width: crate::Bytes) -> Option { + Self::new(0, kind, width) + } + + pub const fn one(kind: crate::ScalarKind, width: crate::Bytes) -> Option { + Self::new(1, kind, width) + } + + pub const fn width(&self) -> crate::Bytes { + match *self { + Self::F64(_) => 8, + Self::F32(_) | Self::U32(_) | Self::I32(_) => 4, + Self::Bool(_) => 1, + } + } + pub const fn scalar_kind(&self) -> crate::ScalarKind { + match *self { + Self::F64(_) | Self::F32(_) => crate::ScalarKind::Float, + Self::U32(_) => crate::ScalarKind::Uint, + Self::I32(_) => crate::ScalarKind::Sint, + Self::Bool(_) => crate::ScalarKind::Bool, + } + } + pub const fn ty_inner(&self) -> crate::TypeInner { + crate::TypeInner::Scalar { + kind: self.scalar_kind(), + width: self.width(), + } + } +} + pub const POINTER_SPAN: 
u32 = 4; impl super::TypeInner { @@ -104,7 +199,7 @@ impl super::TypeInner { } /// Get the size of this type. - pub fn size(&self, constants: &super::Arena) -> u32 { + pub fn size(&self, _gctx: GlobalCtx) -> u32 { match *self { Self::Scalar { kind: _, width } | Self::Atomic { kind: _, width } => width as u32, Self::Vector { @@ -125,9 +220,7 @@ impl super::TypeInner { stride, } => { let count = match size { - super::ArraySize::Constant(handle) => { - constants[handle].to_array_length().unwrap_or(1) - } + super::ArraySize::Constant(count) => count.get(), // A dynamically-sized array has to have at least one element super::ArraySize::Dynamic => 1, }; @@ -311,7 +404,9 @@ impl crate::Expression { /// Returns true if the expression is considered emitted at the start of a function. pub const fn needs_pre_emit(&self) -> bool { match *self { - Self::Constant(_) + Self::Literal(_) + | Self::Constant(_) + | Self::ZeroValue(_) | Self::FunctionArgument(_) | Self::GlobalVariable(_) | Self::LocalVariable(_) => true, @@ -478,16 +573,31 @@ impl super::ImageClass { } } +impl crate::Module { + pub const fn to_ctx(&self) -> GlobalCtx<'_> { + GlobalCtx { + types: &self.types, + constants: &self.constants, + } + } +} + +#[derive(Clone, Copy)] +pub struct GlobalCtx<'a> { + pub types: &'a crate::UniqueArena, + pub constants: &'a crate::Arena, +} + #[test] fn test_matrix_size() { - let constants = crate::Arena::new(); + let module = crate::Module::default(); assert_eq!( crate::TypeInner::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Tri, width: 4 } - .size(&constants), + .size(module.to_ctx()), 48, ); } diff --git a/third_party/rust/naga/src/proc/namer.rs b/third_party/rust/naga/src/proc/namer.rs index 053126b8acaf..de98f601cc03 100644 --- a/third_party/rust/naga/src/proc/namer.rs +++ b/third_party/rust/naga/src/proc/namer.rs @@ -1,5 +1,6 @@ use crate::{arena::Handle, FastHashMap, FastHashSet}; use std::borrow::Cow; +use std::hash::{Hash, Hasher}; pub type EntryPointIndex = u16; const SEPARATOR: char = '_'; @@ -24,8 +25,9 @@ pub enum NameKey { pub struct Namer { /// The last numeric suffix used for each base name. Zero means "no suffix". unique: FastHashMap, - keywords: FastHashSet, - reserved_prefixes: Vec, + keywords: FastHashSet<&'static str>, + keywords_case_insensitive: FastHashSet>, + reserved_prefixes: Vec<&'static str>, } impl Namer { @@ -102,10 +104,15 @@ impl Namer { } None => { let mut suffixed = base.to_string(); - if base.ends_with(char::is_numeric) || self.keywords.contains(base.as_ref()) { + if base.ends_with(char::is_numeric) + || self.keywords.contains(base.as_ref()) + || self + .keywords_case_insensitive + .contains(&AsciiUniCase(base.as_ref())) + { suffixed.push(SEPARATOR); } - debug_assert!(!self.keywords.contains(&suffixed)); + debug_assert!(!self.keywords.contains::(&suffixed)); // `self.unique` wants to own its keys. This allocates only if we haven't // already done so earlier. 
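// Editor's sketch (illustration only): the obvious alternative to the
// `AsciiUniCase` wrapper defined at the end of this namer.rs hunk would be
// to lowercase every probe string, but that allocates a fresh `String` per
// identifier lookup. The wrapper instead gives `&str` an
// ASCII-case-insensitive `Eq`/`Hash` pair (strings equal under
// `eq_ignore_ascii_case` hash identically, because hashing feeds each byte
// through `to_ascii_lowercase`), keeping lookups allocation-free; the
// `debug_assert!` above restricts the reserved-word set to ASCII so this
// stays sound. The naive version, for contrast:

use std::collections::HashSet;

fn is_reserved_naive(keywords: &HashSet<String>, ident: &str) -> bool {
    keywords.contains(&ident.to_ascii_lowercase()) // builds a String each call
}

#[test]
fn case_insensitive_lookup() {
    let keywords: HashSet<String> = ["texture2d"].iter().map(|s| s.to_string()).collect();
    assert!(is_reserved_naive(&keywords, "Texture2D"));
}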
                 self.unique.insert(base.into_owned(), 0);
@@ -136,18 +143,30 @@ impl Namer {
     pub fn reset(
         &mut self,
         module: &crate::Module,
-        reserved_keywords: &[&str],
-        reserved_prefixes: &[&str],
+        reserved_keywords: &[&'static str],
+        extra_reserved_keywords: &[&'static str],
+        reserved_keywords_case_insensitive: &[&'static str],
+        reserved_prefixes: &[&'static str],
         output: &mut FastHashMap<NameKey, String>,
     ) {
         self.reserved_prefixes.clear();
-        self.reserved_prefixes
-            .extend(reserved_prefixes.iter().map(|string| string.to_string()));
+        self.reserved_prefixes.extend(reserved_prefixes.iter());
 
         self.unique.clear();
         self.keywords.clear();
-        self.keywords
-            .extend(reserved_keywords.iter().map(|string| (string.to_string())));
+        self.keywords.extend(reserved_keywords.iter());
+        self.keywords.extend(extra_reserved_keywords.iter());
+
+        debug_assert!(reserved_keywords_case_insensitive
+            .iter()
+            .all(|s| s.is_ascii()));
+        self.keywords_case_insensitive.clear();
+        self.keywords_case_insensitive.extend(
+            reserved_keywords_case_insensitive
+                .iter()
+                .map(|string| (AsciiUniCase(*string))),
+        );
+
         let mut temp = String::new();
 
         for (ty_handle, ty) in module.types.iter() {
@@ -252,6 +271,33 @@ impl Namer {
     }
 }
 
+/// A string wrapper type with an ascii case insensitive Eq and Hash impl
+struct AsciiUniCase<S: AsRef<str> + ?Sized>(S);
+
+impl<S: AsRef<str>> PartialEq for AsciiUniCase<S> {
+    #[inline]
+    fn eq(&self, other: &Self) -> bool {
+        self.0.as_ref().eq_ignore_ascii_case(other.0.as_ref())
+    }
+}
+
+impl<S: AsRef<str>> Eq for AsciiUniCase<S> {}
+
+impl<S: AsRef<str>> Hash for AsciiUniCase<S> {
+    #[inline]
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        for byte in self
+            .0
+            .as_ref()
+            .as_bytes()
+            .iter()
+            .map(|b| b.to_ascii_lowercase())
+        {
+            hasher.write_u8(byte);
+        }
+    }
+}
+
 #[test]
 fn test() {
     let mut namer = Namer::default();
diff --git a/third_party/rust/naga/src/proc/terminator.rs b/third_party/rust/naga/src/proc/terminator.rs
index ca0c3f10bcd0..a5239d4eca78 100644
--- a/third_party/rust/naga/src/proc/terminator.rs
+++ b/third_party/rust/naga/src/proc/terminator.rs
@@ -36,6 +36,7 @@ pub fn ensure_block_returns(block: &mut crate::Block) {
                 | S::Call { .. }
                 | S::RayQuery { .. }
                 | S::Atomic { .. }
+                | S::WorkGroupUniformLoad { .. }
                 | S::Barrier(_)),
         )
         | None => block.push(S::Return { value: None }, Default::default()),
diff --git a/third_party/rust/naga/src/proc/typifier.rs b/third_party/rust/naga/src/proc/typifier.rs
index 0bb9019a29b5..f0030f340ff7 100644
--- a/third_party/rust/naga/src/proc/typifier.rs
+++ b/third_party/rust/naga/src/proc/typifier.rs
@@ -298,6 +298,7 @@ impl<'a> ResolveContext<'a> {
                     width,
                     space,
                 },
+                Ti::BindingArray { base, .. } => Ti::Pointer { base, space },
                 ref other => {
                     log::error!("Access sub-type {:?}", other);
                     return Err(ResolveError::InvalidSubAccess {
@@ -401,6 +402,7 @@ impl<'a> ResolveContext<'a> {
                         space,
                     }
                 }
+                Ti::BindingArray { base, ..
} => Ti::Pointer { base, space }, ref other => { log::error!("Access index sub-type {:?}", other); return Err(ResolveError::InvalidSubAccess { @@ -419,6 +421,7 @@ impl<'a> ResolveContext<'a> { } } } + crate::Expression::Literal(lit) => TypeResolution::Value(lit.ty_inner()), crate::Expression::Constant(h) => match self.constants[h].inner { crate::ConstantInner::Scalar { width, ref value } => { TypeResolution::Value(Ti::Scalar { @@ -428,6 +431,7 @@ impl<'a> ResolveContext<'a> { } crate::ConstantInner::Composite { ty, components: _ } => TypeResolution::Handle(ty), }, + crate::Expression::ZeroValue(ty) => TypeResolution::Handle(ty), crate::Expression::Splat { size, value } => match *past(value)?.inner_with(types) { Ti::Scalar { kind, width } => { TypeResolution::Value(Ti::Vector { size, kind, width }) @@ -654,6 +658,7 @@ impl<'a> ResolveContext<'a> { | crate::BinaryOperator::ShiftRight => past(left)?.clone(), }, crate::Expression::AtomicResult { ty, .. } => TypeResolution::Handle(ty), + crate::Expression::WorkGroupUniformLoadResult { ty } => TypeResolution::Handle(ty), crate::Expression::Select { accept, .. } => past(accept)?.clone(), crate::Expression::Derivative { expr, .. } => past(expr)?.clone(), crate::Expression::Relational { fun, argument } => match fun { diff --git a/third_party/rust/naga/src/valid/analyzer.rs b/third_party/rust/naga/src/valid/analyzer.rs index e9b155b6eb5c..8ce63eaf3a05 100644 --- a/third_party/rust/naga/src/valid/analyzer.rs +++ b/third_party/rust/naga/src/valid/analyzer.rs @@ -20,6 +20,7 @@ bitflags::bitflags! { /// Kinds of expressions that require uniform control flow. #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct UniformityRequirements: u8 { const WORK_GROUP_BARRIER = 0x1; const DERIVATIVE = 0x2; @@ -59,6 +60,7 @@ impl Uniformity { } bitflags::bitflags! { + #[derive(Clone, Copy, Debug, PartialEq)] struct ExitFlags: u8 { /// Control flow may return from the function, which makes all the /// subsequent statements within the current function (only!) @@ -117,6 +119,7 @@ bitflags::bitflags! { /// Indicates how a global variable is used. #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct GlobalUse: u8 { /// Data will be read from the variable. const READ = 0x1; @@ -492,7 +495,7 @@ impl FunctionInfo { requirements: UniformityRequirements::empty(), }, // always uniform - E::Constant(_) => Uniformity::new(), + E::Literal(_) | E::Constant(_) | E::ZeroValue(_) => Uniformity::new(), E::Splat { size: _, value } => Uniformity { non_uniform_result: self.add_ref(value), requirements: UniformityRequirements::empty(), @@ -672,12 +675,17 @@ impl FunctionInfo { requirements: UniformityRequirements::empty(), }, E::Math { - arg, arg1, arg2, .. + fun: _, + arg, + arg1, + arg2, + arg3, } => { let arg1_nur = arg1.and_then(|h| self.add_ref(h)); let arg2_nur = arg2.and_then(|h| self.add_ref(h)); + let arg3_nur = arg3.and_then(|h| self.add_ref(h)); Uniformity { - non_uniform_result: self.add_ref(arg).or(arg1_nur).or(arg2_nur), + non_uniform_result: self.add_ref(arg).or(arg1_nur).or(arg2_nur).or(arg3_nur), requirements: UniformityRequirements::empty(), } } @@ -690,6 +698,13 @@ impl FunctionInfo { non_uniform_result: Some(handle), requirements: UniformityRequirements::empty(), }, + E::WorkGroupUniformLoadResult { .. 
} => Uniformity {
+                // The result of WorkGroupUniformLoad is always uniform by definition
+                non_uniform_result: None,
+                // The call is what cares about uniformity, not the expression.
+                // This expression is never emitted, so this requirement should never be used anyway.
+                requirements: UniformityRequirements::empty(),
+            },
             E::ArrayLength(expr) => Uniformity {
                 non_uniform_result: self.add_ref_impl(expr, GlobalUse::QUERY),
                 requirements: UniformityRequirements::empty(),
@@ -776,6 +791,35 @@ impl FunctionInfo {
                     },
                     exit: ExitFlags::empty(),
                 },
+                S::WorkGroupUniformLoad { pointer, .. } => {
+                    let _condition_nur = self.add_ref(pointer);
+
+                    // Don't check that this call occurs in uniform control flow until Naga implements WGSL's standard
+                    // uniformity analysis (https://github.com/gfx-rs/naga/issues/1744).
+                    // The uniformity analysis Naga uses now is less accurate than the one in the WGSL standard,
+                    // causing Naga to reject correct uses of `workgroupUniformLoad` in some interesting programs.
+
+                    /* #[cfg(feature = "validate")]
+                    if self
+                        .flags
+                        .contains(super::ValidationFlags::CONTROL_FLOW_UNIFORMITY)
+                    {
+                        let condition_nur = self.add_ref(pointer);
+                        let this_disruptor =
+                            disruptor.or(condition_nur.map(UniformityDisruptor::Expression));
+                        if let Some(cause) = this_disruptor {
+                            return Err(FunctionError::NonUniformWorkgroupUniformLoad(cause)
+                                .with_span_static(*span, "WorkGroupUniformLoad"));
+                        }
+                    } */
+                    FunctionUniformity {
+                        result: Uniformity {
+                            non_uniform_result: None,
+                            requirements: UniformityRequirements::WORK_GROUP_BARRIER,
+                        },
+                        exit: ExitFlags::empty(),
+                    }
+                }
                 S::Block(ref b) => {
                     self.process_block(b, other_functions, disruptor, expression_arena)?
                 }
@@ -829,7 +873,7 @@ impl FunctionInfo {
                 S::Loop {
                     ref body,
                     ref continuing,
-                    break_if: _,
+                    break_if,
                 } => {
                     let body_uniformity =
                         self.process_block(body, other_functions, disruptor, expression_arena)?;
@@ -840,6 +884,9 @@ impl FunctionInfo {
                         continuing_disruptor,
                         expression_arena,
                     )?;
+                    if let Some(expr) = break_if {
+                        let _ = self.add_ref(expr);
+                    }
                     body_uniformity | continuing_uniformity
                 }
                 S::Return { value } => FunctionUniformity {
diff --git a/third_party/rust/naga/src/valid/compose.rs b/third_party/rust/naga/src/valid/compose.rs
index e77d538255ee..87bc2352ceb9 100644
--- a/third_party/rust/naga/src/valid/compose.rs
+++ b/third_party/rust/naga/src/valid/compose.rs
@@ -1,8 +1,5 @@
 #[cfg(feature = "validate")]
-use crate::{
-    arena::{Arena, UniqueArena},
-    proc::TypeResolution,
-};
+use crate::proc::TypeResolution;
 
 use crate::arena::Handle;
 
@@ -20,18 +17,17 @@ pub enum ComposeError {
 #[cfg(feature = "validate")]
 pub fn validate_compose(
     self_ty_handle: Handle<crate::Type>,
-    constant_arena: &Arena<crate::Constant>,
-    type_arena: &UniqueArena<crate::Type>,
+    gctx: crate::proc::GlobalCtx,
     component_resolutions: impl ExactSizeIterator<Item = TypeResolution>,
 ) -> Result<(), ComposeError> {
     use crate::TypeInner as Ti;
 
-    match type_arena[self_ty_handle].inner {
+    match gctx.types[self_ty_handle].inner {
         // vectors are composed from scalars or other vectors
         Ti::Vector { size, kind, width } => {
             let mut total = 0;
             for (index, comp_res) in component_resolutions.enumerate() {
-                total += match *comp_res.inner_with(type_arena) {
+                total += match *comp_res.inner_with(gctx.types) {
                     Ti::Scalar {
                         kind: comp_kind,
                         width: comp_width,
@@ -74,7 +70,7 @@ pub fn validate_compose(
                 });
             }
             for (index, comp_res) in component_resolutions.enumerate() {
-                if comp_res.inner_with(type_arena) != &inner {
+                if comp_res.inner_with(gctx.types) != &inner {
                     log::error!("Matrix component[{}] type {:?}", index, comp_res);
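The `count.get()` calls in these validation hunks (and in `proc/mod.rs` earlier) all stem from one IR change: `ArraySize::Constant` now stores the array length inline as a `NonZeroU32` instead of pointing at an entry in the constant arena, so there is no arena lookup and no fallible `to_array_length()` left to call. A reduced sketch of the new shape, with the enum cut down to the two relevant variants:

    use std::num::NonZeroU32;

    // Cut-down model of naga's ArraySize after this change.
    enum ArraySize {
        Constant(NonZeroU32), // length stored inline, guaranteed non-zero
        Dynamic,
    }

    fn element_count(size: &ArraySize) -> u32 {
        match *size {
            ArraySize::Constant(count) => count.get(),
            // A dynamically-sized array still occupies at least one element.
            ArraySize::Dynamic => 1,
        }
    }

    fn main() {
        let size = ArraySize::Constant(NonZeroU32::new(4).unwrap());
        assert_eq!(element_count(&size), 4);
    }

This is also why `InvalidArraySizeConstant` and `NonPositiveArrayLength` disappear from `TypeError` later in this patch: an inline `NonZeroU32` can neither dangle nor be zero.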
return Err(ComposeError::ComponentType { index: index as u32, @@ -84,22 +80,21 @@ pub fn validate_compose( } Ti::Array { base, - size: crate::ArraySize::Constant(handle), + size: crate::ArraySize::Constant(count), stride: _, } => { - let count = constant_arena[handle].to_array_length().unwrap(); - if count as usize != component_resolutions.len() { + if count.get() as usize != component_resolutions.len() { return Err(ComposeError::ComponentCount { - expected: count, + expected: count.get(), given: component_resolutions.len() as u32, }); } for (index, comp_res) in component_resolutions.enumerate() { - let base_inner = &type_arena[base].inner; - let comp_res_inner = comp_res.inner_with(type_arena); + let base_inner = &gctx.types[base].inner; + let comp_res_inner = comp_res.inner_with(gctx.types); // We don't support arrays of pointers, but it seems best not to // embed that assumption here, so use `TypeInner::equivalent`. - if !base_inner.equivalent(comp_res_inner, type_arena) { + if !base_inner.equivalent(comp_res_inner, gctx.types) { log::error!("Array component[{}] type {:?}", index, comp_res); return Err(ComposeError::ComponentType { index: index as u32, @@ -116,11 +111,11 @@ pub fn validate_compose( } for (index, (member, comp_res)) in members.iter().zip(component_resolutions).enumerate() { - let member_inner = &type_arena[member.ty].inner; - let comp_res_inner = comp_res.inner_with(type_arena); + let member_inner = &gctx.types[member.ty].inner; + let comp_res_inner = comp_res.inner_with(gctx.types); // We don't support pointers in structs, but it seems best not to embed // that assumption here, so use `TypeInner::equivalent`. - if !comp_res_inner.equivalent(member_inner, type_arena) { + if !comp_res_inner.equivalent(member_inner, gctx.types) { log::error!("Struct component[{}] type {:?}", index, comp_res); return Err(ComposeError::ComponentType { index: index as u32, diff --git a/third_party/rust/naga/src/valid/expression.rs b/third_party/rust/naga/src/valid/expression.rs index 3a81707904f4..0cf40c232062 100644 --- a/third_party/rust/naga/src/valid/expression.rs +++ b/third_party/rust/naga/src/valid/expression.rs @@ -1,4 +1,3 @@ -#[cfg(feature = "validate")] use std::ops::Index; #[cfg(feature = "validate")] @@ -119,6 +118,10 @@ pub enum ExpressionError { InvalidArgumentType(crate::MathFunction, u32, Handle), #[error("Atomic result type can't be {0:?}")] InvalidAtomicResultType(Handle), + #[error( + "workgroupUniformLoad result type can't be {0:?}. It can only be a constructible type." + )] + InvalidWorkGroupUniformLoadResultType(Handle), #[error("Shader requires capability {0:?}")] MissingCapabilities(super::Capabilities), } @@ -244,9 +247,9 @@ impl super::Validator { } => size as u32, Ti::Matrix { columns, .. } => columns as u32, Ti::Array { - size: crate::ArraySize::Constant(handle), + size: crate::ArraySize::Constant(len), .. - } => module.constants[handle].to_array_length().unwrap(), + } => len.get(), Ti::Array { .. } | Ti::BindingArray { .. } => u32::MAX, // can't statically know, but need run-time checks Ti::Pointer { base, .. } if top_level => { resolve_index_limit(module, top, &module.types[base].inner, false)? @@ -269,7 +272,9 @@ impl super::Validator { } ShaderStages::all() } + E::Literal(_value) => ShaderStages::all(), E::Constant(_handle) => ShaderStages::all(), + E::ZeroValue(_type) => ShaderStages::all(), E::Splat { size: _, value } => match resolver[value] { Ti::Scalar { .. 
} => ShaderStages::all(), ref other => { @@ -299,8 +304,7 @@ impl super::Validator { E::Compose { ref components, ty } => { validate_compose( ty, - &module.constants, - &module.types, + module.to_ctx(), components.iter().map(|&handle| info[handle].ty.clone()), )?; ShaderStages::all() @@ -1411,6 +1415,18 @@ impl super::Validator { } ShaderStages::all() } + E::WorkGroupUniformLoadResult { ty } => { + if self.types[ty.index()] + .flags + // Sized | Constructible is exactly the types currently supported by + // WorkGroupUniformLoad + .contains(TypeFlags::SIZED | TypeFlags::CONSTRUCTIBLE) + { + ShaderStages::COMPUTE + } else { + return Err(ExpressionError::InvalidWorkGroupUniformLoadResultType(ty)); + } + } E::ArrayLength(expr) => match resolver[expr] { Ti::Pointer { base, .. } => { let base_ty = &resolver.types[base]; diff --git a/third_party/rust/naga/src/valid/function.rs b/third_party/rust/naga/src/valid/function.rs index 8ebcddd9e379..41f31b5658ce 100644 --- a/third_party/rust/naga/src/valid/function.rs +++ b/third_party/rust/naga/src/valid/function.rs @@ -149,10 +149,19 @@ pub enum FunctionError { PipelineInputRegularFunction { name: String }, #[error("Functions that are not entry points cannot have `@location` or `@builtin` attributes on their return value types")] PipelineOutputRegularFunction, + #[error("Required uniformity for WorkGroupUniformLoad is not fulfilled because of {0:?}")] + // The actual load statement will be "pointed to" by the span + NonUniformWorkgroupUniformLoad(UniformityDisruptor), + // This is only possible with a misbehaving frontend + #[error("The expression {0:?} for a WorkGroupUniformLoad isn't a WorkgroupUniformLoadResult")] + WorkgroupUniformLoadExpressionMismatch(Handle), + #[error("The expression {0:?} is not valid as a WorkGroupUniformLoad argument. It should be a Pointer in Workgroup address space")] + WorkgroupUniformLoadInvalidPointer(Handle), } bitflags::bitflags! { #[repr(transparent)] + #[derive(Clone, Copy)] struct ControlFlowAbility: u8 { /// The control can return out of this block. const RETURN = 0x1; @@ -409,7 +418,7 @@ impl super::Validator { statements: &crate::Block, context: &BlockContext, ) -> Result> { - use crate::{Statement as S, TypeInner as Ti}; + use crate::{AddressSpace, Statement as S, TypeInner as Ti}; let mut finished = false; let mut stages = super::ShaderStages::all(); for (statement, &span) in statements.span_iter() { @@ -821,6 +830,43 @@ impl super::Validator { } => { self.validate_atomic(pointer, fun, value, result, context)?; } + S::WorkGroupUniformLoad { pointer, result } => { + stages &= super::ShaderStages::COMPUTE; + let pointer_inner = + context.resolve_type(pointer, &self.valid_expression_set)?; + match *pointer_inner { + Ti::Pointer { + space: AddressSpace::WorkGroup, + .. + } => {} + Ti::ValuePointer { + space: AddressSpace::WorkGroup, + .. 
+ } => {} + _ => { + return Err(FunctionError::WorkgroupUniformLoadInvalidPointer(pointer) + .with_span_static(span, "WorkGroupUniformLoad")) + } + } + self.emit_expression(result, context)?; + let ty = match &context.expressions[result] { + &crate::Expression::WorkGroupUniformLoadResult { ty } => ty, + _ => { + return Err(FunctionError::WorkgroupUniformLoadExpressionMismatch( + result, + ) + .with_span_static(span, "WorkGroupUniformLoad")); + } + }; + let expected_pointer_inner = Ti::Pointer { + base: ty, + space: AddressSpace::WorkGroup, + }; + if !expected_pointer_inner.equivalent(pointer_inner, context.types) { + return Err(FunctionError::WorkgroupUniformLoadInvalidPointer(pointer) + .with_span_static(span, "WorkGroupUniformLoad")); + } + } S::RayQuery { query, ref fun } => { let query_var = match *context.get_expression(query) { crate::Expression::LocalVariable(var) => &context.local_vars[var], @@ -894,8 +940,7 @@ impl super::Validator { fn validate_local_var( &self, var: &crate::LocalVariable, - types: &UniqueArena, - constants: &Arena, + gctx: crate::proc::GlobalCtx, ) -> Result<(), LocalVariableError> { log::debug!("var {:?}", var); let type_info = self @@ -910,13 +955,13 @@ impl super::Validator { } if let Some(const_handle) = var.init { - match constants[const_handle].inner { + match gctx.constants[const_handle].inner { crate::ConstantInner::Scalar { width, ref value } => { let ty_inner = crate::TypeInner::Scalar { width, kind: value.scalar_kind(), }; - if types[var.ty].inner != ty_inner { + if gctx.types[var.ty].inner != ty_inner { return Err(LocalVariableError::InitializerType); } } @@ -942,7 +987,7 @@ impl super::Validator { #[cfg(feature = "validate")] for (var_handle, var) in fun.local_variables.iter() { - self.validate_local_var(var, &module.types, &module.constants) + self.validate_local_var(var, module.to_ctx()) .map_err(|source| { FunctionError::LocalVariable { handle: var_handle, diff --git a/third_party/rust/naga/src/valid/handles.rs b/third_party/rust/naga/src/valid/handles.rs index fdd43cd58511..2b0d107626a6 100644 --- a/third_party/rust/naga/src/valid/handles.rs +++ b/third_party/rust/naga/src/valid/handles.rs @@ -9,7 +9,7 @@ use crate::{ use crate::{Arena, UniqueArena}; #[cfg(feature = "validate")] -use super::{TypeError, ValidationError}; +use super::ValidationError; #[cfg(feature = "validate")] use std::{convert::TryInto, hash::Hash, num::NonZeroU32}; @@ -44,33 +44,7 @@ impl super::Validator { // NOTE: Types being first is important. All other forms of validation depend on this. for (this_handle, ty) in types.iter() { - let &crate::Type { - ref name, - ref inner, - } = ty; - - let validate_array_size = |size| { - match size { - crate::ArraySize::Constant(constant) => { - let &crate::Constant { - name: _, - specialization: _, - ref inner, - } = constants.try_get(constant)?; - if !matches!(inner, &crate::ConstantInner::Scalar { .. }) { - return Err(ValidationError::Type { - handle: this_handle, - name: name.clone().unwrap_or_default(), - source: TypeError::InvalidArraySizeConstant(constant), - }); - } - } - crate::ArraySize::Dynamic => (), - }; - Ok(this_handle) - }; - - match *inner { + match ty.inner { crate::TypeInner::Scalar { .. } | crate::TypeInner::Vector { .. } | crate::TypeInner::Matrix { .. 
} @@ -83,14 +57,9 @@ impl super::Validator { crate::TypeInner::Pointer { base, space: _ } => { this_handle.check_dep(base)?; } - crate::TypeInner::Array { - base, - size, - stride: _, - } - | crate::TypeInner::BindingArray { base, size } => { + crate::TypeInner::Array { base, .. } + | crate::TypeInner::BindingArray { base, .. } => { this_handle.check_dep(base)?; - validate_array_size(size)?; } crate::TypeInner::Struct { ref members, @@ -253,9 +222,13 @@ impl super::Validator { crate::Expression::AccessIndex { base, .. } => { handle.check_dep(base)?; } + crate::Expression::Literal(_value) => {} crate::Expression::Constant(constant) => { validate_constant(constant)?; } + crate::Expression::ZeroValue(ty) => { + validate_type(ty)?; + } crate::Expression::Splat { value, .. } => { handle.check_dep(value)?; } @@ -387,7 +360,9 @@ impl super::Validator { handle.check_dep(function)?; } } - crate::Expression::AtomicResult { .. } | crate::Expression::RayQueryProceedResult => (), + crate::Expression::AtomicResult { .. } + | crate::Expression::RayQueryProceedResult + | crate::Expression::WorkGroupUniformLoadResult { .. } => (), crate::Expression::ArrayLength(array) => { handle.check_dep(array)?; } @@ -498,6 +473,11 @@ impl super::Validator { validate_expr(result)?; Ok(()) } + crate::Statement::WorkGroupUniformLoad { pointer, result } => { + validate_expr(pointer)?; + validate_expr(result)?; + Ok(()) + } crate::Statement::Call { function, ref arguments, diff --git a/third_party/rust/naga/src/valid/interface.rs b/third_party/rust/naga/src/valid/interface.rs index 268490c52ead..47e2f201c834 100644 --- a/third_party/rust/naga/src/valid/interface.rs +++ b/third_party/rust/naga/src/valid/interface.rs @@ -395,12 +395,19 @@ impl super::Validator { pub(super) fn validate_global_var( &self, var: &crate::GlobalVariable, - types: &UniqueArena, + gctx: crate::proc::GlobalCtx, ) -> Result<(), GlobalVariableError> { use super::TypeFlags; log::debug!("var {:?}", var); - let type_info = &self.types[var.ty.index()]; + let inner_ty = match gctx.types[var.ty].inner { + // A binding array is (mostly) supposed to behave the same as a + // series of individually bound resources, so we can (mostly) + // validate a `binding_array` as if it were just a plain `T`. + crate::TypeInner::BindingArray { base, .. } => base, + _ => var.ty, + }; + let type_info = &self.types[inner_ty.index()]; let (required_type_flags, is_resource) = match var.space { crate::AddressSpace::Function => { @@ -437,22 +444,8 @@ impl super::Validator { ) } crate::AddressSpace::Handle => { - match types[var.ty].inner { - crate::TypeInner::Image { .. } - | crate::TypeInner::Sampler { .. } - | crate::TypeInner::BindingArray { .. } - | crate::TypeInner::AccelerationStructure - | crate::TypeInner::RayQuery => {} - _ => { - return Err(GlobalVariableError::InvalidType(var.space)); - } - }; - let inner_ty = match &types[var.ty].inner { - &crate::TypeInner::BindingArray { base, .. } => &types[base].inner, - ty => ty, - }; - if let crate::TypeInner::Image { - class: + match gctx.types[inner_ty].inner { + crate::TypeInner::Image { class, .. } => match class { crate::ImageClass::Storage { format: crate::StorageFormat::R16Unorm @@ -462,17 +455,23 @@ impl super::Validator { | crate::StorageFormat::Rgba16Unorm | crate::StorageFormat::Rgba16Snorm, .. - }, - .. 
- } = *inner_ty - { - if !self - .capabilities - .contains(Capabilities::STORAGE_TEXTURE_16BIT_NORM_FORMATS) - { - return Err(GlobalVariableError::UnsupportedCapability( - Capabilities::STORAGE_TEXTURE_16BIT_NORM_FORMATS, - )); + } => { + if !self + .capabilities + .contains(Capabilities::STORAGE_TEXTURE_16BIT_NORM_FORMATS) + { + return Err(GlobalVariableError::UnsupportedCapability( + Capabilities::STORAGE_TEXTURE_16BIT_NORM_FORMATS, + )); + } + } + _ => {} + }, + crate::TypeInner::Sampler { .. } + | crate::TypeInner::AccelerationStructure + | crate::TypeInner::RayQuery => {} + _ => { + return Err(GlobalVariableError::InvalidType(var.space)); } } diff --git a/third_party/rust/naga/src/valid/mod.rs b/third_party/rust/naga/src/valid/mod.rs index 6b3a2e145633..2c83e1a8d7c9 100644 --- a/third_party/rust/naga/src/valid/mod.rs +++ b/third_party/rust/naga/src/valid/mod.rs @@ -11,7 +11,7 @@ mod interface; mod r#type; #[cfg(feature = "validate")] -use crate::arena::{Arena, UniqueArena}; +use crate::arena::UniqueArena; use crate::{ arena::Handle, @@ -53,6 +53,7 @@ bitflags::bitflags! { /// by default.) #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ValidationFlags: u8 { /// Expressions. #[cfg(feature = "validate")] @@ -86,6 +87,7 @@ bitflags::bitflags! { #[must_use] #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct Capabilities: u16 { /// Support for [`AddressSpace:PushConstant`]. const PUSH_CONSTANT = 0x1; @@ -126,6 +128,7 @@ bitflags::bitflags! { /// Validation flags. #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ShaderStages: u8 { const VERTEX = 0x1; const FRAGMENT = 0x2; @@ -174,10 +177,6 @@ pub struct Validator { pub enum ConstantError { #[error("The type doesn't match the constant")] InvalidType, - #[error("The component handle {0:?} can not be resolved")] - UnresolvedComponent(Handle), - #[error("The array size handle {0:?} can not be resolved")] - UnresolvedSize(Handle), #[error(transparent)] Compose(#[from] ComposeError), } @@ -300,10 +299,9 @@ impl Validator { fn validate_constant( &self, handle: Handle, - constants: &Arena, - types: &UniqueArena, + gctx: crate::proc::GlobalCtx, ) -> Result<(), ConstantError> { - let con = &constants[handle]; + let con = &gctx.constants[handle]; match con.inner { crate::ConstantInner::Scalar { width, ref value } => { if self.check_width(value.scalar_kind(), width).is_err() { @@ -311,25 +309,12 @@ impl Validator { } } crate::ConstantInner::Composite { ty, ref components } => { - match types[ty].inner { - crate::TypeInner::Array { - size: crate::ArraySize::Constant(size_handle), - .. 
- } if handle <= size_handle => { - return Err(ConstantError::UnresolvedSize(size_handle)); - } - _ => {} - } - if let Some(&comp) = components.iter().find(|&&comp| handle <= comp) { - return Err(ConstantError::UnresolvedComponent(comp)); - } compose::validate_compose( ty, - constants, - types, + gctx, components .iter() - .map(|&component| constants[component].inner.resolve_type()), + .map(|&component| gctx.constants[component].inner.resolve_type()), )?; } } @@ -347,17 +332,15 @@ impl Validator { #[cfg(feature = "validate")] Self::validate_module_handles(module).map_err(|e| e.with_span())?; - self.layouter - .update(&module.types, &module.constants) - .map_err(|e| { - let handle = e.ty; - ValidationError::from(e).with_span_handle(handle, &module.types) - })?; + self.layouter.update(module.to_ctx()).map_err(|e| { + let handle = e.ty; + ValidationError::from(e).with_span_handle(handle, &module.types) + })?; #[cfg(feature = "validate")] if self.flags.contains(ValidationFlags::CONSTANTS) { for (handle, constant) in module.constants.iter() { - self.validate_constant(handle, &module.constants, &module.types) + self.validate_constant(handle, module.to_ctx()) .map_err(|source| { ValidationError::Constant { handle, @@ -377,7 +360,7 @@ impl Validator { for (handle, ty) in module.types.iter() { let ty_info = self - .validate_type(handle, &module.types, &module.constants) + .validate_type(handle, module.to_ctx()) .map_err(|source| { ValidationError::Type { handle, @@ -392,7 +375,7 @@ impl Validator { #[cfg(feature = "validate")] for (var_handle, var) in module.global_variables.iter() { - self.validate_global_var(var, &module.types) + self.validate_global_var(var, module.to_ctx()) .map_err(|source| { ValidationError::GlobalVariable { handle: var_handle, diff --git a/third_party/rust/naga/src/valid/type.rs b/third_party/rust/naga/src/valid/type.rs index d8dd37d09bb2..f8ceb463c66c 100644 --- a/third_party/rust/naga/src/valid/type.rs +++ b/third_party/rust/naga/src/valid/type.rs @@ -1,8 +1,5 @@ use super::Capabilities; -use crate::{ - arena::{Arena, Handle, UniqueArena}, - proc::Alignment, -}; +use crate::{arena::Handle, proc::Alignment}; bitflags::bitflags! { /// Flags associated with [`Type`]s by [`Validator`]. @@ -12,6 +9,7 @@ bitflags::bitflags! { #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] #[repr(transparent)] + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct TypeFlags: u8 { /// Can be used for data variables. 
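A pattern worth noting across these validator hunks: every `bitflags::bitflags!` invocation gains an explicit derive of at least `Clone` and `Copy` (most also add `Debug`, `Eq`, and `PartialEq`). That matches the bitflags 2.x behavior change, where the macro stopped implementing common traits automatically; each flags type now has to opt in. A minimal sketch, with `ExampleFlags` invented for illustration:

    // bitflags 2.x: common traits must be derived explicitly on the type.
    bitflags::bitflags! {
        #[derive(Clone, Copy, Debug, Eq, PartialEq)]
        pub struct ExampleFlags: u8 {
            const A = 0x1;
            const B = 0x2;
        }
    }

    fn main() {
        let flags = ExampleFlags::A | ExampleFlags::B;
        // Copy, PartialEq, and Debug are all exercised here; without the
        // derive this assert_eq! would not compile under bitflags 2.
        assert_eq!(flags & ExampleFlags::A, ExampleFlags::A);
    }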
/// @@ -96,8 +94,6 @@ pub enum TypeError { InvalidWidth(crate::ScalarKind, crate::Bytes), #[error("The {0:?} scalar width {1} is not supported for an atomic")] InvalidAtomicWidth(crate::ScalarKind, crate::Bytes), - #[error("The base handle {0:?} can not be resolved")] - UnresolvedBase(Handle), #[error("Invalid type for pointer target {0:?}")] InvalidPointerBase(Handle), #[error("Unsized types like {base:?} must be in the `Storage` address space, not `{space:?}`")] @@ -109,16 +105,14 @@ pub enum TypeError { InvalidData(Handle), #[error("Base type {0:?} for the array is invalid")] InvalidArrayBaseType(Handle), - #[error("The constant {0:?} can not be used for an array size")] - InvalidArraySizeConstant(Handle), #[error("The constant {0:?} is specialized, and cannot be used as an array size")] UnsupportedSpecializedArrayLength(Handle), - #[error("Array type {0:?} must have a length of one or more")] - NonPositiveArrayLength(Handle), #[error("Array stride {stride} does not match the expected {expected}")] InvalidArrayStride { stride: u32, expected: u32 }, #[error("Field '{0}' can't be dynamically-sized, has type {1:?}")] InvalidDynamicArray(String, Handle), + #[error("The base handle {0:?} has to be a struct")] + BindingArrayBaseTypeNotStruct(Handle), #[error("Structure member[{index}] at {offset} overlaps the previous member")] MemberOverlap { index: u32, offset: u32 }, #[error( @@ -246,11 +240,10 @@ impl super::Validator { pub(super) fn validate_type( &self, handle: Handle, - types: &UniqueArena, - constants: &Arena, + gctx: crate::proc::GlobalCtx, ) -> Result { use crate::TypeInner as Ti; - Ok(match types[handle].inner { + Ok(match gctx.types[handle].inner { Ti::Scalar { kind, width } => { self.check_width(kind, width)?; let shareable = if kind.is_numeric() { @@ -318,10 +311,6 @@ impl super::Validator { Ti::Pointer { base, space } => { use crate::AddressSpace as As; - if base >= handle { - return Err(TypeError::UnresolvedBase(base)); - } - let base_info = &self.types[base.index()]; if !base_info.flags.contains(TypeFlags::DATA) { return Err(TypeError::InvalidPointerBase(base)); @@ -389,9 +378,6 @@ impl super::Validator { ) } Ti::Array { base, size, stride } => { - if base >= handle { - return Err(TypeError::UnresolvedBase(base)); - } let base_info = &self.types[base.index()]; if !base_info.flags.contains(TypeFlags::DATA | TypeFlags::SIZED) { return Err(TypeError::InvalidArrayBaseType(base)); @@ -425,52 +411,7 @@ impl super::Validator { }; let type_info_mask = match size { - crate::ArraySize::Constant(const_handle) => { - let constant = &constants[const_handle]; - let length_is_positive = match *constant { - crate::Constant { - specialization: Some(_), - .. - } => { - // Many of our back ends don't seem to support - // specializable array lengths. If you want to try to make - // this work, be sure to address all uses of - // `Constant::to_array_length`, which ignores - // specialization. - return Err(TypeError::UnsupportedSpecializedArrayLength( - const_handle, - )); - } - crate::Constant { - inner: - crate::ConstantInner::Scalar { - width: _, - value: crate::ScalarValue::Uint(length), - }, - .. - } => length > 0, - // Accept a signed integer size to avoid - // requiring an explicit uint - // literal. Type inference should make - // this unnecessary. - crate::Constant { - inner: - crate::ConstantInner::Scalar { - width: _, - value: crate::ScalarValue::Sint(length), - }, - .. 
- } => length > 0, - _ => { - log::warn!("Array size {:?}", constant); - return Err(TypeError::InvalidArraySizeConstant(const_handle)); - } - }; - - if !length_is_positive { - return Err(TypeError::NonPositiveArrayLength(const_handle)); - } - + crate::ArraySize::Constant(_) => { TypeFlags::DATA | TypeFlags::SIZED | TypeFlags::COPY @@ -514,9 +455,6 @@ impl super::Validator { let mut prev_struct_data: Option<(u32, u32)> = None; for (i, member) in members.iter().enumerate() { - if member.ty >= handle { - return Err(TypeError::UnresolvedBase(member.ty)); - } let base_info = &self.types[member.ty.index()]; if !base_info.flags.contains(TypeFlags::DATA) { return Err(TypeError::InvalidData(member.ty)); @@ -545,7 +483,7 @@ impl super::Validator { } } - let base_size = types[member.ty].inner.size(constants); + let base_size = gctx.types[member.ty].inner.size(gctx); min_offset = member.offset + base_size; if min_offset > span { return Err(TypeError::MemberOutOfBounds { @@ -589,14 +527,14 @@ impl super::Validator { } }; - prev_struct_data = match types[member.ty].inner { + prev_struct_data = match gctx.types[member.ty].inner { crate::TypeInner::Struct { span, .. } => Some((span, member.offset)), _ => None, }; // The last field may be an unsized array. if !base_info.flags.contains(TypeFlags::SIZED) { - let is_array = match types[member.ty].inner { + let is_array = match gctx.types[member.ty].inner { crate::TypeInner::Array { .. } => true, _ => false, }; @@ -624,13 +562,35 @@ impl super::Validator { } Ti::AccelerationStructure => { self.require_type_capability(Capabilities::RAY_QUERY)?; - TypeInfo::new(TypeFlags::empty(), Alignment::ONE) + TypeInfo::new(TypeFlags::ARGUMENT, Alignment::ONE) } Ti::RayQuery => { self.require_type_capability(Capabilities::RAY_QUERY)?; TypeInfo::new(TypeFlags::DATA | TypeFlags::SIZED, Alignment::ONE) } - Ti::BindingArray { .. } => TypeInfo::new(TypeFlags::empty(), Alignment::ONE), + Ti::BindingArray { base, size } => { + if base >= handle { + return Err(TypeError::InvalidArrayBaseType(base)); + } + let type_info_mask = match size { + crate::ArraySize::Constant(_) => TypeFlags::SIZED | TypeFlags::HOST_SHAREABLE, + crate::ArraySize::Dynamic => { + // Final type is non-sized + TypeFlags::HOST_SHAREABLE + } + }; + let base_info = &self.types[base.index()]; + + if base_info.flags.contains(TypeFlags::DATA) { + // Currently Naga only supports binding arrays of structs for non-handle types. + match gctx.types[base].inner { + crate::TypeInner::Struct { .. 
} => {} + _ => return Err(TypeError::BindingArrayBaseTypeNotStruct(base)), + }; + } + + TypeInfo::new(base_info.flags & type_info_mask, Alignment::ONE) + } }) } } diff --git a/third_party/rust/wgpu-core/.cargo-checksum.json b/third_party/rust/wgpu-core/.cargo-checksum.json index 8a179daeb969..74276609faf7 100644 --- a/third_party/rust/wgpu-core/.cargo-checksum.json +++ b/third_party/rust/wgpu-core/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"75edd4854ae0d6fabafa12ef625163a4c49d7546556b0d38bb667fcccbe52087","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/binding_model.rs":"984e05f8527ae6d2c72d3f141a006f8365da7c5f95afd24601ed0fe6c98b62b2","src/command/bind.rs":"88ca2bedfcd8392efc960f39ca8506024af495dcf1852cf16ae28fbadbdf748d","src/command/bundle.rs":"74e50b87c003ca3647f65409be9a9d05f81f770fa72a9019898f8741b965a66e","src/command/clear.rs":"324ad776e89321d364e091cab915194fd2e9ffea75ef21bbf7eea637059bfcb7","src/command/compute.rs":"d9c754f7f2ef4cde2b202365337eed2feb995993603c3f0cf4acf32a0ed4f54b","src/command/draw.rs":"14a0319da47e4995c2ad97f1102998b0d4beb2f6d07df8a0cb6f08023185ce7a","src/command/memory_init.rs":"347117a84f35927de91e7ab4cc69d19b5b4507a942596c0c725f27b50c428b1e","src/command/mod.rs":"50d5e476fc00e157f6afc1b40d461165b0d1e2e895abd20624dadf7959c06009","src/command/query.rs":"59adc6efaf01c037b6ba5f2b29b7a0063ced1f0334349b812a9611da77cfd078","src/command/render.rs":"a4c8d79f8c26cbfb1907bcdc009543fb4c4a7a63a779423a54b9de2d61416cdf","src/command/transfer.rs":"9873536468a220ba9d4f20dc2e89782fa0715af711dbac6919e515b868e5cc0b","src/conv.rs":"9654736318f9efdf15695eacaf64218febf01cb945b764ce6a966cbd41345a79","src/device/life.rs":"00575c6c8c4d32304c76f1bdbf9b705603b275ffe9d7dad9838d4c47e118f5d6","src/device/mod.rs":"2799305eb6969bef45d7d757482674dfd106956fec933ce54766b9e52a8230a3","src/device/queue.rs":"cad4e1165a5b3366e0d939b0bd20cefc485332c2061bdcd1130c8416136ae0f4","src/device/trace.rs":"f69aa6af36a6defcf391ddb6cf12a56e37e00b4595e95c284cd7fd400394349c","src/error.rs":"48bc91197cfe254559b305c3954db6afa6441f120cb9fedeb7da96a140df6e2b","src/hub.rs":"1afc5d19f1afd132f43aa9b0d4f4f750f88ca18936974e2ae5fd50cb8763569a","src/id.rs":"10e101272faf04495b25e37284b0c7620392bb8ee7f2a82247d8e0a858087d04","src/init_tracker/buffer.rs":"a0ebf54a1e6d269c7b4aa0ac7bb8b04fd2cea3221a1d058ff33cb683b2aea3e9","src/init_tracker/mod.rs":"0867f79f83555390d0982d1dc6dcf0d4340e10cb89aa633d3c3ecc45deb3c78c","src/init_tracker/texture.rs":"37b6584aaca11c407d91f77002dcbb48d8a4876e27edd1b71b7929ef966f901d","src/instance.rs":"da4ba40a8e2db89bf31f3c8c0874c152ea1ed07169dfc12f8e6b15b5f044372f","src/lib.rs":"a40fc046ae2b3cca1f8f5e4e462c07bb51e56dbb17bf7a5736fef6f122c48e9c","src/pipeline.rs":"1d1a25da8e5ba585a79d38e5972413db83deada2f1704789144d28bca8cf1a1c","src/present.rs":"cf7a4a093087953fb16d1c6a45b85649a3971fa6dc56a9f3f424a478dcb84270","src/resource.rs":"1dedb1eaabad43d87d7c6ea5ecc522a0f64972b9348a699bbafc49238b0748e1","src/track/buffer.rs":"0b7a8c6acfe55e3f45dd1e7ef1b3ee8bd4844f61bbabf62aa4f0c391b822803c","src/track/metadata.rs":"9565f700661e81fd534a751a0e3d26022238ad8784cc868333da7030b3571473","src/track/mod.rs":"1ce1fe9a2d2cee35a2b8ec58c52b1ed633481fab4fef7308cfb980a73eaa5928","src/track/range.rs":"5bbfed6e103b3234d9de8e42057022da6d628c2cc1db6bb51b88f87f2d8adf8b","src/track/stateless.rs":"34942ecdf81b05b75892c891c87e065329870459ff8edfca1f99ec9533c334aa","src/track/texture.rs":"7d38b2f4e0cdb3e5
6ce1e777d8bca4d73ef40e21c56e11fedd69dc27e789256d","src/validation.rs":"dd761dfc717ab62d36311c277d26f4741b297f5dbb1bd1f1e6b6eff9c6cbc57c"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"2d07828ae38b3e0d0d1aa6770a9e0985035ffe31152d49673f8385824f13e0bc","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/binding_model.rs":"d92a2c21642d0bece4ce1a9877d08567a86af4991cfe0bf6ecaaaf8f8b9c8d74","src/command/bind.rs":"aa778a7a125496f31220e8aa06a7eee5c5bc524a29e77cc5a314a178a0813a80","src/command/bundle.rs":"7cc0da91c5c2ea0432b2f3e8be9ca4dc1034cdbfa45ba97b285b29c146df9b6f","src/command/clear.rs":"b61144473752e363dfe9c15951702865921b568c8ee5136af7aa4237f015c383","src/command/compute.rs":"96ca2d55d9ba5f1067c701df25eb5e655557b17a45f306e3d8d31bd196839868","src/command/draw.rs":"14a0319da47e4995c2ad97f1102998b0d4beb2f6d07df8a0cb6f08023185ce7a","src/command/memory_init.rs":"b50d3d20dbf659052f19da2e79469ba6435e06370f19d6ef45e1b1128d9900b7","src/command/mod.rs":"c7b7a4dd50636694a835e48f6a65dba8cf873168a02758fae73d6c04d48dfc45","src/command/query.rs":"e12108706de23a2925d180f96dcb870d167c1d4033903d306435395284b7a0d5","src/command/render.rs":"2df5d412607b022ef4f91c50387f6f7363e8d5026c2d35efd9d3445cb6d8035a","src/command/transfer.rs":"c777c6e51afb459d2b5416e31071f24e4215c66f456fee3bd8f7395f9d1c5db1","src/conv.rs":"9654736318f9efdf15695eacaf64218febf01cb945b764ce6a966cbd41345a79","src/device/global.rs":"9031ea7a15c89d4842042c449127545e1a898d980932501695e059b8c0572843","src/device/life.rs":"c935c15c4c7f929e378a5ea930d0d36b47616a49991c236aaa10d25ce5852d15","src/device/mod.rs":"f0f8da084e0959ea1ec19438eda1dcf80325ef117ab1dec777a776e5b45fcef2","src/device/queue.rs":"5466199b4444203c6e57266308e5c9a36e6e9cf166bcbadfd6941bde339601ae","src/device/resource.rs":"a42802528ae679c8d92e4ed18a1a2b7aab20ce389640e7d367db7c81ef7bcb90","src/device/trace.rs":"f69aa6af36a6defcf391ddb6cf12a56e37e00b4595e95c284cd7fd400394349c","src/error.rs":"a4e3e8f2986732dcb159fd2d9664c283a1455bad19ca01e69221c44f6915ff23","src/global.rs":"9f100e8bd1d6e93a32d6edd883bec5e1c356ed2a06b77943c8a6da319f4bf828","src/hal_api.rs":"92a2f0cb80f192693530ed61048919bbad446742c2370bf0944c44b1c5df8362","src/hub.rs":"49f479c3ebed842a4bc8ab2fee00bc02dceb57790fbac8ba33e1bfed795fa675","src/id.rs":"f6245d024586c7fe63ded13b3cb926b940c191bbee56aedc655e8cef74bdd66b","src/identity.rs":"c2e008e652723f7896465bfdafd5a10141cf5866e8c481a8efcf0bdaa9619a6a","src/init_tracker/buffer.rs":"a0ebf54a1e6d269c7b4aa0ac7bb8b04fd2cea3221a1d058ff33cb683b2aea3e9","src/init_tracker/mod.rs":"0867f79f83555390d0982d1dc6dcf0d4340e10cb89aa633d3c3ecc45deb3c78c","src/init_tracker/texture.rs":"37b6584aaca11c407d91f77002dcbb48d8a4876e27edd1b71b7929ef966f901d","src/instance.rs":"915b8942542a5032a3036440824bf361cabd1792cbf2ac9a1f3c6a8534412906","src/lib.rs":"27ff8dd787d41cf412e90d0c4674aa70db59e608f9eb3be485c0bd18e9f13369","src/pipeline.rs":"669219add15448fdf5fe8bc5e03fd6fd1ada2b45b07047fd8c0a9bbbcdecad8b","src/present.rs":"ebcbf2e3b35d13a8dad191126de5f5006233ba8a0fb186303eafe2aeb412dce3","src/registry.rs":"4098413de7f48e9ff15d0246793be47a0d54c95b4c8594baf9fafd222a90ba84","src/resource.rs":"d6d693a1885656449444d842e88f7e56384324e92200f7c04aaa5aa835143f91","src/storage.rs":"bc70689ba299e9b4d9f4992c4d3f4dd36b1d8e71327595094981fdfd624f811a","src/track/buffer.rs":"dd6f632c6f31b15807148d705c516a8a1a8d72d02b137dd3b9d7c939447917cb","src/track/metadata.rs":"a80bd086ce825f7484ce6
318a586c482d06fea0efc9c76bfa0124e480cc8b75e","src/track/mod.rs":"04cd09cf5f26262175e48cc3855b79fbd8988916c4367a55d39a4c95784d249b","src/track/range.rs":"5bbfed6e103b3234d9de8e42057022da6d628c2cc1db6bb51b88f87f2d8adf8b","src/track/stateless.rs":"1d786b5e9558672243ba7d913736561065ef2bd5c6105c935e982486d10841f0","src/track/texture.rs":"7d60dc81ba7f7e2c2819525b90e6e6c7760cb0920e36aeefe98e76cedd49d26e","src/validation.rs":"dfcc9830398a2f6153b723f17410f7036313b67eb16b1333d13f72bbaaddf6c9"},"package":null} \ No newline at end of file diff --git a/third_party/rust/wgpu-core/Cargo.toml b/third_party/rust/wgpu-core/Cargo.toml index 7f29a9c0bbee..9d66c9e21c15 100644 --- a/third_party/rust/wgpu-core/Cargo.toml +++ b/third_party/rust/wgpu-core/Cargo.toml @@ -54,7 +54,7 @@ package = "wgpu-hal" [dependencies.naga] version = "0.12.0" git = "https://github.com/gfx-rs/naga" -rev = "b99d58ea435090e561377949f428bce2c18451bb" +rev = "76003dc0035d53a474d366dcdf49d2e4d12e921f" features = [ "clone", "span", diff --git a/third_party/rust/wgpu-core/src/binding_model.rs b/third_party/rust/wgpu-core/src/binding_model.rs index 97f56095f40c..a06476a21216 100644 --- a/third_party/rust/wgpu-core/src/binding_model.rs +++ b/third_party/rust/wgpu-core/src/binding_model.rs @@ -1,9 +1,10 @@ use crate::{ device::{DeviceError, MissingDownlevelFlags, MissingFeatures, SHADER_STAGE_COUNT}, error::{ErrorFormatter, PrettyError}, - hub::{HalApi, Resource}, + hal_api::HalApi, id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureId, TextureViewId, Valid}, init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, + resource::Resource, track::{BindGroupStates, UsageConflict}, validation::{MissingBufferUsageError, MissingTextureUsageError}, FastHashMap, Label, LifeGuard, MultiRefCount, Stored, diff --git a/third_party/rust/wgpu-core/src/command/bind.rs b/third_party/rust/wgpu-core/src/command/bind.rs index fdcb60c52dba..9b87028e381e 100644 --- a/third_party/rust/wgpu-core/src/command/bind.rs +++ b/third_party/rust/wgpu-core/src/command/bind.rs @@ -1,9 +1,10 @@ use crate::{ binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PipelineLayout}, device::SHADER_STAGE_COUNT, - hub::{HalApi, Storage}, + hal_api::HalApi, id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId, Valid}, pipeline::LateSizedBufferGroup, + storage::Storage, Stored, }; diff --git a/third_party/rust/wgpu-core/src/command/bundle.rs b/third_party/rust/wgpu-core/src/command/bundle.rs index 34d7e84cf493..7a99c90bd640 100644 --- a/third_party/rust/wgpu-core/src/command/bundle.rs +++ b/third_party/rust/wgpu-core/src/command/bundle.rs @@ -71,8 +71,8 @@ called. It goes through the commands and issues them into the native command buffer. Thanks to isolation, it doesn't track any bind group invalidations or index format changes. 
-[Gdcrbe]: crate::hub::Global::device_create_render_bundle_encoder -[Grbef]: crate::hub::Global::render_bundle_encoder_finish +[Gdcrbe]: crate::global::Global::device_create_render_bundle_encoder +[Grbef]: crate::global::Global::render_bundle_encoder_finish [wrpeb]: crate::command::render_ffi::wgpu_render_pass_execute_bundles !*/ @@ -90,11 +90,14 @@ use crate::{ RenderPassCompatibilityCheckType, RenderPassContext, SHADER_STAGE_COUNT, }, error::{ErrorFormatter, PrettyError}, - hub::{GlobalIdentityHandlerFactory, HalApi, Hub, Resource, Storage, Token}, + hal_api::HalApi, + hub::{Hub, Token}, id, + identity::GlobalIdentityHandlerFactory, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, pipeline::{self, PipelineFlags}, - resource, + resource::{self, Resource}, + storage::Storage, track::RenderBundleScope, validation::check_buffer_usage, Label, LabelHelpers, LifeGuard, Stored, diff --git a/third_party/rust/wgpu-core/src/command/clear.rs b/third_party/rust/wgpu-core/src/command/clear.rs index de11258c4243..9f677298b926 100644 --- a/third_party/rust/wgpu-core/src/command/clear.rs +++ b/third_party/rust/wgpu-core/src/command/clear.rs @@ -5,10 +5,14 @@ use crate::device::trace::Command as TraceCommand; use crate::{ command::CommandBuffer, get_lowest_common_denom, - hub::{self, Global, GlobalIdentityHandlerFactory, HalApi, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid}, + identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, TextureInitRange}, resource::{Texture, TextureClearMode}, + storage, track::{TextureSelector, TextureTracker}, }; @@ -232,7 +236,7 @@ impl Global { } pub(crate) fn clear_texture( - storage: &hub::Storage, TextureId>, + storage: &storage::Storage, TextureId>, dst_texture_id: Valid, range: TextureInitRange, encoder: &mut A::CommandEncoder, diff --git a/third_party/rust/wgpu-core/src/command/compute.rs b/third_party/rust/wgpu-core/src/command/compute.rs index d5b514f19485..0a0b4e85e6b0 100644 --- a/third_party/rust/wgpu-core/src/command/compute.rs +++ b/third_party/rust/wgpu-core/src/command/compute.rs @@ -11,11 +11,15 @@ use crate::{ }, device::{MissingDownlevelFlags, MissingFeatures}, error::{ErrorFormatter, PrettyError}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id, + identity::GlobalIdentityHandlerFactory, init_tracker::MemoryInitKind, pipeline, resource::{self, Buffer, Texture}, + storage::Storage, track::{Tracker, UsageConflict, UsageScope}, validation::{check_buffer_usage, MissingBufferUsageError}, Label, @@ -344,7 +348,12 @@ impl Global { let cmd_buf: &mut CommandBuffer = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id) .map_pass_err(init_scope)?; - // will be reset to true if recording is done without errors + + // We automatically keep extending command buffers over time, and because + // we want to insert a command buffer _before_ what we're about to record, + // we need to make sure to close the previous one. + cmd_buf.encoder.close(); + // We will reset this to `Recording` if we succeed, acts as a fail-safe. cmd_buf.status = CommandEncoderStatus::Error; let raw = cmd_buf.encoder.open(); @@ -393,6 +402,8 @@ impl Global { raw.begin_compute_pass(&hal_desc); } + let mut intermediate_trackers = Tracker::::new(); + // Immediate texture inits required because of prior discards. Need to // be inserted before texture reads. 
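The compute-pass hunks above introduce `intermediate_trackers`: state transitions discovered while recording the pass are collected on the side instead of being merged straight into `cmd_buf.trackers`, and, as the later hunks show, they are then flushed into a fresh command buffer that is spliced in ahead of the pass body. A toy model of that deferred-barrier idea, with strings standing in for wgpu-hal commands:

    // Record a pass while collecting barriers separately, then emit the
    // barriers in a "transit" list that precedes the pass body.
    fn encode_pass() -> Vec<String> {
        let mut pass = Vec::new();
        let mut barriers = Vec::new(); // stand-in for intermediate_trackers

        barriers.push("transition buf0: COPY_DST -> STORAGE".to_string());
        pass.push("dispatch 64x1x1".to_string());

        // Transit commands first, recorded pass after.
        let mut list = barriers;
        list.extend(pass);
        list
    }

    fn main() {
        let list = encode_pass();
        assert!(list[0].starts_with("transition"));
    }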
let mut pending_discard_init_fixups = SurfacesInDiscardState::new(); @@ -580,19 +591,11 @@ impl Global { pipeline: state.pipeline, }; - fixup_discarded_surfaces( - pending_discard_init_fixups.drain(..), - raw, - &texture_guard, - &mut cmd_buf.trackers.textures, - device, - ); - state.is_ready().map_pass_err(scope)?; state .flush_states( raw, - &mut cmd_buf.trackers, + &mut intermediate_trackers, &*bind_group_guard, &*buffer_guard, &*texture_guard, @@ -668,7 +671,7 @@ impl Global { state .flush_states( raw, - &mut cmd_buf.trackers, + &mut intermediate_trackers, &*bind_group_guard, &*buffer_guard, &*texture_guard, @@ -764,20 +767,33 @@ impl Global { unsafe { raw.end_compute_pass(); } + // We've successfully recorded the compute pass, bring the + // command buffer out of the error state. cmd_buf.status = CommandEncoderStatus::Recording; - // There can be entries left in pending_discard_init_fixups if a bind - // group was set, but not used (i.e. no Dispatch occurred) + // Stop the current command buffer. + cmd_buf.encoder.close(); + + // Create a new command buffer, which we will insert _before_ the body of the compute pass. // - // However, we already altered the discard/init_action state on this - // cmd_buf, so we need to apply the promised changes. + // Use that buffer to insert barriers and clear discarded images. + let transit = cmd_buf.encoder.open(); fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), - raw, + transit, &texture_guard, &mut cmd_buf.trackers.textures, device, ); + CommandBuffer::insert_barriers_from_tracker( + transit, + &mut cmd_buf.trackers, + &intermediate_trackers, + &*buffer_guard, + &*texture_guard, + ); + // Close the command buffer, and swap it with the previous. + cmd_buf.encoder.close_and_swap(); Ok(()) } diff --git a/third_party/rust/wgpu-core/src/command/memory_init.rs b/third_party/rust/wgpu-core/src/command/memory_init.rs index 52735fec5188..e8d5f73139c9 100644 --- a/third_party/rust/wgpu-core/src/command/memory_init.rs +++ b/third_party/rust/wgpu-core/src/command/memory_init.rs @@ -4,10 +4,11 @@ use hal::CommandEncoder; use crate::{ device::Device, - hub::{HalApi, Storage}, + hal_api::HalApi, id::{self, TextureId}, init_tracker::*, resource::{Buffer, Texture}, + storage::Storage, track::{TextureTracker, Tracker}, FastHashMap, }; diff --git a/third_party/rust/wgpu-core/src/command/mod.rs b/third_party/rust/wgpu-core/src/command/mod.rs index 8c5e5f2d409e..311ade7f4326 100644 --- a/third_party/rust/wgpu-core/src/command/mod.rs +++ b/third_party/rust/wgpu-core/src/command/mod.rs @@ -21,9 +21,13 @@ use crate::error::{ErrorFormatter, PrettyError}; use crate::init_tracker::BufferInitTrackerAction; use crate::track::{Tracker, UsageScope}; use crate::{ - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id, + identity::GlobalIdentityHandlerFactory, resource::{Buffer, Texture}, + storage::Storage, Label, Stored, }; @@ -51,6 +55,15 @@ struct CommandEncoder { //TODO: handle errors better impl CommandEncoder { + /// Closes the live encoder + fn close_and_swap(&mut self) { + if self.is_open { + self.is_open = false; + let new = unsafe { self.raw.end_encoding().unwrap() }; + self.list.insert(self.list.len() - 1, new); + } + } + fn close(&mut self) { if self.is_open { self.is_open = false; @@ -227,7 +240,7 @@ impl CommandBuffer { } } -impl crate::hub::Resource for CommandBuffer { +impl crate::resource::Resource for CommandBuffer { const TYPE: &'static str = "CommandBuffer"; fn 
life_guard(&self) -> &crate::LifeGuard { diff --git a/third_party/rust/wgpu-core/src/command/query.rs b/third_party/rust/wgpu-core/src/command/query.rs index 6181f83fa8e2..c34aa48c9c42 100644 --- a/third_party/rust/wgpu-core/src/command/query.rs +++ b/third_party/rust/wgpu-core/src/command/query.rs @@ -4,10 +4,14 @@ use hal::CommandEncoder as _; use crate::device::trace::Command as TraceCommand; use crate::{ command::{CommandBuffer, CommandEncoderError}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id::{self, Id, TypedId}, + identity::GlobalIdentityHandlerFactory, init_tracker::MemoryInitKind, resource::QuerySet, + storage::Storage, Epoch, FastHashMap, Index, }; use std::{iter, marker::PhantomData}; diff --git a/third_party/rust/wgpu-core/src/command/render.rs b/third_party/rust/wgpu-core/src/command/render.rs index b38e79c4b47a..ea4bcc3e05ce 100644 --- a/third_party/rust/wgpu-core/src/command/render.rs +++ b/third_party/rust/wgpu-core/src/command/render.rs @@ -14,11 +14,15 @@ use crate::{ RenderPassCompatibilityCheckType, RenderPassCompatibilityError, RenderPassContext, }, error::{ErrorFormatter, PrettyError}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id, + identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction}, pipeline::{self, PipelineFlags}, resource::{self, Buffer, Texture, TextureView, TextureViewNotRenderableReason}, + storage::Storage, track::{TextureSelector, UsageConflict, UsageScope}, validation::{ check_buffer_usage, check_texture_usage, MissingBufferUsageError, MissingTextureUsageError, @@ -1223,9 +1227,12 @@ impl Global { let cmd_buf: &mut CommandBuffer = CommandBuffer::get_encoder_mut(&mut *cmb_guard, encoder_id) .map_pass_err(init_scope)?; - // close everything while the new command encoder is filled + + // We automatically keep extending command buffers over time, and because + // we want to insert a command buffer _before_ what we're about to record, + // we need to make sure to close the previous one. cmd_buf.encoder.close(); - // will be reset to true if recording is done without errors + // We will reset this to `Recording` if we succeed, acts as a fail-safe. cmd_buf.status = CommandEncoderStatus::Error; #[cfg(feature = "trace")] @@ -2175,15 +2182,8 @@ impl Global { ); } - // Before we finish the auxiliary encoder, let's - // get our pass back and place it after. - //Note: we could just hold onto this raw pass while recording the - // auxiliary encoder, but then handling errors and cleaning up - // would be more complicated, so we re-use `open()`/`close()`. 
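Both the render-pass hunk around this point and the compute-pass one earlier now finish with `cmd_buf.encoder.close_and_swap()`, replacing the pop/close/push dance that the removed comment here describes. The helper (added in `command/mod.rs` above) closes the live encoder and inserts the finished command buffer at `len - 1`, that is, immediately before the most recently closed one. The list manipulation in isolation:

    // The splice performed by close_and_swap, shown on a plain Vec: the
    // freshly finished buffer lands just before the last closed one.
    fn close_and_swap(list: &mut Vec<&'static str>, finished: &'static str) {
        // Assumes at least one buffer was closed before the pass began.
        let idx = list.len() - 1;
        list.insert(idx, finished);
    }

    fn main() {
        let mut list = vec!["pre-pass work", "pass body"];
        close_and_swap(&mut list, "barriers for the pass");
        assert_eq!(list, ["pre-pass work", "barriers for the pass", "pass body"]);
    }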
- let pass_raw = cmd_buf.encoder.list.pop().unwrap(); - cmd_buf.encoder.close(); - cmd_buf.encoder.list.push(pass_raw); cmd_buf.status = CommandEncoderStatus::Recording; + cmd_buf.encoder.close_and_swap(); Ok(()) } diff --git a/third_party/rust/wgpu-core/src/command/transfer.rs b/third_party/rust/wgpu-core/src/command/transfer.rs index 45b26841082a..58f88a70e17d 100644 --- a/third_party/rust/wgpu-core/src/command/transfer.rs +++ b/third_party/rust/wgpu-core/src/command/transfer.rs @@ -5,13 +5,17 @@ use crate::{ conv, device::{Device, MissingDownlevelFlags}, error::{ErrorFormatter, PrettyError}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id::{BufferId, CommandEncoderId, TextureId, Valid}, + identity::GlobalIdentityHandlerFactory, init_tracker::{ has_copy_partial_init_tracker_coverage, MemoryInitKind, TextureInitRange, TextureInitTrackerAction, }, resource::{Texture, TextureErrorDimension}, + storage::Storage, track::TextureSelector, }; diff --git a/third_party/rust/wgpu-core/src/device/global.rs b/third_party/rust/wgpu-core/src/device/global.rs new file mode 100644 index 000000000000..0d7879983b67 --- /dev/null +++ b/third_party/rust/wgpu-core/src/device/global.rs @@ -0,0 +1,2672 @@ +#[cfg(feature = "trace")] +use crate::device::trace; +use crate::{ + binding_model, command, conv, + device::{life::WaitIdleError, map_buffer, queue, Device, DeviceError, HostMap}, + global::Global, + hal_api::HalApi, + hub::Token, + id::{self, AdapterId, DeviceId, SurfaceId}, + identity::{GlobalIdentityHandlerFactory, Input}, + init_tracker::TextureInitTracker, + instance::{self, Adapter, Surface}, + pipeline, present, + resource::{self, Buffer, BufferAccessResult, BufferMapState}, + resource::{BufferAccessError, BufferMapOperation, TextureClearMode}, + storage::InvalidId, + validation::check_buffer_usage, + FastHashMap, Label, LabelHelpers as _, Stored, +}; + +use hal::{CommandEncoder as _, Device as _}; +use smallvec::SmallVec; + +use wgt::{BufferAddress, TextureFormat}; + +use std::{borrow::Cow, iter, mem, ops::Range, ptr}; + +use super::{BufferMapPendingClosure, ImplicitPipelineIds, InvalidDevice, UserClosures}; + +impl Global { + pub fn adapter_is_surface_supported( + &self, + adapter_id: AdapterId, + surface_id: SurfaceId, + ) -> Result { + let hub = A::hub(self); + let mut token = Token::root(); + + let (surface_guard, mut token) = self.surfaces.read(&mut token); + let (adapter_guard, mut _token) = hub.adapters.read(&mut token); + let adapter = adapter_guard + .get(adapter_id) + .map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?; + let surface = surface_guard + .get(surface_id) + .map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?; + Ok(adapter.is_surface_supported(surface)) + } + + pub fn surface_get_capabilities( + &self, + surface_id: SurfaceId, + adapter_id: AdapterId, + ) -> Result { + profiling::scope!("Surface::get_capabilities"); + self.fetch_adapter_and_surface::(surface_id, adapter_id, |adapter, surface| { + let mut hal_caps = surface.get_capabilities(adapter)?; + + hal_caps.formats.sort_by_key(|f| !f.is_srgb()); + + Ok(wgt::SurfaceCapabilities { + formats: hal_caps.formats, + present_modes: hal_caps.present_modes, + alpha_modes: hal_caps.composite_alpha_modes, + }) + }) + } + + fn fetch_adapter_and_surface< + A: HalApi, + F: FnOnce(&Adapter, &Surface) -> Result, + B, + >( + &self, + surface_id: SurfaceId, + adapter_id: AdapterId, + get_supported_callback: F, + ) -> Result { + let hub = 
A::hub(self); + let mut token = Token::root(); + + let (surface_guard, mut token) = self.surfaces.read(&mut token); + let (adapter_guard, mut _token) = hub.adapters.read(&mut token); + let adapter = adapter_guard + .get(adapter_id) + .map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?; + let surface = surface_guard + .get(surface_id) + .map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?; + + get_supported_callback(adapter, surface) + } + + pub fn device_features( + &self, + device_id: DeviceId, + ) -> Result { + let hub = A::hub(self); + let mut token = Token::root(); + let (device_guard, _) = hub.devices.read(&mut token); + let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; + + Ok(device.features) + } + + pub fn device_limits( + &self, + device_id: DeviceId, + ) -> Result { + let hub = A::hub(self); + let mut token = Token::root(); + let (device_guard, _) = hub.devices.read(&mut token); + let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; + + Ok(device.limits.clone()) + } + + pub fn device_downlevel_properties( + &self, + device_id: DeviceId, + ) -> Result { + let hub = A::hub(self); + let mut token = Token::root(); + let (device_guard, _) = hub.devices.read(&mut token); + let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; + + Ok(device.downlevel.clone()) + } + + pub fn device_create_buffer( + &self, + device_id: DeviceId, + desc: &resource::BufferDescriptor, + id_in: Input, + ) -> (id::BufferId, Option) { + profiling::scope!("Device::create_buffer"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.buffers.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut desc = desc.clone(); + let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false); + if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { + desc.usage |= wgt::BufferUsages::COPY_DST; + } + trace + .lock() + .add(trace::Action::CreateBuffer(fid.id(), desc)); + } + + let mut buffer = match device.create_buffer(device_id, desc, false) { + Ok(buffer) => buffer, + Err(e) => break e, + }; + let ref_count = buffer.life_guard.add_ref(); + + let buffer_use = if !desc.mapped_at_creation { + hal::BufferUses::empty() + } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { + // buffer is mappable, so we are just doing that at start + let map_size = buffer.size; + let ptr = if map_size == 0 { + std::ptr::NonNull::dangling() + } else { + match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) { + Ok(ptr) => ptr, + Err(e) => { + let raw = buffer.raw.unwrap(); + device.lock_life(&mut token).schedule_resource_destruction( + queue::TempResource::Buffer(raw), + !0, + ); + break e.into(); + } + } + }; + buffer.map_state = resource::BufferMapState::Active { + ptr, + range: 0..map_size, + host: HostMap::Write, + }; + hal::BufferUses::MAP_WRITE + } else { + // buffer needs staging area for initialization only + let stage_desc = wgt::BufferDescriptor { + label: Some(Cow::Borrowed( + "(wgpu internal) initializing unmappable buffer", + )), + size: desc.size, + usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC, + mapped_at_creation: false, + }; + let mut stage = match device.create_buffer(device_id, &stage_desc, true) { + 
Ok(stage) => stage, + Err(e) => { + let raw = buffer.raw.unwrap(); + device + .lock_life(&mut token) + .schedule_resource_destruction(queue::TempResource::Buffer(raw), !0); + break e; + } + }; + let stage_buffer = stage.raw.unwrap(); + let mapping = match unsafe { device.raw.map_buffer(&stage_buffer, 0..stage.size) } { + Ok(mapping) => mapping, + Err(e) => { + let raw = buffer.raw.unwrap(); + let mut life_lock = device.lock_life(&mut token); + life_lock + .schedule_resource_destruction(queue::TempResource::Buffer(raw), !0); + life_lock.schedule_resource_destruction( + queue::TempResource::Buffer(stage_buffer), + !0, + ); + break DeviceError::from(e).into(); + } + }; + + assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0); + // Zero initialize memory and then mark both staging and buffer as initialized + // (it's guaranteed that this is the case by the time the buffer is usable) + unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) }; + buffer.initialization_status.drain(0..buffer.size); + stage.initialization_status.drain(0..buffer.size); + + buffer.map_state = resource::BufferMapState::Init { + ptr: mapping.ptr, + needs_flush: !mapping.is_coherent, + stage_buffer, + }; + hal::BufferUses::COPY_DST + }; + + let id = fid.assign(buffer, &mut token); + log::info!("Created buffer {:?} with {:?}", id, desc); + + device + .trackers + .lock() + .buffers + .insert_single(id, ref_count, buffer_use); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + /// Assign `id_in` an error with the given `label`. + /// + /// Ensure that future attempts to use `id_in` as a buffer ID will propagate + /// the error, following the WebGPU ["contagious invalidity"] style. + /// + /// Firefox uses this function to comply strictly with the WebGPU spec, + /// which requires [`GPUBufferDescriptor`] validation to be generated on the + /// Device timeline and leave the newly created [`GPUBuffer`] invalid. + /// + /// Ideally, we would simply let [`device_create_buffer`] take care of all + /// of this, but some errors must be detected before we can even construct a + /// [`wgpu_types::BufferDescriptor`] to give it. For example, the WebGPU API + /// allows a `GPUBufferDescriptor`'s [`usage`] property to be any WebIDL + /// `unsigned long` value, but we can't construct a + /// [`wgpu_types::BufferUsages`] value from values with unassigned bits + /// set. This means we must validate `usage` before we can call + /// `device_create_buffer`. + /// + /// When that validation fails, we must arrange for the buffer id to be + /// considered invalid. This method provides the means to do so. 
+ /// + /// ["contagious invalidity"]: https://www.w3.org/TR/webgpu/#invalidity + /// [`GPUBufferDescriptor`]: https://www.w3.org/TR/webgpu/#dictdef-gpubufferdescriptor + /// [`GPUBuffer`]: https://www.w3.org/TR/webgpu/#gpubuffer + /// [`wgpu_types::BufferDescriptor`]: wgt::BufferDescriptor + /// [`device_create_buffer`]: Global::device_create_buffer + /// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gputexturedescriptor-usage + /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages + pub fn create_buffer_error(&self, id_in: Input, label: Label) { + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.buffers.prepare(id_in); + + fid.assign_error(label.borrow_or_default(), &mut token); + } + + pub fn create_render_bundle_error( + &self, + id_in: Input, + label: Label, + ) { + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.render_bundles.prepare(id_in); + + let (_, mut token) = hub.devices.read(&mut token); + fid.assign_error(label.borrow_or_default(), &mut token); + } + + /// Assign `id_in` an error with the given `label`. + /// + /// See `create_buffer_error` for more context and explaination. + pub fn create_texture_error(&self, id_in: Input, label: Label) { + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.textures.prepare(id_in); + + fid.assign_error(label.borrow_or_default(), &mut token); + } + + #[cfg(feature = "replay")] + pub fn device_wait_for_buffer( + &self, + device_id: DeviceId, + buffer_id: id::BufferId, + ) -> Result<(), WaitIdleError> { + let hub = A::hub(self); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + let last_submission = { + let (buffer_guard, _) = hub.buffers.write(&mut token); + match buffer_guard.get(buffer_id) { + Ok(buffer) => buffer.life_guard.life_count(), + Err(_) => return Ok(()), + } + }; + + device_guard + .get(device_id) + .map_err(|_| DeviceError::Invalid)? 
+ .wait_for_submit(last_submission, &mut token) + } + + #[doc(hidden)] + pub fn device_set_buffer_sub_data( + &self, + device_id: DeviceId, + buffer_id: id::BufferId, + offset: BufferAddress, + data: &[u8], + ) -> BufferAccessResult { + profiling::scope!("Device::set_buffer_sub_data"); + + let hub = A::hub(self); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let (mut buffer_guard, _) = hub.buffers.write(&mut token); + let device = device_guard + .get(device_id) + .map_err(|_| DeviceError::Invalid)?; + let buffer = buffer_guard + .get_mut(buffer_id) + .map_err(|_| BufferAccessError::Invalid)?; + check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?; + //assert!(buffer isn't used by the GPU); + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut trace = trace.lock(); + let data_path = trace.make_binary("bin", data); + trace.add(trace::Action::WriteBuffer { + id: buffer_id, + data: data_path, + range: offset..offset + data.len() as BufferAddress, + queued: false, + }); + } + + let raw_buf = buffer.raw.as_ref().unwrap(); + unsafe { + let mapping = device + .raw + .map_buffer(raw_buf, offset..offset + data.len() as u64) + .map_err(DeviceError::from)?; + ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()); + if !mapping.is_coherent { + device + .raw + .flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64)); + } + device + .raw + .unmap_buffer(raw_buf) + .map_err(DeviceError::from)?; + } + + Ok(()) + } + + #[doc(hidden)] + pub fn device_get_buffer_sub_data( + &self, + device_id: DeviceId, + buffer_id: id::BufferId, + offset: BufferAddress, + data: &mut [u8], + ) -> BufferAccessResult { + profiling::scope!("Device::get_buffer_sub_data"); + + let hub = A::hub(self); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let (mut buffer_guard, _) = hub.buffers.write(&mut token); + let device = device_guard + .get(device_id) + .map_err(|_| DeviceError::Invalid)?; + let buffer = buffer_guard + .get_mut(buffer_id) + .map_err(|_| BufferAccessError::Invalid)?; + check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?; + //assert!(buffer isn't used by the GPU); + + let raw_buf = buffer.raw.as_ref().unwrap(); + unsafe { + let mapping = device + .raw + .map_buffer(raw_buf, offset..offset + data.len() as u64) + .map_err(DeviceError::from)?; + if !mapping.is_coherent { + device.raw.invalidate_mapped_ranges( + raw_buf, + iter::once(offset..offset + data.len() as u64), + ); + } + ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len()); + device + .raw + .unmap_buffer(raw_buf) + .map_err(DeviceError::from)?; + } + + Ok(()) + } + + pub fn buffer_label(&self, id: id::BufferId) -> String { + A::hub(self).buffers.label_for_resource(id) + } + + pub fn buffer_destroy( + &self, + buffer_id: id::BufferId, + ) -> Result<(), resource::DestroyError> { + profiling::scope!("Buffer::destroy"); + + let map_closure; + // Restrict the locks to this scope. 
+ { + let hub = A::hub(self); + let mut token = Token::root(); + + //TODO: lock pending writes separately, keep the device read-only + let (mut device_guard, mut token) = hub.devices.write(&mut token); + + log::info!("Buffer {:?} is destroyed", buffer_id); + let (mut buffer_guard, _) = hub.buffers.write(&mut token); + let buffer = buffer_guard + .get_mut(buffer_id) + .map_err(|_| resource::DestroyError::Invalid)?; + + let device = &mut device_guard[buffer.device_id.value]; + + map_closure = match &buffer.map_state { + &BufferMapState::Waiting(..) // To get the proper callback behavior. + | &BufferMapState::Init { .. } + | &BufferMapState::Active { .. } + => { + self.buffer_unmap_inner(buffer_id, buffer, device) + .unwrap_or(None) + } + _ => None, + }; + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::FreeBuffer(buffer_id)); + } + + let raw = buffer + .raw + .take() + .ok_or(resource::DestroyError::AlreadyDestroyed)?; + let temp = queue::TempResource::Buffer(raw); + + if device.pending_writes.dst_buffers.contains(&buffer_id) { + device.pending_writes.temp_resources.push(temp); + } else { + let last_submit_index = buffer.life_guard.life_count(); + drop(buffer_guard); + device + .lock_life(&mut token) + .schedule_resource_destruction(temp, last_submit_index); + } + } + + // Note: outside the scope where locks are held when calling the callback + if let Some((operation, status)) = map_closure { + operation.callback.call(status); + } + + Ok(()) + } + + pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { + profiling::scope!("Buffer::drop"); + log::debug!("buffer {:?} is dropped", buffer_id); + + let hub = A::hub(self); + let mut token = Token::root(); + + let (ref_count, last_submit_index, device_id) = { + let (mut buffer_guard, _) = hub.buffers.write(&mut token); + match buffer_guard.get_mut(buffer_id) { + Ok(buffer) => { + let ref_count = buffer.life_guard.ref_count.take().unwrap(); + let last_submit_index = buffer.life_guard.life_count(); + (ref_count, last_submit_index, buffer.device_id.value) + } + Err(InvalidId) => { + hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); + return; + } + } + }; + + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + { + let mut life_lock = device.lock_life(&mut token); + if device.pending_writes.dst_buffers.contains(&buffer_id) { + life_lock.future_suspected_buffers.push(Stored { + value: id::Valid(buffer_id), + ref_count, + }); + } else { + drop(ref_count); + life_lock + .suspected_resources + .buffers + .push(id::Valid(buffer_id)); + } + } + + if wait { + match device.wait_for_submit(last_submit_index, &mut token) { + Ok(()) => (), + Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), + } + } + } + + pub fn device_create_texture( + &self, + device_id: DeviceId, + desc: &resource::TextureDescriptor, + id_in: Input, + ) -> (id::TextureId, Option) { + profiling::scope!("Device::create_texture"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.textures.prepare(id_in); + + let (adapter_guard, mut token) = hub.adapters.read(&mut token); + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateTexture(fid.id(), 
desc.clone())); + } + + let adapter = &adapter_guard[device.adapter_id.value]; + let texture = match device.create_texture(device_id, adapter, desc) { + Ok(texture) => texture, + Err(error) => break error, + }; + let ref_count = texture.life_guard.add_ref(); + + let id = fid.assign(texture, &mut token); + log::info!("Created texture {:?} with {:?}", id, desc); + + device.trackers.lock().textures.insert_single( + id.0, + ref_count, + hal::TextureUses::UNINITIALIZED, + ); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + /// # Safety + /// + /// - `hal_texture` must be created from `device_id` corresponding raw handle. + /// - `hal_texture` must be created respecting `desc` + /// - `hal_texture` must be initialized + pub unsafe fn create_texture_from_hal( + &self, + hal_texture: A::Texture, + device_id: DeviceId, + desc: &resource::TextureDescriptor, + id_in: Input, + ) -> (id::TextureId, Option) { + profiling::scope!("Device::create_texture"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.textures.prepare(id_in); + + let (adapter_guard, mut token) = hub.adapters.read(&mut token); + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + + // NB: Any change done through the raw texture handle will not be + // recorded in the replay + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateTexture(fid.id(), desc.clone())); + } + + let adapter = &adapter_guard[device.adapter_id.value]; + + let format_features = match device + .describe_format_features(adapter, desc.format) + .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error)) + { + Ok(features) => features, + Err(error) => break error, + }; + + let mut texture = device.create_texture_from_hal( + hal_texture, + conv::map_texture_usage(desc.usage, desc.format.into()), + device_id, + desc, + format_features, + TextureClearMode::None, + ); + if desc.usage.contains(wgt::TextureUsages::COPY_DST) { + texture.hal_usage |= hal::TextureUses::COPY_DST; + } + + texture.initialization_status = TextureInitTracker::new(desc.mip_level_count, 0); + + let ref_count = texture.life_guard.add_ref(); + + let id = fid.assign(texture, &mut token); + log::info!("Created texture {:?} with {:?}", id, desc); + + device.trackers.lock().textures.insert_single( + id.0, + ref_count, + hal::TextureUses::UNINITIALIZED, + ); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + pub fn texture_label(&self, id: id::TextureId) -> String { + A::hub(self).textures.label_for_resource(id) + } + + pub fn texture_destroy( + &self, + texture_id: id::TextureId, + ) -> Result<(), resource::DestroyError> { + profiling::scope!("Texture::destroy"); + + let hub = A::hub(self); + let mut token = Token::root(); + + //TODO: lock pending writes separately, keep the device read-only + let (mut device_guard, mut token) = hub.devices.write(&mut token); + + log::info!("Buffer {:?} is destroyed", texture_id); + let (mut texture_guard, _) = hub.textures.write(&mut token); + let texture = texture_guard + .get_mut(texture_id) + .map_err(|_| resource::DestroyError::Invalid)?; + + let device = &mut device_guard[texture.device_id.value]; + + #[cfg(feature = "trace")] + if let 
Some(ref trace) = device.trace { + trace.lock().add(trace::Action::FreeTexture(texture_id)); + } + + let last_submit_index = texture.life_guard.life_count(); + + let clear_views = match std::mem::replace(&mut texture.clear_mode, TextureClearMode::None) { + TextureClearMode::BufferCopy => SmallVec::new(), + TextureClearMode::RenderPass { clear_views, .. } => clear_views, + TextureClearMode::None => SmallVec::new(), + }; + + match texture.inner { + resource::TextureInner::Native { ref mut raw } => { + let raw = raw.take().ok_or(resource::DestroyError::AlreadyDestroyed)?; + let temp = queue::TempResource::Texture(raw, clear_views); + + if device.pending_writes.dst_textures.contains(&texture_id) { + device.pending_writes.temp_resources.push(temp); + } else { + drop(texture_guard); + device + .lock_life(&mut token) + .schedule_resource_destruction(temp, last_submit_index); + } + } + resource::TextureInner::Surface { .. } => { + for clear_view in clear_views { + unsafe { + device.raw.destroy_texture_view(clear_view); + } + } + // TODO? + } + } + + Ok(()) + } + + pub fn texture_drop(&self, texture_id: id::TextureId, wait: bool) { + profiling::scope!("Texture::drop"); + log::debug!("texture {:?} is dropped", texture_id); + + let hub = A::hub(self); + let mut token = Token::root(); + + let (ref_count, last_submit_index, device_id) = { + let (mut texture_guard, _) = hub.textures.write(&mut token); + match texture_guard.get_mut(texture_id) { + Ok(texture) => { + let ref_count = texture.life_guard.ref_count.take().unwrap(); + let last_submit_index = texture.life_guard.life_count(); + (ref_count, last_submit_index, texture.device_id.value) + } + Err(InvalidId) => { + hub.textures + .unregister_locked(texture_id, &mut *texture_guard); + return; + } + } + }; + + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + { + let mut life_lock = device.lock_life(&mut token); + if device.pending_writes.dst_textures.contains(&texture_id) { + life_lock.future_suspected_textures.push(Stored { + value: id::Valid(texture_id), + ref_count, + }); + } else { + drop(ref_count); + life_lock + .suspected_resources + .textures + .push(id::Valid(texture_id)); + } + } + + if wait { + match device.wait_for_submit(last_submit_index, &mut token) { + Ok(()) => (), + Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e), + } + } + } + + pub fn texture_create_view( + &self, + texture_id: id::TextureId, + desc: &resource::TextureViewDescriptor, + id_in: Input, + ) -> (id::TextureViewId, Option) { + profiling::scope!("Texture::create_view"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.texture_views.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let (texture_guard, mut token) = hub.textures.read(&mut token); + let error = loop { + let texture = match texture_guard.get(texture_id) { + Ok(texture) => texture, + Err(_) => break resource::CreateTextureViewError::InvalidTexture, + }; + let device = &device_guard[texture.device_id.value]; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateTextureView { + id: fid.id(), + parent_id: texture_id, + desc: desc.clone(), + }); + } + + let view = match device.create_texture_view(texture, texture_id, desc) { + Ok(view) => view, + Err(e) => break e, + }; + let ref_count = view.life_guard.add_ref(); + let id = fid.assign(view, &mut token); + + device.trackers.lock().views.insert_single(id, ref_count); 
+ return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + pub fn texture_view_label(&self, id: id::TextureViewId) -> String { + A::hub(self).texture_views.label_for_resource(id) + } + + pub fn texture_view_drop( + &self, + texture_view_id: id::TextureViewId, + wait: bool, + ) -> Result<(), resource::TextureViewDestroyError> { + profiling::scope!("TextureView::drop"); + log::debug!("texture view {:?} is dropped", texture_view_id); + + let hub = A::hub(self); + let mut token = Token::root(); + + let (last_submit_index, device_id) = { + let (mut texture_view_guard, _) = hub.texture_views.write(&mut token); + + match texture_view_guard.get_mut(texture_view_id) { + Ok(view) => { + let _ref_count = view.life_guard.ref_count.take(); + let last_submit_index = view.life_guard.life_count(); + (last_submit_index, view.device_id.value) + } + Err(InvalidId) => { + hub.texture_views + .unregister_locked(texture_view_id, &mut *texture_view_guard); + return Ok(()); + } + } + }; + + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + device + .lock_life(&mut token) + .suspected_resources + .texture_views + .push(id::Valid(texture_view_id)); + + if wait { + match device.wait_for_submit(last_submit_index, &mut token) { + Ok(()) => (), + Err(e) => log::error!( + "Failed to wait for texture view {:?}: {:?}", + texture_view_id, + e + ), + } + } + Ok(()) + } + + pub fn device_create_sampler( + &self, + device_id: DeviceId, + desc: &resource::SamplerDescriptor, + id_in: Input, + ) -> (id::SamplerId, Option) { + profiling::scope!("Device::create_sampler"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.samplers.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateSampler(fid.id(), desc.clone())); + } + + let sampler = match device.create_sampler(device_id, desc) { + Ok(sampler) => sampler, + Err(e) => break e, + }; + let ref_count = sampler.life_guard.add_ref(); + let id = fid.assign(sampler, &mut token); + + device.trackers.lock().samplers.insert_single(id, ref_count); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + pub fn sampler_label(&self, id: id::SamplerId) -> String { + A::hub(self).samplers.label_for_resource(id) + } + + pub fn sampler_drop(&self, sampler_id: id::SamplerId) { + profiling::scope!("Sampler::drop"); + log::debug!("sampler {:?} is dropped", sampler_id); + + let hub = A::hub(self); + let mut token = Token::root(); + + let device_id = { + let (mut sampler_guard, _) = hub.samplers.write(&mut token); + match sampler_guard.get_mut(sampler_id) { + Ok(sampler) => { + sampler.life_guard.ref_count.take(); + sampler.device_id.value + } + Err(InvalidId) => { + hub.samplers + .unregister_locked(sampler_id, &mut *sampler_guard); + return; + } + } + }; + + let (device_guard, mut token) = hub.devices.read(&mut token); + device_guard[device_id] + .lock_life(&mut token) + .suspected_resources + .samplers + .push(id::Valid(sampler_id)); + } + + pub fn device_create_bind_group_layout( + &self, + device_id: DeviceId, + desc: &binding_model::BindGroupLayoutDescriptor, + id_in: Input, + ) -> 
( + id::BindGroupLayoutId, + Option, + ) { + profiling::scope!("Device::create_bind_group_layout"); + + let mut token = Token::root(); + let hub = A::hub(self); + let fid = hub.bind_group_layouts.prepare(id_in); + + let error = 'outer: loop { + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone())); + } + + let mut entry_map = FastHashMap::default(); + for entry in desc.entries.iter() { + if entry.binding > device.limits.max_bindings_per_bind_group { + break 'outer binding_model::CreateBindGroupLayoutError::InvalidBindingIndex { + binding: entry.binding, + maximum: device.limits.max_bindings_per_bind_group, + }; + } + if entry_map.insert(entry.binding, *entry).is_some() { + break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding( + entry.binding, + ); + } + } + + // If there is an equivalent BGL, just bump the refcount and return it. + // This is only applicable for identity filters that are generating new IDs, + // so their inputs are `PhantomData` of size 0. + if mem::size_of::>() == 0 { + let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token); + if let Some(id) = + Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard) + { + return (id, None); + } + } + + let layout = match device.create_bind_group_layout( + device_id, + desc.label.borrow_option(), + entry_map, + ) { + Ok(layout) => layout, + Err(e) => break e, + }; + + let id = fid.assign(layout, &mut token); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + pub fn bind_group_layout_label(&self, id: id::BindGroupLayoutId) -> String { + A::hub(self).bind_group_layouts.label_for_resource(id) + } + + pub fn bind_group_layout_drop(&self, bind_group_layout_id: id::BindGroupLayoutId) { + profiling::scope!("BindGroupLayout::drop"); + log::debug!("bind group layout {:?} is dropped", bind_group_layout_id); + + let hub = A::hub(self); + let mut token = Token::root(); + let device_id = { + let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token); + match bind_group_layout_guard.get_mut(bind_group_layout_id) { + Ok(layout) => layout.device_id.value, + Err(InvalidId) => { + hub.bind_group_layouts + .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard); + return; + } + } + }; + + let (device_guard, mut token) = hub.devices.read(&mut token); + device_guard[device_id] + .lock_life(&mut token) + .suspected_resources + .bind_group_layouts + .push(id::Valid(bind_group_layout_id)); + } + + pub fn device_create_pipeline_layout( + &self, + device_id: DeviceId, + desc: &binding_model::PipelineLayoutDescriptor, + id_in: Input, + ) -> ( + id::PipelineLayoutId, + Option, + ) { + profiling::scope!("Device::create_pipeline_layout"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.pipeline_layouts.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone())); + } + + let 
layout = { + let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token); + match device.create_pipeline_layout(device_id, desc, &*bgl_guard) { + Ok(layout) => layout, + Err(e) => break e, + } + }; + + let id = fid.assign(layout, &mut token); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + pub fn pipeline_layout_label(&self, id: id::PipelineLayoutId) -> String { + A::hub(self).pipeline_layouts.label_for_resource(id) + } + + pub fn pipeline_layout_drop(&self, pipeline_layout_id: id::PipelineLayoutId) { + profiling::scope!("PipelineLayout::drop"); + log::debug!("pipeline layout {:?} is dropped", pipeline_layout_id); + + let hub = A::hub(self); + let mut token = Token::root(); + let (device_id, ref_count) = { + let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token); + match pipeline_layout_guard.get_mut(pipeline_layout_id) { + Ok(layout) => ( + layout.device_id.value, + layout.life_guard.ref_count.take().unwrap(), + ), + Err(InvalidId) => { + hub.pipeline_layouts + .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard); + return; + } + } + }; + + let (device_guard, mut token) = hub.devices.read(&mut token); + device_guard[device_id] + .lock_life(&mut token) + .suspected_resources + .pipeline_layouts + .push(Stored { + value: id::Valid(pipeline_layout_id), + ref_count, + }); + } + + pub fn device_create_bind_group( + &self, + device_id: DeviceId, + desc: &binding_model::BindGroupDescriptor, + id_in: Input, + ) -> (id::BindGroupId, Option) { + profiling::scope!("Device::create_bind_group"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.bind_groups.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token); + + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); + } + + let bind_group_layout = match bind_group_layout_guard.get(desc.layout) { + Ok(layout) => layout, + Err(_) => break binding_model::CreateBindGroupError::InvalidLayout, + }; + let bind_group = + match device.create_bind_group(device_id, bind_group_layout, desc, hub, &mut token) + { + Ok(bind_group) => bind_group, + Err(e) => break e, + }; + let ref_count = bind_group.life_guard.add_ref(); + + let id = fid.assign(bind_group, &mut token); + log::debug!("Bind group {:?}", id,); + + device + .trackers + .lock() + .bind_groups + .insert_single(id, ref_count); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + pub fn bind_group_label(&self, id: id::BindGroupId) -> String { + A::hub(self).bind_groups.label_for_resource(id) + } + + pub fn bind_group_drop(&self, bind_group_id: id::BindGroupId) { + profiling::scope!("BindGroup::drop"); + log::debug!("bind group {:?} is dropped", bind_group_id); + + let hub = A::hub(self); + let mut token = Token::root(); + + let device_id = { + let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token); + match bind_group_guard.get_mut(bind_group_id) { + Ok(bind_group) => { + bind_group.life_guard.ref_count.take(); + bind_group.device_id.value + } + Err(InvalidId) => { + hub.bind_groups + .unregister_locked(bind_group_id, &mut 
*bind_group_guard); + return; + } + } + }; + + let (device_guard, mut token) = hub.devices.read(&mut token); + device_guard[device_id] + .lock_life(&mut token) + .suspected_resources + .bind_groups + .push(id::Valid(bind_group_id)); + } + + pub fn device_create_shader_module( + &self, + device_id: DeviceId, + desc: &pipeline::ShaderModuleDescriptor, + source: pipeline::ShaderModuleSource, + id_in: Input, + ) -> ( + id::ShaderModuleId, + Option, + ) { + profiling::scope!("Device::create_shader_module"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.shader_modules.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut trace = trace.lock(); + let data = match source { + #[cfg(feature = "wgsl")] + pipeline::ShaderModuleSource::Wgsl(ref code) => { + trace.make_binary("wgsl", code.as_bytes()) + } + pipeline::ShaderModuleSource::Naga(ref module) => { + let string = + ron::ser::to_string_pretty(module, ron::ser::PrettyConfig::default()) + .unwrap(); + trace.make_binary("ron", string.as_bytes()) + } + pipeline::ShaderModuleSource::Dummy(_) => { + panic!("found `ShaderModuleSource::Dummy`") + } + }; + trace.add(trace::Action::CreateShaderModule { + id: fid.id(), + desc: desc.clone(), + data, + }); + }; + + let shader = match device.create_shader_module(device_id, desc, source) { + Ok(shader) => shader, + Err(e) => break e, + }; + let id = fid.assign(shader, &mut token); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + // Unsafe-ness of internal calls has little to do with unsafe-ness of this. + #[allow(unused_unsafe)] + /// # Safety + /// + /// This function passes SPIR-V binary to the backend as-is and can potentially result in a + /// driver crash. 
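The `device_create_shader_module_spirv` entry point below takes the module as `Cow<[u32]>` words, so a caller holding raw `.spv` bytes has to reassemble 32-bit words first. A self-contained sketch of that step (the helper name is ours; the public `wgpu` crate ships a similar `util::make_spirv_raw`):

```rust
use std::borrow::Cow;

/// Reassemble little-endian SPIR-V words from raw `.spv` bytes, with the
/// sanity checks a caller should run before reaching the unsafe API.
fn spirv_words(bytes: &[u8]) -> Cow<'static, [u32]> {
    const MAGIC: u32 = 0x0723_0203; // SPIR-V magic number
    assert!(bytes.len() % 4 == 0, "SPIR-V length must be a multiple of 4");
    let words: Vec<u32> = bytes
        .chunks_exact(4)
        .map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
        .collect();
    assert_eq!(words.first().copied(), Some(MAGIC), "bad SPIR-V magic");
    Cow::Owned(words)
}
```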
+ pub unsafe fn device_create_shader_module_spirv( + &self, + device_id: DeviceId, + desc: &pipeline::ShaderModuleDescriptor, + source: Cow<[u32]>, + id_in: Input, + ) -> ( + id::ShaderModuleId, + Option, + ) { + profiling::scope!("Device::create_shader_module"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.shader_modules.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut trace = trace.lock(); + let data = trace.make_binary("spv", unsafe { + std::slice::from_raw_parts(source.as_ptr() as *const u8, source.len() * 4) + }); + trace.add(trace::Action::CreateShaderModule { + id: fid.id(), + desc: desc.clone(), + data, + }); + }; + + let shader = + match unsafe { device.create_shader_module_spirv(device_id, desc, &source) } { + Ok(shader) => shader, + Err(e) => break e, + }; + let id = fid.assign(shader, &mut token); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + pub fn shader_module_label(&self, id: id::ShaderModuleId) -> String { + A::hub(self).shader_modules.label_for_resource(id) + } + + pub fn shader_module_drop(&self, shader_module_id: id::ShaderModuleId) { + profiling::scope!("ShaderModule::drop"); + log::debug!("shader module {:?} is dropped", shader_module_id); + + let hub = A::hub(self); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token); + if let Some(module) = module { + let device = &device_guard[module.device_id.value]; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::DestroyShaderModule(shader_module_id)); + } + unsafe { + device.raw.destroy_shader_module(module.raw); + } + } + } + + pub fn device_create_command_encoder( + &self, + device_id: DeviceId, + desc: &wgt::CommandEncoderDescriptor(command_buffer_id) + } + + pub fn device_create_render_bundle_encoder( + &self, + device_id: DeviceId, + desc: &command::RenderBundleEncoderDescriptor, + ) -> ( + id::RenderBundleEncoderId, + Option, + ) { + profiling::scope!("Device::create_render_bundle_encoder"); + let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) { + Ok(encoder) => (encoder, None), + Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)), + }; + (Box::into_raw(Box::new(encoder)), error) + } + + pub fn render_bundle_encoder_finish( + &self, + bundle_encoder: command::RenderBundleEncoder, + desc: &command::RenderBundleDescriptor, + id_in: Input, + ) -> (id::RenderBundleId, Option) { + profiling::scope!("RenderBundleEncoder::finish"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.render_bundles.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(bundle_encoder.parent()) { + Ok(device) => device, + Err(_) => break command::RenderBundleError::INVALID_DEVICE, + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateRenderBundle { + id: fid.id(), + desc: trace::new_render_bundle_encoder_descriptor( + desc.label.clone(), + &bundle_encoder.context, + 
bundle_encoder.is_depth_read_only, + bundle_encoder.is_stencil_read_only, + ), + base: bundle_encoder.to_base_pass(), + }); + } + + let render_bundle = match bundle_encoder.finish(desc, device, hub, &mut token) { + Ok(bundle) => bundle, + Err(e) => break e, + }; + + log::debug!("Render bundle"); + let ref_count = render_bundle.life_guard.add_ref(); + let id = fid.assign(render_bundle, &mut token); + + device.trackers.lock().bundles.insert_single(id, ref_count); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + pub fn render_bundle_label(&self, id: id::RenderBundleId) -> String { + A::hub(self).render_bundles.label_for_resource(id) + } + + pub fn render_bundle_drop(&self, render_bundle_id: id::RenderBundleId) { + profiling::scope!("RenderBundle::drop"); + log::debug!("render bundle {:?} is dropped", render_bundle_id); + let hub = A::hub(self); + let mut token = Token::root(); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let device_id = { + let (mut bundle_guard, _) = hub.render_bundles.write(&mut token); + match bundle_guard.get_mut(render_bundle_id) { + Ok(bundle) => { + bundle.life_guard.ref_count.take(); + bundle.device_id.value + } + Err(InvalidId) => { + hub.render_bundles + .unregister_locked(render_bundle_id, &mut *bundle_guard); + return; + } + } + }; + + device_guard[device_id] + .lock_life(&mut token) + .suspected_resources + .render_bundles + .push(id::Valid(render_bundle_id)); + } + + pub fn device_create_query_set( + &self, + device_id: DeviceId, + desc: &resource::QuerySetDescriptor, + id_in: Input, + ) -> (id::QuerySetId, Option) { + profiling::scope!("Device::create_query_set"); + + let hub = A::hub(self); + let mut token = Token::root(); + let fid = hub.query_sets.prepare(id_in); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateQuerySet { + id: fid.id(), + desc: desc.clone(), + }); + } + + let query_set = match device.create_query_set(device_id, desc) { + Ok(query_set) => query_set, + Err(err) => break err, + }; + + let ref_count = query_set.life_guard.add_ref(); + let id = fid.assign(query_set, &mut token); + + device + .trackers + .lock() + .query_sets + .insert_single(id, ref_count); + + return (id.0, None); + }; + + let id = fid.assign_error("", &mut token); + (id, Some(error)) + } + + pub fn query_set_drop(&self, query_set_id: id::QuerySetId) { + profiling::scope!("QuerySet::drop"); + log::debug!("query set {:?} is dropped", query_set_id); + + let hub = A::hub(self); + let mut token = Token::root(); + + let device_id = { + let (mut query_set_guard, _) = hub.query_sets.write(&mut token); + let query_set = query_set_guard.get_mut(query_set_id).unwrap(); + query_set.life_guard.ref_count.take(); + query_set.device_id.value + }; + + let (device_guard, mut token) = hub.devices.read(&mut token); + let device = &device_guard[device_id]; + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::DestroyQuerySet(query_set_id)); + } + + device + .lock_life(&mut token) + .suspected_resources + .query_sets + .push(id::Valid(query_set_id)); + } + + pub fn query_set_label(&self, id: id::QuerySetId) -> String { + A::hub(self).query_sets.label_for_resource(id) + 
} + + pub fn device_create_render_pipeline( + &self, + device_id: DeviceId, + desc: &pipeline::RenderPipelineDescriptor, + id_in: Input, + implicit_pipeline_ids: Option>, + ) -> ( + id::RenderPipelineId, + Option, + ) { + profiling::scope!("Device::create_render_pipeline"); + + let hub = A::hub(self); + let mut token = Token::root(); + + let fid = hub.render_pipelines.prepare(id_in); + let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); + + let (adapter_guard, mut token) = hub.adapters.read(&mut token); + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + let adapter = &adapter_guard[device.adapter_id.value]; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateRenderPipeline { + id: fid.id(), + desc: desc.clone(), + implicit_context: implicit_context.clone(), + }); + } + + let pipeline = match device.create_render_pipeline( + device_id, + adapter, + desc, + implicit_context, + hub, + &mut token, + ) { + Ok(pair) => pair, + Err(e) => break e, + }; + let ref_count = pipeline.life_guard.add_ref(); + + let id = fid.assign(pipeline, &mut token); + log::info!("Created render pipeline {:?} with {:?}", id, desc); + + device + .trackers + .lock() + .render_pipelines + .insert_single(id, ref_count); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + /// Get an ID of one of the bind group layouts. The ID adds a refcount, + /// which needs to be released by calling `bind_group_layout_drop`. + pub fn render_pipeline_get_bind_group_layout( + &self, + pipeline_id: id::RenderPipelineId, + index: u32, + id_in: Input, + ) -> ( + id::BindGroupLayoutId, + Option, + ) { + let hub = A::hub(self); + let mut token = Token::root(); + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + + let error = loop { + let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token); + let (_, mut token) = hub.bind_groups.read(&mut token); + let (pipeline_guard, _) = hub.render_pipelines.read(&mut token); + + let pipeline = match pipeline_guard.get(pipeline_id) { + Ok(pipeline) => pipeline, + Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, + }; + let id = match pipeline_layout_guard[pipeline.layout_id.value] + .bind_group_layout_ids + .get(index as usize) + { + Some(id) => id, + None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), + }; + + bgl_guard[*id].multi_ref_count.inc(); + return (id.0, None); + }; + + let id = hub + .bind_group_layouts + .prepare(id_in) + .assign_error("", &mut token); + (id, Some(error)) + } + + pub fn render_pipeline_label(&self, id: id::RenderPipelineId) -> String { + A::hub(self).render_pipelines.label_for_resource(id) + } + + pub fn render_pipeline_drop(&self, render_pipeline_id: id::RenderPipelineId) { + profiling::scope!("RenderPipeline::drop"); + log::debug!("render pipeline {:?} is dropped", render_pipeline_id); + let hub = A::hub(self); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + + let (device_id, layout_id) = { + let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token); + match pipeline_guard.get_mut(render_pipeline_id) { + Ok(pipeline) => { + pipeline.life_guard.ref_count.take(); + (pipeline.device_id.value, 
pipeline.layout_id.clone()) + } + Err(InvalidId) => { + hub.render_pipelines + .unregister_locked(render_pipeline_id, &mut *pipeline_guard); + return; + } + } + }; + + let mut life_lock = device_guard[device_id].lock_life(&mut token); + life_lock + .suspected_resources + .render_pipelines + .push(id::Valid(render_pipeline_id)); + life_lock + .suspected_resources + .pipeline_layouts + .push(layout_id); + } + + pub fn device_create_compute_pipeline( + &self, + device_id: DeviceId, + desc: &pipeline::ComputePipelineDescriptor, + id_in: Input, + implicit_pipeline_ids: Option>, + ) -> ( + id::ComputePipelineId, + Option, + ) { + profiling::scope!("Device::create_compute_pipeline"); + + let hub = A::hub(self); + let mut token = Token::root(); + + let fid = hub.compute_pipelines.prepare(id_in); + let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); + + let (device_guard, mut token) = hub.devices.read(&mut token); + let error = loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateComputePipeline { + id: fid.id(), + desc: desc.clone(), + implicit_context: implicit_context.clone(), + }); + } + + let pipeline = match device.create_compute_pipeline( + device_id, + desc, + implicit_context, + hub, + &mut token, + ) { + Ok(pair) => pair, + Err(e) => break e, + }; + let ref_count = pipeline.life_guard.add_ref(); + + let id = fid.assign(pipeline, &mut token); + log::info!("Created compute pipeline {:?} with {:?}", id, desc); + + device + .trackers + .lock() + .compute_pipelines + .insert_single(id, ref_count); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + (id, Some(error)) + } + + /// Get an ID of one of the bind group layouts. The ID adds a refcount, + /// which needs to be released by calling `bind_group_layout_drop`. 
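Both `render_pipeline_get_bind_group_layout` above and its compute twin below bump a multi-reference count that the caller must later release via `bind_group_layout_drop`. A toy RAII sketch of that contract, using stand-in types rather than the real hub:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for the hub's multi-reference counter; not a wgpu type.
struct MultiRefCount(AtomicUsize);

impl MultiRefCount {
    fn inc(&self) {
        self.0.fetch_add(1, Ordering::AcqRel);
    }
    /// Returns true when the last reference was just released.
    fn dec(&self) -> bool {
        self.0.fetch_sub(1, Ordering::AcqRel) == 1
    }
}

/// Guard pairing the refcount taken by a `*_get_bind_group_layout` call
/// with the release that `bind_group_layout_drop` performs, so the drop
/// can never be forgotten.
struct LayoutRef<'a>(&'a MultiRefCount);

impl Drop for LayoutRef<'_> {
    fn drop(&mut self) {
        if self.0.dec() {
            // last reference gone: the layout itself would now be freed
        }
    }
}
```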
+ pub fn compute_pipeline_get_bind_group_layout( + &self, + pipeline_id: id::ComputePipelineId, + index: u32, + id_in: Input, + ) -> ( + id::BindGroupLayoutId, + Option, + ) { + let hub = A::hub(self); + let mut token = Token::root(); + let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); + + let error = loop { + let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token); + let (_, mut token) = hub.bind_groups.read(&mut token); + let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token); + + let pipeline = match pipeline_guard.get(pipeline_id) { + Ok(pipeline) => pipeline, + Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, + }; + let id = match pipeline_layout_guard[pipeline.layout_id.value] + .bind_group_layout_ids + .get(index as usize) + { + Some(id) => id, + None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), + }; + + bgl_guard[*id].multi_ref_count.inc(); + return (id.0, None); + }; + + let id = hub + .bind_group_layouts + .prepare(id_in) + .assign_error("", &mut token); + (id, Some(error)) + } + + pub fn compute_pipeline_label(&self, id: id::ComputePipelineId) -> String { + A::hub(self).compute_pipelines.label_for_resource(id) + } + + pub fn compute_pipeline_drop(&self, compute_pipeline_id: id::ComputePipelineId) { + profiling::scope!("ComputePipeline::drop"); + log::debug!("compute pipeline {:?} is dropped", compute_pipeline_id); + let hub = A::hub(self); + let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); + + let (device_id, layout_id) = { + let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token); + match pipeline_guard.get_mut(compute_pipeline_id) { + Ok(pipeline) => { + pipeline.life_guard.ref_count.take(); + (pipeline.device_id.value, pipeline.layout_id.clone()) + } + Err(InvalidId) => { + hub.compute_pipelines + .unregister_locked(compute_pipeline_id, &mut *pipeline_guard); + return; + } + } + }; + + let mut life_lock = device_guard[device_id].lock_life(&mut token); + life_lock + .suspected_resources + .compute_pipelines + .push(id::Valid(compute_pipeline_id)); + life_lock + .suspected_resources + .pipeline_layouts + .push(layout_id); + } + + pub fn surface_configure( + &self, + surface_id: SurfaceId, + device_id: DeviceId, + config: &wgt::SurfaceConfiguration>, + ) -> Option { + use hal::{Adapter as _, Surface as _}; + use present::ConfigureSurfaceError as E; + profiling::scope!("surface_configure"); + + fn validate_surface_configuration( + config: &mut hal::SurfaceConfiguration, + caps: &hal::SurfaceCapabilities, + ) -> Result<(), E> { + let width = config.extent.width; + let height = config.extent.height; + if width < caps.extents.start().width + || width > caps.extents.end().width + || height < caps.extents.start().height + || height > caps.extents.end().height + { + log::warn!( + "Requested size {}x{} is outside of the supported range: {:?}", + width, + height, + caps.extents + ); + } + if !caps.present_modes.contains(&config.present_mode) { + let new_mode = 'b: loop { + // Automatic present mode checks. + // + // The "Automatic" modes are never supported by the backends. + let fallbacks = match config.present_mode { + wgt::PresentMode::AutoVsync => { + &[wgt::PresentMode::FifoRelaxed, wgt::PresentMode::Fifo][..] 
+ } + // Always end in FIFO to make sure it's always supported + wgt::PresentMode::AutoNoVsync => &[ + wgt::PresentMode::Immediate, + wgt::PresentMode::Mailbox, + wgt::PresentMode::Fifo, + ][..], + _ => { + return Err(E::UnsupportedPresentMode { + requested: config.present_mode, + available: caps.present_modes.clone(), + }); + } + }; + + for &fallback in fallbacks { + if caps.present_modes.contains(&fallback) { + break 'b fallback; + } + } + + unreachable!("Fallback system failed to choose present mode. This is a bug. Mode: {:?}, Options: {:?}", config.present_mode, &caps.present_modes); + }; + + log::info!( + "Automatically choosing presentation mode by rule {:?}. Chose {new_mode:?}", + config.present_mode + ); + config.present_mode = new_mode; + } + if !caps.formats.contains(&config.format) { + return Err(E::UnsupportedFormat { + requested: config.format, + available: caps.formats.clone(), + }); + } + if !caps + .composite_alpha_modes + .contains(&config.composite_alpha_mode) + { + let new_alpha_mode = 'alpha: loop { + // Automatic alpha mode checks. + let fallbacks = match config.composite_alpha_mode { + wgt::CompositeAlphaMode::Auto => &[ + wgt::CompositeAlphaMode::Opaque, + wgt::CompositeAlphaMode::Inherit, + ][..], + _ => { + return Err(E::UnsupportedAlphaMode { + requested: config.composite_alpha_mode, + available: caps.composite_alpha_modes.clone(), + }); + } + }; + + for &fallback in fallbacks { + if caps.composite_alpha_modes.contains(&fallback) { + break 'alpha fallback; + } + } + + unreachable!( + "Fallback system failed to choose alpha mode. This is a bug. \ + AlphaMode: {:?}, Options: {:?}", + config.composite_alpha_mode, &caps.composite_alpha_modes + ); + }; + + log::info!( + "Automatically choosing alpha mode by rule {:?}. Chose {new_alpha_mode:?}", + config.composite_alpha_mode + ); + config.composite_alpha_mode = new_alpha_mode; + } + if !caps.usage.contains(config.usage) { + return Err(E::UnsupportedUsage); + } + if width == 0 || height == 0 { + return Err(E::ZeroArea); + } + Ok(()) + } + + log::info!("configuring surface with {:?}", config); + let hub = A::hub(self); + let mut token = Token::root(); + + let (mut surface_guard, mut token) = self.surfaces.write(&mut token); + let (adapter_guard, mut token) = hub.adapters.read(&mut token); + let (device_guard, _token) = hub.devices.read(&mut token); + + let error = 'outer: loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::ConfigureSurface(surface_id, config.clone())); + } + + let surface = match surface_guard.get_mut(surface_id) { + Ok(surface) => surface, + Err(_) => break E::InvalidSurface, + }; + + let caps = unsafe { + let suf = A::get_surface(surface); + let adapter = &adapter_guard[device.adapter_id.value]; + match adapter.raw.adapter.surface_capabilities(&suf.unwrap().raw) { + Some(caps) => caps, + None => break E::UnsupportedQueueFamily, + } + }; + + let mut hal_view_formats = vec![]; + for format in config.view_formats.iter() { + if *format == config.format { + continue; + } + if !caps.formats.contains(&config.format) { + break 'outer E::UnsupportedFormat { + requested: config.format, + available: caps.formats, + }; + } + if config.format.remove_srgb_suffix() != format.remove_srgb_suffix() { + break 'outer E::InvalidViewFormat(*format, config.format); + } + hal_view_formats.push(*format); + } + + if 
!hal_view_formats.is_empty() {
+                if let Err(missing_flag) =
+                    device.require_downlevel_flags(wgt::DownlevelFlags::SURFACE_VIEW_FORMATS)
+                {
+                    break 'outer E::MissingDownlevelFlags(missing_flag);
+                }
+            }
+
+            let num_frames = present::DESIRED_NUM_FRAMES
+                .clamp(*caps.swap_chain_sizes.start(), *caps.swap_chain_sizes.end());
+            let mut hal_config = hal::SurfaceConfiguration {
+                swap_chain_size: num_frames,
+                present_mode: config.present_mode,
+                composite_alpha_mode: config.alpha_mode,
+                format: config.format,
+                extent: wgt::Extent3d {
+                    width: config.width,
+                    height: config.height,
+                    depth_or_array_layers: 1,
+                },
+                usage: conv::map_texture_usage(config.usage, hal::FormatAspects::COLOR),
+                view_formats: hal_view_formats,
+            };
+
+            if let Err(error) = validate_surface_configuration(&mut hal_config, &caps) {
+                break error;
+            }
+
+            match unsafe {
+                A::get_surface_mut(surface)
+                    .unwrap()
+                    .raw
+                    .configure(&device.raw, &hal_config)
+            } {
+                Ok(()) => (),
+                Err(error) => {
+                    break match error {
+                        hal::SurfaceError::Outdated | hal::SurfaceError::Lost => E::InvalidSurface,
+                        hal::SurfaceError::Device(error) => E::Device(error.into()),
+                        hal::SurfaceError::Other(message) => {
+                            log::error!("surface configuration failed: {}", message);
+                            E::InvalidSurface
+                        }
+                    }
+                }
+            }
+
+            if let Some(present) = surface.presentation.take() {
+                if present.acquired_texture.is_some() {
+                    break E::PreviousOutputExists;
+                }
+            }
+
+            surface.presentation = Some(present::Presentation {
+                device_id: Stored {
+                    value: id::Valid(device_id),
+                    ref_count: device.life_guard.add_ref(),
+                },
+                config: config.clone(),
+                num_frames,
+                acquired_texture: None,
+            });
+
+            return None;
+        };
+
+        Some(error)
+    }
+
+    #[cfg(feature = "replay")]
+    /// Only triage suspected resource IDs. This helps us to avoid ID collisions
+    /// upon creating new resources when re-playing a trace.
+    pub fn device_maintain_ids<A: HalApi>(&self, device_id: DeviceId) -> Result<(), InvalidDevice> {
+        let hub = A::hub(self);
+        let mut token = Token::root();
+        let (device_guard, mut token) = hub.devices.read(&mut token);
+        let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
+        device.lock_life(&mut token).triage_suspected(
+            hub,
+            &device.trackers,
+            #[cfg(feature = "trace")]
+            None,
+            &mut token,
+        );
+        Ok(())
+    }
+
+    /// Check `device_id` for freeable resources and completed buffer mappings.
+    ///
+    /// Return `queue_empty` indicating whether there are more queue submissions still in flight.
+    pub fn device_poll<A: HalApi>(
+        &self,
+        device_id: DeviceId,
+        maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
+    ) -> Result<bool, WaitIdleError> {
+        let (closures, queue_empty) = {
+            if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
+                if submission_index.queue_id != device_id {
+                    return Err(WaitIdleError::WrongSubmissionIndex(
+                        submission_index.queue_id,
+                        device_id,
+                    ));
+                }
+            }
+
+            let hub = A::hub(self);
+            let mut token = Token::root();
+            let (device_guard, mut token) = hub.devices.read(&mut token);
+            device_guard
+                .get(device_id)
+                .map_err(|_| DeviceError::Invalid)?
+                .maintain(hub, maintain, &mut token)?
+        };
+
+        closures.fire();
+
+        Ok(queue_empty)
+    }
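`device_poll` above collects user callbacks while the hub locks are held and only fires them once the locks are gone, the same discipline `buffer_map_async` documents further down. A runnable toy of that pattern (`PendingClosures` is our stand-in for the crate's `UserClosures`):

```rust
// Callbacks gathered under a lock, fired only after the guards are gone,
// so user code may re-enter the API without deadlocking.
#[derive(Default)]
struct PendingClosures(Vec<Box<dyn FnOnce()>>);

impl PendingClosures {
    fn push(&mut self, f: impl FnOnce() + 'static) {
        self.0.push(Box::new(f));
    }
    fn fire(self) {
        for f in self.0 {
            f(); // no locks are held at this point
        }
    }
}

fn main() {
    let closures = {
        // imagine device/buffer guards alive inside this block
        let mut pending = PendingClosures::default();
        pending.push(|| println!("buffer mapping completed"));
        pending
    }; // guards dropped here
    closures.fire();
}
```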
+
+    /// Poll all devices belonging to the backend `A`.
+    ///
+    /// If `force_wait` is true, block until all buffer mappings are done.
+    ///
+    /// Return `all_queue_empty` indicating whether there are more queue
+    /// submissions still in flight.
+    fn poll_devices<A: HalApi>(
+        &self,
+        force_wait: bool,
+        closures: &mut UserClosures,
+    ) -> Result<bool, WaitIdleError> {
+        profiling::scope!("poll_devices");
+
+        let hub = A::hub(self);
+        let mut devices_to_drop = vec![];
+        let mut all_queue_empty = true;
+        {
+            let mut token = Token::root();
+            let (device_guard, mut token) = hub.devices.read(&mut token);
+
+            for (id, device) in device_guard.iter(A::VARIANT) {
+                let maintain = if force_wait {
+                    wgt::Maintain::Wait
+                } else {
+                    wgt::Maintain::Poll
+                };
+                let (cbs, queue_empty) = device.maintain(hub, maintain, &mut token)?;
+                all_queue_empty = all_queue_empty && queue_empty;
+
+                // If the device's own `RefCount` clone is the only one left, and
+                // its submission queue is empty, then it can be freed.
+                if queue_empty && device.ref_count.load() == 1 {
+                    devices_to_drop.push(id);
+                }
+                closures.extend(cbs);
+            }
+        }
+
+        for device_id in devices_to_drop {
+            self.exit_device::<A>(device_id);
+        }
+
+        Ok(all_queue_empty)
+    }
+
+    /// Poll all devices on all backends.
+    ///
+    /// This is the implementation of `wgpu::Instance::poll_all`.
+    ///
+    /// Return `all_queue_empty` indicating whether there are more queue
+    /// submissions still in flight.
+    pub fn poll_all_devices(&self, force_wait: bool) -> Result<bool, WaitIdleError> {
+        let mut closures = UserClosures::default();
+        let mut all_queue_empty = true;
+
+        #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
+        {
+            all_queue_empty = self.poll_devices::<hal::api::Vulkan>(force_wait, &mut closures)?
+                && all_queue_empty;
+        }
+        #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
+        {
+            all_queue_empty =
+                self.poll_devices::<hal::api::Metal>(force_wait, &mut closures)? && all_queue_empty;
+        }
+        #[cfg(all(feature = "dx12", windows))]
+        {
+            all_queue_empty =
+                self.poll_devices::<hal::api::Dx12>(force_wait, &mut closures)? && all_queue_empty;
+        }
+        #[cfg(all(feature = "dx11", windows))]
+        {
+            all_queue_empty =
+                self.poll_devices::<hal::api::Dx11>(force_wait, &mut closures)? && all_queue_empty;
+        }
+        #[cfg(feature = "gles")]
+        {
+            all_queue_empty =
+                self.poll_devices::<hal::api::Gles>(force_wait, &mut closures)? && all_queue_empty;
+        }
+
+        closures.fire();
+
+        Ok(all_queue_empty)
+    }
+
+    pub fn device_label<A: HalApi>(&self, id: DeviceId) -> String {
+        A::hub(self).devices.label_for_resource(id)
+    }
+
+    pub fn device_start_capture<A: HalApi>(&self, id: DeviceId) {
+        let hub = A::hub(self);
+        let mut token = Token::root();
+        let (device_guard, _) = hub.devices.read(&mut token);
+        if let Ok(device) = device_guard.get(id) {
+            unsafe { device.raw.start_capture() };
+        }
+    }
+
+    pub fn device_stop_capture<A: HalApi>(&self, id: DeviceId) {
+        let hub = A::hub(self);
+        let mut token = Token::root();
+        let (device_guard, _) = hub.devices.read(&mut token);
+        if let Ok(device) = device_guard.get(id) {
+            unsafe { device.raw.stop_capture() };
+        }
+    }
+
+    pub fn device_drop<A: HalApi>(&self, device_id: DeviceId) {
+        profiling::scope!("Device::drop");
+        log::debug!("device {:?} is dropped", device_id);
+
+        let hub = A::hub(self);
+        let mut token = Token::root();
+
+        // For now, just drop the `RefCount` in `device.life_guard`, which
+        // stands for the user's reference to the device. We'll take care of
+        // cleaning up the device when we're polled, once its queue submissions
+        // have completed and it is no longer needed by other resources.
+        let (mut device_guard, _) = hub.devices.write(&mut token);
+        if let Ok(device) = device_guard.get_mut(device_id) {
+            device.life_guard.ref_count.take().unwrap();
+        }
+    }
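Worth noting: `device_drop` only releases the user's `RefCount`; the device is actually freed later, from `poll_devices`/`exit_device`, once its queue drains. A hedged sketch of that teardown sequence, with the same hypothetical names as above:

    // Hypothetical teardown: release the user's reference, then poll with
    // force_wait = true so the device can be observed idle and freed.
    global.device_drop::<hal::api::Vulkan>(device_id);
    let _all_queue_empty = global.poll_all_devices(true)?;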
+
+    /// Exit the unreferenced, inactive device `device_id`.
+    fn exit_device<A: HalApi>(&self, device_id: DeviceId) {
+        let hub = A::hub(self);
+        let mut token = Token::root();
+        let mut free_adapter_id = None;
+        {
+            let (device, mut _token) = hub.devices.unregister(device_id, &mut token);
+            if let Some(mut device) = device {
+                // The things `Device::prepare_to_die` takes care of are mostly
+                // unnecessary here. We know our queue is empty, so we don't
+                // need to wait for submissions or triage them. We know we were
+                // just polled, so `life_tracker.free_resources` is empty.
+                debug_assert!(device.lock_life(&mut _token).queue_empty());
+                device.pending_writes.deactivate();
+
+                // Adapter is only referenced by the device and itself.
+                // This isn't a robust way to destroy them, we should find a better one.
+                if device.adapter_id.ref_count.load() == 1 {
+                    free_adapter_id = Some(device.adapter_id.value.0);
+                }
+
+                device.dispose();
+            }
+        }
+
+        // Free the adapter now that we've dropped the `Device` token.
+        if let Some(free_adapter_id) = free_adapter_id {
+            let _ = hub.adapters.unregister(free_adapter_id, &mut token);
+        }
+    }
+
+    pub fn buffer_map_async<A: HalApi>(
+        &self,
+        buffer_id: id::BufferId,
+        range: Range<BufferAddress>,
+        op: BufferMapOperation,
+    ) -> BufferAccessResult {
+        // User callbacks must not be called while holding buffer_map_async_inner's locks, so we
+        // defer the error callback if it needs to be called immediately (typically when running
+        // into errors).
+        if let Err((op, err)) = self.buffer_map_async_inner::<A>(buffer_id, range, op) {
+            op.callback.call(Err(err.clone()));
+
+            return Err(err);
+        }
+
+        Ok(())
+    }
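For illustration, a sketch of the expected calling pattern; the ids are hypothetical, and the callback construction assumes `resource::BufferMapCallback::from_rust`:

    // Hypothetical read-back request: the callback fires from a later poll,
    // never from inside buffer_map_async itself (unless validation fails first).
    let op = resource::BufferMapOperation {
        host: HostMap::Read,
        callback: resource::BufferMapCallback::from_rust(Box::new(|status| {
            status.expect("buffer map failed");
        })),
    };
    global.buffer_map_async::<hal::api::Vulkan>(buffer_id, 0..256, op)?;
    global.device_poll::<hal::api::Vulkan>(device_id, wgt::Maintain::Wait)?;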
+
+    // Returns the mapping callback in case of error so that the callback can be fired outside
+    // of the locks that are held in this function.
+    fn buffer_map_async_inner<A: HalApi>(
+        &self,
+        buffer_id: id::BufferId,
+        range: Range<BufferAddress>,
+        op: BufferMapOperation,
+    ) -> Result<(), (BufferMapOperation, BufferAccessError)> {
+        profiling::scope!("Buffer::map_async");
+
+        let hub = A::hub(self);
+        let mut token = Token::root();
+        let (device_guard, mut token) = hub.devices.read(&mut token);
+        let (pub_usage, internal_use) = match op.host {
+            HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ),
+            HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
+        };
+
+        if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+            return Err((op, BufferAccessError::UnalignedRange));
+        }
+
+        let (device_id, ref_count) = {
+            let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+            let buffer = buffer_guard
+                .get_mut(buffer_id)
+                .map_err(|_| BufferAccessError::Invalid);
+
+            let buffer = match buffer {
+                Ok(b) => b,
+                Err(e) => {
+                    return Err((op, e));
+                }
+            };
+
+            if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) {
+                return Err((op, e.into()));
+            }
+
+            if range.start > range.end {
+                return Err((
+                    op,
+                    BufferAccessError::NegativeRange {
+                        start: range.start,
+                        end: range.end,
+                    },
+                ));
+            }
+            if range.end > buffer.size {
+                return Err((
+                    op,
+                    BufferAccessError::OutOfBoundsOverrun {
+                        index: range.end,
+                        max: buffer.size,
+                    },
+                ));
+            }
+
+            buffer.map_state = match buffer.map_state {
+                resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
+                    return Err((op, BufferAccessError::AlreadyMapped));
+                }
+                resource::BufferMapState::Waiting(_) => {
+                    return Err((op, BufferAccessError::MapAlreadyPending));
+                }
+                resource::BufferMapState::Idle => {
+                    resource::BufferMapState::Waiting(resource::BufferPendingMapping {
+                        range,
+                        op,
+                        _parent_ref_count: buffer.life_guard.add_ref(),
+                    })
+                }
+            };
+            log::debug!("Buffer {:?} map state -> Waiting", buffer_id);
+
+            let device = &device_guard[buffer.device_id.value];
+
+            let ret = (buffer.device_id.value, buffer.life_guard.add_ref());
+
+            let mut trackers = device.trackers.lock();
+            trackers
+                .buffers
+                .set_single(&*buffer_guard, buffer_id, internal_use);
+            trackers.buffers.drain();
+
+            ret
+        };
+
+        let device = &device_guard[device_id];
+
+        device
+            .lock_life(&mut token)
+            .map(id::Valid(buffer_id), ref_count);
+
+        Ok(())
+    }
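The alignment check above encodes two distinct rules; a small self-contained restatement, using the real `wgt` constants (`MAP_ALIGNMENT` is 8, `COPY_BUFFER_ALIGNMENT` is 4):

    // A mapped range must start on an 8-byte boundary and end on a 4-byte boundary.
    fn range_is_mappable(range: &std::ops::Range<wgt::BufferAddress>) -> bool {
        range.start % wgt::MAP_ALIGNMENT == 0 && range.end % wgt::COPY_BUFFER_ALIGNMENT == 0
    }
    // e.g. 8..12 is accepted, while 4..12 is rejected with UnalignedRange.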
+
+    pub fn buffer_get_mapped_range<A: HalApi>(
+        &self,
+        buffer_id: id::BufferId,
+        offset: BufferAddress,
+        size: Option<BufferAddress>,
+    ) -> Result<(*mut u8, u64), BufferAccessError> {
+        profiling::scope!("Buffer::get_mapped_range");
+
+        let hub = A::hub(self);
+        let mut token = Token::root();
+        let (buffer_guard, _) = hub.buffers.read(&mut token);
+        let buffer = buffer_guard
+            .get(buffer_id)
+            .map_err(|_| BufferAccessError::Invalid)?;
+
+        let range_size = if let Some(size) = size {
+            size
+        } else if offset > buffer.size {
+            0
+        } else {
+            buffer.size - offset
+        };
+
+        if offset % wgt::MAP_ALIGNMENT != 0 {
+            return Err(BufferAccessError::UnalignedOffset { offset });
+        }
+        if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+            return Err(BufferAccessError::UnalignedRangeSize { range_size });
+        }
+
+        match buffer.map_state {
+            resource::BufferMapState::Init { ptr, .. } => {
+                // offset (u64) can not be < 0, so no need to validate the lower bound
+                if offset + range_size > buffer.size {
+                    return Err(BufferAccessError::OutOfBoundsOverrun {
+                        index: offset + range_size - 1,
+                        max: buffer.size,
+                    });
+                }
+                unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) }
+            }
+            resource::BufferMapState::Active { ptr, ref range, .. } => {
+                if offset < range.start {
+                    return Err(BufferAccessError::OutOfBoundsUnderrun {
+                        index: offset,
+                        min: range.start,
+                    });
+                }
+                if offset + range_size > range.end {
+                    return Err(BufferAccessError::OutOfBoundsOverrun {
+                        index: offset + range_size - 1,
+                        max: range.end,
+                    });
+                }
+                // ptr points to the beginning of the range we mapped in map_async
+                // rather than the beginning of the buffer.
+                let relative_offset = (offset - range.start) as isize;
+                unsafe { Ok((ptr.as_ptr().offset(relative_offset), range_size)) }
+            }
+            resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => {
+                Err(BufferAccessError::NotMapped)
+            }
+        }
+    }
+
+    fn buffer_unmap_inner<A: HalApi>(
+        &self,
+        buffer_id: id::BufferId,
+        buffer: &mut Buffer<A>,
+        device: &mut Device<A>,
+    ) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
+        log::debug!("Buffer {:?} map state -> Idle", buffer_id);
+        match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
+            resource::BufferMapState::Init {
+                ptr,
+                stage_buffer,
+                needs_flush,
+            } => {
+                #[cfg(feature = "trace")]
+                if let Some(ref trace) = device.trace {
+                    let mut trace = trace.lock();
+                    let data = trace.make_binary("bin", unsafe {
+                        std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize)
+                    });
+                    trace.add(trace::Action::WriteBuffer {
+                        id: buffer_id,
+                        data,
+                        range: 0..buffer.size,
+                        queued: true,
+                    });
+                }
+                let _ = ptr;
+                if needs_flush {
+                    unsafe {
+                        device
+                            .raw
+                            .flush_mapped_ranges(&stage_buffer, iter::once(0..buffer.size));
+                    }
+                }
+
+                let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?;
+
+                buffer.life_guard.use_at(device.active_submission_index + 1);
+                let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy {
+                    src_offset: 0,
+                    dst_offset: 0,
+                    size,
+                });
+                let transition_src = hal::BufferBarrier {
+                    buffer: &stage_buffer,
+                    usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
+                };
+                let transition_dst = hal::BufferBarrier {
+                    buffer: raw_buf,
+                    usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
+                };
+                let encoder = device.pending_writes.activate();
+                unsafe {
+                    encoder.transition_buffers(
+                        iter::once(transition_src).chain(iter::once(transition_dst)),
+                    );
+                    if buffer.size > 0 {
+                        encoder.copy_buffer_to_buffer(&stage_buffer, raw_buf, region.into_iter());
+                    }
+                }
+                device
+                    .pending_writes
+                    .consume_temp(queue::TempResource::Buffer(stage_buffer));
+                device.pending_writes.dst_buffers.insert(buffer_id);
+            }
+            resource::BufferMapState::Idle => {
+                return Err(BufferAccessError::NotMapped);
+            }
+            resource::BufferMapState::Waiting(pending) => {
+                return Ok(Some((pending.op, Err(BufferAccessError::MapAborted))));
+            }
+            resource::BufferMapState::Active { ptr, range, host } => {
+                if host == HostMap::Write {
+                    #[cfg(feature = "trace")]
+                    if let Some(ref trace) = device.trace {
+                        let mut trace = trace.lock();
+                        let size = range.end - range.start;
+                        let data = trace.make_binary("bin", unsafe {
+                            std::slice::from_raw_parts(ptr.as_ptr(), size as usize)
+                        });
+                        trace.add(trace::Action::WriteBuffer {
+                            id: buffer_id,
+                            data,
+                            range: range.clone(),
+                            queued: false,
+                        });
+                    }
+                    let _ = (ptr, range);
+                }
+                unsafe {
+                    device
+                        .raw
+                        .unmap_buffer(buffer.raw.as_ref().unwrap())
+                        .map_err(DeviceError::from)?
+                };
+            }
+        }
+        Ok(None)
+    }
+
+    pub fn buffer_unmap<A: HalApi>(&self, buffer_id: id::BufferId) -> BufferAccessResult {
+        profiling::scope!("unmap", "Buffer");
+
+        let closure;
+        {
+            // Restrict the locks to this scope.
+ let hub = A::hub(self); + let mut token = Token::root(); + + let (mut device_guard, mut token) = hub.devices.write(&mut token); + let (mut buffer_guard, _) = hub.buffers.write(&mut token); + let buffer = buffer_guard + .get_mut(buffer_id) + .map_err(|_| BufferAccessError::Invalid)?; + let device = &mut device_guard[buffer.device_id.value]; + + closure = self.buffer_unmap_inner(buffer_id, buffer, device) + } + + // Note: outside the scope where locks are held when calling the callback + if let Some((operation, status)) = closure? { + operation.callback.call(status); + } + Ok(()) + } +} diff --git a/third_party/rust/wgpu-core/src/device/life.rs b/third_party/rust/wgpu-core/src/device/life.rs index 24995e719ae8..75491d10cb9d 100644 --- a/third_party/rust/wgpu-core/src/device/life.rs +++ b/third_party/rust/wgpu-core/src/device/life.rs @@ -5,8 +5,11 @@ use crate::{ queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource}, DeviceError, }, - hub::{GlobalIdentityHandlerFactory, HalApi, Hub, Token}, - id, resource, + hal_api::HalApi, + hub::{Hub, Token}, + id, + identity::GlobalIdentityHandlerFactory, + resource, track::{BindGroupStates, RenderBundleScope, Tracker}, RefCount, Stored, SubmissionIndex, }; diff --git a/third_party/rust/wgpu-core/src/device/mod.rs b/third_party/rust/wgpu-core/src/device/mod.rs index 36d353b6728f..2f749a449552 100644 --- a/third_party/rust/wgpu-core/src/device/mod.rs +++ b/third_party/rust/wgpu-core/src/device/mod.rs @@ -1,35 +1,30 @@ use crate::{ - binding_model, command, conv, + binding_model, device::life::WaitIdleError, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Hub, Input, InvalidId, Storage, Token}, + hal_api::HalApi, + hub::Hub, id, - init_tracker::{ - BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange, - TextureInitTracker, TextureInitTrackerAction, - }, - instance::{self, Adapter, Surface}, - pipeline, present, - resource::{self, BufferAccessResult, BufferMapState, TextureViewNotRenderableReason}, + identity::{GlobalIdentityHandlerFactory, Input}, + resource::{Buffer, BufferAccessResult}, resource::{BufferAccessError, BufferMapOperation}, - track::{BindGroupStates, TextureSelector, Tracker}, - validation::{self, check_buffer_usage, check_texture_usage}, - FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored, - SubmissionIndex, DOWNLEVEL_ERROR_MESSAGE, + Label, DOWNLEVEL_ERROR_MESSAGE, }; use arrayvec::ArrayVec; -use hal::{CommandEncoder as _, Device as _}; -use parking_lot::{Mutex, MutexGuard}; +use hal::Device as _; use smallvec::SmallVec; use thiserror::Error; -use wgt::{BufferAddress, TextureFormat, TextureSampleType, TextureViewDimension}; +use wgt::{BufferAddress, TextureFormat}; -use std::{borrow::Cow, iter, mem, num::NonZeroU32, ops::Range, ptr}; +use std::{iter, num::NonZeroU32, ptr}; +pub mod global; mod life; pub mod queue; +pub mod resource; #[cfg(any(feature = "trace", feature = "replay"))] pub mod trace; +pub use resource::Device; pub const SHADER_STAGE_COUNT: usize = 3; // Should be large enough for the largest possible texture row. This @@ -197,7 +192,7 @@ impl UserClosures { fn map_buffer( raw: &A::Device, - buffer: &mut resource::Buffer, + buffer: &mut Buffer, offset: BufferAddress, size: BufferAddress, kind: HostMap, @@ -289,3099 +284,6 @@ impl CommandAllocator { } } -/// Structure describing a logical device. Some members are internally mutable, -/// stored behind mutexes. 
-///
-/// TODO: establish clear order of locking for these:
-/// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`,
-/// `render_passes`, `pending_writes`, `trace`.
-///
-/// Currently, the rules are:
-/// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system
-/// 1. `self.trackers` is locked last (unenforced)
-/// 1. `self.trace` is locked last (unenforced)
-pub struct Device<A: HalApi> {
-    pub(crate) raw: A::Device,
-    pub(crate) adapter_id: Stored<id::AdapterId>,
-    pub(crate) queue: A::Queue,
-    pub(crate) zero_buffer: A::Buffer,
-    //pub(crate) cmd_allocator: command::CommandAllocator<A>,
-    //mem_allocator: Mutex<alloc::MemoryAllocator<A>>,
-    //desc_allocator: Mutex<descriptor::DescriptorAllocator<A>>,
-    //Note: The submission index here corresponds to the last submission that is done.
-    pub(crate) life_guard: LifeGuard,
-
-    /// A clone of `life_guard.ref_count`.
-    ///
-    /// Holding a separate clone of the `RefCount` here lets us tell whether the
-    /// device is referenced by other resources, even if `life_guard.ref_count`
-    /// was set to `None` by a call to `device_drop`.
-    ref_count: RefCount,
-
-    command_allocator: Mutex<CommandAllocator<A>>,
-    pub(crate) active_submission_index: SubmissionIndex,
-    fence: A::Fence,
-
-    /// All live resources allocated with this [`Device`].
-    ///
-    /// Has to be locked temporarily only (locked last)
-    pub(crate) trackers: Mutex<Tracker<A>>,
-    // Life tracker should be locked right after the device and before anything else.
-    life_tracker: Mutex<life::LifetimeTracker<A>>,
-    /// Temporary storage for resource management functions. Cleared at the end
-    /// of every call (unless an error occurs).
-    temp_suspected: life::SuspectedResources,
-    pub(crate) alignments: hal::Alignments,
-    pub(crate) limits: wgt::Limits,
-    pub(crate) features: wgt::Features,
-    pub(crate) downlevel: wgt::DownlevelCapabilities,
-    // TODO: move this behind another mutex. This would allow several methods to
-    // switch to borrow Device immutably, such as `write_buffer`, `write_texture`,
-    // and `buffer_unmap`.
-    pending_writes: queue::PendingWrites<A>,
-    #[cfg(feature = "trace")]
-    pub(crate) trace: Option<Mutex<trace::Trace>>,
-}
-
-#[derive(Clone, Debug, Error)]
-#[non_exhaustive]
-pub enum CreateDeviceError {
-    #[error("Not enough memory left")]
-    OutOfMemory,
-    #[error("Failed to create internal buffer for initializing textures")]
-    FailedToCreateZeroBuffer(#[from] DeviceError),
-}
-
-impl<A: HalApi> Device<A> {
-    pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
-        if self.features.contains(feature) {
-            Ok(())
-        } else {
-            Err(MissingFeatures(feature))
-        }
-    }
-
-    pub(crate) fn require_downlevel_flags(
-        &self,
-        flags: wgt::DownlevelFlags,
-    ) -> Result<(), MissingDownlevelFlags> {
-        if self.downlevel.flags.contains(flags) {
-            Ok(())
-        } else {
-            Err(MissingDownlevelFlags(flags))
-        }
-    }
-}
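These two guards are the standard gate used throughout the device code; a hypothetical call site (the chosen feature and flag are illustrative) might look like:

    // Hypothetical gate before creating a timestamp query set:
    device.require_features(wgt::Features::TIMESTAMP_QUERY)?;
    device.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?;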
-
-impl<A: HalApi> Device<A> {
-    pub(crate) fn new(
-        open: hal::OpenDevice<A>,
-        adapter_id: Stored<id::AdapterId>,
-        alignments: hal::Alignments,
-        downlevel: wgt::DownlevelCapabilities,
-        desc: &DeviceDescriptor,
-        trace_path: Option<&std::path::Path>,
-    ) -> Result<Self, CreateDeviceError> {
-        #[cfg(not(feature = "trace"))]
-        if let Some(_) = trace_path {
-            log::error!("Feature 'trace' is not enabled");
-        }
-        let fence =
-            unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?;
-
-        let mut com_alloc = CommandAllocator {
-            free_encoders: Vec::new(),
-        };
-        let pending_encoder = com_alloc
-            .acquire_encoder(&open.device, &open.queue)
-            .map_err(|_| CreateDeviceError::OutOfMemory)?;
-        let mut pending_writes = queue::PendingWrites::<A>::new(pending_encoder);
-
-        // Create zeroed buffer used for texture clears.
-        let zero_buffer = unsafe {
-            open.device
-                .create_buffer(&hal::BufferDescriptor {
-                    label: Some("(wgpu internal) zero init buffer"),
-                    size: ZERO_BUFFER_SIZE,
-                    usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST,
-                    memory_flags: hal::MemoryFlags::empty(),
-                })
-                .map_err(DeviceError::from)?
-        };
-        pending_writes.activate();
-        unsafe {
-            pending_writes
-                .command_encoder
-                .transition_buffers(iter::once(hal::BufferBarrier {
-                    buffer: &zero_buffer,
-                    usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
-                }));
-            pending_writes
-                .command_encoder
-                .clear_buffer(&zero_buffer, 0..ZERO_BUFFER_SIZE);
-            pending_writes
-                .command_encoder
-                .transition_buffers(iter::once(hal::BufferBarrier {
-                    buffer: &zero_buffer,
-                    usage: hal::BufferUses::COPY_DST..hal::BufferUses::COPY_SRC,
-                }));
-        }
-
-        let life_guard = LifeGuard::new("<device>");
-        let ref_count = life_guard.add_ref();
-        Ok(Self {
-            raw: open.device,
-            adapter_id,
-            queue: open.queue,
-            zero_buffer,
-            life_guard,
-            ref_count,
-            command_allocator: Mutex::new(com_alloc),
-            active_submission_index: 0,
-            fence,
-            trackers: Mutex::new(Tracker::new()),
-            life_tracker: Mutex::new(life::LifetimeTracker::new()),
-            temp_suspected: life::SuspectedResources::default(),
-            #[cfg(feature = "trace")]
-            trace: trace_path.and_then(|path| match trace::Trace::new(path) {
-                Ok(mut trace) => {
-                    trace.add(trace::Action::Init {
-                        desc: desc.clone(),
-                        backend: A::VARIANT,
-                    });
-                    Some(Mutex::new(trace))
-                }
-                Err(e) => {
-                    log::error!("Unable to start a trace in '{:?}': {:?}", path, e);
-                    None
-                }
-            }),
-            alignments,
-            limits: desc.limits.clone(),
-            features: desc.features,
-            downlevel,
-            pending_writes,
-        })
-    }
-
-    fn lock_life<'this, 'token: 'this>(
-        &'this self,
-        //TODO: fix this - the token has to be borrowed for the lock
-        _token: &mut Token<'token, Self>,
-    ) -> MutexGuard<'this, life::LifetimeTracker<A>> {
-        self.life_tracker.lock()
-    }
-
-    /// Check this device for completed commands.
-    ///
-    /// The `maintain` argument tells how the maintenance function should behave, either
-    /// blocking or just polling the current state of the gpu.
-    ///
-    /// Return a pair `(closures, queue_empty)`, where:
-    ///
-    /// - `closures` is a list of actions to take: mapping buffers, notifying the user
-    ///
-    /// - `queue_empty` is a boolean indicating whether there are more queue
-    ///   submissions still in flight. (We have to take the locks needed to
-    ///   produce this information for other reasons, so we might as well just
-    ///   return it to our callers.)
-    fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
-        &'this self,
-        hub: &Hub<A, G>,
-        maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
-        token: &mut Token<'token, Self>,
-    ) -> Result<(UserClosures, bool), WaitIdleError> {
-        profiling::scope!("Device::maintain");
-        let mut life_tracker = self.lock_life(token);
-
-        // Normally, `temp_suspected` exists only to save heap
-        // allocations: it's cleared at the start of the function
-        // call, and cleared by the end. But `Global::queue_submit` is
-        // fallible; if it exits early, it may leave some resources in
-        // `temp_suspected`.
- life_tracker - .suspected_resources - .extend(&self.temp_suspected); - - life_tracker.triage_suspected( - hub, - &self.trackers, - #[cfg(feature = "trace")] - self.trace.as_ref(), - token, - ); - life_tracker.triage_mapped(hub, token); - - let last_done_index = if maintain.is_wait() { - let index_to_wait_for = match maintain { - wgt::Maintain::WaitForSubmissionIndex(submission_index) => { - // We don't need to check to see if the queue id matches - // as we already checked this from inside the poll call. - submission_index.index - } - _ => self.active_submission_index, - }; - unsafe { - self.raw - .wait(&self.fence, index_to_wait_for, CLEANUP_WAIT_MS) - .map_err(DeviceError::from)? - }; - index_to_wait_for - } else { - unsafe { - self.raw - .get_fence_value(&self.fence) - .map_err(DeviceError::from)? - } - }; - - let submission_closures = - life_tracker.triage_submissions(last_done_index, &self.command_allocator); - let mapping_closures = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token); - life_tracker.cleanup(&self.raw); - - let closures = UserClosures { - mappings: mapping_closures, - submissions: submission_closures, - }; - Ok((closures, life_tracker.queue_empty())) - } - - fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( - &'this mut self, - hub: &Hub, - trackers: &Tracker, - token: &mut Token<'token, Self>, - ) { - self.temp_suspected.clear(); - // As the tracker is cleared/dropped, we need to consider all the resources - // that it references for destruction in the next GC pass. - { - let (bind_group_guard, mut token) = hub.bind_groups.read(token); - let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); - let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, mut token) = hub.textures.read(&mut token); - let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); - let (sampler_guard, _) = hub.samplers.read(&mut token); - - for id in trackers.buffers.used() { - if buffer_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.buffers.push(id); - } - } - for id in trackers.textures.used() { - if texture_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.textures.push(id); - } - } - for id in trackers.views.used() { - if texture_view_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.texture_views.push(id); - } - } - for id in trackers.bind_groups.used() { - if bind_group_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.bind_groups.push(id); - } - } - for id in trackers.samplers.used() { - if sampler_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.samplers.push(id); - } - } - for id in trackers.compute_pipelines.used() { - if compute_pipe_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.compute_pipelines.push(id); - } - } - for id in trackers.render_pipelines.used() { - if render_pipe_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.render_pipelines.push(id); - } - } - for id in trackers.query_sets.used() { - if query_set_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.query_sets.push(id); - } - } - } - - self.lock_life(token) - .suspected_resources - .extend(&self.temp_suspected); - - self.temp_suspected.clear(); - } - - fn create_buffer( - &self, - self_id: id::DeviceId, - desc: &resource::BufferDescriptor, - 
transient: bool, - ) -> Result, resource::CreateBufferError> { - debug_assert_eq!(self_id.backend(), A::VARIANT); - - if desc.size > self.limits.max_buffer_size { - return Err(resource::CreateBufferError::MaxBufferSize { - requested: desc.size, - maximum: self.limits.max_buffer_size, - }); - } - - if desc.usage.contains(wgt::BufferUsages::INDEX) - && desc.usage.contains( - wgt::BufferUsages::VERTEX - | wgt::BufferUsages::UNIFORM - | wgt::BufferUsages::INDIRECT - | wgt::BufferUsages::STORAGE, - ) - { - self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?; - } - - let mut usage = conv::map_buffer_usage(desc.usage); - - if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { - return Err(resource::CreateBufferError::InvalidUsage(desc.usage)); - } - - if !self - .features - .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS) - { - use wgt::BufferUsages as Bu; - let write_mismatch = desc.usage.contains(Bu::MAP_WRITE) - && !(Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage); - let read_mismatch = desc.usage.contains(Bu::MAP_READ) - && !(Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage); - if write_mismatch || read_mismatch { - return Err(resource::CreateBufferError::UsageMismatch(desc.usage)); - } - } - - if desc.mapped_at_creation { - if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 { - return Err(resource::CreateBufferError::UnalignedSize); - } - if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { - // we are going to be copying into it, internally - usage |= hal::BufferUses::COPY_DST; - } - } else { - // We are required to zero out (initialize) all memory. This is done - // on demand using clear_buffer which requires write transfer usage! - usage |= hal::BufferUses::COPY_DST; - } - - let actual_size = if desc.size == 0 { - wgt::COPY_BUFFER_ALIGNMENT - } else if desc.usage.contains(wgt::BufferUsages::VERTEX) { - // Bumping the size by 1 so that we can bind an empty range at the - // end of the buffer. 
- desc.size + 1 - } else { - desc.size - }; - let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT; - let aligned_size = if clear_remainder != 0 { - actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder - } else { - actual_size - }; - - let mut memory_flags = hal::MemoryFlags::empty(); - memory_flags.set(hal::MemoryFlags::TRANSIENT, transient); - - let hal_desc = hal::BufferDescriptor { - label: desc.label.borrow_option(), - size: aligned_size, - usage, - memory_flags, - }; - let buffer = unsafe { self.raw.create_buffer(&hal_desc) }.map_err(DeviceError::from)?; - - Ok(resource::Buffer { - raw: Some(buffer), - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - usage: desc.usage, - size: desc.size, - initialization_status: BufferInitTracker::new(desc.size), - sync_mapped_writes: None, - map_state: resource::BufferMapState::Idle, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - }) - } - - fn create_texture_from_hal( - &self, - hal_texture: A::Texture, - hal_usage: hal::TextureUses, - self_id: id::DeviceId, - desc: &resource::TextureDescriptor, - format_features: wgt::TextureFormatFeatures, - clear_mode: resource::TextureClearMode, - ) -> resource::Texture { - debug_assert_eq!(self_id.backend(), A::VARIANT); - - resource::Texture { - inner: resource::TextureInner::Native { - raw: Some(hal_texture), - }, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - desc: desc.map_label(|_| ()), - hal_usage, - format_features, - initialization_status: TextureInitTracker::new( - desc.mip_level_count, - desc.array_layer_count(), - ), - full_range: TextureSelector { - mips: 0..desc.mip_level_count, - layers: 0..desc.array_layer_count(), - }, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - clear_mode, - } - } - - fn create_texture( - &self, - self_id: id::DeviceId, - adapter: &Adapter, - desc: &resource::TextureDescriptor, - ) -> Result, resource::CreateTextureError> { - use resource::{CreateTextureError, TextureDimensionError}; - - if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { - return Err(CreateTextureError::InvalidUsage(desc.usage)); - } - - conv::check_texture_dimension_size( - desc.dimension, - desc.size, - desc.sample_count, - &self.limits, - )?; - - if desc.dimension != wgt::TextureDimension::D2 { - // Depth textures can only be 2D - if desc.format.is_depth_stencil_format() { - return Err(CreateTextureError::InvalidDepthDimension( - desc.dimension, - desc.format, - )); - } - // Renderable textures can only be 2D - if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) { - return Err(CreateTextureError::InvalidDimensionUsages( - wgt::TextureUsages::RENDER_ATTACHMENT, - desc.dimension, - )); - } - - // Compressed textures can only be 2D - if desc.format.is_compressed() { - return Err(CreateTextureError::InvalidCompressedDimension( - desc.dimension, - desc.format, - )); - } - } - - if desc.format.is_compressed() { - let (block_width, block_height) = desc.format.block_dimensions(); - - if desc.size.width % block_width != 0 { - return Err(CreateTextureError::InvalidDimension( - TextureDimensionError::NotMultipleOfBlockWidth { - width: desc.size.width, - block_width, - format: desc.format, - }, - )); - } - - if desc.size.height % block_height != 0 { - return Err(CreateTextureError::InvalidDimension( - TextureDimensionError::NotMultipleOfBlockHeight { - height: desc.size.height, - block_height, - format: desc.format, - }, - )); - } - } - - let 
format_features = self - .describe_format_features(adapter, desc.format) - .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?; - - if desc.sample_count > 1 { - if desc.mip_level_count != 1 { - return Err(CreateTextureError::InvalidMipLevelCount { - requested: desc.mip_level_count, - maximum: 1, - }); - } - - if desc.size.depth_or_array_layers != 1 { - return Err(CreateTextureError::InvalidDimension( - TextureDimensionError::MultisampledDepthOrArrayLayer( - desc.size.depth_or_array_layers, - ), - )); - } - - if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) { - return Err(CreateTextureError::InvalidMultisampledStorageBinding); - } - - if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) { - return Err(CreateTextureError::MultisampledNotRenderAttachment); - } - - if !format_features.flags.intersects( - wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 - | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2 - | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8 - | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X16, - ) { - return Err(CreateTextureError::InvalidMultisampledFormat(desc.format)); - } - - if !format_features - .flags - .sample_count_supported(desc.sample_count) - { - return Err(CreateTextureError::InvalidSampleCount( - desc.sample_count, - desc.format, - )); - }; - } - - let mips = desc.mip_level_count; - let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS); - if mips == 0 || mips > max_levels_allowed { - return Err(CreateTextureError::InvalidMipLevelCount { - requested: mips, - maximum: max_levels_allowed, - }); - } - - let missing_allowed_usages = desc.usage - format_features.allowed_usages; - if !missing_allowed_usages.is_empty() { - // detect downlevel incompatibilities - let wgpu_allowed_usages = desc - .format - .guaranteed_format_features(self.features) - .allowed_usages; - let wgpu_missing_usages = desc.usage - wgpu_allowed_usages; - return Err(CreateTextureError::InvalidFormatUsages( - missing_allowed_usages, - desc.format, - wgpu_missing_usages.is_empty(), - )); - } - - let mut hal_view_formats = vec![]; - for format in desc.view_formats.iter() { - if desc.format == *format { - continue; - } - if desc.format.remove_srgb_suffix() != format.remove_srgb_suffix() { - return Err(CreateTextureError::InvalidViewFormat(*format, desc.format)); - } - hal_view_formats.push(*format); - } - if !hal_view_formats.is_empty() { - self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?; - } - - // Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we - // wouldn't be able to initialize the texture. 
- let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into()) - | if desc.format.is_depth_stencil_format() { - hal::TextureUses::DEPTH_STENCIL_WRITE - } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) { - hal::TextureUses::COPY_DST // (set already) - } else { - // Use COPY_DST only if we can't use COLOR_TARGET - if format_features - .allowed_usages - .contains(wgt::TextureUsages::RENDER_ATTACHMENT) - && desc.dimension == wgt::TextureDimension::D2 - // Render targets dimension must be 2d - { - hal::TextureUses::COLOR_TARGET - } else { - hal::TextureUses::COPY_DST - } - }; - - let hal_desc = hal::TextureDescriptor { - label: desc.label.borrow_option(), - size: desc.size, - mip_level_count: desc.mip_level_count, - sample_count: desc.sample_count, - dimension: desc.dimension, - format: desc.format, - usage: hal_usage, - memory_flags: hal::MemoryFlags::empty(), - view_formats: hal_view_formats, - }; - - let raw_texture = unsafe { - self.raw - .create_texture(&hal_desc) - .map_err(DeviceError::from)? - }; - - let clear_mode = if hal_usage - .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) - { - let (is_color, usage) = if desc.format.is_depth_stencil_format() { - (false, hal::TextureUses::DEPTH_STENCIL_WRITE) - } else { - (true, hal::TextureUses::COLOR_TARGET) - }; - let dimension = match desc.dimension { - wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, - wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, - wgt::TextureDimension::D3 => unreachable!(), - }; - - let mut clear_views = SmallVec::new(); - for mip_level in 0..desc.mip_level_count { - for array_layer in 0..desc.size.depth_or_array_layers { - let desc = hal::TextureViewDescriptor { - label: Some("(wgpu internal) clear texture view"), - format: desc.format, - dimension, - usage, - range: wgt::ImageSubresourceRange { - aspect: wgt::TextureAspect::All, - base_mip_level: mip_level, - mip_level_count: Some(1), - base_array_layer: array_layer, - array_layer_count: Some(1), - }, - }; - clear_views.push( - unsafe { self.raw.create_texture_view(&raw_texture, &desc) } - .map_err(DeviceError::from)?, - ); - } - } - resource::TextureClearMode::RenderPass { - clear_views, - is_color, - } - } else { - resource::TextureClearMode::BufferCopy - }; - - let mut texture = self.create_texture_from_hal( - raw_texture, - hal_usage, - self_id, - desc, - format_features, - clear_mode, - ); - texture.hal_usage = hal_usage; - Ok(texture) - } - - fn create_texture_view( - &self, - texture: &resource::Texture, - texture_id: id::TextureId, - desc: &resource::TextureViewDescriptor, - ) -> Result, resource::CreateTextureViewError> { - let texture_raw = texture - .inner - .as_raw() - .ok_or(resource::CreateTextureViewError::InvalidTexture)?; - - // resolve TextureViewDescriptor defaults - // https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults - - let resolved_format = desc.format.unwrap_or_else(|| { - texture - .desc - .format - .aspect_specific_format(desc.range.aspect) - .unwrap_or(texture.desc.format) - }); - - let resolved_dimension = desc - .dimension - .unwrap_or_else(|| match texture.desc.dimension { - wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, - wgt::TextureDimension::D2 => { - if texture.desc.array_layer_count() == 1 { - wgt::TextureViewDimension::D2 - } else { - wgt::TextureViewDimension::D2Array - } - } - wgt::TextureDimension::D3 => wgt::TextureViewDimension::D3, - }); - - let resolved_mip_level_count = 
desc.range.mip_level_count.unwrap_or_else(|| { - texture - .desc - .mip_level_count - .saturating_sub(desc.range.base_mip_level) - }); - - let resolved_array_layer_count = - desc.range - .array_layer_count - .unwrap_or_else(|| match resolved_dimension { - wgt::TextureViewDimension::D1 - | wgt::TextureViewDimension::D2 - | wgt::TextureViewDimension::D3 => 1, - wgt::TextureViewDimension::Cube => 6, - wgt::TextureViewDimension::D2Array | wgt::TextureViewDimension::CubeArray => { - texture - .desc - .array_layer_count() - .saturating_sub(desc.range.base_array_layer) - } - }); - - // validate TextureViewDescriptor - - let aspects = hal::FormatAspects::new(texture.desc.format, desc.range.aspect); - if aspects.is_empty() { - return Err(resource::CreateTextureViewError::InvalidAspect { - texture_format: texture.desc.format, - requested_aspect: desc.range.aspect, - }); - } - - let format_is_good = if desc.range.aspect == wgt::TextureAspect::All { - resolved_format == texture.desc.format - || texture.desc.view_formats.contains(&resolved_format) - } else { - Some(resolved_format) - == texture - .desc - .format - .aspect_specific_format(desc.range.aspect) - }; - if !format_is_good { - return Err(resource::CreateTextureViewError::FormatReinterpretation { - texture: texture.desc.format, - view: resolved_format, - }); - } - - // check if multisampled texture is seen as anything but 2D - if texture.desc.sample_count > 1 && resolved_dimension != wgt::TextureViewDimension::D2 { - return Err( - resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension( - resolved_dimension, - ), - ); - } - - // check if the dimension is compatible with the texture - if texture.desc.dimension != resolved_dimension.compatible_texture_dimension() { - return Err( - resource::CreateTextureViewError::InvalidTextureViewDimension { - view: resolved_dimension, - texture: texture.desc.dimension, - }, - ); - } - - match resolved_dimension { - TextureViewDimension::D1 | TextureViewDimension::D2 | TextureViewDimension::D3 => { - if resolved_array_layer_count != 1 { - return Err(resource::CreateTextureViewError::InvalidArrayLayerCount { - requested: resolved_array_layer_count, - dim: resolved_dimension, - }); - } - } - TextureViewDimension::Cube => { - if resolved_array_layer_count != 6 { - return Err( - resource::CreateTextureViewError::InvalidCubemapTextureDepth { - depth: resolved_array_layer_count, - }, - ); - } - } - TextureViewDimension::CubeArray => { - if resolved_array_layer_count % 6 != 0 { - return Err( - resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth { - depth: resolved_array_layer_count, - }, - ); - } - } - _ => {} - } - - match resolved_dimension { - TextureViewDimension::Cube | TextureViewDimension::CubeArray => { - if texture.desc.size.width != texture.desc.size.height { - return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize); - } - } - _ => {} - } - - if resolved_mip_level_count == 0 { - return Err(resource::CreateTextureViewError::ZeroMipLevelCount); - } - - let mip_level_end = desc - .range - .base_mip_level - .saturating_add(resolved_mip_level_count); - - let level_end = texture.desc.mip_level_count; - if mip_level_end > level_end { - return Err(resource::CreateTextureViewError::TooManyMipLevels { - requested: mip_level_end, - total: level_end, - }); - } - - if resolved_array_layer_count == 0 { - return Err(resource::CreateTextureViewError::ZeroArrayLayerCount); - } - - let array_layer_end = desc - .range - .base_array_layer - 
.saturating_add(resolved_array_layer_count); - - let layer_end = texture.desc.array_layer_count(); - if array_layer_end > layer_end { - return Err(resource::CreateTextureViewError::TooManyArrayLayers { - requested: array_layer_end, - total: layer_end, - }); - }; - - // https://gpuweb.github.io/gpuweb/#abstract-opdef-renderable-texture-view - let render_extent = 'b: loop { - if !texture - .desc - .usage - .contains(wgt::TextureUsages::RENDER_ATTACHMENT) - { - break 'b Err(TextureViewNotRenderableReason::Usage(texture.desc.usage)); - } - - if resolved_dimension != TextureViewDimension::D2 { - break 'b Err(TextureViewNotRenderableReason::Dimension( - resolved_dimension, - )); - } - - if resolved_mip_level_count != 1 { - break 'b Err(TextureViewNotRenderableReason::MipLevelCount( - resolved_mip_level_count, - )); - } - - if resolved_array_layer_count != 1 { - break 'b Err(TextureViewNotRenderableReason::ArrayLayerCount( - resolved_array_layer_count, - )); - } - - if aspects != hal::FormatAspects::from(texture.desc.format) { - break 'b Err(TextureViewNotRenderableReason::Aspects(aspects)); - } - - break 'b Ok(texture - .desc - .compute_render_extent(desc.range.base_mip_level)); - }; - - // filter the usages based on the other criteria - let usage = { - let mask_copy = !(hal::TextureUses::COPY_SRC | hal::TextureUses::COPY_DST); - let mask_dimension = match resolved_dimension { - wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { - hal::TextureUses::RESOURCE - } - wgt::TextureViewDimension::D3 => { - hal::TextureUses::RESOURCE - | hal::TextureUses::STORAGE_READ - | hal::TextureUses::STORAGE_READ_WRITE - } - _ => hal::TextureUses::all(), - }; - let mask_mip_level = if resolved_mip_level_count == 1 { - hal::TextureUses::all() - } else { - hal::TextureUses::RESOURCE - }; - texture.hal_usage & mask_copy & mask_dimension & mask_mip_level - }; - - log::debug!( - "Create view for texture {:?} filters usages to {:?}", - texture_id, - usage - ); - - // use the combined depth-stencil format for the view - let format = if resolved_format.is_depth_stencil_component(texture.desc.format) { - texture.desc.format - } else { - resolved_format - }; - - let resolved_range = wgt::ImageSubresourceRange { - aspect: desc.range.aspect, - base_mip_level: desc.range.base_mip_level, - mip_level_count: Some(resolved_mip_level_count), - base_array_layer: desc.range.base_array_layer, - array_layer_count: Some(resolved_array_layer_count), - }; - - let hal_desc = hal::TextureViewDescriptor { - label: desc.label.borrow_option(), - format, - dimension: resolved_dimension, - usage, - range: resolved_range, - }; - - let raw = unsafe { - self.raw - .create_texture_view(texture_raw, &hal_desc) - .map_err(|_| resource::CreateTextureViewError::OutOfMemory)? 
- }; - - let selector = TextureSelector { - mips: desc.range.base_mip_level..mip_level_end, - layers: desc.range.base_array_layer..array_layer_end, - }; - - Ok(resource::TextureView { - raw, - parent_id: Stored { - value: id::Valid(texture_id), - ref_count: texture.life_guard.add_ref(), - }, - device_id: texture.device_id.clone(), - desc: resource::HalTextureViewDescriptor { - format: resolved_format, - dimension: resolved_dimension, - range: resolved_range, - }, - format_features: texture.format_features, - render_extent, - samples: texture.desc.sample_count, - selector, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - }) - } - - fn create_sampler( - &self, - self_id: id::DeviceId, - desc: &resource::SamplerDescriptor, - ) -> Result, resource::CreateSamplerError> { - if desc - .address_modes - .iter() - .any(|am| am == &wgt::AddressMode::ClampToBorder) - { - self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?; - } - - if desc.border_color == Some(wgt::SamplerBorderColor::Zero) { - self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?; - } - - if desc.lod_min_clamp < 0.0 { - return Err(resource::CreateSamplerError::InvalidLodMinClamp( - desc.lod_min_clamp, - )); - } - if desc.lod_max_clamp < desc.lod_min_clamp { - return Err(resource::CreateSamplerError::InvalidLodMaxClamp { - lod_min_clamp: desc.lod_min_clamp, - lod_max_clamp: desc.lod_max_clamp, - }); - } - - if desc.anisotropy_clamp < 1 { - return Err(resource::CreateSamplerError::InvalidAnisotropy( - desc.anisotropy_clamp, - )); - } - - if desc.anisotropy_clamp != 1 { - if !matches!(desc.min_filter, wgt::FilterMode::Linear) { - return Err( - resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { - filter_type: resource::SamplerFilterErrorType::MinFilter, - filter_mode: desc.min_filter, - anisotropic_clamp: desc.anisotropy_clamp, - }, - ); - } - if !matches!(desc.mag_filter, wgt::FilterMode::Linear) { - return Err( - resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { - filter_type: resource::SamplerFilterErrorType::MagFilter, - filter_mode: desc.mag_filter, - anisotropic_clamp: desc.anisotropy_clamp, - }, - ); - } - if !matches!(desc.mipmap_filter, wgt::FilterMode::Linear) { - return Err( - resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { - filter_type: resource::SamplerFilterErrorType::MipmapFilter, - filter_mode: desc.mipmap_filter, - anisotropic_clamp: desc.anisotropy_clamp, - }, - ); - } - } - - let anisotropy_clamp = if self - .downlevel - .flags - .contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING) - { - // Clamp anisotropy clamp to [1, 16] per the wgpu-hal interface - desc.anisotropy_clamp.min(16) - } else { - // If it isn't supported, set this unconditionally to 1 - 1 - }; - - //TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS - - let hal_desc = hal::SamplerDescriptor { - label: desc.label.borrow_option(), - address_modes: desc.address_modes, - mag_filter: desc.mag_filter, - min_filter: desc.min_filter, - mipmap_filter: desc.mipmap_filter, - lod_clamp: desc.lod_min_clamp..desc.lod_max_clamp, - compare: desc.compare, - anisotropy_clamp, - border_color: desc.border_color, - }; - - let raw = unsafe { - self.raw - .create_sampler(&hal_desc) - .map_err(DeviceError::from)? 
- }; - Ok(resource::Sampler { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - comparison: desc.compare.is_some(), - filtering: desc.min_filter == wgt::FilterMode::Linear - || desc.mag_filter == wgt::FilterMode::Linear, - }) - } - - fn create_shader_module<'a>( - &self, - self_id: id::DeviceId, - desc: &pipeline::ShaderModuleDescriptor<'a>, - source: pipeline::ShaderModuleSource<'a>, - ) -> Result, pipeline::CreateShaderModuleError> { - let (module, source) = match source { - #[cfg(feature = "wgsl")] - pipeline::ShaderModuleSource::Wgsl(code) => { - profiling::scope!("naga::wgsl::parse_str"); - let module = naga::front::wgsl::parse_str(&code).map_err(|inner| { - pipeline::CreateShaderModuleError::Parsing(pipeline::ShaderError { - source: code.to_string(), - label: desc.label.as_ref().map(|l| l.to_string()), - inner: Box::new(inner), - }) - })?; - (Cow::Owned(module), code.into_owned()) - } - pipeline::ShaderModuleSource::Naga(module) => (module, String::new()), - pipeline::ShaderModuleSource::Dummy(_) => panic!("found `ShaderModuleSource::Dummy`"), - }; - for (_, var) in module.global_variables.iter() { - match var.binding { - Some(ref br) if br.group >= self.limits.max_bind_groups => { - return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex { - bind: br.clone(), - group: br.group, - limit: self.limits.max_bind_groups, - }); - } - _ => continue, - }; - } - - use naga::valid::Capabilities as Caps; - profiling::scope!("naga::validate"); - - let mut caps = Caps::empty(); - caps.set( - Caps::PUSH_CONSTANT, - self.features.contains(wgt::Features::PUSH_CONSTANTS), - ); - caps.set( - Caps::FLOAT64, - self.features.contains(wgt::Features::SHADER_F64), - ); - caps.set( - Caps::PRIMITIVE_INDEX, - self.features - .contains(wgt::Features::SHADER_PRIMITIVE_INDEX), - ); - caps.set( - Caps::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, - self.features.contains( - wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, - ), - ); - caps.set( - Caps::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING, - self.features.contains( - wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING, - ), - ); - // TODO: This needs a proper wgpu feature - caps.set( - Caps::SAMPLER_NON_UNIFORM_INDEXING, - self.features.contains( - wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, - ), - ); - caps.set( - Caps::STORAGE_TEXTURE_16BIT_NORM_FORMATS, - self.features - .contains(wgt::Features::TEXTURE_FORMAT_16BIT_NORM), - ); - caps.set( - Caps::MULTIVIEW, - self.features.contains(wgt::Features::MULTIVIEW), - ); - caps.set( - Caps::EARLY_DEPTH_TEST, - self.features - .contains(wgt::Features::SHADER_EARLY_DEPTH_TEST), - ); - caps.set( - Caps::MULTISAMPLED_SHADING, - self.downlevel - .flags - .contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING), - ); - - let info = naga::valid::Validator::new(naga::valid::ValidationFlags::all(), caps) - .validate(&module) - .map_err(|inner| { - pipeline::CreateShaderModuleError::Validation(pipeline::ShaderError { - source, - label: desc.label.as_ref().map(|l| l.to_string()), - inner: Box::new(inner), - }) - })?; - let interface = - validation::Interface::new(&module, &info, self.features, self.limits.clone()); - let hal_shader = hal::ShaderInput::Naga(hal::NagaShader { module, info }); - - let hal_desc = hal::ShaderModuleDescriptor { - label: desc.label.borrow_option(), - 
runtime_checks: desc.shader_bound_checks.runtime_checks(), - }; - let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } { - Ok(raw) => raw, - Err(error) => { - return Err(match error { - hal::ShaderError::Device(error) => { - pipeline::CreateShaderModuleError::Device(error.into()) - } - hal::ShaderError::Compilation(ref msg) => { - log::error!("Shader error: {}", msg); - pipeline::CreateShaderModuleError::Generation - } - }) - } - }; - - Ok(pipeline::ShaderModule { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - interface: Some(interface), - #[cfg(debug_assertions)] - label: desc.label.borrow_or_default().to_string(), - }) - } - - #[allow(unused_unsafe)] - unsafe fn create_shader_module_spirv<'a>( - &self, - self_id: id::DeviceId, - desc: &pipeline::ShaderModuleDescriptor<'a>, - source: &'a [u32], - ) -> Result, pipeline::CreateShaderModuleError> { - self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?; - let hal_desc = hal::ShaderModuleDescriptor { - label: desc.label.borrow_option(), - runtime_checks: desc.shader_bound_checks.runtime_checks(), - }; - let hal_shader = hal::ShaderInput::SpirV(source); - let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } { - Ok(raw) => raw, - Err(error) => { - return Err(match error { - hal::ShaderError::Device(error) => { - pipeline::CreateShaderModuleError::Device(error.into()) - } - hal::ShaderError::Compilation(ref msg) => { - log::error!("Shader error: {}", msg); - pipeline::CreateShaderModuleError::Generation - } - }) - } - }; - - Ok(pipeline::ShaderModule { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - interface: None, - #[cfg(debug_assertions)] - label: desc.label.borrow_or_default().to_string(), - }) - } - - fn deduplicate_bind_group_layout( - self_id: id::DeviceId, - entry_map: &binding_model::BindEntryMap, - guard: &Storage, id::BindGroupLayoutId>, - ) -> Option { - guard - .iter(self_id.backend()) - .find(|&(_, bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map) - .map(|(id, value)| { - value.multi_ref_count.inc(); - id - }) - } - - fn get_introspection_bind_group_layouts<'a>( - pipeline_layout: &binding_model::PipelineLayout, - bgl_guard: &'a Storage, id::BindGroupLayoutId>, - ) -> ArrayVec<&'a binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }> { - pipeline_layout - .bind_group_layout_ids - .iter() - .map(|&id| &bgl_guard[id].entries) - .collect() - } - - /// Generate information about late-validated buffer bindings for pipelines. - //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way? - fn make_late_sized_buffer_groups<'a>( - shader_binding_sizes: &FastHashMap, - layout: &binding_model::PipelineLayout, - bgl_guard: &'a Storage, id::BindGroupLayoutId>, - ) -> ArrayVec { - // Given the shader-required binding sizes and the pipeline layout, - // return the filtered list of them in the layout order, - // removing those with given `min_binding_size`. - layout - .bind_group_layout_ids - .iter() - .enumerate() - .map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup { - shader_sizes: bgl_guard[bgl_id] - .entries - .values() - .filter_map(|entry| match entry.ty { - wgt::BindingType::Buffer { - min_binding_size: None, - .. 
- } => { - let rb = naga::ResourceBinding { - group: group_index as u32, - binding: entry.binding, - }; - let shader_size = - shader_binding_sizes.get(&rb).map_or(0, |nz| nz.get()); - Some(shader_size) - } - _ => None, - }) - .collect(), - }) - .collect() - } - - fn create_bind_group_layout( - &self, - self_id: id::DeviceId, - label: Option<&str>, - entry_map: binding_model::BindEntryMap, - ) -> Result, binding_model::CreateBindGroupLayoutError> { - #[derive(PartialEq)] - enum WritableStorage { - Yes, - No, - } - - for entry in entry_map.values() { - use wgt::BindingType as Bt; - - let mut required_features = wgt::Features::empty(); - let mut required_downlevel_flags = wgt::DownlevelFlags::empty(); - let (array_feature, writable_storage) = match entry.ty { - Bt::Buffer { - ty: wgt::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: _, - } => ( - Some(wgt::Features::BUFFER_BINDING_ARRAY), - WritableStorage::No, - ), - Bt::Buffer { - ty: wgt::BufferBindingType::Uniform, - has_dynamic_offset: true, - min_binding_size: _, - } => ( - Some(wgt::Features::BUFFER_BINDING_ARRAY), - WritableStorage::No, - ), - Bt::Buffer { - ty: wgt::BufferBindingType::Storage { read_only }, - .. - } => ( - Some( - wgt::Features::BUFFER_BINDING_ARRAY - | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY, - ), - match read_only { - true => WritableStorage::No, - false => WritableStorage::Yes, - }, - ), - Bt::Sampler { .. } => ( - Some(wgt::Features::TEXTURE_BINDING_ARRAY), - WritableStorage::No, - ), - Bt::Texture { - multisampled: true, - sample_type: TextureSampleType::Float { filterable: true }, - .. - } => { - return Err(binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled, - }); - } - Bt::Texture { .. 
} => ( - Some(wgt::Features::TEXTURE_BINDING_ARRAY), - WritableStorage::No, - ), - Bt::StorageTexture { - access, - view_dimension, - format: _, - } => { - match view_dimension { - wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { - return Err(binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::StorageTextureCube, - }) - } - _ => (), - } - match access { - wgt::StorageTextureAccess::ReadOnly - | wgt::StorageTextureAccess::ReadWrite - if !self.features.contains( - wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES, - ) => - { - return Err(binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite, - }); - } - _ => (), - } - ( - Some( - wgt::Features::TEXTURE_BINDING_ARRAY - | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY, - ), - match access { - wgt::StorageTextureAccess::WriteOnly => WritableStorage::Yes, - wgt::StorageTextureAccess::ReadOnly => { - required_features |= - wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; - WritableStorage::No - } - wgt::StorageTextureAccess::ReadWrite => { - required_features |= - wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; - WritableStorage::Yes - } - }, - ) - } - }; - - // Validate the count parameter - if entry.count.is_some() { - required_features |= array_feature - .ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported) - .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error, - })?; - } - - if entry.visibility.contains_invalid_bits() { - return Err( - binding_model::CreateBindGroupLayoutError::InvalidVisibility(entry.visibility), - ); - } - - if entry.visibility.contains(wgt::ShaderStages::VERTEX) { - if writable_storage == WritableStorage::Yes { - required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE; - } - if let Bt::Buffer { - ty: wgt::BufferBindingType::Storage { .. }, - .. - } = entry.ty - { - required_downlevel_flags |= wgt::DownlevelFlags::VERTEX_STORAGE; - } - } - if writable_storage == WritableStorage::Yes - && entry.visibility.contains(wgt::ShaderStages::FRAGMENT) - { - required_downlevel_flags |= wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE; - } - - self.require_features(required_features) - .map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures) - .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error, - })?; - self.require_downlevel_flags(required_downlevel_flags) - .map_err(binding_model::BindGroupLayoutEntryError::MissingDownlevelFlags) - .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error, - })?; - } - - let bgl_flags = conv::bind_group_layout_flags(self.features); - - let mut hal_bindings = entry_map.values().cloned().collect::>(); - hal_bindings.sort_by_key(|b| b.binding); - let hal_desc = hal::BindGroupLayoutDescriptor { - label, - flags: bgl_flags, - entries: &hal_bindings, - }; - let raw = unsafe { - self.raw - .create_bind_group_layout(&hal_desc) - .map_err(DeviceError::from)? - }; - - let mut count_validator = binding_model::BindingTypeMaxCountValidator::default(); - for entry in entry_map.values() { - count_validator.add_binding(entry); - } - // If a single bind group layout violates limits, the pipeline layout is - // definitely going to violate limits too, lets catch it now. 
-        count_validator
-            .validate(&self.limits)
-            .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?;
-
-        Ok(binding_model::BindGroupLayout {
-            raw,
-            device_id: Stored {
-                value: id::Valid(self_id),
-                ref_count: self.life_guard.add_ref(),
-            },
-            multi_ref_count: MultiRefCount::new(),
-            dynamic_count: entry_map
-                .values()
-                .filter(|b| b.ty.has_dynamic_offset())
-                .count(),
-            count_validator,
-            entries: entry_map,
-            #[cfg(debug_assertions)]
-            label: label.unwrap_or("").to_string(),
-        })
-    }
-
-    fn create_buffer_binding<'a>(
-        bb: &binding_model::BufferBinding,
-        binding: u32,
-        decl: &wgt::BindGroupLayoutEntry,
-        used_buffer_ranges: &mut Vec<BufferInitTrackerAction>,
-        dynamic_binding_info: &mut Vec<binding_model::BindGroupDynamicBindingData>,
-        late_buffer_binding_sizes: &mut FastHashMap<u32, wgt::BufferSize>,
-        used: &mut BindGroupStates<A>,
-        storage: &'a Storage<resource::Buffer<A>, id::BufferId>,
-        limits: &wgt::Limits,
-    ) -> Result<hal::BufferBinding<'a, A>, binding_model::CreateBindGroupError> {
-        use crate::binding_model::CreateBindGroupError as Error;
-
-        let (binding_ty, dynamic, min_size) = match decl.ty {
-            wgt::BindingType::Buffer {
-                ty,
-                has_dynamic_offset,
-                min_binding_size,
-            } => (ty, has_dynamic_offset, min_binding_size),
-            _ => {
-                return Err(Error::WrongBindingType {
-                    binding,
-                    actual: decl.ty,
-                    expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
-                })
-            }
-        };
-        let (pub_usage, internal_use, range_limit) = match binding_ty {
-            wgt::BufferBindingType::Uniform => (
-                wgt::BufferUsages::UNIFORM,
-                hal::BufferUses::UNIFORM,
-                limits.max_uniform_buffer_binding_size,
-            ),
-            wgt::BufferBindingType::Storage { read_only } => (
-                wgt::BufferUsages::STORAGE,
-                if read_only {
-                    hal::BufferUses::STORAGE_READ
-                } else {
-                    hal::BufferUses::STORAGE_READ_WRITE
-                },
-                limits.max_storage_buffer_binding_size,
-            ),
-        };
-
-        let (align, align_limit_name) =
-            binding_model::buffer_binding_type_alignment(limits, binding_ty);
-        if bb.offset % align as u64 != 0 {
-            return Err(Error::UnalignedBufferOffset(
-                bb.offset,
-                align_limit_name,
-                align,
-            ));
-        }
-
-        let buffer = used
-            .buffers
-            .add_single(storage, bb.buffer_id, internal_use)
-            .ok_or(Error::InvalidBuffer(bb.buffer_id))?;
-        check_buffer_usage(buffer.usage, pub_usage)?;
-        let raw_buffer = buffer
-            .raw
-            .as_ref()
-            .ok_or(Error::InvalidBuffer(bb.buffer_id))?;
-
-        let (bind_size, bind_end) = match bb.size {
-            Some(size) => {
-                let end = bb.offset + size.get();
-                if end > buffer.size {
-                    return Err(Error::BindingRangeTooLarge {
-                        buffer: bb.buffer_id,
-                        range: bb.offset..end,
-                        size: buffer.size,
-                    });
-                }
-                (size.get(), end)
-            }
-            None => (buffer.size - bb.offset, buffer.size),
-        };
-
-        if bind_size > range_limit as u64 {
-            return Err(Error::BufferRangeTooLarge {
-                binding,
-                given: bind_size as u32,
-                limit: range_limit,
-            });
-        }
-
-        // Record binding info for validating dynamic offsets
-        if dynamic {
-            dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData {
-                binding_idx: binding,
-                buffer_size: buffer.size,
-                binding_range: bb.offset..bind_end,
-                maximum_dynamic_offset: buffer.size - bind_end,
-                binding_type: binding_ty,
-            });
-        }
-
-        if let Some(non_zero) = min_size {
-            let min_size = non_zero.get();
-            if min_size > bind_size {
-                return Err(Error::BindingSizeTooSmall {
-                    buffer: bb.buffer_id,
-                    actual: bind_size,
-                    min: min_size,
-                });
-            }
-        } else {
-            let late_size =
-                wgt::BufferSize::new(bind_size).ok_or(Error::BindingZeroSize(bb.buffer_id))?;
-            late_buffer_binding_sizes.insert(binding, late_size);
-        }
-
-        assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0);
-        used_buffer_ranges.extend(buffer.initialization_status.create_action(
-            bb.buffer_id,
-            bb.offset..bb.offset + bind_size,
-            MemoryInitKind::NeedsInitializedMemory,
-        ));
-
-        Ok(hal::BufferBinding {
-            buffer: raw_buffer,
-            offset: bb.offset,
-            size: bb.size,
-        })
-    }
-
-    fn create_texture_binding(
-        view: &resource::TextureView<A>,
-        texture_guard: &Storage<resource::Texture<A>, id::TextureId>,
-        internal_use: hal::TextureUses,
-        pub_usage: wgt::TextureUsages,
-        used: &mut BindGroupStates<A>,
-        used_texture_ranges: &mut Vec<TextureInitTrackerAction>,
-    ) -> Result<(), binding_model::CreateBindGroupError> {
-        // Careful here: the texture may no longer have its own ref count,
-        // if it was deleted by the user.
-        let texture = used
-            .textures
-            .add_single(
-                texture_guard,
-                view.parent_id.value.0,
-                view.parent_id.ref_count.clone(),
-                Some(view.selector.clone()),
-                internal_use,
-            )
-            .ok_or(binding_model::CreateBindGroupError::InvalidTexture(
-                view.parent_id.value.0,
-            ))?;
-        check_texture_usage(texture.desc.usage, pub_usage)?;
-
-        used_texture_ranges.push(TextureInitTrackerAction {
-            id: view.parent_id.value.0,
-            range: TextureInitRange {
-                mip_range: view.desc.range.mip_range(texture.desc.mip_level_count),
-                layer_range: view
-                    .desc
-                    .range
-                    .layer_range(texture.desc.array_layer_count()),
-            },
-            kind: MemoryInitKind::NeedsInitializedMemory,
-        });
-
-        Ok(())
-    }
-
-    fn create_bind_group<G: GlobalIdentityHandlerFactory>(
-        &self,
-        self_id: id::DeviceId,
-        layout: &binding_model::BindGroupLayout<A>,
-        desc: &binding_model::BindGroupDescriptor,
-        hub: &Hub<A, G>,
-        token: &mut Token<binding_model::BindGroupLayout<A>>,
-    ) -> Result<binding_model::BindGroup<A>, binding_model::CreateBindGroupError> {
-        use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error};
-        {
-            // Check that the number of entries in the descriptor matches
-            // the number of entries in the layout.
-            let actual = desc.entries.len();
-            let expected = layout.entries.len();
-            if actual != expected {
-                return Err(Error::BindingsNumMismatch { expected, actual });
-            }
-        }
-
-        // TODO: arrayvec/smallvec, or re-use allocations
-        // Record binding info for dynamic offset validation
-        let mut dynamic_binding_info = Vec::new();
-        // Map of binding -> shader reflected size
-        //Note: we can't collect into a vector right away because
-        // it needs to be in BGL iteration order, not BG entry order.
-        let mut late_buffer_binding_sizes = FastHashMap::default();
-        // fill out the descriptors
-        let mut used = BindGroupStates::new();
-
-        let (buffer_guard, mut token) = hub.buffers.read(token);
-        let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token
-        let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
-        let (sampler_guard, _) = hub.samplers.read(&mut token);
-
-        let mut used_buffer_ranges = Vec::new();
-        let mut used_texture_ranges = Vec::new();
-        let mut hal_entries = Vec::with_capacity(desc.entries.len());
-        let mut hal_buffers = Vec::new();
-        let mut hal_samplers = Vec::new();
-        let mut hal_textures = Vec::new();
-        for entry in desc.entries.iter() {
-            let binding = entry.binding;
-            // Find the corresponding declaration in the layout
-            let decl = layout
-                .entries
-                .get(&binding)
-                .ok_or(Error::MissingBindingDeclaration(binding))?;
-            let (res_index, count) = match entry.resource {
-                Br::Buffer(ref bb) => {
-                    let bb = Self::create_buffer_binding(
-                        bb,
-                        binding,
-                        decl,
-                        &mut used_buffer_ranges,
-                        &mut dynamic_binding_info,
-                        &mut late_buffer_binding_sizes,
-                        &mut used,
-                        &*buffer_guard,
-                        &self.limits,
-                    )?;
-
-                    let res_index = hal_buffers.len();
-                    hal_buffers.push(bb);
-                    (res_index, 1)
-                }
-                Br::BufferArray(ref bindings_array) => {
-                    let num_bindings = bindings_array.len();
-                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
-
-                    let res_index = hal_buffers.len();
-                    for bb in bindings_array.iter() {
-                        let bb = Self::create_buffer_binding(
-                            bb,
-                            binding,
-                            decl,
-                            &mut used_buffer_ranges,
-                            &mut dynamic_binding_info,
-                            &mut late_buffer_binding_sizes,
-                            &mut used,
-                            &*buffer_guard,
-                            &self.limits,
-                        )?;
-                        hal_buffers.push(bb);
-                    }
-                    (res_index, num_bindings)
-                }
-                Br::Sampler(id) => {
-                    match decl.ty {
-                        wgt::BindingType::Sampler(ty) => {
-                            let sampler = used
-                                .samplers
-                                .add_single(&*sampler_guard, id)
-                                .ok_or(Error::InvalidSampler(id))?;
-
-                            // Allowed sampler values for filtering and comparison
-                            let (allowed_filtering, allowed_comparison) = match ty {
-                                wgt::SamplerBindingType::Filtering => (None, false),
-                                wgt::SamplerBindingType::NonFiltering => (Some(false), false),
-                                wgt::SamplerBindingType::Comparison => (None, true),
-                            };
-
-                            if let Some(allowed_filtering) = allowed_filtering {
-                                if allowed_filtering != sampler.filtering {
-                                    return Err(Error::WrongSamplerFiltering {
-                                        binding,
-                                        layout_flt: allowed_filtering,
-                                        sampler_flt: sampler.filtering,
-                                    });
-                                }
-                            }
-
-                            if allowed_comparison != sampler.comparison {
-                                return Err(Error::WrongSamplerComparison {
-                                    binding,
-                                    layout_cmp: allowed_comparison,
-                                    sampler_cmp: sampler.comparison,
-                                });
-                            }
-
-                            let res_index = hal_samplers.len();
-                            hal_samplers.push(&sampler.raw);
-                            (res_index, 1)
-                        }
-                        _ => {
-                            return Err(Error::WrongBindingType {
-                                binding,
-                                actual: decl.ty,
-                                expected: "Sampler",
-                            })
-                        }
-                    }
-                }
-                Br::SamplerArray(ref bindings_array) => {
-                    let num_bindings = bindings_array.len();
-                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
-
-                    let res_index = hal_samplers.len();
-                    for &id in bindings_array.iter() {
-                        let sampler = used
-                            .samplers
-                            .add_single(&*sampler_guard, id)
-                            .ok_or(Error::InvalidSampler(id))?;
-                        hal_samplers.push(&sampler.raw);
-                    }
-
-                    (res_index, num_bindings)
-                }
-                Br::TextureView(id) => {
-                    let view = used
-                        .views
-                        .add_single(&*texture_view_guard, id)
-                        .ok_or(Error::InvalidTextureView(id))?;
-                    let (pub_usage, internal_use) = Self::texture_use_parameters(
-                        binding,
-                        decl,
-                        view,
-                        "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
-                    )?;
-                    Self::create_texture_binding(
-                        view,
-                        &texture_guard,
-                        internal_use,
-                        pub_usage,
-                        &mut used,
-                        &mut used_texture_ranges,
-                    )?;
-                    let res_index = hal_textures.len();
-                    hal_textures.push(hal::TextureBinding {
-                        view: &view.raw,
-                        usage: internal_use,
-                    });
-                    (res_index, 1)
-                }
-                Br::TextureViewArray(ref bindings_array) => {
-                    let num_bindings = bindings_array.len();
-                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
-
-                    let res_index = hal_textures.len();
-                    for &id in bindings_array.iter() {
-                        let view = used
-                            .views
-                            .add_single(&*texture_view_guard, id)
-                            .ok_or(Error::InvalidTextureView(id))?;
-                        let (pub_usage, internal_use) =
-                            Self::texture_use_parameters(binding, decl, view,
-                                "SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?;
-                        Self::create_texture_binding(
-                            view,
-                            &texture_guard,
-                            internal_use,
-                            pub_usage,
-                            &mut used,
-                            &mut used_texture_ranges,
-                        )?;
-                        hal_textures.push(hal::TextureBinding {
-                            view: &view.raw,
-                            usage: internal_use,
-                        });
-                    }
-
-                    (res_index, num_bindings)
-                }
-            };
-
-            hal_entries.push(hal::BindGroupEntry {
-                binding,
-                resource_index: res_index as u32,
-                count: count as u32,
-            });
-        }
-
-        used.optimize();
-
-        hal_entries.sort_by_key(|entry| entry.binding);
-        for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) {
-            if a.binding == b.binding {
-                return Err(Error::DuplicateBinding(a.binding));
-            }
-        }
-
-        let hal_desc = hal::BindGroupDescriptor {
-            label: desc.label.borrow_option(),
-            layout: &layout.raw,
-            entries: &hal_entries,
-            buffers: &hal_buffers,
-            samplers: &hal_samplers,
-            textures: &hal_textures,
-        };
-        let raw = unsafe {
-            self.raw
-                .create_bind_group(&hal_desc)
-                .map_err(DeviceError::from)?
-        };
-
-        // manually add a dependency on BGL
-        layout.multi_ref_count.inc();
-
-        Ok(binding_model::BindGroup {
-            raw,
-            device_id: Stored {
-                value: id::Valid(self_id),
-                ref_count: self.life_guard.add_ref(),
-            },
-            layout_id: id::Valid(desc.layout),
-            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
-            used,
-            used_buffer_ranges,
-            used_texture_ranges,
-            dynamic_binding_info,
-            // collect in the order of BGL iteration
-            late_buffer_binding_sizes: layout
-                .entries
-                .keys()
-                .flat_map(|binding| late_buffer_binding_sizes.get(binding).cloned())
-                .collect(),
-        })
-    }
-
-    fn check_array_binding(
-        features: wgt::Features,
-        count: Option<NonZeroU32>,
-        num_bindings: usize,
-    ) -> Result<(), super::binding_model::CreateBindGroupError> {
-        use super::binding_model::CreateBindGroupError as Error;
-
-        if let Some(count) = count {
-            let count = count.get() as usize;
-            if count < num_bindings {
-                return Err(Error::BindingArrayPartialLengthMismatch {
-                    actual: num_bindings,
-                    expected: count,
-                });
-            }
-            if count != num_bindings
-                && !features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY)
-            {
-                return Err(Error::BindingArrayLengthMismatch {
-                    actual: num_bindings,
-                    expected: count,
-                });
-            }
-            if num_bindings == 0 {
-                return Err(Error::BindingArrayZeroLength);
-            }
-        } else {
-            return Err(Error::SingleBindingExpected);
-        };
-
-        Ok(())
-    }
-
-    fn texture_use_parameters(
-        binding: u32,
-        decl: &wgt::BindGroupLayoutEntry,
-        view: &crate::resource::TextureView<A>,
-        expected: &'static str,
-    ) -> Result<(wgt::TextureUsages, hal::TextureUses), binding_model::CreateBindGroupError> {
-        use crate::binding_model::CreateBindGroupError as Error;
-        if view
-            .desc
-            .aspects()
-            .contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL)
-        {
-            return Err(Error::DepthStencilAspect);
-        }
-        match decl.ty {
-            wgt::BindingType::Texture {
-                sample_type,
-                view_dimension,
-                multisampled,
-            } => {
-                use wgt::TextureSampleType as Tst;
-                if multisampled != (view.samples != 1) {
-                    return Err(Error::InvalidTextureMultisample {
-                        binding,
-                        layout_multisampled: multisampled,
-                        view_samples: view.samples,
-                    });
-                }
-                let compat_sample_type = view
-                    .desc
-                    .format
-                    .sample_type(Some(view.desc.range.aspect))
-                    .unwrap();
-                match (sample_type, compat_sample_type) {
-                    (Tst::Uint, Tst::Uint) |
-                    (Tst::Sint, Tst::Sint) |
-                    (Tst::Depth, Tst::Depth) |
-                    // if we expect non-filterable, accept anything float
-                    (Tst::Float { filterable: false }, Tst::Float { .. }) |
-                    // if we expect filterable, require it
-                    (Tst::Float { filterable: true }, Tst::Float { filterable: true }) |
-                    // if we expect non-filterable, also accept depth
-                    (Tst::Float { filterable: false }, Tst::Depth) => {}
-                    // if we expect filterable, also accept Float that is defined as
-                    // unfilterable if filterable feature is explicitly enabled (only hit
-                    // if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is
-                    // enabled)
-                    (Tst::Float { filterable: true }, Tst::Float { .. }) if view.format_features.flags.contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {}
-                    _ => {
-                        return Err(Error::InvalidTextureSampleType {
-                            binding,
-                            layout_sample_type: sample_type,
-                            view_format: view.desc.format,
-                        })
-                    }
-                }
-                if view_dimension != view.desc.dimension {
-                    return Err(Error::InvalidTextureDimension {
-                        binding,
-                        layout_dimension: view_dimension,
-                        view_dimension: view.desc.dimension,
-                    });
-                }
-                Ok((
-                    wgt::TextureUsages::TEXTURE_BINDING,
-                    hal::TextureUses::RESOURCE,
-                ))
-            }
-            wgt::BindingType::StorageTexture {
-                access,
-                format,
-                view_dimension,
-            } => {
-                if format != view.desc.format {
-                    return Err(Error::InvalidStorageTextureFormat {
-                        binding,
-                        layout_format: format,
-                        view_format: view.desc.format,
-                    });
-                }
-                if view_dimension != view.desc.dimension {
-                    return Err(Error::InvalidTextureDimension {
-                        binding,
-                        layout_dimension: view_dimension,
-                        view_dimension: view.desc.dimension,
-                    });
-                }
-
-                let mip_level_count = view.selector.mips.end - view.selector.mips.start;
-                if mip_level_count != 1 {
-                    return Err(Error::InvalidStorageTextureMipLevelCount {
-                        binding,
-                        mip_level_count,
-                    });
-                }
-
-                let internal_use = match access {
-                    wgt::StorageTextureAccess::WriteOnly => hal::TextureUses::STORAGE_READ_WRITE,
-                    wgt::StorageTextureAccess::ReadOnly => {
-                        if !view
-                            .format_features
-                            .flags
-                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
-                        {
-                            return Err(Error::StorageReadNotSupported(view.desc.format));
-                        }
-                        hal::TextureUses::STORAGE_READ
-                    }
-                    wgt::StorageTextureAccess::ReadWrite => {
-                        if !view
-                            .format_features
-                            .flags
-                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
-                        {
-                            return Err(Error::StorageReadNotSupported(view.desc.format));
-                        }
-
-                        hal::TextureUses::STORAGE_READ_WRITE
-                    }
-                };
-                Ok((wgt::TextureUsages::STORAGE_BINDING, internal_use))
-            }
-            _ => Err(Error::WrongBindingType {
-                binding,
-                actual: decl.ty,
-                expected,
-            }),
-        }
-    }
-
-    fn create_pipeline_layout(
-        &self,
-        self_id: id::DeviceId,
-        desc: &binding_model::PipelineLayoutDescriptor,
-        bgl_guard: &Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
-    ) -> Result<binding_model::PipelineLayout<A>, binding_model::CreatePipelineLayoutError> {
-        use crate::binding_model::CreatePipelineLayoutError as Error;
-
-        let bind_group_layouts_count = desc.bind_group_layouts.len();
-        let device_max_bind_groups = self.limits.max_bind_groups as usize;
-        if bind_group_layouts_count > device_max_bind_groups {
-            return Err(Error::TooManyGroups {
-                actual: bind_group_layouts_count,
-                max: device_max_bind_groups,
-            });
-        }
-
-        if !desc.push_constant_ranges.is_empty() {
-            self.require_features(wgt::Features::PUSH_CONSTANTS)?;
-        }
-
-        let mut used_stages = wgt::ShaderStages::empty();
-        for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
-            if pc.stages.intersects(used_stages) {
-                return Err(Error::MoreThanOnePushConstantRangePerStage {
-                    index,
-                    provided: pc.stages,
-                    intersected: pc.stages & used_stages,
-                });
-            }
-            used_stages |= pc.stages;
-
-            let device_max_pc_size = self.limits.max_push_constant_size;
-            if device_max_pc_size < pc.range.end {
-                return Err(Error::PushConstantRangeTooLarge {
-                    index,
-                    range: pc.range.clone(),
-                    max: device_max_pc_size,
-                });
-            }
-
-            if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
-                return Err(Error::MisalignedPushConstantRange {
-                    index,
-                    bound: pc.range.start,
-                });
-            }
-            if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
-                return Err(Error::MisalignedPushConstantRange {
-                    index,
-                    bound: pc.range.end,
-                });
-            }
-        }
-
-        let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
-
-        // validate total resource counts
-        for &id in desc.bind_group_layouts.iter() {
-            let bind_group_layout = bgl_guard
-                .get(id)
-                .map_err(|_| Error::InvalidBindGroupLayout(id))?;
-            count_validator.merge(&bind_group_layout.count_validator);
-        }
-        count_validator
-            .validate(&self.limits)
-            .map_err(Error::TooManyBindings)?;
-
-        let bgl_vec = desc
-            .bind_group_layouts
-            .iter()
-            .map(|&id| &bgl_guard.get(id).unwrap().raw)
-            .collect::<Vec<_>>();
-        let hal_desc = hal::PipelineLayoutDescriptor {
-            label: desc.label.borrow_option(),
-            flags: hal::PipelineLayoutFlags::BASE_VERTEX_INSTANCE,
-            bind_group_layouts: &bgl_vec,
-            push_constant_ranges: desc.push_constant_ranges.as_ref(),
-        };
-
-        let raw = unsafe {
-            self.raw
-                .create_pipeline_layout(&hal_desc)
-                .map_err(DeviceError::from)?
-        };
-
-        Ok(binding_model::PipelineLayout {
-            raw,
-            device_id: Stored {
-                value: id::Valid(self_id),
-                ref_count: self.life_guard.add_ref(),
-            },
-            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
-            bind_group_layout_ids: desc
-                .bind_group_layouts
-                .iter()
-                .map(|&id| {
-                    // manually add a dependency to BGL
-                    bgl_guard.get(id).unwrap().multi_ref_count.inc();
-                    id::Valid(id)
-                })
-                .collect(),
-            push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
-        })
-    }
-
-    //TODO: refactor this. It's the only method of `Device` that registers new objects
-    // (the pipeline layout).
-    fn derive_pipeline_layout(
-        &self,
-        self_id: id::DeviceId,
-        implicit_context: Option<ImplicitPipelineContext>,
-        mut derived_group_layouts: ArrayVec<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>,
-        bgl_guard: &mut Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
-        pipeline_layout_guard: &mut Storage<binding_model::PipelineLayout<A>, id::PipelineLayoutId>,
-    ) -> Result<id::PipelineLayoutId, pipeline::ImplicitLayoutError> {
-        while derived_group_layouts
-            .last()
-            .map_or(false, |map| map.is_empty())
-        {
-            derived_group_layouts.pop();
-        }
-        let mut ids = implicit_context.ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?;
-        let group_count = derived_group_layouts.len();
-        if ids.group_ids.len() < group_count {
-            log::error!(
-                "Not enough bind group IDs ({}) specified for the implicit layout ({})",
-                ids.group_ids.len(),
-                derived_group_layouts.len()
-            );
-            return Err(pipeline::ImplicitLayoutError::MissingIds(group_count as _));
-        }
-
-        for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) {
-            match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard) {
-                Some(dedup_id) => {
-                    *bgl_id = dedup_id;
-                }
-                None => {
-                    let bgl = self.create_bind_group_layout(self_id, None, map)?;
-                    bgl_guard.force_replace(*bgl_id, bgl);
-                }
-            };
-        }
-
-        let layout_desc = binding_model::PipelineLayoutDescriptor {
-            label: None,
-            bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]),
-            push_constant_ranges: Cow::Borrowed(&[]), //TODO?
-        };
-        let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?;
-        pipeline_layout_guard.force_replace(ids.root_id, layout);
-        Ok(ids.root_id)
-    }
-
-    fn create_compute_pipeline<G: GlobalIdentityHandlerFactory>(
-        &self,
-        self_id: id::DeviceId,
-        desc: &pipeline::ComputePipelineDescriptor,
-        implicit_context: Option<ImplicitPipelineContext>,
-        hub: &Hub<A, G>,
-        token: &mut Token<Self>,
-    ) -> Result<pipeline::ComputePipeline<A>, pipeline::CreateComputePipelineError> {
-        //TODO: only lock mutable if the layout is derived
-        let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
-        let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
-
-        // This has to be done first, or otherwise the IDs may be pointing to entries
-        // that are not even in the storage.
-        if let Some(ref ids) = implicit_context {
-            pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
-            for &bgl_id in ids.group_ids.iter() {
-                bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
-            }
-        }
-
-        self.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?;
-
-        let mut derived_group_layouts =
-            ArrayVec::<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>::new();
-        let mut shader_binding_sizes = FastHashMap::default();
-
-        let io = validation::StageIo::default();
-        let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
-
-        let shader_module = shader_module_guard
-            .get(desc.stage.module)
-            .map_err(|_| validation::StageError::InvalidModule)?;
-
-        {
-            let flag = wgt::ShaderStages::COMPUTE;
-            let provided_layouts = match desc.layout {
-                Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
-                    pipeline_layout_guard
-                        .get(pipeline_layout_id)
-                        .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
-                    &*bgl_guard,
-                )),
-                None => {
-                    for _ in 0..self.limits.max_bind_groups {
-                        derived_group_layouts.push(binding_model::BindEntryMap::default());
-                    }
-                    None
-                }
-            };
-            if let Some(ref interface) = shader_module.interface {
-                let _ = interface.check_stage(
-                    provided_layouts.as_ref().map(|p| p.as_slice()),
-                    &mut derived_group_layouts,
-                    &mut shader_binding_sizes,
-                    &desc.stage.entry_point,
-                    flag,
-                    io,
-                    None,
-                )?;
-            }
-        }
-
-        let pipeline_layout_id = match desc.layout {
-            Some(id) => id,
-            None => self.derive_pipeline_layout(
-                self_id,
-                implicit_context,
-                derived_group_layouts,
-                &mut *bgl_guard,
-                &mut *pipeline_layout_guard,
-            )?,
-        };
-        let layout = pipeline_layout_guard
-            .get(pipeline_layout_id)
-            .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?;
-
-        let late_sized_buffer_groups =
-            Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard);
-
-        let pipeline_desc = hal::ComputePipelineDescriptor {
-            label: desc.label.borrow_option(),
-            layout: &layout.raw,
-            stage: hal::ProgrammableStage {
-                entry_point: desc.stage.entry_point.as_ref(),
-                module: &shader_module.raw,
-            },
-        };
-
-        let raw =
-            unsafe { self.raw.create_compute_pipeline(&pipeline_desc) }.map_err(
-                |err| match err {
-                    hal::PipelineError::Device(error) => {
-                        pipeline::CreateComputePipelineError::Device(error.into())
-                    }
-                    hal::PipelineError::Linkage(_stages, msg) => {
-                        pipeline::CreateComputePipelineError::Internal(msg)
-                    }
-                    hal::PipelineError::EntryPoint(_stage) => {
-                        pipeline::CreateComputePipelineError::Internal(EP_FAILURE.to_string())
-                    }
-                },
-            )?;
-
-        let pipeline = pipeline::ComputePipeline {
-            raw,
-            layout_id: Stored {
-                value: id::Valid(pipeline_layout_id),
-                ref_count: layout.life_guard.add_ref(),
-            },
-            device_id: Stored {
-                value: id::Valid(self_id),
-                ref_count: self.life_guard.add_ref(),
-            },
-            late_sized_buffer_groups,
-            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
-        };
-        Ok(pipeline)
-    }
-
-    fn create_render_pipeline<G: GlobalIdentityHandlerFactory>(
-        &self,
-        self_id: id::DeviceId,
-        adapter: &Adapter<A>,
-        desc: &pipeline::RenderPipelineDescriptor,
-        implicit_context: Option<ImplicitPipelineContext>,
-        hub: &Hub<A, G>,
-        token: &mut Token<Self>,
-    ) -> Result<pipeline::RenderPipeline<A>, pipeline::CreateRenderPipelineError> {
-        use wgt::TextureFormatFeatureFlags as Tfff;
-
-        //TODO: only lock mutable if the layout is derived
-        let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
-        let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
-
-        // This has to be done first, or otherwise the IDs may be pointing to entries
-        // that are not even in the storage.
-        if let Some(ref ids) = implicit_context {
-            pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
-            for &bgl_id in ids.group_ids.iter() {
-                bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
-            }
-        }
-
-        let mut derived_group_layouts =
-            ArrayVec::<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>::new();
-        let mut shader_binding_sizes = FastHashMap::default();
-
-        let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0);
-        if num_attachments > hal::MAX_COLOR_ATTACHMENTS {
-            return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
-                command::ColorAttachmentError::TooMany {
-                    given: num_attachments,
-                    limit: hal::MAX_COLOR_ATTACHMENTS,
-                },
-            ));
-        }
-
-        let color_targets = desc
-            .fragment
-            .as_ref()
-            .map_or(&[][..], |fragment| &fragment.targets);
-        let depth_stencil_state = desc.depth_stencil.as_ref();
-
-        let cts: ArrayVec<_, { hal::MAX_COLOR_ATTACHMENTS }> =
-            color_targets.iter().filter_map(|x| x.as_ref()).collect();
-        if !cts.is_empty() && {
-            let first = &cts[0];
-            cts[1..]
-                .iter()
-                .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend)
-        } {
-            log::info!("Color targets: {:?}", color_targets);
-            self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?;
-        }
-
-        let mut io = validation::StageIo::default();
-        let mut validated_stages = wgt::ShaderStages::empty();
-
-        let mut vertex_steps = Vec::with_capacity(desc.vertex.buffers.len());
-        let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len());
-        let mut total_attributes = 0;
-        for (i, vb_state) in desc.vertex.buffers.iter().enumerate() {
-            vertex_steps.push(pipeline::VertexStep {
-                stride: vb_state.array_stride,
-                mode: vb_state.step_mode,
-            });
-            if vb_state.attributes.is_empty() {
-                continue;
-            }
-            if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 {
-                return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge {
-                    index: i as u32,
-                    given: vb_state.array_stride as u32,
-                    limit: self.limits.max_vertex_buffer_array_stride,
-                });
-            }
-            if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 {
-                return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride {
-                    index: i as u32,
-                    stride: vb_state.array_stride,
-                });
-            }
-            vertex_buffers.push(hal::VertexBufferLayout {
-                array_stride: vb_state.array_stride,
-                step_mode: vb_state.step_mode,
-                attributes: vb_state.attributes.as_ref(),
-            });
-
-            for attribute in vb_state.attributes.iter() {
-                if attribute.offset >= 0x10000000 {
-                    return Err(
-                        pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
-                            location: attribute.shader_location,
-                            offset: attribute.offset,
-                        },
-                    );
-                }
-
-                if let wgt::VertexFormat::Float64
-                | wgt::VertexFormat::Float64x2
-                | wgt::VertexFormat::Float64x3
-                | wgt::VertexFormat::Float64x4 = attribute.format
-                {
-                    self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?;
-                }
-
-                let previous = io.insert(
-                    attribute.shader_location,
-                    validation::InterfaceVar::vertex_attribute(attribute.format),
-                );
-
-                if previous.is_some() {
-                    return Err(pipeline::CreateRenderPipelineError::ShaderLocationClash(
-                        attribute.shader_location,
-                    ));
-                }
-            }
-            total_attributes += vb_state.attributes.len();
-        }
-
-        if vertex_buffers.len() > self.limits.max_vertex_buffers as usize {
-            return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers {
-                given: vertex_buffers.len() as u32,
-                limit: self.limits.max_vertex_buffers,
-            });
-        }
-        if total_attributes > self.limits.max_vertex_attributes as usize {
-            return Err(
-                pipeline::CreateRenderPipelineError::TooManyVertexAttributes {
-                    given: total_attributes as u32,
-                    limit: self.limits.max_vertex_attributes,
-                },
-            );
-        }
-
-        if desc.primitive.strip_index_format.is_some() && !desc.primitive.topology.is_strip() {
-            return Err(
-                pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology {
-                    strip_index_format: desc.primitive.strip_index_format,
-                    topology: desc.primitive.topology,
-                },
-            );
-        }
-
-        if desc.primitive.unclipped_depth {
-            self.require_features(wgt::Features::DEPTH_CLIP_CONTROL)?;
-        }
-
-        if desc.primitive.polygon_mode == wgt::PolygonMode::Line {
-            self.require_features(wgt::Features::POLYGON_MODE_LINE)?;
-        }
-        if desc.primitive.polygon_mode == wgt::PolygonMode::Point {
-            self.require_features(wgt::Features::POLYGON_MODE_POINT)?;
-        }
-
-        if desc.primitive.conservative {
-            self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?;
-        }
-
-        if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill {
-            return Err(
-                pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode,
-            );
-        }
-
-        for (i, cs) in color_targets.iter().enumerate() {
-            if let Some(cs) = cs.as_ref() {
-                let error = loop {
-                    if cs.write_mask.contains_invalid_bits() {
-                        break Some(pipeline::ColorStateError::InvalidWriteMask(cs.write_mask));
-                    }
-
-                    let format_features = self.describe_format_features(adapter, cs.format)?;
-                    if !format_features
-                        .allowed_usages
-                        .contains(wgt::TextureUsages::RENDER_ATTACHMENT)
-                    {
-                        break Some(pipeline::ColorStateError::FormatNotRenderable(cs.format));
-                    }
-                    let blendable = format_features.flags.contains(Tfff::BLENDABLE);
-                    let filterable = format_features.flags.contains(Tfff::FILTERABLE);
-                    let adapter_specific = self
-                        .features
-                        .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
-                    // according to WebGPU specifications the texture needs to be
-                    // [`TextureFormatFeatureFlags::FILTERABLE`] if blending is set - use
-                    // [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] to elude
-                    // this limitation
-                    if cs.blend.is_some() && (!blendable || (!filterable && !adapter_specific)) {
-                        break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format));
-                    }
-                    if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) {
-                        break Some(pipeline::ColorStateError::FormatNotColor(cs.format));
-                    }
-                    if desc.multisample.count > 1
-                        && !format_features
-                            .flags
-                            .sample_count_supported(desc.multisample.count)
-                    {
-                        break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format));
-                    }
-
-                    break None;
-                };
-                if let Some(e) = error {
-                    return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e));
-                }
-            }
-        }
-
-        if let Some(ds) = depth_stencil_state {
-            let error = loop {
-                let format_features = self.describe_format_features(adapter, ds.format)?;
-                if !format_features
-                    .allowed_usages
-                    .contains(wgt::TextureUsages::RENDER_ATTACHMENT)
-                {
-                    break Some(pipeline::DepthStencilStateError::FormatNotRenderable(
-                        ds.format,
-                    ));
-                }
-
-                let aspect = hal::FormatAspects::from(ds.format);
-                if ds.is_depth_enabled() && !aspect.contains(hal::FormatAspects::DEPTH) {
-                    break Some(pipeline::DepthStencilStateError::FormatNotDepth(ds.format));
-                }
-                if ds.stencil.is_enabled() && !aspect.contains(hal::FormatAspects::STENCIL) {
-                    break Some(pipeline::DepthStencilStateError::FormatNotStencil(
-                        ds.format,
-                    ));
-                }
-                if desc.multisample.count > 1
-                    && !format_features
-                        .flags
-                        .sample_count_supported(desc.multisample.count)
-                {
-                    break Some(pipeline::DepthStencilStateError::FormatNotMultisampled(
-                        ds.format,
-                    ));
-                }
-
-                break None;
-            };
-            if let Some(e) = error {
-                return Err(pipeline::CreateRenderPipelineError::DepthStencilState(e));
-            }
-
-            if ds.bias.clamp != 0.0 {
-                self.require_downlevel_flags(wgt::DownlevelFlags::DEPTH_BIAS_CLAMP)?;
-            }
-        }
-
-        if desc.layout.is_none() {
-            for _ in 0..self.limits.max_bind_groups {
-                derived_group_layouts.push(binding_model::BindEntryMap::default());
-            }
-        }
-
-        let samples = {
-            let sc = desc.multisample.count;
-            if sc == 0 || sc > 32 || !conv::is_power_of_two_u32(sc) {
-                return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
-            }
-            sc
-        };
-
-        let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
-
-        let vertex_stage = {
-            let stage = &desc.vertex.stage;
-            let flag = wgt::ShaderStages::VERTEX;
-
-            let shader_module = shader_module_guard.get(stage.module).map_err(|_| {
-                pipeline::CreateRenderPipelineError::Stage {
-                    stage: flag,
-                    error: validation::StageError::InvalidModule,
-                }
-            })?;
-
-            let provided_layouts = match desc.layout {
-                Some(pipeline_layout_id) => {
-                    let pipeline_layout = pipeline_layout_guard
-                        .get(pipeline_layout_id)
-                        .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
-                    Some(Device::get_introspection_bind_group_layouts(
-                        pipeline_layout,
-                        &*bgl_guard,
-                    ))
-                }
-                None => None,
-            };
-
-            if let Some(ref interface) = shader_module.interface {
-                io = interface
-                    .check_stage(
-                        provided_layouts.as_ref().map(|p| p.as_slice()),
-                        &mut derived_group_layouts,
-                        &mut shader_binding_sizes,
-                        &stage.entry_point,
-                        flag,
-                        io,
-                        desc.depth_stencil.as_ref().map(|d| d.depth_compare),
-                    )
-                    .map_err(|error| pipeline::CreateRenderPipelineError::Stage {
-                        stage: flag,
-                        error,
-                    })?;
-                validated_stages |= flag;
-            }
-
-            hal::ProgrammableStage {
-                module: &shader_module.raw,
-                entry_point: stage.entry_point.as_ref(),
-            }
-        };
-
-        let fragment_stage = match desc.fragment {
-            Some(ref fragment) => {
-                let flag = wgt::ShaderStages::FRAGMENT;
-
-                let shader_module =
-                    shader_module_guard
-                        .get(fragment.stage.module)
-                        .map_err(|_| pipeline::CreateRenderPipelineError::Stage {
-                            stage: flag,
-                            error: validation::StageError::InvalidModule,
-                        })?;
-
-                let provided_layouts = match desc.layout {
-                    Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
-                        pipeline_layout_guard
-                            .get(pipeline_layout_id)
-                            .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?,
-                        &*bgl_guard,
-                    )),
-                    None => None,
-                };
-
-                if validated_stages == wgt::ShaderStages::VERTEX {
-                    if let Some(ref interface) = shader_module.interface {
-                        io = interface
-                            .check_stage(
-                                provided_layouts.as_ref().map(|p| p.as_slice()),
-                                &mut derived_group_layouts,
-                                &mut shader_binding_sizes,
-                                &fragment.stage.entry_point,
-                                flag,
-                                io,
-                                desc.depth_stencil.as_ref().map(|d| d.depth_compare),
-                            )
-                            .map_err(|error| pipeline::CreateRenderPipelineError::Stage {
-                                stage: flag,
-                                error,
-                            })?;
-                        validated_stages |= flag;
-                    }
-                }
-
-                Some(hal::ProgrammableStage {
-                    module: &shader_module.raw,
-                    entry_point: fragment.stage.entry_point.as_ref(),
-                })
-            }
-            None => None,
-        };
-
-        if validated_stages.contains(wgt::ShaderStages::FRAGMENT) {
-            for (i, output) in io.iter() {
-                match color_targets.get(*i as usize) {
-                    Some(&Some(ref state)) => {
-                        validation::check_texture_format(state.format, &output.ty).map_err(
-                            |pipeline| {
-                                pipeline::CreateRenderPipelineError::ColorState(
-                                    *i as u8,
-                                    pipeline::ColorStateError::IncompatibleFormat {
-                                        pipeline,
-                                        shader: output.ty,
-                                    },
-                                )
-                            },
-                        )?;
-                    }
-                    _ => {
-                        log::info!(
-                            "The fragment stage {:?} output @location({}) values are ignored",
-                            fragment_stage
-                                .as_ref()
-                                .map_or("", |stage| stage.entry_point),
-                            i
-                        );
-                    }
-                }
-            }
-        }
-        let last_stage = match desc.fragment {
-            Some(_) => wgt::ShaderStages::FRAGMENT,
-            None => wgt::ShaderStages::VERTEX,
-        };
-        if desc.layout.is_none() && !validated_stages.contains(last_stage) {
-            return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into());
-        }
-
-        let pipeline_layout_id = match desc.layout {
-            Some(id) => id,
-            None => self.derive_pipeline_layout(
-                self_id,
-                implicit_context,
-                derived_group_layouts,
-                &mut *bgl_guard,
-                &mut *pipeline_layout_guard,
-            )?,
-        };
-        let layout = pipeline_layout_guard
-            .get(pipeline_layout_id)
-            .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
-
-        // Multiview is only supported if the feature is enabled
-        if desc.multiview.is_some() {
-            self.require_features(wgt::Features::MULTIVIEW)?;
-        }
-
-        if !self
-            .downlevel
-            .flags
-            .contains(wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED)
-        {
-            for (binding, size) in shader_binding_sizes.iter() {
-                if size.get() % 16 != 0 {
-                    return Err(pipeline::CreateRenderPipelineError::UnalignedShader {
-                        binding: binding.binding,
-                        group: binding.group,
-                        size: size.get(),
-                    });
-                }
-            }
-        }
-
-        let late_sized_buffer_groups =
-            Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard);
-
-        let pipeline_desc = hal::RenderPipelineDescriptor {
-            label: desc.label.borrow_option(),
-            layout: &layout.raw,
-            vertex_buffers: &vertex_buffers,
-            vertex_stage,
-            primitive: desc.primitive,
-            depth_stencil: desc.depth_stencil.clone(),
-            multisample: desc.multisample,
-            fragment_stage,
-            color_targets,
-            multiview: desc.multiview,
-        };
-        let raw =
-            unsafe { self.raw.create_render_pipeline(&pipeline_desc) }.map_err(
-                |err| match err {
-                    hal::PipelineError::Device(error) => {
-                        pipeline::CreateRenderPipelineError::Device(error.into())
-                    }
-                    hal::PipelineError::Linkage(stage, msg) => {
-                        pipeline::CreateRenderPipelineError::Internal { stage, error: msg }
-                    }
-                    hal::PipelineError::EntryPoint(stage) => {
-                        pipeline::CreateRenderPipelineError::Internal {
-                            stage: hal::auxil::map_naga_stage(stage),
-                            error: EP_FAILURE.to_string(),
-                        }
-                    }
-                },
-            )?;
-
-        let pass_context = RenderPassContext {
-            attachments: AttachmentData {
-                colors: color_targets
-                    .iter()
-                    .map(|state| state.as_ref().map(|s| s.format))
-                    .collect(),
-                resolves: ArrayVec::new(),
-                depth_stencil: depth_stencil_state.as_ref().map(|state| state.format),
-            },
-            sample_count: samples,
-            multiview: desc.multiview,
-        };
-
-        let mut flags = pipeline::PipelineFlags::empty();
-        for state in color_targets.iter().filter_map(|s| s.as_ref()) {
-            if let Some(ref bs) = state.blend {
-                if bs.color.uses_constant() | bs.alpha.uses_constant() {
-                    flags |= pipeline::PipelineFlags::BLEND_CONSTANT;
-                }
-            }
-        }
-        if let Some(ds) = depth_stencil_state.as_ref() {
-            if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
-                flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
-            }
-            if !ds.is_depth_read_only() {
-                flags |= pipeline::PipelineFlags::WRITES_DEPTH;
-            }
-            if !ds.is_stencil_read_only(desc.primitive.cull_mode) {
-                flags |= pipeline::PipelineFlags::WRITES_STENCIL;
-            }
-        }
-
-        let pipeline = pipeline::RenderPipeline {
-            raw,
-            layout_id: Stored {
-                value: id::Valid(pipeline_layout_id),
-                ref_count: layout.life_guard.add_ref(),
-            },
-            device_id: Stored {
-                value: id::Valid(self_id),
-                ref_count: self.life_guard.add_ref(),
-            },
-            pass_context,
-            flags,
-            strip_index_format: desc.primitive.strip_index_format,
-            vertex_steps,
-            late_sized_buffer_groups,
-            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
-        };
-        Ok(pipeline)
-    }
-
-    fn describe_format_features(
-        &self,
-        adapter: &Adapter<A>,
-        format: TextureFormat,
-    ) -> Result<wgt::TextureFormatFeatures, MissingFeatures> {
-        self.require_features(format.required_features())?;
-
-        let using_device_features = self
-            .features
-            .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
-        // If we're running downlevel, we need to manually ask the backend what
-        // we can use as we can't trust WebGPU.
-        let downlevel = !self.downlevel.is_webgpu_compliant();
-
-        if using_device_features || downlevel {
-            Ok(adapter.get_texture_format_features(format))
-        } else {
-            Ok(format.guaranteed_format_features(self.features))
-        }
-    }
-
-    fn wait_for_submit(
-        &self,
-        submission_index: SubmissionIndex,
-        token: &mut Token<Self>,
-    ) -> Result<(), WaitIdleError> {
-        let last_done_index = unsafe {
-            self.raw
-                .get_fence_value(&self.fence)
-                .map_err(DeviceError::from)?
-        };
-        if last_done_index < submission_index {
-            log::info!("Waiting for submission {:?}", submission_index);
-            unsafe {
-                self.raw
-                    .wait(&self.fence, submission_index, !0)
-                    .map_err(DeviceError::from)?
-            };
-            let closures = self
-                .lock_life(token)
-                .triage_submissions(submission_index, &self.command_allocator);
-            assert!(
-                closures.is_empty(),
-                "wait_for_submit is not expected to work with closures"
-            );
-        }
-        Ok(())
-    }
-
-    fn create_query_set(
-        &self,
-        self_id: id::DeviceId,
-        desc: &resource::QuerySetDescriptor,
-    ) -> Result<resource::QuerySet<A>, resource::CreateQuerySetError> {
-        use resource::CreateQuerySetError as Error;
-
-        match desc.ty {
-            wgt::QueryType::Occlusion => {}
-            wgt::QueryType::Timestamp => {
-                self.require_features(wgt::Features::TIMESTAMP_QUERY)?;
-            }
-            wgt::QueryType::PipelineStatistics(..) => {
-                self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?;
-            }
-        }
-
-        if desc.count == 0 {
-            return Err(Error::ZeroCount);
-        }
-
-        if desc.count > wgt::QUERY_SET_MAX_QUERIES {
-            return Err(Error::TooManyQueries {
-                count: desc.count,
-                maximum: wgt::QUERY_SET_MAX_QUERIES,
-            });
-        }
-
-        let hal_desc = desc.map_label(super::LabelHelpers::borrow_option);
-        Ok(resource::QuerySet {
-            raw: unsafe { self.raw.create_query_set(&hal_desc).unwrap() },
-            device_id: Stored {
-                value: id::Valid(self_id),
-                ref_count: self.life_guard.add_ref(),
-            },
-            life_guard: LifeGuard::new(""),
-            desc: desc.map_label(|_| ()),
-        })
-    }
-}
-
-impl<A: HalApi> Device<A> {
-    pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<A>) {
-        if let Some(raw) = buffer.raw {
-            unsafe {
-                self.raw.destroy_buffer(raw);
-            }
-        }
-    }
-
-    pub(crate) fn destroy_command_buffer(&self, cmd_buf: command::CommandBuffer<A>) {
-        let mut baked = cmd_buf.into_baked();
-        unsafe {
-            baked.encoder.reset_all(baked.list.into_iter());
-        }
-        unsafe {
-            self.raw.destroy_command_encoder(baked.encoder);
-        }
-    }
-
-    /// Wait for idle and remove resources that we can, before we die.
-    pub(crate) fn prepare_to_die(&mut self) {
-        self.pending_writes.deactivate();
-        let mut life_tracker = self.life_tracker.lock();
-        let current_index = self.active_submission_index;
-        if let Err(error) = unsafe { self.raw.wait(&self.fence, current_index, CLEANUP_WAIT_MS) } {
-            log::error!("failed to wait for the device: {:?}", error);
-        }
-        let _ = life_tracker.triage_submissions(current_index, &self.command_allocator);
-        life_tracker.cleanup(&self.raw);
-        #[cfg(feature = "trace")]
-        {
-            self.trace = None;
-        }
-    }
-
-    pub(crate) fn dispose(self) {
-        self.pending_writes.dispose(&self.raw);
-        self.command_allocator.into_inner().dispose(&self.raw);
-        unsafe {
-            self.raw.destroy_buffer(self.zero_buffer);
-            self.raw.destroy_fence(self.fence);
-            self.raw.exit(self.queue);
-        }
-    }
-}
-
-impl<A: HalApi> crate::hub::Resource for Device<A> {
-    const TYPE: &'static str = "Device";
-
-    fn life_guard(&self) -> &LifeGuard {
-        &self.life_guard
-    }
-}
-
 #[derive(Clone, Debug, Error)]
 #[error("Device is invalid")]
 pub struct InvalidDevice;
@@ -3441,2651 +343,3 @@ impl<G: GlobalIdentityHandlerFactory> ImplicitPipelineIds<'_, G> {
         }
     }
 }
-
-impl<G: GlobalIdentityHandlerFactory> Global<G> {
-    pub fn adapter_is_surface_supported<A: HalApi>(
-        &self,
-        adapter_id: id::AdapterId,
-        surface_id: id::SurfaceId,
-    ) -> Result<bool, instance::IsSurfaceSupportedError> {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (surface_guard, mut token) = self.surfaces.read(&mut token);
-        let (adapter_guard, mut _token) = hub.adapters.read(&mut token);
-        let adapter = adapter_guard
-            .get(adapter_id)
-            .map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?;
-        let surface = surface_guard
-            .get(surface_id)
-            .map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?;
-        Ok(adapter.is_surface_supported(surface))
-    }
-
-    pub fn surface_get_capabilities<A: HalApi>(
-        &self,
-        surface_id: id::SurfaceId,
-        adapter_id: id::AdapterId,
-    ) -> Result<wgt::SurfaceCapabilities, instance::GetSurfaceSupportError> {
-        profiling::scope!("Surface::get_capabilities");
-        self.fetch_adapter_and_surface::<A, _, wgt::SurfaceCapabilities>(surface_id, adapter_id, |adapter, surface| {
-            let mut hal_caps = surface.get_capabilities(adapter)?;
-
-            hal_caps.formats.sort_by_key(|f| !f.is_srgb());
-
-            Ok(wgt::SurfaceCapabilities {
-                formats: hal_caps.formats,
-                present_modes: hal_caps.present_modes,
-                alpha_modes: hal_caps.composite_alpha_modes,
-            })
-        })
-    }
-
-    fn fetch_adapter_and_surface<
-        A: HalApi,
-        F: FnOnce(&Adapter<A>, &Surface) -> Result<B, instance::GetSurfaceSupportError>,
-        B,
-    >(
-        &self,
-        surface_id: id::SurfaceId,
-        adapter_id: id::AdapterId,
-        get_supported_callback: F,
-    ) -> Result<B, instance::GetSurfaceSupportError> {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (surface_guard, mut token) = self.surfaces.read(&mut token);
-        let (adapter_guard, mut _token) = hub.adapters.read(&mut token);
-        let adapter = adapter_guard
-            .get(adapter_id)
-            .map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?;
-        let surface = surface_guard
-            .get(surface_id)
-            .map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?;
-
-        get_supported_callback(adapter, surface)
-    }
-
-    pub fn device_features<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-    ) -> Result<wgt::Features, InvalidDevice> {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, _) = hub.devices.read(&mut token);
-        let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
-
-        Ok(device.features)
-    }
-
-    pub fn device_limits<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-    ) -> Result<wgt::Limits, InvalidDevice> {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, _) = hub.devices.read(&mut token);
-        let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
-
-        Ok(device.limits.clone())
-    }
-
-    pub fn device_downlevel_properties<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-    ) -> Result<wgt::DownlevelCapabilities, InvalidDevice> {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, _) = hub.devices.read(&mut token);
-        let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
-
-        Ok(device.downlevel.clone())
-    }
-
-    pub fn device_create_buffer<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &resource::BufferDescriptor,
-        id_in: Input<G, id::BufferId>,
-    ) -> (id::BufferId, Option<resource::CreateBufferError>) {
-        profiling::scope!("Device::create_buffer");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.buffers.prepare(id_in);
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                let mut desc = desc.clone();
-                let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false);
-                if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
-                    desc.usage |= wgt::BufferUsages::COPY_DST;
-                }
-                trace
-                    .lock()
-                    .add(trace::Action::CreateBuffer(fid.id(), desc));
-            }
-
-            let mut buffer = match device.create_buffer(device_id, desc, false) {
-                Ok(buffer) => buffer,
-                Err(e) => break e,
-            };
-            let ref_count = buffer.life_guard.add_ref();
-
-            let buffer_use = if !desc.mapped_at_creation {
-                hal::BufferUses::empty()
-            } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
-                // buffer is mappable, so we are just doing that at start
-                let map_size = buffer.size;
-                let ptr = if map_size == 0 {
-                    std::ptr::NonNull::dangling()
-                } else {
-                    match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) {
-                        Ok(ptr) => ptr,
-                        Err(e) => {
-                            let raw = buffer.raw.unwrap();
-                            device.lock_life(&mut token).schedule_resource_destruction(
-                                queue::TempResource::Buffer(raw),
-                                !0,
-                            );
-                            break e.into();
-                        }
-                    }
-                };
-                buffer.map_state = resource::BufferMapState::Active {
-                    ptr,
-                    range: 0..map_size,
-                    host: HostMap::Write,
-                };
-                hal::BufferUses::MAP_WRITE
-            } else {
-                // buffer needs staging area for initialization only
-                let stage_desc = wgt::BufferDescriptor {
-                    label: Some(Cow::Borrowed(
-                        "(wgpu internal) initializing unmappable buffer",
-                    )),
-                    size: desc.size,
-                    usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC,
-                    mapped_at_creation: false,
-                };
-                let mut stage = match device.create_buffer(device_id, &stage_desc, true) {
-                    Ok(stage) => stage,
-                    Err(e) => {
-                        let raw = buffer.raw.unwrap();
-                        device
-                            .lock_life(&mut token)
-                            .schedule_resource_destruction(queue::TempResource::Buffer(raw), !0);
-                        break e;
-                    }
-                };
-                let stage_buffer = stage.raw.unwrap();
-                let mapping = match unsafe { device.raw.map_buffer(&stage_buffer, 0..stage.size) } {
-                    Ok(mapping) => mapping,
-                    Err(e) => {
-                        let raw = buffer.raw.unwrap();
-                        let mut life_lock = device.lock_life(&mut token);
-                        life_lock
-                            .schedule_resource_destruction(queue::TempResource::Buffer(raw), !0);
-                        life_lock.schedule_resource_destruction(
-                            queue::TempResource::Buffer(stage_buffer),
-                            !0,
-                        );
-                        break DeviceError::from(e).into();
-                    }
-                };
-
-                assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0);
-                // Zero initialize memory and then mark both staging and buffer as initialized
-                // (it's guaranteed that this is the case by the time the buffer is usable)
-                unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) };
-                buffer.initialization_status.drain(0..buffer.size);
-                stage.initialization_status.drain(0..buffer.size);
-
-                buffer.map_state = resource::BufferMapState::Init {
-                    ptr: mapping.ptr,
-                    needs_flush: !mapping.is_coherent,
-                    stage_buffer,
-                };
-                hal::BufferUses::COPY_DST
-            };
-
-            let id = fid.assign(buffer, &mut token);
-            log::info!("Created buffer {:?} with {:?}", id, desc);
-
-            device
-                .trackers
-                .lock()
-                .buffers
-                .insert_single(id, ref_count, buffer_use);
-
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    /// Assign `id_in` an error with the given `label`.
-    ///
-    /// Ensure that future attempts to use `id_in` as a buffer ID will propagate
-    /// the error, following the WebGPU ["contagious invalidity"] style.
-    ///
-    /// Firefox uses this function to comply strictly with the WebGPU spec,
-    /// which requires [`GPUBufferDescriptor`] validation to be generated on the
-    /// Device timeline and leave the newly created [`GPUBuffer`] invalid.
-    ///
-    /// Ideally, we would simply let [`device_create_buffer`] take care of all
-    /// of this, but some errors must be detected before we can even construct a
-    /// [`wgpu_types::BufferDescriptor`] to give it. For example, the WebGPU API
-    /// allows a `GPUBufferDescriptor`'s [`usage`] property to be any WebIDL
-    /// `unsigned long` value, but we can't construct a
-    /// [`wgpu_types::BufferUsages`] value from values with unassigned bits
-    /// set. This means we must validate `usage` before we can call
-    /// `device_create_buffer`.
-    ///
-    /// When that validation fails, we must arrange for the buffer id to be
-    /// considered invalid. This method provides the means to do so.
-    ///
-    /// ["contagious invalidity"]: https://www.w3.org/TR/webgpu/#invalidity
-    /// [`GPUBufferDescriptor`]: https://www.w3.org/TR/webgpu/#dictdef-gpubufferdescriptor
-    /// [`GPUBuffer`]: https://www.w3.org/TR/webgpu/#gpubuffer
-    /// [`wgpu_types::BufferDescriptor`]: wgt::BufferDescriptor
-    /// [`device_create_buffer`]: Global::device_create_buffer
-    /// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gputexturedescriptor-usage
-    /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages
-    pub fn create_buffer_error<A: HalApi>(&self, id_in: Input<G, id::BufferId>, label: Label) {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.buffers.prepare(id_in);
-
-        fid.assign_error(label.borrow_or_default(), &mut token);
-    }
-
-    pub fn create_render_bundle_error<A: HalApi>(
-        &self,
-        id_in: Input<G, id::RenderBundleId>,
-        label: Label,
-    ) {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.render_bundles.prepare(id_in);
-
-        let (_, mut token) = hub.devices.read(&mut token);
-        fid.assign_error(label.borrow_or_default(), &mut token);
-    }
-
-    /// Assign `id_in` an error with the given `label`.
-    ///
-    /// See `create_buffer_error` for more context and explanation.
-    pub fn create_texture_error<A: HalApi>(&self, id_in: Input<G, id::TextureId>, label: Label) {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.textures.prepare(id_in);
-
-        fid.assign_error(label.borrow_or_default(), &mut token);
-    }
-
-    #[cfg(feature = "replay")]
-    pub fn device_wait_for_buffer<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        buffer_id: id::BufferId,
-    ) -> Result<(), WaitIdleError> {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let last_submission = {
-            let (buffer_guard, _) = hub.buffers.write(&mut token);
-            match buffer_guard.get(buffer_id) {
-                Ok(buffer) => buffer.life_guard.life_count(),
-                Err(_) => return Ok(()),
-            }
-        };
-
-        device_guard
-            .get(device_id)
-            .map_err(|_| DeviceError::Invalid)?
-            .wait_for_submit(last_submission, &mut token)
-    }
-
-    #[doc(hidden)]
-    pub fn device_set_buffer_sub_data<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        buffer_id: id::BufferId,
-        offset: BufferAddress,
-        data: &[u8],
-    ) -> BufferAccessResult {
-        profiling::scope!("Device::set_buffer_sub_data");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
-        let device = device_guard
-            .get(device_id)
-            .map_err(|_| DeviceError::Invalid)?;
-        let buffer = buffer_guard
-            .get_mut(buffer_id)
-            .map_err(|_| BufferAccessError::Invalid)?;
-        check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
-        //assert!(buffer isn't used by the GPU);
-
-        #[cfg(feature = "trace")]
-        if let Some(ref trace) = device.trace {
-            let mut trace = trace.lock();
-            let data_path = trace.make_binary("bin", data);
-            trace.add(trace::Action::WriteBuffer {
-                id: buffer_id,
-                data: data_path,
-                range: offset..offset + data.len() as BufferAddress,
-                queued: false,
-            });
-        }
-
-        let raw_buf = buffer.raw.as_ref().unwrap();
-        unsafe {
-            let mapping = device
-                .raw
-                .map_buffer(raw_buf, offset..offset + data.len() as u64)
-                .map_err(DeviceError::from)?;
-            ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len());
-            if !mapping.is_coherent {
-                device
-                    .raw
-                    .flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64));
-            }
-            device
-                .raw
-                .unmap_buffer(raw_buf)
-                .map_err(DeviceError::from)?;
-        }
-
-        Ok(())
-    }
-
-    #[doc(hidden)]
-    pub fn device_get_buffer_sub_data<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        buffer_id: id::BufferId,
-        offset: BufferAddress,
-        data: &mut [u8],
-    ) -> BufferAccessResult {
-        profiling::scope!("Device::get_buffer_sub_data");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
-        let device = device_guard
-            .get(device_id)
-            .map_err(|_| DeviceError::Invalid)?;
-        let buffer = buffer_guard
-            .get_mut(buffer_id)
-            .map_err(|_| BufferAccessError::Invalid)?;
-        check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?;
-        //assert!(buffer isn't used by the GPU);
-
-        let raw_buf = buffer.raw.as_ref().unwrap();
-        unsafe {
-            let mapping = device
-                .raw
-                .map_buffer(raw_buf, offset..offset + data.len() as u64)
-                .map_err(DeviceError::from)?;
-            if !mapping.is_coherent {
-                device.raw.invalidate_mapped_ranges(
-                    raw_buf,
-                    iter::once(offset..offset + data.len() as u64),
-                );
-            }
-            ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len());
-            device
-                .raw
-                .unmap_buffer(raw_buf)
-                .map_err(DeviceError::from)?;
-        }
-
Ok(()) - } - - pub fn buffer_label(&self, id: id::BufferId) -> String { - A::hub(self).buffers.label_for_resource(id) - } - - pub fn buffer_destroy( - &self, - buffer_id: id::BufferId, - ) -> Result<(), resource::DestroyError> { - profiling::scope!("Buffer::destroy"); - - let map_closure; - // Restrict the locks to this scope. - { - let hub = A::hub(self); - let mut token = Token::root(); - - //TODO: lock pending writes separately, keep the device read-only - let (mut device_guard, mut token) = hub.devices.write(&mut token); - - log::info!("Buffer {:?} is destroyed", buffer_id); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let buffer = buffer_guard - .get_mut(buffer_id) - .map_err(|_| resource::DestroyError::Invalid)?; - - let device = &mut device_guard[buffer.device_id.value]; - - map_closure = match &buffer.map_state { - &BufferMapState::Waiting(..) // To get the proper callback behavior. - | &BufferMapState::Init { .. } - | &BufferMapState::Active { .. } - => { - self.buffer_unmap_inner(buffer_id, buffer, device) - .unwrap_or(None) - } - _ => None, - }; - - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::FreeBuffer(buffer_id)); - } - - let raw = buffer - .raw - .take() - .ok_or(resource::DestroyError::AlreadyDestroyed)?; - let temp = queue::TempResource::Buffer(raw); - - if device.pending_writes.dst_buffers.contains(&buffer_id) { - device.pending_writes.temp_resources.push(temp); - } else { - let last_submit_index = buffer.life_guard.life_count(); - drop(buffer_guard); - device - .lock_life(&mut token) - .schedule_resource_destruction(temp, last_submit_index); - } - } - - // Note: outside the scope where locks are held when calling the callback - if let Some((operation, status)) = map_closure { - operation.callback.call(status); - } - - Ok(()) - } - - pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { - profiling::scope!("Buffer::drop"); - log::debug!("buffer {:?} is dropped", buffer_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let (ref_count, last_submit_index, device_id) = { - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - match buffer_guard.get_mut(buffer_id) { - Ok(buffer) => { - let ref_count = buffer.life_guard.ref_count.take().unwrap(); - let last_submit_index = buffer.life_guard.life_count(); - (ref_count, last_submit_index, buffer.device_id.value) - } - Err(InvalidId) => { - hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); - return; - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - { - let mut life_lock = device.lock_life(&mut token); - if device.pending_writes.dst_buffers.contains(&buffer_id) { - life_lock.future_suspected_buffers.push(Stored { - value: id::Valid(buffer_id), - ref_count, - }); - } else { - drop(ref_count); - life_lock - .suspected_resources - .buffers - .push(id::Valid(buffer_id)); - } - } - - if wait { - match device.wait_for_submit(last_submit_index, &mut token) { - Ok(()) => (), - Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), - } - } - } - - pub fn device_create_texture( - &self, - device_id: id::DeviceId, - desc: &resource::TextureDescriptor, - id_in: Input, - ) -> (id::TextureId, Option) { - profiling::scope!("Device::create_texture"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.textures.prepare(id_in); - - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let 
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace
-                    .lock()
-                    .add(trace::Action::CreateTexture(fid.id(), desc.clone()));
-            }
-
-            let adapter = &adapter_guard[device.adapter_id.value];
-            let texture = match device.create_texture(device_id, adapter, desc) {
-                Ok(texture) => texture,
-                Err(error) => break error,
-            };
-            let ref_count = texture.life_guard.add_ref();
-
-            let id = fid.assign(texture, &mut token);
-            log::info!("Created texture {:?} with {:?}", id, desc);
-
-            device.trackers.lock().textures.insert_single(
-                id.0,
-                ref_count,
-                hal::TextureUses::UNINITIALIZED,
-            );
-
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    /// # Safety
-    ///
-    /// - `hal_texture` must be created from `device_id` corresponding raw handle.
-    /// - `hal_texture` must be created respecting `desc`
-    /// - `hal_texture` must be initialized
-    pub unsafe fn create_texture_from_hal<A: HalApi>(
-        &self,
-        hal_texture: A::Texture,
-        device_id: id::DeviceId,
-        desc: &resource::TextureDescriptor,
-        id_in: Input<G, id::TextureId>,
-    ) -> (id::TextureId, Option<resource::CreateTextureError>) {
-        profiling::scope!("Device::create_texture");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.textures.prepare(id_in);
-
-        let (adapter_guard, mut token) = hub.adapters.read(&mut token);
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-
-            // NB: Any change done through the raw texture handle will not be
-            // recorded in the replay
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace
-                    .lock()
-                    .add(trace::Action::CreateTexture(fid.id(), desc.clone()));
-            }
-
-            let adapter = &adapter_guard[device.adapter_id.value];
-
-            let format_features = match device
-                .describe_format_features(adapter, desc.format)
-                .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))
-            {
-                Ok(features) => features,
-                Err(error) => break error,
-            };
-
-            let mut texture = device.create_texture_from_hal(
-                hal_texture,
-                conv::map_texture_usage(desc.usage, desc.format.into()),
-                device_id,
-                desc,
-                format_features,
-                resource::TextureClearMode::None,
-            );
-            if desc.usage.contains(wgt::TextureUsages::COPY_DST) {
-                texture.hal_usage |= hal::TextureUses::COPY_DST;
-            }
-
-            texture.initialization_status = TextureInitTracker::new(desc.mip_level_count, 0);
-
-            let ref_count = texture.life_guard.add_ref();
-
-            let id = fid.assign(texture, &mut token);
-            log::info!("Created texture {:?} with {:?}", id, desc);
-
-            device.trackers.lock().textures.insert_single(
-                id.0,
-                ref_count,
-                hal::TextureUses::UNINITIALIZED,
-            );
-
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    pub fn texture_label<A: HalApi>(&self, id: id::TextureId) -> String {
-        A::hub(self).textures.label_for_resource(id)
-    }
-
-    pub fn texture_destroy<A: HalApi>(
-        &self,
-        texture_id: id::TextureId,
-    ) -> Result<(), resource::DestroyError> {
-        profiling::scope!("Texture::destroy");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        //TODO: lock pending writes separately, keep the device read-only
-        let (mut device_guard, mut token) = hub.devices.write(&mut token);
-
-        log::info!("Texture {:?} is destroyed", texture_id);
-        let (mut texture_guard, _) = hub.textures.write(&mut token);
-        let texture = texture_guard
-            .get_mut(texture_id)
-            .map_err(|_| resource::DestroyError::Invalid)?;
-
-        let device = &mut device_guard[texture.device_id.value];
-
-        #[cfg(feature = "trace")]
-        if let Some(ref trace) = device.trace {
-            trace.lock().add(trace::Action::FreeTexture(texture_id));
-        }
-
-        let last_submit_index = texture.life_guard.life_count();
-
-        let clear_views =
-            match std::mem::replace(&mut texture.clear_mode, resource::TextureClearMode::None) {
-                resource::TextureClearMode::BufferCopy => SmallVec::new(),
-                resource::TextureClearMode::RenderPass { clear_views, .. } => clear_views,
-                resource::TextureClearMode::None => SmallVec::new(),
-            };
-
-        match texture.inner {
-            resource::TextureInner::Native { ref mut raw } => {
-                let raw = raw.take().ok_or(resource::DestroyError::AlreadyDestroyed)?;
-                let temp = queue::TempResource::Texture(raw, clear_views);
-
-                if device.pending_writes.dst_textures.contains(&texture_id) {
-                    device.pending_writes.temp_resources.push(temp);
-                } else {
-                    drop(texture_guard);
-                    device
-                        .lock_life(&mut token)
-                        .schedule_resource_destruction(temp, last_submit_index);
-                }
-            }
-            resource::TextureInner::Surface { .. } => {
-                for clear_view in clear_views {
-                    unsafe {
-                        device.raw.destroy_texture_view(clear_view);
-                    }
-                }
-                // TODO?
-            }
-        }
-
-        Ok(())
-    }
-
-    pub fn texture_drop<A: HalApi>(&self, texture_id: id::TextureId, wait: bool) {
-        profiling::scope!("Texture::drop");
-        log::debug!("texture {:?} is dropped", texture_id);
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (ref_count, last_submit_index, device_id) = {
-            let (mut texture_guard, _) = hub.textures.write(&mut token);
-            match texture_guard.get_mut(texture_id) {
-                Ok(texture) => {
-                    let ref_count = texture.life_guard.ref_count.take().unwrap();
-                    let last_submit_index = texture.life_guard.life_count();
-                    (ref_count, last_submit_index, texture.device_id.value)
-                }
-                Err(InvalidId) => {
-                    hub.textures
-                        .unregister_locked(texture_id, &mut *texture_guard);
-                    return;
-                }
-            }
-        };
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let device = &device_guard[device_id];
-        {
-            let mut life_lock = device.lock_life(&mut token);
-            if device.pending_writes.dst_textures.contains(&texture_id) {
-                life_lock.future_suspected_textures.push(Stored {
-                    value: id::Valid(texture_id),
-                    ref_count,
-                });
-            } else {
-                drop(ref_count);
-                life_lock
-                    .suspected_resources
-                    .textures
-                    .push(id::Valid(texture_id));
-            }
-        }
-
-        if wait {
-            match device.wait_for_submit(last_submit_index, &mut token) {
-                Ok(()) => (),
-                Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e),
-            }
-        }
-    }
-
-    pub fn texture_create_view<A: HalApi>(
-        &self,
-        texture_id: id::TextureId,
-        desc: &resource::TextureViewDescriptor,
-        id_in: Input<G, id::TextureViewId>,
-    ) -> (id::TextureViewId, Option<resource::CreateTextureViewError>) {
-        profiling::scope!("Texture::create_view");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.texture_views.prepare(id_in);
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let (texture_guard, mut token) = hub.textures.read(&mut token);
-        let error = loop {
-            let texture = match texture_guard.get(texture_id) {
-                Ok(texture) => texture,
-                Err(_) => break resource::CreateTextureViewError::InvalidTexture,
-            };
-            let device = &device_guard[texture.device_id.value];
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace.lock().add(trace::Action::CreateTextureView {
-                    id: fid.id(),
-                    parent_id: texture_id,
-                    desc: desc.clone(),
-                });
-            }
-
-            let view = match device.create_texture_view(texture, texture_id, desc) {
-                Ok(view) => view,
-                Err(e) => break e,
-            };
-            let ref_count = view.life_guard.add_ref();
-            let id = fid.assign(view, &mut token);
-
-            device.trackers.lock().views.insert_single(id, ref_count);
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    pub fn texture_view_label<A: HalApi>(&self, id: id::TextureViewId) -> String {
-        A::hub(self).texture_views.label_for_resource(id)
-    }
-
-    pub fn texture_view_drop<A: HalApi>(
-        &self,
-        texture_view_id: id::TextureViewId,
-        wait: bool,
-    ) -> Result<(), resource::TextureViewDestroyError> {
-        profiling::scope!("TextureView::drop");
-        log::debug!("texture view {:?} is dropped", texture_view_id);
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (last_submit_index, device_id) = {
-            let (mut texture_view_guard, _) = hub.texture_views.write(&mut token);
-
-            match texture_view_guard.get_mut(texture_view_id) {
-                Ok(view) => {
-                    let _ref_count = view.life_guard.ref_count.take();
-                    let last_submit_index = view.life_guard.life_count();
-                    (last_submit_index, view.device_id.value)
-                }
-                Err(InvalidId) => {
-                    hub.texture_views
-                        .unregister_locked(texture_view_id, &mut *texture_view_guard);
-                    return Ok(());
-                }
-            }
-        };
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let device = &device_guard[device_id];
-        device
-            .lock_life(&mut token)
-            .suspected_resources
-            .texture_views
-            .push(id::Valid(texture_view_id));
-
-        if wait {
-            match device.wait_for_submit(last_submit_index, &mut token) {
-                Ok(()) => (),
-                Err(e) => log::error!(
-                    "Failed to wait for texture view {:?}: {:?}",
-                    texture_view_id,
-                    e
-                ),
-            }
-        }
-        Ok(())
-    }
-
-    pub fn device_create_sampler<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &resource::SamplerDescriptor,
-        id_in: Input<G, id::SamplerId>,
-    ) -> (id::SamplerId, Option<resource::CreateSamplerError>) {
-        profiling::scope!("Device::create_sampler");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.samplers.prepare(id_in);
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace
-                    .lock()
-                    .add(trace::Action::CreateSampler(fid.id(), desc.clone()));
-            }
-
-            let sampler = match device.create_sampler(device_id, desc) {
-                Ok(sampler) => sampler,
-                Err(e) => break e,
-            };
-            let ref_count = sampler.life_guard.add_ref();
-            let id = fid.assign(sampler, &mut token);
-
-            device.trackers.lock().samplers.insert_single(id, ref_count);
-
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    pub fn sampler_label<A: HalApi>(&self, id: id::SamplerId) -> String {
-        A::hub(self).samplers.label_for_resource(id)
-    }
-
-    pub fn sampler_drop<A: HalApi>(&self, sampler_id: id::SamplerId) {
-        profiling::scope!("Sampler::drop");
-        log::debug!("sampler {:?} is dropped", sampler_id);
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let device_id = {
-            let (mut sampler_guard, _) = hub.samplers.write(&mut token);
-            match sampler_guard.get_mut(sampler_id) {
-                Ok(sampler) => {
-                    sampler.life_guard.ref_count.take();
-                    sampler.device_id.value
-                }
-                Err(InvalidId) => {
-                    hub.samplers
-                        .unregister_locked(sampler_id, &mut *sampler_guard);
-                    return;
-                }
-            }
-        };
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        device_guard[device_id]
-            .lock_life(&mut token)
-            .suspected_resources
-            .samplers
-            .push(id::Valid(sampler_id));
-    }
-
-    pub fn device_create_bind_group_layout<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &binding_model::BindGroupLayoutDescriptor,
-        id_in: Input<G, id::BindGroupLayoutId>,
-    ) -> (
-        id::BindGroupLayoutId,
-        Option<binding_model::CreateBindGroupLayoutError>,
-    ) {
-        profiling::scope!("Device::create_bind_group_layout");
-
-        let mut token = Token::root();
-        let hub = A::hub(self);
-        let fid = hub.bind_group_layouts.prepare(id_in);
-
-        let error = 'outer: loop {
-            let (device_guard, mut token) = hub.devices.read(&mut token);
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace
-                    .lock()
-                    .add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone()));
-            }
-
-            let mut entry_map = FastHashMap::default();
-            for entry in desc.entries.iter() {
-                if entry.binding > device.limits.max_bindings_per_bind_group {
-                    break 'outer binding_model::CreateBindGroupLayoutError::InvalidBindingIndex {
-                        binding: entry.binding,
-                        maximum: device.limits.max_bindings_per_bind_group,
-                    };
-                }
-                if entry_map.insert(entry.binding, *entry).is_some() {
-                    break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding(
-                        entry.binding,
-                    );
-                }
-            }
-
-            // If there is an equivalent BGL, just bump the refcount and return it.
-            // This is only applicable for identity filters that are generating new IDs,
-            // so their inputs are `PhantomData` of size 0.
-            if mem::size_of::<Input<G, id::BindGroupLayoutId>>() == 0 {
-                let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
-                if let Some(id) =
-                    Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard)
-                {
-                    return (id, None);
-                }
-            }
-
-            let layout = match device.create_bind_group_layout(
-                device_id,
-                desc.label.borrow_option(),
-                entry_map,
-            ) {
-                Ok(layout) => layout,
-                Err(e) => break e,
-            };
-
-            let id = fid.assign(layout, &mut token);
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    pub fn bind_group_layout_label<A: HalApi>(&self, id: id::BindGroupLayoutId) -> String {
-        A::hub(self).bind_group_layouts.label_for_resource(id)
-    }
-
-    pub fn bind_group_layout_drop<A: HalApi>(&self, bind_group_layout_id: id::BindGroupLayoutId) {
-        profiling::scope!("BindGroupLayout::drop");
-        log::debug!("bind group layout {:?} is dropped", bind_group_layout_id);
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let device_id = {
-            let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token);
-            match bind_group_layout_guard.get_mut(bind_group_layout_id) {
-                Ok(layout) => layout.device_id.value,
-                Err(InvalidId) => {
-                    hub.bind_group_layouts
-                        .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard);
-                    return;
-                }
-            }
-        };
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        device_guard[device_id]
-            .lock_life(&mut token)
-            .suspected_resources
-            .bind_group_layouts
-            .push(id::Valid(bind_group_layout_id));
-    }
-
-    pub fn device_create_pipeline_layout<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &binding_model::PipelineLayoutDescriptor,
-        id_in: Input<G, id::PipelineLayoutId>,
-    ) -> (
-        id::PipelineLayoutId,
-        Option<binding_model::CreatePipelineLayoutError>,
-    ) {
-        profiling::scope!("Device::create_pipeline_layout");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.pipeline_layouts.prepare(id_in);
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace
-                    .lock()
-                    .add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone()));
-            }
-
-            let layout = {
-                let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
-                match device.create_pipeline_layout(device_id, desc, &*bgl_guard) {
-                    Ok(layout) => layout,
-                    Err(e) => break e,
-                }
-            };
-
-            let id = fid.assign(layout, &mut token);
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    pub fn pipeline_layout_label<A: HalApi>(&self, id: id::PipelineLayoutId) -> String {
-        A::hub(self).pipeline_layouts.label_for_resource(id)
-    }
-
-    pub fn pipeline_layout_drop<A: HalApi>(&self, pipeline_layout_id: id::PipelineLayoutId) {
-        profiling::scope!("PipelineLayout::drop");
-        log::debug!("pipeline layout {:?} is dropped", pipeline_layout_id);
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_id, ref_count) = {
-            let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token);
-            match pipeline_layout_guard.get_mut(pipeline_layout_id) {
-                Ok(layout) => (
-                    layout.device_id.value,
-                    layout.life_guard.ref_count.take().unwrap(),
-                ),
-                Err(InvalidId) => {
-                    hub.pipeline_layouts
-                        .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard);
-                    return;
-                }
-            }
-        };
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        device_guard[device_id]
-            .lock_life(&mut token)
-            .suspected_resources
-            .pipeline_layouts
-            .push(Stored {
-                value: id::Valid(pipeline_layout_id),
-                ref_count,
-            });
-    }
-
-    pub fn device_create_bind_group<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &binding_model::BindGroupDescriptor,
-        id_in: Input<G, id::BindGroupId>,
-    ) -> (id::BindGroupId, Option<binding_model::CreateBindGroupError>) {
-        profiling::scope!("Device::create_bind_group");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.bind_groups.prepare(id_in);
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token);
-
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace
-                    .lock()
-                    .add(trace::Action::CreateBindGroup(fid.id(), desc.clone()));
-            }
-
-            let bind_group_layout = match bind_group_layout_guard.get(desc.layout) {
-                Ok(layout) => layout,
-                Err(_) => break binding_model::CreateBindGroupError::InvalidLayout,
-            };
-            let bind_group =
-                match device.create_bind_group(device_id, bind_group_layout, desc, hub, &mut token)
-                {
-                    Ok(bind_group) => bind_group,
-                    Err(e) => break e,
-                };
-            let ref_count = bind_group.life_guard.add_ref();
-
-            let id = fid.assign(bind_group, &mut token);
-            log::debug!("Bind group {:?}", id);
-
-            device
-                .trackers
-                .lock()
-                .bind_groups
-                .insert_single(id, ref_count);
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    pub fn bind_group_label<A: HalApi>(&self, id: id::BindGroupId) -> String {
-        A::hub(self).bind_groups.label_for_resource(id)
-    }
-
-    pub fn bind_group_drop<A: HalApi>(&self, bind_group_id: id::BindGroupId) {
-        profiling::scope!("BindGroup::drop");
log::debug!("bind group {:?} is dropped", bind_group_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let device_id = { - let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token); - match bind_group_guard.get_mut(bind_group_id) { - Ok(bind_group) => { - bind_group.life_guard.ref_count.take(); - bind_group.device_id.value - } - Err(InvalidId) => { - hub.bind_groups - .unregister_locked(bind_group_id, &mut *bind_group_guard); - return; - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .bind_groups - .push(id::Valid(bind_group_id)); - } - - pub fn device_create_shader_module( - &self, - device_id: id::DeviceId, - desc: &pipeline::ShaderModuleDescriptor, - source: pipeline::ShaderModuleSource, - id_in: Input, - ) -> ( - id::ShaderModuleId, - Option, - ) { - profiling::scope!("Device::create_shader_module"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.shader_modules.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); - let data = match source { - #[cfg(feature = "wgsl")] - pipeline::ShaderModuleSource::Wgsl(ref code) => { - trace.make_binary("wgsl", code.as_bytes()) - } - pipeline::ShaderModuleSource::Naga(ref module) => { - let string = - ron::ser::to_string_pretty(module, ron::ser::PrettyConfig::default()) - .unwrap(); - trace.make_binary("ron", string.as_bytes()) - } - pipeline::ShaderModuleSource::Dummy(_) => { - panic!("found `ShaderModuleSource::Dummy`") - } - }; - trace.add(trace::Action::CreateShaderModule { - id: fid.id(), - desc: desc.clone(), - data, - }); - }; - - let shader = match device.create_shader_module(device_id, desc, source) { - Ok(shader) => shader, - Err(e) => break e, - }; - let id = fid.assign(shader, &mut token); - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - // Unsafe-ness of internal calls has little to do with unsafe-ness of this. - #[allow(unused_unsafe)] - /// # Safety - /// - /// This function passes SPIR-V binary to the backend as-is and can potentially result in a - /// driver crash. 
-    pub unsafe fn device_create_shader_module_spirv<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &pipeline::ShaderModuleDescriptor,
-        source: Cow<[u32]>,
-        id_in: Input<G, id::ShaderModuleId>,
-    ) -> (
-        id::ShaderModuleId,
-        Option<pipeline::CreateShaderModuleError>,
-    ) {
-        profiling::scope!("Device::create_shader_module");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.shader_modules.prepare(id_in);
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                let mut trace = trace.lock();
-                let data = trace.make_binary("spv", unsafe {
-                    std::slice::from_raw_parts(source.as_ptr() as *const u8, source.len() * 4)
-                });
-                trace.add(trace::Action::CreateShaderModule {
-                    id: fid.id(),
-                    desc: desc.clone(),
-                    data,
-                });
-            };
-
-            let shader =
-                match unsafe { device.create_shader_module_spirv(device_id, desc, &source) } {
-                    Ok(shader) => shader,
-                    Err(e) => break e,
-                };
-            let id = fid.assign(shader, &mut token);
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    pub fn shader_module_label<A: HalApi>(&self, id: id::ShaderModuleId) -> String {
-        A::hub(self).shader_modules.label_for_resource(id)
-    }
-
-    pub fn shader_module_drop<A: HalApi>(&self, shader_module_id: id::ShaderModuleId) {
-        profiling::scope!("ShaderModule::drop");
-        log::debug!("shader module {:?} is dropped", shader_module_id);
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token);
-        if let Some(module) = module {
-            let device = &device_guard[module.device_id.value];
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace
-                    .lock()
-                    .add(trace::Action::DestroyShaderModule(shader_module_id));
-            }
-            unsafe {
-                device.raw.destroy_shader_module(module.raw);
-            }
-        }
-    }
-
-    pub fn device_create_command_encoder<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &wgt::CommandEncoderDescriptor<Label>,
-        id_in: Input<G, id::CommandEncoderId>,
-    ) -> (id::CommandEncoderId, Option<DeviceError>) {
-        profiling::scope!("Device::create_command_encoder");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.command_buffers.prepare(id_in);
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid,
-            };
-            let dev_stored = Stored {
-                value: id::Valid(device_id),
-                ref_count: device.life_guard.add_ref(),
-            };
-            let encoder = match device
-                .command_allocator
-                .lock()
-                .acquire_encoder(&device.raw, &device.queue)
-            {
-                Ok(raw) => raw,
-                Err(_) => break DeviceError::OutOfMemory,
-            };
-            let command_buffer = command::CommandBuffer::new(
-                encoder,
-                dev_stored,
-                device.limits.clone(),
-                device.downlevel.clone(),
-                device.features,
-                #[cfg(feature = "trace")]
-                device.trace.is_some(),
-                &desc.label,
-            );
-
-            let id = fid.assign(command_buffer, &mut token);
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    pub fn command_buffer_label<A: HalApi>(&self, id: id::CommandBufferId) -> String {
-        A::hub(self).command_buffers.label_for_resource(id)
-    }
-
-    pub fn command_encoder_drop<A: HalApi>(&self, command_encoder_id: id::CommandEncoderId) {
-        profiling::scope!("CommandEncoder::drop");
-        log::debug!("command encoder {:?} is dropped", command_encoder_id);
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (mut device_guard, mut token) = hub.devices.write(&mut token);
-        let (cmdbuf, _) = hub
-            .command_buffers
-            .unregister(command_encoder_id, &mut token);
-        if let Some(cmdbuf) = cmdbuf {
-            let device = &mut device_guard[cmdbuf.device_id.value];
-            device.untrack::<G>(hub, &cmdbuf.trackers, &mut token);
-            device.destroy_command_buffer(cmdbuf);
-        }
-    }
-
-    pub fn command_buffer_drop<A: HalApi>(&self, command_buffer_id: id::CommandBufferId) {
-        profiling::scope!("CommandBuffer::drop");
-        self.command_encoder_drop::<A>(command_buffer_id)
-    }
-
-    pub fn device_create_render_bundle_encoder(
-        &self,
-        device_id: id::DeviceId,
-        desc: &command::RenderBundleEncoderDescriptor,
-    ) -> (
-        id::RenderBundleEncoderId,
-        Option<command::CreateRenderBundleError>,
-    ) {
-        profiling::scope!("Device::create_render_bundle_encoder");
-        let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) {
-            Ok(encoder) => (encoder, None),
-            Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)),
-        };
-        (Box::into_raw(Box::new(encoder)), error)
-    }
-
-    pub fn render_bundle_encoder_finish<A: HalApi>(
-        &self,
-        bundle_encoder: command::RenderBundleEncoder,
-        desc: &command::RenderBundleDescriptor,
-        id_in: Input<G, id::RenderBundleId>,
-    ) -> (id::RenderBundleId, Option<command::RenderBundleError>) {
-        profiling::scope!("RenderBundleEncoder::finish");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let fid = hub.render_bundles.prepare(id_in);
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(bundle_encoder.parent()) {
-                Ok(device) => device,
-                Err(_) => break command::RenderBundleError::INVALID_DEVICE,
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace.lock().add(trace::Action::CreateRenderBundle {
-                    id: fid.id(),
-                    desc: trace::new_render_bundle_encoder_descriptor(
-                        desc.label.clone(),
&bundle_encoder.context, - bundle_encoder.is_depth_read_only, - bundle_encoder.is_stencil_read_only, - ), - base: bundle_encoder.to_base_pass(), - }); - } - - let render_bundle = match bundle_encoder.finish(desc, device, hub, &mut token) { - Ok(bundle) => bundle, - Err(e) => break e, - }; - - log::debug!("Render bundle"); - let ref_count = render_bundle.life_guard.add_ref(); - let id = fid.assign(render_bundle, &mut token); - - device.trackers.lock().bundles.insert_single(id, ref_count); - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn render_bundle_label(&self, id: id::RenderBundleId) -> String { - A::hub(self).render_bundles.label_for_resource(id) - } - - pub fn render_bundle_drop(&self, render_bundle_id: id::RenderBundleId) { - profiling::scope!("RenderBundle::drop"); - log::debug!("render bundle {:?} is dropped", render_bundle_id); - let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device_id = { - let (mut bundle_guard, _) = hub.render_bundles.write(&mut token); - match bundle_guard.get_mut(render_bundle_id) { - Ok(bundle) => { - bundle.life_guard.ref_count.take(); - bundle.device_id.value - } - Err(InvalidId) => { - hub.render_bundles - .unregister_locked(render_bundle_id, &mut *bundle_guard); - return; - } - } - }; - - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .render_bundles - .push(id::Valid(render_bundle_id)); - } - - pub fn device_create_query_set( - &self, - device_id: id::DeviceId, - desc: &resource::QuerySetDescriptor, - id_in: Input, - ) -> (id::QuerySetId, Option) { - profiling::scope!("Device::create_query_set"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.query_sets.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateQuerySet { - id: fid.id(), - desc: desc.clone(), - }); - } - - let query_set = match device.create_query_set(device_id, desc) { - Ok(query_set) => query_set, - Err(err) => break err, - }; - - let ref_count = query_set.life_guard.add_ref(); - let id = fid.assign(query_set, &mut token); - - device - .trackers - .lock() - .query_sets - .insert_single(id, ref_count); - - return (id.0, None); - }; - - let id = fid.assign_error("", &mut token); - (id, Some(error)) - } - - pub fn query_set_drop(&self, query_set_id: id::QuerySetId) { - profiling::scope!("QuerySet::drop"); - log::debug!("query set {:?} is dropped", query_set_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let device_id = { - let (mut query_set_guard, _) = hub.query_sets.write(&mut token); - let query_set = query_set_guard.get_mut(query_set_id).unwrap(); - query_set.life_guard.ref_count.take(); - query_set.device_id.value - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::DestroyQuerySet(query_set_id)); - } - - device - .lock_life(&mut token) - .suspected_resources - .query_sets - .push(id::Valid(query_set_id)); - } - - pub fn query_set_label(&self, id: id::QuerySetId) -> String { - 
-        A::hub(self).query_sets.label_for_resource(id)
-    }
-
-    pub fn device_create_render_pipeline<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &pipeline::RenderPipelineDescriptor,
-        id_in: Input<G, id::RenderPipelineId>,
-        implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
-    ) -> (
-        id::RenderPipelineId,
-        Option<pipeline::CreateRenderPipelineError>,
-    ) {
-        profiling::scope!("Device::create_render_pipeline");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let fid = hub.render_pipelines.prepare(id_in);
-        let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub));
-
-        let (adapter_guard, mut token) = hub.adapters.read(&mut token);
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            let adapter = &adapter_guard[device.adapter_id.value];
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace.lock().add(trace::Action::CreateRenderPipeline {
-                    id: fid.id(),
-                    desc: desc.clone(),
-                    implicit_context: implicit_context.clone(),
-                });
-            }
-
-            let pipeline = match device.create_render_pipeline(
-                device_id,
-                adapter,
-                desc,
-                implicit_context,
-                hub,
-                &mut token,
-            ) {
-                Ok(pair) => pair,
-                Err(e) => break e,
-            };
-            let ref_count = pipeline.life_guard.add_ref();
-
-            let id = fid.assign(pipeline, &mut token);
-            log::info!("Created render pipeline {:?} with {:?}", id, desc);
-
-            device
-                .trackers
-                .lock()
-                .render_pipelines
-                .insert_single(id, ref_count);
-
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    /// Get an ID of one of the bind group layouts. The ID adds a refcount,
-    /// which needs to be released by calling `bind_group_layout_drop`.
-    pub fn render_pipeline_get_bind_group_layout<A: HalApi>(
-        &self,
-        pipeline_id: id::RenderPipelineId,
-        index: u32,
-        id_in: Input<G, id::BindGroupLayoutId>,
-    ) -> (
-        id::BindGroupLayoutId,
-        Option<binding_model::GetBindGroupLayoutError>,
-    ) {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
-
-        let error = loop {
-            let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
-            let (_, mut token) = hub.bind_groups.read(&mut token);
-            let (pipeline_guard, _) = hub.render_pipelines.read(&mut token);
-
-            let pipeline = match pipeline_guard.get(pipeline_id) {
-                Ok(pipeline) => pipeline,
-                Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
-            };
-            let id = match pipeline_layout_guard[pipeline.layout_id.value]
-                .bind_group_layout_ids
-                .get(index as usize)
-            {
-                Some(id) => id,
-                None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
-            };
-
-            bgl_guard[*id].multi_ref_count.inc();
-            return (id.0, None);
-        };
-
-        let id = hub
-            .bind_group_layouts
-            .prepare(id_in)
-            .assign_error("", &mut token);
-        (id, Some(error))
-    }
-
-    pub fn render_pipeline_label<A: HalApi>(&self, id: id::RenderPipelineId) -> String {
-        A::hub(self).render_pipelines.label_for_resource(id)
-    }
-
-    pub fn render_pipeline_drop<A: HalApi>(&self, render_pipeline_id: id::RenderPipelineId) {
-        profiling::scope!("RenderPipeline::drop");
-        log::debug!("render pipeline {:?} is dropped", render_pipeline_id);
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-
-        let (device_id, layout_id) = {
-            let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token);
-            match pipeline_guard.get_mut(render_pipeline_id) {
-                Ok(pipeline) => {
-                    pipeline.life_guard.ref_count.take();
-                    (pipeline.device_id.value, pipeline.layout_id.clone())
-                }
-                Err(InvalidId) => {
-                    hub.render_pipelines
-                        .unregister_locked(render_pipeline_id, &mut *pipeline_guard);
-                    return;
-                }
-            }
-        };
-
-        let mut life_lock = device_guard[device_id].lock_life(&mut token);
-        life_lock
-            .suspected_resources
-            .render_pipelines
-            .push(id::Valid(render_pipeline_id));
-        life_lock
-            .suspected_resources
-            .pipeline_layouts
-            .push(layout_id);
-    }
-
-    pub fn device_create_compute_pipeline<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        desc: &pipeline::ComputePipelineDescriptor,
-        id_in: Input<G, id::ComputePipelineId>,
-        implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
-    ) -> (
-        id::ComputePipelineId,
-        Option<pipeline::CreateComputePipelineError>,
-    ) {
-        profiling::scope!("Device::create_compute_pipeline");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let fid = hub.compute_pipelines.prepare(id_in);
-        let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub));
-
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let error = loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace.lock().add(trace::Action::CreateComputePipeline {
-                    id: fid.id(),
-                    desc: desc.clone(),
-                    implicit_context: implicit_context.clone(),
-                });
-            }
-
-            let pipeline = match device.create_compute_pipeline(
-                device_id,
-                desc,
-                implicit_context,
-                hub,
-                &mut token,
-            ) {
-                Ok(pair) => pair,
-                Err(e) => break e,
-            };
-            let ref_count = pipeline.life_guard.add_ref();
-
-            let id = fid.assign(pipeline, &mut token);
-            log::info!("Created compute pipeline {:?} with {:?}", id, desc);
-
-            device
-                .trackers
-                .lock()
-                .compute_pipelines
-                .insert_single(id, ref_count);
-            return (id.0, None);
-        };
-
-        let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
-        (id, Some(error))
-    }
-
-    /// Get an ID of one of the bind group layouts. The ID adds a refcount,
-    /// which needs to be released by calling `bind_group_layout_drop`.
-    pub fn compute_pipeline_get_bind_group_layout<A: HalApi>(
-        &self,
-        pipeline_id: id::ComputePipelineId,
-        index: u32,
-        id_in: Input<G, id::BindGroupLayoutId>,
-    ) -> (
-        id::BindGroupLayoutId,
-        Option<binding_model::GetBindGroupLayoutError>,
-    ) {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
-
-        let error = loop {
-            let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
-            let (_, mut token) = hub.bind_groups.read(&mut token);
-            let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
-
-            let pipeline = match pipeline_guard.get(pipeline_id) {
-                Ok(pipeline) => pipeline,
-                Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
-            };
-            let id = match pipeline_layout_guard[pipeline.layout_id.value]
-                .bind_group_layout_ids
-                .get(index as usize)
-            {
-                Some(id) => id,
-                None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
-            };
-
-            bgl_guard[*id].multi_ref_count.inc();
-            return (id.0, None);
-        };
-
-        let id = hub
-            .bind_group_layouts
-            .prepare(id_in)
-            .assign_error("", &mut token);
-        (id, Some(error))
-    }
-
-    pub fn compute_pipeline_label<A: HalApi>(&self, id: id::ComputePipelineId) -> String {
-        A::hub(self).compute_pipelines.label_for_resource(id)
-    }
-
-    pub fn compute_pipeline_drop<A: HalApi>(&self, compute_pipeline_id: id::ComputePipelineId) {
-        profiling::scope!("ComputePipeline::drop");
-        log::debug!("compute pipeline {:?} is dropped", compute_pipeline_id);
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-
-        let (device_id, layout_id) = {
-            let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token);
-            match pipeline_guard.get_mut(compute_pipeline_id) {
-                Ok(pipeline) => {
-                    pipeline.life_guard.ref_count.take();
-                    (pipeline.device_id.value, pipeline.layout_id.clone())
-                }
-                Err(InvalidId) => {
-                    hub.compute_pipelines
-                        .unregister_locked(compute_pipeline_id, &mut *pipeline_guard);
-                    return;
-                }
-            }
-        };
-
-        let mut life_lock = device_guard[device_id].lock_life(&mut token);
-        life_lock
-            .suspected_resources
-            .compute_pipelines
-            .push(id::Valid(compute_pipeline_id));
-        life_lock
-            .suspected_resources
-            .pipeline_layouts
-            .push(layout_id);
-    }
-
-    pub fn surface_configure<A: HalApi>(
-        &self,
-        surface_id: id::SurfaceId,
-        device_id: id::DeviceId,
-        config: &wgt::SurfaceConfiguration<Vec<TextureFormat>>,
-    ) -> Option<present::ConfigureSurfaceError> {
-        use hal::{Adapter as _, Surface as _};
-        use present::ConfigureSurfaceError as E;
-        profiling::scope!("surface_configure");
-
-        fn validate_surface_configuration(
-            config: &mut hal::SurfaceConfiguration,
-            caps: &hal::SurfaceCapabilities,
-        ) -> Result<(), E> {
-            let width = config.extent.width;
-            let height = config.extent.height;
-            if width < caps.extents.start().width
-                || width > caps.extents.end().width
-                || height < caps.extents.start().height
-                || height > caps.extents.end().height
-            {
-                log::warn!(
-                    "Requested size {}x{} is outside of the supported range: {:?}",
-                    width,
-                    height,
-                    caps.extents
-                );
-            }
-            if !caps.present_modes.contains(&config.present_mode) {
-                let new_mode = 'b: loop {
-                    // Automatic present mode checks.
-                    //
-                    // The "Automatic" modes are never supported by the backends.
-                    let fallbacks = match config.present_mode {
-                        wgt::PresentMode::AutoVsync => {
-                            &[wgt::PresentMode::FifoRelaxed, wgt::PresentMode::Fifo][..]
-                        }
-                        // Always end in FIFO to make sure it's always supported
-                        wgt::PresentMode::AutoNoVsync => &[
-                            wgt::PresentMode::Immediate,
-                            wgt::PresentMode::Mailbox,
-                            wgt::PresentMode::Fifo,
-                        ][..],
-                        _ => {
-                            return Err(E::UnsupportedPresentMode {
-                                requested: config.present_mode,
-                                available: caps.present_modes.clone(),
-                            });
-                        }
-                    };
-
-                    for &fallback in fallbacks {
-                        if caps.present_modes.contains(&fallback) {
-                            break 'b fallback;
-                        }
-                    }
-
-                    unreachable!("Fallback system failed to choose present mode. This is a bug. Mode: {:?}, Options: {:?}", config.present_mode, &caps.present_modes);
-                };
-
-                log::info!(
-                    "Automatically choosing presentation mode by rule {:?}. Chose {new_mode:?}",
-                    config.present_mode
-                );
-                config.present_mode = new_mode;
-            }
-            if !caps.formats.contains(&config.format) {
-                return Err(E::UnsupportedFormat {
-                    requested: config.format,
-                    available: caps.formats.clone(),
-                });
-            }
-            if !caps
-                .composite_alpha_modes
-                .contains(&config.composite_alpha_mode)
-            {
-                let new_alpha_mode = 'alpha: loop {
-                    // Automatic alpha mode checks.
-                    let fallbacks = match config.composite_alpha_mode {
-                        wgt::CompositeAlphaMode::Auto => &[
-                            wgt::CompositeAlphaMode::Opaque,
-                            wgt::CompositeAlphaMode::Inherit,
-                        ][..],
-                        _ => {
-                            return Err(E::UnsupportedAlphaMode {
-                                requested: config.composite_alpha_mode,
-                                available: caps.composite_alpha_modes.clone(),
-                            });
-                        }
-                    };
-
-                    for &fallback in fallbacks {
-                        if caps.composite_alpha_modes.contains(&fallback) {
-                            break 'alpha fallback;
-                        }
-                    }
-
-                    unreachable!(
-                        "Fallback system failed to choose alpha mode. This is a bug. \
-                        AlphaMode: {:?}, Options: {:?}",
-                        config.composite_alpha_mode, &caps.composite_alpha_modes
-                    );
-                };
-
-                log::info!(
-                    "Automatically choosing alpha mode by rule {:?}. Chose {new_alpha_mode:?}",
-                    config.composite_alpha_mode
-                );
-                config.composite_alpha_mode = new_alpha_mode;
-            }
-            if !caps.usage.contains(config.usage) {
-                return Err(E::UnsupportedUsage);
-            }
-            if width == 0 || height == 0 {
-                return Err(E::ZeroArea);
-            }
-            Ok(())
-        }
-
-        log::info!("configuring surface with {:?}", config);
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
-        let (adapter_guard, mut token) = hub.adapters.read(&mut token);
-        let (device_guard, _token) = hub.devices.read(&mut token);
-
-        let error = 'outer: loop {
-            let device = match device_guard.get(device_id) {
-                Ok(device) => device,
-                Err(_) => break DeviceError::Invalid.into(),
-            };
-            #[cfg(feature = "trace")]
-            if let Some(ref trace) = device.trace {
-                trace
-                    .lock()
-                    .add(trace::Action::ConfigureSurface(surface_id, config.clone()));
-            }
-
-            let surface = match surface_guard.get_mut(surface_id) {
-                Ok(surface) => surface,
-                Err(_) => break E::InvalidSurface,
-            };
-
-            let caps = unsafe {
-                let suf = A::get_surface(surface);
-                let adapter = &adapter_guard[device.adapter_id.value];
-                match adapter.raw.adapter.surface_capabilities(&suf.unwrap().raw) {
-                    Some(caps) => caps,
-                    None => break E::UnsupportedQueueFamily,
-                }
-            };
-
-            let mut hal_view_formats = vec![];
-            for format in config.view_formats.iter() {
-                if *format == config.format {
-                    continue;
-                }
-                if !caps.formats.contains(&config.format) {
-                    break 'outer E::UnsupportedFormat {
-                        requested: config.format,
-                        available: caps.formats,
-                    };
-                }
-                if config.format.remove_srgb_suffix() != format.remove_srgb_suffix() {
-                    break 'outer E::InvalidViewFormat(*format, config.format);
-                }
-                hal_view_formats.push(*format);
-            }
-
-            if !hal_view_formats.is_empty() {
-                if let Err(missing_flag) =
-                    device.require_downlevel_flags(wgt::DownlevelFlags::SURFACE_VIEW_FORMATS)
-                {
-                    break 'outer E::MissingDownlevelFlags(missing_flag);
-                }
-            }
-
-            let num_frames = present::DESIRED_NUM_FRAMES
-                .clamp(*caps.swap_chain_sizes.start(), *caps.swap_chain_sizes.end());
-            let mut hal_config = hal::SurfaceConfiguration {
-                swap_chain_size: num_frames,
-                present_mode: config.present_mode,
-                composite_alpha_mode: config.alpha_mode,
-                format: config.format,
-                extent: wgt::Extent3d {
-                    width: config.width,
-                    height: config.height,
-                    depth_or_array_layers: 1,
-                },
-                usage: conv::map_texture_usage(config.usage, hal::FormatAspects::COLOR),
-                view_formats: hal_view_formats,
-            };
-
-            if let Err(error) = validate_surface_configuration(&mut hal_config, &caps) {
-                break error;
-            }
-
-            match unsafe {
-                A::get_surface_mut(surface)
-                    .unwrap()
-                    .raw
-                    .configure(&device.raw, &hal_config)
-            } {
-                Ok(()) => (),
-                Err(error) => {
-                    break match error {
-                        hal::SurfaceError::Outdated | hal::SurfaceError::Lost => E::InvalidSurface,
-                        hal::SurfaceError::Device(error) => E::Device(error.into()),
-                        hal::SurfaceError::Other(message) => {
-                            log::error!("surface configuration failed: {}", message);
-                            E::InvalidSurface
-                        }
-                    }
-                }
-            }
-
-            if let Some(present) = surface.presentation.take() {
-                if present.acquired_texture.is_some() {
-                    break E::PreviousOutputExists;
-                }
-            }
-
-            surface.presentation = Some(present::Presentation {
-                device_id: Stored {
-                    value: id::Valid(device_id),
-                    ref_count: device.life_guard.add_ref(),
-                },
-                config: config.clone(),
-                num_frames,
-                acquired_texture: None,
-            });
-
-            return None;
-        };
-
-        Some(error)
-    }
-
-    #[cfg(feature = "replay")]
-    /// Only triage suspected resource IDs. This helps us to avoid ID collisions
-    /// upon creating new resources when re-playing a trace.
-    pub fn device_maintain_ids<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-    ) -> Result<(), InvalidDevice> {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
-        device.lock_life(&mut token).triage_suspected(
-            hub,
-            &device.trackers,
-            #[cfg(feature = "trace")]
-            None,
-            &mut token,
-        );
-        Ok(())
-    }
-
-    /// Check `device_id` for freeable resources and completed buffer mappings.
-    ///
-    /// Return `queue_empty`, true if there are no more queue submissions still in flight.
-    pub fn device_poll<A: HalApi>(
-        &self,
-        device_id: id::DeviceId,
-        maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
-    ) -> Result<bool, WaitIdleError> {
-        let (closures, queue_empty) = {
-            if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
-                if submission_index.queue_id != device_id {
-                    return Err(WaitIdleError::WrongSubmissionIndex(
-                        submission_index.queue_id,
-                        device_id,
-                    ));
-                }
-            }
-
-            let hub = A::hub(self);
-            let mut token = Token::root();
-            let (device_guard, mut token) = hub.devices.read(&mut token);
-            device_guard
-                .get(device_id)
-                .map_err(|_| DeviceError::Invalid)?
-                .maintain(hub, maintain, &mut token)?
-        };
-
-        closures.fire();
-
-        Ok(queue_empty)
-    }
-
-    /// Poll all devices belonging to the backend `A`.
-    ///
-    /// If `force_wait` is true, block until all buffer mappings are done.
-    ///
-    /// Return `all_queue_empty`, true if there are no more queue
-    /// submissions still in flight.
-    fn poll_devices<A: HalApi>(
-        &self,
-        force_wait: bool,
-        closures: &mut UserClosures,
-    ) -> Result<bool, WaitIdleError> {
-        profiling::scope!("poll_devices");
-
-        let hub = A::hub(self);
-        let mut devices_to_drop = vec![];
-        let mut all_queue_empty = true;
-        {
-            let mut token = Token::root();
-            let (device_guard, mut token) = hub.devices.read(&mut token);
-
-            for (id, device) in device_guard.iter(A::VARIANT) {
-                let maintain = if force_wait {
-                    wgt::Maintain::Wait
-                } else {
-                    wgt::Maintain::Poll
-                };
-                let (cbs, queue_empty) = device.maintain(hub, maintain, &mut token)?;
-                all_queue_empty = all_queue_empty && queue_empty;
-
-                // If the device's own `RefCount` clone is the only one left, and
-                // its submission queue is empty, then it can be freed.
-                if queue_empty && device.ref_count.load() == 1 {
-                    devices_to_drop.push(id);
-                }
-                closures.extend(cbs);
-            }
-        }
-
-        for device_id in devices_to_drop {
-            self.exit_device::<A>(device_id);
-        }
-
-        Ok(all_queue_empty)
-    }
-
-    /// Poll all devices on all backends.
-    ///
-    /// This is the implementation of `wgpu::Instance::poll_all`.
-    ///
-    /// Return `all_queue_empty`, true if there are no more queue
-    /// submissions still in flight.
-    pub fn poll_all_devices(&self, force_wait: bool) -> Result<bool, WaitIdleError> {
-        let mut closures = UserClosures::default();
-        let mut all_queue_empty = true;
-
-        #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
-        {
-            all_queue_empty = self.poll_devices::<hal::api::Vulkan>(force_wait, &mut closures)?
-                && all_queue_empty;
-        }
-        #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
-        {
-            all_queue_empty =
-                self.poll_devices::<hal::api::Metal>(force_wait, &mut closures)? && all_queue_empty;
-        }
-        #[cfg(all(feature = "dx12", windows))]
-        {
-            all_queue_empty =
-                self.poll_devices::<hal::api::Dx12>(force_wait, &mut closures)? && all_queue_empty;
-        }
-        #[cfg(all(feature = "dx11", windows))]
-        {
-            all_queue_empty =
-                self.poll_devices::<hal::api::Dx11>(force_wait, &mut closures)? && all_queue_empty;
-        }
-        #[cfg(feature = "gles")]
-        {
-            all_queue_empty =
-                self.poll_devices::<hal::api::Gles>(force_wait, &mut closures)? && all_queue_empty;
-        }
-
-        closures.fire();
-
-        Ok(all_queue_empty)
-    }
-
-    pub fn device_label<A: HalApi>(&self, id: id::DeviceId) -> String {
-        A::hub(self).devices.label_for_resource(id)
-    }
-
-    pub fn device_start_capture<A: HalApi>(&self, id: id::DeviceId) {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, _) = hub.devices.read(&mut token);
-        if let Ok(device) = device_guard.get(id) {
-            unsafe { device.raw.start_capture() };
-        }
-    }
-
-    pub fn device_stop_capture<A: HalApi>(&self, id: id::DeviceId) {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, _) = hub.devices.read(&mut token);
-        if let Ok(device) = device_guard.get(id) {
-            unsafe { device.raw.stop_capture() };
-        }
-    }
-
-    pub fn device_drop<A: HalApi>(&self, device_id: id::DeviceId) {
-        profiling::scope!("Device::drop");
-        log::debug!("device {:?} is dropped", device_id);
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        // For now, just drop the `RefCount` in `device.life_guard`, which
-        // stands for the user's reference to the device. We'll take care of
-        // cleaning up the device when we're polled, once its queue submissions
-        // have completed and it is no longer needed by other resources.
-        let (mut device_guard, _) = hub.devices.write(&mut token);
-        if let Ok(device) = device_guard.get_mut(device_id) {
-            device.life_guard.ref_count.take().unwrap();
-        }
-    }
-
-    /// Exit the unreferenced, inactive device `device_id`.
-    fn exit_device<A: HalApi>(&self, device_id: id::DeviceId) {
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let mut free_adapter_id = None;
-        {
-            let (device, mut _token) = hub.devices.unregister(device_id, &mut token);
-            if let Some(mut device) = device {
-                // The things `Device::prepare_to_die` takes care of are mostly
-                // unnecessary here. We know our queue is empty, so we don't
-                // need to wait for submissions or triage them. We know we were
-                // just polled, so `life_tracker.free_resources` is empty.
-                debug_assert!(device.lock_life(&mut _token).queue_empty());
-                device.pending_writes.deactivate();
-
-                // Adapter is only referenced by the device and itself.
-                // This isn't a robust way to destroy them, we should find a better one.
-                if device.adapter_id.ref_count.load() == 1 {
-                    free_adapter_id = Some(device.adapter_id.value.0);
-                }
-
-                device.dispose();
-            }
-        }
-
-        // Free the adapter now that we've dropped the `Device` token.
-        if let Some(free_adapter_id) = free_adapter_id {
-            let _ = hub.adapters.unregister(free_adapter_id, &mut token);
-        }
-    }
-
-    pub fn buffer_map_async<A: HalApi>(
-        &self,
-        buffer_id: id::BufferId,
-        range: Range<BufferAddress>,
-        op: BufferMapOperation,
-    ) -> BufferAccessResult {
-        // User callbacks must not be called while holding buffer_map_async_inner's locks, so we
-        // defer the error callback if it needs to be called immediately (typically when running
-        // into errors).
-        if let Err((op, err)) = self.buffer_map_async_inner::<A>(buffer_id, range, op) {
-            op.callback.call(Err(err.clone()));
-
-            return Err(err);
-        }
-
-        Ok(())
-    }
-
-    // Returns the mapping callback in case of error so that the callback can be fired outside
-    // of the locks that are held in this function.
-    fn buffer_map_async_inner<A: HalApi>(
-        &self,
-        buffer_id: id::BufferId,
-        range: Range<BufferAddress>,
-        op: BufferMapOperation,
-    ) -> Result<(), (BufferMapOperation, BufferAccessError)> {
-        profiling::scope!("Buffer::map_async");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (device_guard, mut token) = hub.devices.read(&mut token);
-        let (pub_usage, internal_use) = match op.host {
-            HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ),
-            HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
-        };
-
-        if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            return Err((op, BufferAccessError::UnalignedRange));
-        }
-
-        let (device_id, ref_count) = {
-            let (mut buffer_guard, _) = hub.buffers.write(&mut token);
-            let buffer = buffer_guard
-                .get_mut(buffer_id)
-                .map_err(|_| BufferAccessError::Invalid);
-
-            let buffer = match buffer {
-                Ok(b) => b,
-                Err(e) => {
-                    return Err((op, e));
-                }
-            };
-
-            if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) {
-                return Err((op, e.into()));
-            }
-
-            if range.start > range.end {
-                return Err((
-                    op,
-                    BufferAccessError::NegativeRange {
-                        start: range.start,
-                        end: range.end,
-                    },
-                ));
-            }
-            if range.end > buffer.size {
-                return Err((
-                    op,
-                    BufferAccessError::OutOfBoundsOverrun {
-                        index: range.end,
-                        max: buffer.size,
-                    },
-                ));
-            }
-
-            buffer.map_state = match buffer.map_state {
-                resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
-                    return Err((op, BufferAccessError::AlreadyMapped));
-                }
-                resource::BufferMapState::Waiting(_) => {
-                    return Err((op, BufferAccessError::MapAlreadyPending));
-                }
-                resource::BufferMapState::Idle => {
-                    resource::BufferMapState::Waiting(resource::BufferPendingMapping {
-                        range,
-                        op,
-                        _parent_ref_count: buffer.life_guard.add_ref(),
-                    })
-                }
-            };
-            log::debug!("Buffer {:?} map state -> Waiting", buffer_id);
-
-            let device = &device_guard[buffer.device_id.value];
-
-            let ret = (buffer.device_id.value, buffer.life_guard.add_ref());
-
-            let mut trackers = device.trackers.lock();
-            trackers
-                .buffers
-                .set_single(&*buffer_guard, buffer_id, internal_use);
-            trackers.buffers.drain();
-
-            ret
-        };
-
-        let device = &device_guard[device_id];
-
-        device
-            .lock_life(&mut token)
-            .map(id::Valid(buffer_id), ref_count);
-
-        Ok(())
-    }
-
-    pub fn buffer_get_mapped_range<A: HalApi>(
-        &self,
-        buffer_id: id::BufferId,
-        offset: BufferAddress,
-        size: Option<BufferAddress>,
-    ) -> Result<(*mut u8, u64), BufferAccessError> {
-        profiling::scope!("Buffer::get_mapped_range");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-        let (buffer_guard, _) = hub.buffers.read(&mut token);
-        let buffer = buffer_guard
-            .get(buffer_id)
-            .map_err(|_| BufferAccessError::Invalid)?;
-
-        let range_size = if let Some(size) = size {
-            size
-        } else if offset > buffer.size {
-            0
-        } else {
-            buffer.size - offset
-        };
-
-        if offset % wgt::MAP_ALIGNMENT != 0 {
-            return Err(BufferAccessError::UnalignedOffset { offset });
-        }
-        if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            return Err(BufferAccessError::UnalignedRangeSize { range_size });
-        }
-
-        match buffer.map_state {
-            resource::BufferMapState::Init { ptr, .. } => {
-                // offset (u64) can not be < 0, so no need to validate the lower bound
-                if offset + range_size > buffer.size {
-                    return Err(BufferAccessError::OutOfBoundsOverrun {
-                        index: offset + range_size - 1,
-                        max: buffer.size,
-                    });
-                }
-                unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) }
-            }
-            resource::BufferMapState::Active { ptr, ref range, .. } => {
-                if offset < range.start {
-                    return Err(BufferAccessError::OutOfBoundsUnderrun {
-                        index: offset,
-                        min: range.start,
-                    });
-                }
-                if offset + range_size > range.end {
-                    return Err(BufferAccessError::OutOfBoundsOverrun {
-                        index: offset + range_size - 1,
-                        max: range.end,
-                    });
-                }
-                // ptr points to the beginning of the range we mapped in map_async
-                // rather than the beginning of the buffer.
-                let relative_offset = (offset - range.start) as isize;
-                unsafe { Ok((ptr.as_ptr().offset(relative_offset), range_size)) }
-            }
-            resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => {
-                Err(BufferAccessError::NotMapped)
-            }
-        }
-    }
-
-    fn buffer_unmap_inner<A: HalApi>(
-        &self,
-        buffer_id: id::BufferId,
-        buffer: &mut resource::Buffer<A>,
-        device: &mut Device<A>,
-    ) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
-        log::debug!("Buffer {:?} map state -> Idle", buffer_id);
-        match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
-            resource::BufferMapState::Init {
-                ptr,
-                stage_buffer,
-                needs_flush,
-            } => {
-                #[cfg(feature = "trace")]
-                if let Some(ref trace) = device.trace {
-                    let mut trace = trace.lock();
-                    let data = trace.make_binary("bin", unsafe {
-                        std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize)
-                    });
-                    trace.add(trace::Action::WriteBuffer {
-                        id: buffer_id,
-                        data,
-                        range: 0..buffer.size,
-                        queued: true,
-                    });
-                }
-                let _ = ptr;
-                if needs_flush {
-                    unsafe {
-                        device
-                            .raw
-                            .flush_mapped_ranges(&stage_buffer, iter::once(0..buffer.size));
-                    }
-                }
-
-                let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?;
-
-                buffer.life_guard.use_at(device.active_submission_index + 1);
-                let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy {
-                    src_offset: 0,
-                    dst_offset: 0,
-                    size,
-                });
-                let transition_src = hal::BufferBarrier {
-                    buffer: &stage_buffer,
-                    usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
-                };
-                let transition_dst = hal::BufferBarrier {
-                    buffer: raw_buf,
-                    usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
-                };
-                let encoder = device.pending_writes.activate();
-                unsafe {
-                    encoder.transition_buffers(
-                        iter::once(transition_src).chain(iter::once(transition_dst)),
-                    );
-                    if buffer.size > 0 {
-                        encoder.copy_buffer_to_buffer(&stage_buffer, raw_buf, region.into_iter());
-                    }
-                }
-                device
-                    .pending_writes
-                    .consume_temp(queue::TempResource::Buffer(stage_buffer));
-                device.pending_writes.dst_buffers.insert(buffer_id);
-            }
-            resource::BufferMapState::Idle => {
-                return Err(BufferAccessError::NotMapped);
-            }
-            resource::BufferMapState::Waiting(pending) => {
-                return Ok(Some((pending.op, Err(BufferAccessError::MapAborted))));
-            }
-            resource::BufferMapState::Active { ptr, range, host } => {
-                if host == HostMap::Write {
-                    #[cfg(feature = "trace")]
-                    if let Some(ref trace) = device.trace {
-                        let mut trace = trace.lock();
-                        let size = range.end - range.start;
-                        let data = trace.make_binary("bin", unsafe {
-                            std::slice::from_raw_parts(ptr.as_ptr(), size as usize)
-                        });
-                        trace.add(trace::Action::WriteBuffer {
-                            id: buffer_id,
-                            data,
-                            range: range.clone(),
-                            queued: false,
-                        });
-                    }
-                    let _ = (ptr, range);
-                }
-                unsafe {
-                    device
-                        .raw
-                        .unmap_buffer(buffer.raw.as_ref().unwrap())
-                        .map_err(DeviceError::from)?
-                };
-            }
-        }
-        Ok(None)
-    }
-
-    pub fn buffer_unmap<A: HalApi>(&self, buffer_id: id::BufferId) -> BufferAccessResult {
-        profiling::scope!("unmap", "Buffer");
-
-        let closure;
-        {
-            // Restrict the locks to this scope.
-            let hub = A::hub(self);
-            let mut token = Token::root();
-
-            let (mut device_guard, mut token) = hub.devices.write(&mut token);
-            let (mut buffer_guard, _) = hub.buffers.write(&mut token);
-            let buffer = buffer_guard
-                .get_mut(buffer_id)
-                .map_err(|_| BufferAccessError::Invalid)?;
-            let device = &mut device_guard[buffer.device_id.value];
-
-            closure = self.buffer_unmap_inner(buffer_id, buffer, device)
-        }
-
-        // Note: outside the scope where locks are held when calling the callback
-        if let Some((operation, status)) = closure? {
-            operation.callback.call(status);
-        }
-        Ok(())
-    }
-}
diff --git a/third_party/rust/wgpu-core/src/device/queue.rs b/third_party/rust/wgpu-core/src/device/queue.rs
index 8ee687269d5c..98095f4490fe 100644
--- a/third_party/rust/wgpu-core/src/device/queue.rs
+++ b/third_party/rust/wgpu-core/src/device/queue.rs
@@ -8,8 +8,11 @@ use crate::{
     conv,
     device::{DeviceError, WaitIdleError},
     get_lowest_common_denom,
-    hub::{Global, GlobalIdentityHandlerFactory, HalApi, Input, Token},
+    global::Global,
+    hal_api::HalApi,
+    hub::Token,
     id,
+    identity::{GlobalIdentityHandlerFactory, Input},
     init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
     resource::{BufferAccessError, BufferMapState, StagingBuffer, TextureInner},
     track, FastHashSet, SubmissionIndex,
@@ -477,7 +480,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
     fn queue_validate_write_buffer_impl<A: HalApi>(
         &self,
-        buffer: &super::resource::Buffer<A>,
+        buffer: &crate::resource::Buffer<A>,
         buffer_id: id::BufferId,
         buffer_offset: u64,
         buffer_size: u64,
diff --git a/third_party/rust/wgpu-core/src/device/resource.rs b/third_party/rust/wgpu-core/src/device/resource.rs
new file mode 100644
index 000000000000..00d582850dcd
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/device/resource.rs
@@ -0,0 +1,3137 @@
+#[cfg(feature = "trace")]
+use crate::device::trace;
+use crate::{
+    binding_model, command, conv,
+    device::life::WaitIdleError,
+    device::{
+        AttachmentData, CommandAllocator, MissingDownlevelFlags, MissingFeatures,
+        RenderPassContext, CLEANUP_WAIT_MS,
+    },
+    hal_api::HalApi,
+    hub::{Hub, Token},
+    id,
+    identity::GlobalIdentityHandlerFactory,
+    init_tracker::{
+        BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
+        TextureInitTracker, TextureInitTrackerAction,
+    },
+    instance::Adapter,
+    pipeline,
+    resource::{self, Buffer, TextureViewNotRenderableReason},
+    storage::Storage,
+    track::{BindGroupStates, TextureSelector, Tracker},
+    validation::{self, check_buffer_usage, check_texture_usage},
+    FastHashMap, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored, SubmissionIndex,
+};
+
+use arrayvec::ArrayVec;
+use hal::{CommandEncoder as _, Device as _};
+use parking_lot::{Mutex, MutexGuard};
+use smallvec::SmallVec;
+use thiserror::Error;
+use wgt::{TextureFormat, TextureSampleType, TextureViewDimension};
+
+use std::{borrow::Cow, iter, num::NonZeroU32};
+
+use super::{
+    life, queue, DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, EP_FAILURE,
+    IMPLICIT_FAILURE, ZERO_BUFFER_SIZE,
+};
+
+/// Structure describing a logical device. Some members are internally mutable,
+/// stored behind mutexes.
+///
+/// TODO: establish clear order of locking for these:
+/// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`,
+/// `render_passes`, `pending_writes`, `trace`.
+///
+/// Currently, the rules are:
+/// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system
+/// 1. `self.trackers` is locked last (unenforced)
`self.trace` is locked last (unenforced)
+pub struct Device<A: HalApi> {
+    pub(crate) raw: A::Device,
+    pub(crate) adapter_id: Stored<id::AdapterId>,
+    pub(crate) queue: A::Queue,
+    pub(crate) zero_buffer: A::Buffer,
+    //pub(crate) cmd_allocator: command::CommandAllocator<A>,
+    //mem_allocator: Mutex<alloc::MemoryAllocator<A>>,
+    //desc_allocator: Mutex<descriptor::DescriptorAllocator<A>>,
+    //Note: The submission index here corresponds to the last submission that is done.
+    pub(crate) life_guard: LifeGuard,
+
+    /// A clone of `life_guard.ref_count`.
+    ///
+    /// Holding a separate clone of the `RefCount` here lets us tell whether the
+    /// device is referenced by other resources, even if `life_guard.ref_count`
+    /// was set to `None` by a call to `device_drop`.
+    pub(super) ref_count: RefCount,
+
+    pub(super) command_allocator: Mutex<CommandAllocator<A>>,
+    pub(crate) active_submission_index: SubmissionIndex,
+    pub(super) fence: A::Fence,
+
+    /// All live resources allocated with this [`Device`].
+    ///
+    /// Has to be locked temporarily only (locked last)
+    pub(crate) trackers: Mutex<Tracker<A>>,
+    // Life tracker should be locked right after the device and before anything else.
+    life_tracker: Mutex<life::LifetimeTracker<A>>,
+    /// Temporary storage for resource management functions. Cleared at the end
+    /// of every call (unless an error occurs).
+    pub(super) temp_suspected: life::SuspectedResources,
+    pub(crate) alignments: hal::Alignments,
+    pub(crate) limits: wgt::Limits,
+    pub(crate) features: wgt::Features,
+    pub(crate) downlevel: wgt::DownlevelCapabilities,
+    // TODO: move this behind another mutex. This would allow several methods to
+    // switch to borrow Device immutably, such as `write_buffer`, `write_texture`,
+    // and `buffer_unmap`.
+    pub(super) pending_writes: queue::PendingWrites<A>,
+    #[cfg(feature = "trace")]
+    pub(crate) trace: Option<Mutex<trace::Trace>>,
+}
+
+#[derive(Clone, Debug, Error)]
+#[non_exhaustive]
+pub enum CreateDeviceError {
+    #[error("Not enough memory left")]
+    OutOfMemory,
+    #[error("Failed to create internal buffer for initializing textures")]
+    FailedToCreateZeroBuffer(#[from] DeviceError),
+}
+
+impl<A: HalApi> Device<A> {
+    pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
+        if self.features.contains(feature) {
+            Ok(())
+        } else {
+            Err(MissingFeatures(feature))
+        }
+    }
+
+    pub(crate) fn require_downlevel_flags(
+        &self,
+        flags: wgt::DownlevelFlags,
+    ) -> Result<(), MissingDownlevelFlags> {
+        if self.downlevel.flags.contains(flags) {
+            Ok(())
+        } else {
+            Err(MissingDownlevelFlags(flags))
+        }
+    }
+}
+
+impl<A: HalApi> Device<A> {
+    pub(crate) fn new(
+        open: hal::OpenDevice<A>,
+        adapter_id: Stored<id::AdapterId>,
+        alignments: hal::Alignments,
+        downlevel: wgt::DownlevelCapabilities,
+        desc: &DeviceDescriptor,
+        trace_path: Option<&std::path::Path>,
+    ) -> Result<Self, CreateDeviceError> {
+        #[cfg(not(feature = "trace"))]
+        if let Some(_) = trace_path {
+            log::error!("Feature 'trace' is not enabled");
+        }
+        let fence =
+            unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?;
+
+        let mut com_alloc = CommandAllocator {
+            free_encoders: Vec::new(),
+        };
+        let pending_encoder = com_alloc
+            .acquire_encoder(&open.device, &open.queue)
+            .map_err(|_| CreateDeviceError::OutOfMemory)?;
+        let mut pending_writes = queue::PendingWrites::<A>::new(pending_encoder);
+
+        // Create zeroed buffer used for texture clears.
+        let zero_buffer = unsafe {
+            open.device
+                .create_buffer(&hal::BufferDescriptor {
+                    label: Some("(wgpu internal) zero init buffer"),
+                    size: ZERO_BUFFER_SIZE,
+                    usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST,
+                    memory_flags: hal::MemoryFlags::empty(),
+                })
+                .map_err(DeviceError::from)?
+        };
+        pending_writes.activate();
+        unsafe {
+            pending_writes
+                .command_encoder
+                .transition_buffers(iter::once(hal::BufferBarrier {
+                    buffer: &zero_buffer,
+                    usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
+                }));
+            pending_writes
+                .command_encoder
+                .clear_buffer(&zero_buffer, 0..ZERO_BUFFER_SIZE);
+            pending_writes
+                .command_encoder
+                .transition_buffers(iter::once(hal::BufferBarrier {
+                    buffer: &zero_buffer,
+                    usage: hal::BufferUses::COPY_DST..hal::BufferUses::COPY_SRC,
+                }));
+        }
+
+        let life_guard = LifeGuard::new("<device>");
+        let ref_count = life_guard.add_ref();
+        Ok(Self {
+            raw: open.device,
+            adapter_id,
+            queue: open.queue,
+            zero_buffer,
+            life_guard,
+            ref_count,
+            command_allocator: Mutex::new(com_alloc),
+            active_submission_index: 0,
+            fence,
+            trackers: Mutex::new(Tracker::new()),
+            life_tracker: Mutex::new(life::LifetimeTracker::new()),
+            temp_suspected: life::SuspectedResources::default(),
+            #[cfg(feature = "trace")]
+            trace: trace_path.and_then(|path| match trace::Trace::new(path) {
+                Ok(mut trace) => {
+                    trace.add(trace::Action::Init {
+                        desc: desc.clone(),
+                        backend: A::VARIANT,
+                    });
+                    Some(Mutex::new(trace))
+                }
+                Err(e) => {
+                    log::error!("Unable to start a trace in '{:?}': {:?}", path, e);
+                    None
+                }
+            }),
+            alignments,
+            limits: desc.limits.clone(),
+            features: desc.features,
+            downlevel,
+            pending_writes,
+        })
+    }
+
+    pub(super) fn lock_life<'this, 'token: 'this>(
+        &'this self,
+        //TODO: fix this - the token has to be borrowed for the lock
+        _token: &mut Token<'token, Self>,
+    ) -> MutexGuard<'this, life::LifetimeTracker<A>> {
+        self.life_tracker.lock()
+    }
+
+    /// Check this device for completed commands.
+    ///
+    /// The `maintain` argument tells how the maintenance function should behave, either
+    /// blocking or just polling the current state of the gpu.
+    ///
+    /// Return a pair `(closures, queue_empty)`, where:
+    ///
+    /// - `closures` is a list of actions to take: mapping buffers, notifying the user
+    ///
+    /// - `queue_empty` is a boolean indicating whether there are no more queue
+    ///   submissions still in flight. (We have to take the locks needed to
+    ///   produce this information for other reasons, so we might as well just
+    ///   return it to our callers.)
+    pub(super) fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
+        &'this self,
+        hub: &Hub<A, G>,
+        maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
+        token: &mut Token<'token, Self>,
+    ) -> Result<(UserClosures, bool), WaitIdleError> {
+        profiling::scope!("Device::maintain");
+        let mut life_tracker = self.lock_life(token);
+
+        // Normally, `temp_suspected` exists only to save heap
+        // allocations: it's cleared at the start of the function
+        // call, and cleared by the end. But `Global::queue_submit` is
+        // fallible; if it exits early, it may leave some resources in
+        // `temp_suspected`.
+        life_tracker
+            .suspected_resources
+            .extend(&self.temp_suspected);
+
+        life_tracker.triage_suspected(
+            hub,
+            &self.trackers,
+            #[cfg(feature = "trace")]
+            self.trace.as_ref(),
+            token,
+        );
+        life_tracker.triage_mapped(hub, token);
+
+        let last_done_index = if maintain.is_wait() {
+            let index_to_wait_for = match maintain {
+                wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
+                    // We don't need to check to see if the queue id matches
+                    // as we already checked this from inside the poll call.
+                    submission_index.index
+                }
+                _ => self.active_submission_index,
+            };
+            unsafe {
+                self.raw
+                    .wait(&self.fence, index_to_wait_for, CLEANUP_WAIT_MS)
+                    .map_err(DeviceError::from)?
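
The fence wait just above is one of three `maintain` modes: block on a specific submission index, block on the last active submission, or take a non-blocking snapshot of the fence. A toy version of that control flow, with plain integers standing in for `A::Fence` values and submission indices:

#[derive(Debug)]
enum Maintain {
    Poll,
    Wait,
    WaitForSubmissionIndex(u64),
}

fn last_done_index(mode: &Maintain, active_submission_index: u64, fence_value: u64) -> u64 {
    match *mode {
        // Block until a specific submission is signaled.
        Maintain::WaitForSubmissionIndex(index) => index,
        // Block until everything submitted so far is done.
        Maintain::Wait => active_submission_index,
        // Non-blocking: just read what the fence says right now.
        Maintain::Poll => fence_value,
    }
}

fn main() {
    assert_eq!(last_done_index(&Maintain::Poll, 7, 4), 4);
    assert_eq!(last_done_index(&Maintain::Wait, 7, 4), 7);
    assert_eq!(last_done_index(&Maintain::WaitForSubmissionIndex(6), 7, 4), 6);
}
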
+ }; + index_to_wait_for + } else { + unsafe { + self.raw + .get_fence_value(&self.fence) + .map_err(DeviceError::from)? + } + }; + + let submission_closures = + life_tracker.triage_submissions(last_done_index, &self.command_allocator); + let mapping_closures = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token); + life_tracker.cleanup(&self.raw); + + let closures = UserClosures { + mappings: mapping_closures, + submissions: submission_closures, + }; + Ok((closures, life_tracker.queue_empty())) + } + + pub(super) fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( + &'this mut self, + hub: &Hub, + trackers: &Tracker, + token: &mut Token<'token, Self>, + ) { + self.temp_suspected.clear(); + // As the tracker is cleared/dropped, we need to consider all the resources + // that it references for destruction in the next GC pass. + { + let (bind_group_guard, mut token) = hub.bind_groups.read(token); + let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); + let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); + let (query_set_guard, mut token) = hub.query_sets.read(&mut token); + let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (texture_guard, mut token) = hub.textures.read(&mut token); + let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); + let (sampler_guard, _) = hub.samplers.read(&mut token); + + for id in trackers.buffers.used() { + if buffer_guard[id].life_guard.ref_count.is_none() { + self.temp_suspected.buffers.push(id); + } + } + for id in trackers.textures.used() { + if texture_guard[id].life_guard.ref_count.is_none() { + self.temp_suspected.textures.push(id); + } + } + for id in trackers.views.used() { + if texture_view_guard[id].life_guard.ref_count.is_none() { + self.temp_suspected.texture_views.push(id); + } + } + for id in trackers.bind_groups.used() { + if bind_group_guard[id].life_guard.ref_count.is_none() { + self.temp_suspected.bind_groups.push(id); + } + } + for id in trackers.samplers.used() { + if sampler_guard[id].life_guard.ref_count.is_none() { + self.temp_suspected.samplers.push(id); + } + } + for id in trackers.compute_pipelines.used() { + if compute_pipe_guard[id].life_guard.ref_count.is_none() { + self.temp_suspected.compute_pipelines.push(id); + } + } + for id in trackers.render_pipelines.used() { + if render_pipe_guard[id].life_guard.ref_count.is_none() { + self.temp_suspected.render_pipelines.push(id); + } + } + for id in trackers.query_sets.used() { + if query_set_guard[id].life_guard.ref_count.is_none() { + self.temp_suspected.query_sets.push(id); + } + } + } + + self.lock_life(token) + .suspected_resources + .extend(&self.temp_suspected); + + self.temp_suspected.clear(); + } + + pub(super) fn create_buffer( + &self, + self_id: id::DeviceId, + desc: &resource::BufferDescriptor, + transient: bool, + ) -> Result, resource::CreateBufferError> { + debug_assert_eq!(self_id.backend(), A::VARIANT); + + if desc.size > self.limits.max_buffer_size { + return Err(resource::CreateBufferError::MaxBufferSize { + requested: desc.size, + maximum: self.limits.max_buffer_size, + }); + } + + if desc.usage.contains(wgt::BufferUsages::INDEX) + && desc.usage.contains( + wgt::BufferUsages::VERTEX + | wgt::BufferUsages::UNIFORM + | wgt::BufferUsages::INDIRECT + | wgt::BufferUsages::STORAGE, + ) + { + self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?; + } + + let mut usage = conv::map_buffer_usage(desc.usage); + + if desc.usage.is_empty() || 
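
Both `maintain` and `untrack` above depend on taking locks in one documented order (the device, then `life_tracker`, then `trackers` and the storages). A two-mutex sketch of that discipline; the types are illustrative only, and in wgpu-core the ordering is partly enforced by the `Token` system rather than by convention alone:

use std::sync::Mutex;

struct Device {
    life_tracker: Mutex<Vec<u32>>, // locked first, right after the device itself
    trackers: Mutex<Vec<u32>>,     // locked last, and only briefly
}

fn maintain(device: &Device) {
    // Every code path takes the locks in the same order; a path that took
    // `trackers` first and then `life_tracker` could deadlock against this one.
    let mut life = device.life_tracker.lock().unwrap();
    {
        let trackers = device.trackers.lock().unwrap();
        life.extend(trackers.iter().copied());
    } // `trackers` is released before any further work under `life`
}

fn main() {
    let device = Device {
        life_tracker: Mutex::new(vec![]),
        trackers: Mutex::new(vec![1, 2, 3]),
    };
    maintain(&device);
    assert_eq!(device.life_tracker.lock().unwrap().len(), 3);
}
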
desc.usage.contains_invalid_bits() { + return Err(resource::CreateBufferError::InvalidUsage(desc.usage)); + } + + if !self + .features + .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS) + { + use wgt::BufferUsages as Bu; + let write_mismatch = desc.usage.contains(Bu::MAP_WRITE) + && !(Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage); + let read_mismatch = desc.usage.contains(Bu::MAP_READ) + && !(Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage); + if write_mismatch || read_mismatch { + return Err(resource::CreateBufferError::UsageMismatch(desc.usage)); + } + } + + if desc.mapped_at_creation { + if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 { + return Err(resource::CreateBufferError::UnalignedSize); + } + if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { + // we are going to be copying into it, internally + usage |= hal::BufferUses::COPY_DST; + } + } else { + // We are required to zero out (initialize) all memory. This is done + // on demand using clear_buffer which requires write transfer usage! + usage |= hal::BufferUses::COPY_DST; + } + + let actual_size = if desc.size == 0 { + wgt::COPY_BUFFER_ALIGNMENT + } else if desc.usage.contains(wgt::BufferUsages::VERTEX) { + // Bumping the size by 1 so that we can bind an empty range at the + // end of the buffer. + desc.size + 1 + } else { + desc.size + }; + let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT; + let aligned_size = if clear_remainder != 0 { + actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder + } else { + actual_size + }; + + let mut memory_flags = hal::MemoryFlags::empty(); + memory_flags.set(hal::MemoryFlags::TRANSIENT, transient); + + let hal_desc = hal::BufferDescriptor { + label: desc.label.borrow_option(), + size: aligned_size, + usage, + memory_flags, + }; + let buffer = unsafe { self.raw.create_buffer(&hal_desc) }.map_err(DeviceError::from)?; + + Ok(Buffer { + raw: Some(buffer), + device_id: Stored { + value: id::Valid(self_id), + ref_count: self.life_guard.add_ref(), + }, + usage: desc.usage, + size: desc.size, + initialization_status: BufferInitTracker::new(desc.size), + sync_mapped_writes: None, + map_state: resource::BufferMapState::Idle, + life_guard: LifeGuard::new(desc.label.borrow_or_default()), + }) + } + + pub(super) fn create_texture_from_hal( + &self, + hal_texture: A::Texture, + hal_usage: hal::TextureUses, + self_id: id::DeviceId, + desc: &resource::TextureDescriptor, + format_features: wgt::TextureFormatFeatures, + clear_mode: resource::TextureClearMode, + ) -> resource::Texture { + debug_assert_eq!(self_id.backend(), A::VARIANT); + + resource::Texture { + inner: resource::TextureInner::Native { + raw: Some(hal_texture), + }, + device_id: Stored { + value: id::Valid(self_id), + ref_count: self.life_guard.add_ref(), + }, + desc: desc.map_label(|_| ()), + hal_usage, + format_features, + initialization_status: TextureInitTracker::new( + desc.mip_level_count, + desc.array_layer_count(), + ), + full_range: TextureSelector { + mips: 0..desc.mip_level_count, + layers: 0..desc.array_layer_count(), + }, + life_guard: LifeGuard::new(desc.label.borrow_or_default()), + clear_mode, + } + } + + pub(super) fn create_texture( + &self, + self_id: id::DeviceId, + adapter: &Adapter, + desc: &resource::TextureDescriptor, + ) -> Result, resource::CreateTextureError> { + use resource::{CreateTextureError, TextureDimensionError}; + + if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { + return Err(CreateTextureError::InvalidUsage(desc.usage)); + } + + conv::check_texture_dimension_size( 
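
The size fixup in `create_buffer` above can be read as a small pure function: empty buffers get one alignment unit, vertex buffers get an extra byte so an empty range can be bound at the end, and the result is rounded up to `COPY_BUFFER_ALIGNMENT`. A worked sketch, assuming the 4-byte alignment value that `wgt` defines:

const COPY_BUFFER_ALIGNMENT: u64 = 4;

fn aligned_size(size: u64, is_vertex: bool) -> u64 {
    let actual = if size == 0 {
        COPY_BUFFER_ALIGNMENT
    } else if is_vertex {
        size + 1
    } else {
        size
    };
    let rem = actual % COPY_BUFFER_ALIGNMENT;
    if rem != 0 { actual + COPY_BUFFER_ALIGNMENT - rem } else { actual }
}

fn main() {
    assert_eq!(aligned_size(0, false), 4);   // empty buffer -> one aligned unit
    assert_eq!(aligned_size(16, true), 20);  // 16 + 1 for the vertex case, rounded to 20
    assert_eq!(aligned_size(10, false), 12); // plain round-up
}
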
+ desc.dimension, + desc.size, + desc.sample_count, + &self.limits, + )?; + + if desc.dimension != wgt::TextureDimension::D2 { + // Depth textures can only be 2D + if desc.format.is_depth_stencil_format() { + return Err(CreateTextureError::InvalidDepthDimension( + desc.dimension, + desc.format, + )); + } + // Renderable textures can only be 2D + if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) { + return Err(CreateTextureError::InvalidDimensionUsages( + wgt::TextureUsages::RENDER_ATTACHMENT, + desc.dimension, + )); + } + + // Compressed textures can only be 2D + if desc.format.is_compressed() { + return Err(CreateTextureError::InvalidCompressedDimension( + desc.dimension, + desc.format, + )); + } + } + + if desc.format.is_compressed() { + let (block_width, block_height) = desc.format.block_dimensions(); + + if desc.size.width % block_width != 0 { + return Err(CreateTextureError::InvalidDimension( + TextureDimensionError::NotMultipleOfBlockWidth { + width: desc.size.width, + block_width, + format: desc.format, + }, + )); + } + + if desc.size.height % block_height != 0 { + return Err(CreateTextureError::InvalidDimension( + TextureDimensionError::NotMultipleOfBlockHeight { + height: desc.size.height, + block_height, + format: desc.format, + }, + )); + } + } + + let format_features = self + .describe_format_features(adapter, desc.format) + .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?; + + if desc.sample_count > 1 { + if desc.mip_level_count != 1 { + return Err(CreateTextureError::InvalidMipLevelCount { + requested: desc.mip_level_count, + maximum: 1, + }); + } + + if desc.size.depth_or_array_layers != 1 { + return Err(CreateTextureError::InvalidDimension( + TextureDimensionError::MultisampledDepthOrArrayLayer( + desc.size.depth_or_array_layers, + ), + )); + } + + if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) { + return Err(CreateTextureError::InvalidMultisampledStorageBinding); + } + + if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) { + return Err(CreateTextureError::MultisampledNotRenderAttachment); + } + + if !format_features.flags.intersects( + wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X16, + ) { + return Err(CreateTextureError::InvalidMultisampledFormat(desc.format)); + } + + if !format_features + .flags + .sample_count_supported(desc.sample_count) + { + return Err(CreateTextureError::InvalidSampleCount( + desc.sample_count, + desc.format, + )); + }; + } + + let mips = desc.mip_level_count; + let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS); + if mips == 0 || mips > max_levels_allowed { + return Err(CreateTextureError::InvalidMipLevelCount { + requested: mips, + maximum: max_levels_allowed, + }); + } + + let missing_allowed_usages = desc.usage - format_features.allowed_usages; + if !missing_allowed_usages.is_empty() { + // detect downlevel incompatibilities + let wgpu_allowed_usages = desc + .format + .guaranteed_format_features(self.features) + .allowed_usages; + let wgpu_missing_usages = desc.usage - wgpu_allowed_usages; + return Err(CreateTextureError::InvalidFormatUsages( + missing_allowed_usages, + desc.format, + wgpu_missing_usages.is_empty(), + )); + } + + let mut hal_view_formats = vec![]; + for format in desc.view_formats.iter() { + if desc.format == *format { + continue; + } + if desc.format.remove_srgb_suffix() != 
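
The compressed-format check above reduces to divisibility tests against the block dimensions. A standalone sketch with `String` errors in place of `CreateTextureError`; the 4x4 blocks used below are just an example (the size of BC-style formats):

fn validate_compressed_extent(
    width: u32,
    height: u32,
    block_dimensions: (u32, u32),
) -> Result<(), String> {
    let (block_width, block_height) = block_dimensions;
    if width % block_width != 0 {
        return Err(format!(
            "width {width} is not a multiple of block width {block_width}"
        ));
    }
    if height % block_height != 0 {
        return Err(format!(
            "height {height} is not a multiple of block height {block_height}"
        ));
    }
    Ok(())
}

fn main() {
    assert!(validate_compressed_extent(256, 128, (4, 4)).is_ok());
    assert!(validate_compressed_extent(250, 128, (4, 4)).is_err());
}
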
format.remove_srgb_suffix() { + return Err(CreateTextureError::InvalidViewFormat(*format, desc.format)); + } + hal_view_formats.push(*format); + } + if !hal_view_formats.is_empty() { + self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?; + } + + // Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we + // wouldn't be able to initialize the texture. + let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into()) + | if desc.format.is_depth_stencil_format() { + hal::TextureUses::DEPTH_STENCIL_WRITE + } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) { + hal::TextureUses::COPY_DST // (set already) + } else { + // Use COPY_DST only if we can't use COLOR_TARGET + if format_features + .allowed_usages + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + && desc.dimension == wgt::TextureDimension::D2 + // Render targets dimension must be 2d + { + hal::TextureUses::COLOR_TARGET + } else { + hal::TextureUses::COPY_DST + } + }; + + let hal_desc = hal::TextureDescriptor { + label: desc.label.borrow_option(), + size: desc.size, + mip_level_count: desc.mip_level_count, + sample_count: desc.sample_count, + dimension: desc.dimension, + format: desc.format, + usage: hal_usage, + memory_flags: hal::MemoryFlags::empty(), + view_formats: hal_view_formats, + }; + + let raw_texture = unsafe { + self.raw + .create_texture(&hal_desc) + .map_err(DeviceError::from)? + }; + + let clear_mode = if hal_usage + .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) + { + let (is_color, usage) = if desc.format.is_depth_stencil_format() { + (false, hal::TextureUses::DEPTH_STENCIL_WRITE) + } else { + (true, hal::TextureUses::COLOR_TARGET) + }; + let dimension = match desc.dimension { + wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, + wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, + wgt::TextureDimension::D3 => unreachable!(), + }; + + let mut clear_views = SmallVec::new(); + for mip_level in 0..desc.mip_level_count { + for array_layer in 0..desc.size.depth_or_array_layers { + let desc = hal::TextureViewDescriptor { + label: Some("(wgpu internal) clear texture view"), + format: desc.format, + dimension, + usage, + range: wgt::ImageSubresourceRange { + aspect: wgt::TextureAspect::All, + base_mip_level: mip_level, + mip_level_count: Some(1), + base_array_layer: array_layer, + array_layer_count: Some(1), + }, + }; + clear_views.push( + unsafe { self.raw.create_texture_view(&raw_texture, &desc) } + .map_err(DeviceError::from)?, + ); + } + } + resource::TextureClearMode::RenderPass { + clear_views, + is_color, + } + } else { + resource::TextureClearMode::BufferCopy + }; + + let mut texture = self.create_texture_from_hal( + raw_texture, + hal_usage, + self_id, + desc, + format_features, + clear_mode, + ); + texture.hal_usage = hal_usage; + Ok(texture) + } + + pub(super) fn create_texture_view( + &self, + texture: &resource::Texture, + texture_id: id::TextureId, + desc: &resource::TextureViewDescriptor, + ) -> Result, resource::CreateTextureViewError> { + let texture_raw = texture + .inner + .as_raw() + .ok_or(resource::CreateTextureViewError::InvalidTexture)?; + + // resolve TextureViewDescriptor defaults + // https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults + + let resolved_format = desc.format.unwrap_or_else(|| { + texture + .desc + .format + .aspect_specific_format(desc.range.aspect) + .unwrap_or(texture.desc.format) + }); + + let resolved_dimension = desc + .dimension + 
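
The view-format rule enforced above is that a requested view format may differ from the texture format only by an sRGB suffix. A string-based analogue of `remove_srgb_suffix` (illustrative only; the real method operates on `wgt::TextureFormat`):

fn remove_srgb_suffix(format: &str) -> &str {
    format.strip_suffix("-srgb").unwrap_or(format)
}

fn view_format_compatible(texture: &str, view: &str) -> bool {
    // Same format up to linear-vs-sRGB encoding is allowed; anything else is not.
    remove_srgb_suffix(texture) == remove_srgb_suffix(view)
}

fn main() {
    assert!(view_format_compatible("rgba8unorm", "rgba8unorm-srgb"));
    assert!(!view_format_compatible("rgba8unorm", "bgra8unorm"));
}
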
.unwrap_or_else(|| match texture.desc.dimension { + wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, + wgt::TextureDimension::D2 => { + if texture.desc.array_layer_count() == 1 { + wgt::TextureViewDimension::D2 + } else { + wgt::TextureViewDimension::D2Array + } + } + wgt::TextureDimension::D3 => wgt::TextureViewDimension::D3, + }); + + let resolved_mip_level_count = desc.range.mip_level_count.unwrap_or_else(|| { + texture + .desc + .mip_level_count + .saturating_sub(desc.range.base_mip_level) + }); + + let resolved_array_layer_count = + desc.range + .array_layer_count + .unwrap_or_else(|| match resolved_dimension { + wgt::TextureViewDimension::D1 + | wgt::TextureViewDimension::D2 + | wgt::TextureViewDimension::D3 => 1, + wgt::TextureViewDimension::Cube => 6, + wgt::TextureViewDimension::D2Array | wgt::TextureViewDimension::CubeArray => { + texture + .desc + .array_layer_count() + .saturating_sub(desc.range.base_array_layer) + } + }); + + // validate TextureViewDescriptor + + let aspects = hal::FormatAspects::new(texture.desc.format, desc.range.aspect); + if aspects.is_empty() { + return Err(resource::CreateTextureViewError::InvalidAspect { + texture_format: texture.desc.format, + requested_aspect: desc.range.aspect, + }); + } + + let format_is_good = if desc.range.aspect == wgt::TextureAspect::All { + resolved_format == texture.desc.format + || texture.desc.view_formats.contains(&resolved_format) + } else { + Some(resolved_format) + == texture + .desc + .format + .aspect_specific_format(desc.range.aspect) + }; + if !format_is_good { + return Err(resource::CreateTextureViewError::FormatReinterpretation { + texture: texture.desc.format, + view: resolved_format, + }); + } + + // check if multisampled texture is seen as anything but 2D + if texture.desc.sample_count > 1 && resolved_dimension != wgt::TextureViewDimension::D2 { + return Err( + resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension( + resolved_dimension, + ), + ); + } + + // check if the dimension is compatible with the texture + if texture.desc.dimension != resolved_dimension.compatible_texture_dimension() { + return Err( + resource::CreateTextureViewError::InvalidTextureViewDimension { + view: resolved_dimension, + texture: texture.desc.dimension, + }, + ); + } + + match resolved_dimension { + TextureViewDimension::D1 | TextureViewDimension::D2 | TextureViewDimension::D3 => { + if resolved_array_layer_count != 1 { + return Err(resource::CreateTextureViewError::InvalidArrayLayerCount { + requested: resolved_array_layer_count, + dim: resolved_dimension, + }); + } + } + TextureViewDimension::Cube => { + if resolved_array_layer_count != 6 { + return Err( + resource::CreateTextureViewError::InvalidCubemapTextureDepth { + depth: resolved_array_layer_count, + }, + ); + } + } + TextureViewDimension::CubeArray => { + if resolved_array_layer_count % 6 != 0 { + return Err( + resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth { + depth: resolved_array_layer_count, + }, + ); + } + } + _ => {} + } + + match resolved_dimension { + TextureViewDimension::Cube | TextureViewDimension::CubeArray => { + if texture.desc.size.width != texture.desc.size.height { + return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize); + } + } + _ => {} + } + + if resolved_mip_level_count == 0 { + return Err(resource::CreateTextureViewError::ZeroMipLevelCount); + } + + let mip_level_end = desc + .range + .base_mip_level + .saturating_add(resolved_mip_level_count); + + let level_end = 
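
The descriptor-default resolution above follows the WebGPU rule that an omitted mip count means "the rest of the chain", with `saturating_sub` deferring an out-of-range base to the later `ZeroMipLevelCount` error. A sketch of just that piece:

fn resolved_mip_level_count(requested: Option<u32>, texture_mips: u32, base_mip: u32) -> u32 {
    // Default is "the rest of the mip chain"; saturating_sub keeps an
    // out-of-range base from underflowing (zero is rejected later).
    requested.unwrap_or_else(|| texture_mips.saturating_sub(base_mip))
}

fn main() {
    assert_eq!(resolved_mip_level_count(None, 10, 3), 7);
    assert_eq!(resolved_mip_level_count(Some(2), 10, 3), 2);
    assert_eq!(resolved_mip_level_count(None, 10, 12), 0); // caught as ZeroMipLevelCount
}
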
texture.desc.mip_level_count; + if mip_level_end > level_end { + return Err(resource::CreateTextureViewError::TooManyMipLevels { + requested: mip_level_end, + total: level_end, + }); + } + + if resolved_array_layer_count == 0 { + return Err(resource::CreateTextureViewError::ZeroArrayLayerCount); + } + + let array_layer_end = desc + .range + .base_array_layer + .saturating_add(resolved_array_layer_count); + + let layer_end = texture.desc.array_layer_count(); + if array_layer_end > layer_end { + return Err(resource::CreateTextureViewError::TooManyArrayLayers { + requested: array_layer_end, + total: layer_end, + }); + }; + + // https://gpuweb.github.io/gpuweb/#abstract-opdef-renderable-texture-view + let render_extent = 'b: loop { + if !texture + .desc + .usage + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + { + break 'b Err(TextureViewNotRenderableReason::Usage(texture.desc.usage)); + } + + if !(resolved_dimension == TextureViewDimension::D2 + || (self.features.contains(wgt::Features::MULTIVIEW) + && resolved_dimension == TextureViewDimension::D2Array)) + { + break 'b Err(TextureViewNotRenderableReason::Dimension( + resolved_dimension, + )); + } + + if resolved_mip_level_count != 1 { + break 'b Err(TextureViewNotRenderableReason::MipLevelCount( + resolved_mip_level_count, + )); + } + + if resolved_array_layer_count != 1 + && !(self.features.contains(wgt::Features::MULTIVIEW)) + { + break 'b Err(TextureViewNotRenderableReason::ArrayLayerCount( + resolved_array_layer_count, + )); + } + + if aspects != hal::FormatAspects::from(texture.desc.format) { + break 'b Err(TextureViewNotRenderableReason::Aspects(aspects)); + } + + break 'b Ok(texture + .desc + .compute_render_extent(desc.range.base_mip_level)); + }; + + // filter the usages based on the other criteria + let usage = { + let mask_copy = !(hal::TextureUses::COPY_SRC | hal::TextureUses::COPY_DST); + let mask_dimension = match resolved_dimension { + wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { + hal::TextureUses::RESOURCE + } + wgt::TextureViewDimension::D3 => { + hal::TextureUses::RESOURCE + | hal::TextureUses::STORAGE_READ + | hal::TextureUses::STORAGE_READ_WRITE + } + _ => hal::TextureUses::all(), + }; + let mask_mip_level = if resolved_mip_level_count == 1 { + hal::TextureUses::all() + } else { + hal::TextureUses::RESOURCE + }; + texture.hal_usage & mask_copy & mask_dimension & mask_mip_level + }; + + log::debug!( + "Create view for texture {:?} filters usages to {:?}", + texture_id, + usage + ); + + // use the combined depth-stencil format for the view + let format = if resolved_format.is_depth_stencil_component(texture.desc.format) { + texture.desc.format + } else { + resolved_format + }; + + let resolved_range = wgt::ImageSubresourceRange { + aspect: desc.range.aspect, + base_mip_level: desc.range.base_mip_level, + mip_level_count: Some(resolved_mip_level_count), + base_array_layer: desc.range.base_array_layer, + array_layer_count: Some(resolved_array_layer_count), + }; + + let hal_desc = hal::TextureViewDescriptor { + label: desc.label.borrow_option(), + format, + dimension: resolved_dimension, + usage, + range: resolved_range, + }; + + let raw = unsafe { + self.raw + .create_texture_view(texture_raw, &hal_desc) + .map_err(|_| resource::CreateTextureViewError::OutOfMemory)? 
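
The `render_extent` computation above uses a value-carrying labeled loop, `'b: loop { ... break 'b ... }`, as a "first failing check wins" expression. The same shape in miniature, with a cut-down error enum standing in for `TextureViewNotRenderableReason`:

#[derive(Debug, PartialEq)]
enum NotRenderable {
    MipLevelCount(u32),
    ArrayLayerCount(u32),
}

fn render_extent(mips: u32, layers: u32, extent: (u32, u32)) -> Result<(u32, u32), NotRenderable> {
    // Each failed check breaks out with an Err; falling through to the
    // final break produces the Ok value.
    'b: loop {
        if mips != 1 {
            break 'b Err(NotRenderable::MipLevelCount(mips));
        }
        if layers != 1 {
            break 'b Err(NotRenderable::ArrayLayerCount(layers));
        }
        break 'b Ok(extent);
    }
}

fn main() {
    assert_eq!(render_extent(1, 1, (800, 600)), Ok((800, 600)));
    assert_eq!(render_extent(3, 1, (800, 600)), Err(NotRenderable::MipLevelCount(3)));
}
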
+ }; + + let selector = TextureSelector { + mips: desc.range.base_mip_level..mip_level_end, + layers: desc.range.base_array_layer..array_layer_end, + }; + + Ok(resource::TextureView { + raw, + parent_id: Stored { + value: id::Valid(texture_id), + ref_count: texture.life_guard.add_ref(), + }, + device_id: texture.device_id.clone(), + desc: resource::HalTextureViewDescriptor { + format: resolved_format, + dimension: resolved_dimension, + range: resolved_range, + }, + format_features: texture.format_features, + render_extent, + samples: texture.desc.sample_count, + selector, + life_guard: LifeGuard::new(desc.label.borrow_or_default()), + }) + } + + pub(super) fn create_sampler( + &self, + self_id: id::DeviceId, + desc: &resource::SamplerDescriptor, + ) -> Result, resource::CreateSamplerError> { + if desc + .address_modes + .iter() + .any(|am| am == &wgt::AddressMode::ClampToBorder) + { + self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?; + } + + if desc.border_color == Some(wgt::SamplerBorderColor::Zero) { + self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?; + } + + if desc.lod_min_clamp < 0.0 { + return Err(resource::CreateSamplerError::InvalidLodMinClamp( + desc.lod_min_clamp, + )); + } + if desc.lod_max_clamp < desc.lod_min_clamp { + return Err(resource::CreateSamplerError::InvalidLodMaxClamp { + lod_min_clamp: desc.lod_min_clamp, + lod_max_clamp: desc.lod_max_clamp, + }); + } + + if desc.anisotropy_clamp < 1 { + return Err(resource::CreateSamplerError::InvalidAnisotropy( + desc.anisotropy_clamp, + )); + } + + if desc.anisotropy_clamp != 1 { + if !matches!(desc.min_filter, wgt::FilterMode::Linear) { + return Err( + resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { + filter_type: resource::SamplerFilterErrorType::MinFilter, + filter_mode: desc.min_filter, + anisotropic_clamp: desc.anisotropy_clamp, + }, + ); + } + if !matches!(desc.mag_filter, wgt::FilterMode::Linear) { + return Err( + resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { + filter_type: resource::SamplerFilterErrorType::MagFilter, + filter_mode: desc.mag_filter, + anisotropic_clamp: desc.anisotropy_clamp, + }, + ); + } + if !matches!(desc.mipmap_filter, wgt::FilterMode::Linear) { + return Err( + resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { + filter_type: resource::SamplerFilterErrorType::MipmapFilter, + filter_mode: desc.mipmap_filter, + anisotropic_clamp: desc.anisotropy_clamp, + }, + ); + } + } + + let anisotropy_clamp = if self + .downlevel + .flags + .contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING) + { + // Clamp anisotropy clamp to [1, 16] per the wgpu-hal interface + desc.anisotropy_clamp.min(16) + } else { + // If it isn't supported, set this unconditionally to 1 + 1 + }; + + //TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS + + let hal_desc = hal::SamplerDescriptor { + label: desc.label.borrow_option(), + address_modes: desc.address_modes, + mag_filter: desc.mag_filter, + min_filter: desc.min_filter, + mipmap_filter: desc.mipmap_filter, + lod_clamp: desc.lod_min_clamp..desc.lod_max_clamp, + compare: desc.compare, + anisotropy_clamp, + border_color: desc.border_color, + }; + + let raw = unsafe { + self.raw + .create_sampler(&hal_desc) + .map_err(DeviceError::from)? 
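
The anisotropy handling above clamps the requested value into the `[1, 16]` range the wgpu-hal interface expects, or forces it to 1 when the downlevel flag is missing; earlier validation already rejected values below 1. A sketch of that decision:

fn effective_anisotropy_clamp(requested: u16, downlevel_supported: bool) -> u16 {
    if downlevel_supported {
        // Clamp into the [1, 16] range the hal layer accepts.
        requested.min(16)
    } else {
        // Anisotropic filtering unavailable: force it off.
        1
    }
}

fn main() {
    assert_eq!(effective_anisotropy_clamp(64, true), 16);
    assert_eq!(effective_anisotropy_clamp(8, true), 8);
    assert_eq!(effective_anisotropy_clamp(8, false), 1);
}
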
+ }; + Ok(resource::Sampler { + raw, + device_id: Stored { + value: id::Valid(self_id), + ref_count: self.life_guard.add_ref(), + }, + life_guard: LifeGuard::new(desc.label.borrow_or_default()), + comparison: desc.compare.is_some(), + filtering: desc.min_filter == wgt::FilterMode::Linear + || desc.mag_filter == wgt::FilterMode::Linear, + }) + } + + pub(super) fn create_shader_module<'a>( + &self, + self_id: id::DeviceId, + desc: &pipeline::ShaderModuleDescriptor<'a>, + source: pipeline::ShaderModuleSource<'a>, + ) -> Result, pipeline::CreateShaderModuleError> { + let (module, source) = match source { + #[cfg(feature = "wgsl")] + pipeline::ShaderModuleSource::Wgsl(code) => { + profiling::scope!("naga::wgsl::parse_str"); + let module = naga::front::wgsl::parse_str(&code).map_err(|inner| { + pipeline::CreateShaderModuleError::Parsing(pipeline::ShaderError { + source: code.to_string(), + label: desc.label.as_ref().map(|l| l.to_string()), + inner: Box::new(inner), + }) + })?; + (Cow::Owned(module), code.into_owned()) + } + pipeline::ShaderModuleSource::Naga(module) => (module, String::new()), + pipeline::ShaderModuleSource::Dummy(_) => panic!("found `ShaderModuleSource::Dummy`"), + }; + for (_, var) in module.global_variables.iter() { + match var.binding { + Some(ref br) if br.group >= self.limits.max_bind_groups => { + return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex { + bind: br.clone(), + group: br.group, + limit: self.limits.max_bind_groups, + }); + } + _ => continue, + }; + } + + use naga::valid::Capabilities as Caps; + profiling::scope!("naga::validate"); + + let mut caps = Caps::empty(); + caps.set( + Caps::PUSH_CONSTANT, + self.features.contains(wgt::Features::PUSH_CONSTANTS), + ); + caps.set( + Caps::FLOAT64, + self.features.contains(wgt::Features::SHADER_F64), + ); + caps.set( + Caps::PRIMITIVE_INDEX, + self.features + .contains(wgt::Features::SHADER_PRIMITIVE_INDEX), + ); + caps.set( + Caps::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, + self.features.contains( + wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, + ), + ); + caps.set( + Caps::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING, + self.features.contains( + wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING, + ), + ); + // TODO: This needs a proper wgpu feature + caps.set( + Caps::SAMPLER_NON_UNIFORM_INDEXING, + self.features.contains( + wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, + ), + ); + caps.set( + Caps::STORAGE_TEXTURE_16BIT_NORM_FORMATS, + self.features + .contains(wgt::Features::TEXTURE_FORMAT_16BIT_NORM), + ); + caps.set( + Caps::MULTIVIEW, + self.features.contains(wgt::Features::MULTIVIEW), + ); + caps.set( + Caps::EARLY_DEPTH_TEST, + self.features + .contains(wgt::Features::SHADER_EARLY_DEPTH_TEST), + ); + caps.set( + Caps::MULTISAMPLED_SHADING, + self.downlevel + .flags + .contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING), + ); + + let info = naga::valid::Validator::new(naga::valid::ValidationFlags::all(), caps) + .validate(&module) + .map_err(|inner| { + pipeline::CreateShaderModuleError::Validation(pipeline::ShaderError { + source, + label: desc.label.as_ref().map(|l| l.to_string()), + inner: Box::new(inner), + }) + })?; + let interface = + validation::Interface::new(&module, &info, self.features, self.limits.clone()); + let hal_shader = hal::ShaderInput::Naga(hal::NagaShader { module, info }); + + let hal_desc = hal::ShaderModuleDescriptor { + label: 
desc.label.borrow_option(), + runtime_checks: desc.shader_bound_checks.runtime_checks(), + }; + let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } { + Ok(raw) => raw, + Err(error) => { + return Err(match error { + hal::ShaderError::Device(error) => { + pipeline::CreateShaderModuleError::Device(error.into()) + } + hal::ShaderError::Compilation(ref msg) => { + log::error!("Shader error: {}", msg); + pipeline::CreateShaderModuleError::Generation + } + }) + } + }; + + Ok(pipeline::ShaderModule { + raw, + device_id: Stored { + value: id::Valid(self_id), + ref_count: self.life_guard.add_ref(), + }, + interface: Some(interface), + #[cfg(debug_assertions)] + label: desc.label.borrow_or_default().to_string(), + }) + } + + #[allow(unused_unsafe)] + pub(super) unsafe fn create_shader_module_spirv<'a>( + &self, + self_id: id::DeviceId, + desc: &pipeline::ShaderModuleDescriptor<'a>, + source: &'a [u32], + ) -> Result, pipeline::CreateShaderModuleError> { + self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?; + let hal_desc = hal::ShaderModuleDescriptor { + label: desc.label.borrow_option(), + runtime_checks: desc.shader_bound_checks.runtime_checks(), + }; + let hal_shader = hal::ShaderInput::SpirV(source); + let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } { + Ok(raw) => raw, + Err(error) => { + return Err(match error { + hal::ShaderError::Device(error) => { + pipeline::CreateShaderModuleError::Device(error.into()) + } + hal::ShaderError::Compilation(ref msg) => { + log::error!("Shader error: {}", msg); + pipeline::CreateShaderModuleError::Generation + } + }) + } + }; + + Ok(pipeline::ShaderModule { + raw, + device_id: Stored { + value: id::Valid(self_id), + ref_count: self.life_guard.add_ref(), + }, + interface: None, + #[cfg(debug_assertions)] + label: desc.label.borrow_or_default().to_string(), + }) + } + + pub(super) fn deduplicate_bind_group_layout( + self_id: id::DeviceId, + entry_map: &binding_model::BindEntryMap, + guard: &Storage, id::BindGroupLayoutId>, + ) -> Option { + guard + .iter(self_id.backend()) + .find(|&(_, bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map) + .map(|(id, value)| { + value.multi_ref_count.inc(); + id + }) + } + + fn get_introspection_bind_group_layouts<'a>( + pipeline_layout: &binding_model::PipelineLayout, + bgl_guard: &'a Storage, id::BindGroupLayoutId>, + ) -> ArrayVec<&'a binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }> { + pipeline_layout + .bind_group_layout_ids + .iter() + .map(|&id| &bgl_guard[id].entries) + .collect() + } + + /// Generate information about late-validated buffer bindings for pipelines. + //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way? + fn make_late_sized_buffer_groups<'a>( + shader_binding_sizes: &FastHashMap, + layout: &binding_model::PipelineLayout, + bgl_guard: &'a Storage, id::BindGroupLayoutId>, + ) -> ArrayVec { + // Given the shader-required binding sizes and the pipeline layout, + // return the filtered list of them in the layout order, + // removing those with given `min_binding_size`. + layout + .bind_group_layout_ids + .iter() + .enumerate() + .map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup { + shader_sizes: bgl_guard[bgl_id] + .entries + .values() + .filter_map(|entry| match entry.ty { + wgt::BindingType::Buffer { + min_binding_size: None, + .. 
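
`deduplicate_bind_group_layout` above shares one layout among identical entry maps, bumping `multi_ref_count` for each extra user so the shared layout outlives all of them. A sketch with a `BTreeMap` and a `Vec<u32>` standing in for the storage and the entry map:

use std::collections::BTreeMap;

struct BindGroupLayout {
    entries: Vec<u32>, // stand-in for the real entry map
    ref_count: u32,
}

fn deduplicate(
    storage: &mut BTreeMap<usize, BindGroupLayout>,
    entries: &[u32],
) -> Option<usize> {
    let id = storage
        .iter()
        .find(|(_, bgl)| bgl.entries == entries)
        .map(|(&id, _)| id)?;
    // Hand out the existing layout, but bump its refcount so it stays
    // alive for as long as this new user does.
    storage.get_mut(&id).unwrap().ref_count += 1;
    Some(id)
}

fn main() {
    let mut storage = BTreeMap::new();
    storage.insert(0, BindGroupLayout { entries: vec![1, 2], ref_count: 1 });
    assert_eq!(deduplicate(&mut storage, &[1, 2]), Some(0));
    assert_eq!(storage[&0].ref_count, 2);
    assert_eq!(deduplicate(&mut storage, &[9]), None);
}
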
+ } => { + let rb = naga::ResourceBinding { + group: group_index as u32, + binding: entry.binding, + }; + let shader_size = + shader_binding_sizes.get(&rb).map_or(0, |nz| nz.get()); + Some(shader_size) + } + _ => None, + }) + .collect(), + }) + .collect() + } + + pub(super) fn create_bind_group_layout( + &self, + self_id: id::DeviceId, + label: Option<&str>, + entry_map: binding_model::BindEntryMap, + ) -> Result, binding_model::CreateBindGroupLayoutError> { + #[derive(PartialEq)] + enum WritableStorage { + Yes, + No, + } + + for entry in entry_map.values() { + use wgt::BindingType as Bt; + + let mut required_features = wgt::Features::empty(); + let mut required_downlevel_flags = wgt::DownlevelFlags::empty(); + let (array_feature, writable_storage) = match entry.ty { + Bt::Buffer { + ty: wgt::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: _, + } => ( + Some(wgt::Features::BUFFER_BINDING_ARRAY), + WritableStorage::No, + ), + Bt::Buffer { + ty: wgt::BufferBindingType::Uniform, + has_dynamic_offset: true, + min_binding_size: _, + } => ( + Some(wgt::Features::BUFFER_BINDING_ARRAY), + WritableStorage::No, + ), + Bt::Buffer { + ty: wgt::BufferBindingType::Storage { read_only }, + .. + } => ( + Some( + wgt::Features::BUFFER_BINDING_ARRAY + | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY, + ), + match read_only { + true => WritableStorage::No, + false => WritableStorage::Yes, + }, + ), + Bt::Sampler { .. } => ( + Some(wgt::Features::TEXTURE_BINDING_ARRAY), + WritableStorage::No, + ), + Bt::Texture { + multisampled: true, + sample_type: TextureSampleType::Float { filterable: true }, + .. + } => { + return Err(binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error: binding_model::BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled, + }); + } + Bt::Texture { .. 
} => ( + Some(wgt::Features::TEXTURE_BINDING_ARRAY), + WritableStorage::No, + ), + Bt::StorageTexture { + access, + view_dimension, + format: _, + } => { + match view_dimension { + wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { + return Err(binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error: binding_model::BindGroupLayoutEntryError::StorageTextureCube, + }) + } + _ => (), + } + match access { + wgt::StorageTextureAccess::ReadOnly + | wgt::StorageTextureAccess::ReadWrite + if !self.features.contains( + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES, + ) => + { + return Err(binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite, + }); + } + _ => (), + } + ( + Some( + wgt::Features::TEXTURE_BINDING_ARRAY + | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY, + ), + match access { + wgt::StorageTextureAccess::WriteOnly => WritableStorage::Yes, + wgt::StorageTextureAccess::ReadOnly => { + required_features |= + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; + WritableStorage::No + } + wgt::StorageTextureAccess::ReadWrite => { + required_features |= + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; + WritableStorage::Yes + } + }, + ) + } + }; + + // Validate the count parameter + if entry.count.is_some() { + required_features |= array_feature + .ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported) + .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error, + })?; + } + + if entry.visibility.contains_invalid_bits() { + return Err( + binding_model::CreateBindGroupLayoutError::InvalidVisibility(entry.visibility), + ); + } + + if entry.visibility.contains(wgt::ShaderStages::VERTEX) { + if writable_storage == WritableStorage::Yes { + required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE; + } + if let Bt::Buffer { + ty: wgt::BufferBindingType::Storage { .. }, + .. + } = entry.ty + { + required_downlevel_flags |= wgt::DownlevelFlags::VERTEX_STORAGE; + } + } + if writable_storage == WritableStorage::Yes + && entry.visibility.contains(wgt::ShaderStages::FRAGMENT) + { + required_downlevel_flags |= wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE; + } + + self.require_features(required_features) + .map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures) + .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error, + })?; + self.require_downlevel_flags(required_downlevel_flags) + .map_err(binding_model::BindGroupLayoutEntryError::MissingDownlevelFlags) + .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error, + })?; + } + + let bgl_flags = conv::bind_group_layout_flags(self.features); + + let mut hal_bindings = entry_map.values().cloned().collect::>(); + hal_bindings.sort_by_key(|b| b.binding); + let hal_desc = hal::BindGroupLayoutDescriptor { + label, + flags: bgl_flags, + entries: &hal_bindings, + }; + let raw = unsafe { + self.raw + .create_bind_group_layout(&hal_desc) + .map_err(DeviceError::from)? + }; + + let mut count_validator = binding_model::BindingTypeMaxCountValidator::default(); + for entry in entry_map.values() { + count_validator.add_binding(entry); + } + // If a single bind group layout violates limits, the pipeline layout is + // definitely going to violate limits too, lets catch it now. 
+ count_validator + .validate(&self.limits) + .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?; + + Ok(binding_model::BindGroupLayout { + raw, + device_id: Stored { + value: id::Valid(self_id), + ref_count: self.life_guard.add_ref(), + }, + multi_ref_count: MultiRefCount::new(), + dynamic_count: entry_map + .values() + .filter(|b| b.ty.has_dynamic_offset()) + .count(), + count_validator, + entries: entry_map, + #[cfg(debug_assertions)] + label: label.unwrap_or("").to_string(), + }) + } + + fn create_buffer_binding<'a>( + bb: &binding_model::BufferBinding, + binding: u32, + decl: &wgt::BindGroupLayoutEntry, + used_buffer_ranges: &mut Vec, + dynamic_binding_info: &mut Vec, + late_buffer_binding_sizes: &mut FastHashMap, + used: &mut BindGroupStates, + storage: &'a Storage, id::BufferId>, + limits: &wgt::Limits, + ) -> Result, binding_model::CreateBindGroupError> { + use crate::binding_model::CreateBindGroupError as Error; + + let (binding_ty, dynamic, min_size) = match decl.ty { + wgt::BindingType::Buffer { + ty, + has_dynamic_offset, + min_binding_size, + } => (ty, has_dynamic_offset, min_binding_size), + _ => { + return Err(Error::WrongBindingType { + binding, + actual: decl.ty, + expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer", + }) + } + }; + let (pub_usage, internal_use, range_limit) = match binding_ty { + wgt::BufferBindingType::Uniform => ( + wgt::BufferUsages::UNIFORM, + hal::BufferUses::UNIFORM, + limits.max_uniform_buffer_binding_size, + ), + wgt::BufferBindingType::Storage { read_only } => ( + wgt::BufferUsages::STORAGE, + if read_only { + hal::BufferUses::STORAGE_READ + } else { + hal::BufferUses::STORAGE_READ_WRITE + }, + limits.max_storage_buffer_binding_size, + ), + }; + + let (align, align_limit_name) = + binding_model::buffer_binding_type_alignment(limits, binding_ty); + if bb.offset % align as u64 != 0 { + return Err(Error::UnalignedBufferOffset( + bb.offset, + align_limit_name, + align, + )); + } + + let buffer = used + .buffers + .add_single(storage, bb.buffer_id, internal_use) + .ok_or(Error::InvalidBuffer(bb.buffer_id))?; + check_buffer_usage(buffer.usage, pub_usage)?; + let raw_buffer = buffer + .raw + .as_ref() + .ok_or(Error::InvalidBuffer(bb.buffer_id))?; + + let (bind_size, bind_end) = match bb.size { + Some(size) => { + let end = bb.offset + size.get(); + if end > buffer.size { + return Err(Error::BindingRangeTooLarge { + buffer: bb.buffer_id, + range: bb.offset..end, + size: buffer.size, + }); + } + (size.get(), end) + } + None => (buffer.size - bb.offset, buffer.size), + }; + + if bind_size > range_limit as u64 { + return Err(Error::BufferRangeTooLarge { + binding, + given: bind_size as u32, + limit: range_limit, + }); + } + + // Record binding info for validating dynamic offsets + if dynamic { + dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData { + binding_idx: binding, + buffer_size: buffer.size, + binding_range: bb.offset..bind_end, + maximum_dynamic_offset: buffer.size - bind_end, + binding_type: binding_ty, + }); + } + + if let Some(non_zero) = min_size { + let min_size = non_zero.get(); + if min_size > bind_size { + return Err(Error::BindingSizeTooSmall { + buffer: bb.buffer_id, + actual: bind_size, + min: min_size, + }); + } + } else { + let late_size = + wgt::BufferSize::new(bind_size).ok_or(Error::BindingZeroSize(bb.buffer_id))?; + late_buffer_binding_sizes.insert(binding, late_size); + } + + assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0); + 
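
The binding-range logic above resolves `size: None` to "the rest of the buffer" and rejects ranges that run past its end. A simplified sketch; the real code also applies per-binding-type range limits, and only reports a zero-size binding when no `min_binding_size` was declared:

#[derive(Debug, PartialEq)]
enum BindError {
    RangeTooLarge { end: u64, size: u64 },
    ZeroSize,
}

fn resolve_binding(
    buffer_size: u64,
    offset: u64,
    requested: Option<u64>,
) -> Result<(u64, u64), BindError> {
    // Returns (bind_size, bind_end); `None` means "rest of the buffer".
    let (bind_size, bind_end) = match requested {
        Some(size) => {
            let end = offset + size;
            if end > buffer_size {
                return Err(BindError::RangeTooLarge { end, size: buffer_size });
            }
            (size, end)
        }
        None => (buffer_size - offset, buffer_size),
    };
    if bind_size == 0 {
        return Err(BindError::ZeroSize);
    }
    Ok((bind_size, bind_end))
}

fn main() {
    assert_eq!(resolve_binding(256, 64, None), Ok((192, 256)));
    assert_eq!(resolve_binding(256, 64, Some(128)), Ok((128, 192)));
    assert_eq!(
        resolve_binding(256, 64, Some(256)),
        Err(BindError::RangeTooLarge { end: 320, size: 256 })
    );
}
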
used_buffer_ranges.extend(buffer.initialization_status.create_action( + bb.buffer_id, + bb.offset..bb.offset + bind_size, + MemoryInitKind::NeedsInitializedMemory, + )); + + Ok(hal::BufferBinding { + buffer: raw_buffer, + offset: bb.offset, + size: bb.size, + }) + } + + fn create_texture_binding( + view: &resource::TextureView, + texture_guard: &Storage, id::TextureId>, + internal_use: hal::TextureUses, + pub_usage: wgt::TextureUsages, + used: &mut BindGroupStates, + used_texture_ranges: &mut Vec, + ) -> Result<(), binding_model::CreateBindGroupError> { + // Careful here: the texture may no longer have its own ref count, + // if it was deleted by the user. + let texture = used + .textures + .add_single( + texture_guard, + view.parent_id.value.0, + view.parent_id.ref_count.clone(), + Some(view.selector.clone()), + internal_use, + ) + .ok_or(binding_model::CreateBindGroupError::InvalidTexture( + view.parent_id.value.0, + ))?; + check_texture_usage(texture.desc.usage, pub_usage)?; + + used_texture_ranges.push(TextureInitTrackerAction { + id: view.parent_id.value.0, + range: TextureInitRange { + mip_range: view.desc.range.mip_range(texture.desc.mip_level_count), + layer_range: view + .desc + .range + .layer_range(texture.desc.array_layer_count()), + }, + kind: MemoryInitKind::NeedsInitializedMemory, + }); + + Ok(()) + } + + pub(super) fn create_bind_group( + &self, + self_id: id::DeviceId, + layout: &binding_model::BindGroupLayout, + desc: &binding_model::BindGroupDescriptor, + hub: &Hub, + token: &mut Token>, + ) -> Result, binding_model::CreateBindGroupError> { + use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error}; + { + // Check that the number of entries in the descriptor matches + // the number of entries in the layout. + let actual = desc.entries.len(); + let expected = layout.entries.len(); + if actual != expected { + return Err(Error::BindingsNumMismatch { expected, actual }); + } + } + + // TODO: arrayvec/smallvec, or re-use allocations + // Record binding info for dynamic offset validation + let mut dynamic_binding_info = Vec::new(); + // Map of binding -> shader reflected size + //Note: we can't collect into a vector right away because + // it needs to be in BGL iteration order, not BG entry order. 
+ let mut late_buffer_binding_sizes = FastHashMap::default(); + // fill out the descriptors + let mut used = BindGroupStates::new(); + + let (buffer_guard, mut token) = hub.buffers.read(token); + let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token + let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); + let (sampler_guard, _) = hub.samplers.read(&mut token); + + let mut used_buffer_ranges = Vec::new(); + let mut used_texture_ranges = Vec::new(); + let mut hal_entries = Vec::with_capacity(desc.entries.len()); + let mut hal_buffers = Vec::new(); + let mut hal_samplers = Vec::new(); + let mut hal_textures = Vec::new(); + for entry in desc.entries.iter() { + let binding = entry.binding; + // Find the corresponding declaration in the layout + let decl = layout + .entries + .get(&binding) + .ok_or(Error::MissingBindingDeclaration(binding))?; + let (res_index, count) = match entry.resource { + Br::Buffer(ref bb) => { + let bb = Self::create_buffer_binding( + bb, + binding, + decl, + &mut used_buffer_ranges, + &mut dynamic_binding_info, + &mut late_buffer_binding_sizes, + &mut used, + &*buffer_guard, + &self.limits, + )?; + + let res_index = hal_buffers.len(); + hal_buffers.push(bb); + (res_index, 1) + } + Br::BufferArray(ref bindings_array) => { + let num_bindings = bindings_array.len(); + Self::check_array_binding(self.features, decl.count, num_bindings)?; + + let res_index = hal_buffers.len(); + for bb in bindings_array.iter() { + let bb = Self::create_buffer_binding( + bb, + binding, + decl, + &mut used_buffer_ranges, + &mut dynamic_binding_info, + &mut late_buffer_binding_sizes, + &mut used, + &*buffer_guard, + &self.limits, + )?; + hal_buffers.push(bb); + } + (res_index, num_bindings) + } + Br::Sampler(id) => { + match decl.ty { + wgt::BindingType::Sampler(ty) => { + let sampler = used + .samplers + .add_single(&*sampler_guard, id) + .ok_or(Error::InvalidSampler(id))?; + + // Allowed sampler values for filtering and comparison + let (allowed_filtering, allowed_comparison) = match ty { + wgt::SamplerBindingType::Filtering => (None, false), + wgt::SamplerBindingType::NonFiltering => (Some(false), false), + wgt::SamplerBindingType::Comparison => (None, true), + }; + + if let Some(allowed_filtering) = allowed_filtering { + if allowed_filtering != sampler.filtering { + return Err(Error::WrongSamplerFiltering { + binding, + layout_flt: allowed_filtering, + sampler_flt: sampler.filtering, + }); + } + } + + if allowed_comparison != sampler.comparison { + return Err(Error::WrongSamplerComparison { + binding, + layout_cmp: allowed_comparison, + sampler_cmp: sampler.comparison, + }); + } + + let res_index = hal_samplers.len(); + hal_samplers.push(&sampler.raw); + (res_index, 1) + } + _ => { + return Err(Error::WrongBindingType { + binding, + actual: decl.ty, + expected: "Sampler", + }) + } + } + } + Br::SamplerArray(ref bindings_array) => { + let num_bindings = bindings_array.len(); + Self::check_array_binding(self.features, decl.count, num_bindings)?; + + let res_index = hal_samplers.len(); + for &id in bindings_array.iter() { + let sampler = used + .samplers + .add_single(&*sampler_guard, id) + .ok_or(Error::InvalidSampler(id))?; + hal_samplers.push(&sampler.raw); + } + + (res_index, num_bindings) + } + Br::TextureView(id) => { + let view = used + .views + .add_single(&*texture_view_guard, id) + .ok_or(Error::InvalidTextureView(id))?; + let (pub_usage, internal_use) = Self::texture_use_parameters( + binding, + decl, + view, + "SampledTexture, 
ReadonlyStorageTexture or WriteonlyStorageTexture", + )?; + Self::create_texture_binding( + view, + &texture_guard, + internal_use, + pub_usage, + &mut used, + &mut used_texture_ranges, + )?; + let res_index = hal_textures.len(); + hal_textures.push(hal::TextureBinding { + view: &view.raw, + usage: internal_use, + }); + (res_index, 1) + } + Br::TextureViewArray(ref bindings_array) => { + let num_bindings = bindings_array.len(); + Self::check_array_binding(self.features, decl.count, num_bindings)?; + + let res_index = hal_textures.len(); + for &id in bindings_array.iter() { + let view = used + .views + .add_single(&*texture_view_guard, id) + .ok_or(Error::InvalidTextureView(id))?; + let (pub_usage, internal_use) = + Self::texture_use_parameters(binding, decl, view, + "SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?; + Self::create_texture_binding( + view, + &texture_guard, + internal_use, + pub_usage, + &mut used, + &mut used_texture_ranges, + )?; + hal_textures.push(hal::TextureBinding { + view: &view.raw, + usage: internal_use, + }); + } + + (res_index, num_bindings) + } + }; + + hal_entries.push(hal::BindGroupEntry { + binding, + resource_index: res_index as u32, + count: count as u32, + }); + } + + used.optimize(); + + hal_entries.sort_by_key(|entry| entry.binding); + for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) { + if a.binding == b.binding { + return Err(Error::DuplicateBinding(a.binding)); + } + } + + let hal_desc = hal::BindGroupDescriptor { + label: desc.label.borrow_option(), + layout: &layout.raw, + entries: &hal_entries, + buffers: &hal_buffers, + samplers: &hal_samplers, + textures: &hal_textures, + }; + let raw = unsafe { + self.raw + .create_bind_group(&hal_desc) + .map_err(DeviceError::from)? 
+ }; + + // manually add a dependency on BGL + layout.multi_ref_count.inc(); + + Ok(binding_model::BindGroup { + raw, + device_id: Stored { + value: id::Valid(self_id), + ref_count: self.life_guard.add_ref(), + }, + layout_id: id::Valid(desc.layout), + life_guard: LifeGuard::new(desc.label.borrow_or_default()), + used, + used_buffer_ranges, + used_texture_ranges, + dynamic_binding_info, + // collect in the order of BGL iteration + late_buffer_binding_sizes: layout + .entries + .keys() + .flat_map(|binding| late_buffer_binding_sizes.get(binding).cloned()) + .collect(), + }) + } + + fn check_array_binding( + features: wgt::Features, + count: Option, + num_bindings: usize, + ) -> Result<(), super::binding_model::CreateBindGroupError> { + use super::binding_model::CreateBindGroupError as Error; + + if let Some(count) = count { + let count = count.get() as usize; + if count < num_bindings { + return Err(Error::BindingArrayPartialLengthMismatch { + actual: num_bindings, + expected: count, + }); + } + if count != num_bindings + && !features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY) + { + return Err(Error::BindingArrayLengthMismatch { + actual: num_bindings, + expected: count, + }); + } + if num_bindings == 0 { + return Err(Error::BindingArrayZeroLength); + } + } else { + return Err(Error::SingleBindingExpected); + }; + + Ok(()) + } + + fn texture_use_parameters( + binding: u32, + decl: &wgt::BindGroupLayoutEntry, + view: &crate::resource::TextureView, + expected: &'static str, + ) -> Result<(wgt::TextureUsages, hal::TextureUses), binding_model::CreateBindGroupError> { + use crate::binding_model::CreateBindGroupError as Error; + if view + .desc + .aspects() + .contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL) + { + return Err(Error::DepthStencilAspect); + } + match decl.ty { + wgt::BindingType::Texture { + sample_type, + view_dimension, + multisampled, + } => { + use wgt::TextureSampleType as Tst; + if multisampled != (view.samples != 1) { + return Err(Error::InvalidTextureMultisample { + binding, + layout_multisampled: multisampled, + view_samples: view.samples, + }); + } + let compat_sample_type = view + .desc + .format + .sample_type(Some(view.desc.range.aspect)) + .unwrap(); + match (sample_type, compat_sample_type) { + (Tst::Uint, Tst::Uint) | + (Tst::Sint, Tst::Sint) | + (Tst::Depth, Tst::Depth) | + // if we expect non-filterable, accept anything float + (Tst::Float { filterable: false }, Tst::Float { .. }) | + // if we expect filterable, require it + (Tst::Float { filterable: true }, Tst::Float { filterable: true }) | + // if we expect non-filterable, also accept depth + (Tst::Float { filterable: false }, Tst::Depth) => {} + // if we expect filterable, also accept Float that is defined as + // unfilterable if filterable feature is explicitly enabled (only hit + // if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is + // enabled) + (Tst::Float { filterable: true }, Tst::Float { .. 
+    fn texture_use_parameters(
+        binding: u32,
+        decl: &wgt::BindGroupLayoutEntry,
+        view: &crate::resource::TextureView<A>,
+        expected: &'static str,
+    ) -> Result<(wgt::TextureUsages, hal::TextureUses), binding_model::CreateBindGroupError> {
+        use crate::binding_model::CreateBindGroupError as Error;
+        if view
+            .desc
+            .aspects()
+            .contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL)
+        {
+            return Err(Error::DepthStencilAspect);
+        }
+        match decl.ty {
+            wgt::BindingType::Texture {
+                sample_type,
+                view_dimension,
+                multisampled,
+            } => {
+                use wgt::TextureSampleType as Tst;
+                if multisampled != (view.samples != 1) {
+                    return Err(Error::InvalidTextureMultisample {
+                        binding,
+                        layout_multisampled: multisampled,
+                        view_samples: view.samples,
+                    });
+                }
+                let compat_sample_type = view
+                    .desc
+                    .format
+                    .sample_type(Some(view.desc.range.aspect))
+                    .unwrap();
+                match (sample_type, compat_sample_type) {
+                    (Tst::Uint, Tst::Uint) |
+                    (Tst::Sint, Tst::Sint) |
+                    (Tst::Depth, Tst::Depth) |
+                    // if we expect non-filterable, accept anything float
+                    (Tst::Float { filterable: false }, Tst::Float { .. }) |
+                    // if we expect filterable, require it
+                    (Tst::Float { filterable: true }, Tst::Float { filterable: true }) |
+                    // if we expect non-filterable, also accept depth
+                    (Tst::Float { filterable: false }, Tst::Depth) => {}
+                    // if we expect filterable, also accept Float that is defined as
+                    // unfilterable if filterable feature is explicitly enabled (only hit
+                    // if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is
+                    // enabled)
+                    (Tst::Float { filterable: true }, Tst::Float { .. })
+                        if view
+                            .format_features
+                            .flags
+                            .contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {}
+                    _ => {
+                        return Err(Error::InvalidTextureSampleType {
+                            binding,
+                            layout_sample_type: sample_type,
+                            view_format: view.desc.format,
+                        })
+                    }
+                }
+                if view_dimension != view.desc.dimension {
+                    return Err(Error::InvalidTextureDimension {
+                        binding,
+                        layout_dimension: view_dimension,
+                        view_dimension: view.desc.dimension,
+                    });
+                }
+                Ok((
+                    wgt::TextureUsages::TEXTURE_BINDING,
+                    hal::TextureUses::RESOURCE,
+                ))
+            }
+            wgt::BindingType::StorageTexture {
+                access,
+                format,
+                view_dimension,
+            } => {
+                if format != view.desc.format {
+                    return Err(Error::InvalidStorageTextureFormat {
+                        binding,
+                        layout_format: format,
+                        view_format: view.desc.format,
+                    });
+                }
+                if view_dimension != view.desc.dimension {
+                    return Err(Error::InvalidTextureDimension {
+                        binding,
+                        layout_dimension: view_dimension,
+                        view_dimension: view.desc.dimension,
+                    });
+                }
+
+                let mip_level_count = view.selector.mips.end - view.selector.mips.start;
+                if mip_level_count != 1 {
+                    return Err(Error::InvalidStorageTextureMipLevelCount {
+                        binding,
+                        mip_level_count,
+                    });
+                }
+
+                let internal_use = match access {
+                    wgt::StorageTextureAccess::WriteOnly => hal::TextureUses::STORAGE_READ_WRITE,
+                    wgt::StorageTextureAccess::ReadOnly => {
+                        if !view
+                            .format_features
+                            .flags
+                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
+                        {
+                            return Err(Error::StorageReadNotSupported(view.desc.format));
+                        }
+                        hal::TextureUses::STORAGE_READ
+                    }
+                    wgt::StorageTextureAccess::ReadWrite => {
+                        if !view
+                            .format_features
+                            .flags
+                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
+                        {
+                            return Err(Error::StorageReadNotSupported(view.desc.format));
+                        }
+
+                        hal::TextureUses::STORAGE_READ_WRITE
+                    }
+                };
+                Ok((wgt::TextureUsages::STORAGE_BINDING, internal_use))
+            }
+            _ => Err(Error::WrongBindingType {
+                binding,
+                actual: decl.ty,
+                expected,
+            }),
+        }
+    }
+
+    pub(super) fn create_pipeline_layout(
+        &self,
+        self_id: id::DeviceId,
+        desc: &binding_model::PipelineLayoutDescriptor,
+        bgl_guard: &Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
+    ) -> Result<binding_model::PipelineLayout<A>, binding_model::CreatePipelineLayoutError> {
+        use crate::binding_model::CreatePipelineLayoutError as Error;
+
+        let bind_group_layouts_count = desc.bind_group_layouts.len();
+        let device_max_bind_groups = self.limits.max_bind_groups as usize;
+        if bind_group_layouts_count > device_max_bind_groups {
+            return Err(Error::TooManyGroups {
+                actual: bind_group_layouts_count,
+                max: device_max_bind_groups,
+            });
+        }
+
+        if !desc.push_constant_ranges.is_empty() {
+            self.require_features(wgt::Features::PUSH_CONSTANTS)?;
+        }
+
+        let mut used_stages = wgt::ShaderStages::empty();
+        for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
+            if pc.stages.intersects(used_stages) {
+                return Err(Error::MoreThanOnePushConstantRangePerStage {
+                    index,
+                    provided: pc.stages,
+                    intersected: pc.stages & used_stages,
+                });
+            }
+            used_stages |= pc.stages;
+
+            let device_max_pc_size = self.limits.max_push_constant_size;
+            if device_max_pc_size < pc.range.end {
+                return Err(Error::PushConstantRangeTooLarge {
+                    index,
+                    range: pc.range.clone(),
+                    max: device_max_pc_size,
+                });
+            }
+
+            if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
+                return Err(Error::MisalignedPushConstantRange {
+                    index,
+                    bound: pc.range.start,
+                });
+            }
+            if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
+                return Err(Error::MisalignedPushConstantRange {
+                    index,
+                    bound: pc.range.end,
+                });
+            }
+        }
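+
+        // Binding counts are checked in aggregate: each bind group layout
+        // carries a pre-computed `BindingTypeMaxCountValidator`, and merging
+        // them lets the totals be compared against `self.limits` once, below.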
+        let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
+
+        // validate total resource counts
+        for &id in desc.bind_group_layouts.iter() {
+            let bind_group_layout = bgl_guard
+                .get(id)
+                .map_err(|_| Error::InvalidBindGroupLayout(id))?;
+            count_validator.merge(&bind_group_layout.count_validator);
+        }
+        count_validator
+            .validate(&self.limits)
+            .map_err(Error::TooManyBindings)?;
+
+        let bgl_vec = desc
+            .bind_group_layouts
+            .iter()
+            .map(|&id| &bgl_guard.get(id).unwrap().raw)
+            .collect::<Vec<_>>();
+        let hal_desc = hal::PipelineLayoutDescriptor {
+            label: desc.label.borrow_option(),
+            flags: hal::PipelineLayoutFlags::BASE_VERTEX_INSTANCE,
+            bind_group_layouts: &bgl_vec,
+            push_constant_ranges: desc.push_constant_ranges.as_ref(),
+        };
+
+        let raw = unsafe {
+            self.raw
+                .create_pipeline_layout(&hal_desc)
+                .map_err(DeviceError::from)?
+        };
+
+        Ok(binding_model::PipelineLayout {
+            raw,
+            device_id: Stored {
+                value: id::Valid(self_id),
+                ref_count: self.life_guard.add_ref(),
+            },
+            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+            bind_group_layout_ids: desc
+                .bind_group_layouts
+                .iter()
+                .map(|&id| {
+                    // manually add a dependency to BGL
+                    bgl_guard.get(id).unwrap().multi_ref_count.inc();
+                    id::Valid(id)
+                })
+                .collect(),
+            push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
+        })
+    }
+
+    //TODO: refactor this. It's the only method of `Device` that registers new objects
+    // (the pipeline layout).
+    fn derive_pipeline_layout(
+        &self,
+        self_id: id::DeviceId,
+        implicit_context: Option<ImplicitPipelineContext>,
+        mut derived_group_layouts: ArrayVec<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>,
+        bgl_guard: &mut Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
+        pipeline_layout_guard: &mut Storage<binding_model::PipelineLayout<A>, id::PipelineLayoutId>,
+    ) -> Result<id::PipelineLayoutId, pipeline::ImplicitLayoutError> {
+        while derived_group_layouts
+            .last()
+            .map_or(false, |map| map.is_empty())
+        {
+            derived_group_layouts.pop();
+        }
+        let mut ids = implicit_context.ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?;
+        let group_count = derived_group_layouts.len();
+        if ids.group_ids.len() < group_count {
+            log::error!(
+                "Not enough bind group IDs ({}) specified for the implicit layout ({})",
+                ids.group_ids.len(),
+                derived_group_layouts.len()
+            );
+            return Err(pipeline::ImplicitLayoutError::MissingIds(group_count as _));
+        }
+
+        for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) {
+            match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard) {
+                Some(dedup_id) => {
+                    *bgl_id = dedup_id;
+                }
+                None => {
+                    let bgl = self.create_bind_group_layout(self_id, None, map)?;
+                    bgl_guard.force_replace(*bgl_id, bgl);
+                }
+            };
+        }
+
+        let layout_desc = binding_model::PipelineLayoutDescriptor {
+            label: None,
+            bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]),
+            push_constant_ranges: Cow::Borrowed(&[]), //TODO?
+        };
+        let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?;
+        pipeline_layout_guard.force_replace(ids.root_id, layout);
+        Ok(ids.root_id)
+    }
+
+    pub(super) fn create_compute_pipeline<G: GlobalIdentityHandlerFactory>(
+        &self,
+        self_id: id::DeviceId,
+        desc: &pipeline::ComputePipelineDescriptor,
+        implicit_context: Option<ImplicitPipelineContext>,
+        hub: &Hub<A, G>,
+        token: &mut Token<Self>,
+    ) -> Result<pipeline::ComputePipeline<A>, pipeline::CreateComputePipelineError> {
+        //TODO: only lock mutable if the layout is derived
+        let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
+        let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
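+
+        // Note: `Token`s enforce the hub's global lock order, so the
+        // pipeline-layout storage is locked before the bind-group-layout
+        // storage here, per the `Access` ordering declared in hub.rs.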
+        // This has to be done first, or otherwise the IDs may be pointing to entries
+        // that are not even in the storage.
+        if let Some(ref ids) = implicit_context {
+            pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
+            for &bgl_id in ids.group_ids.iter() {
+                bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
+            }
+        }
+
+        self.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?;
+
+        let mut derived_group_layouts =
+            ArrayVec::<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>::new();
+        let mut shader_binding_sizes = FastHashMap::default();
+
+        let io = validation::StageIo::default();
+        let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
+
+        let shader_module = shader_module_guard
+            .get(desc.stage.module)
+            .map_err(|_| validation::StageError::InvalidModule)?;
+
+        {
+            let flag = wgt::ShaderStages::COMPUTE;
+            let provided_layouts = match desc.layout {
+                Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
+                    pipeline_layout_guard
+                        .get(pipeline_layout_id)
+                        .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
+                    &*bgl_guard,
+                )),
+                None => {
+                    for _ in 0..self.limits.max_bind_groups {
+                        derived_group_layouts.push(binding_model::BindEntryMap::default());
+                    }
+                    None
+                }
+            };
+            if let Some(ref interface) = shader_module.interface {
+                let _ = interface.check_stage(
+                    provided_layouts.as_ref().map(|p| p.as_slice()),
+                    &mut derived_group_layouts,
+                    &mut shader_binding_sizes,
+                    &desc.stage.entry_point,
+                    flag,
+                    io,
+                    None,
+                )?;
+            }
+        }
+
+        let pipeline_layout_id = match desc.layout {
+            Some(id) => id,
+            None => self.derive_pipeline_layout(
+                self_id,
+                implicit_context,
+                derived_group_layouts,
+                &mut *bgl_guard,
+                &mut *pipeline_layout_guard,
+            )?,
+        };
+        let layout = pipeline_layout_guard
+            .get(pipeline_layout_id)
+            .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?;
+
+        let late_sized_buffer_groups =
+            Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard);
+
+        let pipeline_desc = hal::ComputePipelineDescriptor {
+            label: desc.label.borrow_option(),
+            layout: &layout.raw,
+            stage: hal::ProgrammableStage {
+                entry_point: desc.stage.entry_point.as_ref(),
+                module: &shader_module.raw,
+            },
+        };
+
+        let raw =
+            unsafe { self.raw.create_compute_pipeline(&pipeline_desc) }.map_err(
+                |err| match err {
+                    hal::PipelineError::Device(error) => {
+                        pipeline::CreateComputePipelineError::Device(error.into())
+                    }
+                    hal::PipelineError::Linkage(_stages, msg) => {
+                        pipeline::CreateComputePipelineError::Internal(msg)
+                    }
+                    hal::PipelineError::EntryPoint(_stage) => {
+                        pipeline::CreateComputePipelineError::Internal(EP_FAILURE.to_string())
+                    }
+                },
+            )?;
+
+        let pipeline = pipeline::ComputePipeline {
+            raw,
+            layout_id: Stored {
+                value: id::Valid(pipeline_layout_id),
+                ref_count: layout.life_guard.add_ref(),
+            },
+            device_id: Stored {
+                value: id::Valid(self_id),
+                ref_count: self.life_guard.add_ref(),
+            },
+            late_sized_buffer_groups,
+            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+        };
+        Ok(pipeline)
+    }
+
+    pub(super) fn create_render_pipeline<G: GlobalIdentityHandlerFactory>(
+        &self,
+        self_id: id::DeviceId,
+        adapter: &Adapter<A>,
+        desc: &pipeline::RenderPipelineDescriptor,
+        implicit_context: Option<ImplicitPipelineContext>,
+        hub: &Hub<A, G>,
+        token: &mut Token<Self>,
+    ) -> Result<pipeline::RenderPipeline<A>, pipeline::CreateRenderPipelineError> {
+        use wgt::TextureFormatFeatureFlags as Tfff;
+
+        //TODO: only lock mutable if the layout is derived
+        let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
+        let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
+
+        // This has to be done first, or otherwise the IDs may be pointing to entries
+        // that are not even in the storage.
+ if let Some(ref ids) = implicit_context { + pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); + for &bgl_id in ids.group_ids.iter() { + bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); + } + } + + let mut derived_group_layouts = + ArrayVec::::new(); + let mut shader_binding_sizes = FastHashMap::default(); + + let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0); + if num_attachments > hal::MAX_COLOR_ATTACHMENTS { + return Err(pipeline::CreateRenderPipelineError::ColorAttachment( + command::ColorAttachmentError::TooMany { + given: num_attachments, + limit: hal::MAX_COLOR_ATTACHMENTS, + }, + )); + } + + let color_targets = desc + .fragment + .as_ref() + .map_or(&[][..], |fragment| &fragment.targets); + let depth_stencil_state = desc.depth_stencil.as_ref(); + + let cts: ArrayVec<_, { hal::MAX_COLOR_ATTACHMENTS }> = + color_targets.iter().filter_map(|x| x.as_ref()).collect(); + if !cts.is_empty() && { + let first = &cts[0]; + cts[1..] + .iter() + .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend) + } { + log::info!("Color targets: {:?}", color_targets); + self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?; + } + + let mut io = validation::StageIo::default(); + let mut validated_stages = wgt::ShaderStages::empty(); + + let mut vertex_steps = Vec::with_capacity(desc.vertex.buffers.len()); + let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len()); + let mut total_attributes = 0; + for (i, vb_state) in desc.vertex.buffers.iter().enumerate() { + vertex_steps.push(pipeline::VertexStep { + stride: vb_state.array_stride, + mode: vb_state.step_mode, + }); + if vb_state.attributes.is_empty() { + continue; + } + if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 { + return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge { + index: i as u32, + given: vb_state.array_stride as u32, + limit: self.limits.max_vertex_buffer_array_stride, + }); + } + if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 { + return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride { + index: i as u32, + stride: vb_state.array_stride, + }); + } + vertex_buffers.push(hal::VertexBufferLayout { + array_stride: vb_state.array_stride, + step_mode: vb_state.step_mode, + attributes: vb_state.attributes.as_ref(), + }); + + for attribute in vb_state.attributes.iter() { + if attribute.offset >= 0x10000000 { + return Err( + pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset { + location: attribute.shader_location, + offset: attribute.offset, + }, + ); + } + + if let wgt::VertexFormat::Float64 + | wgt::VertexFormat::Float64x2 + | wgt::VertexFormat::Float64x3 + | wgt::VertexFormat::Float64x4 = attribute.format + { + self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?; + } + + let previous = io.insert( + attribute.shader_location, + validation::InterfaceVar::vertex_attribute(attribute.format), + ); + + if previous.is_some() { + return Err(pipeline::CreateRenderPipelineError::ShaderLocationClash( + attribute.shader_location, + )); + } + } + total_attributes += vb_state.attributes.len(); + } + + if vertex_buffers.len() > self.limits.max_vertex_buffers as usize { + return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers { + given: vertex_buffers.len() as u32, + limit: self.limits.max_vertex_buffers, + }); + } + if total_attributes > self.limits.max_vertex_attributes as usize { + return Err( + 
pipeline::CreateRenderPipelineError::TooManyVertexAttributes { + given: total_attributes as u32, + limit: self.limits.max_vertex_attributes, + }, + ); + } + + if desc.primitive.strip_index_format.is_some() && !desc.primitive.topology.is_strip() { + return Err( + pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology { + strip_index_format: desc.primitive.strip_index_format, + topology: desc.primitive.topology, + }, + ); + } + + if desc.primitive.unclipped_depth { + self.require_features(wgt::Features::DEPTH_CLIP_CONTROL)?; + } + + if desc.primitive.polygon_mode == wgt::PolygonMode::Line { + self.require_features(wgt::Features::POLYGON_MODE_LINE)?; + } + if desc.primitive.polygon_mode == wgt::PolygonMode::Point { + self.require_features(wgt::Features::POLYGON_MODE_POINT)?; + } + + if desc.primitive.conservative { + self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?; + } + + if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill { + return Err( + pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode, + ); + } + + for (i, cs) in color_targets.iter().enumerate() { + if let Some(cs) = cs.as_ref() { + let error = loop { + if cs.write_mask.contains_invalid_bits() { + break Some(pipeline::ColorStateError::InvalidWriteMask(cs.write_mask)); + } + + let format_features = self.describe_format_features(adapter, cs.format)?; + if !format_features + .allowed_usages + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + { + break Some(pipeline::ColorStateError::FormatNotRenderable(cs.format)); + } + let blendable = format_features.flags.contains(Tfff::BLENDABLE); + let filterable = format_features.flags.contains(Tfff::FILTERABLE); + let adapter_specific = self + .features + .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES); + // according to WebGPU specifications the texture needs to be + // [`TextureFormatFeatureFlags::FILTERABLE`] if blending is set - use + // [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] to elude + // this limitation + if cs.blend.is_some() && (!blendable || (!filterable && !adapter_specific)) { + break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format)); + } + if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) { + break Some(pipeline::ColorStateError::FormatNotColor(cs.format)); + } + if desc.multisample.count > 1 + && !format_features + .flags + .sample_count_supported(desc.multisample.count) + { + break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format)); + } + + break None; + }; + if let Some(e) = error { + return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e)); + } + } + } + + if let Some(ds) = depth_stencil_state { + let error = loop { + let format_features = self.describe_format_features(adapter, ds.format)?; + if !format_features + .allowed_usages + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + { + break Some(pipeline::DepthStencilStateError::FormatNotRenderable( + ds.format, + )); + } + + let aspect = hal::FormatAspects::from(ds.format); + if ds.is_depth_enabled() && !aspect.contains(hal::FormatAspects::DEPTH) { + break Some(pipeline::DepthStencilStateError::FormatNotDepth(ds.format)); + } + if ds.stencil.is_enabled() && !aspect.contains(hal::FormatAspects::STENCIL) { + break Some(pipeline::DepthStencilStateError::FormatNotStencil( + ds.format, + )); + } + if desc.multisample.count > 1 + && !format_features + .flags + .sample_count_supported(desc.multisample.count) + { + break 
Some(pipeline::DepthStencilStateError::FormatNotMultisampled( + ds.format, + )); + } + + break None; + }; + if let Some(e) = error { + return Err(pipeline::CreateRenderPipelineError::DepthStencilState(e)); + } + + if ds.bias.clamp != 0.0 { + self.require_downlevel_flags(wgt::DownlevelFlags::DEPTH_BIAS_CLAMP)?; + } + } + + if desc.layout.is_none() { + for _ in 0..self.limits.max_bind_groups { + derived_group_layouts.push(binding_model::BindEntryMap::default()); + } + } + + let samples = { + let sc = desc.multisample.count; + if sc == 0 || sc > 32 || !conv::is_power_of_two_u32(sc) { + return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc)); + } + sc + }; + + let (shader_module_guard, _) = hub.shader_modules.read(&mut token); + + let vertex_stage = { + let stage = &desc.vertex.stage; + let flag = wgt::ShaderStages::VERTEX; + + let shader_module = shader_module_guard.get(stage.module).map_err(|_| { + pipeline::CreateRenderPipelineError::Stage { + stage: flag, + error: validation::StageError::InvalidModule, + } + })?; + + let provided_layouts = match desc.layout { + Some(pipeline_layout_id) => { + let pipeline_layout = pipeline_layout_guard + .get(pipeline_layout_id) + .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; + Some(Device::get_introspection_bind_group_layouts( + pipeline_layout, + &*bgl_guard, + )) + } + None => None, + }; + + if let Some(ref interface) = shader_module.interface { + io = interface + .check_stage( + provided_layouts.as_ref().map(|p| p.as_slice()), + &mut derived_group_layouts, + &mut shader_binding_sizes, + &stage.entry_point, + flag, + io, + desc.depth_stencil.as_ref().map(|d| d.depth_compare), + ) + .map_err(|error| pipeline::CreateRenderPipelineError::Stage { + stage: flag, + error, + })?; + validated_stages |= flag; + } + + hal::ProgrammableStage { + module: &shader_module.raw, + entry_point: stage.entry_point.as_ref(), + } + }; + + let fragment_stage = match desc.fragment { + Some(ref fragment) => { + let flag = wgt::ShaderStages::FRAGMENT; + + let shader_module = + shader_module_guard + .get(fragment.stage.module) + .map_err(|_| pipeline::CreateRenderPipelineError::Stage { + stage: flag, + error: validation::StageError::InvalidModule, + })?; + + let provided_layouts = match desc.layout { + Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( + pipeline_layout_guard + .get(pipeline_layout_id) + .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?, + &*bgl_guard, + )), + None => None, + }; + + if validated_stages == wgt::ShaderStages::VERTEX { + if let Some(ref interface) = shader_module.interface { + io = interface + .check_stage( + provided_layouts.as_ref().map(|p| p.as_slice()), + &mut derived_group_layouts, + &mut shader_binding_sizes, + &fragment.stage.entry_point, + flag, + io, + desc.depth_stencil.as_ref().map(|d| d.depth_compare), + ) + .map_err(|error| pipeline::CreateRenderPipelineError::Stage { + stage: flag, + error, + })?; + validated_stages |= flag; + } + } + + Some(hal::ProgrammableStage { + module: &shader_module.raw, + entry_point: fragment.stage.entry_point.as_ref(), + }) + } + None => None, + }; + + if validated_stages.contains(wgt::ShaderStages::FRAGMENT) { + for (i, output) in io.iter() { + match color_targets.get(*i as usize) { + Some(&Some(ref state)) => { + validation::check_texture_format(state.format, &output.ty).map_err( + |pipeline| { + pipeline::CreateRenderPipelineError::ColorState( + *i as u8, + pipeline::ColorStateError::IncompatibleFormat { + pipeline, + shader: 
output.ty, + }, + ) + }, + )?; + } + _ => { + log::info!( + "The fragment stage {:?} output @location({}) values are ignored", + fragment_stage + .as_ref() + .map_or("", |stage| stage.entry_point), + i + ); + } + } + } + } + let last_stage = match desc.fragment { + Some(_) => wgt::ShaderStages::FRAGMENT, + None => wgt::ShaderStages::VERTEX, + }; + if desc.layout.is_none() && !validated_stages.contains(last_stage) { + return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into()); + } + + let pipeline_layout_id = match desc.layout { + Some(id) => id, + None => self.derive_pipeline_layout( + self_id, + implicit_context, + derived_group_layouts, + &mut *bgl_guard, + &mut *pipeline_layout_guard, + )?, + }; + let layout = pipeline_layout_guard + .get(pipeline_layout_id) + .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; + + // Multiview is only supported if the feature is enabled + if desc.multiview.is_some() { + self.require_features(wgt::Features::MULTIVIEW)?; + } + + if !self + .downlevel + .flags + .contains(wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED) + { + for (binding, size) in shader_binding_sizes.iter() { + if size.get() % 16 != 0 { + return Err(pipeline::CreateRenderPipelineError::UnalignedShader { + binding: binding.binding, + group: binding.group, + size: size.get(), + }); + } + } + } + + let late_sized_buffer_groups = + Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); + + let pipeline_desc = hal::RenderPipelineDescriptor { + label: desc.label.borrow_option(), + layout: &layout.raw, + vertex_buffers: &vertex_buffers, + vertex_stage, + primitive: desc.primitive, + depth_stencil: desc.depth_stencil.clone(), + multisample: desc.multisample, + fragment_stage, + color_targets, + multiview: desc.multiview, + }; + let raw = + unsafe { self.raw.create_render_pipeline(&pipeline_desc) }.map_err( + |err| match err { + hal::PipelineError::Device(error) => { + pipeline::CreateRenderPipelineError::Device(error.into()) + } + hal::PipelineError::Linkage(stage, msg) => { + pipeline::CreateRenderPipelineError::Internal { stage, error: msg } + } + hal::PipelineError::EntryPoint(stage) => { + pipeline::CreateRenderPipelineError::Internal { + stage: hal::auxil::map_naga_stage(stage), + error: EP_FAILURE.to_string(), + } + } + }, + )?; + + let pass_context = RenderPassContext { + attachments: AttachmentData { + colors: color_targets + .iter() + .map(|state| state.as_ref().map(|s| s.format)) + .collect(), + resolves: ArrayVec::new(), + depth_stencil: depth_stencil_state.as_ref().map(|state| state.format), + }, + sample_count: samples, + multiview: desc.multiview, + }; + + let mut flags = pipeline::PipelineFlags::empty(); + for state in color_targets.iter().filter_map(|s| s.as_ref()) { + if let Some(ref bs) = state.blend { + if bs.color.uses_constant() | bs.alpha.uses_constant() { + flags |= pipeline::PipelineFlags::BLEND_CONSTANT; + } + } + } + if let Some(ds) = depth_stencil_state.as_ref() { + if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() { + flags |= pipeline::PipelineFlags::STENCIL_REFERENCE; + } + if !ds.is_depth_read_only() { + flags |= pipeline::PipelineFlags::WRITES_DEPTH; + } + if !ds.is_stencil_read_only(desc.primitive.cull_mode) { + flags |= pipeline::PipelineFlags::WRITES_STENCIL; + } + } + + let pipeline = pipeline::RenderPipeline { + raw, + layout_id: Stored { + value: id::Valid(pipeline_layout_id), + ref_count: layout.life_guard.add_ref(), + }, + device_id: Stored { + value: id::Valid(self_id), + 
+                ref_count: self.life_guard.add_ref(),
+            },
+            pass_context,
+            flags,
+            strip_index_format: desc.primitive.strip_index_format,
+            vertex_steps,
+            late_sized_buffer_groups,
+            life_guard: LifeGuard::new(desc.label.borrow_or_default()),
+        };
+        Ok(pipeline)
+    }
+
+    pub(super) fn describe_format_features(
+        &self,
+        adapter: &Adapter<A>,
+        format: TextureFormat,
+    ) -> Result<wgt::TextureFormatFeatures, MissingFeatures> {
+        self.require_features(format.required_features())?;
+
+        let using_device_features = self
+            .features
+            .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
+        // If we're running downlevel, we need to manually ask the backend what
+        // we can use as we can't trust WebGPU.
+        let downlevel = !self.downlevel.is_webgpu_compliant();
+
+        if using_device_features || downlevel {
+            Ok(adapter.get_texture_format_features(format))
+        } else {
+            Ok(format.guaranteed_format_features(self.features))
+        }
+    }
+
+    pub(super) fn wait_for_submit(
+        &self,
+        submission_index: SubmissionIndex,
+        token: &mut Token<Self>,
+    ) -> Result<(), WaitIdleError> {
+        let last_done_index = unsafe {
+            self.raw
+                .get_fence_value(&self.fence)
+                .map_err(DeviceError::from)?
+        };
+        if last_done_index < submission_index {
+            log::info!("Waiting for submission {:?}", submission_index);
+            unsafe {
+                self.raw
+                    .wait(&self.fence, submission_index, !0)
+                    .map_err(DeviceError::from)?
+            };
+            let closures = self
+                .lock_life(token)
+                .triage_submissions(submission_index, &self.command_allocator);
+            assert!(
+                closures.is_empty(),
+                "wait_for_submit is not expected to work with closures"
+            );
+        }
+        Ok(())
+    }
+
+    pub(super) fn create_query_set(
+        &self,
+        self_id: id::DeviceId,
+        desc: &resource::QuerySetDescriptor,
+    ) -> Result<resource::QuerySet<A>, resource::CreateQuerySetError> {
+        use resource::CreateQuerySetError as Error;
+
+        match desc.ty {
+            wgt::QueryType::Occlusion => {}
+            wgt::QueryType::Timestamp => {
+                self.require_features(wgt::Features::TIMESTAMP_QUERY)?;
+            }
+            wgt::QueryType::PipelineStatistics(..) => {
+                self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?;
+            }
+        }
+
+        if desc.count == 0 {
+            return Err(Error::ZeroCount);
+        }
+
+        if desc.count > wgt::QUERY_SET_MAX_QUERIES {
+            return Err(Error::TooManyQueries {
+                count: desc.count,
+                maximum: wgt::QUERY_SET_MAX_QUERIES,
+            });
+        }
+
+        let hal_desc = desc.map_label(crate::LabelHelpers::borrow_option);
+        Ok(resource::QuerySet {
+            raw: unsafe { self.raw.create_query_set(&hal_desc).unwrap() },
+            device_id: Stored {
+                value: id::Valid(self_id),
+                ref_count: self.life_guard.add_ref(),
+            },
+            life_guard: LifeGuard::new("<QuerySet>"),
+            desc: desc.map_label(|_| ()),
+        })
+    }
+}
+
+impl<A: HalApi> Device<A> {
+    pub(crate) fn destroy_buffer(&self, buffer: Buffer<A>) {
+        if let Some(raw) = buffer.raw {
+            unsafe {
+                self.raw.destroy_buffer(raw);
+            }
+        }
+    }
+
+    pub(crate) fn destroy_command_buffer(&self, cmd_buf: command::CommandBuffer<A>) {
+        let mut baked = cmd_buf.into_baked();
+        unsafe {
+            baked.encoder.reset_all(baked.list.into_iter());
+        }
+        unsafe {
+            self.raw.destroy_command_encoder(baked.encoder);
+        }
+    }
+
+    /// Wait for idle and remove resources that we can, before we die.
+ pub(crate) fn prepare_to_die(&mut self) { + self.pending_writes.deactivate(); + let mut life_tracker = self.life_tracker.lock(); + let current_index = self.active_submission_index; + if let Err(error) = unsafe { self.raw.wait(&self.fence, current_index, CLEANUP_WAIT_MS) } { + log::error!("failed to wait for the device: {:?}", error); + } + let _ = life_tracker.triage_submissions(current_index, &self.command_allocator); + life_tracker.cleanup(&self.raw); + #[cfg(feature = "trace")] + { + self.trace = None; + } + } + + pub(crate) fn dispose(self) { + self.pending_writes.dispose(&self.raw); + self.command_allocator.into_inner().dispose(&self.raw); + unsafe { + self.raw.destroy_buffer(self.zero_buffer); + self.raw.destroy_fence(self.fence); + self.raw.exit(self.queue); + } + } +} + +impl crate::resource::Resource for Device { + const TYPE: &'static str = "Device"; + + fn life_guard(&self) -> &LifeGuard { + &self.life_guard + } +} diff --git a/third_party/rust/wgpu-core/src/error.rs b/third_party/rust/wgpu-core/src/error.rs index 596595245b8e..3cdecc5369ef 100644 --- a/third_party/rust/wgpu-core/src/error.rs +++ b/third_party/rust/wgpu-core/src/error.rs @@ -1,10 +1,7 @@ use core::fmt; use std::error::Error; -use crate::{ - gfx_select, - hub::{Global, IdentityManagerFactory}, -}; +use crate::{gfx_select, global::Global, identity::IdentityManagerFactory}; pub struct ErrorFormatter<'a> { writer: &'a mut dyn fmt::Write, @@ -28,31 +25,31 @@ impl<'a> ErrorFormatter<'a> { pub fn bind_group_label(&mut self, id: &crate::id::BindGroupId) { let global = self.global; - let label = gfx_select!(id => global.bind_group_label(*id)); + let label: String = gfx_select!(id => global.bind_group_label(*id)); self.label("bind group", &label); } pub fn bind_group_layout_label(&mut self, id: &crate::id::BindGroupLayoutId) { let global = self.global; - let label = gfx_select!(id => global.bind_group_layout_label(*id)); + let label: String = gfx_select!(id => global.bind_group_layout_label(*id)); self.label("bind group layout", &label); } pub fn render_pipeline_label(&mut self, id: &crate::id::RenderPipelineId) { let global = self.global; - let label = gfx_select!(id => global.render_pipeline_label(*id)); + let label: String = gfx_select!(id => global.render_pipeline_label(*id)); self.label("render pipeline", &label); } pub fn compute_pipeline_label(&mut self, id: &crate::id::ComputePipelineId) { let global = self.global; - let label = gfx_select!(id => global.compute_pipeline_label(*id)); + let label: String = gfx_select!(id => global.compute_pipeline_label(*id)); self.label("compute pipeline", &label); } pub fn buffer_label_with_key(&mut self, id: &crate::id::BufferId, key: &str) { let global = self.global; - let label = gfx_select!(id => global.buffer_label(*id)); + let label: String = gfx_select!(id => global.buffer_label(*id)); self.label(key, &label); } @@ -62,7 +59,7 @@ impl<'a> ErrorFormatter<'a> { pub fn texture_label_with_key(&mut self, id: &crate::id::TextureId, key: &str) { let global = self.global; - let label = gfx_select!(id => global.texture_label(*id)); + let label: String = gfx_select!(id => global.texture_label(*id)); self.label(key, &label); } @@ -72,7 +69,7 @@ impl<'a> ErrorFormatter<'a> { pub fn texture_view_label_with_key(&mut self, id: &crate::id::TextureViewId, key: &str) { let global = self.global; - let label = gfx_select!(id => global.texture_view_label(*id)); + let label: String = gfx_select!(id => global.texture_view_label(*id)); self.label(key, &label); } @@ -82,19 +79,19 @@ impl<'a> 
ErrorFormatter<'a> {
     pub fn sampler_label(&mut self, id: &crate::id::SamplerId) {
         let global = self.global;
-        let label = gfx_select!(id => global.sampler_label(*id));
+        let label: String = gfx_select!(id => global.sampler_label(*id));
         self.label("sampler", &label);
     }
 
     pub fn command_buffer_label(&mut self, id: &crate::id::CommandBufferId) {
         let global = self.global;
-        let label = gfx_select!(id => global.command_buffer_label(*id));
+        let label: String = gfx_select!(id => global.command_buffer_label(*id));
         self.label("command buffer", &label);
     }
 
     pub fn query_set_label(&mut self, id: &crate::id::QuerySetId) {
         let global = self.global;
-        let label = gfx_select!(id => global.query_set_label(*id));
+        let label: String = gfx_select!(id => global.query_set_label(*id));
         self.label("query set", &label);
     }
 }
diff --git a/third_party/rust/wgpu-core/src/global.rs b/third_party/rust/wgpu-core/src/global.rs
new file mode 100644
index 000000000000..533126596218
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/global.rs
@@ -0,0 +1,162 @@
+use crate::{
+    hal_api::HalApi,
+    hub::{HubReport, Hubs},
+    id,
+    identity::GlobalIdentityHandlerFactory,
+    instance::{Instance, Surface},
+    registry::Registry,
+    storage::{Element, StorageReport},
+};
+
+#[derive(Debug)]
+pub struct GlobalReport {
+    pub surfaces: StorageReport,
+    #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
+    pub vulkan: Option<HubReport>,
+    #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
+    pub metal: Option<HubReport>,
+    #[cfg(all(feature = "dx12", windows))]
+    pub dx12: Option<HubReport>,
+    #[cfg(all(feature = "dx11", windows))]
+    pub dx11: Option<HubReport>,
+    #[cfg(feature = "gles")]
+    pub gl: Option<HubReport>,
+}
+
+pub struct Global<G: GlobalIdentityHandlerFactory> {
+    pub instance: Instance,
+    pub surfaces: Registry<Surface, id::SurfaceId, G>,
+    pub(crate) hubs: Hubs<G>,
+}
+
+impl<G: GlobalIdentityHandlerFactory> Global<G> {
+    pub fn new(name: &str, factory: G, instance_desc: wgt::InstanceDescriptor) -> Self {
+        profiling::scope!("Global::new");
+        Self {
+            instance: Instance::new(name, instance_desc),
+            surfaces: Registry::without_backend(&factory, "Surface"),
+            hubs: Hubs::new(&factory),
+        }
+    }
+
+    /// # Safety
+    ///
+    /// Refer to the creation of wgpu-hal Instance for every backend.
+    pub unsafe fn from_hal_instance<A: HalApi>(
+        name: &str,
+        factory: G,
+        hal_instance: A::Instance,
+    ) -> Self {
+        profiling::scope!("Global::new");
+        Self {
+            instance: A::create_instance_from_hal(name, hal_instance),
+            surfaces: Registry::without_backend(&factory, "Surface"),
+            hubs: Hubs::new(&factory),
+        }
+    }
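+
+    // Illustrative construction (a sketch; `IdentityManagerFactory` is the
+    // stock factory defined in `crate::identity`):
+    //
+    //     let global = Global::new(
+    //         "wgpu",
+    //         crate::identity::IdentityManagerFactory,
+    //         wgt::InstanceDescriptor::default(),
+    //     );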
+    /// # Safety
+    ///
+    /// - The raw instance handle returned must not be manually destroyed.
+    pub unsafe fn instance_as_hal<A: HalApi>(&self) -> Option<&A::Instance> {
+        A::instance_as_hal(&self.instance)
+    }
+
+    /// # Safety
+    ///
+    /// - The raw handles obtained from the Instance must not be manually destroyed
+    pub unsafe fn from_instance(factory: G, instance: Instance) -> Self {
+        profiling::scope!("Global::new");
+        Self {
+            instance,
+            surfaces: Registry::without_backend(&factory, "Surface"),
+            hubs: Hubs::new(&factory),
+        }
+    }
+
+    pub fn clear_backend<A: HalApi>(&self, _dummy: ()) {
+        let mut surface_guard = self.surfaces.data.write();
+        let hub = A::hub(self);
+        // this is used for tests, which keep the adapter
+        hub.clear(&mut surface_guard, false);
+    }
+
+    pub fn generate_report(&self) -> GlobalReport {
+        GlobalReport {
+            surfaces: self.surfaces.data.read().generate_report(),
+            #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
+            vulkan: if self.instance.vulkan.is_some() {
+                Some(self.hubs.vulkan.generate_report())
+            } else {
+                None
+            },
+            #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
+            metal: if self.instance.metal.is_some() {
+                Some(self.hubs.metal.generate_report())
+            } else {
+                None
+            },
+            #[cfg(all(feature = "dx12", windows))]
+            dx12: if self.instance.dx12.is_some() {
+                Some(self.hubs.dx12.generate_report())
+            } else {
+                None
+            },
+            #[cfg(all(feature = "dx11", windows))]
+            dx11: if self.instance.dx11.is_some() {
+                Some(self.hubs.dx11.generate_report())
+            } else {
+                None
+            },
+            #[cfg(feature = "gles")]
+            gl: if self.instance.gl.is_some() {
+                Some(self.hubs.gl.generate_report())
+            } else {
+                None
+            },
+        }
+    }
+}
+
+impl<G: GlobalIdentityHandlerFactory> Drop for Global<G> {
+    fn drop(&mut self) {
+        profiling::scope!("Global::drop");
+        log::info!("Dropping Global");
+        let mut surface_guard = self.surfaces.data.write();
+
+        // destroy hubs before the instance gets dropped
+        #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
+        {
+            self.hubs.vulkan.clear(&mut surface_guard, true);
+        }
+        #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
+        {
+            self.hubs.metal.clear(&mut surface_guard, true);
+        }
+        #[cfg(all(feature = "dx12", windows))]
+        {
+            self.hubs.dx12.clear(&mut surface_guard, true);
+        }
+        #[cfg(all(feature = "dx11", windows))]
+        {
+            self.hubs.dx11.clear(&mut surface_guard, true);
+        }
+        #[cfg(feature = "gles")]
+        {
+            self.hubs.gl.clear(&mut surface_guard, true);
+        }
+
+        // destroy surfaces
+        for element in surface_guard.map.drain(..) {
+            if let Element::Occupied(surface, _) = element {
+                self.instance.destroy_surface(surface);
+            }
+        }
+    }
+}
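+
+// Sketch of how a report can be consumed (using only the types above): once
+// all surfaces and devices have been dropped, a caller could assert that
+// `global.generate_report().surfaces.is_empty()` holds, since a
+// `StorageReport` counts occupied, vacant and error slots.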
+
+#[cfg(test)]
+fn _test_send_sync(global: &Global<crate::identity::IdentityManagerFactory>) {
+    fn test_internal<T: Send + Sync>(_: T) {}
+    test_internal(global)
+}
diff --git a/third_party/rust/wgpu-core/src/hal_api.rs b/third_party/rust/wgpu-core/src/hal_api.rs
new file mode 100644
index 000000000000..00180d8ac519
--- /dev/null
+++ b/third_party/rust/wgpu-core/src/hal_api.rs
@@ -0,0 +1,157 @@
+use wgt::Backend;
+
+use crate::{
+    global::Global,
+    hub::Hub,
+    identity::GlobalIdentityHandlerFactory,
+    instance::{HalSurface, Instance, Surface},
+};
+
+pub trait HalApi: hal::Api {
+    const VARIANT: Backend;
+    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance;
+    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>;
+    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G>;
+    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>>;
+    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>>;
+}
+
+impl HalApi for hal::api::Empty {
+    const VARIANT: Backend = Backend::Empty;
+    fn create_instance_from_hal(_: &str, _: Self::Instance) -> Instance {
+        unimplemented!("called empty api")
+    }
+    fn instance_as_hal(_: &Instance) -> Option<&Self::Instance> {
+        unimplemented!("called empty api")
+    }
+    fn hub<G: GlobalIdentityHandlerFactory>(_: &Global<G>) -> &Hub<Self, G> {
+        unimplemented!("called empty api")
+    }
+    fn get_surface(_: &Surface) -> Option<&HalSurface<Self>> {
+        unimplemented!("called empty api")
+    }
+    fn get_surface_mut(_: &mut Surface) -> Option<&mut HalSurface<Self>> {
+        unimplemented!("called empty api")
+    }
+}
+
+#[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
+impl HalApi for hal::api::Vulkan {
+    const VARIANT: Backend = Backend::Vulkan;
+    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance {
+        Instance {
+            name: name.to_owned(),
+            vulkan: Some(hal_instance),
+            ..Default::default()
+        }
+    }
+    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
+        instance.vulkan.as_ref()
+    }
+    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
+        &global.hubs.vulkan
+    }
+    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>> {
+        surface.vulkan.as_ref()
+    }
+    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>> {
+        surface.vulkan.as_mut()
+    }
+}
+
+#[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
+impl HalApi for hal::api::Metal {
+    const VARIANT: Backend = Backend::Metal;
+    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance {
+        Instance {
+            name: name.to_owned(),
+            metal: Some(hal_instance),
+            ..Default::default()
+        }
+    }
+    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
+        instance.metal.as_ref()
+    }
+    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
+        &global.hubs.metal
+    }
+    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>> {
+        surface.metal.as_ref()
+    }
+    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>> {
+        surface.metal.as_mut()
+    }
+}
+
+#[cfg(all(feature = "dx12", windows))]
+impl HalApi for hal::api::Dx12 {
+    const VARIANT: Backend = Backend::Dx12;
+    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance {
+        Instance {
+            name: name.to_owned(),
+            dx12: Some(hal_instance),
+            ..Default::default()
+        }
+    }
+    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
+        instance.dx12.as_ref()
+    }
+    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
+        &global.hubs.dx12
+    }
+    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>> {
+        surface.dx12.as_ref()
+    }
+    fn get_surface_mut(surface: &mut Surface) -> 
Option<&mut HalSurface> { + surface.dx12.as_mut() + } +} + +#[cfg(all(feature = "dx11", windows))] +impl HalApi for hal::api::Dx11 { + const VARIANT: Backend = Backend::Dx11; + fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { + Instance { + name: name.to_owned(), + dx11: Some(hal_instance), + ..Default::default() + } + } + fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { + instance.dx11.as_ref() + } + fn hub(global: &Global) -> &Hub { + &global.hubs.dx11 + } + fn get_surface(surface: &Surface) -> Option<&HalSurface> { + surface.dx11.as_ref() + } + fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { + surface.dx11.as_mut() + } +} + +#[cfg(feature = "gles")] +impl HalApi for hal::api::Gles { + const VARIANT: Backend = Backend::Gl; + fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { + #[allow(clippy::needless_update)] + Instance { + name: name.to_owned(), + gl: Some(hal_instance), + ..Default::default() + } + } + fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { + instance.gl.as_ref() + } + fn hub(global: &Global) -> &Hub { + &global.hubs.gl + } + fn get_surface(surface: &Surface) -> Option<&HalSurface> { + surface.gl.as_ref() + } + fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { + surface.gl.as_mut() + } +} diff --git a/third_party/rust/wgpu-core/src/hub.rs b/third_party/rust/wgpu-core/src/hub.rs index 0a3b5d954e79..d3da0be6c219 100644 --- a/third_party/rust/wgpu-core/src/hub.rs +++ b/third_party/rust/wgpu-core/src/hub.rs @@ -140,335 +140,38 @@ creation fails, the id supplied for that resource is marked to indicate as much, allowing subsequent operations using that id to be properly flagged as errors as well. +[`Backend`]: wgt::Backend +[`Global`]: crate::global::Global +[`Global::new`]: crate::global::Global::new [`gfx_select`]: crate::gfx_select -[`Input`]: IdentityHandler::Input -[`process`]: IdentityHandler::process +[`IdentityHandler`]: crate::identity::IdentityHandler +[`Input`]: crate::identity::IdentityHandler::Input +[`process`]: crate::identity::IdentityHandler::process [`Id`]: crate::id::Id -[wrapped in a mutex]: trait.IdentityHandler.html#impl-IdentityHandler%3CI%3E-for-Mutex%3CIdentityManager%3E +[wrapped in a mutex]: ../identity/trait.IdentityHandler.html#impl-IdentityHandler%3CI%3E-for-Mutex%3CIdentityManager%3E [WebGPU]: https://www.w3.org/TR/webgpu/ - +[`IdentityManager`]: crate::identity::IdentityManager +[`Input`]: crate::identity::Input +[`IdentityHandlerFactory`]: crate::identity::IdentityHandlerFactory */ use crate::{ binding_model::{BindGroup, BindGroupLayout, PipelineLayout}, command::{CommandBuffer, RenderBundle}, device::Device, + hal_api::HalApi, id, + identity::GlobalIdentityHandlerFactory, instance::{Adapter, HalSurface, Instance, Surface}, pipeline::{ComputePipeline, RenderPipeline, ShaderModule}, + registry::Registry, resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureClearMode, TextureView}, - Epoch, Index, + storage::{Element, Storage, StorageReport}, }; -use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use wgt::Backend; - #[cfg(debug_assertions)] use std::cell::Cell; -use std::{fmt::Debug, marker::PhantomData, mem, ops}; - -/// A simple structure to allocate [`Id`] identifiers. -/// -/// Calling [`alloc`] returns a fresh, never-before-seen id. Calling [`free`] -/// marks an id as dead; it will never be returned again by `alloc`. 
-/// -/// Use `IdentityManager::default` to construct new instances. -/// -/// `IdentityManager` returns `Id`s whose index values are suitable for use as -/// indices into a `Storage` that holds those ids' referents: -/// -/// - Every live id has a distinct index value. Each live id's index selects a -/// distinct element in the vector. -/// -/// - `IdentityManager` prefers low index numbers. If you size your vector to -/// accommodate the indices produced here, the vector's length will reflect -/// the highwater mark of actual occupancy. -/// -/// - `IdentityManager` reuses the index values of freed ids before returning -/// ids with new index values. Freed vector entries get reused. -/// -/// See the module-level documentation for an overview of how this -/// fits together. -/// -/// [`Id`]: crate::id::Id -/// [`Backend`]: wgt::Backend; -/// [`alloc`]: IdentityManager::alloc -/// [`free`]: IdentityManager::free -#[derive(Debug, Default)] -pub struct IdentityManager { - /// Available index values. If empty, then `epochs.len()` is the next index - /// to allocate. - free: Vec, - - /// The next or currently-live epoch value associated with each `Id` index. - /// - /// If there is a live id with index `i`, then `epochs[i]` is its epoch; any - /// id with the same index but an older epoch is dead. - /// - /// If index `i` is currently unused, `epochs[i]` is the epoch to use in its - /// next `Id`. - epochs: Vec, -} - -impl IdentityManager { - /// Allocate a fresh, never-before-seen id with the given `backend`. - /// - /// The backend is incorporated into the id, so that ids allocated with - /// different `backend` values are always distinct. - pub fn alloc(&mut self, backend: Backend) -> I { - match self.free.pop() { - Some(index) => I::zip(index, self.epochs[index as usize], backend), - None => { - let epoch = 1; - let id = I::zip(self.epochs.len() as Index, epoch, backend); - self.epochs.push(epoch); - id - } - } - } - - /// Free `id`. It will never be returned from `alloc` again. - pub fn free(&mut self, id: I) { - let (index, epoch, _backend) = id.unzip(); - let pe = &mut self.epochs[index as usize]; - assert_eq!(*pe, epoch); - // If the epoch reaches EOL, the index doesn't go - // into the free list, will never be reused again. - if epoch < id::EPOCH_MASK { - *pe = epoch + 1; - self.free.push(index); - } - } -} - -/// An entry in a `Storage::map` table. -#[derive(Debug)] -enum Element { - /// There are no live ids with this index. - Vacant, - - /// There is one live id with this index, allocated at the given - /// epoch. - Occupied(T, Epoch), - - /// Like `Occupied`, but an error occurred when creating the - /// resource. - /// - /// The given `String` is the resource's descriptor label. - Error(Epoch, String), -} - -#[derive(Clone, Debug, Default)] -pub struct StorageReport { - pub num_occupied: usize, - pub num_vacant: usize, - pub num_error: usize, - pub element_size: usize, -} - -impl StorageReport { - pub fn is_empty(&self) -> bool { - self.num_occupied + self.num_vacant + self.num_error == 0 - } -} - -#[derive(Clone, Debug)] -pub(crate) struct InvalidId; - -/// A table of `T` values indexed by the id type `I`. -/// -/// The table is represented as a vector indexed by the ids' index -/// values, so you should use an id allocator like `IdentityManager` -/// that keeps the index values dense and close to zero. 
-#[derive(Debug)] -pub struct Storage { - map: Vec>, - kind: &'static str, - _phantom: PhantomData, -} - -impl ops::Index> for Storage { - type Output = T; - fn index(&self, id: id::Valid) -> &T { - self.get(id.0).unwrap() - } -} - -impl ops::IndexMut> for Storage { - fn index_mut(&mut self, id: id::Valid) -> &mut T { - self.get_mut(id.0).unwrap() - } -} - -impl Storage { - pub(crate) fn contains(&self, id: I) -> bool { - let (index, epoch, _) = id.unzip(); - match self.map.get(index as usize) { - Some(&Element::Vacant) => false, - Some(&Element::Occupied(_, storage_epoch) | &Element::Error(storage_epoch, _)) => { - storage_epoch == epoch - } - None => false, - } - } - - /// Attempts to get a reference to an item behind a potentially invalid ID. - /// - /// Returns [`None`] if there is an epoch mismatch, or the entry is empty. - /// - /// This function is primarily intended for the `as_hal` family of functions - /// where you may need to fallibly get a object backed by an id that could - /// be in a different hub. - pub(crate) fn try_get(&self, id: I) -> Result, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(Some(v)), epoch), - Some(&Element::Vacant) => return Ok(None), - Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - None => return Err(InvalidId), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - /// Get a reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. - pub(crate) fn get(&self, id: I) -> Result<&T, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), - Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), - Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - None => return Err(InvalidId), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - /// Get a mutable reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. 
- pub(crate) fn get_mut(&mut self, id: I) -> Result<&mut T, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get_mut(index as usize) { - Some(&mut Element::Occupied(ref mut v, epoch)) => (Ok(v), epoch), - Some(&mut Element::Vacant) | None => panic!("{}[{}] does not exist", self.kind, index), - Some(&mut Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - pub(crate) unsafe fn get_unchecked(&self, id: u32) -> &T { - match self.map[id as usize] { - Element::Occupied(ref v, _) => v, - Element::Vacant => panic!("{}[{}] does not exist", self.kind, id), - Element::Error(_, _) => panic!(""), - } - } - - pub(crate) fn label_for_invalid_id(&self, id: I) -> &str { - let (index, _, _) = id.unzip(); - match self.map.get(index as usize) { - Some(&Element::Error(_, ref label)) => label, - _ => "", - } - } - - fn insert_impl(&mut self, index: usize, element: Element) { - if index >= self.map.len() { - self.map.resize_with(index + 1, || Element::Vacant); - } - match std::mem::replace(&mut self.map[index], element) { - Element::Vacant => {} - _ => panic!("Index {index:?} is already occupied"), - } - } - - pub(crate) fn insert(&mut self, id: I, value: T) { - let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, Element::Occupied(value, epoch)) - } - - pub(crate) fn insert_error(&mut self, id: I, label: &str) { - let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, Element::Error(epoch, label.to_string())) - } - - pub(crate) fn force_replace(&mut self, id: I, value: T) { - let (index, epoch, _) = id.unzip(); - self.map[index as usize] = Element::Occupied(value, epoch); - } - - pub(crate) fn remove(&mut self, id: I) -> Option { - let (index, epoch, _) = id.unzip(); - match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { - Element::Occupied(value, storage_epoch) => { - assert_eq!(epoch, storage_epoch); - Some(value) - } - Element::Error(..) => None, - Element::Vacant => panic!("Cannot remove a vacant resource"), - } - } - - // Prevents panic on out of range access, allows Vacant elements. - pub(crate) fn _try_remove(&mut self, id: I) -> Option { - let (index, epoch, _) = id.unzip(); - if index as usize >= self.map.len() { - None - } else if let Element::Occupied(value, storage_epoch) = - std::mem::replace(&mut self.map[index as usize], Element::Vacant) - { - assert_eq!(epoch, storage_epoch); - Some(value) - } else { - None - } - } - - pub(crate) fn iter(&self, backend: Backend) -> impl Iterator { - self.map - .iter() - .enumerate() - .filter_map(move |(index, x)| match *x { - Element::Occupied(ref value, storage_epoch) => { - Some((I::zip(index as Index, storage_epoch, backend), value)) - } - _ => None, - }) - } - - pub(crate) fn len(&self) -> usize { - self.map.len() - } - - fn generate_report(&self) -> StorageReport { - let mut report = StorageReport { - element_size: mem::size_of::(), - ..Default::default() - }; - for element in self.map.iter() { - match *element { - Element::Occupied(..) => report.num_occupied += 1, - Element::Vacant => report.num_vacant += 1, - Element::Error(..) => report.num_error += 1, - } - } - report - } -} +use std::{fmt::Debug, marker::PhantomData}; /// Type system for enforcing the lock order on [`Hub`] fields. /// @@ -615,7 +318,7 @@ impl<'a, T> Token<'a, T> { /// Return a new token for a locked field. 
/// /// This should only be used by `Registry` locking methods. - fn new() -> Self { + pub(crate) fn new() -> Self { #[cfg(debug_assertions)] ACTIVE_TOKEN.with(|active| { let old = active.get(); @@ -651,289 +354,6 @@ impl<'a, T> Drop for Token<'a, T> { } } -/// A type that can build true ids from proto-ids, and free true ids. -/// -/// For some implementations, the true id is based on the proto-id. -/// The caller is responsible for providing well-allocated proto-ids. -/// -/// For other implementations, the proto-id carries no information -/// (it's `()`, say), and this `IdentityHandler` type takes care of -/// allocating a fresh true id. -/// -/// See the module-level documentation for details. -pub trait IdentityHandler: Debug { - /// The type of proto-id consumed by this filter, to produce a true id. - type Input: Clone + Debug; - - /// Given a proto-id value `id`, return a true id for `backend`. - fn process(&self, id: Self::Input, backend: Backend) -> I; - - /// Free the true id `id`. - fn free(&self, id: I); -} - -impl IdentityHandler for Mutex { - type Input = (); - fn process(&self, _id: Self::Input, backend: Backend) -> I { - self.lock().alloc(backend) - } - fn free(&self, id: I) { - self.lock().free(id) - } -} - -/// A type that can produce [`IdentityHandler`] filters for ids of type `I`. -/// -/// See the module-level documentation for details. -pub trait IdentityHandlerFactory { - /// The type of filter this factory constructs. - /// - /// "Filter" and "handler" seem to both mean the same thing here: - /// something that can produce true ids from proto-ids. - type Filter: IdentityHandler; - - /// Create an [`IdentityHandler`] implementation that can - /// transform proto-ids into ids of type `I`. - /// - /// [`IdentityHandler`]: IdentityHandler - fn spawn(&self) -> Self::Filter; -} - -/// A global identity handler factory based on [`IdentityManager`]. -/// -/// Each of this type's `IdentityHandlerFactory::spawn` methods -/// returns a `Mutex>`, which allocates fresh `I` -/// ids itself, and takes `()` as its proto-id type. -#[derive(Debug)] -pub struct IdentityManagerFactory; - -impl IdentityHandlerFactory for IdentityManagerFactory { - type Filter = Mutex; - fn spawn(&self) -> Self::Filter { - Mutex::new(IdentityManager::default()) - } -} - -/// A factory that can build [`IdentityHandler`]s for all resource -/// types. 
-pub trait GlobalIdentityHandlerFactory: - IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory -{ -} - -impl GlobalIdentityHandlerFactory for IdentityManagerFactory {} - -pub type Input = <>::Filter as IdentityHandler>::Input; - -pub trait Resource { - const TYPE: &'static str; - fn life_guard(&self) -> &crate::LifeGuard; - fn label(&self) -> &str { - #[cfg(debug_assertions)] - return &self.life_guard().label; - #[cfg(not(debug_assertions))] - return ""; - } -} - -#[derive(Debug)] -pub struct Registry> { - identity: F::Filter, - data: RwLock>, - backend: Backend, -} - -impl> Registry { - fn new(backend: Backend, factory: &F) -> Self { - Self { - identity: factory.spawn(), - data: RwLock::new(Storage { - map: Vec::new(), - kind: T::TYPE, - _phantom: PhantomData, - }), - backend, - } - } - - fn without_backend(factory: &F, kind: &'static str) -> Self { - Self { - identity: factory.spawn(), - data: RwLock::new(Storage { - map: Vec::new(), - kind, - _phantom: PhantomData, - }), - backend: Backend::Empty, - } - } -} - -#[must_use] -pub(crate) struct FutureId<'a, I: id::TypedId, T> { - id: I, - data: &'a RwLock>, -} - -impl FutureId<'_, I, T> { - #[cfg(feature = "trace")] - pub fn id(&self) -> I { - self.id - } - - pub fn into_id(self) -> I { - self.id - } - - pub fn assign<'a, A: Access>(self, value: T, _: &'a mut Token) -> id::Valid { - self.data.write().insert(self.id, value); - id::Valid(self.id) - } - - pub fn assign_error<'a, A: Access>(self, label: &str, _: &'a mut Token) -> I { - self.data.write().insert_error(self.id, label); - self.id - } -} - -impl> Registry { - pub(crate) fn prepare( - &self, - id_in: >::Input, - ) -> FutureId { - FutureId { - id: self.identity.process(id_in, self.backend), - data: &self.data, - } - } - - /// Acquire read access to this `Registry`'s contents. - /// - /// The caller must present a mutable reference to a `Token`, - /// for some type `A` that comes before this `Registry`'s resource - /// type `T` in the lock ordering. A `Token` grants - /// permission to lock any field; see [`Token::root`]. - /// - /// Once the read lock is acquired, return a new `Token`, along - /// with a read guard for this `Registry`'s [`Storage`], which can - /// be indexed by id to get at the actual resources. - /// - /// The borrow checker ensures that the caller cannot again access - /// its `Token` until it has dropped both the guard and the - /// `Token`. - /// - /// See the [`Hub`] type for more details on locking. - pub(crate) fn read<'a, A: Access>( - &'a self, - _token: &'a mut Token, - ) -> (RwLockReadGuard<'a, Storage>, Token<'a, T>) { - (self.data.read(), Token::new()) - } - - /// Acquire write access to this `Registry`'s contents. - /// - /// The caller must present a mutable reference to a `Token`, - /// for some type `A` that comes before this `Registry`'s resource - /// type `T` in the lock ordering. A `Token` grants - /// permission to lock any field; see [`Token::root`]. - /// - /// Once the lock is acquired, return a new `Token`, along with - /// a write guard for this `Registry`'s [`Storage`], which can be - /// indexed by id to get at the actual resources. 
- /// - /// The borrow checker ensures that the caller cannot again access - /// its `Token` until it has dropped both the guard and the - /// `Token`. - /// - /// See the [`Hub`] type for more details on locking. - pub(crate) fn write<'a, A: Access>( - &'a self, - _token: &'a mut Token, - ) -> (RwLockWriteGuard<'a, Storage>, Token<'a, T>) { - (self.data.write(), Token::new()) - } - - /// Unregister the resource at `id`. - /// - /// The caller must prove that it already holds a write lock for - /// this `Registry` by passing a mutable reference to this - /// `Registry`'s storage, obtained from the write guard returned - /// by a previous call to [`write`], as the `guard` parameter. - pub fn unregister_locked(&self, id: I, guard: &mut Storage) -> Option { - let value = guard.remove(id); - //Note: careful about the order here! - self.identity.free(id); - //Returning None is legal if it's an error ID - value - } - - /// Unregister the resource at `id` and return its value, if any. - /// - /// The caller must present a mutable reference to a `Token`, - /// for some type `A` that comes before this `Registry`'s resource - /// type `T` in the lock ordering. - /// - /// This returns a `Token`, but it's almost useless, because it - /// doesn't return a lock guard to go with it: its only effect is - /// to make the token you passed to this function inaccessible. - /// However, the `Token` can be used to satisfy some functions' - /// bureacratic expectations that you will have one available. - /// - /// The borrow checker ensures that the caller cannot again access - /// its `Token` until it has dropped both the guard and the - /// `Token`. - /// - /// See the [`Hub`] type for more details on locking. - pub(crate) fn unregister<'a, A: Access>( - &self, - id: I, - _token: &'a mut Token, - ) -> (Option, Token<'a, T>) { - let value = self.data.write().remove(id); - //Note: careful about the order here! - self.identity.free(id); - //Returning None is legal if it's an error ID - (value, Token::new()) - } - - pub fn label_for_resource(&self, id: I) -> String { - let guard = self.data.read(); - - let type_name = guard.kind; - match guard.get(id) { - Ok(res) => { - let label = res.label(); - if label.is_empty() { - format!("<{}-{:?}>", type_name, id.unzip()) - } else { - label.to_string() - } - } - Err(_) => format!( - "", - type_name, - guard.label_for_invalid_id(id) - ), - } - } -} - #[derive(Debug)] pub struct HubReport { pub adapters: StorageReport, @@ -1016,6 +436,7 @@ impl HubReport { /// threads cannot send their `Token`s to other threads (enforced by /// making `Token` neither `Send` nor `Sync`). /// +/// [`Global`]: crate::global::Global /// [`A::hub(global)`]: HalApi::hub /// [`RwLock`]: parking_lot::RwLock /// [`buffers`]: Hub::buffers @@ -1068,7 +489,11 @@ impl Hub { //TODO: instead of having a hacky `with_adapters` parameter, // we should have `clear_device(device_id)` that specifically destroys // everything related to a logical device. 
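`label_for_resource` in the removed block above builds a human-readable name from whatever the storage knows about the id. Here is a standalone sketch of the same fallback logic; the format strings and the `valid` flag are illustrative stand-ins, not wgpu's exact output.

```rust
/// Prefer the user-supplied label, fall back to a synthesized
/// "<Kind-(index, epoch, backend)>" string, and mark invalid ids.
fn label_for(kind: &str, label: &str, unzipped: (u32, u32, &str), valid: bool) -> String {
    if !valid {
        return format!("<Invalid-{kind}>");
    }
    if label.is_empty() {
        format!("<{kind}-{unzipped:?}>")
    } else {
        label.to_string()
    }
}

fn main() {
    assert_eq!(label_for("Buffer", "vertices", (0, 1, "Vulkan"), true), "vertices");
    assert_eq!(
        label_for("Buffer", "", (0, 1, "Vulkan"), true),
        "<Buffer-(0, 1, \"Vulkan\")>"
    );
    assert_eq!(label_for("Buffer", "", (0, 1, "Vulkan"), false), "<Invalid-Buffer>");
}
```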
- fn clear(&self, surface_guard: &mut Storage, with_adapters: bool) { + pub(crate) fn clear( + &self, + surface_guard: &mut Storage, + with_adapters: bool, + ) { use crate::resource::TextureInner; use hal::{Device as _, Surface as _}; @@ -1258,19 +683,27 @@ impl Hub { pub struct Hubs { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: Hub, + pub(crate) vulkan: Hub, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - metal: Hub, + pub(crate) metal: Hub, #[cfg(all(feature = "dx12", windows))] - dx12: Hub, + pub(crate) dx12: Hub, #[cfg(all(feature = "dx11", windows))] - dx11: Hub, + pub(crate) dx11: Hub, #[cfg(feature = "gles")] - gl: Hub, + pub(crate) gl: Hub, + #[cfg(all( + not(all(feature = "vulkan", not(target_arch = "wasm32"))), + not(all(feature = "metal", any(target_os = "macos", target_os = "ios"))), + not(all(feature = "dx12", windows)), + not(all(feature = "dx11", windows)), + not(feature = "gles"), + ))] + pub(crate) empty: Hub, } impl Hubs { - fn new(factory: &F) -> Self { + pub(crate) fn new(factory: &F) -> Self { Self { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: Hub::new(factory), @@ -1282,322 +715,14 @@ impl Hubs { dx11: Hub::new(factory), #[cfg(feature = "gles")] gl: Hub::new(factory), + #[cfg(all( + not(all(feature = "vulkan", not(target_arch = "wasm32"))), + not(all(feature = "metal", any(target_os = "macos", target_os = "ios"))), + not(all(feature = "dx12", windows)), + not(all(feature = "dx11", windows)), + not(feature = "gles"), + ))] + empty: Hub::new(factory), } } } - -#[derive(Debug)] -pub struct GlobalReport { - pub surfaces: StorageReport, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - pub vulkan: Option, - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - pub metal: Option, - #[cfg(all(feature = "dx12", windows))] - pub dx12: Option, - #[cfg(all(feature = "dx11", windows))] - pub dx11: Option, - #[cfg(feature = "gles")] - pub gl: Option, -} - -pub struct Global { - pub instance: Instance, - pub surfaces: Registry, - hubs: Hubs, -} - -impl Global { - pub fn new(name: &str, factory: G, instance_desc: wgt::InstanceDescriptor) -> Self { - profiling::scope!("Global::new"); - Self { - instance: Instance::new(name, instance_desc), - surfaces: Registry::without_backend(&factory, "Surface"), - hubs: Hubs::new(&factory), - } - } - - /// # Safety - /// - /// Refer to the creation of wgpu-hal Instance for every backend. - pub unsafe fn from_hal_instance( - name: &str, - factory: G, - hal_instance: A::Instance, - ) -> Self { - profiling::scope!("Global::new"); - Self { - instance: A::create_instance_from_hal(name, hal_instance), - surfaces: Registry::without_backend(&factory, "Surface"), - hubs: Hubs::new(&factory), - } - } - - /// # Safety - /// - /// - The raw instance handle returned must not be manually destroyed. 
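The new `empty` field in the `Hubs` hunk above exists so the struct still has a member when every backend feature is disabled. A reduced sketch of that cfg pattern, with made-up feature names:

```rust
// `Hub` stands in for the real per-backend hub; feature names are examples.
struct Hub;

#[allow(dead_code)]
struct Hubs {
    #[cfg(feature = "vulkan")]
    vulkan: Hub,
    #[cfg(feature = "gles")]
    gl: Hub,
    // Only present when every backend is compiled out, so the struct
    // is never empty and construction always has something to build.
    #[cfg(all(not(feature = "vulkan"), not(feature = "gles")))]
    empty: Hub,
}

fn main() {
    let _hubs = Hubs {
        #[cfg(feature = "vulkan")]
        vulkan: Hub,
        #[cfg(feature = "gles")]
        gl: Hub,
        #[cfg(all(not(feature = "vulkan"), not(feature = "gles")))]
        empty: Hub,
    };
}
```

The fallback arm's condition is the negation of every backend condition, which is exactly the shape of the `cfg(all(not(...), ...))` attributes added in the hunk.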
- pub unsafe fn instance_as_hal(&self) -> Option<&A::Instance> { - A::instance_as_hal(&self.instance) - } - - /// # Safety - /// - /// - The raw handles obtained from the Instance must not be manually destroyed - pub unsafe fn from_instance(factory: G, instance: Instance) -> Self { - profiling::scope!("Global::new"); - Self { - instance, - surfaces: Registry::without_backend(&factory, "Surface"), - hubs: Hubs::new(&factory), - } - } - - pub fn clear_backend(&self, _dummy: ()) { - let mut surface_guard = self.surfaces.data.write(); - let hub = A::hub(self); - // this is used for tests, which keep the adapter - hub.clear(&mut surface_guard, false); - } - - pub fn generate_report(&self) -> GlobalReport { - GlobalReport { - surfaces: self.surfaces.data.read().generate_report(), - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: if self.instance.vulkan.is_some() { - Some(self.hubs.vulkan.generate_report()) - } else { - None - }, - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - metal: if self.instance.metal.is_some() { - Some(self.hubs.metal.generate_report()) - } else { - None - }, - #[cfg(all(feature = "dx12", windows))] - dx12: if self.instance.dx12.is_some() { - Some(self.hubs.dx12.generate_report()) - } else { - None - }, - #[cfg(all(feature = "dx11", windows))] - dx11: if self.instance.dx11.is_some() { - Some(self.hubs.dx11.generate_report()) - } else { - None - }, - #[cfg(feature = "gles")] - gl: if self.instance.gl.is_some() { - Some(self.hubs.gl.generate_report()) - } else { - None - }, - } - } -} - -impl Drop for Global { - fn drop(&mut self) { - profiling::scope!("Global::drop"); - log::info!("Dropping Global"); - let mut surface_guard = self.surfaces.data.write(); - - // destroy hubs before the instance gets dropped - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - { - self.hubs.vulkan.clear(&mut surface_guard, true); - } - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - { - self.hubs.metal.clear(&mut surface_guard, true); - } - #[cfg(all(feature = "dx12", windows))] - { - self.hubs.dx12.clear(&mut surface_guard, true); - } - #[cfg(all(feature = "dx11", windows))] - { - self.hubs.dx11.clear(&mut surface_guard, true); - } - #[cfg(feature = "gles")] - { - self.hubs.gl.clear(&mut surface_guard, true); - } - - // destroy surfaces - for element in surface_guard.map.drain(..) 
{ - if let Element::Occupied(surface, _) = element { - self.instance.destroy_surface(surface); - } - } - } -} - -pub trait HalApi: hal::Api { - const VARIANT: Backend; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance; - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>; - fn hub(global: &Global) -> &Hub; - fn get_surface(surface: &Surface) -> Option<&HalSurface>; - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface>; -} - -impl HalApi for hal::api::Empty { - const VARIANT: Backend = Backend::Empty; - fn create_instance_from_hal(_: &str, _: Self::Instance) -> Instance { - unimplemented!("called empty api") - } - fn instance_as_hal(_: &Instance) -> Option<&Self::Instance> { - unimplemented!("called empty api") - } - fn hub(_: &Global) -> &Hub { - unimplemented!("called empty api") - } - fn get_surface(_: &Surface) -> Option<&HalSurface> { - unimplemented!("called empty api") - } - fn get_surface_mut(_: &mut Surface) -> Option<&mut HalSurface> { - unimplemented!("called empty api") - } -} - -#[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] -impl HalApi for hal::api::Vulkan { - const VARIANT: Backend = Backend::Vulkan; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - Instance { - name: name.to_owned(), - vulkan: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.vulkan.as_ref() - } - fn hub(global: &Global) -> &Hub { - &global.hubs.vulkan - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.vulkan.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.vulkan.as_mut() - } -} - -#[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] -impl HalApi for hal::api::Metal { - const VARIANT: Backend = Backend::Metal; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - Instance { - name: name.to_owned(), - metal: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.metal.as_ref() - } - fn hub(global: &Global) -> &Hub { - &global.hubs.metal - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.metal.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.metal.as_mut() - } -} - -#[cfg(all(feature = "dx12", windows))] -impl HalApi for hal::api::Dx12 { - const VARIANT: Backend = Backend::Dx12; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - Instance { - name: name.to_owned(), - dx12: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.dx12.as_ref() - } - fn hub(global: &Global) -> &Hub { - &global.hubs.dx12 - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.dx12.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.dx12.as_mut() - } -} - -#[cfg(all(feature = "dx11", windows))] -impl HalApi for hal::api::Dx11 { - const VARIANT: Backend = Backend::Dx11; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - Instance { - name: name.to_owned(), - dx11: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.dx11.as_ref() - } - fn hub(global: &Global) -> &Hub { - 
&global.hubs.dx11 - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.dx11.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.dx11.as_mut() - } -} - -#[cfg(feature = "gles")] -impl HalApi for hal::api::Gles { - const VARIANT: Backend = Backend::Gl; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - #[allow(clippy::needless_update)] - Instance { - name: name.to_owned(), - gl: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.gl.as_ref() - } - fn hub(global: &Global) -> &Hub { - &global.hubs.gl - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.gl.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.gl.as_mut() - } -} - -#[cfg(test)] -fn _test_send_sync(global: &Global) { - fn test_internal(_: T) {} - test_internal(global) -} - -#[test] -fn test_epoch_end_of_life() { - use id::TypedId as _; - let mut man = IdentityManager::default(); - man.epochs.push(id::EPOCH_MASK); - man.free.push(0); - let id1 = man.alloc::(Backend::Empty); - assert_eq!(id1.unzip().0, 0); - man.free(id1); - let id2 = man.alloc::(Backend::Empty); - // confirm that the index 0 is no longer re-used - assert_eq!(id2.unzip().0, 1); -} diff --git a/third_party/rust/wgpu-core/src/id.rs b/third_party/rust/wgpu-core/src/id.rs index 9c654e3ca9b2..6aa2aa287267 100644 --- a/third_party/rust/wgpu-core/src/id.rs +++ b/third_party/rust/wgpu-core/src/id.rs @@ -42,10 +42,10 @@ type Dummy = hal::api::Empty; /// `X` type with the resource type `X`, for some specific backend /// `A`. /// -/// [`Global`]: crate::hub::Global +/// [`Global`]: crate::global::Global /// [`Hub`]: crate::hub::Hub /// [`Hub`]: crate::hub::Hub -/// [`Storage`]: crate::hub::Storage +/// [`Storage`]: crate::storage::Storage /// [`Texture`]: crate::resource::Texture /// [`Index`]: std::ops::Index /// [`IndexMut`]: std::ops::IndexMut diff --git a/third_party/rust/wgpu-core/src/identity.rs b/third_party/rust/wgpu-core/src/identity.rs new file mode 100644 index 000000000000..5f7cc0dc5a99 --- /dev/null +++ b/third_party/rust/wgpu-core/src/identity.rs @@ -0,0 +1,183 @@ +use parking_lot::Mutex; +use wgt::Backend; + +use crate::{id, Epoch, Index}; +use std::fmt::Debug; + +/// A simple structure to allocate [`Id`] identifiers. +/// +/// Calling [`alloc`] returns a fresh, never-before-seen id. Calling [`free`] +/// marks an id as dead; it will never be returned again by `alloc`. +/// +/// Use `IdentityManager::default` to construct new instances. +/// +/// `IdentityManager` returns `Id`s whose index values are suitable for use as +/// indices into a `Storage` that holds those ids' referents: +/// +/// - Every live id has a distinct index value. Each live id's index selects a +/// distinct element in the vector. +/// +/// - `IdentityManager` prefers low index numbers. If you size your vector to +/// accommodate the indices produced here, the vector's length will reflect +/// the highwater mark of actual occupancy. +/// +/// - `IdentityManager` reuses the index values of freed ids before returning +/// ids with new index values. Freed vector entries get reused. +/// +/// See the module-level documentation for an overview of how this +/// fits together. 
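The `IdentityManager` documentation that follows assumes ids that pack an index, an epoch, and a backend into a single integer. Here is a self-contained zip/unzip sketch of that layout; the bit widths are assumptions chosen for illustration, and wgpu-core defines its own masks in `id.rs`.

```rust
// Illustrative layout: low 32 bits index, then epoch, then backend on top.
const BACKEND_BITS: u64 = 3;
const EPOCH_BITS: u64 = 32 - BACKEND_BITS;
const EPOCH_MASK: u64 = (1 << EPOCH_BITS) - 1;

fn zip(index: u32, epoch: u32, backend: u8) -> u64 {
    assert!(u64::from(epoch) <= EPOCH_MASK);
    assert!(u64::from(backend) < (1 << BACKEND_BITS));
    u64::from(index)
        | (u64::from(epoch) << 32)
        | (u64::from(backend) << (32 + EPOCH_BITS))
}

fn unzip(id: u64) -> (u32, u32, u8) {
    let index = id as u32;
    let epoch = ((id >> 32) & EPOCH_MASK) as u32;
    let backend = (id >> (32 + EPOCH_BITS)) as u8;
    (index, epoch, backend)
}

fn main() {
    let id = zip(7, 1, 2);
    assert_eq!(unzip(id), (7, 1, 2));
}
```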
+///
+/// [`Id`]: crate::id::Id
+/// [`Backend`]: wgt::Backend
+/// [`alloc`]: IdentityManager::alloc
+/// [`free`]: IdentityManager::free
+#[derive(Debug, Default)]
+pub struct IdentityManager {
+    /// Available index values. If empty, then `epochs.len()` is the next index
+    /// to allocate.
+    free: Vec<Index>,
+
+    /// The next or currently-live epoch value associated with each `Id` index.
+    ///
+    /// If there is a live id with index `i`, then `epochs[i]` is its epoch; any
+    /// id with the same index but an older epoch is dead.
+    ///
+    /// If index `i` is currently unused, `epochs[i]` is the epoch to use in its
+    /// next `Id`.
+    epochs: Vec<Epoch>,
+}
+
+impl IdentityManager {
+    /// Allocate a fresh, never-before-seen id with the given `backend`.
+    ///
+    /// The backend is incorporated into the id, so that ids allocated with
+    /// different `backend` values are always distinct.
+    pub fn alloc<I: id::TypedId>(&mut self, backend: Backend) -> I {
+        match self.free.pop() {
+            Some(index) => I::zip(index, self.epochs[index as usize], backend),
+            None => {
+                let epoch = 1;
+                let id = I::zip(self.epochs.len() as Index, epoch, backend);
+                self.epochs.push(epoch);
+                id
+            }
+        }
+    }
+
+    /// Free `id`. It will never be returned from `alloc` again.
+    pub fn free<I: id::TypedId>(&mut self, id: I) {
+        let (index, epoch, _backend) = id.unzip();
+        let pe = &mut self.epochs[index as usize];
+        assert_eq!(*pe, epoch);
+        // If the epoch reaches EOL, the index doesn't go into the
+        // free list, and will never be reused again.
+        if epoch < id::EPOCH_MASK {
+            *pe = epoch + 1;
+            self.free.push(index);
+        }
+    }
+}
+
+/// A type that can build true ids from proto-ids, and free true ids.
+///
+/// For some implementations, the true id is based on the proto-id.
+/// The caller is responsible for providing well-allocated proto-ids.
+///
+/// For other implementations, the proto-id carries no information
+/// (it's `()`, say), and this `IdentityHandler` type takes care of
+/// allocating a fresh true id.
+///
+/// See the module-level documentation for details.
+pub trait IdentityHandler<I>: Debug {
+    /// The type of proto-id consumed by this filter, to produce a true id.
+    type Input: Clone + Debug;
+
+    /// Given a proto-id value `id`, return a true id for `backend`.
+    fn process(&self, id: Self::Input, backend: Backend) -> I;
+
+    /// Free the true id `id`.
+    fn free(&self, id: I);
+}
+
+impl<I: id::TypedId> IdentityHandler<I> for Mutex<IdentityManager> {
+    type Input = ();
+    fn process(&self, _id: Self::Input, backend: Backend) -> I {
+        self.lock().alloc(backend)
+    }
+    fn free(&self, id: I) {
+        self.lock().free(id)
+    }
+}
+
+/// A type that can produce [`IdentityHandler`] filters for ids of type `I`.
+///
+/// See the module-level documentation for details.
+pub trait IdentityHandlerFactory<I> {
+    /// The type of filter this factory constructs.
+    ///
+    /// "Filter" and "handler" seem to both mean the same thing here:
+    /// something that can produce true ids from proto-ids.
+    type Filter: IdentityHandler<I>;
+
+    /// Create an [`IdentityHandler`] implementation that can
+    /// transform proto-ids into ids of type `I`.
+    ///
+    /// [`IdentityHandler`]: IdentityHandler
+    fn spawn(&self) -> Self::Filter;
+}
+
+/// A global identity handler factory based on [`IdentityManager`].
+///
+/// Each of this type's `IdentityHandlerFactory<I>::spawn` methods
+/// returns a `Mutex<IdentityManager>`, which allocates fresh `I`
+/// ids itself, and takes `()` as its proto-id type.
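The `Mutex<IdentityManager>` impl above covers the `()`-input case; the other case the trait doc mentions is a handler whose proto-id already is the id, as when an embedder allocates ids itself. A hypothetical pass-through sketch follows; the trait is restated locally so the snippet compiles on its own, and `PassThrough` is not a wgpu type.

```rust
use std::fmt::Debug;

// Local restatement of the IdentityHandler shape; `Backend` is stubbed.
#[derive(Clone, Copy, Debug)]
enum Backend { Empty }

trait IdentityHandler<I>: Debug {
    type Input: Clone + Debug;
    fn process(&self, id: Self::Input, backend: Backend) -> I;
    fn free(&self, id: I);
}

// "Proto-id carries all the information": the embedder has already
// allocated the id, so process() just hands it back.
#[derive(Debug)]
struct PassThrough;

impl IdentityHandler<u64> for PassThrough {
    type Input = u64;
    fn process(&self, id: u64, _backend: Backend) -> u64 {
        id // trust the caller's allocation
    }
    fn free(&self, _id: u64) {
        // the embedder owns the id space; nothing to reclaim here
    }
}

fn main() {
    let h = PassThrough;
    assert_eq!(h.process(42, Backend::Empty), 42);
    h.free(42);
}
```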
+#[derive(Debug)] +pub struct IdentityManagerFactory; + +impl IdentityHandlerFactory for IdentityManagerFactory { + type Filter = Mutex; + fn spawn(&self) -> Self::Filter { + Mutex::new(IdentityManager::default()) + } +} + +/// A factory that can build [`IdentityHandler`]s for all resource +/// types. +pub trait GlobalIdentityHandlerFactory: + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory +{ +} + +impl GlobalIdentityHandlerFactory for IdentityManagerFactory {} + +pub type Input = <>::Filter as IdentityHandler>::Input; + +#[test] +fn test_epoch_end_of_life() { + use id::TypedId as _; + let mut man = IdentityManager::default(); + man.epochs.push(id::EPOCH_MASK); + man.free.push(0); + let id1 = man.alloc::(Backend::Empty); + assert_eq!(id1.unzip().0, 0); + man.free(id1); + let id2 = man.alloc::(Backend::Empty); + // confirm that the index 0 is no longer re-used + assert_eq!(id2.unzip().0, 1); +} diff --git a/third_party/rust/wgpu-core/src/instance.rs b/third_party/rust/wgpu-core/src/instance.rs index c5989f4bf451..b291de78cde9 100644 --- a/third_party/rust/wgpu-core/src/instance.rs +++ b/third_party/rust/wgpu-core/src/instance.rs @@ -1,7 +1,10 @@ use crate::{ device::{Device, DeviceDescriptor}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Input, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id::{AdapterId, DeviceId, SurfaceId, Valid}, + identity::{GlobalIdentityHandlerFactory, Input}, present::Presentation, LabelHelpers, LifeGuard, Stored, DOWNLEVEL_WARNING_MESSAGE, }; @@ -140,7 +143,7 @@ pub struct Surface { pub gl: Option>, } -impl crate::hub::Resource for Surface { +impl crate::resource::Resource for Surface { const TYPE: &'static str = "Surface"; fn life_guard(&self) -> &LifeGuard { @@ -356,7 +359,7 @@ impl Adapter { } } -impl crate::hub::Resource for Adapter { +impl crate::resource::Resource for Adapter { const TYPE: &'static str = "Adapter"; fn life_guard(&self) -> &LifeGuard { @@ -517,7 +520,11 @@ impl Global { id.0 } - #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] + #[cfg(all( + target_arch = "wasm32", + not(target_os = "emscripten"), + feature = "gles" + ))] pub fn create_surface_webgl_canvas( &self, canvas: web_sys::HtmlCanvasElement, @@ -544,7 +551,11 @@ impl Global { Ok(id.0) } - #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] + #[cfg(all( + target_arch = "wasm32", + not(target_os = "emscripten"), + feature = "gles" + ))] pub fn create_surface_webgl_offscreen_canvas( &self, canvas: web_sys::OffscreenCanvas, diff --git a/third_party/rust/wgpu-core/src/lib.rs b/third_party/rust/wgpu-core/src/lib.rs index fc80f0017d66..d14ea9c0bdad 100644 --- a/third_party/rust/wgpu-core/src/lib.rs +++ b/third_party/rust/wgpu-core/src/lib.rs @@ -3,6 +3,17 @@ * into other language-specific user-friendly libraries. */ +// When we have no backends, we end up with a lot of dead or otherwise unreachable code. 
+#![cfg_attr( + all( + not(all(feature = "vulkan", not(target_arch = "wasm32"))), + not(all(feature = "metal", any(target_os = "macos", target_os = "ios"))), + not(all(feature = "dx12", windows)), + not(all(feature = "dx11", windows)), + not(feature = "gles"), + ), + allow(unused, clippy::let_and_return) +)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow( // It is much clearer to assert negative conditions with eq! false @@ -42,13 +53,18 @@ pub mod command; mod conv; pub mod device; pub mod error; +pub mod global; +pub mod hal_api; pub mod hub; pub mod id; +pub mod identity; mod init_tracker; pub mod instance; pub mod pipeline; pub mod present; +pub mod registry; pub mod resource; +pub mod storage; mod track; mod validation; @@ -355,7 +371,7 @@ define_backend_caller! { gfx_if_gles, gfx_if_gles_hidden, "gles" if feature = "g /// identifiers to select backends dynamically, even though many `wgpu_core` /// methods are compiled and optimized for a specific back end. /// -/// This macro is typically used to call methods on [`wgpu_core::hub::Global`], +/// This macro is typically used to call methods on [`wgpu_core::global::Global`], /// many of which take a single `hal::Api` type parameter. For example, to /// create a new buffer on the device indicated by `device_id`, one would say: /// @@ -375,13 +391,13 @@ define_backend_caller! { gfx_if_gles, gfx_if_gles_hidden, "gles" if feature = "g /// That `gfx_select!` call uses `device_id`'s backend to select the right /// backend type `A` for a call to `Global::device_create_buffer`. /// -/// However, there's nothing about this macro that is specific to `hub::Global`. +/// However, there's nothing about this macro that is specific to `global::Global`. /// For example, Firefox's embedding of `wgpu_core` defines its own types with /// methods that take `hal::Api` type parameters. Firefox uses `gfx_select!` to /// dynamically dispatch to the right specialization based on the resource's id. /// /// [`wgpu_types::Backend`]: wgt::Backend -/// [`wgpu_core::hub::Global`]: crate::hub::Global +/// [`wgpu_core::global::Global`]: crate::global::Global /// [`Id`]: id::Id #[macro_export] macro_rules! 
gfx_select { diff --git a/third_party/rust/wgpu-core/src/pipeline.rs b/third_party/rust/wgpu-core/src/pipeline.rs index beb71dca163e..da06b652ea73 100644 --- a/third_party/rust/wgpu-core/src/pipeline.rs +++ b/third_party/rust/wgpu-core/src/pipeline.rs @@ -2,8 +2,8 @@ use crate::{ binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError}, command::ColorAttachmentError, device::{DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, - hub::Resource, id::{DeviceId, PipelineLayoutId, ShaderModuleId}, + resource::Resource, validation, Label, LifeGuard, Stored, }; use arrayvec::ArrayVec; diff --git a/third_party/rust/wgpu-core/src/present.rs b/third_party/rust/wgpu-core/src/present.rs index f3989bdec69f..c9df46ad9304 100644 --- a/third_party/rust/wgpu-core/src/present.rs +++ b/third_party/rust/wgpu-core/src/present.rs @@ -16,8 +16,11 @@ use crate::device::trace::Action; use crate::{ conv, device::{DeviceError, MissingDownlevelFlags}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Input, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id::{DeviceId, SurfaceId, TextureId, Valid}, + identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::TextureInitTracker, resource, track, LifeGuard, Stored, }; diff --git a/third_party/rust/wgpu-core/src/registry.rs b/third_party/rust/wgpu-core/src/registry.rs new file mode 100644 index 000000000000..ef47f7d2447d --- /dev/null +++ b/third_party/rust/wgpu-core/src/registry.rs @@ -0,0 +1,200 @@ +use std::marker::PhantomData; + +use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use wgt::Backend; + +use crate::{ + hub::{Access, Token}, + id, + identity::{IdentityHandler, IdentityHandlerFactory}, + resource::Resource, + storage::Storage, +}; + +#[derive(Debug)] +pub struct Registry> { + identity: F::Filter, + pub(crate) data: RwLock>, + backend: Backend, +} + +impl> Registry { + pub(crate) fn new(backend: Backend, factory: &F) -> Self { + Self { + identity: factory.spawn(), + data: RwLock::new(Storage { + map: Vec::new(), + kind: T::TYPE, + _phantom: PhantomData, + }), + backend, + } + } + + pub(crate) fn without_backend(factory: &F, kind: &'static str) -> Self { + Self { + identity: factory.spawn(), + data: RwLock::new(Storage { + map: Vec::new(), + kind, + _phantom: PhantomData, + }), + backend: Backend::Empty, + } + } +} + +#[must_use] +pub(crate) struct FutureId<'a, I: id::TypedId, T> { + id: I, + data: &'a RwLock>, +} + +impl FutureId<'_, I, T> { + #[cfg(feature = "trace")] + pub fn id(&self) -> I { + self.id + } + + pub fn into_id(self) -> I { + self.id + } + + pub fn assign<'a, A: Access>(self, value: T, _: &'a mut Token) -> id::Valid { + self.data.write().insert(self.id, value); + id::Valid(self.id) + } + + pub fn assign_error<'a, A: Access>(self, label: &str, _: &'a mut Token) -> I { + self.data.write().insert_error(self.id, label); + self.id + } +} + +impl> Registry { + pub(crate) fn prepare( + &self, + id_in: >::Input, + ) -> FutureId { + FutureId { + id: self.identity.process(id_in, self.backend), + data: &self.data, + } + } + + /// Acquire read access to this `Registry`'s contents. + /// + /// The caller must present a mutable reference to a `Token`, + /// for some type `A` that comes before this `Registry`'s resource + /// type `T` in the lock ordering. A `Token` grants + /// permission to lock any field; see [`Token::root`]. 
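In the new `registry.rs`, `prepare` mints the id before the resource exists, and `FutureId::assign` fills the slot later; that ordering is what lets a tracing layer record the id first. A simplified two-phase sketch, with a `HashMap` standing in for `Storage` and a plain counter for the identity handler:

```rust
use std::collections::HashMap;
use std::sync::RwLock;

struct Registry<T> {
    next: RwLock<u64>,
    data: RwLock<HashMap<u64, T>>,
}

#[must_use]
struct FutureId<'a, T> {
    id: u64,
    data: &'a RwLock<HashMap<u64, T>>,
}

impl<T> Registry<T> {
    fn new() -> Self {
        Registry { next: RwLock::new(0), data: RwLock::new(HashMap::new()) }
    }

    // Phase one: allocate the id, but don't touch the storage yet.
    fn prepare(&self) -> FutureId<'_, T> {
        let mut next = self.next.write().unwrap();
        let id = *next;
        *next += 1;
        FutureId { id, data: &self.data }
    }
}

impl<T> FutureId<'_, T> {
    // Phase two: fill the slot behind the previously minted id.
    fn assign(self, value: T) -> u64 {
        self.data.write().unwrap().insert(self.id, value);
        self.id
    }
}

fn main() {
    let registry: Registry<String> = Registry::new();
    let future = registry.prepare();
    // The id is known here, before the resource exists.
    let id = future.assign("my-buffer".to_string());
    assert_eq!(registry.data.read().unwrap()[&id], "my-buffer");
}
```

The `#[must_use]` on `FutureId` mirrors the real code: dropping a prepared id without assigning it would leak an id slot.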
+    ///
+    /// Once the read lock is acquired, return a new `Token<T>`, along
+    /// with a read guard for this `Registry`'s [`Storage`], which can
+    /// be indexed by id to get at the actual resources.
+    ///
+    /// The borrow checker ensures that the caller cannot again access
+    /// its `Token<A>` until it has dropped both the guard and the
+    /// `Token<T>`.
+    ///
+    /// See the [`Hub`] type for more details on locking.
+    ///
+    /// [`Hub`]: crate::hub::Hub
+    pub(crate) fn read<'a, A: Access<T>>(
+        &'a self,
+        _token: &'a mut Token<A>,
+    ) -> (RwLockReadGuard<'a, Storage<T, I>>, Token<'a, T>) {
+        (self.data.read(), Token::new())
+    }
+
+    /// Acquire write access to this `Registry`'s contents.
+    ///
+    /// The caller must present a mutable reference to a `Token<A>`,
+    /// for some type `A` that comes before this `Registry`'s resource
+    /// type `T` in the lock ordering. A `Token<Root>` grants
+    /// permission to lock any field; see [`Token::root`].
+    ///
+    /// Once the lock is acquired, return a new `Token<T>`, along with
+    /// a write guard for this `Registry`'s [`Storage`], which can be
+    /// indexed by id to get at the actual resources.
+    ///
+    /// The borrow checker ensures that the caller cannot again access
+    /// its `Token<A>` until it has dropped both the guard and the
+    /// `Token<T>`.
+    ///
+    /// See the [`Hub`] type for more details on locking.
+    ///
+    /// [`Hub`]: crate::hub::Hub
+    pub(crate) fn write<'a, A: Access<T>>(
+        &'a self,
+        _token: &'a mut Token<A>,
+    ) -> (RwLockWriteGuard<'a, Storage<T, I>>, Token<'a, T>) {
+        (self.data.write(), Token::new())
+    }
+
+    /// Unregister the resource at `id`.
+    ///
+    /// The caller must prove that it already holds a write lock for
+    /// this `Registry` by passing a mutable reference to this
+    /// `Registry`'s storage, obtained from the write guard returned
+    /// by a previous call to [`write`], as the `guard` parameter.
+    pub fn unregister_locked(&self, id: I, guard: &mut Storage<T, I>) -> Option<T> {
+        let value = guard.remove(id);
+        //Note: careful about the order here!
+        self.identity.free(id);
+        //Returning None is legal if it's an error ID
+        value
+    }
+
+    /// Unregister the resource at `id` and return its value, if any.
+    ///
+    /// The caller must present a mutable reference to a `Token<A>`,
+    /// for some type `A` that comes before this `Registry`'s resource
+    /// type `T` in the lock ordering.
+    ///
+    /// This returns a `Token<T>`, but it's almost useless, because it
+    /// doesn't return a lock guard to go with it: its only effect is
+    /// to make the token you passed to this function inaccessible.
+    /// However, the `Token<T>` can be used to satisfy some functions'
+    /// bureaucratic expectations that you will have one available.
+    ///
+    /// The borrow checker ensures that the caller cannot again access
+    /// its `Token<A>` until it has dropped both the guard and the
+    /// `Token<T>`.
+    ///
+    /// See the [`Hub`] type for more details on locking.
+    ///
+    /// [`Hub`]: crate::hub::Hub
+    pub(crate) fn unregister<'a, A: Access<T>>(
+        &self,
+        id: I,
+        _token: &'a mut Token<A>,
+    ) -> (Option<T>, Token<'a, T>) {
+        let value = self.data.write().remove(id);
+        //Note: careful about the order here!
+ self.identity.free(id); + //Returning None is legal if it's an error ID + (value, Token::new()) + } + + pub fn label_for_resource(&self, id: I) -> String { + let guard = self.data.read(); + + let type_name = guard.kind; + match guard.get(id) { + Ok(res) => { + let label = res.label(); + if label.is_empty() { + format!("<{}-{:?}>", type_name, id.unzip()) + } else { + label.to_string() + } + } + Err(_) => format!( + "", + type_name, + guard.label_for_invalid_id(id) + ), + } + } +} diff --git a/third_party/rust/wgpu-core/src/resource.rs b/third_party/rust/wgpu-core/src/resource.rs index 6ea7b9cce0d7..4aca271e9e65 100644 --- a/third_party/rust/wgpu-core/src/resource.rs +++ b/third_party/rust/wgpu-core/src/resource.rs @@ -1,7 +1,10 @@ use crate::{ device::{DeviceError, HostMap, MissingDownlevelFlags, MissingFeatures}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Resource, Token}, + global::Global, + hal_api::HalApi, + hub::Token, id::{AdapterId, DeviceId, SurfaceId, TextureId, Valid}, + identity::GlobalIdentityHandlerFactory, init_tracker::{BufferInitTracker, TextureInitTracker}, track::TextureSelector, validation::MissingBufferUsageError, @@ -13,6 +16,17 @@ use thiserror::Error; use std::{borrow::Borrow, ops::Range, ptr::NonNull}; +pub trait Resource { + const TYPE: &'static str; + fn life_guard(&self) -> &LifeGuard; + fn label(&self) -> &str { + #[cfg(debug_assertions)] + return &self.life_guard().label; + #[cfg(not(debug_assertions))] + return ""; + } +} + /// The status code provided to the buffer mapping callback. /// /// This is very similar to `BufferAccessResult`, except that this is FFI-friendly. diff --git a/third_party/rust/wgpu-core/src/storage.rs b/third_party/rust/wgpu-core/src/storage.rs new file mode 100644 index 000000000000..07387b019410 --- /dev/null +++ b/third_party/rust/wgpu-core/src/storage.rs @@ -0,0 +1,234 @@ +use std::{marker::PhantomData, mem, ops}; + +use wgt::Backend; + +use crate::{id, Epoch, Index}; + +/// An entry in a `Storage::map` table. +#[derive(Debug)] +pub(crate) enum Element { + /// There are no live ids with this index. + Vacant, + + /// There is one live id with this index, allocated at the given + /// epoch. + Occupied(T, Epoch), + + /// Like `Occupied`, but an error occurred when creating the + /// resource. + /// + /// The given `String` is the resource's descriptor label. + Error(Epoch, String), +} + +#[derive(Clone, Debug, Default)] +pub struct StorageReport { + pub num_occupied: usize, + pub num_vacant: usize, + pub num_error: usize, + pub element_size: usize, +} + +impl StorageReport { + pub fn is_empty(&self) -> bool { + self.num_occupied + self.num_vacant + self.num_error == 0 + } +} + +#[derive(Clone, Debug)] +pub(crate) struct InvalidId; + +/// A table of `T` values indexed by the id type `I`. +/// +/// The table is represented as a vector indexed by the ids' index +/// values, so you should use an id allocator like `IdentityManager` +/// that keeps the index values dense and close to zero. 
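The epoch stored in each `Element::Occupied` is what lets `Storage` tell a live id from a stale one after an index is recycled. A minimal epoch-checked table showing just that mechanism, with types trimmed to the essentials:

```rust
#[derive(Debug)]
enum Element<T> {
    Vacant,
    Occupied(T, u32), // value plus the epoch it was inserted at
}

struct Storage<T> {
    map: Vec<Element<T>>,
}

impl<T> Storage<T> {
    fn insert(&mut self, index: usize, epoch: u32, value: T) {
        if index >= self.map.len() {
            self.map.resize_with(index + 1, || Element::Vacant);
        }
        self.map[index] = Element::Occupied(value, epoch);
    }

    fn get(&self, index: usize, epoch: u32) -> Option<&T> {
        match self.map.get(index) {
            Some(Element::Occupied(v, e)) if *e == epoch => Some(v),
            _ => None, // vacant, out of range, or epoch mismatch (stale id)
        }
    }
}

fn main() {
    let mut s = Storage { map: Vec::new() };
    s.insert(0, 1, "first");
    assert_eq!(s.get(0, 1), Some(&"first"));
    // Reuse index 0 at a later epoch; the old id (epoch 1) is now stale.
    s.insert(0, 2, "second");
    assert_eq!(s.get(0, 1), None);
    assert_eq!(s.get(0, 2), Some(&"second"));
}
```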
+#[derive(Debug)] +pub struct Storage { + pub(crate) map: Vec>, + pub(crate) kind: &'static str, + pub(crate) _phantom: PhantomData, +} + +impl ops::Index> for Storage { + type Output = T; + fn index(&self, id: id::Valid) -> &T { + self.get(id.0).unwrap() + } +} + +impl ops::IndexMut> for Storage { + fn index_mut(&mut self, id: id::Valid) -> &mut T { + self.get_mut(id.0).unwrap() + } +} + +impl Storage { + pub(crate) fn contains(&self, id: I) -> bool { + let (index, epoch, _) = id.unzip(); + match self.map.get(index as usize) { + Some(&Element::Vacant) => false, + Some(&Element::Occupied(_, storage_epoch) | &Element::Error(storage_epoch, _)) => { + storage_epoch == epoch + } + None => false, + } + } + + /// Attempts to get a reference to an item behind a potentially invalid ID. + /// + /// Returns [`None`] if there is an epoch mismatch, or the entry is empty. + /// + /// This function is primarily intended for the `as_hal` family of functions + /// where you may need to fallibly get a object backed by an id that could + /// be in a different hub. + pub(crate) fn try_get(&self, id: I) -> Result, InvalidId> { + let (index, epoch, _) = id.unzip(); + let (result, storage_epoch) = match self.map.get(index as usize) { + Some(&Element::Occupied(ref v, epoch)) => (Ok(Some(v)), epoch), + Some(&Element::Vacant) => return Ok(None), + Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), + None => return Err(InvalidId), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{}] is no longer alive", + self.kind, index + ); + result + } + + /// Get a reference to an item behind a potentially invalid ID. + /// Panics if there is an epoch mismatch, or the entry is empty. + pub(crate) fn get(&self, id: I) -> Result<&T, InvalidId> { + let (index, epoch, _) = id.unzip(); + let (result, storage_epoch) = match self.map.get(index as usize) { + Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), + Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), + Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), + None => return Err(InvalidId), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{}] is no longer alive", + self.kind, index + ); + result + } + + /// Get a mutable reference to an item behind a potentially invalid ID. + /// Panics if there is an epoch mismatch, or the entry is empty. 
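`try_get` above deliberately separates "nothing at this index" from "an error entry sits at this index", which matters for cross-hub probes. The same three-way shape in isolation, with stand-in types:

```rust
#[derive(Debug, PartialEq)]
struct InvalidId;

enum Slot<T> {
    Vacant,
    Occupied(T),
    Error,
}

// Ok(Some(_)): live value; Ok(None): empty slot, fine for a probe that
// may be aimed at the wrong hub; Err(InvalidId): an error entry.
fn try_get<T>(slot: &Slot<T>) -> Result<Option<&T>, InvalidId> {
    match slot {
        Slot::Occupied(v) => Ok(Some(v)),
        Slot::Vacant => Ok(None),
        Slot::Error => Err(InvalidId),
    }
}

fn main() {
    assert_eq!(try_get(&Slot::Occupied(5)), Ok(Some(&5)));
    assert_eq!(try_get::<i32>(&Slot::Vacant), Ok(None));
    assert_eq!(try_get::<i32>(&Slot::Error), Err(InvalidId));
}
```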
+ pub(crate) fn get_mut(&mut self, id: I) -> Result<&mut T, InvalidId> { + let (index, epoch, _) = id.unzip(); + let (result, storage_epoch) = match self.map.get_mut(index as usize) { + Some(&mut Element::Occupied(ref mut v, epoch)) => (Ok(v), epoch), + Some(&mut Element::Vacant) | None => panic!("{}[{}] does not exist", self.kind, index), + Some(&mut Element::Error(epoch, ..)) => (Err(InvalidId), epoch), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{}] is no longer alive", + self.kind, index + ); + result + } + + pub(crate) unsafe fn get_unchecked(&self, id: u32) -> &T { + match self.map[id as usize] { + Element::Occupied(ref v, _) => v, + Element::Vacant => panic!("{}[{}] does not exist", self.kind, id), + Element::Error(_, _) => panic!(""), + } + } + + pub(crate) fn label_for_invalid_id(&self, id: I) -> &str { + let (index, _, _) = id.unzip(); + match self.map.get(index as usize) { + Some(&Element::Error(_, ref label)) => label, + _ => "", + } + } + + fn insert_impl(&mut self, index: usize, element: Element) { + if index >= self.map.len() { + self.map.resize_with(index + 1, || Element::Vacant); + } + match std::mem::replace(&mut self.map[index], element) { + Element::Vacant => {} + _ => panic!("Index {index:?} is already occupied"), + } + } + + pub(crate) fn insert(&mut self, id: I, value: T) { + let (index, epoch, _) = id.unzip(); + self.insert_impl(index as usize, Element::Occupied(value, epoch)) + } + + pub(crate) fn insert_error(&mut self, id: I, label: &str) { + let (index, epoch, _) = id.unzip(); + self.insert_impl(index as usize, Element::Error(epoch, label.to_string())) + } + + pub(crate) fn force_replace(&mut self, id: I, value: T) { + let (index, epoch, _) = id.unzip(); + self.map[index as usize] = Element::Occupied(value, epoch); + } + + pub(crate) fn remove(&mut self, id: I) -> Option { + let (index, epoch, _) = id.unzip(); + match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { + Element::Occupied(value, storage_epoch) => { + assert_eq!(epoch, storage_epoch); + Some(value) + } + Element::Error(..) => None, + Element::Vacant => panic!("Cannot remove a vacant resource"), + } + } + + // Prevents panic on out of range access, allows Vacant elements. + pub(crate) fn _try_remove(&mut self, id: I) -> Option { + let (index, epoch, _) = id.unzip(); + if index as usize >= self.map.len() { + None + } else if let Element::Occupied(value, storage_epoch) = + std::mem::replace(&mut self.map[index as usize], Element::Vacant) + { + assert_eq!(epoch, storage_epoch); + Some(value) + } else { + None + } + } + + pub(crate) fn iter(&self, backend: Backend) -> impl Iterator { + self.map + .iter() + .enumerate() + .filter_map(move |(index, x)| match *x { + Element::Occupied(ref value, storage_epoch) => { + Some((I::zip(index as Index, storage_epoch, backend), value)) + } + _ => None, + }) + } + + pub(crate) fn len(&self) -> usize { + self.map.len() + } + + pub(crate) fn generate_report(&self) -> StorageReport { + let mut report = StorageReport { + element_size: mem::size_of::(), + ..Default::default() + }; + for element in self.map.iter() { + match *element { + Element::Occupied(..) => report.num_occupied += 1, + Element::Vacant => report.num_vacant += 1, + Element::Error(..) 
=> report.num_error += 1, + } + } + report + } +} diff --git a/third_party/rust/wgpu-core/src/track/buffer.rs b/third_party/rust/wgpu-core/src/track/buffer.rs index b7682968b205..b5e61c5a224a 100644 --- a/third_party/rust/wgpu-core/src/track/buffer.rs +++ b/third_party/rust/wgpu-core/src/track/buffer.rs @@ -9,9 +9,10 @@ use std::{borrow::Cow, marker::PhantomData, vec::Drain}; use super::PendingTransition; use crate::{ - hub, + hal_api::HalApi, id::{BufferId, TypedId, Valid}, resource::Buffer, + storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, @@ -41,12 +42,12 @@ impl ResourceUses for BufferUses { } /// Stores all the buffers that a bind group stores. -pub(crate) struct BufferBindGroupState { +pub(crate) struct BufferBindGroupState { buffers: Vec<(Valid, RefCount, BufferUses)>, _phantom: PhantomData, } -impl BufferBindGroupState { +impl BufferBindGroupState { pub fn new() -> Self { Self { buffers: Vec::new(), @@ -72,7 +73,7 @@ impl BufferBindGroupState { /// Adds the given resource with the given state. pub fn add_single<'a>( &mut self, - storage: &'a hub::Storage, BufferId>, + storage: &'a storage::Storage, BufferId>, id: BufferId, state: BufferUses, ) -> Option<&'a Buffer> { @@ -87,13 +88,13 @@ impl BufferBindGroupState { /// Stores all buffer state within a single usage scope. #[derive(Debug)] -pub(crate) struct BufferUsageScope { +pub(crate) struct BufferUsageScope { state: Vec, metadata: ResourceMetadata, } -impl BufferUsageScope { +impl BufferUsageScope { pub fn new() -> Self { Self { state: Vec::new(), @@ -215,7 +216,7 @@ impl BufferUsageScope { /// the vectors will be extended. A call to set_size is not needed. pub fn merge_single<'a>( &mut self, - storage: &'a hub::Storage, BufferId>, + storage: &'a storage::Storage, BufferId>, id: BufferId, new_state: BufferUses, ) -> Result<&'a Buffer, UsageConflict> { @@ -248,7 +249,7 @@ impl BufferUsageScope { } /// Stores all buffer state within a command buffer or device. -pub(crate) struct BufferTracker { +pub(crate) struct BufferTracker { start: Vec, end: Vec, @@ -256,7 +257,7 @@ pub(crate) struct BufferTracker { temp: Vec>, } -impl BufferTracker { +impl BufferTracker { pub fn new() -> Self { Self { start: Vec::new(), @@ -348,7 +349,7 @@ impl BufferTracker { /// the vectors will be extended. A call to set_size is not needed. pub fn set_single<'a>( &mut self, - storage: &'a hub::Storage, BufferId>, + storage: &'a storage::Storage, BufferId>, id: BufferId, state: BufferUses, ) -> Option<(&'a Buffer, Option>)> { @@ -588,7 +589,7 @@ impl BufferStateProvider<'_> { /// Indexes must be valid indexes into all arrays passed in /// to this function, either directly or via metadata or provider structs. #[inline(always)] -unsafe fn insert_or_merge( +unsafe fn insert_or_merge( life_guard: Option<&LifeGuard>, start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], @@ -645,7 +646,7 @@ unsafe fn insert_or_merge( /// Indexes must be valid indexes into all arrays passed in /// to this function, either directly or via metadata or provider structs. 
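`BufferTracker` in the hunk above keeps paired `start` and `end` state vectors so it can both replay a command buffer's entry requirements and emit barriers when states change. A toy double-sided tracker showing that idea; the states and names are illustrative, not wgpu's `BufferUses`:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum State { Uninit, CopySrc, CopyDst }

struct Transition { index: usize, from: State, to: State }

struct Tracker {
    start: Vec<State>, // state each resource must be in on entry
    end: Vec<State>,   // latest known state
}

impl Tracker {
    fn set_single(&mut self, index: usize, state: State) -> Option<Transition> {
        let prev = self.end[index];
        if prev == State::Uninit {
            // First sight of this resource: record both endpoints, no barrier.
            self.start[index] = state;
            self.end[index] = state;
            None
        } else if prev != state {
            self.end[index] = state;
            Some(Transition { index, from: prev, to: state })
        } else {
            None
        }
    }
}

fn main() {
    let mut t = Tracker {
        start: vec![State::Uninit; 4],
        end: vec![State::Uninit; 4],
    };
    assert!(t.set_single(0, State::CopySrc).is_none());
    let barrier = t.set_single(0, State::CopyDst).unwrap();
    assert_eq!((barrier.index, barrier.from, barrier.to),
               (0, State::CopySrc, State::CopyDst));
    // start still remembers how the resource must look on entry.
    assert_eq!(t.start[0], State::CopySrc);
}
```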
#[inline(always)] -unsafe fn insert_or_barrier_update( +unsafe fn insert_or_barrier_update( life_guard: Option<&LifeGuard>, start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], @@ -690,7 +691,7 @@ unsafe fn insert_or_barrier_update( } #[inline(always)] -unsafe fn insert( +unsafe fn insert( life_guard: Option<&LifeGuard>, start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], @@ -723,7 +724,7 @@ unsafe fn insert( } #[inline(always)] -unsafe fn merge( +unsafe fn merge( current_states: &mut [BufferUses], index32: u32, index: usize, diff --git a/third_party/rust/wgpu-core/src/track/metadata.rs b/third_party/rust/wgpu-core/src/track/metadata.rs index 73da0d6c5d3f..8561d93bee03 100644 --- a/third_party/rust/wgpu-core/src/track/metadata.rs +++ b/third_party/rust/wgpu-core/src/track/metadata.rs @@ -1,7 +1,7 @@ //! The `ResourceMetadata` type. use crate::{ - hub, + hal_api::HalApi, id::{self, TypedId}, Epoch, LifeGuard, RefCount, }; @@ -17,7 +17,7 @@ use wgt::strict_assert; /// members, but a bit vector tracks occupancy, so iteration touches /// only occupied elements. #[derive(Debug)] -pub(super) struct ResourceMetadata { +pub(super) struct ResourceMetadata { /// If the resource with index `i` is a member, `owned[i]` is `true`. owned: BitVec, @@ -31,7 +31,7 @@ pub(super) struct ResourceMetadata { _phantom: PhantomData, } -impl ResourceMetadata { +impl ResourceMetadata { pub(super) fn new() -> Self { Self { owned: BitVec::default(), @@ -172,7 +172,7 @@ impl ResourceMetadata { /// /// This is used to abstract over the various places /// trackers can get new resource metadata from. -pub(super) enum ResourceMetadataProvider<'a, A: hub::HalApi> { +pub(super) enum ResourceMetadataProvider<'a, A: HalApi> { /// Comes directly from explicit values. Direct { epoch: Epoch, @@ -183,7 +183,7 @@ pub(super) enum ResourceMetadataProvider<'a, A: hub::HalApi> { /// The epoch is given directly, but the life count comes from the resource itself. Resource { epoch: Epoch }, } -impl ResourceMetadataProvider<'_, A> { +impl ResourceMetadataProvider<'_, A> { /// Get the epoch and an owned refcount from this. /// /// # Safety diff --git a/third_party/rust/wgpu-core/src/track/mod.rs b/third_party/rust/wgpu-core/src/track/mod.rs index ad61e5e859d0..111b4e961833 100644 --- a/third_party/rust/wgpu-core/src/track/mod.rs +++ b/third_party/rust/wgpu-core/src/track/mod.rs @@ -100,9 +100,10 @@ mod stateless; mod texture; use crate::{ - binding_model, command, conv, hub, + binding_model, command, conv, + hal_api::HalApi, id::{self, TypedId}, - pipeline, resource, + pipeline, resource, storage, }; use std::{fmt, ops}; @@ -312,14 +313,14 @@ impl fmt::Display for InvalidUse { /// /// All bind group states are sorted by their ID so that when adding to a tracker, /// they are added in the most efficient order possible (assending order). -pub(crate) struct BindGroupStates { +pub(crate) struct BindGroupStates { pub buffers: BufferBindGroupState, pub textures: TextureBindGroupState, pub views: StatelessBindGroupSate, id::TextureViewId>, pub samplers: StatelessBindGroupSate, id::SamplerId>, } -impl BindGroupStates { +impl BindGroupStates { pub fn new() -> Self { Self { buffers: BufferBindGroupState::new(), @@ -344,7 +345,7 @@ impl BindGroupStates { /// This is a render bundle specific usage scope. It includes stateless resources /// that are not normally included in a usage scope, but are used by render bundles /// and need to be owned by the render bundles. 
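`ResourceMetadata` above stores dense per-index data but tracks occupancy in a bit vector, so iteration only touches occupied elements. A sketch of that layout with `Vec<bool>` standing in for the real `BitVec`:

```rust
struct Metadata<T> {
    owned: Vec<bool>,        // occupancy: is index i a member?
    payload: Vec<Option<T>>, // dense per-index data, valid only when owned
}

impl<T> Metadata<T> {
    fn set_size(&mut self, size: usize) {
        self.owned.resize(size, false);
        self.payload.resize_with(size, || None);
    }

    fn insert(&mut self, index: usize, value: T) {
        self.owned[index] = true;
        self.payload[index] = Some(value);
    }

    // Touch only occupied elements, as the tracker iteration does.
    fn iter(&self) -> impl Iterator<Item = (usize, &T)> {
        self.owned
            .iter()
            .enumerate()
            .filter(|(_, owned)| **owned)
            .filter_map(|(i, _)| self.payload[i].as_ref().map(|v| (i, v)))
    }
}

fn main() {
    let mut m = Metadata { owned: Vec::new(), payload: Vec::new() };
    m.set_size(8);
    m.insert(2, "texture");
    m.insert(5, "buffer");
    let members: Vec<_> = m.iter().collect();
    assert_eq!(members, vec![(2, &"texture"), (5, &"buffer")]);
}
```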
-pub(crate) struct RenderBundleScope { +pub(crate) struct RenderBundleScope { pub buffers: BufferUsageScope, pub textures: TextureUsageScope, // Don't need to track views and samplers, they are never used directly, only by bind groups. @@ -353,14 +354,14 @@ pub(crate) struct RenderBundleScope { pub query_sets: StatelessTracker, id::QuerySetId>, } -impl RenderBundleScope { +impl RenderBundleScope { /// Create the render bundle scope and pull the maximum IDs from the hubs. pub fn new( - buffers: &hub::Storage, id::BufferId>, - textures: &hub::Storage, id::TextureId>, - bind_groups: &hub::Storage, id::BindGroupId>, - render_pipelines: &hub::Storage, id::RenderPipelineId>, - query_sets: &hub::Storage, id::QuerySetId>, + buffers: &storage::Storage, id::BufferId>, + textures: &storage::Storage, id::TextureId>, + bind_groups: &storage::Storage, id::BindGroupId>, + render_pipelines: &storage::Storage, id::RenderPipelineId>, + query_sets: &storage::Storage, id::QuerySetId>, ) -> Self { let mut value = Self { buffers: BufferUsageScope::new(), @@ -390,7 +391,7 @@ impl RenderBundleScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_bind_group( &mut self, - textures: &hub::Storage, id::TextureId>, + textures: &storage::Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { unsafe { self.buffers.merge_bind_group(&bind_group.buffers)? }; @@ -406,16 +407,16 @@ impl RenderBundleScope { /// A usage scope tracker. Only needs to store stateful resources as stateless /// resources cannot possibly have a usage conflict. #[derive(Debug)] -pub(crate) struct UsageScope { +pub(crate) struct UsageScope { pub buffers: BufferUsageScope, pub textures: TextureUsageScope, } -impl UsageScope { +impl UsageScope { /// Create the render bundle scope and pull the maximum IDs from the hubs. pub fn new( - buffers: &hub::Storage, id::BufferId>, - textures: &hub::Storage, id::TextureId>, + buffers: &storage::Storage, id::BufferId>, + textures: &storage::Storage, id::TextureId>, ) -> Self { let mut value = Self { buffers: BufferUsageScope::new(), @@ -439,7 +440,7 @@ impl UsageScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_bind_group( &mut self, - textures: &hub::Storage, id::TextureId>, + textures: &storage::Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { unsafe { @@ -462,7 +463,7 @@ impl UsageScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_render_bundle( &mut self, - textures: &hub::Storage, id::TextureId>, + textures: &storage::Storage, id::TextureId>, render_bundle: &RenderBundleScope, ) -> Result<(), UsageConflict> { self.buffers.merge_usage_scope(&render_bundle.buffers)?; @@ -474,7 +475,7 @@ impl UsageScope { } /// A full double sided tracker used by CommandBuffers and the Device. -pub(crate) struct Tracker { +pub(crate) struct Tracker { pub buffers: BufferTracker, pub textures: TextureTracker, pub views: StatelessTracker, id::TextureViewId>, @@ -486,7 +487,7 @@ pub(crate) struct Tracker { pub query_sets: StatelessTracker, id::QuerySetId>, } -impl Tracker { +impl Tracker { pub fn new() -> Self { Self { buffers: BufferTracker::new(), @@ -504,17 +505,19 @@ impl Tracker { /// Pull the maximum IDs from the hubs. 
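The usage scopes above merge per-resource states and report a `UsageConflict` when incompatible uses meet. A toy version of the merge rule: read-only bits may combine freely, but any combination involving an exclusive (write) bit and a second use conflicts. The flag values here are invented for the sketch, not wgpu's `BufferUses`:

```rust
const COPY_SRC: u32 = 1 << 0;      // read-only
const VERTEX: u32 = 1 << 1;        // read-only
const COPY_DST: u32 = 1 << 2;      // exclusive
const STORAGE_WRITE: u32 = 1 << 3; // exclusive

const READ_ONLY: u32 = COPY_SRC | VERTEX;

fn invalid_state(state: u32) -> bool {
    // More than one bit set, and not all of them read-only => conflict.
    state.count_ones() > 1 && (state & !READ_ONLY) != 0
}

fn merge(current: u32, new: u32) -> Result<u32, String> {
    let merged = current | new;
    if invalid_state(merged) {
        Err(format!("usage conflict: {current:#b} + {new:#b}"))
    } else {
        Ok(merged)
    }
}

fn main() {
    // Two read-only uses combine.
    assert_eq!(merge(COPY_SRC, VERTEX), Ok(COPY_SRC | VERTEX));
    // A lone write is fine...
    assert_eq!(merge(0, STORAGE_WRITE), Ok(STORAGE_WRITE));
    // ...but a write plus anything else conflicts.
    assert!(merge(COPY_SRC, STORAGE_WRITE).is_err());
    let _ = COPY_DST; // second exclusive flag, kept for symmetry
}
```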
pub fn set_size( &mut self, - buffers: Option<&hub::Storage, id::BufferId>>, - textures: Option<&hub::Storage, id::TextureId>>, - views: Option<&hub::Storage, id::TextureViewId>>, - samplers: Option<&hub::Storage, id::SamplerId>>, - bind_groups: Option<&hub::Storage, id::BindGroupId>>, + buffers: Option<&storage::Storage, id::BufferId>>, + textures: Option<&storage::Storage, id::TextureId>>, + views: Option<&storage::Storage, id::TextureViewId>>, + samplers: Option<&storage::Storage, id::SamplerId>>, + bind_groups: Option<&storage::Storage, id::BindGroupId>>, compute_pipelines: Option< - &hub::Storage, id::ComputePipelineId>, + &storage::Storage, id::ComputePipelineId>, >, - render_pipelines: Option<&hub::Storage, id::RenderPipelineId>>, - bundles: Option<&hub::Storage, id::RenderBundleId>>, - query_sets: Option<&hub::Storage, id::QuerySetId>>, + render_pipelines: Option< + &storage::Storage, id::RenderPipelineId>, + >, + bundles: Option<&storage::Storage, id::RenderBundleId>>, + query_sets: Option<&storage::Storage, id::QuerySetId>>, ) { if let Some(buffers) = buffers { self.buffers.set_size(buffers.len()); @@ -569,7 +572,7 @@ impl Tracker { /// value given to `set_size` pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, - textures: &hub::Storage, id::TextureId>, + textures: &storage::Storage, id::TextureId>, scope: &mut UsageScope, bind_group: &BindGroupStates, ) { diff --git a/third_party/rust/wgpu-core/src/track/stateless.rs b/third_party/rust/wgpu-core/src/track/stateless.rs index 1d0fd5997ad5..bb4206b357b0 100644 --- a/third_party/rust/wgpu-core/src/track/stateless.rs +++ b/third_party/rust/wgpu-core/src/track/stateless.rs @@ -7,8 +7,9 @@ use std::marker::PhantomData; use crate::{ - hub, + hal_api::HalApi, id::{TypedId, Valid}, + resource, storage, track::ResourceMetadata, RefCount, }; @@ -20,7 +21,7 @@ pub(crate) struct StatelessBindGroupSate { _phantom: PhantomData, } -impl StatelessBindGroupSate { +impl StatelessBindGroupSate { pub fn new() -> Self { Self { resources: Vec::new(), @@ -44,7 +45,11 @@ impl StatelessBindGroupSate { } /// Adds the given resource. - pub fn add_single<'a>(&mut self, storage: &'a hub::Storage, id: Id) -> Option<&'a T> { + pub fn add_single<'a>( + &mut self, + storage: &'a storage::Storage, + id: Id, + ) -> Option<&'a T> { let resource = storage.get(id).ok()?; self.resources @@ -55,13 +60,13 @@ impl StatelessBindGroupSate { } /// Stores all resource state within a command buffer or device. -pub(crate) struct StatelessTracker { +pub(crate) struct StatelessTracker { metadata: ResourceMetadata, _phantom: PhantomData<(T, Id)>, } -impl StatelessTracker { +impl StatelessTracker { pub fn new() -> Self { Self { metadata: ResourceMetadata::new(), @@ -117,7 +122,11 @@ impl StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. 
- pub fn add_single<'a>(&mut self, storage: &'a hub::Storage, id: Id) -> Option<&'a T> { + pub fn add_single<'a>( + &mut self, + storage: &'a storage::Storage, + id: Id, + ) -> Option<&'a T> { let item = storage.get(id).ok()?; let (index32, epoch, _) = id.unzip(); diff --git a/third_party/rust/wgpu-core/src/track/texture.rs b/third_party/rust/wgpu-core/src/track/texture.rs index 6db2bab725fa..8b0926adf1a6 100644 --- a/third_party/rust/wgpu-core/src/track/texture.rs +++ b/third_party/rust/wgpu-core/src/track/texture.rs @@ -21,9 +21,10 @@ use super::{range::RangedStates, PendingTransition}; use crate::{ - hub, + hal_api::HalApi, id::{TextureId, TypedId, Valid}, resource::Texture, + storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, @@ -148,7 +149,7 @@ impl ComplexTextureState { } /// Stores all the textures that a bind group stores. -pub(crate) struct TextureBindGroupState { +pub(crate) struct TextureBindGroupState { textures: Vec<( Valid, Option, @@ -158,7 +159,7 @@ pub(crate) struct TextureBindGroupState { _phantom: PhantomData, } -impl TextureBindGroupState { +impl TextureBindGroupState { pub fn new() -> Self { Self { textures: Vec::new(), @@ -184,7 +185,7 @@ impl TextureBindGroupState { /// Adds the given resource with the given state. pub fn add_single<'a>( &mut self, - storage: &'a hub::Storage, TextureId>, + storage: &'a storage::Storage, TextureId>, id: TextureId, ref_count: RefCount, selector: Option, @@ -219,13 +220,13 @@ impl TextureStateSet { /// Stores all texture state within a single usage scope. #[derive(Debug)] -pub(crate) struct TextureUsageScope { +pub(crate) struct TextureUsageScope { set: TextureStateSet, metadata: ResourceMetadata, } -impl TextureUsageScope { +impl TextureUsageScope { pub fn new() -> Self { Self { set: TextureStateSet::new(), @@ -278,7 +279,7 @@ impl TextureUsageScope { /// the vectors will be extended. A call to set_size is not needed. pub fn merge_usage_scope( &mut self, - storage: &hub::Storage, TextureId>, + storage: &storage::Storage, TextureId>, scope: &Self, ) -> Result<(), UsageConflict> { let incoming_size = scope.set.simple.len(); @@ -325,7 +326,7 @@ impl TextureUsageScope { /// method is called. pub unsafe fn merge_bind_group( &mut self, - storage: &hub::Storage, TextureId>, + storage: &storage::Storage, TextureId>, bind_group: &TextureBindGroupState, ) -> Result<(), UsageConflict> { for &(id, ref selector, ref ref_count, state) in &bind_group.textures { @@ -350,7 +351,7 @@ impl TextureUsageScope { /// method is called. pub unsafe fn merge_single( &mut self, - storage: &hub::Storage, TextureId>, + storage: &storage::Storage, TextureId>, id: Valid, selector: Option, ref_count: &RefCount, @@ -382,7 +383,7 @@ impl TextureUsageScope { } /// Stores all texture state within a command buffer or device. -pub(crate) struct TextureTracker { +pub(crate) struct TextureTracker { start_set: TextureStateSet, end_set: TextureStateSet, @@ -392,7 +393,7 @@ pub(crate) struct TextureTracker { _phantom: PhantomData, } -impl TextureTracker { +impl TextureTracker { pub fn new() -> Self { Self { start_set: TextureStateSet::new(), @@ -563,7 +564,7 @@ impl TextureTracker { /// the vectors will be extended. A call to set_size is not needed. 
pub fn set_from_tracker( &mut self, - storage: &hub::Storage, TextureId>, + storage: &storage::Storage, TextureId>, tracker: &Self, ) { let incoming_size = tracker.start_set.simple.len(); @@ -609,7 +610,7 @@ impl TextureTracker { /// the vectors will be extended. A call to set_size is not needed. pub fn set_from_usage_scope( &mut self, - storage: &hub::Storage, TextureId>, + storage: &storage::Storage, TextureId>, scope: &TextureUsageScope, ) { let incoming_size = scope.set.simple.len(); @@ -661,7 +662,7 @@ impl TextureTracker { /// method is called. pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, - storage: &hub::Storage, TextureId>, + storage: &storage::Storage, TextureId>, scope: &mut TextureUsageScope, bind_group_state: &TextureBindGroupState, ) { @@ -875,8 +876,8 @@ impl<'a> TextureStateProvider<'a> { /// Helper function that gets what is needed from the texture storage /// out of the texture storage. #[inline(always)] -unsafe fn texture_data_from_texture( - storage: &hub::Storage, TextureId>, +unsafe fn texture_data_from_texture( + storage: &storage::Storage, TextureId>, index32: u32, ) -> (&LifeGuard, &TextureSelector) { let texture = unsafe { storage.get_unchecked(index32) }; @@ -893,7 +894,7 @@ unsafe fn texture_data_from_texture( /// Indexes must be valid indexes into all arrays passed in /// to this function, either directly or via metadata or provider structs. #[inline(always)] -unsafe fn insert_or_merge( +unsafe fn insert_or_merge( texture_data: (&LifeGuard, &TextureSelector), current_state_set: &mut TextureStateSet, resource_metadata: &mut ResourceMetadata, @@ -951,7 +952,7 @@ unsafe fn insert_or_merge( /// Indexes must be valid indexes into all arrays passed in /// to this function, either directly or via metadata or provider structs. 
#[inline(always)] -unsafe fn insert_or_barrier_update( +unsafe fn insert_or_barrier_update( texture_data: (&LifeGuard, &TextureSelector), start_state: Option<&mut TextureStateSet>, current_state_set: &mut TextureStateSet, @@ -1008,7 +1009,7 @@ unsafe fn insert_or_barrier_update( } #[inline(always)] -unsafe fn insert( +unsafe fn insert( texture_data: Option<(&LifeGuard, &TextureSelector)>, start_state: Option<&mut TextureStateSet>, end_state: &mut TextureStateSet, @@ -1096,7 +1097,7 @@ unsafe fn insert( } #[inline(always)] -unsafe fn merge( +unsafe fn merge( texture_data: (&LifeGuard, &TextureSelector), current_state_set: &mut TextureStateSet, index32: u32, diff --git a/third_party/rust/wgpu-core/src/validation.rs b/third_party/rust/wgpu-core/src/validation.rs index eadc0005d007..83cb90530a45 100644 --- a/third_party/rust/wgpu-core/src/validation.rs +++ b/third_party/rust/wgpu-core/src/validation.rs @@ -915,7 +915,7 @@ impl Interface { size: wgt::BufferSize::new(stride as u64).unwrap(), }, ref other => ResourceType::Buffer { - size: wgt::BufferSize::new(other.size(&module.constants) as u64).unwrap(), + size: wgt::BufferSize::new(other.size(module.to_ctx()) as u64).unwrap(), }, }; let handle = resources.append( diff --git a/third_party/rust/wgpu-hal/.cargo-checksum.json b/third_party/rust/wgpu-hal/.cargo-checksum.json index 74450cad9549..4f1378b169de 100644 --- a/third_party/rust/wgpu-hal/.cargo-checksum.json +++ b/third_party/rust/wgpu-hal/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"fbdbc7be59db718e8925f31802a7729abd732bbe01fbcb36409ac2e82f356976","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"78377f5876fafd77963eff7e3c2ba3a7e3ad5cf9201b09ed5612e49c2288eb18","examples/halmark/main.rs":"2f80ca7029cd86c96cf1251d486c508f8c14e9de76f541e2325e5504ad11c656","examples/halmark/shader.wgsl":"26c256ec36d6f0e9a1647431ca772766bee4382d64eaa718ba7b488dcfb6bcca","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"3e2701628e6b525a19313ddeb15a50af87311afcfa1869cddb9790546ae4c0b9","src/auxil/dxgi/conv.rs":"c1e8d584e408712097d075455375e04d1895a0ff9a494305c16d658277ce7b90","src/auxil/dxgi/exception.rs":"f885049c33535134f58a0911b18b87c31d522ec27b4fefd4bdd047ad2c3422d0","src/auxil/dxgi/factory.rs":"e3d3984ede07235096e4d963ba0425ce950070d7e7226679aa1d4ac65d4ff336","src/auxil/dxgi/mod.rs":"a202564d9ac97530b16a234b87d180cd345aae705e082a9b1177dcde813645f9","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/dxgi/time.rs":"b6f966b250e9424d5d7e4065f2108cba87197c1e30baae6d87083055d1bc5a4b","src/auxil/mod.rs":"c38f0d3b10804d1c1d7e3b8e4a975fcb87271f8b1904f2f4a3153cceddafb56b","src/auxil/renderdoc.rs":"c2f849f70f576b0c9b0d32dd155b6a6353f74dff59cbeeaa994a12789d047c0f","src/dx11/adapter.rs":"621c7d06ebac419f17f88c64b1a7cd2930499f2826f62a209cd93c42c47c316c","src/dx11/command.rs":"cdad8dcdb800acba56c931f1726ddada652af18db0f066465af643f82a034492","src/dx11/device.rs":"96ccd8d6645839f3daf832ddf569676643ac92d1b332ab9a0c8563b3b5026295","src/dx11/instance.rs":"8a552abf39a455208c1e406d4161e67c60c83c07bd3821256e05268d796287c8","src/dx11/library.rs":"c53e7ccd92590836e2e689a6e6a792c94d392fd41fc31771a27389115a21fe36","src/dx11/mod.rs":"9684e99b5302d4219014bacd8271cc9e068d19a2f42fb341a55d3b777a84d57c","src/dx12/adapter.rs":"df5a07cee905bc15fe588085bb2df753b1100f9476fa5ba1ea9da43
190f59e1c","src/dx12/command.rs":"798916d9c0c517a8634697a3f59926184d39354aa850147328bae2e2ffdfb510","src/dx12/conv.rs":"4746ab2e46d8fbf4eac66c4dde11ca932f7ca9ba8177b5316eb12710a89700c2","src/dx12/descriptor.rs":"8765855f9bfd54d6884f1a0da0a3c859deff821559d04aec8bd307fe0756f715","src/dx12/device.rs":"8cd2f86d61052d234c9d833a6ef814b4c09310edacebc7dfdbcae189026b31b2","src/dx12/instance.rs":"bc84c923fad598a3a3a2f5ba291232d24ecab2aff8f43cafbcd0fe74922fb874","src/dx12/mod.rs":"a4a48b924b1d97a8f362dcc9cfdd589a4145d56cda040802d25d36d28e99762a","src/dx12/shader_compilation.rs":"18541616bf390d66695a1d66d6bf6df697f81cb98384ace56924fda567508bb8","src/dx12/suballocation.rs":"1893a77dac08d0cadd16677b4c80b27220ad6057d81069c2524de6cee84520ac","src/dx12/types.rs":"29b11268d11c1b7437de3dac403af574ec8aa84fd9954a95a16d80f5808be44d","src/dx12/view.rs":"c09241520377e6a47ad8822c69075e6e16e6c558e18bb64398c8d7424fc57dcf","src/empty.rs":"78c5cc9a37e7401a2618dcabbe38f012de45e5a0b4793cfc70ef7a6874087b15","src/gles/adapter.rs":"1f74dcf1a8823c429d9b0d53037d281de3fc1e70b6d27e403426b7ad7ddbf728","src/gles/command.rs":"ca3e45324cc971801334d16a7613c11b375c3ea4622b95d65c855767c62dab90","src/gles/conv.rs":"84164c6b93afdbd07b037cdbee4c94dd40df0a21d42703481d0c293e92a13efd","src/gles/device.rs":"c9e6b108115dc73841b79b0d65ebfbf9f9629e19eb89a67aa86b8d471a31fc84","src/gles/egl.rs":"ef6c2fe12e655551adfd30eb0ac3ff69da203af87142219926ceca064fe51ac2","src/gles/emscripten.rs":"19bb73a9d140645f3f32cd48b002151711a9b8456e213eab5f3a2be79239e147","src/gles/mod.rs":"f641feae35a52ca66aa9364fe57d7232d2b540be5e72bbb3bf5b268156bfc4cf","src/gles/queue.rs":"d8bdfbe04987ce99cb1b1cb7393a368d812dd78d1753334417237c97229a1b4b","src/gles/shaders/clear.frag":"aac702eed9ece5482db5ba6783a678b119a5e7802b1ecf93f4975dee8acab0b3","src/gles/shaders/clear.vert":"8f636168e1da2cac48091c466a543c3b09fb4a0dd8c60c1c9bf34cc890766740","src/gles/shaders/srgb_present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/srgb_present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"3de8dab74d06e0633618ac486c95b6c1d446da107f514be30742899c7686c9ea","src/lib.rs":"b84c4ea9926522587dd0de6b30aac9fd6693840755fa959a184de825fb59d257","src/metal/adapter.rs":"d99636bf9ca166ea7d224161870b9732363583e958fa8c747043e3894968560b","src/metal/command.rs":"3c9492241d88d8fe70966bbda0bf1b17e8c8429ae8cc0dfa374650ad4bf3df76","src/metal/conv.rs":"e4aeafcddc75b2f9b54245faedaf0566f1e63681807ae786ceb46ac35f0e13bf","src/metal/device.rs":"88f6c1a2a9e7b9a53919391303952d9814d0906bc753258d45b23993791f8ab2","src/metal/mod.rs":"b3bbda971a93244f817fc65a9bfa9f584f3f4acc1f5d75aac061ee93cbbd3259","src/metal/surface.rs":"1659973bcbf7fae9761848db0fec5f393841a4407188df0f82260ebcd180f047","src/metal/time.rs":"c32d69f30e846dfcc0e39e01097fb80df63b2bebb6586143bb62494999850246","src/vulkan/adapter.rs":"cbdf570f4379d7d4fdac58811575c54ee05cc062ecc3f53b7f3ce7c16f6ff6c0","src/vulkan/command.rs":"ed82a645664a8e691a0bac18a039f2ba33ebb31a064ba1b8a255586581ab5558","src/vulkan/conv.rs":"313bd4b00b4c5d04823be1035964d55b9d8491ece1b9ce1fd15c65e46aa56bd3","src/vulkan/device.rs":"c65d03b542afee665f0f957d32f78b4c7acfeb95718bbcb06768b4f78a4b1c5f","src/vulkan/instance.rs":"02c51b4366dd7bc9876b1c132007a2099a06fa9af0cef6a97d77b12abd6c6f49","src/vulkan/mod.rs":"5161384ad5a19bb0451d6ac51b400dddbd295c38cecb4120611bb7cc78c2d30e"},"package":null} \ No newline at end of file 
+{"files":{"Cargo.toml":"7b65030e5341336fc6d1ce3ae41859ada8c341076240ff4797bfb5a4ec661a04","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"78377f5876fafd77963eff7e3c2ba3a7e3ad5cf9201b09ed5612e49c2288eb18","examples/halmark/main.rs":"246e117a481144bfe1c39676ecb5aaf9fc9a6a42df6a17af27ae894f9637e537","examples/halmark/shader.wgsl":"26c256ec36d6f0e9a1647431ca772766bee4382d64eaa718ba7b488dcfb6bcca","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"a98d82eadf0b0ade89de6d41402e03f6e1b177d27a9d93c9ae9c5c320f90796c","src/auxil/dxgi/conv.rs":"c1e8d584e408712097d075455375e04d1895a0ff9a494305c16d658277ce7b90","src/auxil/dxgi/exception.rs":"f885049c33535134f58a0911b18b87c31d522ec27b4fefd4bdd047ad2c3422d0","src/auxil/dxgi/factory.rs":"e3d3984ede07235096e4d963ba0425ce950070d7e7226679aa1d4ac65d4ff336","src/auxil/dxgi/mod.rs":"a202564d9ac97530b16a234b87d180cd345aae705e082a9b1177dcde813645f9","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/dxgi/time.rs":"b6f966b250e9424d5d7e4065f2108cba87197c1e30baae6d87083055d1bc5a4b","src/auxil/mod.rs":"c38f0d3b10804d1c1d7e3b8e4a975fcb87271f8b1904f2f4a3153cceddafb56b","src/auxil/renderdoc.rs":"c2f849f70f576b0c9b0d32dd155b6a6353f74dff59cbeeaa994a12789d047c0f","src/dx11/adapter.rs":"621c7d06ebac419f17f88c64b1a7cd2930499f2826f62a209cd93c42c47c316c","src/dx11/command.rs":"cdad8dcdb800acba56c931f1726ddada652af18db0f066465af643f82a034492","src/dx11/device.rs":"96ccd8d6645839f3daf832ddf569676643ac92d1b332ab9a0c8563b3b5026295","src/dx11/instance.rs":"8a552abf39a455208c1e406d4161e67c60c83c07bd3821256e05268d796287c8","src/dx11/library.rs":"c53e7ccd92590836e2e689a6e6a792c94d392fd41fc31771a27389115a21fe36","src/dx11/mod.rs":"9684e99b5302d4219014bacd8271cc9e068d19a2f42fb341a55d3b777a84d57c","src/dx12/adapter.rs":"604f85e7b46347203d116bc0422c0134fafe1ee00e41fd70ffbc27a6c7a6b22e","src/dx12/command.rs":"798916d9c0c517a8634697a3f59926184d39354aa850147328bae2e2ffdfb510","src/dx12/conv.rs":"4746ab2e46d8fbf4eac66c4dde11ca932f7ca9ba8177b5316eb12710a89700c2","src/dx12/descriptor.rs":"8765855f9bfd54d6884f1a0da0a3c859deff821559d04aec8bd307fe0756f715","src/dx12/device.rs":"8cd2f86d61052d234c9d833a6ef814b4c09310edacebc7dfdbcae189026b31b2","src/dx12/instance.rs":"bc84c923fad598a3a3a2f5ba291232d24ecab2aff8f43cafbcd0fe74922fb874","src/dx12/mod.rs":"a4a48b924b1d97a8f362dcc9cfdd589a4145d56cda040802d25d36d28e99762a","src/dx12/shader_compilation.rs":"18541616bf390d66695a1d66d6bf6df697f81cb98384ace56924fda567508bb8","src/dx12/suballocation.rs":"1893a77dac08d0cadd16677b4c80b27220ad6057d81069c2524de6cee84520ac","src/dx12/types.rs":"29b11268d11c1b7437de3dac403af574ec8aa84fd9954a95a16d80f5808be44d","src/dx12/view.rs":"c09241520377e6a47ad8822c69075e6e16e6c558e18bb64398c8d7424fc57dcf","src/empty.rs":"78c5cc9a37e7401a2618dcabbe38f012de45e5a0b4793cfc70ef7a6874087b15","src/gles/adapter.rs":"40e083dbaf1e67ed6e869693686dc0244c03dbe4a82a3a1fc4703eb696959651","src/gles/command.rs":"ca3e45324cc971801334d16a7613c11b375c3ea4622b95d65c855767c62dab90","src/gles/conv.rs":"84164c6b93afdbd07b037cdbee4c94dd40df0a21d42703481d0c293e92a13efd","src/gles/device.rs":"c9e6b108115dc73841b79b0d65ebfbf9f9629e19eb89a67aa86b8d471a31fc84","src/gles/egl.rs":"54e8415c641571ed508602673439ecaa420804f75cc8f6631c96252dd84317eb","src/gles/emscripten.rs":"19bb73a9d140645f3f32cd48b002151711a9b8456e
213eab5f3a2be79239e147","src/gles/mod.rs":"f641feae35a52ca66aa9364fe57d7232d2b540be5e72bbb3bf5b268156bfc4cf","src/gles/queue.rs":"de443af27a89e491f19fba8042a1d91aea0adda7709959f4762b15b850151a94","src/gles/shaders/clear.frag":"aac702eed9ece5482db5ba6783a678b119a5e7802b1ecf93f4975dee8acab0b3","src/gles/shaders/clear.vert":"8f636168e1da2cac48091c466a543c3b09fb4a0dd8c60c1c9bf34cc890766740","src/gles/shaders/srgb_present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/srgb_present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"3de8dab74d06e0633618ac486c95b6c1d446da107f514be30742899c7686c9ea","src/lib.rs":"b84c4ea9926522587dd0de6b30aac9fd6693840755fa959a184de825fb59d257","src/metal/adapter.rs":"d99636bf9ca166ea7d224161870b9732363583e958fa8c747043e3894968560b","src/metal/command.rs":"05bfa64dad2ed988400299bf121db16a827d379cedb01bdca8c9c4b883849472","src/metal/conv.rs":"e4aeafcddc75b2f9b54245faedaf0566f1e63681807ae786ceb46ac35f0e13bf","src/metal/device.rs":"9805324e022c961fc580a31ec8f50d33cf0f87f8bc698f7fbe6a1ed18702245f","src/metal/mod.rs":"1665ed043146e199ac75fab3612f11bcfd117bd4d59245c05b0b75b2b15280e7","src/metal/surface.rs":"1659973bcbf7fae9761848db0fec5f393841a4407188df0f82260ebcd180f047","src/metal/time.rs":"c32d69f30e846dfcc0e39e01097fb80df63b2bebb6586143bb62494999850246","src/vulkan/adapter.rs":"cbdf570f4379d7d4fdac58811575c54ee05cc062ecc3f53b7f3ce7c16f6ff6c0","src/vulkan/command.rs":"f3a84dc73f93297247882e3f253df8e4239ae40326a00844b4ea14caf176a4a9","src/vulkan/conv.rs":"284f002063260811ba3fcfe75d6dc1c4c2eb2bc8b6622eac781157b7683478cd","src/vulkan/device.rs":"c65d03b542afee665f0f957d32f78b4c7acfeb95718bbcb06768b4f78a4b1c5f","src/vulkan/instance.rs":"72b7be43bdbe073181a29b93a9d829671e445f4316f389d76ef231a4ffc16e6f","src/vulkan/mod.rs":"238645222954994d117728d98d74b8dade1d4f4fe505592a3335f5b267b5fab6"},"package":null} \ No newline at end of file diff --git a/third_party/rust/wgpu-hal/Cargo.toml b/third_party/rust/wgpu-hal/Cargo.toml index 61471209c112..5a394840b12d 100644 --- a/third_party/rust/wgpu-hal/Cargo.toml +++ b/third_party/rust/wgpu-hal/Cargo.toml @@ -58,13 +58,13 @@ rustc-hash = "1.1" thiserror = "1" [dependencies.glow] -version = "0.12.1" +version = "0.12.2" optional = true [dependencies.naga] version = "0.12.0" git = "https://github.com/gfx-rs/naga" -rev = "b99d58ea435090e561377949f428bce2c18451bb" +rev = "76003dc0035d53a474d366dcdf49d2e4d12e921f" features = ["clone"] [dependencies.profiling] @@ -77,13 +77,14 @@ path = "../wgpu-types" package = "wgpu-types" [dev-dependencies] +cfg-if = "1" env_logger = "0.10" winit = "0.27.1" [dev-dependencies.naga] version = "0.12.0" git = "https://github.com/gfx-rs/naga" -rev = "b99d58ea435090e561377949f428bce2c18451bb" +rev = "76003dc0035d53a474d366dcdf49d2e4d12e921f" features = ["wgsl-in"] [features] @@ -121,7 +122,6 @@ gles = [ metal = [ "naga/msl-out", "block", - "foreign-types", ] renderdoc = [ "libloading", @@ -138,8 +138,8 @@ vulkan = [ windows_rs = ["gpu-allocator"] [target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dependencies] -js-sys = "0.3.61" -wasm-bindgen = "0.2.84" +js-sys = "0.3.63" +wasm-bindgen = "0.2.86" [target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dependencies.web-sys] version = "0.3.61" @@ -152,23 +152,19 @@ features = [ [target."cfg(any(target_os=\"macos\", target_os=\"ios\"))".dependencies] core-graphics-types = "0.1" -metal = "0.24.0" +metal = "0.25.0" objc = "0.2.5" 
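+# Note: `metal` 0.25 re-exports `foreign_types` (imported as `metal::foreign_types`
+# elsewhere in this patch), which is why the direct optional `foreign-types`
+# dependency below is dropped.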
[target."cfg(any(target_os=\"macos\", target_os=\"ios\"))".dependencies.block] version = "0.1" optional = true -[target."cfg(any(target_os=\"macos\", target_os=\"ios\"))".dependencies.foreign-types] -version = "0.3" -optional = true - [target."cfg(not(target_arch = \"wasm32\"))".dependencies.ash] -version = "0.37.2" +version = "0.37.3" optional = true [target."cfg(not(target_arch = \"wasm32\"))".dependencies.gpu-alloc] -version = "0.5" +version = "0.6" optional = true [target."cfg(not(target_arch = \"wasm32\"))".dependencies.gpu-descriptor] diff --git a/third_party/rust/wgpu-hal/examples/halmark/main.rs b/third_party/rust/wgpu-hal/examples/halmark/main.rs index 588de610633e..61c1584a2562 100644 --- a/third_party/rust/wgpu-hal/examples/halmark/main.rs +++ b/third_party/rust/wgpu-hal/examples/halmark/main.rs @@ -751,26 +751,28 @@ impl Example { } } -#[cfg(all(feature = "metal"))] -type Api = hal::api::Metal; -#[cfg(all(feature = "vulkan", not(feature = "metal")))] -type Api = hal::api::Vulkan; -#[cfg(all(feature = "gles", not(feature = "metal"), not(feature = "vulkan")))] -type Api = hal::api::Gles; -#[cfg(all( - feature = "dx12", - not(feature = "metal"), - not(feature = "vulkan"), - not(feature = "gles") -))] -type Api = hal::api::Dx12; -#[cfg(not(any( - feature = "metal", - feature = "vulkan", - feature = "gles", - feature = "dx12" -)))] -type Api = hal::api::Empty; +cfg_if::cfg_if! { + // Apple + Metal + if #[cfg(all(any(target_os = "macos", target_os = "ios"), feature = "metal"))] { + type Api = hal::api::Metal; + } + // Wasm + Vulkan + else if #[cfg(all(not(target_arch = "wasm32"), feature = "vulkan"))] { + type Api = hal::api::Vulkan; + } + // Windows + DX12 + else if #[cfg(all(windows, feature = "dx12"))] { + type Api = hal::api::Dx12; + } + // Anything + GLES + else if #[cfg(feature = "gles")] { + type Api = hal::api::Gles; + } + // Fallback + else { + type Api = hal::api::Empty; + } +} fn main() { env_logger::init(); diff --git a/third_party/rust/wgpu-hal/examples/raw-gles.rs b/third_party/rust/wgpu-hal/examples/raw-gles.rs index d9dfc492faa5..1bf2ead0f565 100644 --- a/third_party/rust/wgpu-hal/examples/raw-gles.rs +++ b/third_party/rust/wgpu-hal/examples/raw-gles.rs @@ -119,6 +119,7 @@ fn main() { #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] fn main() {} +#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] fn fill_screen(exposed: &hal::ExposedAdapter, width: u32, height: u32) { use hal::{Adapter as _, CommandEncoder as _, Device as _, Queue as _}; diff --git a/third_party/rust/wgpu-hal/src/dx12/adapter.rs b/third_party/rust/wgpu-hal/src/dx12/adapter.rs index 13530afb3eb5..805d77376e7a 100644 --- a/third_party/rust/wgpu-hal/src/dx12/adapter.rs +++ b/third_party/rust/wgpu-hal/src/dx12/adapter.rs @@ -74,6 +74,29 @@ impl super::Adapter { profiling::scope!("feature queries"); + // Detect the highest supported feature level. 
+        let d3d_feature_level = [
+            d3d12::FeatureLevel::L12_1,
+            d3d12::FeatureLevel::L12_0,
+            d3d12::FeatureLevel::L11_1,
+            d3d12::FeatureLevel::L11_0,
+        ];
+        let mut device_levels: d3d12_ty::D3D12_FEATURE_DATA_FEATURE_LEVELS =
+            unsafe { mem::zeroed() };
+        device_levels.NumFeatureLevels = d3d_feature_level.len() as u32;
+        device_levels.pFeatureLevelsRequested = d3d_feature_level.as_ptr().cast();
+        unsafe {
+            device.CheckFeatureSupport(
+                d3d12_ty::D3D12_FEATURE_FEATURE_LEVELS,
+                &mut device_levels as *mut _ as *mut _,
+                mem::size_of::<d3d12_ty::D3D12_FEATURE_DATA_FEATURE_LEVELS>() as _,
+            )
+        };
+        // This cast should never fail because we only requested feature levels that are already in the enum.
+        let max_feature_level =
+            d3d12::FeatureLevel::try_from(device_levels.MaxSupportedFeatureLevel)
+                .expect("Unexpected feature level");
+
         // We have found a possible adapter.
         // Acquire the device information.
         let mut desc: dxgi1_2::DXGI_ADAPTER_DESC2 = unsafe { mem::zeroed() };
@@ -182,11 +205,18 @@ impl super::Adapter {
         // Theoretically vram limited, but in practice 2^20 is the limit
         let tier3_practical_descriptor_limit = 1 << 20;
 
-        let (full_heap_count, _uav_count) = match options.ResourceBindingTier {
-            d3d12_ty::D3D12_RESOURCE_BINDING_TIER_1 => (
-                d3d12_ty::D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1,
-                8, // conservative, is 64 on feature level 11.1
-            ),
+        let (full_heap_count, uav_count) = match options.ResourceBindingTier {
+            d3d12_ty::D3D12_RESOURCE_BINDING_TIER_1 => {
+                let uav_count = match max_feature_level {
+                    d3d12::FeatureLevel::L11_0 => 8,
+                    _ => 64,
+                };
+
+                (
+                    d3d12_ty::D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1,
+                    uav_count,
+                )
+            }
             d3d12_ty::D3D12_RESOURCE_BINDING_TIER_2 => (
                 d3d12_ty::D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_2,
                 64,
@@ -284,9 +314,10 @@ impl super::Adapter {
                     _ => d3d12_ty::D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE,
                 },
                 // these both account towards `uav_count`, but we can't express the limit as a sum
-                max_storage_buffers_per_shader_stage: base.max_storage_buffers_per_shader_stage,
-                max_storage_textures_per_shader_stage: base
-                    .max_storage_textures_per_shader_stage,
+                // of the two, so we divide it by 4 to account for the worst case scenario
+                // (2 shader stages, with both using 16 storage textures and 16 storage buffers)
+                max_storage_buffers_per_shader_stage: uav_count / 4,
+                max_storage_textures_per_shader_stage: uav_count / 4,
                 max_uniform_buffers_per_shader_stage: full_heap_count,
                 max_uniform_buffer_binding_size:
                     d3d12_ty::D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16,
diff --git a/third_party/rust/wgpu-hal/src/gles/adapter.rs b/third_party/rust/wgpu-hal/src/gles/adapter.rs
index b14857ae2293..dac13f27df70 100644
--- a/third_party/rust/wgpu-hal/src/gles/adapter.rs
+++ b/third_party/rust/wgpu-hal/src/gles/adapter.rs
@@ -713,10 +713,12 @@ impl crate::Adapter for super::Adapter {
                 | Tfc::MULTISAMPLE_X16
             } else if max_samples >= 8 {
                 Tfc::MULTISAMPLE_X2 | Tfc::MULTISAMPLE_X4 | Tfc::MULTISAMPLE_X8
-            } else if max_samples >= 4 {
-                Tfc::MULTISAMPLE_X2 | Tfc::MULTISAMPLE_X4
             } else {
-                Tfc::MULTISAMPLE_X2
+                // The lowest supported level in GLES 3.0/WebGL2 is 4X
+                // (see GL_MAX_SAMPLES in https://registry.khronos.org/OpenGL-Refpages/es3.0/html/glGet.xhtml).
+                // On some platforms, like iOS Safari, `get_parameter_i32(MAX_SAMPLES)` returns 0,
+                // so we always fall back to supporting 4x here.
+                Tfc::MULTISAMPLE_X2 | Tfc::MULTISAMPLE_X4
             }
         };
diff --git a/third_party/rust/wgpu-hal/src/gles/egl.rs b/third_party/rust/wgpu-hal/src/gles/egl.rs
index 13e217f9d9a7..e5e2572cf1ec 100644
--- a/third_party/rust/wgpu-hal/src/gles/egl.rs
+++ b/third_party/rust/wgpu-hal/src/gles/egl.rs
@@ -1152,15 +1152,17 @@ impl crate::Surface<super::Api> for Surface {
                 khronos_egl::SINGLE_BUFFER
             },
         ];
-        match self.srgb_kind {
-            SrgbFrameBufferKind::None => {}
-            SrgbFrameBufferKind::Core => {
-                attributes.push(khronos_egl::GL_COLORSPACE);
-                attributes.push(khronos_egl::GL_COLORSPACE_SRGB);
-            }
-            SrgbFrameBufferKind::Khr => {
-                attributes.push(EGL_GL_COLORSPACE_KHR as i32);
-                attributes.push(EGL_GL_COLORSPACE_SRGB_KHR as i32);
+        if config.format.is_srgb() {
+            match self.srgb_kind {
+                SrgbFrameBufferKind::None => {}
+                SrgbFrameBufferKind::Core => {
+                    attributes.push(khronos_egl::GL_COLORSPACE);
+                    attributes.push(khronos_egl::GL_COLORSPACE_SRGB);
+                }
+                SrgbFrameBufferKind::Khr => {
+                    attributes.push(EGL_GL_COLORSPACE_KHR as i32);
+                    attributes.push(EGL_GL_COLORSPACE_SRGB_KHR as i32);
+                }
             }
         }
         attributes.push(khronos_egl::ATTRIB_NONE as i32);
diff --git a/third_party/rust/wgpu-hal/src/gles/queue.rs b/third_party/rust/wgpu-hal/src/gles/queue.rs
index 87a82524fc52..7e5143501c06 100644
--- a/third_party/rust/wgpu-hal/src/gles/queue.rs
+++ b/third_party/rust/wgpu-hal/src/gles/queue.rs
@@ -1186,7 +1186,7 @@ impl super::Queue {
             C::SetDepthBias(bias) => {
                 if bias.is_enabled() {
                     unsafe { gl.enable(glow::POLYGON_OFFSET_FILL) };
-                    unsafe { gl.polygon_offset(bias.constant as f32, bias.slope_scale) };
+                    unsafe { gl.polygon_offset(bias.slope_scale, bias.constant as f32) };
                 } else {
                     unsafe { gl.disable(glow::POLYGON_OFFSET_FILL) };
                 }
diff --git a/third_party/rust/wgpu-hal/src/metal/command.rs b/third_party/rust/wgpu-hal/src/metal/command.rs
index 866e163a6410..34712859ef7b 100644
--- a/third_party/rust/wgpu-hal/src/metal/command.rs
+++ b/third_party/rust/wgpu-hal/src/metal/command.rs
@@ -127,8 +127,8 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
     unsafe fn end_encoding(&mut self) -> Result<super::CommandBuffer, crate::DeviceError> {
         self.leave_blit();
-        assert!(self.state.render.is_none());
-        assert!(self.state.compute.is_none());
+        debug_assert!(self.state.render.is_none());
+        debug_assert!(self.state.compute.is_none());
         Ok(super::CommandBuffer {
             raw: self.raw_cmd_buf.take().unwrap(),
         })
@@ -355,6 +355,10 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
         self.begin_pass();
         self.state.index = None;
 
+        assert!(self.state.blit.is_none());
+        assert!(self.state.compute.is_none());
+        assert!(self.state.render.is_none());
+
         objc::rc::autoreleasepool(|| {
             let descriptor = metal::RenderPassDescriptor::new();
             //TODO: set visibility results buffer
@@ -910,6 +914,9 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
         self.begin_pass();
 
         let raw = self.raw_cmd_buf.as_ref().unwrap();
+        debug_assert!(self.state.blit.is_none());
+        debug_assert!(self.state.compute.is_none());
+        debug_assert!(self.state.render.is_none());
         objc::rc::autoreleasepool(|| {
             let encoder = raw.new_compute_command_encoder();
             if let Some(label) = desc.label {
diff --git a/third_party/rust/wgpu-hal/src/metal/device.rs b/third_party/rust/wgpu-hal/src/metal/device.rs
index f8a1ad9a9ff9..e5771a4bc725 100644
--- a/third_party/rust/wgpu-hal/src/metal/device.rs
+++ b/third_party/rust/wgpu-hal/src/metal/device.rs
@@ -175,7 +175,7 @@ impl super::Device {
                 match var.space {
                     naga::AddressSpace::WorkGroup => {
                         if !ep_info[var_handle].is_empty() {
-                            let size = module.types[var.ty].inner.size(&module.constants);
+                            let size = module.types[var.ty].inner.size(module.to_ctx());
                             wg_memory_sizes.push(size);
                         }
                     }
@@ -325,7 +325,7 @@ impl crate::Device<super::Api> for super::Device {
         &self,
         desc: &crate::TextureDescriptor,
     ) -> DeviceResult<super::Texture> {
-        use foreign_types::ForeignTypeRef;
+        use metal::foreign_types::ForeignType as _;
 
         let mtl_format = self.shared.private_caps.map_format(desc.format);
 
diff --git a/third_party/rust/wgpu-hal/src/metal/mod.rs b/third_party/rust/wgpu-hal/src/metal/mod.rs
index b77685bd9411..6611f29548a0 100644
--- a/third_party/rust/wgpu-hal/src/metal/mod.rs
+++ b/third_party/rust/wgpu-hal/src/metal/mod.rs
@@ -13,6 +13,11 @@ end of the VS buffer table.
 !*/
 
+// `MTLFeatureSet` is superseded by `MTLGpuFamily`.
+// However, `MTLGpuFamily` is only supported starting with macOS 10.15, whereas our minimum target is macOS 10.13;
+// see https://github.com/gpuweb/gpuweb/issues/1069 for the minimum spec.
+// TODO: Eventually, all deprecated features should be abstracted away and the new API used when available.
+#[allow(deprecated)]
 mod adapter;
 mod command;
 mod conv;
@@ -28,7 +33,7 @@ use std::{
 };
 
 use arrayvec::ArrayVec;
-use foreign_types::ForeignTypeRef as _;
+use metal::foreign_types::ForeignTypeRef as _;
 use parking_lot::Mutex;
 
 #[derive(Clone)]
diff --git a/third_party/rust/wgpu-hal/src/vulkan/command.rs b/third_party/rust/wgpu-hal/src/vulkan/command.rs
index f6c871026cc7..417367689b6c 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/command.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/command.rs
@@ -157,7 +157,11 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
         vk_barriers.clear();
 
         for bar in barriers {
-            let range = conv::map_subresource_range(&bar.range, bar.texture.format);
+            let range = conv::map_subresource_range_combined_aspect(
+                &bar.range,
+                bar.texture.format,
+                &self.device.private_caps,
+            );
             let (src_stage, src_access) = conv::map_texture_usage_to_barrier(bar.usage.start);
             let src_layout = conv::derive_image_layout(bar.usage.start, bar.texture.format);
             src_stages |= src_stage;
diff --git a/third_party/rust/wgpu-hal/src/vulkan/conv.rs b/third_party/rust/wgpu-hal/src/vulkan/conv.rs
index a26f3765b90f..e2398c2689ad 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/conv.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/conv.rs
@@ -598,6 +598,20 @@ pub fn map_subresource_range(
     }
 }
 
+// Special subresource range mapping for dealing with barriers
+// so that we account for the "hidden" depth aspect in emulated Stencil8.
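+// (With `texture_s8` unavailable, Stencil8 is backed by a combined depth-stencil
+// image, and barriers on such an image must generally cover both aspects, not
+// just STENCIL, to keep validation layers happy.)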
+pub(super) fn map_subresource_range_combined_aspect(
+    range: &wgt::ImageSubresourceRange,
+    format: wgt::TextureFormat,
+    private_caps: &super::PrivateCapabilities,
+) -> vk::ImageSubresourceRange {
+    let mut range = map_subresource_range(range, format);
+    if !private_caps.texture_s8 && format == wgt::TextureFormat::Stencil8 {
+        range.aspect_mask |= vk::ImageAspectFlags::DEPTH;
+    }
+    range
+}
+
 pub fn map_subresource_layers(
     base: &crate::TextureCopyBase,
 ) -> (vk::ImageSubresourceLayers, vk::Offset3D) {
diff --git a/third_party/rust/wgpu-hal/src/vulkan/instance.rs b/third_party/rust/wgpu-hal/src/vulkan/instance.rs
index 101f303c16a1..8ae477da9f25 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/instance.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/instance.rs
@@ -14,15 +14,39 @@ unsafe extern "system" fn debug_utils_messenger_callback(
     message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
     message_type: vk::DebugUtilsMessageTypeFlagsEXT,
     callback_data_ptr: *const vk::DebugUtilsMessengerCallbackDataEXT,
-    _user_data: *mut c_void,
+    user_data: *mut c_void,
 ) -> vk::Bool32 {
-    const VUID_VKSWAPCHAINCREATEINFOKHR_IMAGEEXTENT_01274: i32 = 0x7cd0911d;
 
     use std::borrow::Cow;
 
     if thread::panicking() {
         return vk::FALSE;
     }
 
+    let cd = unsafe { &*callback_data_ptr };
+    let user_data = unsafe { &*(user_data as *mut super::DebugUtilsMessengerUserData) };
+
+    const VUID_VKCMDENDDEBUGUTILSLABELEXT_COMMANDBUFFER_01912: i32 = 0x56146426;
+    if cd.message_id_number == VUID_VKCMDENDDEBUGUTILSLABELEXT_COMMANDBUFFER_01912 {
+        // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/5671
+        // Versions 1.3.240 through 1.3.250 return a spurious error here if
+        // the debug range start and end appear in different command buffers.
+        let khronos_validation_layer =
+            std::ffi::CStr::from_bytes_with_nul(b"Khronos Validation Layer\0").unwrap();
+        if user_data.validation_layer_description.as_ref() == khronos_validation_layer
+            && user_data.validation_layer_spec_version >= vk::make_api_version(0, 1, 3, 240)
+            && user_data.validation_layer_spec_version <= vk::make_api_version(0, 1, 3, 250)
+        {
+            return vk::FALSE;
+        }
+    }
+
+    // Silence Vulkan Validation error "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274"
+    // - it's a false positive due to the inherent racy-ness of surface resizing
+    const VUID_VKSWAPCHAINCREATEINFOKHR_IMAGEEXTENT_01274: i32 = 0x7cd0911d;
+    if cd.message_id_number == VUID_VKSWAPCHAINCREATEINFOKHR_IMAGEEXTENT_01274 {
+        return vk::FALSE;
+    }
+
     let level = match message_severity {
         vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Debug,
         vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info,
@@ -31,8 +55,6 @@ unsafe extern "system" fn debug_utils_messenger_callback(
         _ => log::Level::Warn,
     };
 
-    let cd = unsafe { &*callback_data_ptr };
-
     let message_id_name = if cd.p_message_id_name.is_null() {
         Cow::from("")
     } else {
@@ -44,12 +66,6 @@ unsafe extern "system" fn debug_utils_messenger_callback(
         unsafe { CStr::from_ptr(cd.p_message) }.to_string_lossy()
     };
 
-    // Silence Vulkan Validation error "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274"
-    // - it's a false positive due to the inherent racy-ness of surface resizing
-    if cd.message_id_number == VUID_VKSWAPCHAINCREATEINFOKHR_IMAGEEXTENT_01274 {
-        return vk::FALSE;
-    }
-
     let _ = std::panic::catch_unwind(|| {
         log::log!(
             level,
@@ -236,12 +252,16 @@ impl super::Instance {
     /// - `extensions` must be a superset of `required_extensions()` and must be created from the
     ///   same entry, driver_api_version and flags.
     /// - `android_sdk_version` is ignored and can be `0` for all platforms besides Android
+    ///
+    /// If `debug_utils_user_data` is `Some`, then the validation layer is
+    /// available, so create a [`vk::DebugUtilsMessengerEXT`].
     #[allow(clippy::too_many_arguments)]
     pub unsafe fn from_raw(
         entry: ash::Entry,
         raw_instance: ash::Instance,
         driver_api_version: u32,
         android_sdk_version: u32,
+        debug_utils_user_data: Option<super::DebugUtilsMessengerUserData>,
         extensions: Vec<&'static CStr>,
         flags: crate::InstanceFlags,
         has_nv_optimus: bool,
     ) -> Result<Self, crate::InstanceError> {
         log::info!("Instance version: 0x{:x}", driver_api_version);
 
-        let debug_utils = if extensions.contains(&ext::DebugUtils::name()) {
-            log::info!("Enabling debug utils");
-            let extension = ext::DebugUtils::new(&entry, &raw_instance);
-            // having ERROR unconditionally because Vk doesn't like empty flags
-            let mut severity = vk::DebugUtilsMessageSeverityFlagsEXT::ERROR;
-            if log::max_level() >= log::LevelFilter::Debug {
-                severity |= vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE;
+        let debug_utils = if let Some(debug_callback_user_data) = debug_utils_user_data {
+            if extensions.contains(&ext::DebugUtils::name()) {
+                log::info!("Enabling debug utils");
+                // Move the callback data to the heap, to ensure it will never be
+                // moved.
+                let callback_data = Box::new(debug_callback_user_data);
+
+                let extension = ext::DebugUtils::new(&entry, &raw_instance);
+                // having ERROR unconditionally because Vk doesn't like empty flags
+                let mut severity = vk::DebugUtilsMessageSeverityFlagsEXT::ERROR;
+                if log::max_level() >= log::LevelFilter::Debug {
+                    severity |= vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE;
+                }
+                if log::max_level() >= log::LevelFilter::Info {
+                    severity |= vk::DebugUtilsMessageSeverityFlagsEXT::INFO;
+                }
+                if log::max_level() >= log::LevelFilter::Warn {
+                    severity |= vk::DebugUtilsMessageSeverityFlagsEXT::WARNING;
+                }
+                let user_data_ptr: *const super::DebugUtilsMessengerUserData = &*callback_data;
+                let vk_info = vk::DebugUtilsMessengerCreateInfoEXT::builder()
+                    .flags(vk::DebugUtilsMessengerCreateFlagsEXT::empty())
+                    .message_severity(severity)
+                    .message_type(
+                        vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
+                            | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
+                            | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE,
+                    )
+                    .pfn_user_callback(Some(debug_utils_messenger_callback))
+                    .user_data(user_data_ptr as *mut _);
+                let messenger =
+                    unsafe { extension.create_debug_utils_messenger(&vk_info, None) }.unwrap();
+                Some(super::DebugUtils {
+                    extension,
+                    messenger,
+                    callback_data,
+                })
+            } else {
+                log::info!("Debug utils not enabled: extension not listed");
+                None
             }
-            if log::max_level() >= log::LevelFilter::Info {
-                severity |= vk::DebugUtilsMessageSeverityFlagsEXT::INFO;
-            }
-            if log::max_level() >= log::LevelFilter::Warn {
-                severity |= vk::DebugUtilsMessageSeverityFlagsEXT::WARNING;
-            }
-            let vk_info = vk::DebugUtilsMessengerCreateInfoEXT::builder()
-                .flags(vk::DebugUtilsMessengerCreateFlagsEXT::empty())
-                .message_severity(severity)
-                .message_type(
-                    vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
-                        | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
-                        | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE,
-                )
-                .pfn_user_callback(Some(debug_utils_messenger_callback));
-            let messenger =
-                unsafe { extension.create_debug_utils_messenger(&vk_info, None) }.unwrap();
-            Some(super::DebugUtils {
-                extension,
-                messenger,
-            })
         } else {
+            log::info!(
+                "Debug utils not enabled: \
+                debug_utils_user_data not passed to Instance::from_raw"
+            );
             None
         };
 
@@ -543,31 +579,42 @@ impl crate::Instance<super::Api> for super::Instance {
             crate::InstanceError
         })?;
 
+        fn find_layer<'layers>(
+            instance_layers: &'layers [vk::LayerProperties],
+            name: &CStr,
+        ) -> Option<&'layers vk::LayerProperties> {
+            instance_layers
+                .iter()
+                .find(|inst_layer| cstr_from_bytes_until_nul(&inst_layer.layer_name) == Some(name))
+        }
+
         let nv_optimus_layer = CStr::from_bytes_with_nul(b"VK_LAYER_NV_optimus\0").unwrap();
-        let has_nv_optimus = instance_layers.iter().any(|inst_layer| {
-            cstr_from_bytes_until_nul(&inst_layer.layer_name) == Some(nv_optimus_layer)
-        });
+        let has_nv_optimus = find_layer(&instance_layers, nv_optimus_layer).is_some();
 
-        // Check requested layers against the available layers
-        let layers = {
-            let mut layers: Vec<&'static CStr> = Vec::new();
-            if desc.flags.contains(crate::InstanceFlags::VALIDATION) {
-                layers.push(CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap());
+        let mut layers: Vec<&'static CStr> = Vec::new();
+
+        // Request validation layer if asked.
+        let mut debug_callback_user_data = None;
+        if desc.flags.contains(crate::InstanceFlags::VALIDATION) {
+            let validation_layer_name =
+                CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap();
+            if let Some(layer_properties) = find_layer(&instance_layers, validation_layer_name) {
+                layers.push(validation_layer_name);
+                debug_callback_user_data = Some(super::DebugUtilsMessengerUserData {
+                    validation_layer_description: cstr_from_bytes_until_nul(
+                        &layer_properties.description,
+                    )
+                    .unwrap()
+                    .to_owned(),
+                    validation_layer_spec_version: layer_properties.spec_version,
+                });
+            } else {
+                log::warn!(
+                    "InstanceFlags::VALIDATION requested, but unable to find layer: {}",
+                    validation_layer_name.to_string_lossy()
+                );
             }
-
-            // Only keep available layers.
-            layers.retain(|&layer| {
-                if instance_layers.iter().any(|inst_layer| {
-                    cstr_from_bytes_until_nul(&inst_layer.layer_name) == Some(layer)
-                }) {
-                    true
-                } else {
-                    log::warn!("Unable to find layer: {}", layer.to_string_lossy());
-                    false
-                }
-            });
-            layers
-        };
+        }
 
         #[cfg(target_os = "android")]
         let android_sdk_version = {
@@ -619,6 +666,7 @@ impl crate::Instance<super::Api> for super::Instance {
             vk_instance,
             driver_api_version,
             android_sdk_version,
+            debug_callback_user_data,
             extensions,
             desc.flags,
             has_nv_optimus,
diff --git a/third_party/rust/wgpu-hal/src/vulkan/mod.rs b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
index 27200dc4e015..6bea1433593e 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/mod.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
@@ -75,6 +75,27 @@ impl crate::Api for Api {
 struct DebugUtils {
     extension: ext::DebugUtils,
     messenger: vk::DebugUtilsMessengerEXT,
+
+    /// Owning pointer to the debug messenger callback user data.
+    ///
+    /// `InstanceShared::drop` destroys the debug messenger before
+    /// dropping this, so the callback should never receive a dangling
+    /// user data pointer.
+    #[allow(dead_code)]
+    callback_data: Box<DebugUtilsMessengerUserData>,
+}
+
+/// User data needed by `instance::debug_utils_messenger_callback`.
+///
+/// When we create the [`vk::DebugUtilsMessengerEXT`], the `pUserData`
+/// pointer refers to one of these values.
+#[derive(Debug)]
+pub struct DebugUtilsMessengerUserData {
+    /// Validation layer description, from `vk::LayerProperties`.
+    validation_layer_description: std::ffi::CString,
+
+    /// Validation layer specification version, from `vk::LayerProperties`.
+ validation_layer_spec_version: u32, } pub struct InstanceShared { diff --git a/third_party/rust/wgpu-types/.cargo-checksum.json b/third_party/rust/wgpu-types/.cargo-checksum.json index 40c31ddcd561..6c0e3f129a79 100644 --- a/third_party/rust/wgpu-types/.cargo-checksum.json +++ b/third_party/rust/wgpu-types/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"d7e3d65db826d7d01aa7cc071e6d6a08a77cc9dfc9b9b95607fe7df6e53c3c35","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"e7022ccb7c678ccc727821138398788c33fc483147bec2332bbd90900663200f","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"926187278f8bd476f1605c0a7b5a9f6e11fe499f1793c06860370f5b197bcd9b","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"264f1fddc89dc273d6942a36c5240797835f8132a54e189ba7985ca56eff597a","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null} \ No newline at end of file diff --git a/third_party/rust/wgpu-types/Cargo.toml b/third_party/rust/wgpu-types/Cargo.toml index 23e5082cc251..50b4829c34fa 100644 --- a/third_party/rust/wgpu-types/Cargo.toml +++ b/third_party/rust/wgpu-types/Cargo.toml @@ -56,7 +56,7 @@ strict_asserts = [] trace = ["serde"] [target."cfg(target_arch = \"wasm32\")".dependencies] -js-sys = "0.3.60" +js-sys = "0.3.63" [target."cfg(target_arch = \"wasm32\")".dependencies.web-sys] version = "0.3.60" diff --git a/third_party/rust/wgpu-types/src/lib.rs b/third_party/rust/wgpu-types/src/lib.rs index 445931a6e918..7ce5652380d3 100644 --- a/third_party/rust/wgpu-types/src/lib.rs +++ b/third_party/rust/wgpu-types/src/lib.rs @@ -111,6 +111,21 @@ pub enum Backend { BrowserWebGpu = 6, } +impl Backend { + /// Returns the string name of the backend. + pub fn to_str(self) -> &'static str { + match self { + Backend::Empty => "empty", + Backend::Vulkan => "vulkan", + Backend::Metal => "metal", + Backend::Dx12 => "dx12", + Backend::Dx11 => "dx11", + Backend::Gl => "gl", + Backend::BrowserWebGpu => "webgpu", + } + } +} + /// Power Preference when choosing a physical adapter. /// /// Corresponds to [WebGPU `GPUPowerPreference`]( @@ -844,7 +859,7 @@ pub struct Limits { pub max_samplers_per_shader_stage: u32, /// Amount of storage buffers visible in a single shader stage. Defaults to 8. Higher is "better". pub max_storage_buffers_per_shader_stage: u32, - /// Amount of storage textures visible in a single shader stage. Defaults to 8. Higher is "better". + /// Amount of storage textures visible in a single shader stage. Defaults to 4. Higher is "better". pub max_storage_textures_per_shader_stage: u32, /// Amount of uniform buffers visible in a single shader stage. Defaults to 12. Higher is "better". pub max_uniform_buffers_per_shader_stage: u32, @@ -3025,6 +3040,92 @@ impl TextureFormat { } } + /// Returns the number of components this format has. + pub fn components(&self) -> u8 { + self.components_with_aspect(TextureAspect::All) + } + + /// Returns the number of components this format has taking into account the `aspect`. 
+ /// + /// The `aspect` is only relevant for combined depth-stencil formats. + pub fn components_with_aspect(&self, aspect: TextureAspect) -> u8 { + match *self { + Self::R8Unorm + | Self::R8Snorm + | Self::R8Uint + | Self::R8Sint + | Self::R16Unorm + | Self::R16Snorm + | Self::R16Uint + | Self::R16Sint + | Self::R16Float + | Self::R32Uint + | Self::R32Sint + | Self::R32Float => 1, + + Self::Rg8Unorm + | Self::Rg8Snorm + | Self::Rg8Uint + | Self::Rg8Sint + | Self::Rg16Unorm + | Self::Rg16Snorm + | Self::Rg16Uint + | Self::Rg16Sint + | Self::Rg16Float + | Self::Rg32Uint + | Self::Rg32Sint + | Self::Rg32Float => 2, + + Self::Rgba8Unorm + | Self::Rgba8UnormSrgb + | Self::Rgba8Snorm + | Self::Rgba8Uint + | Self::Rgba8Sint + | Self::Bgra8Unorm + | Self::Bgra8UnormSrgb + | Self::Rgba16Unorm + | Self::Rgba16Snorm + | Self::Rgba16Uint + | Self::Rgba16Sint + | Self::Rgba16Float + | Self::Rgba32Uint + | Self::Rgba32Sint + | Self::Rgba32Float => 4, + + Self::Rgb9e5Ufloat | Self::Rg11b10Float => 3, + Self::Rgb10a2Unorm => 4, + + Self::Stencil8 | Self::Depth16Unorm | Self::Depth24Plus | Self::Depth32Float => 1, + + Self::Depth24PlusStencil8 | Self::Depth32FloatStencil8 => match aspect { + TextureAspect::All => 2, + TextureAspect::DepthOnly | TextureAspect::StencilOnly => 1, + }, + + Self::Bc4RUnorm | Self::Bc4RSnorm => 1, + Self::Bc5RgUnorm | Self::Bc5RgSnorm => 2, + Self::Bc6hRgbUfloat | Self::Bc6hRgbFloat => 3, + Self::Bc1RgbaUnorm + | Self::Bc1RgbaUnormSrgb + | Self::Bc2RgbaUnorm + | Self::Bc2RgbaUnormSrgb + | Self::Bc3RgbaUnorm + | Self::Bc3RgbaUnormSrgb + | Self::Bc7RgbaUnorm + | Self::Bc7RgbaUnormSrgb => 4, + + Self::EacR11Unorm | Self::EacR11Snorm => 1, + Self::EacRg11Unorm | Self::EacRg11Snorm => 2, + Self::Etc2Rgb8Unorm | Self::Etc2Rgb8UnormSrgb => 3, + Self::Etc2Rgb8A1Unorm + | Self::Etc2Rgb8A1UnormSrgb + | Self::Etc2Rgba8Unorm + | Self::Etc2Rgba8UnormSrgb => 4, + + Self::Astc { .. } => 4, + } + } + /// Strips the `Srgb` suffix from the given texture format. pub fn remove_srgb_suffix(&self) -> TextureFormat { match *self { @@ -4276,7 +4377,7 @@ impl_bitflags!(BufferUsages); pub struct BufferDescriptor { /// Debug label of a buffer. This will show up in graphics debuggers for easy identification. pub label: L, - /// Size of a buffer. + /// Size of a buffer, in bytes. pub size: BufferAddress, /// Usages of a buffer. If the buffer is used in any way that isn't specified here, the operation /// will panic. @@ -5196,11 +5297,16 @@ pub struct RenderBundleDepthStencil { /// /// This must match the [`RenderPassDepthStencilAttachment::depth_ops`] of the renderpass this render bundle is executed in. /// If depth_ops is `Some(..)` this must be false. If it is `None` this must be true. + /// + /// [`RenderPassDepthStencilAttachment::depth_ops`]: ../wgpu/struct.RenderPassDepthStencilAttachment.html#structfield.depth_ops pub depth_read_only: bool, + /// If the stencil aspect of the depth stencil attachment is going to be written to. /// /// This must match the [`RenderPassDepthStencilAttachment::stencil_ops`] of the renderpass this render bundle is executed in. /// If depth_ops is `Some(..)` this must be false. If it is `None` this must be true. + /// + /// [`RenderPassDepthStencilAttachment::stencil_ops`]: ../wgpu/struct.RenderPassDepthStencilAttachment.html#structfield.stencil_ops pub stencil_read_only: bool, } @@ -5531,18 +5637,41 @@ pub enum BindingType { Buffer { /// Sub-type of the buffer binding. ty: BufferBindingType, + /// Indicates that the binding has a dynamic offset. 
         ///
-        /// One offset must be passed to [`RenderPass::set_bind_group`][RPsbg] for each dynamic
-        /// binding in increasing order of binding number.
+        /// One offset must be passed to [`RenderPass::set_bind_group`][RPsbg]
+        /// for each dynamic binding in increasing order of binding number.
         ///
         /// [RPsbg]: ../wgpu/struct.RenderPass.html#method.set_bind_group
         #[cfg_attr(any(feature = "trace", feature = "replay"), serde(default))]
         has_dynamic_offset: bool,
-        /// Minimum size of the corresponding `BufferBinding` required to match this entry.
-        /// When pipeline is created, the size has to cover at least the corresponding structure in the shader
-        /// plus one element of the unbound array, which can only be last in the structure.
-        /// If `None`, the check is performed at draw call time instead of pipeline and bind group creation.
+
+        /// The minimum size for a [`BufferBinding`] matching this entry, in bytes.
+        ///
+        /// If this is `Some(size)`:
+        ///
+        /// - When calling [`create_bind_group`], the resource at this bind point
+        ///   must be a [`BindingResource::Buffer`] whose effective size is at
+        ///   least `size`.
+        ///
+        /// - When calling [`create_render_pipeline`] or [`create_compute_pipeline`],
+        ///   `size` must be at least the [minimum buffer binding size] for the
+        ///   shader module global at this bind point: large enough to hold the
+        ///   global's value, along with one element of a trailing runtime-sized
+        ///   array, if present.
+        ///
+        /// If this is `None`:
+        ///
+        /// - Each draw or dispatch command checks that the buffer range at this
+        ///   bind point satisfies the [minimum buffer binding size].
+        ///
+        /// [`BufferBinding`]: ../wgpu/struct.BufferBinding.html
+        /// [`create_bind_group`]: ../wgpu/struct.Device.html#method.create_bind_group
+        /// [`BindingResource::Buffer`]: ../wgpu/enum.BindingResource.html#variant.Buffer
+        /// [minimum buffer binding size]: https://www.w3.org/TR/webgpu/#minimum-buffer-binding-size
+        /// [`create_render_pipeline`]: ../wgpu/struct.Device.html#method.create_render_pipeline
+        /// [`create_compute_pipeline`]: ../wgpu/struct.Device.html#method.create_compute_pipeline
         #[cfg_attr(any(feature = "trace", feature = "replay"), serde(default))]
         min_binding_size: Option<BufferSize>,
     },
@@ -5747,7 +5876,7 @@ pub enum ExternalImageSource {
     HTMLCanvasElement(web_sys::HtmlCanvasElement),
     /// Copy from an off-screen canvas.
     ///
-    /// Requies [`DownlevelFlags::EXTERNAL_TEXTURE_OFFSCREEN_CANVAS`]
+    /// Requires [`DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES`]
     OffscreenCanvas(web_sys::OffscreenCanvas),
 }
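A minimal standalone sketch of the user-data ownership pattern the vulkan/instance.rs and vulkan/mod.rs changes above rely on (the names here are illustrative stand-ins, not the wgpu-hal API): the callback data is boxed so its address stays stable, a raw pointer to the heap allocation is registered with the driver, and the callback turns that pointer back into a shared reference. The `Box` must outlive the messenger, which is why `DebugUtils` now owns `callback_data`.

use std::ffi::c_void;

/// Illustrative stand-in for `DebugUtilsMessengerUserData`.
struct MessengerUserData {
    validation_layer_description: String,
    validation_layer_spec_version: u32,
}

/// Stand-in for the `extern "system"` messenger callback: it recovers
/// the user data from the raw pointer handed back on every message.
unsafe fn messenger_callback(user_data: *mut c_void) {
    let data = unsafe { &*(user_data as *const MessengerUserData) };
    println!(
        "layer: {} (spec version {:#x})",
        data.validation_layer_description, data.validation_layer_spec_version
    );
}

fn main() {
    // Box the user data so its address never changes; this mirrors the
    // `callback_data: Box<DebugUtilsMessengerUserData>` field kept in `DebugUtils`.
    let callback_data = Box::new(MessengerUserData {
        validation_layer_description: "Khronos Validation Layer".to_owned(),
        // Same encoding as vk::make_api_version(0, 1, 3, 250).
        validation_layer_spec_version: (1 << 22) | (3 << 12) | 250,
    });
    let user_data_ptr: *const MessengerUserData = &*callback_data;

    // In the real code this pointer goes into
    // DebugUtilsMessengerCreateInfoEXT::user_data(); here we just invoke the
    // callback directly. `callback_data` must stay alive until the messenger
    // is destroyed, or the driver would be left with a dangling pointer.
    unsafe { messenger_callback(user_data_ptr as *mut c_void) };
}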