Bug 1923913 - build(webgpu): update WGPU to d70ef62e9e0683789f745c6a4354495f39354c15 r=webgpu-reviewers,supply-chain-reviewers,teoxoy

Differential Revision: https://phabricator.services.mozilla.com/D225272
This commit is contained in:
Erich Gubler 2024-10-15 19:48:12 +00:00
parent 7a485af13f
commit 9d9039f9a0
28 changed files with 666 additions and 681 deletions

View File

@ -25,9 +25,9 @@ git = "https://github.com/franziskuskiefer/cose-rust"
rev = "43c22248d136c8b38fe42ea709d08da6355cf04b"
replace-with = "vendored-sources"
[source."git+https://github.com/gfx-rs/wgpu?rev=ee0d1703e5f4a267ce9b87d50b824190b45b5a79"]
[source."git+https://github.com/gfx-rs/wgpu?rev=d70ef62e9e0683789f745c6a4354495f39354c15"]
git = "https://github.com/gfx-rs/wgpu"
rev = "ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
rev = "d70ef62e9e0683789f745c6a4354495f39354c15"
replace-with = "vendored-sources"
[source."git+https://github.com/hsivonen/any_all_workaround?rev=7fb1b7034c9f172aade21ee1c8554e8d8a48af80"]

8
Cargo.lock generated
View File

@ -4217,7 +4217,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "22.0.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=ee0d1703e5f4a267ce9b87d50b824190b45b5a79#ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
source = "git+https://github.com/gfx-rs/wgpu?rev=d70ef62e9e0683789f745c6a4354495f39354c15#d70ef62e9e0683789f745c6a4354495f39354c15"
dependencies = [
"arrayvec",
"bit-set",
@ -7014,7 +7014,7 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "22.0.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=ee0d1703e5f4a267ce9b87d50b824190b45b5a79#ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
source = "git+https://github.com/gfx-rs/wgpu?rev=d70ef62e9e0683789f745c6a4354495f39354c15#d70ef62e9e0683789f745c6a4354495f39354c15"
dependencies = [
"arrayvec",
"bit-vec",
@ -7039,7 +7039,7 @@ dependencies = [
[[package]]
name = "wgpu-hal"
version = "22.0.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=ee0d1703e5f4a267ce9b87d50b824190b45b5a79#ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
source = "git+https://github.com/gfx-rs/wgpu?rev=d70ef62e9e0683789f745c6a4354495f39354c15#d70ef62e9e0683789f745c6a4354495f39354c15"
dependencies = [
"android_system_properties",
"arrayvec",
@ -7078,7 +7078,7 @@ dependencies = [
[[package]]
name = "wgpu-types"
version = "22.0.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=ee0d1703e5f4a267ce9b87d50b824190b45b5a79#ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
source = "git+https://github.com/gfx-rs/wgpu?rev=d70ef62e9e0683789f745c6a4354495f39354c15#d70ef62e9e0683789f745c6a4354495f39354c15"
dependencies = [
"bitflags 2.6.0",
"js-sys",

View File

@ -17,7 +17,7 @@ default = []
[dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
rev = "d70ef62e9e0683789f745c6a4354495f39354c15"
# TODO: remove the replay feature on the next update containing https://github.com/gfx-rs/wgpu/pull/5182
features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"]
@ -26,32 +26,32 @@ features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
rev = "d70ef62e9e0683789f745c6a4354495f39354c15"
features = ["metal"]
# We want the wgpu-core Direct3D backends on Windows.
[target.'cfg(windows)'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
rev = "d70ef62e9e0683789f745c6a4354495f39354c15"
features = ["dx12"]
# We want the wgpu-core Vulkan backend on Linux and Windows.
[target.'cfg(any(windows, all(unix, not(any(target_os = "macos", target_os = "ios")))))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
rev = "d70ef62e9e0683789f745c6a4354495f39354c15"
features = ["vulkan"]
[dependencies.wgt]
package = "wgpu-types"
git = "https://github.com/gfx-rs/wgpu"
rev = "ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
rev = "d70ef62e9e0683789f745c6a4354495f39354c15"
[dependencies.wgh]
package = "wgpu-hal"
git = "https://github.com/gfx-rs/wgpu"
rev = "ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
rev = "d70ef62e9e0683789f745c6a4354495f39354c15"
features = ["oom_panic", "device_lost_panic", "internal_error_panic"]
[target.'cfg(windows)'.dependencies]

View File

@ -20,11 +20,11 @@ origin:
# Human-readable identifier for this version/release
# Generally "version NNN", "tag SSS", "bookmark SSS"
release: ee0d1703e5f4a267ce9b87d50b824190b45b5a79 (Fri Oct 4 13:21:59 2024 -0400).
release: d70ef62e9e0683789f745c6a4354495f39354c15 (2024-10-10T15:24:49Z).
# Revision to pull in
# Must be a long or short commit SHA (long preferred)
revision: ee0d1703e5f4a267ce9b87d50b824190b45b5a79
revision: d70ef62e9e0683789f745c6a4354495f39354c15
license: ['MIT', 'Apache-2.0']

View File

@ -3254,11 +3254,11 @@ delta = "0.20.0 -> 22.0.0"
[[audits.naga]]
who = [
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "22.0.0 -> 22.0.0@git:ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
delta = "22.0.0 -> 22.0.0@git:d70ef62e9e0683789f745c6a4354495f39354c15"
importable = false
[[audits.net2]]
@ -5308,11 +5308,11 @@ delta = "0.20.0 -> 22.0.0"
[[audits.wgpu-core]]
who = [
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "22.0.0 -> 22.0.0@git:ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
delta = "22.0.0 -> 22.0.0@git:d70ef62e9e0683789f745c6a4354495f39354c15"
importable = false
[[audits.wgpu-hal]]
@ -5381,11 +5381,11 @@ delta = "0.20.0 -> 22.0.0"
[[audits.wgpu-hal]]
who = [
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "22.0.0 -> 22.0.0@git:ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
delta = "22.0.0 -> 22.0.0@git:d70ef62e9e0683789f745c6a4354495f39354c15"
importable = false
[[audits.wgpu-types]]
@ -5454,11 +5454,11 @@ delta = "0.20.0 -> 22.0.0"
[[audits.wgpu-types]]
who = [
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "22.0.0 -> 22.0.0@git:ee0d1703e5f4a267ce9b87d50b824190b45b5a79"
delta = "22.0.0 -> 22.0.0@git:d70ef62e9e0683789f745c6a4354495f39354c15"
importable = false
[[audits.whatsys]]

File diff suppressed because one or more lines are too long

View File

@ -579,7 +579,7 @@ impl<'a, W> Writer<'a, W> {
crate::BuiltIn::ViewIndex => {
self.features.request(Features::MULTI_VIEW)
}
crate::BuiltIn::InstanceIndex => {
crate::BuiltIn::InstanceIndex | crate::BuiltIn::DrawID => {
self.features.request(Features::INSTANCE_INDEX)
}
_ => {}

View File

@ -238,7 +238,7 @@ bitflags::bitflags! {
/// additional functions on shadows and arrays of shadows.
const TEXTURE_SHADOW_LOD = 0x2;
/// Supports ARB_shader_draw_parameters on the host, which provides
/// support for `gl_BaseInstanceARB`, `gl_BaseVertexARB`, and `gl_DrawIDARB`.
/// support for `gl_BaseInstanceARB`, `gl_BaseVertexARB`, `gl_DrawIDARB`, and `gl_DrawID`.
const DRAW_PARAMETERS = 0x4;
/// Include unused global variables, constants and functions. By default the output will exclude
/// global variables that are not used in the specified entrypoint (including indirect use),
@ -4719,6 +4719,7 @@ const fn glsl_built_in(built_in: crate::BuiltIn, options: VaryingOptions) -> &'s
}
Bi::PointSize => "gl_PointSize",
Bi::VertexIndex => "uint(gl_VertexID)",
Bi::DrawID => "gl_DrawID",
// fragment
Bi::FragDepth => "gl_FragDepth",
Bi::PointCoord => "gl_PointCoord",

View File

@ -178,7 +178,7 @@ impl crate::BuiltIn {
Self::BaseInstance | Self::BaseVertex | Self::WorkGroupSize => {
return Err(Error::Unimplemented(format!("builtin {self:?}")))
}
Self::PointSize | Self::ViewIndex | Self::PointCoord => {
Self::PointSize | Self::ViewIndex | Self::PointCoord | Self::DrawID => {
return Err(Error::Custom(format!("Unsupported builtin {self:?}")))
}
})

View File

@ -568,7 +568,7 @@ impl ResolvedBinding {
Bi::SubgroupId => "simdgroup_index_in_threadgroup",
Bi::SubgroupSize => "threads_per_simdgroup",
Bi::SubgroupInvocationId => "thread_index_in_simdgroup",
Bi::CullDistance | Bi::ViewIndex => {
Bi::CullDistance | Bi::ViewIndex | Bi::DrawID => {
return Err(Error::UnsupportedBuiltIn(built_in))
}
};

View File

@ -33,6 +33,7 @@ const RAY_QUERY_FIELD_INTERSECTION: &str = "intersection";
const RAY_QUERY_FIELD_READY: &str = "ready";
const RAY_QUERY_FUN_MAP_INTERSECTION: &str = "_map_intersection_type";
pub(crate) const ATOMIC_COMP_EXCH_FUNCTION: &str = "naga_atomic_compare_exchange_weak_explicit";
pub(crate) const MODF_FUNCTION: &str = "naga_modf";
pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";
@ -1279,42 +1280,6 @@ impl<W: Write> Writer<W> {
Ok(())
}
fn put_atomic_operation(
&mut self,
pointer: Handle<crate::Expression>,
key: &str,
value: Handle<crate::Expression>,
context: &ExpressionContext,
) -> BackendResult {
// If the pointer we're passing to the atomic operation needs to be conditional
// for `ReadZeroSkipWrite`, the condition needs to *surround* the atomic op, and
// the pointer operand should be unchecked.
let policy = context.choose_bounds_check_policy(pointer);
let checked = policy == index::BoundsCheckPolicy::ReadZeroSkipWrite
&& self.put_bounds_checks(pointer, context, back::Level(0), "")?;
// If requested and successfully put bounds checks, continue the ternary expression.
if checked {
write!(self.out, " ? ")?;
}
write!(
self.out,
"{NAMESPACE}::atomic_{key}_explicit({ATOMIC_REFERENCE}"
)?;
self.put_access_chain(pointer, policy, context)?;
write!(self.out, ", ")?;
self.put_expression(value, context, true)?;
write!(self.out, ", {NAMESPACE}::memory_order_relaxed)")?;
// Finish the ternary expression.
if checked {
write!(self.out, " : DefaultConstructible()")?;
}
Ok(())
}
/// Emit code for the arithmetic expression of the dot product.
///
fn put_dot_product(
@ -3182,24 +3147,65 @@ impl<W: Write> Writer<W> {
value,
result,
} => {
let context = &context.expression;
// This backend supports `SHADER_INT64_ATOMIC_MIN_MAX` but not
// `SHADER_INT64_ATOMIC_ALL_OPS`, so we can assume that if `result` is
// `Some`, we are not operating on a 64-bit value, and that if we are
// operating on a 64-bit value, `result` is `None`.
write!(self.out, "{level}")?;
let fun_str = if let Some(result) = result {
let fun_key = if let Some(result) = result {
let res_name = Baked(result).to_string();
self.start_baking_expression(result, &context.expression, &res_name)?;
self.start_baking_expression(result, context, &res_name)?;
self.named_expressions.insert(result, res_name);
fun.to_msl()?
} else if context.expression.resolve_type(value).scalar_width() == Some(8) {
fun.to_msl()
} else if context.resolve_type(value).scalar_width() == Some(8) {
fun.to_msl_64_bit()?
} else {
fun.to_msl()?
fun.to_msl()
};
self.put_atomic_operation(pointer, fun_str, value, &context.expression)?;
// done
// If the pointer we're passing to the atomic operation needs to be conditional
// for `ReadZeroSkipWrite`, the condition needs to *surround* the atomic op, and
// the pointer operand should be unchecked.
let policy = context.choose_bounds_check_policy(pointer);
let checked = policy == index::BoundsCheckPolicy::ReadZeroSkipWrite
&& self.put_bounds_checks(pointer, context, back::Level(0), "")?;
// If requested and successfully put bounds checks, continue the ternary expression.
if checked {
write!(self.out, " ? ")?;
}
// Put the atomic function invocation.
match *fun {
crate::AtomicFunction::Exchange { compare: Some(cmp) } => {
write!(self.out, "{ATOMIC_COMP_EXCH_FUNCTION}({ATOMIC_REFERENCE}")?;
self.put_access_chain(pointer, policy, context)?;
write!(self.out, ", ")?;
self.put_expression(cmp, context, true)?;
write!(self.out, ", ")?;
self.put_expression(value, context, true)?;
write!(self.out, ")")?;
}
_ => {
write!(
self.out,
"{NAMESPACE}::atomic_{fun_key}_explicit({ATOMIC_REFERENCE}"
)?;
self.put_access_chain(pointer, policy, context)?;
write!(self.out, ", ")?;
self.put_expression(value, context, true)?;
write!(self.out, ", {NAMESPACE}::memory_order_relaxed)")?;
}
}
// Finish the ternary expression.
if checked {
write!(self.out, " : DefaultConstructible()")?;
}
// Done
writeln!(self.out, ";")?;
}
crate::Statement::WorkGroupUniformLoad { pointer, result } => {
@ -3827,7 +3833,33 @@ impl<W: Write> Writer<W> {
}}"
)?;
}
&crate::PredeclaredType::AtomicCompareExchangeWeakResult { .. } => {}
&crate::PredeclaredType::AtomicCompareExchangeWeakResult(scalar) => {
let arg_type_name = scalar.to_msl_name();
let called_func_name = "atomic_compare_exchange_weak_explicit";
let defined_func_name = ATOMIC_COMP_EXCH_FUNCTION;
let struct_name = &self.names[&NameKey::Type(*struct_ty)];
writeln!(self.out)?;
for address_space_name in ["device", "threadgroup"] {
writeln!(
self.out,
"\
template <typename A>
{struct_name} {defined_func_name}(
{address_space_name} A *atomic_ptr,
{arg_type_name} cmp,
{arg_type_name} v
) {{
bool swapped = {NAMESPACE}::{called_func_name}(
atomic_ptr, &cmp, v,
metal::memory_order_relaxed, metal::memory_order_relaxed
);
return {struct_name}{{cmp, swapped}};
}}"
)?;
}
}
}
}
@ -6065,8 +6097,8 @@ fn test_stack_size() {
}
impl crate::AtomicFunction {
fn to_msl(self) -> Result<&'static str, Error> {
Ok(match self {
const fn to_msl(self) -> &'static str {
match self {
Self::Add => "fetch_add",
Self::Subtract => "fetch_sub",
Self::And => "fetch_and",
@ -6075,10 +6107,8 @@ impl crate::AtomicFunction {
Self::Min => "fetch_min",
Self::Max => "fetch_max",
Self::Exchange { compare: None } => "exchange",
Self::Exchange { compare: Some(_) } => Err(Error::FeatureNotImplemented(
"atomic CompareExchange".to_string(),
))?,
})
Self::Exchange { compare: Some(_) } => ATOMIC_COMP_EXCH_FUNCTION,
}
}
fn to_msl_64_bit(self) -> Result<&'static str, Error> {

View File

@ -3,8 +3,8 @@ Implementations for `BlockContext` methods.
*/
use super::{
helpers, index::BoundsCheckResult, make_local, selection::Selection, Block, BlockContext,
Dimension, Error, Instruction, LocalType, LookupType, ResultMember, Writer, WriterFlags,
helpers, index::BoundsCheckResult, selection::Selection, Block, BlockContext, Dimension, Error,
Instruction, LocalType, LookupType, NumericType, ResultMember, Writer, WriterFlags,
};
use crate::{arena::Handle, proc::TypeResolution, Statement};
use spirv::Word;
@ -105,10 +105,9 @@ impl Writer {
position_id: Word,
body: &mut Vec<Instruction>,
) -> Result<(), Error> {
let float_ptr_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::F32,
pointer_space: Some(spirv::StorageClass::Output),
let float_ptr_type_id = self.get_type_id(LookupType::Local(LocalType::LocalPointer {
base: NumericType::Scalar(crate::Scalar::F32),
class: spirv::StorageClass::Output,
}));
let index_y_id = self.get_index_constant(1);
let access_id = self.id_gen.next();
@ -119,11 +118,9 @@ impl Writer {
&[index_y_id],
));
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::F32,
pointer_space: None,
}));
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::F32),
)));
let load_id = self.id_gen.next();
body.push(Instruction::load(float_type_id, load_id, access_id, None));
@ -145,11 +142,9 @@ impl Writer {
frag_depth_id: Word,
body: &mut Vec<Instruction>,
) -> Result<(), Error> {
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::F32,
pointer_space: None,
}));
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::F32),
)));
let zero_scalar_id = self.get_constant_scalar(crate::Literal::F32(0.0));
let one_scalar_id = self.get_constant_scalar(crate::Literal::F32(1.0));
@ -217,32 +212,6 @@ impl Writer {
}
impl<'w> BlockContext<'w> {
/// Decide whether to put off emitting instructions for `expr_handle`.
///
/// We would like to gather together chains of `Access` and `AccessIndex`
/// Naga expressions into a single `OpAccessChain` SPIR-V instruction. To do
/// this, we don't generate instructions for these exprs when we first
/// encounter them. Their ids in `self.writer.cached.ids` are left as zero. Then,
/// once we encounter a `Load` or `Store` expression that actually needs the
/// chain's value, we call `write_expression_pointer` to handle the whole
/// thing in one fell swoop.
fn is_intermediate(&self, expr_handle: Handle<crate::Expression>) -> bool {
match self.ir_function.expressions[expr_handle] {
crate::Expression::GlobalVariable(handle) => {
self.ir_module.global_variables[handle].space != crate::AddressSpace::Handle
}
crate::Expression::LocalVariable(_) => true,
crate::Expression::FunctionArgument(index) => {
let arg = &self.ir_function.arguments[index as usize];
self.ir_module.types[arg.ty].inner.pointer_space().is_some()
}
// The chain rule: if this `Access...`'s `base` operand was
// previously omitted, then omit this one, too.
_ => self.cached.ids[expr_handle] == 0,
}
}
/// Cache an expression for a value.
pub(super) fn cache_expression_value(
&mut self,
@ -313,18 +282,22 @@ impl<'w> BlockContext<'w> {
id
}
}
crate::Expression::Access { base, index: _ } if self.is_intermediate(base) => {
// See `is_intermediate`; we'll handle this later in
// `write_expression_pointer`.
0
}
crate::Expression::Access { base, index } => {
let base_ty_inner = self.fun_info[base].ty.inner_with(&self.ir_module.types);
match *base_ty_inner {
crate::TypeInner::Pointer { .. } | crate::TypeInner::ValuePointer { .. } => {
// When we have a chain of `Access` and `AccessIndex` expressions
// operating on pointers, we want to generate a single
// `OpAccessChain` instruction for the whole chain. Put off
// generating any code for this until we find the `Expression`
// that actually dereferences the pointer.
0
}
crate::TypeInner::Vector { .. } => {
self.write_vector_access(expr_handle, base, index, block)?
}
// Only binding arrays in the Handle address space will take this path (due to `is_intermediate`)
// Only binding arrays in the `Handle` address space will take this
// path, since we handled the `Pointer` case above.
crate::TypeInner::BindingArray {
base: binding_type, ..
} => {
@ -386,7 +359,6 @@ impl<'w> BlockContext<'w> {
}
};
let (id, variable) = self.writer.promote_access_expression_to_variable(
&self.ir_module.types,
result_type_id,
base_id,
base_ty,
@ -410,13 +382,16 @@ impl<'w> BlockContext<'w> {
}
}
}
crate::Expression::AccessIndex { base, index: _ } if self.is_intermediate(base) => {
// See `is_intermediate`; we'll handle this later in
// `write_expression_pointer`.
0
}
crate::Expression::AccessIndex { base, index } => {
match *self.fun_info[base].ty.inner_with(&self.ir_module.types) {
crate::TypeInner::Pointer { .. } | crate::TypeInner::ValuePointer { .. } => {
// When we have a chain of `Access` and `AccessIndex` expressions
// operating on pointers, we want to generate a single
// `OpAccessChain` instruction for the whole chain. Put off
// generating any code for this until we find the `Expression`
// that actually dereferences the pointer.
0
}
crate::TypeInner::Vector { .. }
| crate::TypeInner::Matrix { .. }
| crate::TypeInner::Array { .. }
@ -830,12 +805,8 @@ impl<'w> BlockContext<'w> {
let mut arg2_id = self.writer.get_constant_scalar_with(1, scalar)?;
if let Some(size) = maybe_size {
let ty = LocalType::Value {
vector_size: Some(size),
scalar,
pointer_space: None,
}
.into();
let ty =
LocalType::Numeric(NumericType::Vector { size, scalar }).into();
self.temp_list.clear();
self.temp_list.resize(size as _, arg1_id);
@ -950,12 +921,9 @@ impl<'w> BlockContext<'w> {
&crate::TypeInner::Vector { size, .. },
&crate::TypeInner::Scalar(scalar),
) => {
let selector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
scalar,
pointer_space: None,
}));
let selector_type_id = self.get_type_id(LookupType::Local(
LocalType::Numeric(NumericType::Vector { size, scalar }),
));
self.temp_list.clear();
self.temp_list.resize(size as usize, arg2_id);
@ -998,12 +966,8 @@ impl<'w> BlockContext<'w> {
Mf::CountTrailingZeros => {
let uint_id = match *arg_ty {
crate::TypeInner::Vector { size, scalar } => {
let ty = LocalType::Value {
vector_size: Some(size),
scalar,
pointer_space: None,
}
.into();
let ty =
LocalType::Numeric(NumericType::Vector { size, scalar }).into();
self.temp_list.clear();
self.temp_list.resize(
@ -1040,12 +1004,8 @@ impl<'w> BlockContext<'w> {
Mf::CountLeadingZeros => {
let (int_type_id, int_id, width) = match *arg_ty {
crate::TypeInner::Vector { size, scalar } => {
let ty = LocalType::Value {
vector_size: Some(size),
scalar,
pointer_space: None,
}
.into();
let ty =
LocalType::Numeric(NumericType::Vector { size, scalar }).into();
self.temp_list.clear();
self.temp_list.resize(
@ -1061,11 +1021,9 @@ impl<'w> BlockContext<'w> {
)
}
crate::TypeInner::Scalar(scalar) => (
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar,
pointer_space: None,
})),
self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(scalar),
))),
self.writer
.get_constant_scalar_with(scalar.width * 8 - 1, scalar)?,
scalar.width,
@ -1130,14 +1088,9 @@ impl<'w> BlockContext<'w> {
.writer
.get_constant_scalar(crate::Literal::U32(bit_width as u32));
let u32_type = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar {
kind: crate::ScalarKind::Uint,
width: 4,
},
pointer_space: None,
}));
let u32_type = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::U32),
)));
// o = min(offset, w)
let offset_id = self.gen_id();
@ -1186,14 +1139,9 @@ impl<'w> BlockContext<'w> {
.writer
.get_constant_scalar(crate::Literal::U32(bit_width as u32));
let u32_type = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar {
kind: crate::ScalarKind::Uint,
width: 4,
},
pointer_space: None,
}));
let u32_type = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::U32),
)));
// o = min(offset, w)
let offset_id = self.gen_id();
@ -1259,23 +1207,16 @@ impl<'w> BlockContext<'w> {
Mf::Pack4xU8 => (crate::ScalarKind::Uint, false),
_ => unreachable!(),
};
let uint_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar {
kind: crate::ScalarKind::Uint,
width: 4,
},
pointer_space: None,
}));
let uint_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::U32),
)));
let int_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar {
let int_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar {
kind: int_type,
width: 4,
},
pointer_space: None,
}));
}),
)));
let mut last_instruction = Instruction::new(spirv::Op::Nop);
@ -1352,24 +1293,17 @@ impl<'w> BlockContext<'w> {
_ => unreachable!(),
};
let sint_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar {
kind: crate::ScalarKind::Sint,
width: 4,
},
pointer_space: None,
}));
let sint_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::I32),
)));
let eight = self.writer.get_constant_scalar(crate::Literal::U32(8));
let int_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar {
let int_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar {
kind: int_type,
width: 4,
},
pointer_space: None,
}));
}),
)));
block
.body
.reserve(usize::from(VEC_LENGTH) * 2 + usize::from(is_signed));
@ -1533,11 +1467,10 @@ impl<'w> BlockContext<'w> {
self.writer.get_constant_scalar_with(0, src_scalar)?;
let zero_id = match src_size {
Some(size) => {
let ty = LocalType::Value {
vector_size: Some(size),
let ty = LocalType::Numeric(NumericType::Vector {
size,
scalar: src_scalar,
pointer_space: None,
}
})
.into();
self.temp_list.clear();
@ -1562,11 +1495,10 @@ impl<'w> BlockContext<'w> {
self.writer.get_constant_scalar_with(1, dst_scalar)?;
let (accept_id, reject_id) = match src_size {
Some(size) => {
let ty = LocalType::Value {
vector_size: Some(size),
let ty = LocalType::Numeric(NumericType::Vector {
size,
scalar: dst_scalar,
pointer_space: None,
}
})
.into();
self.temp_list.clear();
@ -1704,12 +1636,12 @@ impl<'w> BlockContext<'w> {
self.temp_list.clear();
self.temp_list.resize(size as usize, condition_id);
let bool_vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
let bool_vector_type_id = self.get_type_id(LookupType::Local(
LocalType::Numeric(NumericType::Vector {
size,
scalar: condition_scalar,
pointer_space: None,
}));
}),
));
let id = self.gen_id();
block.body.push(Instruction::composite_construct(
@ -1809,18 +1741,17 @@ impl<'w> BlockContext<'w> {
Some(ty) => ty,
None => LookupType::Handle(ty_handle),
},
TypeResolution::Value(ref inner) => LookupType::Local(make_local(inner).unwrap()),
TypeResolution::Value(ref inner) => {
LookupType::Local(LocalType::from_inner(inner).unwrap())
}
};
let result_type_id = self.get_type_id(result_lookup_ty);
// The id of the boolean `and` of all dynamic bounds checks up to this point. If
// `None`, then we haven't done any dynamic bounds checks yet.
// The id of the boolean `and` of all dynamic bounds checks up to this point.
//
// When we have a chain of bounds checks, we combine them with `OpLogicalAnd`, not
// a short-circuit branch. This means we might do comparisons we don't need to,
// but we expect these checks to almost always succeed, and keeping branches to a
// minimum is essential.
// See `extend_bounds_check_condition_chain` for a full explanation.
let mut accumulated_checks = None;
// Is true if we are accessing into a binding array with a non-uniform index.
let mut is_non_uniform_binding_array = false;
@ -1828,57 +1759,41 @@ impl<'w> BlockContext<'w> {
let root_id = loop {
expr_handle = match self.ir_function.expressions[expr_handle] {
crate::Expression::Access { base, index } => {
if let crate::Expression::GlobalVariable(var_handle) =
self.ir_function.expressions[base]
{
// The access chain needs to be decorated as NonUniform
// see VUID-RuntimeSpirv-NonUniform-06274
let gvar = &self.ir_module.global_variables[var_handle];
if let crate::TypeInner::BindingArray { .. } =
self.ir_module.types[gvar.ty].inner
{
is_non_uniform_binding_array =
self.fun_info[index].uniformity.non_uniform_result.is_some();
}
}
is_non_uniform_binding_array |=
self.is_nonuniform_binding_array_access(base, index);
let index_id = match self.write_bounds_check(base, index, block)? {
BoundsCheckResult::KnownInBounds(known_index) => {
// Even if the index is known, `OpAccessIndex`
// requires expression operands, not literals.
let scalar = crate::Literal::U32(known_index);
self.writer.get_constant_scalar(scalar)
}
BoundsCheckResult::Computed(computed_index_id) => computed_index_id,
BoundsCheckResult::Conditional(comparison_id) => {
match accumulated_checks {
Some(prior_checks) => {
let combined = self.gen_id();
block.body.push(Instruction::binary(
spirv::Op::LogicalAnd,
self.writer.get_bool_type_id(),
combined,
prior_checks,
comparison_id,
));
accumulated_checks = Some(combined);
}
None => {
// Start a fresh chain of checks.
accumulated_checks = Some(comparison_id);
}
}
// Either way, the index to use is unchanged.
self.cached[index]
}
};
let index = crate::proc::index::GuardedIndex::Expression(index);
let index_id =
self.write_access_chain_index(base, index, &mut accumulated_checks, block)?;
self.temp_list.push(index_id);
base
}
crate::Expression::AccessIndex { base, index } => {
let const_id = self.get_index_constant(index);
self.temp_list.push(const_id);
// Decide whether we're indexing a struct (bounds checks
// forbidden) or anything else (bounds checks required).
let mut base_ty = self.fun_info[base].ty.inner_with(&self.ir_module.types);
if let crate::TypeInner::Pointer { base, .. } = *base_ty {
base_ty = &self.ir_module.types[base].inner;
}
let index_id = if let crate::TypeInner::Struct { .. } = *base_ty {
self.get_index_constant(index)
} else {
// `index` is constant, so this can't possibly require
// setting `is_nonuniform_binding_array_access`.
// Even though the index value is statically known, `base`
// may be a runtime-sized array, so we still need to go
// through the bounds check process.
self.write_access_chain_index(
base,
crate::proc::index::GuardedIndex::Known(index),
&mut accumulated_checks,
block,
)?
};
self.temp_list.push(index_id);
base
}
crate::Expression::GlobalVariable(handle) => {
@ -1933,6 +1848,105 @@ impl<'w> BlockContext<'w> {
Ok(expr_pointer)
}
fn is_nonuniform_binding_array_access(
&mut self,
base: Handle<crate::Expression>,
index: Handle<crate::Expression>,
) -> bool {
let crate::Expression::GlobalVariable(var_handle) = self.ir_function.expressions[base]
else {
return false;
};
// The access chain needs to be decorated as NonUniform
// see VUID-RuntimeSpirv-NonUniform-06274
let gvar = &self.ir_module.global_variables[var_handle];
let crate::TypeInner::BindingArray { .. } = self.ir_module.types[gvar.ty].inner else {
return false;
};
self.fun_info[index].uniformity.non_uniform_result.is_some()
}
/// Compute a single index operand to an `OpAccessChain` instruction.
///
/// Given that we are indexing `base` with `index`, apply the appropriate
/// bounds check policies, emitting code to `block` to clamp `index` or
/// determine whether it's in bounds. Return the SPIR-V instruction id of
/// the index value we should actually use.
///
/// Extend `accumulated_checks` to include the results of any needed bounds
/// checks. See [`BlockContext::extend_bounds_check_condition_chain`].
fn write_access_chain_index(
&mut self,
base: Handle<crate::Expression>,
index: crate::proc::index::GuardedIndex,
accumulated_checks: &mut Option<Word>,
block: &mut Block,
) -> Result<Word, Error> {
match self.write_bounds_check(base, index, block)? {
BoundsCheckResult::KnownInBounds(known_index) => {
// Even if the index is known, `OpAccessChain`
// requires expression operands, not literals.
let scalar = crate::Literal::U32(known_index);
Ok(self.writer.get_constant_scalar(scalar))
}
BoundsCheckResult::Computed(computed_index_id) => Ok(computed_index_id),
BoundsCheckResult::Conditional {
condition_id: condition,
index_id: index,
} => {
self.extend_bounds_check_condition_chain(accumulated_checks, condition, block);
// Use the index from the `Access` expression unchanged.
Ok(index)
}
}
}
/// Add a condition to a chain of bounds checks.
///
/// As we build an `OpAccessChain` instruction governed by
/// [`BoundsCheckPolicy::ReadZeroSkipWrite`], we accumulate a chain of
/// dynamic bounds checks, one for each index in the chain, which must all
/// be true for that `OpAccessChain`'s execution to be well-defined. This
/// function adds the boolean instruction id `comparison_id` to `chain`.
///
/// If `chain` is `None`, that means there are no bounds checks in the chain
/// yet. If `chain` is `Some(id)`, then `id` is the conjunction of all the
/// bounds checks in the chain.
///
/// When we have multiple bounds checks, we combine them with
/// `OpLogicalAnd`, not a short-circuit branch. This means we might do
/// comparisons we don't need to, but we expect these checks to almost
/// always succeed, and keeping branches to a minimum is essential.
///
/// [`BoundsCheckPolicy::ReadZeroSkipWrite`]: crate::proc::BoundsCheckPolicy
fn extend_bounds_check_condition_chain(
&mut self,
chain: &mut Option<Word>,
comparison_id: Word,
block: &mut Block,
) {
match *chain {
Some(ref mut prior_checks) => {
let combined = self.gen_id();
block.body.push(Instruction::binary(
spirv::Op::LogicalAnd,
self.writer.get_bool_type_id(),
combined,
*prior_checks,
comparison_id,
));
*prior_checks = combined;
}
None => {
// Start a fresh chain of checks.
*chain = Some(comparison_id);
}
}
}
/// Build the instructions for matrix - matrix column operations
#[allow(clippy::too_many_arguments)]
fn write_matrix_matrix_column_op(
@ -1949,11 +1963,11 @@ impl<'w> BlockContext<'w> {
) {
self.temp_list.clear();
let vector_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(rows),
scalar: crate::Scalar::float(width),
pointer_space: None,
}));
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Vector {
size: rows,
scalar: crate::Scalar::float(width),
})));
for index in 0..columns as u32 {
let column_id_left = self.gen_id();
@ -2655,20 +2669,15 @@ impl<'w> BlockContext<'w> {
crate::AtomicFunction::Exchange { compare: Some(cmp) } => {
let scalar_type_id = match *value_inner {
crate::TypeInner::Scalar(scalar) => {
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar,
pointer_space: None,
}))
self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(scalar),
)))
}
_ => unimplemented!(),
};
let bool_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::BOOL,
pointer_space: None,
}));
let bool_type_id = self.get_type_id(LookupType::Local(
LocalType::Numeric(NumericType::Scalar(crate::Scalar::BOOL)),
));
let cas_result_id = self.gen_id();
let equality_result_id = self.gen_id();

View File

@ -4,7 +4,7 @@ Generating SPIR-V for image operations.
use super::{
selection::{MergeTuple, Selection},
Block, BlockContext, Error, IdGenerator, Instruction, LocalType, LookupType,
Block, BlockContext, Error, IdGenerator, Instruction, LocalType, LookupType, NumericType,
};
use crate::arena::Handle;
use spirv::Word;
@ -126,11 +126,10 @@ impl Load {
// the right SPIR-V type for the access instruction here.
let type_id = match image_class {
crate::ImageClass::Depth { .. } => {
ctx.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(crate::VectorSize::Quad),
ctx.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Vector {
size: crate::VectorSize::Quad,
scalar: crate::Scalar::F32,
pointer_space: None,
}))
})))
}
_ => result_type_id,
};
@ -292,15 +291,15 @@ impl<'w> BlockContext<'w> {
// Find the component type of `coordinates`, and figure out the size the
// combined coordinate vector will have.
let (component_scalar, size) = match *inner_ty {
Ti::Scalar(scalar @ crate::Scalar { width: 4, .. }) => (scalar, Some(Vs::Bi)),
Ti::Scalar(scalar @ crate::Scalar { width: 4, .. }) => (scalar, Vs::Bi),
Ti::Vector {
scalar: scalar @ crate::Scalar { width: 4, .. },
size: Vs::Bi,
} => (scalar, Some(Vs::Tri)),
} => (scalar, Vs::Tri),
Ti::Vector {
scalar: scalar @ crate::Scalar { width: 4, .. },
size: Vs::Tri,
} => (scalar, Some(Vs::Quad)),
} => (scalar, Vs::Quad),
Ti::Vector { size: Vs::Quad, .. } => {
return Err(Error::Validation("extending vec4 coordinate"));
}
@ -340,11 +339,9 @@ impl<'w> BlockContext<'w> {
}
};
let reconciled_array_index_id = if let Some(cast) = cast {
let component_ty_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: component_scalar,
pointer_space: None,
}));
let component_ty_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(component_scalar),
)));
let reconciled_id = self.gen_id();
block.body.push(Instruction::unary(
cast,
@ -358,11 +355,11 @@ impl<'w> BlockContext<'w> {
};
// Find the SPIR-V type for the combined coordinates/index vector.
let type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: size,
scalar: component_scalar,
pointer_space: None,
}));
let type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Vector {
size,
scalar: component_scalar,
})));
// Schmear the coordinates and index together.
let value_id = self.gen_id();
@ -374,7 +371,7 @@ impl<'w> BlockContext<'w> {
Ok(ImageCoordinates {
value_id,
type_id,
size,
size: Some(size),
})
}
@ -529,11 +526,9 @@ impl<'w> BlockContext<'w> {
&[spirv::Capability::ImageQuery],
)?;
let i32_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::I32,
pointer_space: None,
}));
let i32_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::I32),
)));
// If `level` is `Some`, clamp it to fall within bounds. This must
// happen first, because we'll use it to query the image size for
@ -616,11 +611,9 @@ impl<'w> BlockContext<'w> {
)?;
let bool_type_id = self.writer.get_bool_type_id();
let i32_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::I32,
pointer_space: None,
}));
let i32_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::I32),
)));
let null_id = access.out_of_bounds_value(self);
@ -683,11 +676,15 @@ impl<'w> BlockContext<'w> {
);
// Compare the coordinates against the bounds.
let coords_bool_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: coordinates.size,
scalar: crate::Scalar::BOOL,
pointer_space: None,
}));
let coords_numeric_type = match coordinates.size {
Some(size) => NumericType::Vector {
size,
scalar: crate::Scalar::BOOL,
},
None => NumericType::Scalar(crate::Scalar::BOOL),
};
let coords_bool_type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(coords_numeric_type)));
let coords_conds_id = self.gen_id();
selection.block().body.push(Instruction::binary(
spirv::Op::ULessThan,
@ -838,11 +835,10 @@ impl<'w> BlockContext<'w> {
_ => false,
};
let sample_result_type_id = if needs_sub_access {
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(crate::VectorSize::Quad),
self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Vector {
size: crate::VectorSize::Quad,
scalar: crate::Scalar::F32,
pointer_space: None,
}))
})))
} else {
result_type_id
};
@ -1038,11 +1034,16 @@ impl<'w> BlockContext<'w> {
4 => Some(crate::VectorSize::Quad),
_ => None,
};
let extended_size_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size,
scalar: crate::Scalar::U32,
pointer_space: None,
}));
let vector_numeric_type = match vector_size {
Some(size) => NumericType::Vector {
size,
scalar: crate::Scalar::U32,
},
None => NumericType::Scalar(crate::Scalar::U32),
};
let extended_size_type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(vector_numeric_type)));
let (query_op, level_id) = match class {
Ic::Sampled { multi: true, .. }
@ -1108,11 +1109,11 @@ impl<'w> BlockContext<'w> {
Id::D2 | Id::Cube => crate::VectorSize::Tri,
Id::D3 => crate::VectorSize::Quad,
};
let extended_size_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(vec_size),
scalar: crate::Scalar::U32,
pointer_space: None,
}));
let extended_size_type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Vector {
size: vec_size,
scalar: crate::Scalar::U32,
})));
let id_extended = self.gen_id();
let mut inst = Instruction::image_query(
spirv::Op::ImageQuerySizeLod,

View File

@ -7,13 +7,17 @@ use super::{
selection::Selection,
Block, BlockContext, Error, IdGenerator, Instruction, Word,
};
use crate::{arena::Handle, proc::BoundsCheckPolicy};
use crate::{
arena::Handle,
proc::{index::GuardedIndex, BoundsCheckPolicy},
};
/// The results of performing a bounds check.
///
/// On success, [`write_bounds_check`](BlockContext::write_bounds_check)
/// returns a value of this type. The caller can assume that the right
/// policy has been applied, and simply do what the variant says.
#[derive(Debug)]
pub(super) enum BoundsCheckResult {
/// The index is statically known and in bounds, with the given value.
KnownInBounds(u32),
@ -36,10 +40,17 @@ pub(super) enum BoundsCheckResult {
///
/// This is returned when [`BoundsCheckPolicy::ReadZeroSkipWrite`]
/// is in force.
Conditional(Word),
Conditional {
/// The access should only be permitted if this value is true.
condition_id: Word,
/// The access should use this index value.
index_id: Word,
},
}
/// A value that we either know at translation time, or need to compute at runtime.
#[derive(Copy, Clone)]
pub(super) enum MaybeKnown<T> {
/// The value is known at shader translation time.
Known(T),
@ -215,7 +226,7 @@ impl<'w> BlockContext<'w> {
let element_type_id = match self.ir_module.types[global.ty].inner {
crate::TypeInner::BindingArray { base, size: _ } => {
let class = map_storage_class(global.space);
self.get_pointer_id(base, class)?
self.get_pointer_id(base, class)
}
_ => return Err(Error::Validation("array length expression case-5")),
};
@ -329,33 +340,26 @@ impl<'w> BlockContext<'w> {
pub(super) fn write_restricted_index(
&mut self,
sequence: Handle<crate::Expression>,
index: Handle<crate::Expression>,
index: GuardedIndex,
block: &mut Block,
) -> Result<BoundsCheckResult, Error> {
let index_id = self.cached[index];
let max_index = self.write_sequence_max_index(sequence, block)?;
// Get the sequence's maximum valid index. Return early if we've already
// done the bounds check.
let max_index_id = match self.write_sequence_max_index(sequence, block)? {
MaybeKnown::Known(known_max_index) => {
if let Ok(known_index) = self
.ir_module
.to_ctx()
.eval_expr_to_u32_from(index, &self.ir_function.expressions)
{
// Both the index and length are known at compile time.
//
// In strict WGSL compliance mode, out-of-bounds indices cannot be
// reported at shader translation time, and must be replaced with
// in-bounds indices at run time. So we cannot assume that
// validation ensured the index was in bounds. Restrict now.
let restricted = std::cmp::min(known_index, known_max_index);
return Ok(BoundsCheckResult::KnownInBounds(restricted));
}
// If both are known, we can compute the index to be used
// right now.
if let (GuardedIndex::Known(index), MaybeKnown::Known(max_index)) = (index, max_index) {
let restricted = std::cmp::min(index, max_index);
return Ok(BoundsCheckResult::KnownInBounds(restricted));
}
self.get_index_constant(known_max_index)
}
MaybeKnown::Computed(max_index_id) => max_index_id,
let index_id = match index {
GuardedIndex::Known(value) => self.get_index_constant(value),
GuardedIndex::Expression(expr) => self.cached[expr],
};
let max_index_id = match max_index {
MaybeKnown::Known(value) => self.get_index_constant(value),
MaybeKnown::Computed(id) => id,
};
// One or the other of the index or length is dynamic, so emit code for
@ -393,48 +397,33 @@ impl<'w> BlockContext<'w> {
fn write_index_comparison(
&mut self,
sequence: Handle<crate::Expression>,
index: Handle<crate::Expression>,
index: GuardedIndex,
block: &mut Block,
) -> Result<BoundsCheckResult, Error> {
let index_id = self.cached[index];
let length = self.write_sequence_length(sequence, block)?;
// Get the sequence's length. Return early if we've already done the
// bounds check.
let length_id = match self.write_sequence_length(sequence, block)? {
MaybeKnown::Known(known_length) => {
if let Ok(known_index) = self
.ir_module
.to_ctx()
.eval_expr_to_u32_from(index, &self.ir_function.expressions)
{
// Both the index and length are known at compile time.
//
// It would be nice to assume that, since we are using the
// `ReadZeroSkipWrite` policy, we are not in strict WGSL
// compliance mode, and thus we can count on the validator to have
// rejected any programs with known out-of-bounds indices, and
// thus just return `KnownInBounds` here without actually
// checking.
//
// But it's also reasonable to expect that bounds check policies
// and error reporting policies should be able to vary
// independently without introducing security holes. So, we should
// support the case where bad indices do not cause validation
// errors, and are handled via `ReadZeroSkipWrite`.
//
// In theory, when `known_index` is bad, we could return a new
// `KnownOutOfBounds` variant here. But it's simpler just to fall
// through and let the bounds check take place. The shader is
// broken anyway, so it doesn't make sense to invest in emitting
// the ideal code for it.
if known_index < known_length {
return Ok(BoundsCheckResult::KnownInBounds(known_index));
}
}
self.get_index_constant(known_length)
// If both are known, we can decide whether the index is in
// bounds right now.
if let (GuardedIndex::Known(index), MaybeKnown::Known(length)) = (index, length) {
if index < length {
return Ok(BoundsCheckResult::KnownInBounds(index));
}
MaybeKnown::Computed(length_id) => length_id,
// In theory, when `index` is bad, we could return a new
// `KnownOutOfBounds` variant here. But it's simpler just to fall
// through and let the bounds check take place. The shader is broken
// anyway, so it doesn't make sense to invest in emitting the ideal
// code for it.
}
let index_id = match index {
GuardedIndex::Known(value) => self.get_index_constant(value),
GuardedIndex::Expression(expr) => self.cached[expr],
};
let length_id = match length {
MaybeKnown::Known(value) => self.get_index_constant(value),
MaybeKnown::Computed(id) => id,
};
// Compare the index against the length.
@ -448,7 +437,10 @@ impl<'w> BlockContext<'w> {
));
// Indicate that we did generate the check.
Ok(BoundsCheckResult::Conditional(condition_id))
Ok(BoundsCheckResult::Conditional {
condition_id,
index_id,
})
}
/// Emit a conditional load for `BoundsCheckPolicy::ReadZeroSkipWrite`.
@ -516,9 +508,12 @@ impl<'w> BlockContext<'w> {
pub(super) fn write_bounds_check(
&mut self,
base: Handle<crate::Expression>,
index: Handle<crate::Expression>,
mut index: GuardedIndex,
block: &mut Block,
) -> Result<BoundsCheckResult, Error> {
// If the value of `index` is known at compile time, find it now.
index.try_resolve_to_constant(self.ir_function, self.ir_module);
let policy = self.writer.bounds_check_policies.choose_policy(
base,
&self.ir_module.types,
@ -530,7 +525,10 @@ impl<'w> BlockContext<'w> {
BoundsCheckPolicy::ReadZeroSkipWrite => {
self.write_index_comparison(base, index, block)?
}
BoundsCheckPolicy::Unchecked => BoundsCheckResult::Computed(self.cached[index]),
BoundsCheckPolicy::Unchecked => match index {
GuardedIndex::Known(value) => BoundsCheckResult::KnownInBounds(value),
GuardedIndex::Expression(expr) => BoundsCheckResult::Computed(self.cached[expr]),
},
})
}
@ -547,7 +545,7 @@ impl<'w> BlockContext<'w> {
let result_type_id = self.get_expression_type_id(&self.fun_info[expr_handle].ty);
let base_id = self.cached[base];
let index_id = self.cached[index];
let index = GuardedIndex::Expression(index);
let result_id = match self.write_bounds_check(base, index, block)? {
BoundsCheckResult::KnownInBounds(known_index) => {
@ -570,12 +568,15 @@ impl<'w> BlockContext<'w> {
));
result_id
}
BoundsCheckResult::Conditional(comparison_id) => {
BoundsCheckResult::Conditional {
condition_id,
index_id,
} => {
// Run-time bounds checks were required. Emit
// conditional load.
self.write_conditional_indexed_load(
result_type_id,
comparison_id,
condition_id,
block,
|id_gen, block| {
// The in-bounds path. Generate the access.

View File

@ -231,6 +231,21 @@ impl LocalImageType {
}
}
/// A numeric type, for use in [`LocalType`].
#[derive(Debug, PartialEq, Hash, Eq, Copy, Clone)]
enum NumericType {
Scalar(crate::Scalar),
Vector {
size: crate::VectorSize,
scalar: crate::Scalar,
},
Matrix {
columns: crate::VectorSize,
rows: crate::VectorSize,
scalar: crate::Scalar,
},
}
/// A SPIR-V type constructed during code generation.
///
/// This is the variant of [`LookupType`] used to represent types that might not
@ -246,9 +261,9 @@ impl LocalImageType {
/// never synthesizes new struct types, so `LocalType` has nothing for that.
///
/// Each `LocalType` variant should be handled identically to its analogous
/// `TypeInner` variant. You can use the [`make_local`] function to help with
/// this, by converting everything possible to a `LocalType` before inspecting
/// it.
/// `TypeInner` variant. You can use the [`LocalType::from_inner`] function to
/// help with this, by converting everything possible to a `LocalType` before
/// inspecting it.
///
/// ## `LocalType` equality and SPIR-V `OpType` uniqueness
///
@ -276,19 +291,11 @@ impl LocalImageType {
/// [`TypeInner`]: crate::TypeInner
#[derive(Debug, PartialEq, Hash, Eq, Copy, Clone)]
enum LocalType {
/// A scalar, vector, or pointer to one of those.
Value {
/// If `None`, this represents a scalar type. If `Some`, this represents
/// a vector type of the given size.
vector_size: Option<crate::VectorSize>,
scalar: crate::Scalar,
pointer_space: Option<spirv::StorageClass>,
},
/// A matrix of floating-point values.
Matrix {
columns: crate::VectorSize,
rows: crate::VectorSize,
width: crate::Bytes,
/// A numeric type.
Numeric(NumericType),
LocalPointer {
base: NumericType,
class: spirv::StorageClass,
},
Pointer {
base: Handle<crate::Type>,
@ -357,52 +364,57 @@ struct LookupFunctionType {
return_type_id: Word,
}
fn make_local(inner: &crate::TypeInner) -> Option<LocalType> {
Some(match *inner {
crate::TypeInner::Scalar(scalar) | crate::TypeInner::Atomic(scalar) => LocalType::Value {
vector_size: None,
scalar,
pointer_space: None,
},
crate::TypeInner::Vector { size, scalar } => LocalType::Value {
vector_size: Some(size),
scalar,
pointer_space: None,
},
crate::TypeInner::Matrix {
columns,
rows,
scalar,
} => LocalType::Matrix {
columns,
rows,
width: scalar.width,
},
crate::TypeInner::Pointer { base, space } => LocalType::Pointer {
base,
class: helpers::map_storage_class(space),
},
crate::TypeInner::ValuePointer {
size,
scalar,
space,
} => LocalType::Value {
vector_size: size,
scalar,
pointer_space: Some(helpers::map_storage_class(space)),
},
crate::TypeInner::Image {
dim,
arrayed,
class,
} => LocalType::Image(LocalImageType::from_inner(dim, arrayed, class)),
crate::TypeInner::Sampler { comparison: _ } => LocalType::Sampler,
crate::TypeInner::AccelerationStructure => LocalType::AccelerationStructure,
crate::TypeInner::RayQuery => LocalType::RayQuery,
crate::TypeInner::Array { .. }
| crate::TypeInner::Struct { .. }
| crate::TypeInner::BindingArray { .. } => return None,
})
impl LocalType {
fn from_inner(inner: &crate::TypeInner) -> Option<Self> {
Some(match *inner {
crate::TypeInner::Scalar(scalar) | crate::TypeInner::Atomic(scalar) => {
LocalType::Numeric(NumericType::Scalar(scalar))
}
crate::TypeInner::Vector { size, scalar } => {
LocalType::Numeric(NumericType::Vector { size, scalar })
}
crate::TypeInner::Matrix {
columns,
rows,
scalar,
} => LocalType::Numeric(NumericType::Matrix {
columns,
rows,
scalar,
}),
crate::TypeInner::Pointer { base, space } => LocalType::Pointer {
base,
class: helpers::map_storage_class(space),
},
crate::TypeInner::ValuePointer {
size: Some(size),
scalar,
space,
} => LocalType::LocalPointer {
base: NumericType::Vector { size, scalar },
class: helpers::map_storage_class(space),
},
crate::TypeInner::ValuePointer {
size: None,
scalar,
space,
} => LocalType::LocalPointer {
base: NumericType::Scalar(scalar),
class: helpers::map_storage_class(space),
},
crate::TypeInner::Image {
dim,
arrayed,
class,
} => LocalType::Image(LocalImageType::from_inner(dim, arrayed, class)),
crate::TypeInner::Sampler { comparison: _ } => LocalType::Sampler,
crate::TypeInner::AccelerationStructure => LocalType::AccelerationStructure,
crate::TypeInner::RayQuery => LocalType::RayQuery,
crate::TypeInner::Array { .. }
| crate::TypeInner::Struct { .. }
| crate::TypeInner::BindingArray { .. } => return None,
})
}
}
#[derive(Debug)]
@ -655,13 +667,8 @@ impl BlockContext<'_> {
.get_constant_scalar(crate::Literal::I32(scope as _))
}
fn get_pointer_id(
&mut self,
handle: Handle<crate::Type>,
class: spirv::StorageClass,
) -> Result<Word, Error> {
self.writer
.get_pointer_id(&self.ir_module.types, handle, class)
fn get_pointer_id(&mut self, handle: Handle<crate::Type>, class: spirv::StorageClass) -> Word {
self.writer.get_pointer_id(handle, class)
}
}

View File

@ -2,7 +2,7 @@
Generating SPIR-V for ray query operations.
*/
use super::{Block, BlockContext, Instruction, LocalType, LookupType};
use super::{Block, BlockContext, Instruction, LocalType, LookupType, NumericType};
use crate::arena::Handle;
impl<'w> BlockContext<'w> {
@ -22,11 +22,9 @@ impl<'w> BlockContext<'w> {
let desc_id = self.cached[descriptor];
let acc_struct_id = self.get_handle_id(acceleration_structure);
let flag_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::U32,
pointer_space: None,
}));
let flag_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::U32),
)));
let ray_flags_id = self.gen_id();
block.body.push(Instruction::composite_extract(
flag_type_id,
@ -42,11 +40,9 @@ impl<'w> BlockContext<'w> {
&[1],
));
let scalar_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::F32,
pointer_space: None,
}));
let scalar_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::F32),
)));
let tmin_id = self.gen_id();
block.body.push(Instruction::composite_extract(
scalar_type_id,
@ -62,11 +58,11 @@ impl<'w> BlockContext<'w> {
&[3],
));
let vector_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(crate::VectorSize::Tri),
scalar: crate::Scalar::F32,
pointer_space: None,
}));
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Vector {
size: crate::VectorSize::Tri,
scalar: crate::Scalar::F32,
})));
let ray_origin_id = self.gen_id();
block.body.push(Instruction::composite_extract(
vector_type_id,
@ -116,11 +112,9 @@ impl<'w> BlockContext<'w> {
spirv::RayQueryIntersection::RayQueryCommittedIntersectionKHR as _,
));
let flag_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::U32,
pointer_space: None,
}));
let flag_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::U32),
)));
let kind_id = self.gen_id();
block.body.push(Instruction::ray_query_get_intersection(
spirv::Op::RayQueryGetIntersectionTypeKHR,
@ -170,11 +164,9 @@ impl<'w> BlockContext<'w> {
intersection_id,
));
let scalar_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::F32,
pointer_space: None,
}));
let scalar_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::F32),
)));
let t_id = self.gen_id();
block.body.push(Instruction::ray_query_get_intersection(
spirv::Op::RayQueryGetIntersectionTKHR,
@ -184,11 +176,11 @@ impl<'w> BlockContext<'w> {
intersection_id,
));
let barycentrics_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(crate::VectorSize::Bi),
scalar: crate::Scalar::F32,
pointer_space: None,
}));
let barycentrics_type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Vector {
size: crate::VectorSize::Bi,
scalar: crate::Scalar::F32,
})));
let barycentrics_id = self.gen_id();
block.body.push(Instruction::ray_query_get_intersection(
spirv::Op::RayQueryGetIntersectionBarycentricsKHR,
@ -198,11 +190,9 @@ impl<'w> BlockContext<'w> {
intersection_id,
));
let bool_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::BOOL,
pointer_space: None,
}));
let bool_type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(crate::Scalar::BOOL),
)));
let front_face_id = self.gen_id();
block.body.push(Instruction::ray_query_get_intersection(
spirv::Op::RayQueryGetIntersectionFrontFaceKHR,
@ -212,11 +202,12 @@ impl<'w> BlockContext<'w> {
intersection_id,
));
let transform_type_id = self.get_type_id(LookupType::Local(LocalType::Matrix {
columns: crate::VectorSize::Quad,
rows: crate::VectorSize::Tri,
width: 4,
}));
let transform_type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Matrix {
columns: crate::VectorSize::Quad,
rows: crate::VectorSize::Tri,
scalar: crate::Scalar::F32,
})));
let object_to_world_id = self.gen_id();
block.body.push(Instruction::ray_query_get_intersection(
spirv::Op::RayQueryGetIntersectionObjectToWorldKHR,

View File

@ -1,4 +1,4 @@
use super::{Block, BlockContext, Error, Instruction};
use super::{Block, BlockContext, Error, Instruction, NumericType};
use crate::{
arena::Handle,
back::spv::{LocalType, LookupType},
@ -16,11 +16,11 @@ impl<'w> BlockContext<'w> {
"GroupNonUniformBallot",
&[spirv::Capability::GroupNonUniformBallot],
)?;
let vec4_u32_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(crate::VectorSize::Quad),
scalar: crate::Scalar::U32,
pointer_space: None,
}));
let vec4_u32_type_id =
self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Vector {
size: crate::VectorSize::Quad,
scalar: crate::Scalar::U32,
})));
let exec_scope_id = self.get_index_constant(spirv::Scope::Subgroup as u32);
let predicate = if let Some(predicate) = *predicate {
self.cached[predicate]

View File

@ -1,10 +1,10 @@
use super::{
block::DebugInfoInner,
helpers::{contains_builtin, global_needs_wrapper, map_storage_class},
make_local, Block, BlockContext, CachedConstant, CachedExpressions, DebugInfo,
EntryPointContext, Error, Function, FunctionArgument, GlobalVariable, IdGenerator, Instruction,
LocalType, LocalVariable, LogicalLayout, LookupFunctionType, LookupType, Options,
PhysicalLayout, PipelineOptions, ResultMember, Writer, WriterFlags, BITS_PER_BYTE,
Block, BlockContext, CachedConstant, CachedExpressions, DebugInfo, EntryPointContext, Error,
Function, FunctionArgument, GlobalVariable, IdGenerator, Instruction, LocalType, LocalVariable,
LogicalLayout, LookupFunctionType, LookupType, NumericType, Options, PhysicalLayout,
PipelineOptions, ResultMember, Writer, WriterFlags, BITS_PER_BYTE,
};
use crate::{
arena::{Handle, HandleVec, UniqueArena},
@ -141,7 +141,6 @@ impl Writer {
#[allow(clippy::too_many_arguments)]
pub(super) fn promote_access_expression_to_variable(
&mut self,
ir_types: &UniqueArena<crate::Type>,
result_type_id: Word,
container_id: Word,
container_ty: Handle<crate::Type>,
@ -149,8 +148,7 @@ impl Writer {
element_ty: Handle<crate::Type>,
block: &mut Block,
) -> Result<(Word, LocalVariable), Error> {
let pointer_type_id =
self.get_pointer_id(ir_types, container_ty, spirv::StorageClass::Function)?;
let pointer_type_id = self.get_pointer_id(container_ty, spirv::StorageClass::Function);
let variable = {
let id = self.id_gen.next();
@ -170,7 +168,7 @@ impl Writer {
let element_pointer_id = self.id_gen.next();
let element_pointer_type_id =
self.get_pointer_id(ir_types, element_ty, spirv::StorageClass::Function)?;
self.get_pointer_id(element_ty, spirv::StorageClass::Function);
block.body.push(Instruction::access_chain(
element_pointer_type_id,
element_pointer_id,
@ -254,7 +252,9 @@ impl Writer {
pub(super) fn get_expression_lookup_type(&mut self, tr: &TypeResolution) -> LookupType {
match *tr {
TypeResolution::Handle(ty_handle) => LookupType::Handle(ty_handle),
TypeResolution::Value(ref inner) => LookupType::Local(make_local(inner).unwrap()),
TypeResolution::Value(ref inner) => {
LookupType::Local(LocalType::from_inner(inner).unwrap())
}
}
}
@ -265,110 +265,65 @@ impl Writer {
pub(super) fn get_pointer_id(
&mut self,
arena: &UniqueArena<crate::Type>,
handle: Handle<crate::Type>,
class: spirv::StorageClass,
) -> Result<Word, Error> {
let ty_id = self.get_type_id(LookupType::Handle(handle));
if let crate::TypeInner::Pointer { .. } = arena[handle].inner {
return Ok(ty_id);
}
let lookup_type = LookupType::Local(LocalType::Pointer {
) -> Word {
self.get_type_id(LookupType::Local(LocalType::Pointer {
base: handle,
class,
});
Ok(if let Some(&id) = self.lookup_type.get(&lookup_type) {
id
} else {
let id = self.id_gen.next();
let instruction = Instruction::type_pointer(id, class, ty_id);
instruction.to_words(&mut self.logical_layout.declarations);
self.lookup_type.insert(lookup_type, id);
id
})
}))
}
pub(super) fn get_uint_type_id(&mut self) -> Word {
let local_type = LocalType::Value {
vector_size: None,
scalar: crate::Scalar::U32,
pointer_space: None,
};
let local_type = LocalType::Numeric(NumericType::Scalar(crate::Scalar::U32));
self.get_type_id(local_type.into())
}
pub(super) fn get_float_type_id(&mut self) -> Word {
let local_type = LocalType::Value {
vector_size: None,
scalar: crate::Scalar::F32,
pointer_space: None,
};
let local_type = LocalType::Numeric(NumericType::Scalar(crate::Scalar::F32));
self.get_type_id(local_type.into())
}
pub(super) fn get_uint3_type_id(&mut self) -> Word {
let local_type = LocalType::Value {
vector_size: Some(crate::VectorSize::Tri),
let local_type = LocalType::Numeric(NumericType::Vector {
size: crate::VectorSize::Tri,
scalar: crate::Scalar::U32,
pointer_space: None,
};
});
self.get_type_id(local_type.into())
}
pub(super) fn get_float_pointer_type_id(&mut self, class: spirv::StorageClass) -> Word {
let lookup_type = LookupType::Local(LocalType::Value {
vector_size: None,
scalar: crate::Scalar::F32,
pointer_space: Some(class),
});
if let Some(&id) = self.lookup_type.get(&lookup_type) {
id
} else {
let id = self.id_gen.next();
let ty_id = self.get_float_type_id();
let instruction = Instruction::type_pointer(id, class, ty_id);
instruction.to_words(&mut self.logical_layout.declarations);
self.lookup_type.insert(lookup_type, id);
id
}
}
pub(super) fn get_uint3_pointer_type_id(&mut self, class: spirv::StorageClass) -> Word {
let lookup_type = LookupType::Local(LocalType::Value {
vector_size: Some(crate::VectorSize::Tri),
scalar: crate::Scalar::U32,
pointer_space: Some(class),
});
if let Some(&id) = self.lookup_type.get(&lookup_type) {
id
} else {
let id = self.id_gen.next();
let ty_id = self.get_uint3_type_id();
let instruction = Instruction::type_pointer(id, class, ty_id);
instruction.to_words(&mut self.logical_layout.declarations);
self.lookup_type.insert(lookup_type, id);
id
}
}
pub(super) fn get_bool_type_id(&mut self) -> Word {
let local_type = LocalType::Value {
vector_size: None,
scalar: crate::Scalar::BOOL,
pointer_space: None,
let local_type = LocalType::LocalPointer {
base: NumericType::Scalar(crate::Scalar::F32),
class,
};
self.get_type_id(local_type.into())
}
pub(super) fn get_bool3_type_id(&mut self) -> Word {
let local_type = LocalType::Value {
vector_size: Some(crate::VectorSize::Tri),
scalar: crate::Scalar::BOOL,
pointer_space: None,
pub(super) fn get_uint3_pointer_type_id(&mut self, class: spirv::StorageClass) -> Word {
let local_type = LocalType::LocalPointer {
base: NumericType::Vector {
size: crate::VectorSize::Tri,
scalar: crate::Scalar::U32,
},
class,
};
self.get_type_id(local_type.into())
}
pub(super) fn get_bool_type_id(&mut self) -> Word {
let local_type = LocalType::Numeric(NumericType::Scalar(crate::Scalar::BOOL));
self.get_type_id(local_type.into())
}
pub(super) fn get_bool3_type_id(&mut self) -> Word {
let local_type = LocalType::Numeric(NumericType::Vector {
size: crate::VectorSize::Tri,
scalar: crate::Scalar::BOOL,
});
self.get_type_id(local_type.into())
}
pub(super) fn decorate(&mut self, id: Word, decoration: spirv::Decoration, operands: &[Word]) {
self.annotations
.push(Instruction::decorate(id, decoration, operands));
@ -398,11 +353,7 @@ impl Writer {
let class = spirv::StorageClass::Input;
let handle_ty = ir_module.types[argument.ty].inner.is_handle();
let argument_type_id = match handle_ty {
true => self.get_pointer_id(
&ir_module.types,
argument.ty,
spirv::StorageClass::UniformConstant,
)?,
true => self.get_pointer_id(argument.ty, spirv::StorageClass::UniformConstant),
false => self.get_type_id(LookupType::Handle(argument.ty)),
};
@ -633,8 +584,7 @@ impl Writer {
gv.handle_id = id;
} else if global_needs_wrapper(ir_module, var) {
let class = map_storage_class(var.space);
let pointer_type_id =
self.get_pointer_id(&ir_module.types, var.ty, class)?;
let pointer_type_id = self.get_pointer_id(var.ty, class);
let index_id = self.get_index_constant(0);
let id = self.id_gen.next();
prelude.body.push(Instruction::access_chain(
@ -693,11 +643,9 @@ impl Writer {
}
let init_word = variable.init.map(|constant| context.cached[constant]);
let pointer_type_id = context.writer.get_pointer_id(
&ir_module.types,
variable.ty,
spirv::StorageClass::Function,
)?;
let pointer_type_id = context
.writer
.get_pointer_id(variable.ty, spirv::StorageClass::Function);
let instruction = Instruction::variable(
pointer_type_id,
id,
@ -933,62 +881,50 @@ impl Writer {
Ok(())
}
fn write_numeric_type_declaration_local(&mut self, id: Word, numeric: NumericType) {
let instruction =
match numeric {
NumericType::Scalar(scalar) => self.make_scalar(id, scalar),
NumericType::Vector { size, scalar } => {
let scalar_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Scalar(scalar),
)));
Instruction::type_vector(id, scalar_id, size)
}
NumericType::Matrix {
columns,
rows,
scalar,
} => {
let column_id = self.get_type_id(LookupType::Local(LocalType::Numeric(
NumericType::Vector { size: rows, scalar },
)));
Instruction::type_matrix(id, column_id, columns)
}
};
instruction.to_words(&mut self.logical_layout.declarations);
}
fn write_type_declaration_local(&mut self, id: Word, local_ty: LocalType) {
let instruction = match local_ty {
LocalType::Value {
vector_size: None,
scalar,
pointer_space: None,
} => self.make_scalar(id, scalar),
LocalType::Value {
vector_size: Some(size),
scalar,
pointer_space: None,
} => {
let scalar_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar,
pointer_space: None,
}));
Instruction::type_vector(id, scalar_id, size)
LocalType::Numeric(numeric) => {
self.write_numeric_type_declaration_local(id, numeric);
return;
}
LocalType::Matrix {
columns,
rows,
width,
} => {
let vector_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(rows),
scalar: crate::Scalar::float(width),
pointer_space: None,
}));
Instruction::type_matrix(id, vector_id, columns)
LocalType::LocalPointer { base, class } => {
let base_id = self.get_type_id(LookupType::Local(LocalType::Numeric(base)));
Instruction::type_pointer(id, class, base_id)
}
LocalType::Pointer { base, class } => {
let type_id = self.get_type_id(LookupType::Handle(base));
Instruction::type_pointer(id, class, type_id)
}
LocalType::Value {
vector_size,
scalar,
pointer_space: Some(class),
} => {
let type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size,
scalar,
pointer_space: None,
}));
Instruction::type_pointer(id, class, type_id)
}
LocalType::Image(image) => {
let local_type = LocalType::Value {
vector_size: None,
scalar: crate::Scalar {
kind: image.sampled_type,
width: 4,
},
pointer_space: None,
};
let local_type = LocalType::Numeric(NumericType::Scalar(crate::Scalar {
kind: image.sampled_type,
width: 4,
}));
let type_id = self.get_type_id(LookupType::Local(local_type));
Instruction::type_image(id, type_id, image.dim, image.flags, image.image_format)
}
@ -1025,7 +961,7 @@ impl Writer {
// because some types which map to the same LocalType have different
// capability requirements. See https://github.com/gfx-rs/wgpu/issues/5569
self.request_type_capabilities(&ty.inner)?;
let id = if let Some(local) = make_local(&ty.inner) {
let id = if let Some(local) = LocalType::from_inner(&ty.inner) {
// This type can be represented as a `LocalType`, so check if we've
// already written an instruction for it. If not, do so now, with
// `write_type_declaration_local`.
@ -1222,11 +1158,9 @@ impl Writer {
self.debugs.push(Instruction::name(id, name));
}
}
let type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
scalar: value.scalar(),
pointer_space: None,
}));
let type_id = self.get_type_id(LookupType::Local(LocalType::Numeric(NumericType::Scalar(
value.scalar(),
))));
let instruction = match *value {
crate::Literal::F64(value) => {
let bits = value.to_bits();
@ -1511,7 +1445,7 @@ impl Writer {
binding: &crate::Binding,
) -> Result<Word, Error> {
let id = self.id_gen.next();
let pointer_type_id = self.get_pointer_id(&ir_module.types, ty, class)?;
let pointer_type_id = self.get_pointer_id(ty, class);
Instruction::variable(pointer_type_id, id, class, None)
.to_words(&mut self.logical_layout.declarations);
@ -1618,6 +1552,7 @@ impl Writer {
Bi::InstanceIndex => BuiltIn::InstanceIndex,
Bi::PointSize => BuiltIn::PointSize,
Bi::VertexIndex => BuiltIn::VertexIndex,
Bi::DrawID => BuiltIn::DrawIndex,
// fragment
Bi::FragDepth => BuiltIn::FragDepth,
Bi::PointCoord => BuiltIn::PointCoord,
@ -1843,7 +1778,7 @@ impl Writer {
if substitute_inner_type_lookup.is_some() {
inner_type_id
} else {
self.get_pointer_id(&ir_module.types, global_variable.ty, class)?
self.get_pointer_id(global_variable.ty, class)
}
};

View File

@ -1941,9 +1941,8 @@ fn builtin_str(built_in: crate::BuiltIn) -> Result<&'static str, Error> {
| Bi::CullDistance
| Bi::PointSize
| Bi::PointCoord
| Bi::WorkGroupSize => {
return Err(Error::Custom(format!("Unsupported builtin {built_in:?}")))
}
| Bi::WorkGroupSize
| Bi::DrawID => return Err(Error::Custom(format!("Unsupported builtin {built_in:?}"))),
})
}

View File

@ -202,6 +202,7 @@ impl Frontend {
"gl_VertexIndex" => BuiltIn::VertexIndex,
"gl_SampleID" => BuiltIn::SampleIndex,
"gl_LocalInvocationIndex" => BuiltIn::LocalInvocationIndex,
"gl_DrawID" => BuiltIn::DrawID,
_ => return Ok(None),
};

View File

@ -139,6 +139,7 @@ pub(super) fn map_builtin(word: spirv::Word, invariant: bool) -> Result<crate::B
Some(Bi::InstanceIndex) => crate::BuiltIn::InstanceIndex,
Some(Bi::PointSize) => crate::BuiltIn::PointSize,
Some(Bi::VertexIndex) => crate::BuiltIn::VertexIndex,
Some(Bi::DrawIndex) => crate::BuiltIn::DrawID,
// fragment
Some(Bi::FragDepth) => crate::BuiltIn::FragDepth,
Some(Bi::PointCoord) => crate::BuiltIn::PointCoord,

View File

@ -400,6 +400,7 @@ pub enum BuiltIn {
InstanceIndex,
PointSize,
VertexIndex,
DrawID,
// fragment
FragDepth,
PointCoord,

View File

@ -334,7 +334,11 @@ impl GuardedIndex {
/// Make a `GuardedIndex::Known` from a `GuardedIndex::Expression` if possible.
///
/// Return values that are already `Known` unchanged.
fn try_resolve_to_constant(&mut self, function: &crate::Function, module: &crate::Module) {
pub(crate) fn try_resolve_to_constant(
&mut self,
function: &crate::Function,
module: &crate::Module,
) {
if let GuardedIndex::Expression(expr) = *self {
if let Ok(value) = module
.to_ctx()

View File

@ -194,7 +194,11 @@ impl VaryingContext<'_> {
}
let (visible, type_good) = match built_in {
Bi::BaseInstance | Bi::BaseVertex | Bi::InstanceIndex | Bi::VertexIndex => (
Bi::BaseInstance
| Bi::BaseVertex
| Bi::InstanceIndex
| Bi::VertexIndex
| Bi::DrawID => (
self.stage == St::Vertex && !self.output,
*ty_inner == Ti::Scalar(crate::Scalar::U32),
),

File diff suppressed because one or more lines are too long

View File

@ -80,7 +80,7 @@ version = "22.0.0"
path = "../naga"
[dependencies.once_cell]
version = "1.19.0"
version = "1.20.2"
[dependencies.parking_lot]
version = "0.12.1"

File diff suppressed because one or more lines are too long

View File

@ -86,7 +86,7 @@ version = "22.0.0"
path = "../naga"
[dependencies.once_cell]
version = "1.19.0"
version = "1.20.2"
[dependencies.parking_lot]
version = "0.12.1"