GL_KHR_memory_scope_semantics

commit 36831c9bad (parent 97068d8b30), mirror of https://github.com/RPCS3/glslang.git
@ -39,5 +39,6 @@ static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit
static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage";
static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class";
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";

#endif // #ifndef GLSLextKHR_H
@ -129,6 +129,10 @@ protected:
    spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier);
    spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier);
    spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier);
    spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type);
    spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
    spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
    spv::Scope TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
    spv::BuiltIn TranslateBuiltInDecoration(glslang::TBuiltInVariable, bool memberDeclaration);
    spv::ImageFormat TranslateImageFormat(const glslang::TType& type);
    spv::SelectionControlMask TranslateSelectionControl(const glslang::TIntermSelection&) const;
@ -327,13 +331,15 @@ spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useSto
}

// Translate glslang type to SPIR-V memory decorations.
void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory)
void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory, bool useVulkanMemoryModel)
{
    if (qualifier.coherent)
        memory.push_back(spv::DecorationCoherent);
    if (qualifier.volatil) {
        memory.push_back(spv::DecorationVolatile);
        memory.push_back(spv::DecorationCoherent);
    if (!useVulkanMemoryModel) {
        if (qualifier.coherent)
            memory.push_back(spv::DecorationCoherent);
        if (qualifier.volatil) {
            memory.push_back(spv::DecorationVolatile);
            memory.push_back(spv::DecorationCoherent);
        }
    }
    if (qualifier.restrict)
        memory.push_back(spv::DecorationRestrict);
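Note on the hunk above: with GL_KHR_memory_scope_semantics the per-variable Coherent/Volatile decorations are only kept for the classic GLSL450 memory model; under the Vulkan memory model the same intent is expressed per access instead (see TranslateMemoryAccess below). A minimal standalone C++ sketch of that selection, using illustrative stand-in types rather than the real glslang/SPIR-V headers:

#include <cstdio>
#include <vector>

// Illustrative stand-ins for the SPIR-V decorations used above.
enum Decoration { DecorationCoherent, DecorationVolatile, DecorationRestrict };

struct Qualifier { bool coherent, volatil, restrict_; };

// Mirrors the control flow of TranslateMemoryDecoration after this patch:
// Coherent/Volatile become per-access operands under the Vulkan memory model,
// so they are only emitted as decorations for the old model.
std::vector<Decoration> memoryDecorations(const Qualifier& q, bool useVulkanMemoryModel)
{
    std::vector<Decoration> memory;
    if (!useVulkanMemoryModel) {
        if (q.coherent)
            memory.push_back(DecorationCoherent);
        if (q.volatil) {
            memory.push_back(DecorationVolatile);
            memory.push_back(DecorationCoherent);
        }
    }
    if (q.restrict_)
        memory.push_back(DecorationRestrict);
    return memory;
}

int main()
{
    Qualifier q{true, true, true};
    std::printf("old model: %zu decorations, new model: %zu decorations\n",
                memoryDecorations(q, false).size(), memoryDecorations(q, true).size()); // 4 vs 1
}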
@ -450,6 +456,105 @@ spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glsl
    return spv::DecorationMax;
}

spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
{
    if (!glslangIntermediate->usingVulkanMemoryModel() || coherentFlags.isImage) {
        return spv::MemoryAccessMaskNone;
    }
    spv::MemoryAccessMask mask = spv::MemoryAccessMaskNone;
    if (coherentFlags.volatil ||
        coherentFlags.coherent ||
        coherentFlags.devicecoherent ||
        coherentFlags.queuefamilycoherent ||
        coherentFlags.workgroupcoherent ||
        coherentFlags.subgroupcoherent) {
        mask = mask | spv::MemoryAccessMakePointerAvailableKHRMask |
                      spv::MemoryAccessMakePointerVisibleKHRMask;
    }
    if (coherentFlags.nonprivate) {
        mask = mask | spv::MemoryAccessNonPrivatePointerKHRMask;
    }
    if (coherentFlags.volatil) {
        mask = mask | spv::MemoryAccessVolatileMask;
    }
    if (mask != spv::MemoryAccessMaskNone) {
        builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
    }
    return mask;
}

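For reference, a self-contained sketch of the mask folding above. The constants are copied from the MemoryAccessMask additions in this commit's spirv.hpp hunk; the CoherentFlags struct here is a plain stand-in for spv::Builder::AccessChain::CoherentFlags:

#include <cstdio>

// Values copied from the MemoryAccessMask additions in this patch (spirv.hpp).
enum MemoryAccessMask : unsigned {
    MemoryAccessMaskNone                    = 0x0,
    MemoryAccessVolatileMask                = 0x1,
    MemoryAccessMakePointerAvailableKHRMask = 0x8,
    MemoryAccessMakePointerVisibleKHRMask   = 0x10,
    MemoryAccessNonPrivatePointerKHRMask    = 0x20,
};

struct CoherentFlags {
    bool coherent = false, devicecoherent = false, queuefamilycoherent = false;
    bool workgroupcoherent = false, subgroupcoherent = false;
    bool nonprivate = false, volatil = false, isImage = false;
};

// Mirrors TranslateMemoryAccess: any *coherent qualifier requests both
// availability (for stores) and visibility (for loads); the caller later
// masks off whichever half does not apply to the instruction being built.
unsigned translateMemoryAccess(const CoherentFlags& f, bool usingVulkanMemoryModel)
{
    if (!usingVulkanMemoryModel || f.isImage)
        return MemoryAccessMaskNone;
    unsigned mask = MemoryAccessMaskNone;
    if (f.volatil || f.coherent || f.devicecoherent || f.queuefamilycoherent ||
        f.workgroupcoherent || f.subgroupcoherent)
        mask |= MemoryAccessMakePointerAvailableKHRMask | MemoryAccessMakePointerVisibleKHRMask;
    if (f.nonprivate)
        mask |= MemoryAccessNonPrivatePointerKHRMask;
    if (f.volatil)
        mask |= MemoryAccessVolatileMask;
    return mask;
}

int main()
{
    CoherentFlags f;
    f.workgroupcoherent = true;
    f.nonprivate = true; // *coherent implies nonprivate in GLSL
    std::printf("mask = 0x%x\n", translateMemoryAccess(f, true)); // 0x38
}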
spv::ImageOperandsMask TGlslangToSpvTraverser::TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
{
    if (!glslangIntermediate->usingVulkanMemoryModel()) {
        return spv::ImageOperandsMaskNone;
    }
    spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
    if (coherentFlags.volatil ||
        coherentFlags.coherent ||
        coherentFlags.devicecoherent ||
        coherentFlags.queuefamilycoherent ||
        coherentFlags.workgroupcoherent ||
        coherentFlags.subgroupcoherent) {
        mask = mask | spv::ImageOperandsMakeTexelAvailableKHRMask |
                      spv::ImageOperandsMakeTexelVisibleKHRMask;
    }
    if (coherentFlags.nonprivate) {
        mask = mask | spv::ImageOperandsNonPrivateTexelKHRMask;
    }
    if (coherentFlags.volatil) {
        mask = mask | spv::ImageOperandsVolatileTexelKHRMask;
    }
    if (mask != spv::ImageOperandsMaskNone) {
        builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
    }
    return mask;
}

spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCoherent(const glslang::TType& type)
{
    spv::Builder::AccessChain::CoherentFlags flags;
    flags.coherent = type.getQualifier().coherent;
    flags.devicecoherent = type.getQualifier().devicecoherent;
    flags.queuefamilycoherent = type.getQualifier().queuefamilycoherent;
    // shared variables are implicitly workgroupcoherent in GLSL.
    flags.workgroupcoherent = type.getQualifier().workgroupcoherent ||
                              type.getQualifier().storage == glslang::EvqShared;
    flags.subgroupcoherent = type.getQualifier().subgroupcoherent;
    // *coherent variables are implicitly nonprivate in GLSL
    flags.nonprivate = type.getQualifier().nonprivate ||
                       type.getQualifier().subgroupcoherent ||
                       type.getQualifier().workgroupcoherent ||
                       type.getQualifier().queuefamilycoherent ||
                       type.getQualifier().devicecoherent ||
                       type.getQualifier().coherent;
    flags.volatil = type.getQualifier().volatil;
    flags.isImage = type.getBasicType() == glslang::EbtSampler;
    return flags;
}

spv::Scope TGlslangToSpvTraverser::TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
{
    spv::Scope scope;
    if (coherentFlags.coherent) {
        // coherent defaults to Device scope in the old model, QueueFamilyKHR scope in the new model
        scope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
    } else if (coherentFlags.devicecoherent) {
        scope = spv::ScopeDevice;
    } else if (coherentFlags.queuefamilycoherent) {
        scope = spv::ScopeQueueFamilyKHR;
    } else if (coherentFlags.workgroupcoherent) {
        scope = spv::ScopeWorkgroup;
    } else if (coherentFlags.subgroupcoherent) {
        scope = spv::ScopeSubgroup;
    } else {
        scope = spv::ScopeMax;
    }
    if (glslangIntermediate->usingVulkanMemoryModel() && scope == spv::ScopeDevice) {
        builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
    }
    return scope;
}

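A standalone sketch of the scope precedence implemented above, with scope values copied from the Scope enum added later in this commit; plain coherent only maps to QueueFamilyKHR when the Vulkan memory model is in use:

#include <cstdio>

// Scope values as listed in the spirv.hpp hunk of this commit.
enum Scope : unsigned {
    ScopeCrossDevice = 0, ScopeDevice = 1, ScopeWorkgroup = 2,
    ScopeSubgroup = 3, ScopeInvocation = 4, ScopeQueueFamilyKHR = 5,
    ScopeMax = 0x7fffffff
};

struct CoherentFlags {
    bool coherent = false, devicecoherent = false, queuefamilycoherent = false;
    bool workgroupcoherent = false, subgroupcoherent = false;
};

// Mirrors TranslateMemoryScope: the most specific explicit qualifier wins,
// and bare `coherent` keeps its old Device meaning unless the Vulkan
// memory model is in use.
Scope translateMemoryScope(const CoherentFlags& f, bool usingVulkanMemoryModel)
{
    if (f.coherent)            return usingVulkanMemoryModel ? ScopeQueueFamilyKHR : ScopeDevice;
    if (f.devicecoherent)      return ScopeDevice;
    if (f.queuefamilycoherent) return ScopeQueueFamilyKHR;
    if (f.workgroupcoherent)   return ScopeWorkgroup;
    if (f.subgroupcoherent)    return ScopeSubgroup;
    return ScopeMax; // no coherence requested
}

int main()
{
    CoherentFlags f;
    f.coherent = true;
    std::printf("old model: %u, new model: %u\n",
                (unsigned)translateMemoryScope(f, false),
                (unsigned)translateMemoryScope(f, true)); // 1, 5
}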
// Translate a glslang built-in variable to a SPIR-V built in decoration. Also generate
// associated capabilities when required. For some built-in variables, a capability
// is generated only when using the variable in an executable instruction, but not when
@ -979,6 +1084,16 @@ void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& pa
        child.sample = true;
    if (parent.coherent)
        child.coherent = true;
    if (parent.devicecoherent)
        child.devicecoherent = true;
    if (parent.queuefamilycoherent)
        child.queuefamilycoherent = true;
    if (parent.workgroupcoherent)
        child.workgroupcoherent = true;
    if (parent.subgroupcoherent)
        child.subgroupcoherent = true;
    if (parent.nonprivate)
        child.nonprivate = true;
    if (parent.volatil)
        child.volatil = true;
    if (parent.restrict)
@ -1045,7 +1160,12 @@ TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, const gl
        builder.setSourceText(text);
    }
    stdBuiltins = builder.import("GLSL.std.450");
    builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelGLSL450);
    if (glslangIntermediate->usingVulkanMemoryModel()) {
        builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelVulkanKHR);
        builder.addExtension(spv::E_SPV_KHR_vulkan_memory_model);
    } else {
        builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelGLSL450);
    }
    shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str());
    entryPoint = builder.addEntryPoint(executionModel, shaderEntry, glslangIntermediate->getEntryPointName().c_str());

@ -1351,7 +1471,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
|
||||
|
||||
// store the result
|
||||
builder.setAccessChain(lValue);
|
||||
multiTypeStore(node->getType(), rValue);
|
||||
multiTypeStore(node->getLeft()->getType(), rValue);
|
||||
|
||||
// assignments are expressions having an rValue after they are evaluated...
|
||||
builder.clearAccessChain();
|
||||
@ -1388,7 +1508,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
|
||||
}
|
||||
|
||||
// normal case for indexing array or structure or block
|
||||
builder.accessChainPush(builder.makeIntConstant(spvIndex));
|
||||
builder.accessChainPush(builder.makeIntConstant(spvIndex), TranslateCoherent(node->getLeft()->getType()));
|
||||
|
||||
// Add capabilities here for accessing PointSize and clip/cull distance.
|
||||
// We have deferred generation of associated capabilities until now.
|
||||
@ -1424,7 +1544,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
|
||||
if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector())
|
||||
builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()));
|
||||
else
|
||||
builder.accessChainPush(index);
|
||||
builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()));
|
||||
}
|
||||
return false;
|
||||
case glslang::EOpVectorSwizzle:
|
||||
@ -1658,11 +1778,11 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
|
||||
builder.setAccessChainRValue(result);
|
||||
|
||||
return false;
|
||||
} else if (node->getOp() == glslang::EOpImageStore ||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
} else if (node->getOp() == glslang::EOpImageStore || node->getOp() == glslang::EOpImageStoreLod) {
|
||||
#else
|
||||
} else if (node->getOp() == glslang::EOpImageStore) {
|
||||
node->getOp() == glslang::EOpImageStoreLod ||
|
||||
#endif
|
||||
node->getOp() == glslang::EOpImageAtomicStore) {
|
||||
// "imageStore" is a special case, which has no result
|
||||
return false;
|
||||
}
|
||||
@ -1952,6 +2072,10 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
|
||||
// These all have 0 operands and will naturally finish up in the code below for 0 operands
|
||||
break;
|
||||
|
||||
case glslang::EOpAtomicStore:
|
||||
noReturnValue = true;
|
||||
// fallthrough
|
||||
case glslang::EOpAtomicLoad:
|
||||
case glslang::EOpAtomicAdd:
|
||||
case glslang::EOpAtomicMin:
|
||||
case glslang::EOpAtomicMax:
|
||||
@ -2050,6 +2174,8 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
|
||||
case glslang::EOpAtomicXor:
|
||||
case glslang::EOpAtomicExchange:
|
||||
case glslang::EOpAtomicCompSwap:
|
||||
case glslang::EOpAtomicLoad:
|
||||
case glslang::EOpAtomicStore:
|
||||
case glslang::EOpAtomicCounterAdd:
|
||||
case glslang::EOpAtomicCounterSubtract:
|
||||
case glslang::EOpAtomicCounterMin:
|
||||
@ -2876,7 +3002,7 @@ void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
|
||||
qualifier.storage == glslang::EvqBuffer) {
|
||||
// Add memory decorations only to top-level members of shader storage block
|
||||
std::vector<spv::Decoration> memory;
|
||||
TranslateMemoryDecoration(memberQualifier, memory);
|
||||
TranslateMemoryDecoration(memberQualifier, memory, glslangIntermediate->usingVulkanMemoryModel());
|
||||
for (unsigned int i = 0; i < memory.size(); ++i)
|
||||
builder.addMemberDecoration(spvType, member, memory[i]);
|
||||
}
|
||||
@ -2987,8 +3113,15 @@ spv::Id TGlslangToSpvTraverser::makeArraySizeId(const glslang::TArraySizes& arra
spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type)
{
    spv::Id nominalTypeId = builder.accessChainGetInferredType();

    spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
    coherentFlags |= TranslateCoherent(type);

    spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type),
        TranslateNonUniformDecoration(type.getQualifier()), nominalTypeId);
        TranslateNonUniformDecoration(type.getQualifier()),
        nominalTypeId,
        spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask),
        TranslateMemoryScope(coherentFlags));

    // Need to convert to abstract types when necessary
    if (type.getBasicType() == glslang::EbtBool) {
@ -3044,7 +3177,12 @@ void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::I
        }
    }

    builder.accessChainStore(rvalue);
    spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
    coherentFlags |= TranslateCoherent(type);

    builder.accessChainStore(rvalue,
        spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerVisibleKHRMask),
        TranslateMemoryScope(coherentFlags));
}

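The two access-chain call sites above are symmetric: a load only needs the visibility half of the mask and a store only the availability half, so each strips the other bit before handing the mask to the builder. A small sketch of that split, using the bit values from this commit's spirv.hpp hunk:

#include <cstdio>

enum : unsigned {
    MakePointerAvailableKHR = 0x8,   // relevant to stores (release side)
    MakePointerVisibleKHR   = 0x10,  // relevant to loads (acquire side)
    NonPrivatePointerKHR    = 0x20,
};

int main()
{
    // Full mask as produced by TranslateMemoryAccess for a coherent pointer.
    unsigned coherentMask = MakePointerAvailableKHR | MakePointerVisibleKHR | NonPrivatePointerKHR;

    // accessChainLoad: a load cannot "make available", so that bit is dropped.
    unsigned loadMask  = coherentMask & ~MakePointerAvailableKHR;
    // accessChainStore: a store cannot "make visible", so that bit is dropped.
    unsigned storeMask = coherentMask & ~MakePointerVisibleKHR;

    std::printf("load mask 0x%x, store mask 0x%x\n", loadMask, storeMask); // 0x30, 0x28
}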
// For storing when types match at the glslang level, but not might match at the
|
||||
@ -3090,7 +3228,7 @@ void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id
|
||||
// set up the target storage
|
||||
builder.clearAccessChain();
|
||||
builder.setAccessChainLValue(lValue);
|
||||
builder.accessChainPush(builder.makeIntConstant(index));
|
||||
builder.accessChainPush(builder.makeIntConstant(index), TranslateCoherent(type));
|
||||
|
||||
// store the member
|
||||
multiTypeStore(glslangElementType, elementRValue);
|
||||
@ -3110,7 +3248,7 @@ void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id
|
||||
// set up the target storage
|
||||
builder.clearAccessChain();
|
||||
builder.setAccessChainLValue(lValue);
|
||||
builder.accessChainPush(builder.makeIntConstant(m));
|
||||
builder.accessChainPush(builder.makeIntConstant(m), TranslateCoherent(type));
|
||||
|
||||
// store the member
|
||||
multiTypeStore(glslangMemberType, memberRValue);
|
||||
@ -3287,11 +3425,11 @@ bool TGlslangToSpvTraverser::originalParam(glslang::TStorageQualifier qualifier,
|
||||
// Make all the functions, skeletally, without actually visiting their bodies.
|
||||
void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslFunctions)
|
||||
{
|
||||
const auto getParamDecorations = [](std::vector<spv::Decoration>& decorations, const glslang::TType& type) {
|
||||
const auto getParamDecorations = [](std::vector<spv::Decoration>& decorations, const glslang::TType& type, bool useVulkanMemoryModel) {
|
||||
spv::Decoration paramPrecision = TranslatePrecisionDecoration(type);
|
||||
if (paramPrecision != spv::NoPrecision)
|
||||
decorations.push_back(paramPrecision);
|
||||
TranslateMemoryDecoration(type.getQualifier(), decorations);
|
||||
TranslateMemoryDecoration(type.getQualifier(), decorations, useVulkanMemoryModel);
|
||||
};
|
||||
|
||||
for (int f = 0; f < (int)glslFunctions.size(); ++f) {
|
||||
@ -3330,7 +3468,7 @@ void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslF
|
||||
typeId = builder.makePointer(spv::StorageClassFunction, typeId);
|
||||
else
|
||||
rValueParameters.insert(parameters[p]->getAsSymbolNode()->getId());
|
||||
getParamDecorations(paramDecorations[p], paramType);
|
||||
getParamDecorations(paramDecorations[p], paramType, glslangIntermediate->usingVulkanMemoryModel());
|
||||
paramTypes.push_back(typeId);
|
||||
}
|
||||
|
||||
@ -3420,6 +3558,8 @@ void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate&
|
||||
case glslang::EOpImageAtomicXor:
|
||||
case glslang::EOpImageAtomicExchange:
|
||||
case glslang::EOpImageAtomicCompSwap:
|
||||
case glslang::EOpImageAtomicLoad:
|
||||
case glslang::EOpImageAtomicStore:
|
||||
if (i == 0)
|
||||
lvalue = true;
|
||||
break;
|
||||
@ -3547,8 +3687,10 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
||||
builder.setLine(node->getLoc().line);
|
||||
|
||||
// Process a GLSL texturing op (will be SPV image)
|
||||
const glslang::TSampler sampler = node->getAsAggregate() ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType().getSampler()
|
||||
: node->getAsUnaryNode()->getOperand()->getAsTyped()->getType().getSampler();
|
||||
|
||||
const glslang::TType &imageType = node->getAsAggregate() ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType()
|
||||
: node->getAsUnaryNode()->getOperand()->getAsTyped()->getType();
|
||||
const glslang::TSampler sampler = imageType.getSampler();
|
||||
#ifdef AMD_EXTENSIONS
|
||||
bool f16ShadowCompare = (sampler.shadow && node->getAsAggregate())
|
||||
? node->getAsAggregate()->getSequence()[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16
|
||||
@ -3651,22 +3793,38 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
||||
#else
|
||||
if (node->getOp() == glslang::EOpImageLoad) {
|
||||
#endif
|
||||
spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
|
||||
if (sampler.ms) {
|
||||
spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
|
||||
operands.push_back(imageOperands);
|
||||
spv::IdImmediate imageOperand = { true, *opIt };
|
||||
operands.push_back(imageOperand);
|
||||
mask = mask | spv::ImageOperandsSampleMask;
|
||||
}
|
||||
#ifdef AMD_EXTENSIONS
|
||||
} else if (cracked.lod) {
|
||||
if (cracked.lod) {
|
||||
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
|
||||
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
|
||||
|
||||
spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
|
||||
operands.push_back(imageOperands);
|
||||
spv::IdImmediate imageOperand = { true, *opIt };
|
||||
operands.push_back(imageOperand);
|
||||
#endif
|
||||
mask = mask | spv::ImageOperandsLodMask;
|
||||
}
|
||||
#endif
|
||||
mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
|
||||
mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
|
||||
if (mask) {
|
||||
spv::IdImmediate imageOperands = { false, (unsigned int)mask };
|
||||
operands.push_back(imageOperands);
|
||||
}
|
||||
if (mask & spv::ImageOperandsSampleMask) {
|
||||
spv::IdImmediate imageOperand = { true, *opIt++ };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
#ifdef AMD_EXTENSIONS
|
||||
if (mask & spv::ImageOperandsLodMask) {
|
||||
spv::IdImmediate imageOperand = { true, *opIt++ };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
#endif
|
||||
if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
|
||||
spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
|
||||
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
|
||||
builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
|
||||
|
||||
@ -3683,29 +3841,52 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
||||
#else
|
||||
} else if (node->getOp() == glslang::EOpImageStore) {
|
||||
#endif
|
||||
if (sampler.ms) {
|
||||
spv::IdImmediate texel = { true, *(opIt + 1) };
|
||||
operands.push_back(texel);
|
||||
spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
|
||||
operands.push_back(imageOperands);
|
||||
spv::IdImmediate imageOperand = { true, *opIt };
|
||||
operands.push_back(imageOperand);
|
||||
#ifdef AMD_EXTENSIONS
|
||||
} else if (cracked.lod) {
|
||||
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
|
||||
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
|
||||
|
||||
// Push the texel value before the operands
|
||||
#ifdef AMD_EXTENSIONS
|
||||
if (sampler.ms || cracked.lod) {
|
||||
#else
|
||||
if (sampler.ms) {
|
||||
#endif
|
||||
spv::IdImmediate texel = { true, *(opIt + 1) };
|
||||
operands.push_back(texel);
|
||||
spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
|
||||
operands.push_back(imageOperands);
|
||||
spv::IdImmediate imageOperand = { true, *opIt };
|
||||
operands.push_back(imageOperand);
|
||||
#endif
|
||||
} else {
|
||||
spv::IdImmediate texel = { true, *opIt };
|
||||
operands.push_back(texel);
|
||||
}
|
||||
|
||||
spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
|
||||
if (sampler.ms) {
|
||||
mask = mask | spv::ImageOperandsSampleMask;
|
||||
}
|
||||
#ifdef AMD_EXTENSIONS
|
||||
if (cracked.lod) {
|
||||
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
|
||||
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
|
||||
mask = mask | spv::ImageOperandsLodMask;
|
||||
}
|
||||
#endif
|
||||
mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
|
||||
mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelVisibleKHRMask);
|
||||
if (mask) {
|
||||
spv::IdImmediate imageOperands = { false, (unsigned int)mask };
|
||||
operands.push_back(imageOperands);
|
||||
}
|
||||
if (mask & spv::ImageOperandsSampleMask) {
|
||||
spv::IdImmediate imageOperand = { true, *opIt++ };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
#ifdef AMD_EXTENSIONS
|
||||
if (mask & spv::ImageOperandsLodMask) {
|
||||
spv::IdImmediate imageOperand = { true, *opIt++ };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
#endif
|
||||
if (mask & spv::ImageOperandsMakeTexelAvailableKHRMask) {
|
||||
spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
|
||||
builder.createNoResultOp(spv::OpImageWrite, operands);
|
||||
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
|
||||
builder.addCapability(spv::CapabilityStorageImageWriteWithoutFormat);
|
||||
@ -3719,21 +3900,37 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
||||
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
|
||||
builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
|
||||
|
||||
spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
|
||||
if (sampler.ms) {
|
||||
spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
|
||||
operands.push_back(imageOperands);
|
||||
spv::IdImmediate imageOperand = { true, *opIt++ };
|
||||
operands.push_back(imageOperand);
|
||||
mask = mask | spv::ImageOperandsSampleMask;
|
||||
}
|
||||
#ifdef AMD_EXTENSIONS
|
||||
} else if (cracked.lod) {
|
||||
if (cracked.lod) {
|
||||
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
|
||||
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
|
||||
|
||||
spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
|
||||
mask = mask | spv::ImageOperandsLodMask;
|
||||
}
|
||||
#endif
|
||||
mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
|
||||
mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
|
||||
if (mask) {
|
||||
spv::IdImmediate imageOperands = { false, (unsigned int)mask };
|
||||
operands.push_back(imageOperands);
|
||||
}
|
||||
if (mask & spv::ImageOperandsSampleMask) {
|
||||
spv::IdImmediate imageOperand = { true, *opIt++ };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
#ifdef AMD_EXTENSIONS
|
||||
if (mask & spv::ImageOperandsLodMask) {
|
||||
spv::IdImmediate imageOperand = { true, *opIt++ };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
#endif
|
||||
if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
|
||||
spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
|
||||
operands.push_back(imageOperand);
|
||||
}
|
||||
|
||||
// Create the return type that was a special structure
|
||||
@ -3756,7 +3953,14 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
||||
spv::IdImmediate sample = { true, sampler.ms ? *(opIt++) : builder.makeUintConstant(0) };
|
||||
operands.push_back(sample);
|
||||
|
||||
spv::Id resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
|
||||
spv::Id resultTypeId;
|
||||
// imageAtomicStore has a void return type so base the pointer type on
|
||||
// the type of the value operand.
|
||||
if (node->getOp() == glslang::EOpImageAtomicStore) {
|
||||
resultTypeId = builder.makePointer(spv::StorageClassImage, builder.getTypeId(operands[2].word));
|
||||
} else {
|
||||
resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
|
||||
}
|
||||
spv::Id pointer = builder.createOp(spv::OpImageTexelPointer, resultTypeId, operands);
|
||||
|
||||
std::vector<spv::Id> operands;
|
||||
@ -3972,6 +4176,16 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
|
||||
}
|
||||
}
|
||||
|
||||
// nonprivate
|
||||
if (imageType.getQualifier().nonprivate) {
|
||||
params.nonprivate = true;
|
||||
}
|
||||
|
||||
// volatile
|
||||
if (imageType.getQualifier().volatil) {
|
||||
params.volatil = true;
|
||||
}
|
||||
|
||||
std::vector<spv::Id> result( 1,
|
||||
builder.createTextureCall(precision, resultType(), sparse, cracked.fetch, cracked.proj, cracked.gather, noImplicitLod, params)
|
||||
);
|
||||
@ -5340,8 +5554,14 @@ spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv
|
||||
opCode = spv::OpAtomicIDecrement;
|
||||
break;
|
||||
case glslang::EOpAtomicCounter:
|
||||
case glslang::EOpImageAtomicLoad:
|
||||
case glslang::EOpAtomicLoad:
|
||||
opCode = spv::OpAtomicLoad;
|
||||
break;
|
||||
case glslang::EOpAtomicStore:
|
||||
case glslang::EOpImageAtomicStore:
|
||||
opCode = spv::OpAtomicStore;
|
||||
break;
|
||||
default:
|
||||
assert(0);
|
||||
break;
|
||||
@ -5352,36 +5572,82 @@ spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv
|
||||
|
||||
// Sort out the operands
|
||||
// - mapping from glslang -> SPV
|
||||
// - there are extra SPV operands with no glslang source
|
||||
// - there are extra SPV operands that are optional in glslang
|
||||
// - compare-exchange swaps the value and comparator
|
||||
// - compare-exchange has an extra memory semantics
|
||||
// - EOpAtomicCounterDecrement needs a post decrement
|
||||
std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
|
||||
auto opIt = operands.begin(); // walk the glslang operands
|
||||
spvAtomicOperands.push_back(*(opIt++));
|
||||
spvAtomicOperands.push_back(builder.makeUintConstant(spv::ScopeDevice)); // TBD: what is the correct scope?
|
||||
spvAtomicOperands.push_back(builder.makeUintConstant(spv::MemorySemanticsMaskNone)); // TBD: what are the correct memory semantics?
|
||||
if (opCode == spv::OpAtomicCompareExchange) {
|
||||
// There are 2 memory semantics for compare-exchange. And the operand order of "comparator" and "new value" in GLSL
|
||||
// differs from that in SPIR-V. Hence, special processing is required.
|
||||
spvAtomicOperands.push_back(builder.makeUintConstant(spv::MemorySemanticsMaskNone));
|
||||
spvAtomicOperands.push_back(*(opIt + 1));
|
||||
spvAtomicOperands.push_back(*opIt);
|
||||
opIt += 2;
|
||||
spv::Id pointerId = 0, compareId = 0, valueId = 0;
|
||||
// scope defaults to Device in the old model, QueueFamilyKHR in the new model
|
||||
spv::Id scopeId;
|
||||
if (glslangIntermediate->usingVulkanMemoryModel()) {
|
||||
scopeId = builder.makeUintConstant(spv::ScopeQueueFamilyKHR);
|
||||
} else {
|
||||
scopeId = builder.makeUintConstant(spv::ScopeDevice);
|
||||
}
|
||||
// semantics default to relaxed
|
||||
spv::Id semanticsId = builder.makeUintConstant(spv::MemorySemanticsMaskNone);
|
||||
spv::Id semanticsId2 = semanticsId;
|
||||
|
||||
pointerId = operands[0];
|
||||
if (opCode == spv::OpAtomicIIncrement || opCode == spv::OpAtomicIDecrement) {
|
||||
// no additional operands
|
||||
} else if (opCode == spv::OpAtomicCompareExchange) {
|
||||
compareId = operands[1];
|
||||
valueId = operands[2];
|
||||
if (operands.size() > 3) {
|
||||
scopeId = operands[3];
|
||||
semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[4]) | builder.getConstantScalar(operands[5]));
|
||||
semanticsId2 = builder.makeUintConstant(builder.getConstantScalar(operands[6]) | builder.getConstantScalar(operands[7]));
|
||||
}
|
||||
} else if (opCode == spv::OpAtomicLoad) {
|
||||
if (operands.size() > 1) {
|
||||
scopeId = operands[1];
|
||||
semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]));
|
||||
}
|
||||
} else {
|
||||
// atomic store or RMW
|
||||
valueId = operands[1];
|
||||
if (operands.size() > 2) {
|
||||
scopeId = operands[2];
|
||||
semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[3]) | builder.getConstantScalar(operands[4]));
|
||||
}
|
||||
}
|
||||
|
||||
// Add the rest of the operands, skipping any that were dealt with above.
|
||||
for (; opIt != operands.end(); ++opIt)
|
||||
spvAtomicOperands.push_back(*opIt);
|
||||
// Check for capabilities
|
||||
unsigned semanticsImmediate = builder.getConstantScalar(semanticsId) | builder.getConstantScalar(semanticsId2);
|
||||
if (semanticsImmediate & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
|
||||
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
||||
}
|
||||
|
||||
spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
|
||||
if (glslangIntermediate->usingVulkanMemoryModel() && builder.getConstantScalar(scopeId) == spv::ScopeDevice) {
|
||||
builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
|
||||
}
|
||||
|
||||
// GLSL and HLSL atomic-counter decrement return post-decrement value,
|
||||
// while SPIR-V returns pre-decrement value. Translate between these semantics.
|
||||
if (op == glslang::EOpAtomicCounterDecrement)
|
||||
resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
|
||||
std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
|
||||
spvAtomicOperands.push_back(pointerId);
|
||||
spvAtomicOperands.push_back(scopeId);
|
||||
spvAtomicOperands.push_back(semanticsId);
|
||||
if (opCode == spv::OpAtomicCompareExchange) {
|
||||
spvAtomicOperands.push_back(semanticsId2);
|
||||
spvAtomicOperands.push_back(valueId);
|
||||
spvAtomicOperands.push_back(compareId);
|
||||
} else if (opCode != spv::OpAtomicLoad && opCode != spv::OpAtomicIIncrement && opCode != spv::OpAtomicIDecrement) {
|
||||
spvAtomicOperands.push_back(valueId);
|
||||
}
|
||||
|
||||
return resultId;
|
||||
if (opCode == spv::OpAtomicStore) {
|
||||
builder.createNoResultOp(opCode, spvAtomicOperands);
|
||||
return 0;
|
||||
} else {
|
||||
spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
|
||||
|
||||
// GLSL and HLSL atomic-counter decrement return post-decrement value,
|
||||
// while SPIR-V returns pre-decrement value. Translate between these semantics.
|
||||
if (op == glslang::EOpAtomicCounterDecrement)
|
||||
resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
|
||||
|
||||
return resultId;
|
||||
}
|
||||
}
|
||||
|
||||
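The operand shuffle above is the subtle part of this hunk: the extended GLSL atomics pass the comparator before the new value and split each memory-semantics argument into a (storage, ordering) pair that is OR'd together, while OpAtomicCompareExchange expects pointer, scope, equal semantics, unequal semantics, value, comparator. A standalone sketch of the compare-exchange mapping, with plain integers standing in for SPIR-V ids:

#include <cstdio>
#include <vector>

using Id = unsigned; // stand-in for spv::Id; here the "ids" are just plain numbers

// Sketch of the compare-exchange operand shuffle done above.  GLSL (with
// GL_KHR_memory_scope_semantics) supplies:
//   [0] pointer, [1] comparator, [2] new value,
//   [3] scope, [4]|[5] "equal" semantics pair, [6]|[7] "unequal" semantics pair
// while SPIR-V OpAtomicCompareExchange expects:
//   pointer, scope, equal semantics, unequal semantics, new value, comparator
// (The real code folds each semantics pair through builder.getConstantScalar /
// makeUintConstant; here they are OR'd directly for illustration.)
std::vector<Id> mapCompSwapOperands(const std::vector<Id>& glsl)
{
    Id semEqual   = glsl[4] | glsl[5];
    Id semUnequal = glsl[6] | glsl[7];
    return { glsl[0], glsl[3], semEqual, semUnequal, glsl[2], glsl[1] };
}

int main()
{
    std::vector<Id> spvOperands = mapCompSwapOperands({ 10, 11, 12, 13, 1, 2, 4, 8 });
    for (Id id : spvOperands)
        std::printf("%u ", id);   // prints: 10 13 3 12 12 11
    std::printf("\n");
}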
// Create group invocation operations.
|
||||
@ -6282,7 +6548,41 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::
|
||||
libCall = spv::InterpolateAtVertexAMD;
|
||||
break;
|
||||
#endif
|
||||
|
||||
case glslang::EOpBarrier:
|
||||
{
|
||||
// This is for the extended controlBarrier function, with four operands.
|
||||
// The unextended barrier() goes through createNoArgOperation.
|
||||
assert(operands.size() == 4);
|
||||
unsigned int executionScope = builder.getConstantScalar(operands[0]);
|
||||
unsigned int memoryScope = builder.getConstantScalar(operands[1]);
|
||||
unsigned int semantics = builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]);
|
||||
builder.createControlBarrier((spv::Scope)executionScope, (spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
|
||||
if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
|
||||
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
||||
}
|
||||
if (glslangIntermediate->usingVulkanMemoryModel() && (executionScope == spv::ScopeDevice || memoryScope == spv::ScopeDevice)) {
|
||||
builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
break;
|
||||
case glslang::EOpMemoryBarrier:
|
||||
{
|
||||
// This is for the extended memoryBarrier function, with three operands.
|
||||
// The unextended memoryBarrier() goes through createNoArgOperation.
|
||||
assert(operands.size() == 3);
|
||||
unsigned int memoryScope = builder.getConstantScalar(operands[0]);
|
||||
unsigned int semantics = builder.getConstantScalar(operands[1]) | builder.getConstantScalar(operands[2]);
|
||||
builder.createMemoryBarrier((spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
|
||||
if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
|
||||
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
||||
}
|
||||
if (glslangIntermediate->usingVulkanMemoryModel() && memoryScope == spv::ScopeDevice) {
|
||||
builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
@ -6351,7 +6651,8 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::
|
||||
// Intrinsics with no arguments (or no return value, and no precision).
|
||||
spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId)
|
||||
{
|
||||
// TODO: get the barrier operands correct
|
||||
// GLSL memory barriers use queuefamily scope in new model, device scope in old model
|
||||
spv::Scope memoryBarrierScope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
|
||||
|
||||
switch (op) {
|
||||
case glslang::EOpEmitVertex:
|
||||
@ -6362,11 +6663,14 @@ spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv:
|
||||
return 0;
|
||||
case glslang::EOpBarrier:
|
||||
if (glslangIntermediate->getStage() == EShLangTessControl) {
|
||||
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
|
||||
// TODO: prefer the following, when available:
|
||||
// builder.createControlBarrier(spv::ScopePatch, spv::ScopePatch,
|
||||
// spv::MemorySemanticsPatchMask |
|
||||
// spv::MemorySemanticsAcquireReleaseMask);
|
||||
if (glslangIntermediate->usingVulkanMemoryModel()) {
|
||||
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
|
||||
spv::MemorySemanticsOutputMemoryKHRMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
|
||||
} else {
|
||||
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
|
||||
}
|
||||
} else {
|
||||
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
|
||||
spv::MemorySemanticsWorkgroupMemoryMask |
|
||||
@ -6374,24 +6678,24 @@ spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv:
|
||||
}
|
||||
return 0;
|
||||
case glslang::EOpMemoryBarrier:
|
||||
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsAllMemory |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAllMemory |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
return 0;
|
||||
case glslang::EOpMemoryBarrierAtomicCounter:
|
||||
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsAtomicCounterMemoryMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAtomicCounterMemoryMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
return 0;
|
||||
case glslang::EOpMemoryBarrierBuffer:
|
||||
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsUniformMemoryMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
return 0;
|
||||
case glslang::EOpMemoryBarrierImage:
|
||||
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsImageMemoryMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsImageMemoryMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
return 0;
|
||||
case glslang::EOpMemoryBarrierShared:
|
||||
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsWorkgroupMemoryMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsWorkgroupMemoryMask |
|
||||
spv::MemorySemanticsAcquireReleaseMask);
|
||||
return 0;
|
||||
case glslang::EOpGroupMemoryBarrier:
|
||||
builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsAllMemory |
|
||||
@ -6520,7 +6824,7 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol
|
||||
|
||||
if (symbol->getType().isImage()) {
|
||||
std::vector<spv::Decoration> memory;
|
||||
TranslateMemoryDecoration(symbol->getType().getQualifier(), memory);
|
||||
TranslateMemoryDecoration(symbol->getType().getQualifier(), memory, glslangIntermediate->usingVulkanMemoryModel());
|
||||
for (unsigned int i = 0; i < memory.size(); ++i)
|
||||
builder.addDecoration(id, memory[i]);
|
||||
}
|
||||
|
@ -1216,19 +1216,35 @@ Id Builder::createUndefined(Id type)
}

// Comments in header
void Builder::createStore(Id rValue, Id lValue)
void Builder::createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope)
{
    Instruction* store = new Instruction(OpStore);
    store->addIdOperand(lValue);
    store->addIdOperand(rValue);

    if (memoryAccess != MemoryAccessMaskNone) {
        store->addImmediateOperand(memoryAccess);
        if (memoryAccess & spv::MemoryAccessMakePointerAvailableKHRMask) {
            store->addIdOperand(makeUintConstant(scope));
        }
    }

    buildPoint->addInstruction(std::unique_ptr<Instruction>(store));
}

// Comments in header
Id Builder::createLoad(Id lValue)
Id Builder::createLoad(Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope)
{
    Instruction* load = new Instruction(getUniqueId(), getDerefTypeId(lValue), OpLoad);
    load->addIdOperand(lValue);

    if (memoryAccess != MemoryAccessMaskNone) {
        load->addImmediateOperand(memoryAccess);
        if (memoryAccess & spv::MemoryAccessMakePointerVisibleKHRMask) {
            load->addIdOperand(makeUintConstant(scope));
        }
    }

    buildPoint->addInstruction(std::unique_ptr<Instruction>(load));

    return load->getResultId();
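A sketch of the operand layout these two builder changes produce: the optional MemoryAccess literal comes first, and only a mask containing MakePointerAvailableKHR (stores) or MakePointerVisibleKHR (loads) is followed by the extra scope id. Constants are the ones added to spirv.hpp by this commit; result-id bookkeeping is omitted:

#include <cstdio>
#include <vector>

enum : unsigned {
    MemoryAccessMaskNone                = 0x0,
    MemoryAccessMakePointerAvailableKHR = 0x8,
    MemoryAccessMakePointerVisibleKHR   = 0x10,
};

// Sketch of Builder::createStore's operand list for OpStore:
//   Pointer, Object [, MemoryAccess literal [, Scope id]]
// (createLoad is analogous, keying the scope id off MakePointerVisibleKHR.)
std::vector<unsigned> storeOperands(unsigned pointerId, unsigned objectId,
                                    unsigned memoryAccess, unsigned scopeConstantId)
{
    std::vector<unsigned> ops = { pointerId, objectId };
    if (memoryAccess != MemoryAccessMaskNone) {
        ops.push_back(memoryAccess);                       // literal mask
        if (memoryAccess & MemoryAccessMakePointerAvailableKHR)
            ops.push_back(scopeConstantId);                // id of a scope constant
    }
    return ops;
}

int main()
{
    // Plain store: two operands.  Coherent store under the Vulkan memory
    // model: the mask and the scope id are appended.
    std::printf("%zu vs %zu operands\n",
                storeOperands(1, 2, MemoryAccessMaskNone, 0).size(),
                storeOperands(1, 2, MemoryAccessMakePointerAvailableKHR, 3).size()); // 2 vs 4
}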
@ -1361,6 +1377,16 @@ void Builder::createNoResultOp(Op opCode, Id operand)
|
||||
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
|
||||
}
|
||||
|
||||
// An opcode that has multiple operands, no result id, and no type
|
||||
void Builder::createNoResultOp(Op opCode, const std::vector<Id>& operands)
|
||||
{
|
||||
Instruction* op = new Instruction(opCode);
|
||||
for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
|
||||
op->addIdOperand(*it);
|
||||
}
|
||||
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
|
||||
}
|
||||
|
||||
// An opcode that has multiple operands, no result id, and no type
|
||||
void Builder::createNoResultOp(Op opCode, const std::vector<IdImmediate>& operands)
|
||||
{
|
||||
@ -1679,6 +1705,12 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
|
||||
mask = (ImageOperandsMask)(mask | ImageOperandsMinLodMask);
|
||||
texArgs[numArgs++] = parameters.lodClamp;
|
||||
}
|
||||
if (parameters.nonprivate) {
|
||||
mask = mask | ImageOperandsNonPrivateTexelKHRMask;
|
||||
}
|
||||
if (parameters.volatil) {
|
||||
mask = mask | ImageOperandsVolatileTexelKHRMask;
|
||||
}
|
||||
if (mask == ImageOperandsMaskNone)
|
||||
--numArgs; // undo speculative reservation for the mask argument
|
||||
else
|
||||
@ -2352,6 +2384,7 @@ void Builder::clearAccessChain()
|
||||
accessChain.component = NoResult;
|
||||
accessChain.preSwizzleBaseType = NoType;
|
||||
accessChain.isRValue = false;
|
||||
accessChain.coherentFlags.clear();
|
||||
}
|
||||
|
||||
// Comments in header
|
||||
@ -2378,7 +2411,7 @@ void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizz
|
||||
}
|
||||
|
||||
// Comments in header
|
||||
void Builder::accessChainStore(Id rvalue)
|
||||
void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope)
|
||||
{
|
||||
assert(accessChain.isRValue == false);
|
||||
|
||||
@ -2396,11 +2429,11 @@ void Builder::accessChainStore(Id rvalue)
|
||||
source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, source, accessChain.swizzle);
|
||||
}
|
||||
|
||||
createStore(source, base);
|
||||
createStore(source, base, memoryAccess, scope);
|
||||
}
|
||||
|
||||
// Comments in header
|
||||
Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType)
|
||||
Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess, spv::Scope scope)
|
||||
{
|
||||
Id id;
|
||||
|
||||
@ -2444,7 +2477,7 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu
|
||||
} else {
|
||||
transferAccessChainSwizzle(true);
|
||||
// load through the access chain
|
||||
id = createLoad(collapseAccessChain());
|
||||
id = createLoad(collapseAccessChain(), memoryAccess, scope);
|
||||
setPrecision(id, precision);
|
||||
addDecoration(id, nonUniform);
|
||||
}
|
||||
|
@ -274,10 +274,10 @@ public:
|
||||
Id createUndefined(Id type);
|
||||
|
||||
// Store into an Id and return the l-value
|
||||
void createStore(Id rValue, Id lValue);
|
||||
void createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax);
|
||||
|
||||
// Load from an Id and return it
|
||||
Id createLoad(Id lValue);
|
||||
Id createLoad(Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax);
|
||||
|
||||
// Create an OpAccessChain instruction
|
||||
Id createAccessChain(StorageClass, Id base, const std::vector<Id>& offsets);
|
||||
@ -296,6 +296,7 @@ public:
|
||||
|
||||
void createNoResultOp(Op);
|
||||
void createNoResultOp(Op, Id operand);
|
||||
void createNoResultOp(Op, const std::vector<Id>& operands);
|
||||
void createNoResultOp(Op, const std::vector<IdImmediate>& operands);
|
||||
void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask);
|
||||
void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics);
|
||||
@ -365,6 +366,8 @@ public:
|
||||
Id component;
|
||||
Id texelOut;
|
||||
Id lodClamp;
|
||||
bool nonprivate;
|
||||
bool volatil;
|
||||
};
|
||||
|
||||
// Select the correct texture operation based on all inputs, and emit the correct instruction
|
||||
@ -504,6 +507,43 @@
        Id component; // a dynamic component index, can coexist with a swizzle, done after the swizzle, NoResult if not present
        Id preSwizzleBaseType; // dereferenced type, before swizzle or component is applied; NoType unless a swizzle or component is present
        bool isRValue; // true if 'base' is an r-value, otherwise, base is an l-value

        // Accumulate whether anything in the chain of structures has coherent decorations.
        struct CoherentFlags {
            unsigned coherent : 1;
            unsigned devicecoherent : 1;
            unsigned queuefamilycoherent : 1;
            unsigned workgroupcoherent : 1;
            unsigned subgroupcoherent : 1;
            unsigned nonprivate : 1;
            unsigned volatil : 1;
            unsigned isImage : 1;

            void clear() {
                coherent = 0;
                devicecoherent = 0;
                queuefamilycoherent = 0;
                workgroupcoherent = 0;
                subgroupcoherent = 0;
                nonprivate = 0;
                volatil = 0;
                isImage = 0;
            }

            CoherentFlags() { clear(); }
            CoherentFlags operator |=(const CoherentFlags &other) {
                coherent |= other.coherent;
                devicecoherent |= other.devicecoherent;
                queuefamilycoherent |= other.queuefamilycoherent;
                workgroupcoherent |= other.workgroupcoherent;
                subgroupcoherent |= other.subgroupcoherent;
                nonprivate |= other.nonprivate;
                volatil |= other.volatil;
                isImage |= other.isImage;
                return *this;
            }
        };
        CoherentFlags coherentFlags;
    };

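The reason CoherentFlags lives on the access chain is that coherence can come from any level of a nested access: every accessChainPush OR-folds the member's flags into the chain, so the final load or store sees the union. A reduced standalone sketch of that accumulation (two flags instead of the full bitfield):

#include <cstdio>
#include <initializer_list>

// Reduced version of AccessChain::CoherentFlags: the real struct has one bit
// per qualifier plus isImage; only two are kept here to show the accumulation.
struct CoherentFlags {
    unsigned workgroupcoherent : 1;
    unsigned nonprivate : 1;
    CoherentFlags() : workgroupcoherent(0), nonprivate(0) {}
    CoherentFlags& operator|=(const CoherentFlags& other) {
        workgroupcoherent |= other.workgroupcoherent;
        nonprivate |= other.nonprivate;
        return *this;
    }
};

int main()
{
    // Flags contributed by each level of e.g. block.member[2].field
    CoherentFlags block, member, field;
    member.workgroupcoherent = 1;   // the member was declared workgroupcoherent
    member.nonprivate = 1;          // *coherent implies nonprivate

    CoherentFlags chain;            // accessChain.coherentFlags
    for (const CoherentFlags& level : { block, member, field })
        chain |= level;             // what accessChainPush does per level

    std::printf("workgroupcoherent=%u nonprivate=%u\n",
                (unsigned)chain.workgroupcoherent, (unsigned)chain.nonprivate); // 1 1
}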
//
|
||||
@ -533,9 +573,10 @@ public:
|
||||
}
|
||||
|
||||
// push offset onto the end of the chain
|
||||
void accessChainPush(Id offset)
|
||||
void accessChainPush(Id offset, AccessChain::CoherentFlags coherentFlags)
|
||||
{
|
||||
accessChain.indexChain.push_back(offset);
|
||||
accessChain.coherentFlags |= coherentFlags;
|
||||
}
|
||||
|
||||
// push new swizzle onto the end of any existing swizzle, merging into a single swizzle
|
||||
@ -553,10 +594,10 @@ public:
|
||||
}
|
||||
|
||||
// use accessChain and swizzle to store value
|
||||
void accessChainStore(Id rvalue);
|
||||
void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax);
|
||||
|
||||
// use accessChain and swizzle to load an r-value
|
||||
Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType);
|
||||
Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax);
|
||||
|
||||
// get the direct pointer for an l-value
|
||||
Id accessChainGetLValue();
|
||||
|
@ -535,6 +535,11 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
|
||||
case OperandLiteralString:
|
||||
numOperands -= disassembleString();
|
||||
break;
|
||||
case OperandMemoryAccess:
|
||||
outputMask(OperandMemoryAccess, stream[word++]);
|
||||
--numOperands;
|
||||
disassembleIds(numOperands);
|
||||
return;
|
||||
default:
|
||||
assert(operandClass >= OperandSource && operandClass < OperandOpcode);
|
||||
|
||||
|
@ -117,9 +117,10 @@ const char* AddressingString(int addr)
|
||||
const char* MemoryString(int mem)
|
||||
{
|
||||
switch (mem) {
|
||||
case 0: return "Simple";
|
||||
case 1: return "GLSL450";
|
||||
case 2: return "OpenCL";
|
||||
case MemoryModelSimple: return "Simple";
|
||||
case MemoryModelGLSL450: return "GLSL450";
|
||||
case MemoryModelOpenCL: return "OpenCL";
|
||||
case MemoryModelVulkanKHR: return "VulkanKHR";
|
||||
|
||||
default: return "Bad";
|
||||
}
|
||||
@ -499,19 +500,23 @@ const char* ImageChannelDataTypeString(int type)
|
||||
}
|
||||
}
|
||||
|
||||
const int ImageOperandsCeiling = 8;
|
||||
const int ImageOperandsCeiling = 12;
|
||||
|
||||
const char* ImageOperandsString(int format)
|
||||
{
|
||||
switch (format) {
|
||||
case 0: return "Bias";
|
||||
case 1: return "Lod";
|
||||
case 2: return "Grad";
|
||||
case 3: return "ConstOffset";
|
||||
case 4: return "Offset";
|
||||
case 5: return "ConstOffsets";
|
||||
case 6: return "Sample";
|
||||
case 7: return "MinLod";
|
||||
case ImageOperandsBiasShift: return "Bias";
|
||||
case ImageOperandsLodShift: return "Lod";
|
||||
case ImageOperandsGradShift: return "Grad";
|
||||
case ImageOperandsConstOffsetShift: return "ConstOffset";
|
||||
case ImageOperandsOffsetShift: return "Offset";
|
||||
case ImageOperandsConstOffsetsShift: return "ConstOffsets";
|
||||
case ImageOperandsSampleShift: return "Sample";
|
||||
case ImageOperandsMinLodShift: return "MinLod";
|
||||
case ImageOperandsMakeTexelAvailableKHRShift: return "MakeTexelAvailableKHR";
|
||||
case ImageOperandsMakeTexelVisibleKHRShift: return "MakeTexelVisibleKHR";
|
||||
case ImageOperandsNonPrivateTexelKHRShift: return "NonPrivateTexelKHR";
|
||||
case ImageOperandsVolatileTexelKHRShift: return "VolatileTexelKHR";
|
||||
|
||||
case ImageOperandsCeiling:
|
||||
default:
|
||||
@ -645,12 +650,17 @@ const char* MemorySemanticsString(int mem)
|
||||
}
|
||||
}
|
||||
|
||||
const int MemoryAccessCeiling = 6;
|
||||
|
||||
const char* MemoryAccessString(int mem)
|
||||
{
|
||||
switch (mem) {
|
||||
case 0: return "Volatile";
|
||||
case 1: return "Aligned";
|
||||
case 2: return "Nontemporal";
|
||||
case MemoryAccessVolatileShift: return "Volatile";
|
||||
case MemoryAccessAlignedShift: return "Aligned";
|
||||
case MemoryAccessNontemporalShift: return "Nontemporal";
|
||||
case MemoryAccessMakePointerAvailableKHRShift: return "MakePointerAvailableKHR";
|
||||
case MemoryAccessMakePointerVisibleKHRShift: return "MakePointerVisibleKHR";
|
||||
case MemoryAccessNonPrivatePointerKHRShift: return "NonPrivatePointerKHR";
|
||||
|
||||
default: return "Bad";
|
||||
}
|
||||
@ -833,6 +843,9 @@ const char* CapabilityString(int info)
|
||||
case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "CapabilityUniformTexelBufferArrayNonUniformIndexingEXT";
|
||||
case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "CapabilityStorageTexelBufferArrayNonUniformIndexingEXT";
|
||||
|
||||
case CapabilityVulkanMemoryModelKHR: return "CapabilityVulkanMemoryModelKHR";
|
||||
case CapabilityVulkanMemoryModelDeviceScopeKHR: return "CapabilityVulkanMemoryModelDeviceScopeKHR";
|
||||
|
||||
default: return "Bad";
|
||||
}
|
||||
}
|
||||
@ -1245,6 +1258,7 @@ EnumParameters DecorationParams[DecorationCeiling];
|
||||
EnumParameters LoopControlParams[FunctionControlCeiling];
|
||||
EnumParameters SelectionControlParams[SelectControlCeiling];
|
||||
EnumParameters FunctionControlParams[FunctionControlCeiling];
|
||||
EnumParameters MemoryAccessParams[MemoryAccessCeiling];
|
||||
|
||||
// Set up all the parameterizing descriptions of the opcodes, operands, etc.
|
||||
void Parameterize()
|
||||
@ -1400,7 +1414,7 @@ void Parameterize()
|
||||
OperandClassParams[OperandLoop].set(LoopControlCeiling, LoopControlString, LoopControlParams, true);
|
||||
OperandClassParams[OperandFunction].set(FunctionControlCeiling, FunctionControlString, FunctionControlParams, true);
|
||||
OperandClassParams[OperandMemorySemantics].set(0, MemorySemanticsString, nullptr, true);
|
||||
OperandClassParams[OperandMemoryAccess].set(0, MemoryAccessString, nullptr, true);
|
||||
OperandClassParams[OperandMemoryAccess].set(MemoryAccessCeiling, MemoryAccessString, MemoryAccessParams, true);
|
||||
OperandClassParams[OperandScope].set(0, ScopeString, nullptr);
|
||||
OperandClassParams[OperandGroupOperation].set(0, GroupOperationString, nullptr);
|
||||
OperandClassParams[OperandKernelEnqueueFlags].set(0, KernelEnqueueFlagsString, nullptr);
|
||||
@ -1522,10 +1536,14 @@ void Parameterize()
|
||||
|
||||
InstructionDesc[OpLoad].operands.push(OperandId, "'Pointer'");
|
||||
InstructionDesc[OpLoad].operands.push(OperandMemoryAccess, "", true);
|
||||
InstructionDesc[OpLoad].operands.push(OperandLiteralNumber, "", true);
|
||||
InstructionDesc[OpLoad].operands.push(OperandId, "", true);
|
||||
|
||||
InstructionDesc[OpStore].operands.push(OperandId, "'Pointer'");
|
||||
InstructionDesc[OpStore].operands.push(OperandId, "'Object'");
|
||||
InstructionDesc[OpStore].operands.push(OperandMemoryAccess, "", true);
|
||||
InstructionDesc[OpStore].operands.push(OperandLiteralNumber, "", true);
|
||||
InstructionDesc[OpStore].operands.push(OperandId, "", true);
|
||||
|
||||
InstructionDesc[OpPhi].operands.push(OperandVariableIds, "'Variable, Parent, ...'");
|
||||
|
||||
|
@ -87,6 +87,7 @@ enum MemoryModel {
|
||||
MemoryModelSimple = 0,
|
||||
MemoryModelGLSL450 = 1,
|
||||
MemoryModelOpenCL = 2,
|
||||
MemoryModelVulkanKHR = 3,
|
||||
MemoryModelMax = 0x7fffffff,
|
||||
};
|
||||
|
||||
@ -275,6 +276,10 @@ enum ImageOperandsShift {
|
||||
ImageOperandsConstOffsetsShift = 5,
|
||||
ImageOperandsSampleShift = 6,
|
||||
ImageOperandsMinLodShift = 7,
|
||||
ImageOperandsMakeTexelAvailableKHRShift = 8,
|
||||
ImageOperandsMakeTexelVisibleKHRShift = 9,
|
||||
ImageOperandsNonPrivateTexelKHRShift = 10,
|
||||
ImageOperandsVolatileTexelKHRShift = 11,
|
||||
ImageOperandsMax = 0x7fffffff,
|
||||
};
|
||||
|
||||
@ -288,6 +293,10 @@ enum ImageOperandsMask {
|
||||
ImageOperandsConstOffsetsMask = 0x00000020,
|
||||
ImageOperandsSampleMask = 0x00000040,
|
||||
ImageOperandsMinLodMask = 0x00000080,
|
||||
ImageOperandsMakeTexelAvailableKHRMask = 0x00000100,
|
||||
ImageOperandsMakeTexelVisibleKHRMask = 0x00000200,
|
||||
ImageOperandsNonPrivateTexelKHRMask = 0x00000400,
|
||||
ImageOperandsVolatileTexelKHRMask = 0x00000800,
|
||||
};
|
||||
|
||||
enum FPFastMathModeShift {
|
||||
@ -528,6 +537,9 @@ enum MemorySemanticsShift {
|
||||
MemorySemanticsCrossWorkgroupMemoryShift = 9,
|
||||
MemorySemanticsAtomicCounterMemoryShift = 10,
|
||||
MemorySemanticsImageMemoryShift = 11,
|
||||
MemorySemanticsOutputMemoryKHRShift = 12,
|
||||
MemorySemanticsMakeAvailableKHRShift = 13,
|
||||
MemorySemanticsMakeVisibleKHRShift = 14,
|
||||
MemorySemanticsMax = 0x7fffffff,
|
||||
};
|
||||
|
||||
@ -543,12 +555,18 @@ enum MemorySemanticsMask {
|
||||
MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
|
||||
MemorySemanticsAtomicCounterMemoryMask = 0x00000400,
|
||||
MemorySemanticsImageMemoryMask = 0x00000800,
|
||||
MemorySemanticsOutputMemoryKHRMask = 0x00001000,
|
||||
MemorySemanticsMakeAvailableKHRMask = 0x00002000,
|
||||
MemorySemanticsMakeVisibleKHRMask = 0x00004000,
|
||||
};
|
||||
|
||||
enum MemoryAccessShift {
|
||||
MemoryAccessVolatileShift = 0,
|
||||
MemoryAccessAlignedShift = 1,
|
||||
MemoryAccessNontemporalShift = 2,
|
||||
MemoryAccessMakePointerAvailableKHRShift = 3,
|
||||
MemoryAccessMakePointerVisibleKHRShift = 4,
|
||||
MemoryAccessNonPrivatePointerKHRShift = 5,
|
||||
MemoryAccessMax = 0x7fffffff,
|
||||
};
|
||||
|
||||
@ -557,6 +575,9 @@ enum MemoryAccessMask {
|
||||
MemoryAccessVolatileMask = 0x00000001,
|
||||
MemoryAccessAlignedMask = 0x00000002,
|
||||
MemoryAccessNontemporalMask = 0x00000004,
|
||||
MemoryAccessMakePointerAvailableKHRMask = 0x00000008,
|
||||
MemoryAccessMakePointerVisibleKHRMask = 0x00000010,
|
||||
MemoryAccessNonPrivatePointerKHRMask = 0x00000020,
|
||||
};
|
||||
|
||||
enum Scope {
|
||||
@ -565,6 +586,7 @@ enum Scope {
|
||||
ScopeWorkgroup = 2,
|
||||
ScopeSubgroup = 3,
|
||||
ScopeInvocation = 4,
|
||||
ScopeQueueFamilyKHR = 5,
|
||||
ScopeMax = 0x7fffffff,
|
||||
};
|
||||
|
||||
@ -708,6 +730,8 @@ enum Capability {
|
||||
CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310,
|
||||
CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311,
|
||||
CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312,
|
||||
CapabilityVulkanMemoryModelKHR = 5345,
|
||||
CapabilityVulkanMemoryModelDeviceScopeKHR = 5346,
|
||||
CapabilitySubgroupShuffleINTEL = 5568,
|
||||
CapabilitySubgroupBufferBlockIOINTEL = 5569,
|
||||
CapabilitySubgroupImageBlockIOINTEL = 5570,
|
||||
|

243 Test/baseResults/spv.memoryScopeSemantics.comp.out Normal file

@ -0,0 +1,243 @@
|
||||
spv.memoryScopeSemantics.comp
|
||||
error: SPIRV-Tools Validation Errors
|
||||
error: Capability Int64Atomics is not allowed by Vulkan 1.0 specification (or requires extension)
|
||||
OpCapability Int64Atomics
|
||||
|
||||
// Module Version 10000
|
||||
// Generated by (magic number): 80007
|
||||
// Id's are bound by 142
|
||||
|
||||
Capability Shader
|
||||
Capability Int64
|
||||
Capability Int64Atomics
|
||||
Capability CapabilityVulkanMemoryModelKHR
|
||||
Capability CapabilityVulkanMemoryModelDeviceScopeKHR
|
||||
Extension "SPV_KHR_vulkan_memory_model"
|
||||
1: ExtInstImport "GLSL.std.450"
|
||||
MemoryModel Logical VulkanKHR
|
||||
EntryPoint GLCompute 4 "main"
|
||||
ExecutionMode 4 LocalSize 1 1 1
|
||||
Source GLSL 450
|
||||
SourceExtension "GL_ARB_gpu_shader_int64"
|
||||
SourceExtension "GL_KHR_memory_scope_semantics"
|
||||
Name 4 "main"
|
||||
Name 8 "origi"
|
||||
Name 10 "atomi"
|
||||
Name 21 "origu"
|
||||
Name 23 "atomu"
|
||||
Name 24 "value"
|
||||
Name 36 "imagei"
|
||||
Name 45 "imageu"
|
||||
Name 65 "BufferU"
|
||||
MemberName 65(BufferU) 0 "x"
|
||||
Name 67 "bufferu"
|
||||
Name 72 "y"
|
||||
Name 77 "BufferI"
|
||||
MemberName 77(BufferI) 0 "x"
|
||||
Name 79 "bufferi"
|
||||
Name 83 "A"
|
||||
MemberName 83(A) 0 "x"
|
||||
Name 84 "BufferJ"
|
||||
MemberName 84(BufferJ) 0 "a"
|
||||
Name 87 "bufferj"
|
||||
Name 98 "BufferK"
|
||||
MemberName 98(BufferK) 0 "x"
|
||||
Name 100 "bufferk"
|
||||
Name 109 "imagej"
|
||||
Name 121 "samp"
|
||||
Name 132 "atomu64"
|
||||
Name 137 "atomi64"
|
||||
Decorate 36(imagei) DescriptorSet 0
|
||||
Decorate 36(imagei) Binding 1
|
||||
Decorate 45(imageu) DescriptorSet 0
|
||||
Decorate 45(imageu) Binding 0
|
||||
MemberDecorate 65(BufferU) 0 Offset 0
|
||||
Decorate 65(BufferU) BufferBlock
|
||||
Decorate 67(bufferu) DescriptorSet 0
|
||||
Decorate 67(bufferu) Binding 2
|
||||
MemberDecorate 77(BufferI) 0 Offset 0
|
||||
Decorate 77(BufferI) BufferBlock
|
||||
Decorate 79(bufferi) DescriptorSet 0
|
||||
Decorate 79(bufferi) Binding 3
|
||||
Decorate 82 ArrayStride 4
|
||||
MemberDecorate 83(A) 0 Offset 0
|
||||
MemberDecorate 84(BufferJ) 0 Offset 0
|
||||
Decorate 84(BufferJ) BufferBlock
|
||||
Decorate 87(bufferj) DescriptorSet 0
|
||||
Decorate 87(bufferj) Binding 4
|
||||
MemberDecorate 98(BufferK) 0 Offset 0
|
||||
Decorate 98(BufferK) Block
|
||||
Decorate 100(bufferk) DescriptorSet 0
|
||||
Decorate 100(bufferk) Binding 7
|
||||
Decorate 109(imagej) DescriptorSet 0
|
||||
Decorate 109(imagej) Binding 5
|
||||
Decorate 121(samp) DescriptorSet 0
|
||||
Decorate 121(samp) Binding 6
|
||||
2: TypeVoid
|
||||
3: TypeFunction 2
|
||||
6: TypeInt 32 1
|
||||
7: TypePointer Function 6(int)
|
||||
9: TypePointer Workgroup 6(int)
|
||||
10(atomi): 9(ptr) Variable Workgroup
|
||||
11: 6(int) Constant 3
|
||||
12: 6(int) Constant 1
|
||||
13: 6(int) Constant 320
|
||||
14: 6(int) Constant 4
|
||||
15: TypeInt 32 0
|
||||
16: 15(int) Constant 5
|
||||
17: 15(int) Constant 0
|
||||
18: 15(int) Constant 324
|
||||
20: TypePointer Function 15(int)
|
||||
22: TypePointer Workgroup 15(int)
|
||||
23(atomu): 22(ptr) Variable Workgroup
|
||||
24(value): 22(ptr) Variable Workgroup
|
||||
26: 15(int) Constant 2
|
||||
28: 6(int) Constant 64
|
||||
29: 6(int) Constant 2
|
||||
30: 15(int) Constant 66
|
||||
33: 15(int) Constant 68
|
||||
34: TypeImage 6(int) 2D nonsampled format:R32i
|
||||
35: TypePointer UniformConstant 34
|
||||
36(imagei): 35(ptr) Variable UniformConstant
|
||||
37: TypeVector 6(int) 2
|
||||
38: 6(int) Constant 0
|
||||
39: 37(ivec2) ConstantComposite 38 38
|
||||
40: TypePointer Image 6(int)
|
||||
43: TypeImage 15(int) 2D nonsampled format:R32ui
|
||||
44: TypePointer UniformConstant 43
|
||||
45(imageu): 44(ptr) Variable UniformConstant
|
||||
46: 15(int) Constant 3
|
||||
47: TypePointer Image 15(int)
|
||||
50: 15(int) Constant 4
|
||||
52: 15(int) Constant 7
|
||||
57: 6(int) Constant 7
|
||||
61: 15(int) Constant 10
|
||||
63: 15(int) Constant 322
|
||||
65(BufferU): TypeStruct 15(int)
|
||||
66: TypePointer Uniform 65(BufferU)
|
||||
67(bufferu): 66(ptr) Variable Uniform
|
||||
68: TypePointer Uniform 15(int)
|
||||
70: 15(int) Constant 1
|
||||
77(BufferI): TypeStruct 15(int)
|
||||
78: TypePointer Uniform 77(BufferI)
|
||||
79(bufferi): 78(ptr) Variable Uniform
|
||||
82: TypeArray 15(int) 26
|
||||
83(A): TypeStruct 82
|
||||
84(BufferJ): TypeStruct 83(A)
|
||||
85: TypeArray 84(BufferJ) 26
|
||||
86: TypePointer Uniform 85
|
||||
87(bufferj): 86(ptr) Variable Uniform
|
||||
94: TypePointer Uniform 83(A)
|
||||
98(BufferK): TypeStruct 15(int)
|
||||
99: TypePointer Uniform 98(BufferK)
|
||||
100(bufferk): 99(ptr) Variable Uniform
|
||||
105: TypeVector 6(int) 4
|
||||
107: TypeArray 34 26
|
||||
108: TypePointer UniformConstant 107
|
||||
109(imagej): 108(ptr) Variable UniformConstant
|
||||
115: 105(ivec4) ConstantComposite 38 38 38 38
|
||||
116: TypeFloat 32
|
||||
117: TypeImage 116(float) 2D sampled format:Unknown
|
||||
118: TypeSampledImage 117
|
||||
119: TypeArray 118 26
|
||||
120: TypePointer UniformConstant 119
|
||||
121(samp): 120(ptr) Variable UniformConstant
|
||||
122: TypePointer UniformConstant 118
|
||||
125: TypeVector 116(float) 2
|
||||
126: 116(float) Constant 0
|
||||
127: 125(fvec2) ConstantComposite 126 126
|
||||
128: TypeVector 116(float) 4
|
||||
130: TypeInt 64 0
|
||||
131: TypePointer Workgroup 130(int64_t)
|
||||
132(atomu64): 131(ptr) Variable Workgroup
|
||||
133:130(int64_t) Constant 7 0
|
||||
135: TypeInt 64 1
|
||||
136: TypePointer Workgroup 135(int64_t)
|
||||
137(atomi64): 136(ptr) Variable Workgroup
|
||||
138:135(int64_t) Constant 10 0
|
||||
4(main): 2 Function None 3
|
||||
5: Label
|
||||
8(origi): 7(ptr) Variable Function
|
||||
21(origu): 20(ptr) Variable Function
|
||||
72(y): 20(ptr) Variable Function
|
||||
19: 6(int) AtomicIAdd 10(atomi) 12 18 11
|
||||
Store 8(origi) 19
|
||||
25: 15(int) Load 24(value) MakePointerVisibleKHR 26
|
||||
27: 15(int) AtomicAnd 23(atomu) 16 17 25
|
||||
Store 21(origu) 27
|
||||
31: 6(int) AtomicLoad 10(atomi) 12 30
|
||||
Store 8(origi) 31
|
||||
32: 15(int) Load 24(value) MakePointerVisibleKHR 26
|
||||
AtomicStore 23(atomu) 12 33 32
|
||||
41: 40(ptr) ImageTexelPointer 36(imagei) 39 17
|
||||
42: 6(int) AtomicLoad 41 12 30
|
||||
Store 8(origi) 42
|
||||
48: 47(ptr) ImageTexelPointer 45(imageu) 39 17
|
||||
49: 15(int) AtomicIAdd 48 12 30 46
|
||||
Store 21(origu) 49
|
||||
51: 47(ptr) ImageTexelPointer 45(imageu) 39 17
|
||||
AtomicStore 51 12 33 50
|
||||
53: 15(int) AtomicOr 23(atomu) 12 17 52
|
||||
Store 21(origu) 53
|
||||
54: 15(int) AtomicXor 23(atomu) 12 17 52
|
||||
Store 21(origu) 54
|
||||
55: 15(int) Load 24(value) MakePointerVisibleKHR 26
|
||||
56: 15(int) AtomicUMin 23(atomu) 12 17 55
|
||||
Store 21(origu) 56
|
||||
58: 6(int) AtomicSMax 10(atomi) 12 17 57
|
||||
Store 8(origi) 58
|
||||
59: 6(int) Load 8(origi)
|
||||
60: 6(int) AtomicExchange 10(atomi) 12 17 59
|
||||
Store 8(origi) 60
|
||||
62: 15(int) Load 24(value) MakePointerVisibleKHR 26
|
||||
64: 15(int) AtomicCompareExchange 23(atomu) 12 63 63 62 61
|
||||
Store 21(origu) 64
|
||||
69: 68(ptr) AccessChain 67(bufferu) 38
|
||||
71: 15(int) AtomicIAdd 69 12 18 70
|
||||
MemoryBarrier 26 18
|
||||
ControlBarrier 26 26 63
|
||||
ControlBarrier 26 26 17
|
||||
73: 68(ptr) AccessChain 67(bufferu) 38
|
||||
74: 15(int) Load 73 MakePointerVisibleKHR NonPrivatePointerKHR 26
|
||||
Store 72(y) 74
|
||||
75: 15(int) Load 72(y)
|
||||
76: 68(ptr) AccessChain 67(bufferu) 38
|
||||
Store 76 75 MakePointerAvailableKHR NonPrivatePointerKHR 26
|
||||
80: 68(ptr) AccessChain 79(bufferi) 38
|
||||
81: 15(int) Load 80 MakePointerVisibleKHR NonPrivatePointerKHR 16
|
||||
Store 72(y) 81
|
||||
88: 68(ptr) AccessChain 87(bufferj) 38 38 38 12
|
||||
89: 15(int) Load 88 Volatile MakePointerVisibleKHR NonPrivatePointerKHR 46
|
||||
Store 72(y) 89
|
||||
90: 15(int) Load 72(y)
|
||||
91: 68(ptr) AccessChain 79(bufferi) 38
|
||||
Store 91 90 MakePointerAvailableKHR NonPrivatePointerKHR 16
|
||||
92: 15(int) Load 72(y)
|
||||
93: 68(ptr) AccessChain 87(bufferj) 38 38 38 12
|
||||
Store 93 92 Volatile MakePointerAvailableKHR NonPrivatePointerKHR 46
|
||||
95: 94(ptr) AccessChain 87(bufferj) 12 38
|
||||
96: 83(A) Load 95 Volatile MakePointerVisibleKHR NonPrivatePointerKHR 46
|
||||
97: 94(ptr) AccessChain 87(bufferj) 38 38
|
||||
Store 97 96 Volatile MakePointerAvailableKHR NonPrivatePointerKHR 46
|
||||
101: 68(ptr) AccessChain 100(bufferk) 38
|
||||
102: 15(int) Load 101 NonPrivatePointerKHR
|
||||
103: 68(ptr) AccessChain 79(bufferi) 38
|
||||
Store 103 102 MakePointerAvailableKHR NonPrivatePointerKHR 16
|
||||
104: 34 Load 36(imagei)
|
||||
106: 105(ivec4) ImageRead 104 39 MakeTexelVisibleKHR NonPrivateTexelKHR VolatileTexelKHR 16
|
||||
110: 35(ptr) AccessChain 109(imagej) 38
|
||||
111: 34 Load 110
|
||||
112: 105(ivec4) ImageRead 111 39 NonPrivateTexelKHR
|
||||
113: 35(ptr) AccessChain 109(imagej) 12
|
||||
114: 34 Load 113
|
||||
ImageWrite 114 39 115 NonPrivateTexelKHR
|
||||
123: 122(ptr) AccessChain 121(samp) 38
|
||||
124: 118 Load 123
|
||||
129: 128(fvec4) ImageSampleExplicitLod 124 127 Lod NonPrivateTexelKHR 126
|
||||
134:130(int64_t) AtomicUMax 132(atomu64) 12 17 133
|
||||
Store 132(atomu64) 134 MakePointerAvailableKHR 26
|
||||
139:130(int64_t) Load 132(atomu64) MakePointerVisibleKHR 26
|
||||
140:135(int64_t) Bitcast 139
|
||||
141:135(int64_t) AtomicCompareExchange 137(atomi64) 12 63 63 140 138
|
||||
Return
|
||||
FunctionEnd

17 Test/baseResults/spv.memoryScopeSemantics_Error.comp.out Normal file

@ -0,0 +1,17 @@
spv.memoryScopeSemantics_Error.comp
ERROR: 0:15: 'atomicStore' : gl_SemanticsAcquire must not be used with (image) atomic store
ERROR: 0:16: 'imageAtomicLoad' : gl_SemanticsRelease must not be used with (image) atomic load
ERROR: 0:17: 'atomicStore' : gl_SemanticsAcquireRelease must not be used with (image) atomic load/store
ERROR: 0:18: 'atomicStore' : Invalid semantics value
ERROR: 0:19: 'imageAtomicLoad' : Invalid storage class semantics value
ERROR: 0:20: 'memoryBarrier' : Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or gl_SemanticsAcquireRelease
ERROR: 0:21: 'memoryBarrier' : Storage class semantics must not be zero
ERROR: 0:22: 'memoryBarrier' : Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or gl_SemanticsAcquireRelease
ERROR: 0:23: 'atomicAdd' : Semantics must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or gl_SemanticsAcquireRelease
ERROR: 0:24: 'atomicCompSwap' : semUnequal must not be gl_SemanticsRelease or gl_SemanticsAcquireRelease
ERROR: 0:25: 'memoryBarrier' : gl_SemanticsMakeVisible requires gl_SemanticsAcquire or gl_SemanticsAcquireRelease
ERROR: 0:26: 'memoryBarrier' : gl_SemanticsMakeAvailable requires gl_SemanticsRelease or gl_SemanticsAcquireRelease
ERROR: 12 compilation errors. No code generated.

SPIR-V is not generated for failed compile or link

@ -11,7 +11,7 @@ spv.specConstant.vert
Source GLSL 400
Name 4 "main"
Name 9 "arraySize"
Name 14 "foo(vf4[s2543];"
Name 14 "foo(vf4[s2765];"
Name 13 "p"
Name 17 "builtin_spec_constant("
Name 20 "color"

@ -102,10 +102,10 @@ spv.specConstant.vert
Store 20(color) 46
48: 10 Load 22(ucol)
Store 47(param) 48
49: 2 FunctionCall 14(foo(vf4[s2543];) 47(param)
49: 2 FunctionCall 14(foo(vf4[s2765];) 47(param)
Return
FunctionEnd
14(foo(vf4[s2543];): 2 Function None 12
14(foo(vf4[s2765];): 2 Function None 12
13(p): 11(ptr) FunctionParameter
15: Label
54: 24(ptr) AccessChain 53(dupUcol) 23

61 Test/spv.memoryScopeSemantics.comp Normal file

@ -0,0 +1,61 @@
#version 450
#extension GL_KHR_memory_scope_semantics : require
#extension GL_ARB_gpu_shader_int64 : require

#pragma use_vulkan_memory_model

shared uint value;
shared int atomi;
shared uint atomu;
layout(binding = 0, r32ui) workgroupcoherent uniform uimage2D imageu;
layout(binding = 1, r32i) volatile coherent uniform iimage2D imagei;
layout(binding = 5, r32i) nonprivate uniform iimage2D imagej[2];
layout (binding = 2) buffer BufferU { workgroupcoherent uint x; } bufferu;
layout (binding = 3) coherent buffer BufferI { uint x; } bufferi;
struct A { uint x[2]; };
layout (binding = 4) volatile buffer BufferJ { subgroupcoherent A a; } bufferj[2];
layout (binding = 6) nonprivate uniform sampler2D samp[2];
layout (binding = 7) nonprivate uniform BufferK { uint x; } bufferk;
shared uint64_t atomu64;
shared int64_t atomi64;

void main()
{
int origi = atomicAdd(atomi, 3, gl_ScopeDevice, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsRelease);
uint origu = atomicAnd(atomu, value);
origi = atomicLoad(atomi, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);
atomicStore(atomu, value, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);
origi = imageAtomicLoad(imagei, ivec2(0,0), gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);
origu = imageAtomicAdd(imageu, ivec2(0,0), 3u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);
imageAtomicStore(imageu, ivec2(0,0), 4u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);
origu = atomicOr(atomu, 7u, gl_ScopeDevice, 0, 0);
origu = atomicXor(atomu, 7u, gl_ScopeDevice, 0, 0);
origu = atomicMin(atomu, value, gl_ScopeDevice, 0, 0);
origi = atomicMax(atomi, 7, gl_ScopeDevice, 0, 0);
origi = atomicExchange(atomi, origi, gl_ScopeDevice, 0, 0);
origu = atomicCompSwap(atomu, 10u, value, gl_ScopeDevice, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquire, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquire);
atomicAdd(bufferu.x, 1, gl_ScopeDevice, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsRelease);
memoryBarrier(gl_ScopeWorkgroup, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsRelease);
controlBarrier(gl_ScopeWorkgroup, gl_ScopeWorkgroup, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquire);
controlBarrier(gl_ScopeWorkgroup, gl_ScopeWorkgroup, 0, 0);

uint y;
y = bufferu.x;
bufferu.x = y;
y = bufferi.x;
y = bufferj[0].a.x[1];
bufferi.x = y;
bufferj[0].a.x[1] = y;
bufferj[0].a = bufferj[1].a;
bufferi.x = bufferk.x;

imageLoad(imagei, ivec2(0,0));
imageLoad(imagej[0], ivec2(0,0));
imageStore(imagej[1], ivec2(0,0), ivec4(0,0,0,0));
texture(samp[0], vec2(0,0));

atomu64 = atomicMax(atomu64, uint64_t(7), gl_ScopeDevice, 0, 0);
atomicCompSwap(atomi64, int64_t(10), int64_t(atomu64), gl_ScopeDevice, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquire, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquire);
}

28 Test/spv.memoryScopeSemantics_Error.comp Normal file

@ -0,0 +1,28 @@
#version 450
#extension GL_KHR_memory_scope_semantics : require

shared uint value;
shared int atomi;
shared uint atomu;
layout(binding = 0, r32ui) workgroupcoherent uniform uimage2D imageu;
layout(binding = 1, r32i) coherent uniform iimage2D imagei;
layout (binding = 2) buffer BufferU { workgroupcoherent uint x; } bufferu;
layout (binding = 3) subgroupcoherent buffer BufferI { uint x; } bufferi;

void main()
{
atomicStore(atomu, value, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);
int origi = imageAtomicLoad(imagei, ivec2(0,0), gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);
atomicStore(atomu, value, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquireRelease);
atomicStore(atomu, value, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_StorageSemanticsBuffer);
origi = imageAtomicLoad(imagei, ivec2(0,0), gl_ScopeDevice, gl_SemanticsAcquire, gl_SemanticsAcquire);
memoryBarrier(gl_ScopeWorkgroup, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, 0);
memoryBarrier(gl_ScopeWorkgroup, 0, gl_SemanticsRelease);
memoryBarrier(gl_ScopeWorkgroup, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsRelease | gl_SemanticsAcquire);
atomicAdd(atomu, value, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease | gl_SemanticsAcquire);
uint origu = atomicCompSwap(atomu, 10u, value, gl_ScopeDevice, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquire, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquireRelease);
memoryBarrier(gl_ScopeWorkgroup, gl_StorageSemanticsBuffer, gl_SemanticsRelease | gl_SemanticsMakeVisible);
memoryBarrier(gl_ScopeWorkgroup, gl_StorageSemanticsBuffer, gl_SemanticsAcquire | gl_SemanticsMakeAvailable);
}
@ -462,6 +462,11 @@ public:
|
||||
void clearMemory()
|
||||
{
|
||||
coherent = false;
|
||||
devicecoherent = false;
|
||||
queuefamilycoherent = false;
|
||||
workgroupcoherent = false;
|
||||
subgroupcoherent = false;
|
||||
nonprivate = false;
|
||||
volatil = false;
|
||||
restrict = false;
|
||||
readonly = false;
|
||||
@ -499,6 +504,11 @@ public:
|
||||
bool patch : 1;
|
||||
bool sample : 1;
|
||||
bool coherent : 1;
|
||||
bool devicecoherent : 1;
|
||||
bool queuefamilycoherent : 1;
|
||||
bool workgroupcoherent : 1;
|
||||
bool subgroupcoherent : 1;
|
||||
bool nonprivate : 1;
|
||||
bool volatil : 1;
|
||||
bool restrict : 1;
|
||||
bool readonly : 1;
|
||||
@ -508,7 +518,11 @@ public:
|
||||
|
||||
bool isMemory() const
|
||||
{
|
||||
return coherent || volatil || restrict || readonly || writeonly;
|
||||
return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly || nonprivate;
|
||||
}
|
||||
bool isMemoryQualifierImageAndSSBOOnly() const
|
||||
{
|
||||
return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly;
|
||||
}
|
||||
bool isInterpolation() const
|
||||
{
|
||||
@ -1713,6 +1727,16 @@ public:
|
||||
appendStr(" sample");
|
||||
if (qualifier.coherent)
|
||||
appendStr(" coherent");
|
||||
if (qualifier.devicecoherent)
|
||||
appendStr(" devicecoherent");
|
||||
if (qualifier.queuefamilycoherent)
|
||||
appendStr(" queuefamilycoherent");
|
||||
if (qualifier.workgroupcoherent)
|
||||
appendStr(" workgroupcoherent");
|
||||
if (qualifier.subgroupcoherent)
|
||||
appendStr(" subgroupcoherent");
|
||||
if (qualifier.nonprivate)
|
||||
appendStr(" nonprivate");
|
||||
if (qualifier.volatil)
|
||||
appendStr(" volatile");
|
||||
if (qualifier.restrict)
|
||||
|
@ -592,6 +592,8 @@ enum TOperator {
|
||||
EOpAtomicXor,
|
||||
EOpAtomicExchange,
|
||||
EOpAtomicCompSwap,
|
||||
EOpAtomicLoad,
|
||||
EOpAtomicStore,
|
||||
|
||||
EOpAtomicCounterIncrement, // results in pre-increment value
|
||||
EOpAtomicCounterDecrement, // results in post-decrement value
|
||||
@ -784,6 +786,8 @@ enum TOperator {
|
||||
EOpImageAtomicXor,
|
||||
EOpImageAtomicExchange,
|
||||
EOpImageAtomicCompSwap,
|
||||
EOpImageAtomicLoad,
|
||||
EOpImageAtomicStore,
|
||||
|
||||
EOpSubpassLoad,
|
||||
EOpSubpassLoadMS,
|
||||
|
@ -935,27 +935,49 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
commonBuiltins.append(
"uint atomicAdd(coherent volatile inout uint, uint);"
" int atomicAdd(coherent volatile inout int, int);"
"uint atomicAdd(coherent volatile inout uint, uint, int, int, int);"
" int atomicAdd(coherent volatile inout int, int, int, int, int);"

"uint atomicMin(coherent volatile inout uint, uint);"
" int atomicMin(coherent volatile inout int, int);"
"uint atomicMin(coherent volatile inout uint, uint, int, int, int);"
" int atomicMin(coherent volatile inout int, int, int, int, int);"

"uint atomicMax(coherent volatile inout uint, uint);"
" int atomicMax(coherent volatile inout int, int);"
"uint atomicMax(coherent volatile inout uint, uint, int, int, int);"
" int atomicMax(coherent volatile inout int, int, int, int, int);"

"uint atomicAnd(coherent volatile inout uint, uint);"
" int atomicAnd(coherent volatile inout int, int);"
"uint atomicAnd(coherent volatile inout uint, uint, int, int, int);"
" int atomicAnd(coherent volatile inout int, int, int, int, int);"

"uint atomicOr (coherent volatile inout uint, uint);"
" int atomicOr (coherent volatile inout int, int);"
"uint atomicOr (coherent volatile inout uint, uint, int, int, int);"
" int atomicOr (coherent volatile inout int, int, int, int, int);"

"uint atomicXor(coherent volatile inout uint, uint);"
" int atomicXor(coherent volatile inout int, int);"
"uint atomicXor(coherent volatile inout uint, uint, int, int, int);"
" int atomicXor(coherent volatile inout int, int, int, int, int);"

"uint atomicExchange(coherent volatile inout uint, uint);"
" int atomicExchange(coherent volatile inout int, int);"
"uint atomicExchange(coherent volatile inout uint, uint, int, int, int);"
" int atomicExchange(coherent volatile inout int, int, int, int, int);"

"uint atomicCompSwap(coherent volatile inout uint, uint, uint);"
" int atomicCompSwap(coherent volatile inout int, int, int);"
"uint atomicCompSwap(coherent volatile inout uint, uint, uint, int, int, int, int, int);"
" int atomicCompSwap(coherent volatile inout int, int, int, int, int, int, int, int);"

"uint atomicLoad(coherent volatile in uint, int, int, int);"
" int atomicLoad(coherent volatile in int, int, int, int);"

"void atomicStore(coherent volatile out uint, uint, int, int, int);"
"void atomicStore(coherent volatile out int, int, int, int, int);"

"\n");
}
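
For orientation only (not text from this commit): the newly appended overloads take three extra trailing int arguments — scope, storage-class semantics, and semantics, in that order — as exercised by Test/spv.memoryScopeSemantics.comp. A minimal hedged GLSL sketch; the `counter` variable and `example` function are hypothetical:

    // hypothetical compute-shader fragment; assumes GL_KHR_memory_scope_semantics is enabled
    shared uint counter;
    void example()
    {
        // atomicAdd(mem, data, scope, storageSemantics, semantics)
        uint prev = atomicAdd(counter, 1u, gl_ScopeWorkgroup, gl_StorageSemanticsShared, gl_SemanticsRelease);
        // atomicLoad/atomicStore take the same trailing triple
        uint seen = atomicLoad(counter, gl_ScopeWorkgroup, gl_StorageSemanticsShared, gl_SemanticsAcquire);
        atomicStore(counter, seen, gl_ScopeWorkgroup, gl_StorageSemanticsShared, gl_SemanticsRelease);
    }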
@ -965,23 +987,49 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
|
||||
commonBuiltins.append(
|
||||
"uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t);"
|
||||
" int64_t atomicMin(coherent volatile inout int64_t, int64_t);"
|
||||
"uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t, int, int, int);"
|
||||
" int64_t atomicMin(coherent volatile inout int64_t, int64_t, int, int, int);"
|
||||
|
||||
"uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t);"
|
||||
" int64_t atomicMax(coherent volatile inout int64_t, int64_t);"
|
||||
"uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t, int, int, int);"
|
||||
" int64_t atomicMax(coherent volatile inout int64_t, int64_t, int, int, int);"
|
||||
|
||||
"uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t);"
|
||||
" int64_t atomicAnd(coherent volatile inout int64_t, int64_t);"
|
||||
"uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t, int, int, int);"
|
||||
" int64_t atomicAnd(coherent volatile inout int64_t, int64_t, int, int, int);"
|
||||
|
||||
"uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t);"
|
||||
" int64_t atomicOr (coherent volatile inout int64_t, int64_t);"
|
||||
"uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t, int, int, int);"
|
||||
" int64_t atomicOr (coherent volatile inout int64_t, int64_t, int, int, int);"
|
||||
|
||||
"uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t);"
|
||||
" int64_t atomicXor(coherent volatile inout int64_t, int64_t);"
|
||||
"uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t, int, int, int);"
|
||||
" int64_t atomicXor(coherent volatile inout int64_t, int64_t, int, int, int);"
|
||||
|
||||
" int64_t atomicAdd(coherent volatile inout int64_t, int64_t);"
|
||||
" int64_t atomicExchange(coherent volatile inout int64_t, int64_t);"
|
||||
" int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t);"
|
||||
"uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t);"
|
||||
" int64_t atomicAdd(coherent volatile inout int64_t, int64_t);"
|
||||
"uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t, int, int, int);"
|
||||
" int64_t atomicAdd(coherent volatile inout int64_t, int64_t, int, int, int);"
|
||||
|
||||
"uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t);"
|
||||
" int64_t atomicExchange(coherent volatile inout int64_t, int64_t);"
|
||||
"uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t, int, int, int);"
|
||||
" int64_t atomicExchange(coherent volatile inout int64_t, int64_t, int, int, int);"
|
||||
|
||||
"uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t);"
|
||||
" int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t);"
|
||||
"uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t, int, int, int, int, int);"
|
||||
" int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t, int, int, int, int, int);"
|
||||
|
||||
"uint64_t atomicLoad(coherent volatile in uint64_t, int, int, int);"
|
||||
" int64_t atomicLoad(coherent volatile in int64_t, int, int, int);"
|
||||
|
||||
"void atomicStore(coherent volatile out uint64_t, uint64_t, int, int, int);"
|
||||
"void atomicStore(coherent volatile out int64_t, int64_t, int, int, int);"
|
||||
"\n");
|
||||
}
|
||||
#endif
|
||||
@ -4693,6 +4741,9 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
|
||||
);
|
||||
}
|
||||
|
||||
commonBuiltins.append("void controlBarrier(int, int, int, int);\n"
|
||||
"void memoryBarrier(int, int, int);\n");
|
||||
|
||||
//============================================================================
|
||||
//
|
||||
// Prototypes for built-in functions seen by fragment shaders only.
|
||||
@ -5807,6 +5858,28 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
|
||||
"\n");
|
||||
}
|
||||
|
||||
if ((profile != EEsProfile && version >= 420) ||
|
||||
(profile == EEsProfile && version >= 310)) {
|
||||
commonBuiltins.append("const int gl_ScopeDevice = 1;\n");
|
||||
commonBuiltins.append("const int gl_ScopeWorkgroup = 2;\n");
|
||||
commonBuiltins.append("const int gl_ScopeSubgroup = 3;\n");
|
||||
commonBuiltins.append("const int gl_ScopeInvocation = 4;\n");
|
||||
commonBuiltins.append("const int gl_ScopeQueueFamily = 5;\n");
|
||||
|
||||
commonBuiltins.append("const int gl_SemanticsRelaxed = 0x0;\n");
|
||||
commonBuiltins.append("const int gl_SemanticsAcquire = 0x2;\n");
|
||||
commonBuiltins.append("const int gl_SemanticsRelease = 0x4;\n");
|
||||
commonBuiltins.append("const int gl_SemanticsAcquireRelease = 0x8;\n");
|
||||
commonBuiltins.append("const int gl_SemanticsMakeAvailable = 0x2000;\n");
|
||||
commonBuiltins.append("const int gl_SemanticsMakeVisible = 0x4000;\n");
|
||||
|
||||
commonBuiltins.append("const int gl_StorageSemanticsNone = 0x0;\n");
|
||||
commonBuiltins.append("const int gl_StorageSemanticsBuffer = 0x40;\n");
|
||||
commonBuiltins.append("const int gl_StorageSemanticsShared = 0x100;\n");
|
||||
commonBuiltins.append("const int gl_StorageSemanticsImage = 0x800;\n");
|
||||
commonBuiltins.append("const int gl_StorageSemanticsOutput = 0x1000;\n");
|
||||
}
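
As an illustration (a sketch assuming the extension is enabled; not part of the patch), these scope/semantics constants feed the controlBarrier/memoryBarrier prototypes added earlier in this file:

    // hypothetical barrier calls in a compute shader
    memoryBarrier(gl_ScopeWorkgroup,
                  gl_StorageSemanticsBuffer | gl_StorageSemanticsShared,
                  gl_SemanticsRelease);
    controlBarrier(gl_ScopeWorkgroup, gl_ScopeWorkgroup,
                   gl_StorageSemanticsShared, gl_SemanticsAcquire);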
|
||||
|
||||
// printf("%s\n", commonBuiltins.c_str());
|
||||
// printf("%s\n", stageBuiltins[EShLangFragment].c_str());
|
||||
}
|
||||
@ -6106,23 +6179,44 @@ void TBuiltIns::addImageFunctions(TSampler sampler, const TString& typeName, int
|
||||
" imageAtomicExchange(volatile coherent "
|
||||
};
|
||||
|
||||
for (size_t i = 0; i < numBuiltins; ++i) {
|
||||
// Loop twice to add prototypes with/without scope/semantics
|
||||
for (int j = 0; j < 2; ++j) {
|
||||
for (size_t i = 0; i < numBuiltins; ++i) {
|
||||
commonBuiltins.append(dataType);
|
||||
commonBuiltins.append(atomicFunc[i]);
|
||||
commonBuiltins.append(imageParams);
|
||||
commonBuiltins.append(", ");
|
||||
commonBuiltins.append(dataType);
|
||||
if (j == 1) {
|
||||
commonBuiltins.append(", int, int, int");
|
||||
}
|
||||
commonBuiltins.append(");\n");
|
||||
}
|
||||
|
||||
commonBuiltins.append(dataType);
|
||||
commonBuiltins.append(atomicFunc[i]);
|
||||
commonBuiltins.append(" imageAtomicCompSwap(volatile coherent ");
|
||||
commonBuiltins.append(imageParams);
|
||||
commonBuiltins.append(", ");
|
||||
commonBuiltins.append(dataType);
|
||||
commonBuiltins.append(", ");
|
||||
commonBuiltins.append(dataType);
|
||||
if (j == 1) {
|
||||
commonBuiltins.append(", int, int, int, int, int");
|
||||
}
|
||||
commonBuiltins.append(");\n");
|
||||
}
|
||||
|
||||
commonBuiltins.append(dataType);
|
||||
commonBuiltins.append(" imageAtomicCompSwap(volatile coherent ");
|
||||
commonBuiltins.append(" imageAtomicLoad(volatile coherent ");
|
||||
commonBuiltins.append(imageParams);
|
||||
commonBuiltins.append(", int, int, int);\n");
|
||||
|
||||
commonBuiltins.append("void imageAtomicStore(volatile coherent ");
|
||||
commonBuiltins.append(imageParams);
|
||||
commonBuiltins.append(", ");
|
||||
commonBuiltins.append(dataType);
|
||||
commonBuiltins.append(", ");
|
||||
commonBuiltins.append(dataType);
|
||||
commonBuiltins.append(");\n");
|
||||
commonBuiltins.append(", int, int, int);\n");
|
||||
|
||||
} else {
|
||||
// not int or uint
|
||||
// GL_ARB_ES3_1_compatibility
|
||||
@ -7969,6 +8063,26 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
|
||||
symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers);
|
||||
symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers);
|
||||
}
|
||||
|
||||
if (spvVersion.vulkan > 0) {
|
||||
symbolTable.setVariableExtensions("gl_ScopeDevice", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_ScopeWorkgroup", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_ScopeSubgroup", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_ScopeInvocation", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
|
||||
symbolTable.setVariableExtensions("gl_SemanticsRelaxed", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_SemanticsAcquire", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_SemanticsRelease", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_SemanticsAcquireRelease", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_SemanticsMakeAvailable", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_SemanticsMakeVisible", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
|
||||
symbolTable.setVariableExtensions("gl_StorageSemanticsNone", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_StorageSemanticsBuffer", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_StorageSemanticsShared", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_StorageSemanticsImage", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
symbolTable.setVariableExtensions("gl_StorageSemanticsOutput", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
}
|
||||
break;
|
||||
|
||||
case EShLangCompute:
|
||||
@ -8003,6 +8117,8 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
|
||||
symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_ARB_compute_shader);
|
||||
}
|
||||
|
||||
symbolTable.setFunctionExtensions("controlBarrier", 1, &E_GL_KHR_memory_scope_semantics);
|
||||
|
||||
// GL_ARB_shader_ballot
|
||||
if (profile != EEsProfile) {
|
||||
symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
|
||||
@ -8213,6 +8329,7 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
|
||||
symbolTable.relateToOperator("all", EOpAll);
|
||||
|
||||
symbolTable.relateToOperator("barrier", EOpBarrier);
|
||||
symbolTable.relateToOperator("controlBarrier", EOpBarrier);
|
||||
symbolTable.relateToOperator("memoryBarrier", EOpMemoryBarrier);
|
||||
symbolTable.relateToOperator("memoryBarrierAtomicCounter", EOpMemoryBarrierAtomicCounter);
|
||||
symbolTable.relateToOperator("memoryBarrierBuffer", EOpMemoryBarrierBuffer);
|
||||
@ -8226,6 +8343,8 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
|
||||
symbolTable.relateToOperator("atomicXor", EOpAtomicXor);
|
||||
symbolTable.relateToOperator("atomicExchange", EOpAtomicExchange);
|
||||
symbolTable.relateToOperator("atomicCompSwap", EOpAtomicCompSwap);
|
||||
symbolTable.relateToOperator("atomicLoad", EOpAtomicLoad);
|
||||
symbolTable.relateToOperator("atomicStore", EOpAtomicStore);
|
||||
|
||||
symbolTable.relateToOperator("atomicCounterIncrement", EOpAtomicCounterIncrement);
|
||||
symbolTable.relateToOperator("atomicCounterDecrement", EOpAtomicCounterDecrement);
|
||||
@ -8270,6 +8389,8 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
|
||||
symbolTable.relateToOperator("imageAtomicXor", EOpImageAtomicXor);
|
||||
symbolTable.relateToOperator("imageAtomicExchange", EOpImageAtomicExchange);
|
||||
symbolTable.relateToOperator("imageAtomicCompSwap", EOpImageAtomicCompSwap);
|
||||
symbolTable.relateToOperator("imageAtomicLoad", EOpImageAtomicLoad);
|
||||
symbolTable.relateToOperator("imageAtomicStore", EOpImageAtomicStore);
|
||||
|
||||
symbolTable.relateToOperator("subpassLoad", EOpSubpassLoad);
|
||||
symbolTable.relateToOperator("subpassLoadMS", EOpSubpassLoadMS);
|
||||
|
@ -267,6 +267,10 @@ void TParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>&
|
||||
if (tokens.size() != 1)
|
||||
error(loc, "extra tokens", "#pragma", "");
|
||||
intermediate.setUseStorageBuffer();
|
||||
} else if (spvVersion.spv > 0 && tokens[0].compare("use_vulkan_memory_model") == 0) {
|
||||
if (tokens.size() != 1)
|
||||
error(loc, "extra tokens", "#pragma", "");
|
||||
intermediate.setUseVulkanMemoryModel();
|
||||
} else if (tokens[0].compare("once") == 0) {
|
||||
warn(loc, "not implemented", "#pragma once", "");
|
||||
} else if (tokens[0].compare("glslang_binary_double_output") == 0)
|
||||
@ -1028,8 +1032,16 @@ TIntermTyped* TParseContext::handleFunctionCall(const TSourceLoc& loc, TFunction
|
||||
const char* message = "argument cannot drop memory qualifier when passed to formal parameter";
|
||||
if (argQualifier.volatil && ! formalQualifier.volatil)
|
||||
error(arguments->getLoc(), message, "volatile", "");
|
||||
if (argQualifier.coherent && ! formalQualifier.coherent)
|
||||
if (argQualifier.coherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent))
|
||||
error(arguments->getLoc(), message, "coherent", "");
|
||||
if (argQualifier.devicecoherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent))
|
||||
error(arguments->getLoc(), message, "devicecoherent", "");
|
||||
if (argQualifier.queuefamilycoherent && ! (formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
|
||||
error(arguments->getLoc(), message, "queuefamilycoherent", "");
|
||||
if (argQualifier.workgroupcoherent && ! (formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
|
||||
error(arguments->getLoc(), message, "workgroupcoherent", "");
|
||||
if (argQualifier.subgroupcoherent && ! (formalQualifier.subgroupcoherent || formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
|
||||
error(arguments->getLoc(), message, "subgroupcoherent", "");
|
||||
if (argQualifier.readonly && ! formalQualifier.readonly)
|
||||
error(arguments->getLoc(), message, "readonly", "");
|
||||
if (argQualifier.writeonly && ! formalQualifier.writeonly)
|
||||
@ -1428,6 +1440,159 @@ TIntermTyped* TParseContext::addOutputArgumentConversions(const TFunction& funct
|
||||
return conversionTree;
|
||||
}
|
||||
|
||||
void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction& fnCandidate, const TIntermOperator& callNode)
|
||||
{
|
||||
const TIntermSequence* argp = &callNode.getAsAggregate()->getSequence();
|
||||
|
||||
const int gl_SemanticsRelaxed = 0x0;
|
||||
const int gl_SemanticsAcquire = 0x2;
|
||||
const int gl_SemanticsRelease = 0x4;
|
||||
const int gl_SemanticsAcquireRelease = 0x8;
|
||||
const int gl_SemanticsMakeAvailable = 0x2000;
|
||||
const int gl_SemanticsMakeVisible = 0x4000;
|
||||
|
||||
const int gl_StorageSemanticsNone = 0x0;
|
||||
const int gl_StorageSemanticsBuffer = 0x40;
|
||||
const int gl_StorageSemanticsShared = 0x100;
|
||||
const int gl_StorageSemanticsImage = 0x800;
|
||||
const int gl_StorageSemanticsOutput = 0x1000;
|
||||
|
||||
|
||||
unsigned int semantics = 0, storageClassSemantics = 0;
|
||||
unsigned int semantics2 = 0, storageClassSemantics2 = 0;
|
||||
|
||||
// Grab the semantics and storage class semantics from the operands, based on opcode
|
||||
switch (callNode.getOp()) {
|
||||
case EOpAtomicAdd:
|
||||
case EOpAtomicMin:
|
||||
case EOpAtomicMax:
|
||||
case EOpAtomicAnd:
|
||||
case EOpAtomicOr:
|
||||
case EOpAtomicXor:
|
||||
case EOpAtomicExchange:
|
||||
case EOpAtomicStore:
|
||||
storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
break;
|
||||
case EOpAtomicLoad:
|
||||
storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
break;
|
||||
case EOpAtomicCompSwap:
|
||||
storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
storageClassSemantics2 = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
break;
|
||||
|
||||
case EOpImageAtomicAdd:
|
||||
case EOpImageAtomicMin:
|
||||
case EOpImageAtomicMax:
|
||||
case EOpImageAtomicAnd:
|
||||
case EOpImageAtomicOr:
|
||||
case EOpImageAtomicXor:
|
||||
case EOpImageAtomicExchange:
|
||||
case EOpImageAtomicStore:
|
||||
storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
break;
|
||||
case EOpImageAtomicLoad:
|
||||
storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
break;
|
||||
case EOpImageAtomicCompSwap:
|
||||
storageClassSemantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
storageClassSemantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics2 = (*argp)[8]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
break;
|
||||
|
||||
case EOpBarrier:
|
||||
storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
break;
|
||||
case EOpMemoryBarrier:
|
||||
storageClassSemantics = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
semantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
|
||||
break;
|
||||
}
|
||||
|
||||
if ((semantics & gl_SemanticsAcquire) &&
|
||||
(callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore)) {
|
||||
error(loc, "gl_SemanticsAcquire must not be used with (image) atomic store",
|
||||
fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
if ((semantics & gl_SemanticsRelease) &&
|
||||
(callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) {
|
||||
error(loc, "gl_SemanticsRelease must not be used with (image) atomic load",
|
||||
fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
if ((semantics & gl_SemanticsAcquireRelease) &&
|
||||
(callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore ||
|
||||
callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) {
|
||||
error(loc, "gl_SemanticsAcquireRelease must not be used with (image) atomic load/store",
|
||||
fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
if (((semantics | semantics2) & ~(gl_SemanticsAcquire |
|
||||
gl_SemanticsRelease |
|
||||
gl_SemanticsAcquireRelease |
|
||||
gl_SemanticsMakeAvailable |
|
||||
gl_SemanticsMakeVisible))) {
|
||||
error(loc, "Invalid semantics value", fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
if (((storageClassSemantics | storageClassSemantics2) & ~(gl_StorageSemanticsBuffer |
|
||||
gl_StorageSemanticsShared |
|
||||
gl_StorageSemanticsImage |
|
||||
gl_StorageSemanticsOutput))) {
|
||||
error(loc, "Invalid storage class semantics value", fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
|
||||
if (callNode.getOp() == EOpMemoryBarrier) {
|
||||
if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
|
||||
error(loc, "Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or "
|
||||
"gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
} else {
|
||||
if (semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) {
|
||||
if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
|
||||
error(loc, "Semantics must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or "
|
||||
"gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
}
|
||||
if (semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) {
|
||||
if (!IsPow2(semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
|
||||
error(loc, "semUnequal must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or "
|
||||
"gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (callNode.getOp() == EOpMemoryBarrier) {
|
||||
if (storageClassSemantics == 0) {
|
||||
error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
}
|
||||
if (callNode.getOp() == EOpBarrier && semantics != 0 && storageClassSemantics == 0) {
|
||||
error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) &&
|
||||
(semantics2 & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
|
||||
error(loc, "semUnequal must not be gl_SemanticsRelease or gl_SemanticsAcquireRelease",
|
||||
fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
if ((semantics & gl_SemanticsMakeAvailable) &&
|
||||
!(semantics & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
|
||||
error(loc, "gl_SemanticsMakeAvailable requires gl_SemanticsRelease or gl_SemanticsAcquireRelease",
|
||||
fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
if ((semantics & gl_SemanticsMakeVisible) &&
|
||||
!(semantics & (gl_SemanticsAcquire | gl_SemanticsAcquireRelease))) {
|
||||
error(loc, "gl_SemanticsMakeVisible requires gl_SemanticsAcquire or gl_SemanticsAcquireRelease",
|
||||
fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
|
||||
}
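
To make the intent of these checks concrete, a small illustrative GLSL sketch (not from the patch; it mirrors Test/spv.memoryScopeSemantics_Error.comp):

    // accepted: exactly one of acquire/release/acquire-release plus nonzero storage class semantics
    memoryBarrier(gl_ScopeWorkgroup, gl_StorageSemanticsBuffer, gl_SemanticsRelease);
    // rejected by the checks above:
    // memoryBarrier(gl_ScopeWorkgroup, gl_StorageSemanticsBuffer, 0);   // no acquire/release bit
    // memoryBarrier(gl_ScopeWorkgroup, 0, gl_SemanticsRelease);         // storage class semantics must not be zero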
|
||||
|
||||
|
||||
//
|
||||
// Do additional checking of built-in function calls that is not caught
|
||||
// by normal semantic checks on argument type, extension tagging, etc.
|
||||
@ -1656,6 +1821,8 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
|
||||
case EOpImageAtomicXor:
|
||||
case EOpImageAtomicExchange:
|
||||
case EOpImageAtomicCompSwap:
|
||||
case EOpImageAtomicLoad:
|
||||
case EOpImageAtomicStore:
|
||||
{
|
||||
// Make sure the image types have the correct layout() format and correct argument types
|
||||
const TType& imageType = arg0->getType();
|
||||
@ -1669,10 +1836,14 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
|
||||
error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
|
||||
}
|
||||
|
||||
if (argp->size() > 4) {
|
||||
requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
|
||||
memorySemanticsCheck(loc, fnCandidate, callNode);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
case EOpAtomicAdd:
|
||||
case EOpAtomicMin:
|
||||
case EOpAtomicMax:
|
||||
@ -1681,13 +1852,19 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
|
||||
case EOpAtomicXor:
|
||||
case EOpAtomicExchange:
|
||||
case EOpAtomicCompSwap:
|
||||
case EOpAtomicLoad:
|
||||
case EOpAtomicStore:
|
||||
{
|
||||
if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64)
|
||||
if (argp->size() > 3) {
|
||||
requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
|
||||
memorySemanticsCheck(loc, fnCandidate, callNode);
|
||||
}
|
||||
#ifdef NV_EXTENSIONS
|
||||
else if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64)
|
||||
requireExtensions(loc, 1, &E_GL_NV_shader_atomic_int64, fnCandidate.getName().c_str());
|
||||
|
||||
#endif
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
|
||||
case EOpInterpolateAtCentroid:
|
||||
case EOpInterpolateAtSample:
|
||||
@ -1751,6 +1928,14 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
|
||||
}
|
||||
break;
|
||||
|
||||
case EOpBarrier:
|
||||
case EOpMemoryBarrier:
|
||||
if (argp->size() > 0) {
|
||||
requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
|
||||
memorySemanticsCheck(loc, fnCandidate, callNode);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -2806,8 +2991,11 @@ void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQuali
|
||||
if (! symbolTable.atGlobalLevel())
|
||||
return;
|
||||
|
||||
if (qualifier.isMemory() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer)
|
||||
if (qualifier.isMemoryQualifierImageAndSSBOOnly() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer) {
|
||||
error(loc, "memory qualifiers cannot be used on this type", "", "");
|
||||
} else if (qualifier.isMemory() && (publicType.basicType != EbtSampler) && !publicType.qualifier.isUniformOrBuffer()) {
|
||||
error(loc, "memory qualifiers cannot be used on this type", "", "");
|
||||
}
|
||||
|
||||
if (qualifier.storage == EvqBuffer && publicType.basicType != EbtBlock)
|
||||
error(loc, "buffers can be declared only as blocks", "buffer", "");
|
||||
@ -3020,6 +3208,13 @@ void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, cons
|
||||
if (dst.precision == EpqNone || (force && src.precision != EpqNone))
|
||||
dst.precision = src.precision;
|
||||
|
||||
if (!force && ((src.coherent && (dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
|
||||
(src.devicecoherent && (dst.coherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
|
||||
(src.queuefamilycoherent && (dst.coherent || dst.devicecoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
|
||||
(src.workgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.subgroupcoherent)) ||
|
||||
(src.subgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent)))) {
|
||||
error(loc, "only one coherent/devicecoherent/queuefamilycoherent/workgroupcoherent/subgroupcoherent qualifier allowed", GetPrecisionQualifierString(src.precision), "");
|
||||
}
|
||||
// Layout qualifiers
|
||||
mergeObjectLayoutQualifiers(dst, src, false);
|
||||
|
||||
@ -3038,6 +3233,11 @@ void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, cons
|
||||
MERGE_SINGLETON(patch);
|
||||
MERGE_SINGLETON(sample);
|
||||
MERGE_SINGLETON(coherent);
|
||||
MERGE_SINGLETON(devicecoherent);
|
||||
MERGE_SINGLETON(queuefamilycoherent);
|
||||
MERGE_SINGLETON(workgroupcoherent);
|
||||
MERGE_SINGLETON(subgroupcoherent);
|
||||
MERGE_SINGLETON(nonprivate);
|
||||
MERGE_SINGLETON(volatil);
|
||||
MERGE_SINGLETON(restrict);
|
||||
MERGE_SINGLETON(readonly);
|
||||
@ -3862,6 +4062,11 @@ void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& quali
|
||||
if (qualifier.isMemory()) {
|
||||
type.getQualifier().volatil = qualifier.volatil;
|
||||
type.getQualifier().coherent = qualifier.coherent;
|
||||
type.getQualifier().devicecoherent = qualifier.devicecoherent ;
|
||||
type.getQualifier().queuefamilycoherent = qualifier.queuefamilycoherent;
|
||||
type.getQualifier().workgroupcoherent = qualifier.workgroupcoherent;
|
||||
type.getQualifier().subgroupcoherent = qualifier.subgroupcoherent;
|
||||
type.getQualifier().nonprivate = qualifier.nonprivate;
|
||||
type.getQualifier().readonly = qualifier.readonly;
|
||||
type.getQualifier().writeonly = qualifier.writeonly;
|
||||
type.getQualifier().restrict = qualifier.restrict;
|
||||
|
@ -323,6 +323,7 @@ public:
|
||||
TFunction* handleConstructorCall(const TSourceLoc&, const TPublicType&);
|
||||
void handlePrecisionQualifier(const TSourceLoc&, TQualifier&, TPrecisionQualifier);
|
||||
void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier);
|
||||
void memorySemanticsCheck(const TSourceLoc&, const TFunction&, const TIntermOperator& callNode);
|
||||
|
||||
void assignError(const TSourceLoc&, const char* op, TString left, TString right);
|
||||
void unaryOpError(const TSourceLoc&, const char* op, TString operand);
|
||||
|
@ -380,6 +380,11 @@ void TScanContext::fillInKeywordMap()
|
||||
(*KeywordMap)["varying"] = VARYING;
|
||||
(*KeywordMap)["buffer"] = BUFFER;
|
||||
(*KeywordMap)["coherent"] = COHERENT;
|
||||
(*KeywordMap)["devicecoherent"] = DEVICECOHERENT;
|
||||
(*KeywordMap)["queuefamilycoherent"] = QUEUEFAMILYCOHERENT;
|
||||
(*KeywordMap)["workgroupcoherent"] = WORKGROUPCOHERENT;
|
||||
(*KeywordMap)["subgroupcoherent"] = SUBGROUPCOHERENT;
|
||||
(*KeywordMap)["nonprivate"] = NONPRIVATE;
|
||||
(*KeywordMap)["restrict"] = RESTRICT;
|
||||
(*KeywordMap)["readonly"] = READONLY;
|
||||
(*KeywordMap)["writeonly"] = WRITEONLY;
|
||||
@ -937,6 +942,11 @@ int TScanContext::tokenizeIdentifier()
|
||||
return es30ReservedFromGLSL(420);
|
||||
|
||||
case COHERENT:
|
||||
case DEVICECOHERENT:
|
||||
case QUEUEFAMILYCOHERENT:
|
||||
case WORKGROUPCOHERENT:
|
||||
case SUBGROUPCOHERENT:
|
||||
case NONPRIVATE:
|
||||
case RESTRICT:
|
||||
case READONLY:
|
||||
case WRITEONLY:
|
||||
|
@ -194,6 +194,7 @@ void TParseVersions::initializeExtensionBehavior()
|
||||
extensionBehavior[E_GL_KHR_shader_subgroup_shuffle_relative] = EBhDisable;
|
||||
extensionBehavior[E_GL_KHR_shader_subgroup_clustered] = EBhDisable;
|
||||
extensionBehavior[E_GL_KHR_shader_subgroup_quad] = EBhDisable;
|
||||
extensionBehavior[E_GL_KHR_memory_scope_semantics] = EBhDisable;
|
||||
|
||||
extensionBehavior[E_GL_EXT_shader_non_constant_global_initializers] = EBhDisable;
|
||||
extensionBehavior[E_GL_EXT_shader_image_load_formatted] = EBhDisable;
|
||||
|
@ -148,6 +148,7 @@ const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_sub
|
||||
const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative";
|
||||
const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered";
|
||||
const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad";
|
||||
const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics";
|
||||
|
||||
const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers";
|
||||
const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted";
|
||||
|
@ -141,7 +141,7 @@ extern int yylex(YYSTYPE*, TParseContext&);
|
||||
%token <lex> VEC2 VEC3 VEC4
|
||||
%token <lex> MAT2 MAT3 MAT4 CENTROID IN OUT INOUT
|
||||
%token <lex> UNIFORM PATCH SAMPLE BUFFER SHARED NONUNIFORM
|
||||
%token <lex> COHERENT VOLATILE RESTRICT READONLY WRITEONLY
|
||||
%token <lex> COHERENT VOLATILE RESTRICT READONLY WRITEONLY DEVICECOHERENT QUEUEFAMILYCOHERENT WORKGROUPCOHERENT SUBGROUPCOHERENT NONPRIVATE
|
||||
%token <lex> DVEC2 DVEC3 DVEC4 DMAT2 DMAT3 DMAT4
|
||||
%token <lex> F16VEC2 F16VEC3 F16VEC4 F16MAT2 F16MAT3 F16MAT4
|
||||
%token <lex> F32VEC2 F32VEC3 F32VEC4 F32MAT2 F32MAT3 F32MAT4
|
||||
@ -1317,6 +1317,31 @@ storage_qualifier
|
||||
$$.init($1.loc);
|
||||
$$.qualifier.coherent = true;
|
||||
}
|
||||
| DEVICECOHERENT {
|
||||
$$.init($1.loc);
|
||||
parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent");
|
||||
$$.qualifier.devicecoherent = true;
|
||||
}
|
||||
| QUEUEFAMILYCOHERENT {
|
||||
$$.init($1.loc);
|
||||
parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent");
|
||||
$$.qualifier.queuefamilycoherent = true;
|
||||
}
|
||||
| WORKGROUPCOHERENT {
|
||||
$$.init($1.loc);
|
||||
parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent");
|
||||
$$.qualifier.workgroupcoherent = true;
|
||||
}
|
||||
| SUBGROUPCOHERENT {
|
||||
$$.init($1.loc);
|
||||
parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent");
|
||||
$$.qualifier.subgroupcoherent = true;
|
||||
}
|
||||
| NONPRIVATE {
|
||||
$$.init($1.loc);
|
||||
parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate");
|
||||
$$.qualifier.nonprivate = true;
|
||||
}
|
||||
| VOLATILE {
|
||||
$$.init($1.loc);
|
||||
$$.qualifier.volatil = true;
|
||||
|
File diff suppressed because it is too large
@ -1,8 +1,8 @@
|
||||
/* A Bison parser, made by GNU Bison 3.0. */
|
||||
/* A Bison parser, made by GNU Bison 3.0.4. */
|
||||
|
||||
/* Bison interface for Yacc-like parsers in C
|
||||
|
||||
Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
|
||||
Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -130,315 +130,320 @@ extern int yydebug;
    RESTRICT = 340, READONLY = 341, WRITEONLY = 342,
    DVEC2 = 343, DVEC3 = 344, DVEC4 = 345, DMAT2 = 346, DMAT3 = 347, DMAT4 = 348,
    F16VEC2 = 349, F16VEC3 = 350, F16VEC4 = 351, F16MAT2 = 352, F16MAT3 = 353, F16MAT4 = 354,
    F32VEC2 = 355, F32VEC3 = 356, F32VEC4 = 357, F32MAT2 = 358, F32MAT3 = 359, F32MAT4 = 360,
    F64VEC2 = 361, F64VEC3 = 362, F64VEC4 = 363, F64MAT2 = 364, F64MAT3 = 365, F64MAT4 = 366,
    NOPERSPECTIVE = 367, FLAT = 368, SMOOTH = 369, LAYOUT = 370, EXPLICITINTERPAMD = 371,
    MAT2X2 = 372, MAT2X3 = 373, MAT2X4 = 374, MAT3X2 = 375, MAT3X3 = 376, MAT3X4 = 377, MAT4X2 = 378, MAT4X3 = 379, MAT4X4 = 380,
    DMAT2X2 = 381, DMAT2X3 = 382, DMAT2X4 = 383, DMAT3X2 = 384, DMAT3X3 = 385, DMAT3X4 = 386, DMAT4X2 = 387, DMAT4X3 = 388, DMAT4X4 = 389,
    F16MAT2X2 = 390, F16MAT2X3 = 391, F16MAT2X4 = 392, F16MAT3X2 = 393, F16MAT3X3 = 394, F16MAT3X4 = 395, F16MAT4X2 = 396, F16MAT4X3 = 397, F16MAT4X4 = 398,
    F32MAT2X2 = 399, F32MAT2X3 = 400, F32MAT2X4 = 401, F32MAT3X2 = 402, F32MAT3X3 = 403, F32MAT3X4 = 404, F32MAT4X2 = 405, F32MAT4X3 = 406, F32MAT4X4 = 407,
    F64MAT2X2 = 408, F64MAT2X3 = 409, F64MAT2X4 = 410, F64MAT3X2 = 411, F64MAT3X3 = 412, F64MAT3X4 = 413, F64MAT4X2 = 414, F64MAT4X3 = 415, F64MAT4X4 = 416,
    ATOMIC_UINT = 417,
    SAMPLER1D = 418, SAMPLER2D = 419, SAMPLER3D = 420, SAMPLERCUBE = 421, SAMPLER1DSHADOW = 422, SAMPLER2DSHADOW = 423, SAMPLERCUBESHADOW = 424,
    SAMPLER1DARRAY = 425, SAMPLER2DARRAY = 426, SAMPLER1DARRAYSHADOW = 427, SAMPLER2DARRAYSHADOW = 428,
    ISAMPLER1D = 429, ISAMPLER2D = 430, ISAMPLER3D = 431, ISAMPLERCUBE = 432, ISAMPLER1DARRAY = 433, ISAMPLER2DARRAY = 434,
    USAMPLER1D = 435, USAMPLER2D = 436, USAMPLER3D = 437, USAMPLERCUBE = 438, USAMPLER1DARRAY = 439, USAMPLER2DARRAY = 440,
    SAMPLER2DRECT = 441, SAMPLER2DRECTSHADOW = 442, ISAMPLER2DRECT = 443, USAMPLER2DRECT = 444,
    SAMPLERBUFFER = 445, ISAMPLERBUFFER = 446, USAMPLERBUFFER = 447,
    SAMPLERCUBEARRAY = 448, SAMPLERCUBEARRAYSHADOW = 449, ISAMPLERCUBEARRAY = 450, USAMPLERCUBEARRAY = 451,
    SAMPLER2DMS = 452, ISAMPLER2DMS = 453, USAMPLER2DMS = 454, SAMPLER2DMSARRAY = 455, ISAMPLER2DMSARRAY = 456, USAMPLER2DMSARRAY = 457,
    SAMPLEREXTERNALOES = 458,
    F16SAMPLER1D = 459, F16SAMPLER2D = 460, F16SAMPLER3D = 461, F16SAMPLER2DRECT = 462, F16SAMPLERCUBE = 463, F16SAMPLER1DARRAY = 464, F16SAMPLER2DARRAY = 465,
    F16SAMPLERCUBEARRAY = 466, F16SAMPLERBUFFER = 467, F16SAMPLER2DMS = 468, F16SAMPLER2DMSARRAY = 469,
    F16SAMPLER1DSHADOW = 470, F16SAMPLER2DSHADOW = 471, F16SAMPLER1DARRAYSHADOW = 472, F16SAMPLER2DARRAYSHADOW = 473,
    F16SAMPLER2DRECTSHADOW = 474, F16SAMPLERCUBESHADOW = 475, F16SAMPLERCUBEARRAYSHADOW = 476,
    SAMPLER = 477, SAMPLERSHADOW = 478,
    TEXTURE1D = 479, TEXTURE2D = 480, TEXTURE3D = 481, TEXTURECUBE = 482, TEXTURE1DARRAY = 483, TEXTURE2DARRAY = 484,
    ITEXTURE1D = 485, ITEXTURE2D = 486, ITEXTURE3D = 487, ITEXTURECUBE = 488, ITEXTURE1DARRAY = 489, ITEXTURE2DARRAY = 490,
    UTEXTURE1D = 491, UTEXTURE2D = 492, UTEXTURE3D = 493, UTEXTURECUBE = 494, UTEXTURE1DARRAY = 495, UTEXTURE2DARRAY = 496,
    TEXTURE2DRECT = 497, ITEXTURE2DRECT = 498, UTEXTURE2DRECT = 499,
    TEXTUREBUFFER = 500, ITEXTUREBUFFER = 501, UTEXTUREBUFFER = 502,
    TEXTURECUBEARRAY = 503, ITEXTURECUBEARRAY = 504, UTEXTURECUBEARRAY = 505,
    TEXTURE2DMS = 506, ITEXTURE2DMS = 507, UTEXTURE2DMS = 508, TEXTURE2DMSARRAY = 509, ITEXTURE2DMSARRAY = 510, UTEXTURE2DMSARRAY = 511,
    F16TEXTURE1D = 512, F16TEXTURE2D = 513, F16TEXTURE3D = 514, F16TEXTURE2DRECT = 515, F16TEXTURECUBE = 516, F16TEXTURE1DARRAY = 517,
    F16TEXTURE2DARRAY = 518, F16TEXTURECUBEARRAY = 519, F16TEXTUREBUFFER = 520, F16TEXTURE2DMS = 521, F16TEXTURE2DMSARRAY = 522,
    SUBPASSINPUT = 523, SUBPASSINPUTMS = 524, ISUBPASSINPUT = 525, ISUBPASSINPUTMS = 526, USUBPASSINPUT = 527, USUBPASSINPUTMS = 528,
    F16SUBPASSINPUT = 529, F16SUBPASSINPUTMS = 530,
    IMAGE1D = 531, IIMAGE1D = 532, UIMAGE1D = 533, IMAGE2D = 534, IIMAGE2D = 535, UIMAGE2D = 536, IMAGE3D = 537, IIMAGE3D = 538, UIMAGE3D = 539,
    IMAGE2DRECT = 540, IIMAGE2DRECT = 541, UIMAGE2DRECT = 542, IMAGECUBE = 543, IIMAGECUBE = 544, UIMAGECUBE = 545,
    IMAGEBUFFER = 546, IIMAGEBUFFER = 547, UIMAGEBUFFER = 548,
    IMAGE1DARRAY = 549, IIMAGE1DARRAY = 550, UIMAGE1DARRAY = 551, IMAGE2DARRAY = 552, IIMAGE2DARRAY = 553, UIMAGE2DARRAY = 554,
    IMAGECUBEARRAY = 555, IIMAGECUBEARRAY = 556, UIMAGECUBEARRAY = 557,
    IMAGE2DMS = 558, IIMAGE2DMS = 559, UIMAGE2DMS = 560, IMAGE2DMSARRAY = 561, IIMAGE2DMSARRAY = 562, UIMAGE2DMSARRAY = 563,
    F16IMAGE1D = 564, F16IMAGE2D = 565, F16IMAGE3D = 566, F16IMAGE2DRECT = 567, F16IMAGECUBE = 568, F16IMAGE1DARRAY = 569,
    F16IMAGE2DARRAY = 570, F16IMAGECUBEARRAY = 571, F16IMAGEBUFFER = 572, F16IMAGE2DMS = 573, F16IMAGE2DMSARRAY = 574,
    STRUCT = 575, VOID = 576, WHILE = 577,
    IDENTIFIER = 578, TYPE_NAME = 579,
    FLOATCONSTANT = 580, DOUBLECONSTANT = 581, INT16CONSTANT = 582, UINT16CONSTANT = 583, INT32CONSTANT = 584, UINT32CONSTANT = 585,
    INTCONSTANT = 586, UINTCONSTANT = 587, INT64CONSTANT = 588, UINT64CONSTANT = 589, BOOLCONSTANT = 590, FLOAT16CONSTANT = 591,
    LEFT_OP = 592, RIGHT_OP = 593, INC_OP = 594, DEC_OP = 595, LE_OP = 596, GE_OP = 597, EQ_OP = 598, NE_OP = 599,
    AND_OP = 600, OR_OP = 601, XOR_OP = 602,
    MUL_ASSIGN = 603, DIV_ASSIGN = 604, ADD_ASSIGN = 605, MOD_ASSIGN = 606, LEFT_ASSIGN = 607, RIGHT_ASSIGN = 608,
    AND_ASSIGN = 609, XOR_ASSIGN = 610, OR_ASSIGN = 611, SUB_ASSIGN = 612,
    LEFT_PAREN = 613, RIGHT_PAREN = 614, LEFT_BRACKET = 615, RIGHT_BRACKET = 616, LEFT_BRACE = 617, RIGHT_BRACE = 618,
    DOT = 619, COMMA = 620, COLON = 621, EQUAL = 622, SEMICOLON = 623,
    BANG = 624, DASH = 625, TILDE = 626, PLUS = 627, STAR = 628, SLASH = 629, PERCENT = 630,
    LEFT_ANGLE = 631, RIGHT_ANGLE = 632, VERTICAL_BAR = 633, CARET = 634, AMPERSAND = 635, QUESTION = 636,
    INVARIANT = 637, PRECISE = 638, HIGH_PRECISION = 639, MEDIUM_PRECISION = 640, LOW_PRECISION = 641,
    PRECISION = 642, PACKED = 643, RESOURCE = 644, SUPERP = 645

    DEVICECOHERENT = 343, QUEUEFAMILYCOHERENT = 344, WORKGROUPCOHERENT = 345, SUBGROUPCOHERENT = 346, NONPRIVATE = 347,
    DVEC2 = 348, DVEC3 = 349, DVEC4 = 350, DMAT2 = 351, DMAT3 = 352, DMAT4 = 353,
    F16VEC2 = 354, F16VEC3 = 355, F16VEC4 = 356, F16MAT2 = 357, F16MAT3 = 358, F16MAT4 = 359,
    F32VEC2 = 360, F32VEC3 = 361, F32VEC4 = 362, F32MAT2 = 363, F32MAT3 = 364, F32MAT4 = 365,
    F64VEC2 = 366, F64VEC3 = 367, F64VEC4 = 368, F64MAT2 = 369, F64MAT3 = 370, F64MAT4 = 371,
    NOPERSPECTIVE = 372, FLAT = 373, SMOOTH = 374, LAYOUT = 375, EXPLICITINTERPAMD = 376,
    MAT2X2 = 377, MAT2X3 = 378, MAT2X4 = 379, MAT3X2 = 380, MAT3X3 = 381, MAT3X4 = 382, MAT4X2 = 383, MAT4X3 = 384, MAT4X4 = 385,
    DMAT2X2 = 386, DMAT2X3 = 387, DMAT2X4 = 388, DMAT3X2 = 389, DMAT3X3 = 390, DMAT3X4 = 391, DMAT4X2 = 392, DMAT4X3 = 393, DMAT4X4 = 394,
    F16MAT2X2 = 395, F16MAT2X3 = 396, F16MAT2X4 = 397, F16MAT3X2 = 398, F16MAT3X3 = 399, F16MAT3X4 = 400, F16MAT4X2 = 401, F16MAT4X3 = 402, F16MAT4X4 = 403,
    F32MAT2X2 = 404, F32MAT2X3 = 405, F32MAT2X4 = 406, F32MAT3X2 = 407, F32MAT3X3 = 408, F32MAT3X4 = 409, F32MAT4X2 = 410, F32MAT4X3 = 411, F32MAT4X4 = 412,
    F64MAT2X2 = 413, F64MAT2X3 = 414, F64MAT2X4 = 415, F64MAT3X2 = 416, F64MAT3X3 = 417, F64MAT3X4 = 418, F64MAT4X2 = 419, F64MAT4X3 = 420, F64MAT4X4 = 421,
    ATOMIC_UINT = 422,
    SAMPLER1D = 423, SAMPLER2D = 424, SAMPLER3D = 425, SAMPLERCUBE = 426, SAMPLER1DSHADOW = 427, SAMPLER2DSHADOW = 428, SAMPLERCUBESHADOW = 429,
    SAMPLER1DARRAY = 430, SAMPLER2DARRAY = 431, SAMPLER1DARRAYSHADOW = 432, SAMPLER2DARRAYSHADOW = 433,
    ISAMPLER1D = 434, ISAMPLER2D = 435, ISAMPLER3D = 436, ISAMPLERCUBE = 437, ISAMPLER1DARRAY = 438, ISAMPLER2DARRAY = 439,
    USAMPLER1D = 440, USAMPLER2D = 441, USAMPLER3D = 442, USAMPLERCUBE = 443, USAMPLER1DARRAY = 444, USAMPLER2DARRAY = 445,
    SAMPLER2DRECT = 446, SAMPLER2DRECTSHADOW = 447, ISAMPLER2DRECT = 448, USAMPLER2DRECT = 449,
    SAMPLERBUFFER = 450, ISAMPLERBUFFER = 451, USAMPLERBUFFER = 452,
    SAMPLERCUBEARRAY = 453, SAMPLERCUBEARRAYSHADOW = 454, ISAMPLERCUBEARRAY = 455, USAMPLERCUBEARRAY = 456,
    SAMPLER2DMS = 457, ISAMPLER2DMS = 458, USAMPLER2DMS = 459, SAMPLER2DMSARRAY = 460, ISAMPLER2DMSARRAY = 461, USAMPLER2DMSARRAY = 462,
    SAMPLEREXTERNALOES = 463,
    F16SAMPLER1D = 464, F16SAMPLER2D = 465, F16SAMPLER3D = 466, F16SAMPLER2DRECT = 467, F16SAMPLERCUBE = 468, F16SAMPLER1DARRAY = 469, F16SAMPLER2DARRAY = 470,
    F16SAMPLERCUBEARRAY = 471, F16SAMPLERBUFFER = 472, F16SAMPLER2DMS = 473, F16SAMPLER2DMSARRAY = 474,
    F16SAMPLER1DSHADOW = 475, F16SAMPLER2DSHADOW = 476, F16SAMPLER1DARRAYSHADOW = 477, F16SAMPLER2DARRAYSHADOW = 478,
    F16SAMPLER2DRECTSHADOW = 479, F16SAMPLERCUBESHADOW = 480, F16SAMPLERCUBEARRAYSHADOW = 481,
    SAMPLER = 482, SAMPLERSHADOW = 483,
    TEXTURE1D = 484, TEXTURE2D = 485, TEXTURE3D = 486, TEXTURECUBE = 487, TEXTURE1DARRAY = 488, TEXTURE2DARRAY = 489,
    ITEXTURE1D = 490, ITEXTURE2D = 491, ITEXTURE3D = 492, ITEXTURECUBE = 493, ITEXTURE1DARRAY = 494, ITEXTURE2DARRAY = 495,
    UTEXTURE1D = 496, UTEXTURE2D = 497, UTEXTURE3D = 498, UTEXTURECUBE = 499, UTEXTURE1DARRAY = 500, UTEXTURE2DARRAY = 501,
    TEXTURE2DRECT = 502, ITEXTURE2DRECT = 503, UTEXTURE2DRECT = 504,
    TEXTUREBUFFER = 505, ITEXTUREBUFFER = 506, UTEXTUREBUFFER = 507,
    TEXTURECUBEARRAY = 508, ITEXTURECUBEARRAY = 509, UTEXTURECUBEARRAY = 510,
    TEXTURE2DMS = 511, ITEXTURE2DMS = 512, UTEXTURE2DMS = 513, TEXTURE2DMSARRAY = 514, ITEXTURE2DMSARRAY = 515, UTEXTURE2DMSARRAY = 516,
    F16TEXTURE1D = 517, F16TEXTURE2D = 518, F16TEXTURE3D = 519, F16TEXTURE2DRECT = 520, F16TEXTURECUBE = 521, F16TEXTURE1DARRAY = 522,
    F16TEXTURE2DARRAY = 523, F16TEXTURECUBEARRAY = 524, F16TEXTUREBUFFER = 525, F16TEXTURE2DMS = 526, F16TEXTURE2DMSARRAY = 527,
    SUBPASSINPUT = 528, SUBPASSINPUTMS = 529, ISUBPASSINPUT = 530, ISUBPASSINPUTMS = 531, USUBPASSINPUT = 532, USUBPASSINPUTMS = 533,
    F16SUBPASSINPUT = 534, F16SUBPASSINPUTMS = 535,
    IMAGE1D = 536, IIMAGE1D = 537, UIMAGE1D = 538, IMAGE2D = 539, IIMAGE2D = 540, UIMAGE2D = 541, IMAGE3D = 542, IIMAGE3D = 543, UIMAGE3D = 544,
    IMAGE2DRECT = 545, IIMAGE2DRECT = 546, UIMAGE2DRECT = 547, IMAGECUBE = 548, IIMAGECUBE = 549, UIMAGECUBE = 550,
    IMAGEBUFFER = 551, IIMAGEBUFFER = 552, UIMAGEBUFFER = 553,
    IMAGE1DARRAY = 554, IIMAGE1DARRAY = 555, UIMAGE1DARRAY = 556, IMAGE2DARRAY = 557, IIMAGE2DARRAY = 558, UIMAGE2DARRAY = 559,
    IMAGECUBEARRAY = 560, IIMAGECUBEARRAY = 561, UIMAGECUBEARRAY = 562,
    IMAGE2DMS = 563, IIMAGE2DMS = 564, UIMAGE2DMS = 565, IMAGE2DMSARRAY = 566, IIMAGE2DMSARRAY = 567, UIMAGE2DMSARRAY = 568,
    F16IMAGE1D = 569, F16IMAGE2D = 570, F16IMAGE3D = 571, F16IMAGE2DRECT = 572, F16IMAGECUBE = 573, F16IMAGE1DARRAY = 574,
    F16IMAGE2DARRAY = 575, F16IMAGECUBEARRAY = 576, F16IMAGEBUFFER = 577, F16IMAGE2DMS = 578, F16IMAGE2DMSARRAY = 579,
    STRUCT = 580, VOID = 581, WHILE = 582,
    IDENTIFIER = 583, TYPE_NAME = 584,
    FLOATCONSTANT = 585, DOUBLECONSTANT = 586, INT16CONSTANT = 587, UINT16CONSTANT = 588, INT32CONSTANT = 589, UINT32CONSTANT = 590,
    INTCONSTANT = 591, UINTCONSTANT = 592, INT64CONSTANT = 593, UINT64CONSTANT = 594, BOOLCONSTANT = 595, FLOAT16CONSTANT = 596,
    LEFT_OP = 597, RIGHT_OP = 598, INC_OP = 599, DEC_OP = 600, LE_OP = 601, GE_OP = 602, EQ_OP = 603, NE_OP = 604,
    AND_OP = 605, OR_OP = 606, XOR_OP = 607,
    MUL_ASSIGN = 608, DIV_ASSIGN = 609, ADD_ASSIGN = 610, MOD_ASSIGN = 611, LEFT_ASSIGN = 612, RIGHT_ASSIGN = 613,
    AND_ASSIGN = 614, XOR_ASSIGN = 615, OR_ASSIGN = 616, SUB_ASSIGN = 617,
    LEFT_PAREN = 618, RIGHT_PAREN = 619, LEFT_BRACKET = 620, RIGHT_BRACKET = 621, LEFT_BRACE = 622, RIGHT_BRACE = 623,
    DOT = 624, COMMA = 625, COLON = 626, EQUAL = 627, SEMICOLON = 628,
    BANG = 629, DASH = 630, TILDE = 631, PLUS = 632, STAR = 633, SLASH = 634, PERCENT = 635,
    LEFT_ANGLE = 636, RIGHT_ANGLE = 637, VERTICAL_BAR = 638, CARET = 639, AMPERSAND = 640, QUESTION = 641,
    INVARIANT = 642, PRECISE = 643, HIGH_PRECISION = 644, MEDIUM_PRECISION = 645, LOW_PRECISION = 646,
    PRECISION = 647, PACKED = 648, RESOURCE = 649, SUPERP = 650
};
#endif

/* Value type. */
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE YYSTYPE;

union YYSTYPE
{
#line 70 "MachineIndependent/glslang.y" /* yacc.c:1909 */

@ -476,8 +481,10 @@ union YYSTYPE
        };
    } interm;

#line 480 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
#line 485 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
};

typedef union YYSTYPE YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define YYSTYPE_IS_DECLARED 1
#endif
@ -871,6 +871,8 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
        case EOpAtomicXor:      out.debug << "AtomicXor";      break;
        case EOpAtomicExchange: out.debug << "AtomicExchange"; break;
        case EOpAtomicCompSwap: out.debug << "AtomicCompSwap"; break;
        case EOpAtomicLoad:     out.debug << "AtomicLoad";     break;
        case EOpAtomicStore:    out.debug << "AtomicStore";    break;

        case EOpAtomicCounterAdd:      out.debug << "AtomicCounterAdd";      break;
        case EOpAtomicCounterSubtract: out.debug << "AtomicCounterSubtract"; break;

@ -894,6 +896,8 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
        case EOpImageAtomicXor:      out.debug << "imageAtomicXor";      break;
        case EOpImageAtomicExchange: out.debug << "imageAtomicExchange"; break;
        case EOpImageAtomicCompSwap: out.debug << "imageAtomicCompSwap"; break;
        case EOpImageAtomicLoad:     out.debug << "imageAtomicLoad";     break;
        case EOpImageAtomicStore:    out.debug << "imageAtomicStore";    break;
#ifdef AMD_EXTENSIONS
        case EOpImageLoadLod:        out.debug << "imageLoadLod";        break;
        case EOpImageStoreLod:       out.debug << "imageStoreLod";       break;
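The new EOpAtomicLoad/EOpAtomicStore and EOpImageAtomicLoad/EOpImageAtomicStore operators back the explicit load and store built-ins added by GL_KHR_memory_scope_semantics. A minimal compute shader that would exercise them is sketched below; resource names and bindings are illustrative, and the scope/semantics arguments use the extension's built-in constants.

#version 450
#extension GL_KHR_memory_scope_semantics : enable
layout(local_size_x = 1) in;

layout(std430, binding = 0) buffer Buf { uint flag; };
layout(binding = 1, r32ui) uniform uimage2D img;

void main()
{
    // store image data, then publish a flag with release semantics at device scope
    imageAtomicStore(img, ivec2(0, 0), 7u, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelease);
    atomicStore(flag, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);

    // acquire-load the flag before consuming data written by another invocation
    uint ready = atomicLoad(flag, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);
    if (ready == 1u)
        imageAtomicLoad(img, ivec2(0, 0), gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsAcquire);
}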
@ -524,11 +524,16 @@ void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& sy
    }

    // Memory...
    if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent ||
        symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil ||
        symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict ||
        symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly ||
        symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
    if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent ||
        symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent ||
        symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent ||
        symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent ||
        symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent ||
        symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate ||
        symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil ||
        symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict ||
        symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly ||
        symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
        error(infoSink, "Memory qualifiers must match:");
        writeTypeComparison = true;
    }
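To illustrate what the extended check catches, linking two compilation units of the same stage that declare the same block with different coherence qualifiers would now be reported as "Memory qualifiers must match:". The pair below is a made-up example, not taken from the test suite.

// unit 1
#version 450
#extension GL_KHR_memory_scope_semantics : enable
layout(std430, binding = 0) devicecoherent buffer DataBlock { uint counter; } data;

// unit 2: same block declared without devicecoherent, so the merged qualifiers disagree
#version 450
layout(std430, binding = 0) buffer DataBlock { uint counter; } data;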
@ -233,6 +233,7 @@ public:
        useUnknownFormat(false),
        hlslOffsets(false),
        useStorageBuffer(false),
        useVulkanMemoryModel(false),
        hlslIoMapping(false),
        textureSamplerTransformMode(EShTexSampTransKeep),
        needToLegalize(false),

@ -365,6 +366,12 @@ public:
        processes.addProcess("hlsl-iomap");
    }
    bool usingHlslIoMapping() { return hlslIoMapping; }
    void setUseVulkanMemoryModel()
    {
        useVulkanMemoryModel = true;
        processes.addProcess("use-vulkan-memory-model");
    }
    bool usingVulkanMemoryModel() const { return useVulkanMemoryModel; }

    template<class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
    bool hasCounterBufferName(const TString& name) const {

@ -734,6 +741,7 @@ protected:
    bool useUnknownFormat;
    bool hlslOffsets;
    bool useStorageBuffer;
    bool useVulkanMemoryModel;
    bool hlslIoMapping;

    std::set<TString> ioAccessed; // set of names of statically read/written I/O that might need extra checking
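setUseVulkanMemoryModel() is what usingVulkanMemoryModel() reports to the rest of the compiler, and it also records the "use-vulkan-memory-model" process string. From GLSL it is normally switched on in the shader itself; the pragma spelling in the sketch below is my assumption, so verify it against the parser before relying on it.

#version 450
#extension GL_KHR_memory_scope_semantics : enable
#pragma use_vulkan_memory_model   // assumed spelling; intended to flip TIntermediate::useVulkanMemoryModel
layout(local_size_x = 64) in;

layout(std430, binding = 0) nonprivate buffer Buf { uint data[]; };

void main()
{
    data[gl_GlobalInvocationID.x] = gl_GlobalInvocationID.x;
}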
@ -286,6 +286,8 @@ INSTANTIATE_TEST_CASE_P(
        "spv.matrix.frag",
        "spv.matrix2.frag",
        "spv.memoryQualifier.frag",
        "spv.memoryScopeSemantics.comp",
        "spv.memoryScopeSemantics_Error.comp",
        "spv.merge-unreachable.frag",
        "spv.multiStruct.comp",
        "spv.multiStructFuncall.frag",
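spv.memoryScopeSemantics_Error.comp presumably covers the rejection paths; a hypothetical shader in that spirit (not the actual test source) is one that uses the new built-ins without enabling the extension:

#version 450
layout(local_size_x = 1) in;
layout(std430, binding = 0) buffer Buf { uint flag; };

void main()
{
    // gl_ScopeDevice and the semantics constants are undefined without
    // GL_KHR_memory_scope_semantics, so this should fail to compile
    atomicStore(flag, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);
}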
@ -5,14 +5,14 @@
      "site" : "github",
      "subrepo" : "KhronosGroup/SPIRV-Tools",
      "subdir" : "External/spirv-tools",
      "commit" : "6d27a8350fbc339909834a6ef339c805cb1ab69b"
      "commit" : "7600fc0e19c3a99bd3ef2c24515cc508ca1d3cfb"
    },
    {
      "name" : "spirv-tools/external/spirv-headers",
      "site" : "github",
      "subrepo" : "KhronosGroup/SPIRV-Headers",
      "subdir" : "External/spirv-tools/external/spirv-headers",
      "commit" : "ff684ffc6a35d2a58f0f63108877d0064ea33feb"
      "commit" : "dcf23bdabacc3c54b83b1f9367e7a8adb27f8d87"
    }
  ]
}