Revert "Slim down custom glslang more"

This reverts commit 9dffcdfc93.
libretroadmin 2024-06-15 06:22:27 +02:00
parent 2d4f31bc0a
commit 18a1623517
26 changed files with 500 additions and 96 deletions

View File

@ -65,16 +65,22 @@ bool InitProcess()
ThreadInitializeIndex = OS_AllocTLSIndex();
if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "InitProcess(): Failed to allocate TLS area for init flag");
glslang::ReleaseGlobalLock();
return false;
}
if (! InitializePoolIndex()) {
assert(0 && "InitProcess(): Failed to initialize global pool");
glslang::ReleaseGlobalLock();
return false;
}
if (! InitThread()) {
assert(0 && "InitProcess(): Failed to initialize thread");
glslang::ReleaseGlobalLock();
return false;
}
@ -92,6 +98,7 @@ bool InitThread()
// This function is re-entrant
//
if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "InitThread(): Process hasn't been initalised.");
return false;
}
@ -99,6 +106,7 @@ bool InitThread()
return true;
if (! OS_SetTLSValue(ThreadInitializeIndex, (void *)1)) {
assert(0 && "InitThread(): Unable to set init flag.");
return false;
}
@ -123,8 +131,10 @@ bool DetachThread()
// Function is re-entrant and this thread may not have been initialized.
//
if (OS_GetTLSValue(ThreadInitializeIndex) != 0) {
if (!OS_SetTLSValue(ThreadInitializeIndex, (void *)0))
if (!OS_SetTLSValue(ThreadInitializeIndex, (void *)0)) {
assert(0 && "DetachThread(): Unable to clear init flag.");
success = false;
}
}
return success;
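
The three entry points patched above form glslang's process/thread lifecycle. A minimal sketch of how an embedder is expected to drive them, assuming the declarations in glslang's InitializeDll.h (the header itself is not part of this diff):

    #include "glslang/Include/InitializeDll.h"   // assumed path

    bool StartUpGlslang()
    {
        if (!glslang::InitProcess())   // allocates the TLS index and the global pool
            return false;
        return glslang::InitThread();  // marks the calling thread as initialized
    }

    void ShutDownGlslang()
    {
        glslang::DetachThread();       // clears the per-thread init flag (see DetachThread above)
        glslang::DetachProcess();      // releases the process-wide state
    }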

View File

@ -246,7 +246,7 @@ protected:
//
// Translate glslang profile to SPIR-V source language.
static spv::SourceLanguage TranslateSourceLanguage(glslang::EShSource source, EProfile profile)
spv::SourceLanguage TranslateSourceLanguage(glslang::EShSource source, EProfile profile)
{
switch (source) {
case glslang::EShSourceGlsl:
@ -258,18 +258,17 @@ static spv::SourceLanguage TranslateSourceLanguage(glslang::EShSource source, EP
case EEsProfile:
return spv::SourceLanguageESSL;
default:
break;
return spv::SourceLanguageUnknown;
}
case glslang::EShSourceHlsl:
return spv::SourceLanguageHLSL;
default:
break;
return spv::SourceLanguageUnknown;
}
return spv::SourceLanguageUnknown;
}
// Translate glslang language (stage) to SPIR-V execution model.
static spv::ExecutionModel TranslateExecutionModel(EShLanguage stage)
spv::ExecutionModel TranslateExecutionModel(EShLanguage stage)
{
switch (stage) {
case EShLangVertex: return spv::ExecutionModelVertex;
@ -279,13 +278,13 @@ static spv::ExecutionModel TranslateExecutionModel(EShLanguage stage)
case EShLangFragment: return spv::ExecutionModelFragment;
case EShLangCompute: return spv::ExecutionModelGLCompute;
default:
break;
assert(0);
return spv::ExecutionModelFragment;
}
return spv::ExecutionModelFragment;
}
// Translate glslang sampler type to SPIR-V dimensionality.
static spv::Dim TranslateDimensionality(const glslang::TSampler& sampler)
spv::Dim TranslateDimensionality(const glslang::TSampler& sampler)
{
switch (sampler.dim) {
case glslang::Esd1D: return spv::Dim1D;
@ -296,30 +295,30 @@ static spv::Dim TranslateDimensionality(const glslang::TSampler& sampler)
case glslang::EsdBuffer: return spv::DimBuffer;
case glslang::EsdSubpass: return spv::DimSubpassData;
default:
break;
assert(0);
return spv::Dim2D;
}
return spv::Dim2D;
}
// Translate glslang precision to SPIR-V precision decorations.
static spv::Decoration TranslatePrecisionDecoration(glslang::TPrecisionQualifier glslangPrecision)
spv::Decoration TranslatePrecisionDecoration(glslang::TPrecisionQualifier glslangPrecision)
{
switch (glslangPrecision) {
case glslang::EpqLow: return spv::DecorationRelaxedPrecision;
case glslang::EpqMedium: return spv::DecorationRelaxedPrecision;
default: break;
default:
return spv::NoPrecision;
}
return spv::NoPrecision;
}
// Translate glslang type to SPIR-V precision decorations.
static spv::Decoration TranslatePrecisionDecoration(const glslang::TType& type)
spv::Decoration TranslatePrecisionDecoration(const glslang::TType& type)
{
return TranslatePrecisionDecoration(type.getQualifier().precision);
}
// Translate glslang type to SPIR-V block decorations.
static spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useStorageBuffer)
spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useStorageBuffer)
{
if (type.getBasicType() == glslang::EbtBlock) {
switch (type.getQualifier().storage) {
@ -328,6 +327,7 @@ static spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool
case glslang::EvqVaryingIn: return spv::DecorationBlock;
case glslang::EvqVaryingOut: return spv::DecorationBlock;
default:
assert(0);
break;
}
}
@ -385,6 +385,7 @@ spv::Decoration TranslateLayoutDecoration(const glslang::TType& type, glslang::T
assert(type.getQualifier().layoutPacking == glslang::ElpNone);
return spv::DecorationMax;
default:
assert(0);
return spv::DecorationMax;
}
}
@ -430,19 +431,21 @@ spv::Decoration TGlslangToSpvTraverser::TranslateAuxiliaryStorageDecoration(cons
}
// If glslang type is invariant, return SPIR-V invariant decoration.
static spv::Decoration TranslateInvariantDecoration(const glslang::TQualifier& qualifier)
spv::Decoration TranslateInvariantDecoration(const glslang::TQualifier& qualifier)
{
if (qualifier.invariant)
return spv::DecorationInvariant;
return spv::DecorationMax;
else
return spv::DecorationMax;
}
// If glslang type is noContraction, return SPIR-V NoContraction decoration.
static spv::Decoration TranslateNoContractionDecoration(const glslang::TQualifier& qualifier)
spv::Decoration TranslateNoContractionDecoration(const glslang::TQualifier& qualifier)
{
if (qualifier.noContraction)
return spv::DecorationNoContraction;
return spv::DecorationMax;
else
return spv::DecorationMax;
}
// If glslang type is nonUniform, return SPIR-V NonUniform decoration.
@ -895,6 +898,7 @@ spv::StorageClass TGlslangToSpvTraverser::TranslateStorageClass(const glslang::T
case glslang::EvqConstReadOnly: return spv::StorageClassFunction;
case glslang::EvqTemporary: return spv::StorageClassFunction;
default:
assert(0);
break;
}
@ -939,7 +943,7 @@ void TGlslangToSpvTraverser::addIndirectionIndexCapabilities(const glslang::TTyp
// Return whether or not the given type is something that should be tied to a
// descriptor set.
static bool IsDescriptorResource(const glslang::TType& type)
bool IsDescriptorResource(const glslang::TType& type)
{
// uniform and buffer blocks are included, unless it is a push_constant
if (type.getBasicType() == glslang::EbtBlock)
@ -956,7 +960,7 @@ static bool IsDescriptorResource(const glslang::TType& type)
return false;
}
static void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& parent)
void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& parent)
{
if (child.layoutMatrix == glslang::ElmNone)
child.layoutMatrix = parent.layoutMatrix;
@ -989,7 +993,7 @@ static void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualif
child.writeonly = true;
}
static bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier)
bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier)
{
// This should list qualifiers that simultaneously satisfy:
// - struct members might inherit from a struct declaration
@ -1022,6 +1026,28 @@ TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, const gl
builder.setSource(TranslateSourceLanguage(glslangIntermediate->getSource(), glslangIntermediate->getProfile()),
glslangIntermediate->getVersion());
if (options.generateDebugInfo) {
builder.setEmitOpLines();
builder.setSourceFile(glslangIntermediate->getSourceFile());
// Set the source shader's text. If for SPV version 1.0, include
// a preamble in comments stating the OpModuleProcessed instructions.
// Otherwise, emit those as actual instructions.
std::string text;
const std::vector<std::string>& processes = glslangIntermediate->getProcesses();
for (int p = 0; p < (int)processes.size(); ++p) {
if (glslangIntermediate->getSpv().spv < 0x00010100) {
text.append("// OpModuleProcessed ");
text.append(processes[p]);
text.append("\n");
} else
builder.addModuleProcessed(processes[p]);
}
if (glslangIntermediate->getSpv().spv < 0x00010100 && (int)processes.size() > 0)
text.append("#line 1\n");
text.append(glslangIntermediate->getSourceText());
builder.setSourceText(text);
}
stdBuiltins = builder.import("GLSL.std.450");
builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelGLSL450);
shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str());
@ -2431,6 +2457,7 @@ bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::T
break;
default:
assert(0);
break;
}
@ -2501,6 +2528,7 @@ spv::Id TGlslangToSpvTraverser::getSampledType(const glslang::TSampler& sampler)
case glslang::EbtInt: return builder.makeIntType(32);
case glslang::EbtUint: return builder.makeUintType(32);
default:
assert(0);
return builder.makeFloatType(32);
}
}
@ -2654,6 +2682,7 @@ spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& ty
}
break;
default:
assert(0);
break;
}
@ -3553,6 +3582,7 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
case glslang::EOpSparseTexelsResident:
return builder.createUnaryOp(spv::OpImageSparseTexelsResident, builder.makeBoolType(), arguments[0]);
default:
assert(0);
break;
}
}
@ -4347,6 +4377,7 @@ spv::Id TGlslangToSpvTraverser::createBinaryMatrixOperation(spv::Op op, OpDecora
return result;
}
default:
assert(0);
return spv::NoResult;
}
}
@ -5284,6 +5315,7 @@ spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv
opCode = spv::OpAtomicLoad;
break;
default:
assert(0);
break;
}
@ -5695,7 +5727,7 @@ spv::Id TGlslangToSpvTraverser::createSubgroupOperation(glslang::TOperator op, s
builder.addCapability(spv::CapabilityGroupNonUniformPartitionedNV);
break;
#endif
default: break;
default: assert(0 && "Unhandled subgroup operation!");
}
const bool isUnsigned = typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64;
@ -5837,7 +5869,7 @@ spv::Id TGlslangToSpvTraverser::createSubgroupOperation(glslang::TOperator op, s
case glslang::EOpSubgroupQuadSwapHorizontal:
case glslang::EOpSubgroupQuadSwapVertical:
case glslang::EOpSubgroupQuadSwapDiagonal: opCode = spv::OpGroupNonUniformQuadSwap; break;
default: break;
default: assert(0 && "Unhandled subgroup operation!");
}
std::vector<spv::Id> spvGroupOperands;
@ -6213,8 +6245,11 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::
switch (consumedOperands) {
case 0:
// should all be handled by visitAggregate and createNoArgOperation
assert(0);
return 0;
case 1:
// should all be handled by createUnaryOperation
assert(0);
return 0;
case 2:
id = builder.createBinOp(opCode, typeId, operands[0], operands[1]);
@ -6620,6 +6655,7 @@ spv::Id TGlslangToSpvTraverser::createSpvConstantFromConstUnionArray(const glsla
spvConsts.push_back(builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst()));
break;
default:
assert(0);
break;
}
++nextConst;
@ -6666,6 +6702,7 @@ spv::Id TGlslangToSpvTraverser::createSpvConstantFromConstUnionArray(const glsla
scalar = builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst(), specConstant);
break;
default:
assert(0);
break;
}
++nextConst;
@ -6827,6 +6864,14 @@ spv::Id TGlslangToSpvTraverser::getExtBuiltins(const char* name)
namespace glslang {
void GetSpirvVersion(std::string& version)
{
const int bufSize = 100;
char buf[bufSize];
snprintf(buf, bufSize, "0x%08x, Revision %d", spv::Version, spv::Revision);
version = buf;
}
// For low-order part of the generator's magic number. Bump up
// when there is a change in the style (e.g., if SSA form changes,
// or a different instruction sequence to do something gets used).
@ -6842,6 +6887,52 @@ int GetSpirvGeneratorVersion()
return 7; // GLSL volatile keyword maps to both SPIR-V decorations Volatile and Coherent
}
// Write SPIR-V out to a binary file
void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName)
{
std::ofstream out;
out.open(baseName, std::ios::binary | std::ios::out);
if (out.fail())
printf("ERROR: Failed to open file: %s\n", baseName);
for (int i = 0; i < (int)spirv.size(); ++i) {
unsigned int word = spirv[i];
out.write((const char*)&word, 4);
}
out.close();
}
// Write SPIR-V out to a text file with 32-bit hexadecimal words
void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName)
{
std::ofstream out;
out.open(baseName, std::ios::binary | std::ios::out);
if (out.fail())
printf("ERROR: Failed to open file: %s\n", baseName);
out << "\t// " <<
glslang::GetSpirvGeneratorVersion() << "." << GLSLANG_MINOR_VERSION << "." << GLSLANG_PATCH_LEVEL <<
std::endl;
if (varName != nullptr) {
out << "\t #pragma once" << std::endl;
out << "const uint32_t " << varName << "[] = {" << std::endl;
}
const int WORDS_PER_LINE = 8;
for (int i = 0; i < (int)spirv.size(); i += WORDS_PER_LINE) {
out << "\t";
for (int j = 0; j < WORDS_PER_LINE && i + j < (int)spirv.size(); ++j) {
const unsigned int word = spirv[i + j];
out << "0x" << std::hex << std::setw(8) << std::setfill('0') << word;
if (i + j + 1 < (int)spirv.size()) {
out << ",";
}
}
out << std::endl;
}
if (varName != nullptr) {
out << "};";
}
out.close();
}
//
// Set up the glslang traversal
//
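
For reference, OutputSpvHex above emits an ordinary C/C++ header. With varName set to, say, "shader_spv", the generated file would look roughly like this (only the leading 0x07230203 SPIR-V magic word is definite; the version comment and remaining words are illustrative):

    // 7.11.3226
     #pragma once
    const uint32_t shader_spv[] = {
        0x07230203,0x00010000,0x00080007,0x0000002a,0x00000000,0x00020011,0x00000001,0x0006000b,
        // ... eight hexadecimal words per line, comma separated, through the final word ...
    };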

View File

@ -48,16 +48,20 @@
namespace glslang {
struct SpvOptions {
SpvOptions() : disableOptimizer(false),
SpvOptions() : generateDebugInfo(false), disableOptimizer(true),
optimizeSize(false) { }
bool generateDebugInfo;
bool disableOptimizer;
bool optimizeSize;
};
void GetSpirvVersion(std::string&);
int GetSpirvGeneratorVersion();
void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
SpvOptions* options = nullptr);
void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger* logger, SpvOptions* options = nullptr);
void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName);
void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName);
}
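
Taken together, these declarations are the public SPIR-V back-end surface this revert restores. A minimal, hedged sketch of the usual call sequence around them (the TShader/TProgram front end and the include paths are assumed from glslang's ShaderLang.h, which is not shown in this diff):

    #include <vector>
    #include "glslang/Public/ShaderLang.h"   // assumed path
    #include "SPIRV/GlslangToSpv.h"          // assumed path

    static bool CompileToSpv(const char* source, std::vector<unsigned int>& spirv)
    {
        glslang::InitializeProcess();                    // once per process

        glslang::TShader shader(EShLangFragment);
        shader.setStrings(&source, 1);
        TBuiltInResource resources = {};                 // real callers supply proper limits
        if (!shader.parse(&resources, 450, false, EShMsgDefault))
            return false;

        glslang::TProgram program;
        program.addShader(&shader);
        if (!program.link(EShMsgDefault))
            return false;

        glslang::SpvOptions options;
        options.generateDebugInfo = true;                // field restored above
        glslang::GlslangToSpv(*program.getIntermediate(EShLangFragment), spirv, &options);

        glslang::OutputSpvBin(spirv, "out.spv");         // binary writer restored in GlslangToSpv.cpp
        glslang::FinalizeProcess();
        return true;
    }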

View File

@ -256,6 +256,8 @@ namespace spv {
spv::Id spirvbin_t::localId(spv::Id id, spv::Id newId)
{
//assert(id != spv::NoResult && newId != spv::NoResult);
if (id > bound()) {
error(std::string("ID out of range: ") + std::to_string(id));
return spirvbin_t::unused;
@ -594,8 +596,7 @@ namespace spv {
return nextInst;
case spv::OperandVariableLiteralId: {
if (opCode == OpSwitch)
{
if (opCode == OpSwitch) {
// word-2 is the position of the selector ID. OpSwitch Literals match its type.
// In case the IDs are currently being remapped, we get the word[-2] ID from
// the circular idBuffer.
@ -610,6 +611,8 @@ namespace spv {
word += literalSize; // literal
idFn(asId(word++)); // label
}
} else {
assert(0); // currently, only OpSwitch uses OperandVariableLiteralId
}
return nextInst;
@ -659,6 +662,7 @@ namespace spv {
break;
default:
assert(0 && "Unhandled Operand Class");
break;
}
}

View File

@ -533,9 +533,9 @@ Op Builder::getMostBasicTypeClass(Id typeId) const
case OpTypePointer:
return getMostBasicTypeClass(instr->getIdOperand(1));
default:
break;
assert(0);
return OpTypeFloat;
}
return OpTypeFloat;
}
int Builder::getNumTypeConstituents(Id typeId) const
@ -544,6 +544,10 @@ int Builder::getNumTypeConstituents(Id typeId) const
switch (instr->getOpCode())
{
case OpTypeBool:
case OpTypeInt:
case OpTypeFloat:
return 1;
case OpTypeVector:
case OpTypeMatrix:
return instr->getImmediateOperand(1);
@ -554,13 +558,10 @@ int Builder::getNumTypeConstituents(Id typeId) const
}
case OpTypeStruct:
return instr->getNumOperands();
case OpTypeBool:
case OpTypeInt:
case OpTypeFloat:
default:
break;
assert(0);
return 1;
}
return 1;
}
// Return the lowest-level type of scalar that an homogeneous composite is made out of.
@ -586,9 +587,9 @@ Id Builder::getScalarTypeId(Id typeId) const
case OpTypePointer:
return getScalarTypeId(getContainedTypeId(typeId));
default:
break;
assert(0);
return NoResult;
}
return NoResult;
}
// Return the type of 'member' of a composite.
@ -609,9 +610,9 @@ Id Builder::getContainedTypeId(Id typeId, int member) const
case OpTypeStruct:
return instr->getIdOperand(member);
default:
break;
assert(0);
return NoResult;
}
return NoResult;
}
// Return the immediately contained type of a given composite type.
@ -862,6 +863,7 @@ Id Builder::makeFpConstant(Id type, double d, bool specConstant)
break;
}
assert(false);
return NoResult;
}
@ -942,6 +944,7 @@ Id Builder::makeCompositeConstant(Id typeId, const std::vector<Id>& members, boo
}
break;
default:
assert(0);
return makeFloatConstant(0.0);
}
@ -1797,6 +1800,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
break;
default:
assert(0);
break;
}
if (isArrayedImageType(getImageType(parameters.sampler)))
@ -1822,6 +1826,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
resultType = isUnsignedResult ? makeUintType(32) : makeIntType(32);
break;
default:
assert(0);
break;
}
@ -2002,6 +2007,8 @@ Id Builder::createConstructor(Decoration precision, const std::vector<Id>& sourc
accumulateVectorConstituents(sources[i]);
else if (isMatrix(sources[i]))
accumulateMatrixConstituents(sources[i]);
else
assert(0);
if (targetComponent >= numTargetComponents)
break;

View File

@ -356,7 +356,7 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
switch (stream[word]) {
case 8: idDescriptor[resultId] = "int8_t"; break;
case 16: idDescriptor[resultId] = "int16_t"; break;
default: // fallthrough
default: assert(0); // fallthrough
case 32: idDescriptor[resultId] = "int"; break;
case 64: idDescriptor[resultId] = "int64_t"; break;
}
@ -364,7 +364,7 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
case OpTypeFloat:
switch (stream[word]) {
case 16: idDescriptor[resultId] = "float16_t"; break;
default: // fallthrough
default: assert(0); // fallthrough
case 32: idDescriptor[resultId] = "float"; break;
case 64: idDescriptor[resultId] = "float64_t"; break;
}

View File

@ -200,6 +200,12 @@ class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator
//
typedef std::basic_string<char> TPersistString;
//
// templatized min and max functions.
//
template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
//
// Create a TString object from an integer.
//

View File

@ -55,10 +55,10 @@ enum TPrefixType {
enum TOutputStream {
ENull = 0,
EDebugger = 0x01,
EStdOut = 0x02,
EString = 0x04,
};
//
// Encapsulate info logs for all objects that have them.
//

View File

@ -37,6 +37,10 @@
#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_
#ifdef _DEBUG
# define GUARD_BLOCKS // define to enable guard block sanity checking
#endif
//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
@ -71,12 +75,59 @@ namespace glslang {
class TAllocation {
public:
TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
size(size), mem(mem), prevAlloc(prev) { }
size(size), mem(mem), prevAlloc(prev) {
// Allocations are bracketed:
// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (guardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
# ifdef GUARD_BLOCKS
memset(preGuard(), guardBlockBeginVal, guardBlockSize);
memset(data(), userDataFill, size);
memset(postGuard(), guardBlockEndVal, guardBlockSize);
# endif
}
void check() const {
checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
checkGuardBlock(postGuard(), guardBlockEndVal, "after");
}
void checkAllocList() const;
// Return total size needed to accommodate user buffer of 'size',
// plus our tracking data.
inline static size_t allocationSize(size_t size) {
return size + 2 * guardBlockSize + headerSize();
}
// Offset from surrounding buffer to get to user data buffer.
inline static unsigned char* offsetAllocation(unsigned char* m) {
return m + guardBlockSize + headerSize();
}
private:
void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
// Find offsets to pre and post guard blocks, and user data buffer
unsigned char* preGuard() const { return mem + headerSize(); }
unsigned char* data() const { return preGuard() + guardBlockSize; }
unsigned char* postGuard() const { return data() + size; }
size_t size; // size of the user data area
unsigned char* mem; // beginning of our allocation (pts to header)
TAllocation* prevAlloc; // prior allocation in the chain
const static unsigned char guardBlockBeginVal;
const static unsigned char guardBlockEndVal;
const static unsigned char userDataFill;
const static size_t guardBlockSize;
# ifdef GUARD_BLOCKS
inline static size_t headerSize() { return sizeof(TAllocation); }
# else
inline static size_t headerSize() { return 0; }
# endif
};
//
@ -137,11 +188,21 @@ protected:
struct tHeader {
tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
lastAllocation(0),
#endif
nextPage(nextPage), pageCount(pageCount) { }
~tHeader() {
#ifdef GUARD_BLOCKS
if (lastAllocation)
lastAllocation->checkAllocList();
#endif
}
#ifdef GUARD_BLOCKS
TAllocation* lastAllocation;
#endif
tHeader* nextPage;
size_t pageCount;
};
@ -152,6 +213,19 @@ protected:
};
typedef std::vector<tAllocState> tAllocStack;
// Track allocations if and only if we're using guard blocks
#ifndef GUARD_BLOCKS
void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
#else
void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
new(memory) TAllocation(numBytes, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
// This is optimized entirely away if GUARD_BLOCKS is not defined.
return TAllocation::offsetAllocation(memory);
}
size_t pageSize; // granularity of allocation from the OS
size_t alignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
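
The guard-block bracketing described above has a fixed layout; a hedged sketch, using the 16-byte guard size and fill bytes restored in PoolAlloc.cpp later in this diff (include path assumed):

    #include <cstddef>
    #include "glslang/Include/PoolAlloc.h"   // assumed path for TAllocation

    // Debug-build (GUARD_BLOCKS) picture of one allocation of `userBytes`:
    //
    //   [TAllocation header][16 x 0xfb pre-guard][userBytes x 0xcd][16 x 0xfe post-guard]
    //    ^ mem               ^ preGuard()         ^ data()          ^ postGuard()
    //
    inline std::size_t poolFootprint(std::size_t userBytes)
    {
        // mirrors TAllocation::allocationSize(): user data + both guards + the header
        return userBytes + 2 * 16 /* guardBlockSize */ + sizeof(glslang::TAllocation);
    }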

View File

@ -49,6 +49,11 @@ void TInfoSinkBase::append(const char* s)
}
}
//#ifdef _WIN32
// if (outputStream & EDebugger)
// OutputDebugString(s);
//#endif
if (outputStream & EStdOut)
fprintf(stdout, "%s", s);
}
@ -60,6 +65,15 @@ void TInfoSinkBase::append(int count, char c)
sink.append(count, c);
}
//#ifdef _WIN32
// if (outputStream & EDebugger) {
// char str[2];
// str[0] = c;
// str[1] = '\0';
// OutputDebugString(str);
// }
//#endif
if (outputStream & EStdOut)
fprintf(stdout, "%c", c);
}
@ -71,6 +85,11 @@ void TInfoSinkBase::append(const TPersistString& t)
sink.append(t);
}
//#ifdef _WIN32
// if (outputStream & EDebugger)
// OutputDebugString(t.c_str());
//#endif
if (outputStream & EStdOut)
fprintf(stdout, "%s", t.c_str());
}
@ -82,6 +101,11 @@ void TInfoSinkBase::append(const TString& t)
sink.append(t.c_str());
}
//#ifdef _WIN32
// if (outputStream & EDebugger)
// OutputDebugString(t.c_str());
//#endif
if (outputStream & EStdOut)
fprintf(stdout, "%s", t.c_str());
}

View File

@ -5734,7 +5734,7 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
// GL_ARB_shader_ballot
if (profile != EEsProfile && version >= 450) {
const char* ballotDecls =
const char* ballotDecls =
"uniform uint gl_SubGroupSizeARB;"
"in uint gl_SubGroupInvocationARB;"
"in uint64_t gl_SubGroupEqMaskARB;"
@ -5743,7 +5743,7 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"in uint64_t gl_SubGroupLeMaskARB;"
"in uint64_t gl_SubGroupLtMaskARB;"
"\n";
const char* fragmentBallotDecls =
const char* fragmentBallotDecls =
"uniform uint gl_SubGroupSizeARB;"
"flat in uint gl_SubGroupInvocationARB;"
"flat in uint64_t gl_SubGroupEqMaskARB;"
@ -5770,7 +5770,7 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
// GL_KHR_shader_subgroup
if (spvVersion.vulkan > 0) {
const char* ballotDecls =
const char* ballotDecls =
"in mediump uint gl_SubgroupSize;"
"in mediump uint gl_SubgroupInvocationID;"
"in highp uvec4 gl_SubgroupEqMask;"
@ -5779,7 +5779,7 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"in highp uvec4 gl_SubgroupLeMask;"
"in highp uvec4 gl_SubgroupLtMask;"
"\n";
const char* fragmentBallotDecls =
const char* fragmentBallotDecls =
"flat in mediump uint gl_SubgroupSize;"
"flat in mediump uint gl_SubgroupInvocationID;"
"flat in highp uvec4 gl_SubgroupEqMask;"
@ -5806,6 +5806,9 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2
"\n");
}
// printf("%s\n", commonBuiltins.c_str());
// printf("%s\n", stageBuiltins[EShLangFragment].c_str());
}
//
@ -5933,8 +5936,9 @@ void TBuiltIns::add2ndGenerationSamplingImaging(int version, EProfile profile, c
// sparseTexelsResidentARB()
//
if (profile != EEsProfile && version >= 450)
if (profile != EEsProfile && version >= 450) {
commonBuiltins.append("bool sparseTexelsResidentARB(int code);\n");
}
}
//
@ -7569,7 +7573,7 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview);
BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable);
}
// GL_KHR_shader_subgroup
if (spvVersion.vulkan > 0) {
symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);

View File

@ -1193,6 +1193,10 @@ TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped*
const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr);
if (!isSimple) {
assert(0); // TODO: use node replicator service when available.
}
for (int x=0; x<matSize; ++x)
rhsAggregate->getSequence().push_back(node);
@ -3818,9 +3822,9 @@ const char* TIntermediate::getResourceName(TResourceType res)
case EResSsbo: return "shift-ssbo-binding";
case EResUav: return "shift-uav-binding";
default:
break;
assert(0); // internal error: should only be called with valid resource types.
return nullptr;
}
return nullptr;
}

View File

@ -588,6 +588,8 @@ void TParseContext::checkIoArrayConsistency(const TSourceLoc& loc, int requiredS
error(loc, "inconsistent input primitive for array size of", feature, name.c_str());
else if (language == EShLangTessControl)
error(loc, "inconsistent output number of vertices for array size of", feature, name.c_str());
else
assert(0);
}
}
@ -1564,6 +1566,7 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
case EOpTextureGradOffset: arg = 4; break;
case EOpTextureProjGradOffset: arg = 4; break;
default:
assert(0);
break;
}
@ -5000,6 +5003,8 @@ void TParseContext::checkNoShaderLayouts(const TSourceLoc& loc, const TShaderQua
error(loc, message, "max_vertices", "");
else if (language == EShLangTessControl)
error(loc, message, "vertices", "");
else
assert(0);
}
if (shaderQualifiers.blendEquation)
error(loc, message, "blend equation", "");

View File

@ -146,6 +146,42 @@ TPoolAllocator::~TPoolAllocator()
}
}
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;
# ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
# else
const size_t TAllocation::guardBlockSize = 0;
# endif
//
// Check a single guard block for damage
//
#ifdef GUARD_BLOCKS
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
#else
void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
#endif
{
#ifdef GUARD_BLOCKS
for (size_t x = 0; x < guardBlockSize; x++) {
if (blockMem[x] != val) {
const int maxSize = 80;
char assertMsg[maxSize];
// We don't print the assert message. It's here just to be helpful.
snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
locText, size, data());
assert(0 && "PoolAlloc: Damage in guard block");
}
}
#else
assert(guardBlockSize == 0);
#endif
}
void TPoolAllocator::push()
{
tAllocState state = { currentPageOffset, inUseList };
@ -205,6 +241,13 @@ void TPoolAllocator::popAll()
void* TPoolAllocator::allocate(size_t numBytes)
{
// If we are using guard blocks, all allocations are bracketed by
// them: [guardblock][allocation][guardblock]. numBytes is how
// much memory the caller asked for. allocationSize is the total
// size including guard blocks. In release build,
// guardBlockSize=0 and this all gets optimized away.
size_t allocationSize = TAllocation::allocationSize(numBytes);
//
// Just keep some interesting statistics.
//
@ -215,23 +258,23 @@ void* TPoolAllocator::allocate(size_t numBytes)
// Do the allocation, most likely case first, for efficiency.
// This step could be moved to be inline sometime.
//
if (currentPageOffset + numBytes <= pageSize) {
if (currentPageOffset + allocationSize <= pageSize) {
//
// Safe to allocate from currentPageOffset.
//
unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
currentPageOffset += numBytes;
currentPageOffset += allocationSize;
currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
return memory;
return initializeAllocation(inUseList, memory, numBytes);
}
if (numBytes + headerSkip > pageSize) {
if (allocationSize + headerSkip > pageSize) {
//
// Do a multi-page allocation. Don't mix these with the others.
// The OS is efficient at allocating and freeing multiple pages.
//
size_t numBytesToAlloc = numBytes + headerSkip;
size_t numBytesToAlloc = allocationSize + headerSkip;
tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
if (memory == 0)
return 0;
@ -264,9 +307,18 @@ void* TPoolAllocator::allocate(size_t numBytes)
inUseList = memory;
unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
currentPageOffset = (headerSkip + numBytes + alignmentMask) & ~alignmentMask;
currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
return ret;
return initializeAllocation(inUseList, ret, numBytes);
}
//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
alloc->check();
}
} // end namespace glslang
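
A short usage sketch for the restored pool scoping, assuming direct use of the public TPoolAllocator members declared in PoolAlloc.h (production code normally reaches the pool through the per-thread allocator helpers instead):

    #include "glslang/Include/PoolAlloc.h"   // assumed path

    static void PoolScopeSketch()
    {
        glslang::TPoolAllocator pool;

        pool.push();                          // open a scope: saves page offset and in-use list
        void* small = pool.allocate(64);      // guard-block bracketed in GUARD_BLOCKS builds
        void* big   = pool.allocate(1 << 20); // oversized requests get their own multi-page block
        (void)small; (void)big;               // pool memory is never freed individually
        pool.pop();                           // releases everything allocated since push()
    }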

View File

@ -129,7 +129,7 @@ int MapVersionToIndex(int version)
case 500: index = 0; break; // HLSL
case 320: index = 15; break;
case 460: index = 16; break;
default: break;
default: assert(0); break;
}
assert(index < VersionCount);
@ -1905,6 +1905,8 @@ const TType* TProgram::getUniformTType(int index) const { return reflection
const TType* TProgram::getUniformBlockTType(int index) const { return reflection->getUniformBlock(index).getType(); }
unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); }
void TProgram::dumpReflection() { reflection->dump(); }
//
// I/O mapping implementation.
//

View File

@ -334,10 +334,15 @@ TFunction* TFunction::clone() const
return function;
}
// Anonymous members of a given block should be cloned at a higher level,
// where they can all be assured to still end up pointing to a single
// copy of the original container.
TAnonMember* TAnonMember::clone() const { return 0; }
TAnonMember* TAnonMember::clone() const
{
// Anonymous members of a given block should be cloned at a higher level,
// where they can all be assured to still end up pointing to a single
// copy of the original container.
assert(0);
return 0;
}
TSymbolTableLevel* TSymbolTableLevel::clone() const
{

View File

@ -786,7 +786,7 @@ void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBe
warn(getCurrentLoc(), "extension not supported:", "#extension", extension);
break;
default:
break;
assert(0 && "unexpected behavior");
}
return;

View File

@ -41,7 +41,7 @@
namespace glslang {
// extract integers out of attribute arguments stored in attribute aggregate
bool TAttributeArgs::getInt(int& value, int argNum) const
bool TAttributeArgs::getInt(int& value, int argNum) const
{
const TConstUnion* intConst = getConstUnion(EbtInt, argNum);
@ -54,7 +54,7 @@ bool TAttributeArgs::getInt(int& value, int argNum) const
// extract strings out of attribute arguments stored in attribute aggregate.
// convert to lower case if convertToLower is true (for case-insensitive compare convenience)
bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const
bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const
{
const TConstUnion* stringConst = getConstUnion(EbtString, argNum);
@ -107,7 +107,8 @@ TAttributeType TParseContext::attributeFromName(const TString& name) const
return EatDependencyInfinite;
else if (name == "dependency_length")
return EatDependencyLength;
return EatNone;
else
return EatNone;
}
// Make an initial leaf for the grammar from a no-argument attribute

View File

@ -129,6 +129,8 @@ void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
error(infoSink, "Contradictory layout max_vertices values");
else if (language == EShLangTessControl)
error(infoSink, "Contradictory layout vertices values");
else
assert(0);
}
if (vertexSpacing == EvsNone)
@ -495,8 +497,8 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
break;
case EShLangFragment:
// for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
// ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
// for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
// ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
// requiring explicit early_fragment_tests
if (getPostDepthCoverage() && !getEarlyFragmentTests())
error(infoSink, "post_depth_coverage requires early_fragment_tests");
@ -966,6 +968,7 @@ int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
}
assert(0);
return 1;
}
@ -1080,8 +1083,10 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
numComponents = type.getVectorSize();
else if (type.isMatrix())
numComponents = type.getMatrixCols() * type.getMatrixRows();
else
else {
assert(0);
numComponents = 1;
}
if (type.getBasicType() == EbtDouble) {
containsDouble = true;
@ -1263,6 +1268,7 @@ int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, b
return alignment;
}
assert(0); // all cases should be covered above
size = baseAlignmentVec4Std140;
return baseAlignmentVec4Std140;
}

View File

@ -333,8 +333,8 @@ protected:
}
virtual int scan(TPpToken*) override;
virtual int getch() override { return EndOfInput; }
virtual void ungetch() override { }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
bool peekPasting() override { return prepaste; }
bool endOfReplacementList() override { return mac->body.atEnd(); }
bool isMacroInput() override { return true; }
@ -359,8 +359,8 @@ protected:
return marker;
}
virtual int getch() override { return EndOfInput; }
virtual void ungetch() override { }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
static const int marker = -3;
};
@ -368,8 +368,8 @@ protected:
public:
tZeroInput(TPpContext* pp) : tInput(pp) { }
virtual int scan(TPpToken*) override;
virtual int getch() override { return EndOfInput; }
virtual void ungetch() override { }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
};
std::vector<tInput*> inputStack;
@ -412,8 +412,8 @@ protected:
public:
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { }
virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->_parseContext, ppToken); }
virtual int getch() override { return EndOfInput; }
virtual void ungetch() override { }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
protected:
TokenStream* tokens;
@ -424,8 +424,8 @@ protected:
public:
tUngotTokenInput(TPpContext* pp, int t, TPpToken* p) : tInput(pp), token(t), lval(*p) { }
virtual int scan(TPpToken *) override;
virtual int getch() override { return EndOfInput; }
virtual void ungetch() override { }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
protected:
int token;
TPpToken lval;

View File

@ -807,4 +807,37 @@ bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
return true;
}
void TReflection::dump()
{
printf("Uniform reflection:\n");
for (size_t i = 0; i < indexToUniform.size(); ++i)
indexToUniform[i].dump();
printf("\n");
printf("Uniform block reflection:\n");
for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
indexToUniformBlock[i].dump();
printf("\n");
printf("Vertex attribute reflection:\n");
for (size_t i = 0; i < indexToAttribute.size(); ++i)
indexToAttribute[i].dump();
printf("\n");
if (getLocalSize(0) > 1) {
static const char* axis[] = { "X", "Y", "Z" };
for (int dim=0; dim<3; ++dim)
if (getLocalSize(dim) > 1)
printf("Local size %s: %d\n", axis[dim], getLocalSize(dim));
printf("\n");
}
// printf("Live names\n");
// for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
// printf("%s: %d\n", it->first.c_str(), it->second);
// printf("\n");
}
} // end namespace glslang
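
The dump path added back here is normally reached through TProgram; a hedged sketch of the usual sequence (buildReflection(), the per-uniform getters, and dumpReflection() are the TProgram members declared in ShaderLang.h; only dumpReflection() appears elsewhere in this diff):

    // after shaders have been added and program.link(...) has succeeded:
    if (program.buildReflection()) {
        for (int i = 0; i < program.getNumLiveUniformVariables(); ++i)
            printf("uniform %d: %s\n", i, program.getUniformName(i));
        program.dumpReflection();   // prints the uniform/block/attribute tables implemented above
    }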

View File

@ -66,6 +66,16 @@ public:
return -1;
return type->getQualifier().layoutBinding;
}
void dump() const
{
printf("%s: offset %d, type %x, size %d, index %d, binding %d",
name.c_str(), offset, glDefineType, size, index, getBinding() );
if (counterIndex != -1)
printf(", counter %d", counterIndex);
printf("\n");
}
static TObjectReflection badReflection() { return TObjectReflection(); }
TString name;
@ -85,7 +95,7 @@ protected:
class TReflection {
public:
TReflection() : badReflection(TObjectReflection::badReflection())
{
{
for (int dim=0; dim<3; ++dim)
localSize[dim] = 0;
}
@ -111,7 +121,8 @@ public:
{
if (i >= 0 && i < (int)indexToUniformBlock.size())
return indexToUniformBlock[i];
return badReflection;
else
return badReflection;
}
// for mapping an attribute index to the attribute's description
@ -120,7 +131,8 @@ public:
{
if (i >= 0 && i < (int)indexToAttribute.size())
return indexToAttribute[i];
return badReflection;
else
return badReflection;
}
// for mapping any name to its index (block names, uniform names and attribute names)
@ -129,7 +141,8 @@ public:
TNameToIndex::const_iterator it = nameToIndex.find(name);
if (it == nameToIndex.end())
return -1;
return it->second;
else
return it->second;
}
// see getIndex(const char*)
@ -138,6 +151,8 @@ public:
// Thread local size
unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; }
void dump();
protected:
friend class glslang::TReflectionTraverser;

View File

@ -40,6 +40,7 @@
#include <pthread.h>
#include <semaphore.h>
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <cstdio>
@ -118,36 +119,50 @@ OS_TLSIndex OS_AllocTLSIndex()
//
// Create global pool key.
//
if ((pthread_key_create(&pPoolIndex, NULL)) != 0)
if ((pthread_key_create(&pPoolIndex, NULL)) != 0) {
assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
return OS_INVALID_TLS_INDEX;
return PthreadKeyToTLSIndex(pPoolIndex);
}
else
return PthreadKeyToTLSIndex(pPoolIndex);
}
bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
{
if (nIndex == OS_INVALID_TLS_INDEX)
if (nIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
return false;
}
if (pthread_setspecific(TLSIndexToPthreadKey(nIndex), lpvValue) == 0)
return true;
return false;
else
return false;
}
void* OS_GetTLSValue(OS_TLSIndex nIndex)
{
//
// This function should return 0 if nIndex is invalid.
//
assert(nIndex != OS_INVALID_TLS_INDEX);
return pthread_getspecific(TLSIndexToPthreadKey(nIndex));
}
bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
{
if (nIndex == OS_INVALID_TLS_INDEX)
if (nIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
return false;
}
//
// Delete the global pool key.
//
if (pthread_key_delete(TLSIndexToPthreadKey(nIndex)) == 0)
return true;
return false;
else
return false;
}
namespace {
@ -172,4 +187,18 @@ void ReleaseGlobalLock()
pthread_mutex_unlock(&gMutex);
}
// #define DUMP_COUNTERS
void OS_DumpMemoryCounters()
{
#ifdef DUMP_COUNTERS
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) == 0)
printf("Working set size: %ld\n", usage.ru_maxrss * 1024);
#else
printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
#endif
}
} // end namespace glslang
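
The OS_* functions above form a small TLS shim; a hedged sketch of the expected round trip, written as if inside namespace glslang (as InitializeDll.cpp is), since OS_TLSIndex and OS_INVALID_TLS_INDEX come from osinclude.h:

    static bool TlsRoundTrip()
    {
        OS_TLSIndex idx = OS_AllocTLSIndex();        // asserts and returns OS_INVALID_TLS_INDEX on failure
        if (idx == OS_INVALID_TLS_INDEX)
            return false;

        OS_SetTLSValue(idx, (void*)1);               // per-thread flag, the way InitThread() sets it
        bool initialized = OS_GetTLSValue(idx) != 0; // reads back 0 on threads that never set it

        OS_FreeTLSIndex(idx);
        return initialized;
    }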

View File

@ -70,18 +70,25 @@ inline DWORD ToNativeTLSIndex (OS_TLSIndex nIndex)
OS_TLSIndex OS_AllocTLSIndex()
{
DWORD dwIndex = TlsAlloc();
if (dwIndex == TLS_OUT_OF_INDEXES)
if (dwIndex == TLS_OUT_OF_INDEXES) {
assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
return OS_INVALID_TLS_INDEX;
}
return ToGenericTLSIndex(dwIndex);
}
bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
{
if (nIndex == OS_INVALID_TLS_INDEX)
if (nIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
return false;
}
if (TlsSetValue(ToNativeTLSIndex(nIndex), lpvValue))
return true;
return false;
else
return false;
}
void* OS_GetTLSValue(OS_TLSIndex nIndex)
@ -92,11 +99,15 @@ void* OS_GetTLSValue(OS_TLSIndex nIndex)
bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
{
if (nIndex == OS_INVALID_TLS_INDEX)
if (nIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
return false;
}
if (TlsFree(ToNativeTLSIndex(nIndex)))
return true;
return false;
else
return false;
}
HANDLE GlobalLock;
@ -121,4 +132,17 @@ unsigned int __stdcall EnterGenericThread (void* entry)
return ((TThreadEntrypoint)entry)(0);
}
//#define DUMP_COUNTERS
void OS_DumpMemoryCounters()
{
#ifdef DUMP_COUNTERS
PROCESS_MEMORY_COUNTERS counters;
GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters));
printf("Working set size: %d\n", counters.WorkingSetSize);
#else
printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
#endif
}
} // namespace glslang

View File

@ -56,6 +56,8 @@ typedef unsigned int (*TThreadEntrypoint)(void*);
void OS_CleanupThreadData(void);
void OS_DumpMemoryCounters();
} // end namespace glslang
#endif // __OSINCLUDE_H

View File

@ -596,7 +596,7 @@ class TIoMapper;
// notify callbacks, this phase ends with a call to endNotifications.
// Phase two starts directly after the call to endNotifications
// and calls all other callbacks to validate and to get the
// bindings, sets, locations, component and color indices.
// bindings, sets, locations, component and color indices.
//
// NOTE: limit checks are still applied to bindings and sets
// and may result in an error.
@ -685,6 +685,8 @@ public:
const TType* getUniformBlockTType(int index) const; // returns a TType*
const TType* getAttributeTType(int index) const; // returns a TType*
void dumpReflection();
// I/O mapping: apply base offsets and map live unbound variables
// If resolver is not provided it uses the previous approach
// and respects auto assignment and offsets.