2016-03-13 03:11:22 +00:00
|
|
|
//
|
2017-01-06 19:34:14 +00:00
|
|
|
// Copyright (C) 2016 Google, Inc.
|
|
|
|
// Copyright (C) 2016 LunarG, Inc.
|
2016-03-13 03:11:22 +00:00
|
|
|
//
|
2017-01-06 19:34:14 +00:00
|
|
|
// All rights reserved.
|
2016-03-13 03:11:22 +00:00
|
|
|
//
|
2017-01-06 19:34:14 +00:00
|
|
|
// Redistribution and use in source and binary forms, with or without
|
|
|
|
// modification, are permitted provided that the following conditions
|
|
|
|
// are met:
|
2016-03-13 03:11:22 +00:00
|
|
|
//
|
|
|
|
// Redistributions of source code must retain the above copyright
|
|
|
|
// notice, this list of conditions and the following disclaimer.
|
|
|
|
//
|
|
|
|
// Redistributions in binary form must reproduce the above
|
|
|
|
// copyright notice, this list of conditions and the following
|
|
|
|
// disclaimer in the documentation and/or other materials provided
|
|
|
|
// with the distribution.
|
|
|
|
//
|
|
|
|
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
|
|
|
|
// contributors may be used to endorse or promote products derived
|
|
|
|
// from this software without specific prior written permission.
|
|
|
|
//
|
2017-01-06 19:34:14 +00:00
|
|
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
|
|
|
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
|
|
|
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
|
|
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
|
|
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
|
|
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
|
|
|
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
// POSSIBILITY OF SUCH DAMAGE.
|
2016-03-13 03:11:22 +00:00
|
|
|
//
|
|
|
|
#ifndef HLSL_PARSE_INCLUDED_
|
|
|
|
#define HLSL_PARSE_INCLUDED_
|
|
|
|
|
|
|
|
#include "../glslang/MachineIndependent/parseVersions.h"
|
|
|
|
#include "../glslang/MachineIndependent/ParseHelper.h"
|
|
|
|
|
|
|
|
namespace glslang {
|
|
|
|
|
2016-10-20 19:07:10 +00:00
|
|
|
class TAttributeMap; // forward declare
|
|
|
|
|
2016-03-13 03:11:22 +00:00
|
|
|
class HlslParseContext : public TParseContextBase {
|
|
|
|
public:
|
|
|
|
HlslParseContext(TSymbolTable&, TIntermediate&, bool parsingBuiltins,
|
2016-06-17 02:59:42 +00:00
|
|
|
int version, EProfile, const SpvVersion& spvVersion, EShLanguage, TInfoSink&,
|
2016-10-31 21:13:43 +00:00
|
|
|
const TString sourceEntryPointName,
|
2016-03-13 03:11:22 +00:00
|
|
|
bool forwardCompatible = false, EShMessages messages = EShMsgDefault);
|
|
|
|
virtual ~HlslParseContext();
|
2016-12-21 11:48:08 +00:00
|
|
|
void initializeExtensionBehavior() override;
|
2016-07-23 02:46:03 +00:00
|
|
|
|
2016-12-21 11:48:08 +00:00
|
|
|
void setLimits(const TBuiltInResource&) override;
|
|
|
|
bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override;
|
|
|
|
// Name of the implicit uniform block that collects loose global uniforms ("$Global" per HLSL convention).
virtual const char* getGlobalUniformBlockName() override { return "$Global"; }
|
2016-03-13 03:11:22 +00:00
|
|
|
|
2016-12-21 11:48:08 +00:00
|
|
|
// No reserved preprocessor identifiers are rejected for HLSL, so this override is a no-op.
void reservedPpErrorCheck(const TSourceLoc&, const char* /*name*/, const char* /*op*/) override { }
|
|
|
|
// Line continuations are always accepted for HLSL; unconditionally reports success.
bool lineContinuationCheck(const TSourceLoc&, bool /*endOfComment*/) override { return true; }
|
|
|
|
// A #line directive sets the number of the line that follows it (HLSL semantics).
bool lineDirectiveShouldSetNextLine() const override { return true; }
|
2016-03-13 03:11:22 +00:00
|
|
|
bool builtInName(const TString&);
|
|
|
|
|
2016-12-21 11:48:08 +00:00
|
|
|
void handlePragma(const TSourceLoc&, const TVector<TString>&) override;
|
2016-06-11 22:43:14 +00:00
|
|
|
TIntermTyped* handleVariable(const TSourceLoc&, TSymbol* symbol, const TString* string);
|
2016-03-13 03:11:22 +00:00
|
|
|
TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
|
2016-10-07 02:12:24 +00:00
|
|
|
TIntermTyped* handleBracketOperator(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
|
2016-03-13 03:11:22 +00:00
|
|
|
void checkIndex(const TSourceLoc&, const TType&, int& index);
|
|
|
|
|
|
|
|
TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
|
|
|
|
TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode);
|
|
|
|
TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field);
|
2017-03-11 16:39:55 +00:00
|
|
|
bool isBuiltInMethod(const TSourceLoc&, TIntermTyped* base, const TString& field);
|
2016-09-16 07:44:43 +00:00
|
|
|
void assignLocations(TVariable& variable);
|
2017-03-12 00:55:28 +00:00
|
|
|
void handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype);
|
2017-01-19 22:41:47 +00:00
|
|
|
TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&, const TAttributeMap&, TIntermNode*& entryPointTree);
|
|
|
|
TIntermNode* transformEntryPoint(const TSourceLoc&, TFunction&, const TAttributeMap&);
|
2016-09-03 01:13:36 +00:00
|
|
|
void handleFunctionBody(const TSourceLoc&, TFunction&, TIntermNode* functionBody, TIntermNode*& node);
|
2017-02-04 00:57:55 +00:00
|
|
|
void remapEntryPointIO(TFunction& function, TVariable*& returnValue, TVector<TVariable*>& inputs, TVector<TVariable*>& outputs);
|
2016-09-19 22:01:41 +00:00
|
|
|
void remapNonEntryPointIO(TFunction& function);
|
2016-08-09 17:28:03 +00:00
|
|
|
TIntermNode* handleReturnValue(const TSourceLoc&, TIntermTyped*);
|
2016-05-13 15:33:42 +00:00
|
|
|
void handleFunctionArgument(TFunction*, TIntermTyped*& arguments, TIntermTyped* newArg);
|
2017-01-13 19:27:52 +00:00
|
|
|
TIntermTyped* handleAssign(const TSourceLoc&, TOperator, TIntermTyped* left, TIntermTyped* right);
|
|
|
|
TIntermTyped* handleAssignToMatrixSwizzle(const TSourceLoc&, TOperator, TIntermTyped* left, TIntermTyped* right);
|
2016-12-24 01:56:57 +00:00
|
|
|
TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermTyped*);
|
2016-06-09 14:57:35 +00:00
|
|
|
void decomposeIntrinsic(const TSourceLoc&, TIntermTyped*& node, TIntermNode* arguments);
|
2016-06-29 16:58:58 +00:00
|
|
|
void decomposeSampleMethods(const TSourceLoc&, TIntermTyped*& node, TIntermNode* arguments);
|
2017-02-13 00:50:28 +00:00
|
|
|
void decomposeStructBufferMethods(const TSourceLoc&, TIntermTyped*& node, TIntermNode* arguments);
|
2016-11-17 22:04:20 +00:00
|
|
|
void decomposeGeometryMethods(const TSourceLoc&, TIntermTyped*& node, TIntermNode* arguments);
|
2016-12-24 01:56:57 +00:00
|
|
|
void addInputArgumentConversions(const TFunction&, TIntermTyped*&);
|
HLSL: add intrinsic function implicit promotions
This PR handles implicit promotions for intrinsics when there is no exact match,
such as for example clamp(int, bool, float). In this case the int and bool will
be promoted to a float, and the clamp(float, float, float) form used.
These promotions can be mixed with shape conversions, e.g, clamp(int, bool2, float2).
Output conversions are handled either via the existing addOutputArgumentConversion
function, which this PR generalizes to handle either aggregates or unaries, or by
intrinsic decomposition. If there are methods or intrinsics to be decomposed,
then decomposition is responsible for any output conversions, which turns out to
happen automatically in all current cases. This can be revisited once inout
conversions are in place.
Some cases of actual ambiguity were fixed in several tests, e.g, spv.register.autoassign.*
Some intrinsics with only uint versions were expanded to signed ints natively, where the
underlying AST and SPIR-V supports that. E.g, countbits. This avoids extraneous
conversion nodes.
A new function promoteAggregate is added, and used by findFunction. This is essentially
a generalization of the "promote 1st or 2nd arg" algorithm in promoteBinary.
The actual selection proceeds in three steps, as described in the comments in
hlslParseContext::findFunction:
1. Attempt an exact match. If found, use it.
2. If not, obtain the operator from step 1, and promote arguments.
3. Re-select the intrinsic overload from the results of step 2.
2016-11-02 18:42:34 +00:00
|
|
|
TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermOperator&);
|
2016-03-13 03:11:22 +00:00
|
|
|
void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&);
|
2016-03-13 17:24:20 +00:00
|
|
|
TFunction* handleConstructorCall(const TSourceLoc&, const TType&);
|
2017-03-15 03:50:06 +00:00
|
|
|
void handleSemantic(TSourceLoc, TQualifier&, TBuiltInVariable, const TString& upperCase);
|
2016-09-05 18:40:06 +00:00
|
|
|
void handlePackOffset(const TSourceLoc&, TQualifier&, const glslang::TString& location,
|
2016-07-30 16:29:54 +00:00
|
|
|
const glslang::TString* component);
|
2016-09-05 18:40:06 +00:00
|
|
|
void handleRegister(const TSourceLoc&, TQualifier&, const glslang::TString* profile, const glslang::TString& desc,
|
2016-09-05 22:03:12 +00:00
|
|
|
int subComponent, const glslang::TString*);
|
2016-03-13 03:11:22 +00:00
|
|
|
|
2016-07-14 20:45:14 +00:00
|
|
|
TIntermAggregate* handleSamplerTextureCombine(const TSourceLoc& loc, TIntermTyped* argTex, TIntermTyped* argSampler);
|
|
|
|
|
2017-01-14 02:34:22 +00:00
|
|
|
bool parseMatrixSwizzleSelector(const TSourceLoc&, const TString&, int cols, int rows, TSwizzleSelectors<TMatrixSelector>&);
|
|
|
|
int getMatrixComponentsColumn(int rows, const TSwizzleSelectors<TMatrixSelector>&);
|
2016-03-13 03:11:22 +00:00
|
|
|
void assignError(const TSourceLoc&, const char* op, TString left, TString right);
|
|
|
|
void unaryOpError(const TSourceLoc&, const char* op, TString operand);
|
|
|
|
void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right);
|
|
|
|
void variableCheck(TIntermTyped*& nodePtr);
|
|
|
|
void constantValueCheck(TIntermTyped* node, const char* token);
|
|
|
|
void integerCheck(const TIntermTyped* node, const char* token);
|
|
|
|
void globalCheck(const TSourceLoc&, const char* token);
|
|
|
|
bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&);
|
|
|
|
bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&);
|
|
|
|
void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&);
|
|
|
|
void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&);
|
|
|
|
void structArrayCheck(const TSourceLoc&, const TType& structure);
|
|
|
|
void arrayDimMerge(TType& type, const TArraySizes* sizes);
|
|
|
|
bool voidErrorCheck(const TSourceLoc&, const TString&, TBasicType);
|
|
|
|
void boolCheck(const TSourceLoc&, const TIntermTyped*);
|
|
|
|
void globalQualifierFix(const TSourceLoc&, TQualifier&);
|
|
|
|
bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType);
|
2016-09-16 23:10:39 +00:00
|
|
|
void mergeQualifiers(TQualifier& dst, const TQualifier& src);
|
2016-03-13 03:11:22 +00:00
|
|
|
int computeSamplerTypeIndex(TSampler&);
|
2016-11-05 16:15:53 +00:00
|
|
|
TSymbol* redeclareBuiltinVariable(const TSourceLoc&, const TString&, const TQualifier&, const TShaderQualifiers&);
|
2016-03-13 03:11:22 +00:00
|
|
|
void redeclareBuiltinBlock(const TSourceLoc&, TTypeList& typeList, const TString& blockName, const TString* instanceName, TArraySizes* arraySizes);
|
2016-06-17 21:50:47 +00:00
|
|
|
void paramFix(TType& type);
|
2016-03-13 03:11:22 +00:00
|
|
|
void specializationCheck(const TSourceLoc&, const TType&, const char* op);
|
|
|
|
|
2016-08-17 16:22:08 +00:00
|
|
|
void setLayoutQualifier(const TSourceLoc&, TQualifier&, TString&);
|
|
|
|
void setLayoutQualifier(const TSourceLoc&, TQualifier&, TString&, const TIntermTyped*);
|
2016-03-13 03:11:22 +00:00
|
|
|
void mergeObjectLayoutQualifiers(TQualifier& dest, const TQualifier& src, bool inheritOnly);
|
|
|
|
void checkNoShaderLayouts(const TSourceLoc&, const TShaderQualifiers&);
|
|
|
|
|
2016-12-24 01:56:57 +00:00
|
|
|
const TFunction* findFunction(const TSourceLoc& loc, TFunction& call, bool& builtIn, TIntermTyped*& args);
|
2017-02-08 20:59:30 +00:00
|
|
|
void declareTypedef(const TSourceLoc&, TString& identifier, const TType&);
|
2017-02-04 00:57:55 +00:00
|
|
|
void declareStruct(const TSourceLoc&, TString& structName, TType&);
|
2017-03-02 21:30:59 +00:00
|
|
|
TSymbol* lookupUserType(const TString&, TType&);
|
2016-09-27 20:38:57 +00:00
|
|
|
TIntermNode* declareVariable(const TSourceLoc&, TString& identifier, TType&, TIntermTyped* initializer = 0);
|
2016-11-28 00:39:07 +00:00
|
|
|
void lengthenList(const TSourceLoc&, TIntermSequence& list, int size);
|
2016-07-28 21:29:35 +00:00
|
|
|
TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&);
|
2016-03-13 03:11:22 +00:00
|
|
|
TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
|
|
|
|
TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
|
2016-07-25 22:05:33 +00:00
|
|
|
void declareBlock(const TSourceLoc&, TType&, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
|
2016-12-21 11:48:08 +00:00
|
|
|
void finalizeGlobalUniformBlockLayout(TVariable& block) override;
|
2016-03-13 03:11:22 +00:00
|
|
|
void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
|
|
|
|
void fixBlockXfbOffsets(TQualifier&, TTypeList&);
|
2016-09-28 01:13:05 +00:00
|
|
|
void fixBlockUniformOffsets(const TQualifier&, TTypeList&);
|
2016-03-13 03:11:22 +00:00
|
|
|
void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
|
|
|
|
void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
|
|
|
|
void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
|
|
|
|
void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
|
|
|
|
TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
|
|
|
|
|
|
|
|
void updateImplicitArraySize(const TSourceLoc&, TIntermNode*, int index);
|
|
|
|
|
2016-09-20 19:22:58 +00:00
|
|
|
// Enter a loop construct: increase the loop-nesting depth counter.
void nestLooping() { ++loopNestingLevel; }
|
|
|
|
// Leave a loop construct: decrease the loop-nesting depth counter.
void unnestLooping() { --loopNestingLevel; }
|
|
|
|
// Enter an annotation: increase the annotation-nesting depth counter.
void nestAnnotations() { ++annotationNestingLevel; }
|
|
|
|
// Leave an annotation: decrease the annotation-nesting depth counter.
void unnestAnnotations() { --annotationNestingLevel; }
|
2016-09-28 01:13:05 +00:00
|
|
|
// Current annotation-nesting depth; 0 means not inside an annotation.
int getAnnotationNestingLevel() { return annotationNestingLevel; }
|
2016-09-20 19:22:58 +00:00
|
|
|
// Open a new symbol-table scope.
void pushScope() { symbolTable.push(); }
|
|
|
|
// Close the current symbol-table scope. The 0 argument presumably means no
// per-scope defaults need restoring — TODO confirm against TSymbolTable::pop().
void popScope() { symbolTable.pop(0); }
|
2016-06-05 17:23:11 +00:00
|
|
|
|
2017-03-19 18:24:29 +00:00
|
|
|
void pushNamespace(const TString& name);
|
|
|
|
void popNamespace();
|
|
|
|
TString* getFullNamespaceName(const TString& localName) const;
|
|
|
|
void addScopeMangler(TString&);
|
2017-03-11 21:13:00 +00:00
|
|
|
|
2016-07-01 06:04:11 +00:00
|
|
|
// Begin building a switch statement: push its statement sequence on the stack.
void pushSwitchSequence(TIntermSequence* sequence) { switchSequenceStack.push_back(sequence); }
|
|
|
|
// Finish the innermost switch statement: pop its sequence off the stack.
void popSwitchSequence() { switchSequenceStack.pop_back(); }
|
|
|
|
|
2017-02-07 01:44:52 +00:00
|
|
|
virtual void growGlobalUniformBlock(TSourceLoc&, TType&, TString& memberName, TTypeList* typeList = nullptr) override;
|
2017-02-03 21:06:36 +00:00
|
|
|
|
2016-10-08 01:35:40 +00:00
|
|
|
// Apply L-value conversions. E.g, turning a write to a RWTexture into an ImageStore.
|
|
|
|
TIntermTyped* handleLvalue(const TSourceLoc&, const char* op, TIntermTyped* node);
|
2016-10-08 16:54:52 +00:00
|
|
|
bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
|
2016-10-08 01:35:40 +00:00
|
|
|
|
2016-10-10 21:24:57 +00:00
|
|
|
TLayoutFormat getLayoutFromTxType(const TSourceLoc&, const TType&);
|
|
|
|
|
2016-11-17 22:04:20 +00:00
|
|
|
bool handleOutputGeometry(const TSourceLoc&, const TLayoutGeometry& geometry);
|
|
|
|
bool handleInputGeometry(const TSourceLoc&, const TLayoutGeometry& geometry);
|
|
|
|
|
2016-10-31 21:13:43 +00:00
|
|
|
// Potentially rename shader entry point function
|
|
|
|
void renameShaderFunction(TString*& name) const;
|
|
|
|
|
HLSL: Recursive composite flattening
This PR implements recursive type flattening. For example, an array of structs of other structs
can be flattened to individual member variables at the shader interface.
This is sufficient for many purposes, e.g, uniforms containing opaque types, but is not sufficient
for geometry shader arrayed inputs. That will be handled separately with structure splitting,
which is not implemented by this PR. In the meantime, that case is detected and triggers an error.
The recursive flattening extends the following three aspects of single-level flattening:
- Flattening of structures to individual members with names such as "foo[0].samp[1]";
- Turning constant references to the nested composite type into a reference to a particular
flattened member.
- Shadow copies between arrays of flattened members and the nested composite type.
Previous single-level flattening only flattened at the shader interface, and that is unchanged by this PR.
Internally, shadow copies are made where needed, such as when the type is passed to a function.
Also, the reasons for flattening are unchanged. Uniforms containing opaque types, and interface struct
types are flattened. (The latter will change with structure splitting).
One existing test changes: hlsl.structin.vert, which did in fact contain a nested composite type to be
flattened.
Two new tests are added: hlsl.structarray.flatten.frag, and hlsl.structarray.flatten.geom (currently
issues an error until type splitting is online).
The process of arriving at the individual member from chained postfix expressions is more complex than
it was with one level. See large-ish comment above HlslParseContext::flatten() for details.
2016-11-29 00:09:54 +00:00
|
|
|
// Reset data for incrementally built referencing of flattened composite structures
|
|
|
|
// Begin a flattened-composite access chain: push fresh level/offset trackers.
void initFlattening() { flattenLevel.push_back(0); flattenOffset.push_back(0); }
|
|
|
|
// End a flattened-composite access chain: pop the level/offset trackers pushed by initFlattening().
void finalizeFlattening() { flattenLevel.pop_back(); flattenOffset.pop_back(); }
|
|
|
|
|
2017-02-24 01:04:12 +00:00
|
|
|
// Share struct buffer deep types
|
|
|
|
void shareStructBufferType(TType&);
|
|
|
|
|
2016-03-13 03:11:22 +00:00
|
|
|
protected:
|
HLSL: Recursive composite flattening
This PR implements recursive type flattening. For example, an array of structs of other structs
can be flattened to individual member variables at the shader interface.
This is sufficient for many purposes, e.g, uniforms containing opaque types, but is not sufficient
for geometry shader arrayed inputs. That will be handled separately with structure splitting,
which is not implemented by this PR. In the meantime, that case is detected and triggers an error.
The recursive flattening extends the following three aspects of single-level flattening:
- Flattening of structures to individual members with names such as "foo[0].samp[1]";
- Turning constant references to the nested composite type into a reference to a particular
flattened member.
- Shadow copies between arrays of flattened members and the nested composite type.
Previous single-level flattening only flattened at the shader interface, and that is unchanged by this PR.
Internally, shadow copies are made where needed, such as when the type is passed to a function.
Also, the reasons for flattening are unchanged. Uniforms containing opaque types, and interface struct
types are flattened. (The latter will change with structure splitting).
One existing test changes: hlsl.structin.vert, which did in fact contain a nested composite type to be
flattened.
Two new tests are added: hlsl.structarray.flatten.frag, and hlsl.structarray.flatten.geom (currently
issues an error until type splitting is online).
The process of arriving at the individual member from chained postfix expressions is more complex than
it was with one level. See large-ish comment above HlslParseContext::flatten() for details.
2016-11-29 00:09:54 +00:00
|
|
|
struct TFlattenData {
|
|
|
|
TFlattenData() : nextBinding(TQualifier::layoutBindingEnd) { }
|
|
|
|
TFlattenData(int nb) : nextBinding(nb) { }
|
|
|
|
|
|
|
|
TVector<TVariable*> members; // individual flattened variables
|
|
|
|
TVector<int> offsets; // offset to next tree level
|
|
|
|
int nextBinding; // next binding to use.
|
|
|
|
};
|
|
|
|
|
2016-12-30 23:42:57 +00:00
|
|
|
void fixConstInit(const TSourceLoc&, TString& identifier, TType& type, TIntermTyped*& initializer);
|
2016-03-13 03:11:22 +00:00
|
|
|
void inheritGlobalDefaults(TQualifier& dst) const;
|
|
|
|
TVariable* makeInternalVariable(const char* name, const TType&) const;
|
HLSL: inter-stage structure splitting.
This adds structure splitting, which among other things will enable GS support where input structs
are passed, and thus become input arrays of structs in the GS inputs. That is a common GS case.
The salient points of this PR are:
* Structure splitting has been changed from "always between stages" to "only into the VS and out of
the PS". It had previously happened between stages because it's not legal to pass a struct
containing a builtin IO variable.
* Structs passed between stages are now split into a struct containing ONLY user types, and a
collection of loose builtin IO variables, if any. The user-part is passed as a normal struct
between stages, which is valid SPIR-V now that the builtin IO is removed.
* Internal to the shader, a sanitized struct (with IO qualifiers removed) is used, so that e.g,
functions can work unmodified.
* If a builtin IO such as Position occurs in an arrayed struct, for example as an input to a GS,
the array reference is moved to the split-off loose variable, which is given the array dimension
itself.
When passing things around inside the shader, such as over a function call, the original type
is used in a sanitized form that removes the builtIn qualifications and makes them temporaries.
This means internal function calls do not have to change. However, the type when returned from
the shader will be member-wise copied from the internal sanitized one to the external type.
The sanitized type is used in variable declarations.
When copying split types and unsplit, if a sub-struct contains only user variables, it is copied
as a single entity to avoid more AST verbosity.
Above strategy arrived at with talks with @johnkslang.
This is a big complex change. I'm inclined to leave it as a WIP until it can get some exposure to
real world cases.
2016-12-14 22:22:25 +00:00
|
|
|
TVariable* makeInternalVariable(const TString& name, const TType& type) const {
|
|
|
|
return makeInternalVariable(name.c_str(), type);
|
|
|
|
}
|
HLSL: Recursive composite flattening
This PR implements recursive type flattening. For example, an array of structs of other structs
can be flattened to individual member variables at the shader interface.
This is sufficient for many purposes, e.g, uniforms containing opaque types, but is not sufficient
for geometry shader arrayed inputs. That will be handled separately with structure splitting,
which is not implemented by this PR. In the meantime, that case is detected and triggers an error.
The recursive flattening extends the following three aspects of single-level flattening:
- Flattening of structures to individual members with names such as "foo[0].samp[1]";
- Turning constant references to the nested composite type into a reference to a particular
flattened member.
- Shadow copies between arrays of flattened members and the nested composite type.
Previous single-level flattening only flattened at the shader interface, and that is unchanged by this PR.
Internally, shadow copies are made where needed, such as when the type is passed to a function.
Also, the reasons for flattening are unchanged. Uniforms containing opaque types, and interface struct
types are flattened. (The latter will change with structure splitting).
One existing test changes: hlsl.structin.vert, which did in fact contain a nested composite type to be
flattened.
Two new tests are added: hlsl.structarray.flatten.frag, and hlsl.structarray.flatten.geom (currently
issues an error until type splitting is online).
The process of arriving at the individual member from chained postfix expressions is more complex than
it was with one level. See large-ish comment above HlslParseContext::flatten() for details.
2016-11-29 00:09:54 +00:00
|
|
|
TVariable* declareNonArray(const TSourceLoc&, TString& identifier, TType&, bool track);
|
2016-11-05 16:15:53 +00:00
|
|
|
void declareArray(const TSourceLoc&, TString& identifier, const TType&, TSymbol*&, bool track);
|
2016-03-13 03:11:22 +00:00
|
|
|
TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
|
|
|
|
TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
|
2016-11-28 06:00:14 +00:00
|
|
|
bool isZeroConstructor(const TIntermNode*);
|
2016-06-13 15:22:28 +00:00
|
|
|
TOperator mapAtomicOp(const TSourceLoc& loc, TOperator op, bool isImage);
|
2016-03-13 03:11:22 +00:00
|
|
|
|
2016-10-08 01:35:40 +00:00
|
|
|
// Return true if this node requires L-value conversion (e.g, to an imageStore).
|
|
|
|
bool shouldConvertLValue(const TIntermNode*) const;
|
|
|
|
|
2016-09-16 19:26:37 +00:00
|
|
|
// Array and struct flattening
|
HLSL: inter-stage structure splitting.
This adds structure splitting, which among other things will enable GS support where input structs
are passed, and thus become input arrays of structs in the GS inputs. That is a common GS case.
The salient points of this PR are:
* Structure splitting has been changed from "always between stages" to "only into the VS and out of
the PS". It had previously happened between stages because it's not legal to pass a struct
containing a builtin IO variable.
* Structs passed between stages are now split into a struct containing ONLY user types, and a
collection of loose builtin IO variables, if any. The user-part is passed as a normal struct
between stages, which is valid SPIR-V now that the builtin IO is removed.
* Internal to the shader, a sanitized struct (with IO qualifiers removed) is used, so that e.g,
functions can work unmodified.
* If a builtin IO such as Position occurs in an arrayed struct, for example as an input to a GS,
the array reference is moved to the split-off loose variable, which is given the array dimension
itself.
When passing things around inside the shader, such as over a function call, the original type
is used in a sanitized form that removes the builtIn qualifications and makes them temporaries.
This means internal function calls do not have to change. However, the type when returned from
the shader will be member-wise copied from the internal sanitized one to the external type.
The sanitized type is used in variable declarations.
When copying split types and unsplit, if a sub-struct contains only user variables, it is copied
as a single entity to avoid more AST verbosity.
Above strategy arrived at with talks with @johnkslang.
This is a big complex change. I'm inclined to leave it as a WIP until it can get some exposure to
real world cases.
2016-12-14 22:22:25 +00:00
|
|
|
TIntermTyped* flattenAccess(TIntermTyped* base, int member);
|
2016-09-16 19:26:37 +00:00
|
|
|
bool shouldFlattenUniform(const TType&) const;
|
HLSL: Recursive composite flattening
This PR implements recursive type flattening. For example, an array of structs of other structs
can be flattened to individual member variables at the shader interface.
This is sufficient for many purposes, e.g, uniforms containing opaque types, but is not sufficient
for geometry shader arrayed inputs. That will be handled separately with structure splitting,
which is not implemented by this PR. In the meantime, that case is detected and triggers an error.
The recursive flattening extends the following three aspects of single-level flattening:
- Flattening of structures to individual members with names such as "foo[0].samp[1]";
- Turning constant references to the nested composite type into a reference to a particular
flattened member.
- Shadow copies between arrays of flattened members and the nested composite type.
Previous single-level flattening only flattened at the shader interface, and that is unchanged by this PR.
Internally, shadow copies are made where needed, such as when the type is passed to a function.
Also, the reasons for flattening are unchanged. Uniforms containing opaque types, and interface struct
types are flattened. (The latter will change with structure splitting).
One existing test changes: hlsl.structin.vert, which did in fact contain a nested composite type to be
flattened.
Two new tests are added: hlsl.structarray.flatten.frag, and hlsl.structarray.flatten.geom (currently
issues an error until type splitting is online).
The process of arriving at the individual member from chained postfix expressions is more complex than
it was with one level. See large-ish comment above HlslParseContext::flatten() for details.
2016-11-29 00:09:54 +00:00
|
|
|
bool wasFlattened(const TIntermTyped* node) const;
|
|
|
|
// True if the variable with this unique id was flattened (i.e., it has an entry in flattenMap).
bool wasFlattened(int id) const { return flattenMap.find(id) != flattenMap.end(); }
|
|
|
|
int addFlattenedMember(const TSourceLoc& loc, const TVariable&, const TType&, TFlattenData&, const TString& name, bool track);
|
|
|
|
// Leaf of the flattening recursion: a type that is neither a struct nor an array needs no further splitting.
bool isFinalFlattening(const TType& type) const { return !(type.isStruct() || type.isArray()); }
|
|
|
|
|
HLSL: inter-stage structure splitting.
This adds structure splitting, which among other things will enable GS support where input structs
are passed, and thus become input arrays of structs in the GS inputs. That is a common GS case.
The salient points of this PR are:
* Structure splitting has been changed from "always between stages" to "only into the VS and out of
the PS". It had previously happened between stages because it's not legal to pass a struct
containing a builtin IO variable.
* Structs passed between stages are now split into a struct containing ONLY user types, and a
collection of loose builtin IO variables, if any. The user-part is passed as a normal struct
between stages, which is valid SPIR-V now that the builtin IO is removed.
* Internal to the shader, a sanitized struct (with IO qualifiers removed) is used, so that e.g,
functions can work unmodified.
* If a builtin IO such as Position occurs in an arrayed struct, for example as an input to a GS,
the array reference is moved to the split-off loose variable, which is given the array dimension
itself.
When passing things around inside the shader, such as over a function call, the original type
is used in a sanitized form that removes the builtIn qualifications and makes them temporaries.
This means internal function calls do not have to change. However, the type when returned from
the shader will be member-wise copied from the internal sanitized one to the external type.
The sanitized type is used in variable declarations.
When copying split types and unsplit, if a sub-struct contains only user variables, it is copied
as a single entity to avoid more AST verbosity.
Above strategy arrived at with talks with @johnkslang.
This is a big complex change. I'm inclined to leave it as a WIP until it can get some exposure to
real world cases.
2016-12-14 22:22:25 +00:00
|
|
|
// Structure splitting (splits interstage builtin types into its own struct)
|
2016-12-19 22:48:01 +00:00
|
|
|
TIntermTyped* splitAccessStruct(const TSourceLoc& loc, TIntermTyped*& base, int& member);
|
|
|
|
void splitAccessArray(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index);
|
|
|
|
TType& split(TType& type, TString name, const TType* outerStructType = nullptr);
|
HLSL: inter-stage structure splitting.
This adds structure splitting, which among other things will enable GS support where input structs
are passed, and thus become input arrays of structs in the GS inputs. That is a common GS case.
The salient points of this PR are:
* Structure splitting has been changed from "always between stages" to "only into the VS and out of
the PS". It had previously happened between stages because it's not legal to pass a struct
containing a builtin IO variable.
* Structs passed between stages are now split into a struct containing ONLY user types, and a
collection of loose builtin IO variables, if any. The user-part is passed as a normal struct
between stages, which is valid SPIR-V now that the builtin IO is removed.
* Internal to the shader, a sanitized struct (with IO qualifiers removed) is used, so that e.g,
functions can work unmodified.
* If a builtin IO such as Position occurs in an arrayed struct, for example as an input to a GS,
the array reference is moved to the split-off loose variable, which is given the array dimension
itself.
When passing things around inside the shader, such as over a function call, the original type
is used in a sanitized form that removes the builtIn qualifications and makes them temporaries.
This means internal function calls do not have to change. However, the type when returned from
the shader will be member-wise copied from the internal sanitized one to the external type.
The sanitized type is used in variable declarations.
When copying split types and unsplit, if a sub-struct contains only user variables, it is copied
as a single entity to avoid more AST verbosity.
Above strategy arrived at with talks with @johnkslang.
This is a big complex change. I'm inclined to leave it as a WIP until it can get some exposure to
real world cases.
2016-12-14 22:22:25 +00:00
|
|
|
void split(TIntermTyped*);
|
|
|
|
void split(const TVariable&);
|
|
|
|
bool wasSplit(const TIntermTyped* node) const;
|
|
|
|
bool wasSplit(int id) const { return splitIoVars.find(id) != splitIoVars.end(); }
|
|
|
|
TVariable* getSplitIoVar(const TIntermTyped* node) const;
|
|
|
|
TVariable* getSplitIoVar(const TVariable* var) const;
|
|
|
|
TVariable* getSplitIoVar(int id) const;
|
|
|
|
void addInterstageIoToLinkage();
|
Add basic HS/DS implementation.
This obsoletes WIP PR #704, which was built on the pre entry point wrapping master. New version
here uses entry point wrapping.
This is a limited implementation of tessellation shaders. In particular, the following are not functional,
and will be added as separate stages to reduce the size of each PR.
* patchconstantfunctions accepting per-control-point input values, such as
const OutputPatch <hs_out_t, 3> cpv are not implemented.
* patchconstantfunctions whose signature requires an aggregate input type such as
a structure containing builtin variables. Code to synthesize such calls is not
yet present.
These restrictions will be relaxed as soon as possible. Simple cases can compile now: see for example
Test/hlsl.hull.1.tesc - e.g., writing to inner and outer tessellation factors.
PCF invocation is synthesized as an entry point epilogue protected behind a barrier and a test on
invocation ID == 0. If there is an existing invocation ID variable it will be used, otherwise one is
added to the linkage. The PCF and the shader EP interfaces are unioned and builtins appearing in
the PCF but not the EP are also added to the linkage and synthesized as shader inputs.
Parameter matching to (eventually arbitrary) PCF signatures is by builtin variable type. Any user
variables in the PCF signature will result in an error. Overloaded PCF functions will also result in
an error.
[domain()], [partitioning()], [outputtopology()], [outputcontrolpoints()], and [patchconstantfunction()]
attributes to the shader entry point are in place, with the exception of the Pow2 partitioning mode.
2017-01-07 15:54:10 +00:00
|
|
|
void addPatchConstantInvocation();
|
HLSL: inter-stage structure splitting.
This adds structure splitting, which among other things will enable GS support where input structs
are passed, and thus become input arrays of structs in the GS inputs. That is a common GS case.
The salient points of this PR are:
* Structure splitting has been changed from "always between stages" to "only into the VS and out of
the PS". It had previously happened between stages because it's not legal to pass a struct
containing a builtin IO variable.
* Structs passed between stages are now split into a struct containing ONLY user types, and a
collection of loose builtin IO variables, if any. The user-part is passed as a normal struct
between stages, which is valid SPIR-V now that the builtin IO is removed.
* Internal to the shader, a sanitized struct (with IO qualifiers removed) is used, so that e.g,
functions can work unmodified.
* If a builtin IO such as Position occurs in an arrayed struct, for example as an input to a GS,
the array reference is moved to the split-off loose variable, which is given the array dimension
itself.
When passing things around inside the shader, such as over a function call, the original type
is used in a sanitized form that removes the builtIn qualifications and makes them temporaries.
This means internal function calls do not have to change. However, the type when returned from
the shader will be member-wise copied from the internal sanitized one to the external type.
The sanitized type is used in variable declarations.
When copying split types and unsplit, if a sub-struct contains only user variables, it is copied
as a single entity to avoid more AST verbosity.
Above strategy arrived at with talks with @johnkslang.
This is a big complex change. I'm inclined to leave it as a WIP until it can get some exposure to
real world cases.
2016-12-14 22:22:25 +00:00
|
|
|
|
2016-09-16 19:26:37 +00:00
|
|
|
void flatten(const TSourceLoc& loc, const TVariable& variable);
|
HLSL: Recursive composite flattening
This PR implements recursive type flattening. For example, an array of structs of other structs
can be flattened to individual member variables at the shader interface.
This is sufficient for many purposes, e.g, uniforms containing opaque types, but is not sufficient
for geometry shader arrayed inputs. That will be handled separately with structure splitting,
which is not implemented by this PR. In the meantime, that case is detected and triggers an error.
The recursive flattening extends the following three aspects of single-level flattening:
- Flattening of structures to individual members with names such as "foo[0].samp[1]";
- Turning constant references to the nested composite type into a reference to a particular
flattened member.
- Shadow copies between arrays of flattened members and the nested composite type.
Previous single-level flattening only flattened at the shader interface, and that is unchanged by this PR.
Internally, shadow copies are made, such as when the type is passed to a function.
Also, the reasons for flattening are unchanged. Uniforms containing opaque types, and interface struct
types are flattened. (The latter will change with structure splitting).
One existing test changes: hlsl.structin.vert, which did in fact contain a nested composite type to be
flattened.
Two new tests are added: hlsl.structarray.flatten.frag, and hlsl.structarray.flatten.geom (currently
issues an error until type splitting is online).
The process of arriving at the individual member from chained postfix expressions is more complex than
it was with one level. See large-ish comment above HlslParseContext::flatten() for details.
2016-11-29 00:09:54 +00:00
|
|
|
int flatten(const TSourceLoc& loc, const TVariable& variable, const TType&, TFlattenData&, TString name);
|
|
|
|
int flattenStruct(const TSourceLoc& loc, const TVariable& variable, const TType&, TFlattenData&, TString name);
|
|
|
|
int flattenArray(const TSourceLoc& loc, const TVariable& variable, const TType&, TFlattenData&, TString name);
|
2016-09-16 19:26:37 +00:00
|
|
|
|
2017-02-06 03:27:30 +00:00
|
|
|
bool hasUniform(const TQualifier& qualifier) const;
|
|
|
|
void clearUniform(TQualifier& qualifier);
|
|
|
|
bool isInputBuiltIn(const TQualifier& qualifier) const;
|
|
|
|
bool hasInput(const TQualifier& qualifier) const;
|
|
|
|
void correctOutput(TQualifier& qualifier);
|
|
|
|
bool isOutputBuiltIn(const TQualifier& qualifier) const;
|
|
|
|
bool hasOutput(const TQualifier& qualifier) const;
|
|
|
|
void correctInput(TQualifier& qualifier);
|
|
|
|
void correctUniform(TQualifier& qualifier);
|
|
|
|
void clearUniformInputOutput(TQualifier& qualifier);
|
|
|
|
|
2017-02-13 00:50:28 +00:00
|
|
|
// Test method names
|
|
|
|
bool isStructBufferMethod(const TString& name) const;
|
|
|
|
|
2017-02-24 01:04:12 +00:00
|
|
|
TType* getStructBufferContentType(const TType& type) const;
|
|
|
|
bool isStructBufferType(const TType& type) const { return getStructBufferContentType(type) != nullptr; }
|
|
|
|
TIntermTyped* indexStructBufferContent(const TSourceLoc& loc, TIntermTyped* buffer) const;
|
|
|
|
|
|
|
|
// Return true if this type is a reference. This is not currently a type method in case that's
|
|
|
|
// a language specific answer.
|
|
|
|
bool isReference(const TType& type) const { return isStructBufferType(type); }
|
|
|
|
|
Add basic HS/DS implementation.
This obsoletes WIP PR #704, which was built on the pre entry point wrapping master. New version
here uses entry point wrapping.
This is a limited implementation of tessellation shaders. In particular, the following are not functional,
and will be added as separate stages to reduce the size of each PR.
* patchconstantfunctions accepting per-control-point input values, such as
const OutputPatch <hs_out_t, 3> cpv are not implemented.
* patchconstantfunctions whose signature requires an aggregate input type such as
a structure containing builtin variables. Code to synthesize such calls is not
yet present.
These restrictions will be relaxed as soon as possible. Simple cases can compile now: see for example
Test/hlsl.hull.1.tesc - e.g., writing to inner and outer tessellation factors.
PCF invocation is synthesized as an entry point epilogue protected behind a barrier and a test on
invocation ID == 0. If there is an existing invocation ID variable it will be used, otherwise one is
added to the linkage. The PCF and the shader EP interfaces are unioned and builtins appearing in
the PCF but not the EP are also added to the linkage and synthesized as shader inputs.
Parameter matching to (eventually arbitrary) PCF signatures is by builtin variable type. Any user
variables in the PCF signature will result in an error. Overloaded PCF functions will also result in
an error.
[domain()], [partitioning()], [outputtopology()], [outputcontrolpoints()], and [patchconstantfunction()]
attributes to the shader entry point are in place, with the exception of the Pow2 partitioning mode.
2017-01-07 15:54:10 +00:00
|
|
|
// Pass through to base class after remembering builtin mappings.
|
|
|
|
using TParseContextBase::trackLinkage;
|
|
|
|
void trackLinkage(TSymbol& variable) override;
|
|
|
|
|
2017-01-08 21:54:48 +00:00
|
|
|
void finish() override; // post-processing
|
HLSL: inter-stage structure splitting.
This adds structure splitting, which among other things will enable GS support where input structs
are passed, and thus become input arrays of structs in the GS inputs. That is a common GS case.
The salient points of this PR are:
* Structure splitting has been changed from "always between stages" to "only into the VS and out of
the PS". It had previously happened between stages because it's not legal to pass a struct
containing a builtin IO variable.
* Structs passed between stages are now split into a struct containing ONLY user types, and a
collection of loose builtin IO variables, if any. The user-part is passed as a normal struct
between stages, which is valid SPIR-V now that the builtin IO is removed.
* Internal to the shader, a sanitized struct (with IO qualifiers removed) is used, so that e.g,
functions can work unmodified.
* If a builtin IO such as Position occurs in an arrayed struct, for example as an input to a GS,
the array reference is moved to the split-off loose variable, which is given the array dimension
itself.
When passing things around inside the shader, such as over a function call, the original type
is used in a sanitized form that removes the builtIn qualifications and makes them temporaries.
This means internal function calls do not have to change. However, the type when returned from
the shader will be member-wise copied from the internal sanitized one to the external type.
The sanitized type is used in variable declarations.
When copying split types and unsplit, if a sub-struct contains only user variables, it is copied
as a single entity to avoid more AST verbosity.
Above strategy arrived at with talks with @johnkslang.
This is a big complex change. I'm inclined to leave it as a WIP until it can get some exposure to
real world cases.
2016-12-14 22:22:25 +00:00
|
|
|
|
2016-03-13 03:11:22 +00:00
|
|
|
// Current state of parsing
|
|
|
|
struct TPragma contextPragma;
|
|
|
|
int loopNestingLevel; // 0 if outside all loops
|
2016-09-20 19:22:58 +00:00
|
|
|
int annotationNestingLevel; // 0 if outside all annotations
|
2016-03-13 03:11:22 +00:00
|
|
|
int structNestingLevel; // 0 if outside blocks and structures
|
|
|
|
int controlFlowNestingLevel; // 0 if outside all flow control
|
|
|
|
TList<TIntermSequence*> switchSequenceStack; // case, node, case, case, node, ...; ensure only one node between cases; stack of them for nesting
|
2016-11-26 20:31:47 +00:00
|
|
|
bool postEntryPointReturn; // if inside a function, true if the function is the entry point and this is after a return statement
|
2016-03-13 03:11:22 +00:00
|
|
|
const TType* currentFunctionType; // the return type of the function that's currently being parsed
|
|
|
|
bool functionReturnsValue; // true if a non-void function has a return
|
|
|
|
TBuiltInResource resources;
|
|
|
|
TLimits& limits;
|
|
|
|
|
|
|
|
HlslParseContext(HlslParseContext&);
|
|
|
|
HlslParseContext& operator=(HlslParseContext&);
|
|
|
|
|
|
|
|
static const int maxSamplerIndex = EsdNumDims * (EbtNumTypes * (2 * 2 * 2)); // see computeSamplerTypeIndex()
|
|
|
|
TQualifier globalBufferDefaults;
|
|
|
|
TQualifier globalUniformDefaults;
|
|
|
|
TQualifier globalInputDefaults;
|
|
|
|
TQualifier globalOutputDefaults;
|
|
|
|
TString currentCaller; // name of last function body entered (not valid when at global scope)
|
|
|
|
TIdSetType inductiveLoopIds;
|
|
|
|
TVector<TIntermTyped*> needsIndexLimitationChecking;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Geometry shader input arrays:
|
|
|
|
// - array sizing is based on input primitive and/or explicit size
|
|
|
|
//
|
|
|
|
// Tessellation control output arrays:
|
|
|
|
// - array sizing is based on output layout(vertices=...) and/or explicit size
|
|
|
|
//
|
|
|
|
// Both:
|
|
|
|
// - array sizing is retroactive
|
|
|
|
// - built-in block redeclarations interact with this
|
|
|
|
//
|
|
|
|
// Design:
|
|
|
|
// - use a per-context "resize-list", a list of symbols whose array sizes
|
|
|
|
// can be fixed
|
|
|
|
//
|
|
|
|
// - the resize-list starts empty at beginning of user-shader compilation, it does
|
|
|
|
// not have built-ins in it
|
|
|
|
//
|
|
|
|
// - on built-in array use: copyUp() symbol and add it to the resize-list
|
|
|
|
//
|
|
|
|
// - on user array declaration: add it to the resize-list
|
|
|
|
//
|
|
|
|
// - on block redeclaration: copyUp() symbol and add it to the resize-list
|
|
|
|
// * note, that appropriately gives an error if redeclaring a block that
|
|
|
|
// was already used and hence already copied-up
|
|
|
|
//
|
2017-01-06 07:34:48 +00:00
|
|
|
// - on seeing a layout declaration that sizes the array, fix everything in the
|
2016-03-13 03:11:22 +00:00
|
|
|
// resize-list, giving errors for mismatch
|
|
|
|
//
|
|
|
|
// - on seeing an array size declaration, give errors on mismatch between it and previous
|
|
|
|
// array-sizing declarations
|
|
|
|
//
|
|
|
|
TVector<TSymbol*> ioArraySymbolResizeList;
|
2016-09-09 22:32:09 +00:00
|
|
|
|
HLSL: Recursive composite flattening
This PR implements recursive type flattening. For example, an array of structs of other structs
can be flattened to individual member variables at the shader interface.
This is sufficient for many purposes, e.g, uniforms containing opaque types, but is not sufficient
for geometry shader arrayed inputs. That will be handled separately with structure splitting,
which is not implemented by this PR. In the meantime, that case is detected and triggers an error.
The recursive flattening extends the following three aspects of single-level flattening:
- Flattening of structures to individual members with names such as "foo[0].samp[1]";
- Turning constant references to the nested composite type into a reference to a particular
flattened member.
- Shadow copies between arrays of flattened members and the nested composite type.
Previous single-level flattening only flattened at the shader interface, and that is unchanged by this PR.
Internally, shadow copies are made, such as when the type is passed to a function.
Also, the reasons for flattening are unchanged. Uniforms containing opaque types, and interface struct
types are flattened. (The latter will change with structure splitting).
One existing test changes: hlsl.structin.vert, which did in fact contain a nested composite type to be
flattened.
Two new tests are added: hlsl.structarray.flatten.frag, and hlsl.structarray.flatten.geom (currently
issues an error until type splitting is online).
The process of arriving at the individual member from chained postfix expressions is more complex than
it was with one level. See large-ish comment above HlslParseContext::flatten() for details.
2016-11-29 00:09:54 +00:00
|
|
|
TMap<int, TFlattenData> flattenMap;
|
|
|
|
TVector<int> flattenLevel; // nested postfix operator level for flattening
|
|
|
|
TVector<int> flattenOffset; // cumulative offset for flattening
|
|
|
|
|
2017-02-06 03:27:30 +00:00
|
|
|
// IO-type map. Maps a pure symbol-table form of a structure-member list into
|
|
|
|
// each of the (up to) three kinds of IO, as each has different allowed decorations,
|
|
|
|
// but HLSL allows mixing all in the same structure.
|
|
|
|
struct tIoKinds {
|
|
|
|
TTypeList* input;
|
|
|
|
TTypeList* output;
|
|
|
|
TTypeList* uniform;
|
|
|
|
};
|
|
|
|
TMap<const TTypeList*, tIoKinds> ioTypeMap;
|
HLSL: inter-stage structure splitting.
This adds structure splitting, which among other things will enable GS support where input structs
are passed, and thus become input arrays of structs in the GS inputs. That is a common GS case.
The salient points of this PR are:
* Structure splitting has been changed from "always between stages" to "only into the VS and out of
the PS". It had previously happened between stages because it's not legal to pass a struct
containing a builtin IO variable.
* Structs passed between stages are now split into a struct containing ONLY user types, and a
collection of loose builtin IO variables, if any. The user-part is passed as a normal struct
between stages, which is valid SPIR-V now that the builtin IO is removed.
* Internal to the shader, a sanitized struct (with IO qualifiers removed) is used, so that e.g,
functions can work unmodified.
* If a builtin IO such as Position occurs in an arrayed struct, for example as an input to a GS,
the array reference is moved to the split-off loose variable, which is given the array dimension
itself.
When passing things around inside the shader, such as over a function call, the original type
is used in a sanitized form that removes the builtIn qualifications and makes them temporaries.
This means internal function calls do not have to change. However, the type when returned from
the shader will be member-wise copied from the internal sanitized one to the external type.
The sanitized type is used in variable declarations.
When copying split types and unsplit, if a sub-struct contains only user variables, it is copied
as a single entity to avoid more AST verbosity.
Above strategy arrived at with talks with @johnkslang.
This is a big complex change. I'm inclined to leave it as a WIP until it can get some exposure to
real world cases.
2016-12-14 22:22:25 +00:00
|
|
|
|
|
|
|
// Structure splitting data:
|
2016-12-19 22:48:01 +00:00
|
|
|
TMap<int, TVariable*> splitIoVars; // variables with the builtin interstage IO removed, indexed by unique ID.
|
|
|
|
|
2017-02-24 01:04:12 +00:00
|
|
|
// Structuredbuffer shared types. Typically there are only a few.
|
|
|
|
TVector<TType*> structBufferTypes;
|
|
|
|
|
2016-12-19 22:48:01 +00:00
|
|
|
// The builtin interstage IO map considers e.g, EvqPosition on input and output separately, so that we
|
|
|
|
// can build the linkage correctly if position appears on both sides. Otherwise, multiple positions
|
|
|
|
// are considered identical.
|
|
|
|
struct tInterstageIoData {
|
Add basic HS/DS implementation.
This obsoletes WIP PR #704, which was built on the pre entry point wrapping master. New version
here uses entry point wrapping.
This is a limited implementation of tessellation shaders. In particular, the following are not functional,
and will be added as separate stages to reduce the size of each PR.
* patchconstantfunctions accepting per-control-point input values, such as
const OutputPatch <hs_out_t, 3> cpv are not implemented.
* patchconstantfunctions whose signature requires an aggregate input type such as
a structure containing builtin variables. Code to synthesize such calls is not
yet present.
These restrictions will be relaxed as soon as possible. Simple cases can compile now: see for example
Test/hlsl.hull.1.tesc - e.g., writing to inner and outer tessellation factors.
PCF invocation is synthesized as an entry point epilogue protected behind a barrier and a test on
invocation ID == 0. If there is an existing invocation ID variable it will be used, otherwise one is
added to the linkage. The PCF and the shader EP interfaces are unioned and builtins appearing in
the PCF but not the EP are also added to the linkage and synthesized as shader inputs.
Parameter matching to (eventually arbitrary) PCF signatures is by builtin variable type. Any user
variables in the PCF signature will result in an error. Overloaded PCF functions will also result in
an error.
[domain()], [partitioning()], [outputtopology()], [outputcontrolpoints()], and [patchconstantfunction()]
attributes to the shader entry point are in place, with the exception of the Pow2 partitioning mode.
2017-01-07 15:54:10 +00:00
|
|
|
tInterstageIoData(TBuiltInVariable bi, TStorageQualifier q) :
|
|
|
|
builtIn(bi), storage(q) { }
|
|
|
|
|
2016-12-19 22:48:01 +00:00
|
|
|
tInterstageIoData(const TType& memberType, const TType& storageType) :
|
|
|
|
builtIn(memberType.getQualifier().builtIn),
|
|
|
|
storage(storageType.getQualifier().storage) { }
|
|
|
|
|
|
|
|
TBuiltInVariable builtIn;
|
|
|
|
TStorageQualifier storage;
|
|
|
|
|
|
|
|
// ordering for maps
|
|
|
|
bool operator<(const tInterstageIoData d) const {
|
|
|
|
return (builtIn != d.builtIn) ? (builtIn < d.builtIn) : (storage < d.storage);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2017-02-06 03:27:30 +00:00
|
|
|
TMap<tInterstageIoData, TVariable*> interstageBuiltInIo; // individual builtin interstage IO vars, indexed by builtin type.
|
2016-12-19 22:48:01 +00:00
|
|
|
|
|
|
|
// We have to move array references to structs containing builtin interstage IO to the split variables.
|
|
|
|
// This is only handled for one level. This stores the index, because we'll need it in the future, since
|
|
|
|
// unlike normal array references, here the index happens before we discover what it applies to.
|
|
|
|
TIntermTyped* builtInIoIndex;
|
|
|
|
TIntermTyped* builtInIoBase;
|
HLSL: inter-stage structure splitting.
This adds structure splitting, which among other things will enable GS support where input structs
are passed, and thus become input arrays of structs in the GS inputs. That is a common GS case.
The salient points of this PR are:
* Structure splitting has been changed from "always between stages" to "only into the VS and out of
the PS". It had previously happened between stages because it's not legal to pass a struct
containing a builtin IO variable.
* Structs passed between stages are now split into a struct containing ONLY user types, and a
collection of loose builtin IO variables, if any. The user-part is passed as a normal struct
between stages, which is valid SPIR-V now that the builtin IO is removed.
* Internal to the shader, a sanitized struct (with IO qualifiers removed) is used, so that e.g,
functions can work unmodified.
* If a builtin IO such as Position occurs in an arrayed struct, for example as an input to a GS,
the array reference is moved to the split-off loose variable, which is given the array dimension
itself.
When passing things around inside the shader, such as over a function call, the original type
is used in a sanitized form that removes the builtIn qualifications and makes them temporaries.
This means internal function calls do not have to change. However, the type when returned from
the shader will be member-wise copied from the internal sanitized one to the external type.
The sanitized type is used in variable declarations.
When copying split types and unsplit, if a sub-struct contains only user variables, it is copied
as a single entity to avoid more AST verbosity.
Above strategy arrived at with talks with @johnkslang.
This is a big complex change. I'm inclined to leave it as a WIP until it can get some exposure to
real world cases.
2016-12-14 22:22:25 +00:00
|
|
|
|
2016-09-16 07:44:43 +00:00
|
|
|
unsigned int nextInLocation;
|
|
|
|
unsigned int nextOutLocation;
|
2016-10-31 21:13:43 +00:00
|
|
|
|
Add basic HS/DS implementation.
This obsoletes WIP PR #704, which was built on the pre entry point wrapping master. New version
here uses entry point wrapping.
This is a limited implementation of tessellation shaders. In particular, the following are not functional,
and will be added as separate stages to reduce the size of each PR.
* patchconstantfunctions accepting per-control-point input values, such as
const OutputPatch <hs_out_t, 3> cpv are not implemented.
* patchconstantfunctions whose signature requires an aggregate input type such as
a structure containing builtin variables. Code to synthesize such calls is not
yet present.
These restrictions will be relaxed as soon as possible. Simple cases can compile now: see for example
Test/hlsl.hull.1.tesc - e.g., writing to inner and outer tessellation factors.
PCF invocation is synthesized as an entry point epilogue protected behind a barrier and a test on
invocation ID == 0. If there is an existing invocation ID variable it will be used, otherwise one is
added to the linkage. The PCF and the shader EP interfaces are unioned and builtins appearing in
the PCF but not the EP are also added to the linkage and synthesized as shader inputs.
Parameter matching to (eventually arbitrary) PCF signatures is by builtin variable type. Any user
variables in the PCF signature will result in an error. Overloaded PCF functions will also result in
an error.
[domain()], [partitioning()], [outputtopology()], [outputcontrolpoints()], and [patchconstantfunction()]
attributes to the shader entry point are in place, with the exception of the Pow2 partitioning mode.
2017-01-07 15:54:10 +00:00
|
|
|
TString sourceEntryPointName;
|
|
|
|
TFunction* entryPointFunction;
|
|
|
|
TIntermNode* entryPointFunctionBody;
|
|
|
|
|
|
|
|
TString patchConstantFunctionName; // hull shader patch constant function name, from function level attribute.
|
|
|
|
TMap<TBuiltInVariable, TSymbol*> builtInLinkageSymbols; // used for tessellation, finding declared builtins
|
|
|
|
|
2017-03-11 21:13:00 +00:00
|
|
|
TVector<TString> currentTypePrefix;
|
2016-03-13 03:11:22 +00:00
|
|
|
};
|
|
|
|
|
2017-03-20 00:12:37 +00:00
|
|
|
// This is the prefix we use for builtin methods to avoid namespace collisions with
|
|
|
|
// global scope user functions.
|
|
|
|
// TODO: this would be better as a nonparseable character, but that would
|
|
|
|
// require changing the scanner.
|
|
|
|
#define BUILTIN_PREFIX "__BI_"
|
|
|
|
|
2016-03-13 03:11:22 +00:00
|
|
|
} // end namespace glslang
|
|
|
|
|
|
|
|
#endif // HLSL_PARSE_INCLUDED_
|