//
// Copyright (C) 2016 Google, Inc.
// Copyright (C) 2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#ifndef HLSL_PARSE_INCLUDED_
#define HLSL_PARSE_INCLUDED_

#include "../glslang/MachineIndependent/parseVersions.h"
#include "../glslang/MachineIndependent/ParseHelper.h"

#include <array>

namespace glslang {

class TAttributeMap; // forward declare
class TFunctionDeclarator;

class HlslParseContext : public TParseContextBase {
public:
    HlslParseContext(TSymbolTable&, TIntermediate&, bool parsingBuiltins,
                     int version, EProfile, const SpvVersion& spvVersion, EShLanguage, TInfoSink&,
                     const TString sourceEntryPointName,
                     bool forwardCompatible = false, EShMessages messages = EShMsgDefault);
    virtual ~HlslParseContext();
    void initializeExtensionBehavior() override;

    void setLimits(const TBuiltInResource&) override;
    bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override;
    virtual const char* getGlobalUniformBlockName() const override { return "$Global"; }
    virtual void setUniformBlockDefaults(TType& block) const override
    {
        block.getQualifier().layoutPacking = ElpStd140;
        block.getQualifier().layoutMatrix  = ElmRowMajor;
    }

    void reservedPpErrorCheck(const TSourceLoc&, const char* /*name*/, const char* /*op*/) override { }
    bool lineContinuationCheck(const TSourceLoc&, bool /*endOfComment*/) override { return true; }
    bool lineDirectiveShouldSetNextLine() const override { return true; }
    bool builtInName(const TString&);

    void handlePragma(const TSourceLoc&, const TVector<TString>&) override;
    TIntermTyped* handleVariable(const TSourceLoc&, const TString* string);
    TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
    TIntermTyped* handleBracketOperator(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);

    TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
    TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode);
    TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field);
    bool isBuiltInMethod(const TSourceLoc&, TIntermTyped* base, const TString& field);
    void assignToInterface(TVariable& variable);
    void handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype);
    TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&, const TAttributeMap&, TIntermNode*& entryPointTree);
    TIntermNode* transformEntryPoint(const TSourceLoc&, TFunction&, const TAttributeMap&);
    void handleEntryPointAttributes(const TSourceLoc&, const TAttributeMap&);
    void transferTypeAttributes(const TAttributeMap&, TType&);
    void handleFunctionBody(const TSourceLoc&, TFunction&, TIntermNode* functionBody, TIntermNode*& node);
    void remapEntryPointIO(TFunction& function, TVariable*& returnValue, TVector<TVariable*>& inputs, TVector<TVariable*>& outputs);
    void remapNonEntryPointIO(TFunction& function);
    TIntermNode* handleReturnValue(const TSourceLoc&, TIntermTyped*);
    void handleFunctionArgument(TFunction*, TIntermTyped*& arguments, TIntermTyped* newArg);
    TIntermTyped* handleAssign(const TSourceLoc&, TOperator, TIntermTyped* left, TIntermTyped* right);
    TIntermTyped* handleAssignToMatrixSwizzle(const TSourceLoc&, TOperator, TIntermTyped* left, TIntermTyped* right);
    TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermTyped*);
    TIntermAggregate* assignClipCullDistance(const TSourceLoc&, TOperator, int semanticId, TIntermTyped* left, TIntermTyped* right);
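    // Note on assignClipCullDistance (above): HLSL allows several declared
    // variables to share the clip/cull arrays, with packing rules: e.g. a float3
    // and a float1 pack into a single array[4], while a second float3 skips an
    // entry rather than straddle a register boundary. Only semantic IDs 0 and 1
    // are supported, per HLSL rules (maxClipCullRegs = 2 in the implementation).
    // Illustrative sketch (hypothetical HLSL, not from this codebase):
    //     float3 clip0 : SV_ClipDistance0;   // occupies entries 0..2
    //     float  clip1 : SV_ClipDistance1;   // packs into entry 3
    // Both fuse into one float[4] ClipDistance output, named after one of the
    // declared variables.
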
    void decomposeIntrinsic(const TSourceLoc&, TIntermTyped*& node, TIntermNode* arguments);
    void decomposeSampleMethods(const TSourceLoc&, TIntermTyped*& node, TIntermNode* arguments);
    void decomposeStructBufferMethods(const TSourceLoc&, TIntermTyped*& node, TIntermNode* arguments);
    void decomposeGeometryMethods(const TSourceLoc&, TIntermTyped*& node, TIntermNode* arguments);
    void pushFrontArguments(TIntermTyped* front, TIntermTyped*& arguments);
    void addInputArgumentConversions(const TFunction&, TIntermTyped*&);
    void expandArguments(const TSourceLoc&, const TFunction&, TIntermTyped*&);
    TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermOperator&);
    void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&);
    TFunction* makeConstructorCall(const TSourceLoc&, const TType&);
    void handleSemantic(TSourceLoc, TQualifier&, TBuiltInVariable, const TString& upperCase);
    void handlePackOffset(const TSourceLoc&, TQualifier&, const glslang::TString& location,
                          const glslang::TString* component);
    void handleRegister(const TSourceLoc&, TQualifier&, const glslang::TString* profile, const glslang::TString& desc,
                        int subComponent, const glslang::TString*);
    TIntermTyped* convertConditionalExpression(const TSourceLoc&, TIntermTyped*, bool mustBeScalar = true);
    TIntermAggregate* handleSamplerTextureCombine(const TSourceLoc& loc, TIntermTyped* argTex, TIntermTyped* argSampler);

    bool parseMatrixSwizzleSelector(const TSourceLoc&, const TString&, int cols, int rows, TSwizzleSelectors<TMatrixSelector>&);
    int getMatrixComponentsColumn(int rows, const TSwizzleSelectors<TMatrixSelector>&);
    void assignError(const TSourceLoc&, const char* op, TString left, TString right);
    void unaryOpError(const TSourceLoc&, const char* op, TString operand);
    void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right);
    void variableCheck(TIntermTyped*& nodePtr);
    void constantValueCheck(TIntermTyped* node, const char* token);
    void integerCheck(const TIntermTyped* node, const char* token);
    void globalCheck(const TSourceLoc&, const char* token);
    bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&);
    bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&);
    void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&);
    void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&);
    void structArrayCheck(const TSourceLoc&, const TType& structure);
    void arrayDimMerge(TType& type, const TArraySizes* sizes);
    bool voidErrorCheck(const TSourceLoc&, const TString&, TBasicType);
    void globalQualifierFix(const TSourceLoc&, TQualifier&);
    bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType);
    void mergeQualifiers(TQualifier& dst, const TQualifier& src);
    int computeSamplerTypeIndex(TSampler&);
    TSymbol* redeclareBuiltinVariable(const TSourceLoc&, const TString&, const TQualifier&, const TShaderQualifiers&);
    void paramFix(TType& type);
    void specializationCheck(const TSourceLoc&, const TType&, const char* op);

    void setLayoutQualifier(const TSourceLoc&, TQualifier&, TString&);
    void setLayoutQualifier(const TSourceLoc&, TQualifier&, TString&, const TIntermTyped*);
    void mergeObjectLayoutQualifiers(TQualifier& dest, const TQualifier& src, bool inheritOnly);
    void checkNoShaderLayouts(const TSourceLoc&, const TShaderQualifiers&);

    const TFunction* findFunction(const TSourceLoc& loc, TFunction& call, bool& builtIn, int& thisDepth, TIntermTyped*& args);
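    // Note on findFunction (above): overload selection for intrinsics proceeds
    // in three steps (see the comments in the implementation): (1) attempt an
    // exact match; (2) failing that, promote arguments toward an intrinsic
    // signature; promotions can mix with shape conversions, e.g.
    // clamp(int, bool2, float2); (3) re-select the intrinsic overload from the
    // promoted arguments.
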
    void declareTypedef(const TSourceLoc&, const TString& identifier, const TType&);
    void declareStruct(const TSourceLoc&, TString& structName, TType&);
    TSymbol* lookupUserType(const TString&, TType&);
    TIntermNode* declareVariable(const TSourceLoc&, const TString& identifier, TType&, TIntermTyped* initializer = 0);
    void lengthenList(const TSourceLoc&, TIntermSequence& list, int size, TIntermTyped* scalarInit);
    TIntermTyped* handleConstructor(const TSourceLoc&, TIntermTyped*, const TType&);
    TIntermTyped* addConstructor(const TSourceLoc&, TIntermTyped*, const TType&);
    TIntermTyped* convertArray(TIntermTyped*, const TType&);
    TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
    TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
    void declareBlock(const TSourceLoc&, TType&, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
    void declareStructBufferCounter(const TSourceLoc& loc, const TType& bufferType, const TString& name);
    void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
    void fixBlockXfbOffsets(TQualifier&, TTypeList&);
    void fixBlockUniformOffsets(const TQualifier&, TTypeList&);
    void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
    void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
    void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
    void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
    TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body, TSelectionControl control);

    void updateImplicitArraySize(const TSourceLoc&, TIntermNode*, int index);

    void nestLooping()       { ++loopNestingLevel; }
    void unnestLooping()     { --loopNestingLevel; }
    void nestAnnotations()   { ++annotationNestingLevel; }
    void unnestAnnotations() { --annotationNestingLevel; }
    int getAnnotationNestingLevel() { return annotationNestingLevel; }
    void pushScope()         { symbolTable.push(); }
    void popScope()          { symbolTable.pop(0); }

    void pushThisScope(const TType&, const TVector<TFunctionDeclarator>&);
    void popThisScope()      { symbolTable.pop(0); }

    void pushImplicitThis(TVariable* thisParameter) { implicitThisStack.push_back(thisParameter); }
    void popImplicitThis() { implicitThisStack.pop_back(); }
    TVariable* getImplicitThis(int thisDepth) const { return implicitThisStack[implicitThisStack.size() - thisDepth]; }

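    // Note on getImplicitThis (above): thisDepth counts back from the top of
    // implicitThisStack, so a thisDepth of 1 yields the innermost 'this'.
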
    void pushNamespace(const TString& name);
    void popNamespace();
    void getFullNamespaceName(TString*&) const;
    void addScopeMangler(TString&);

    void pushSwitchSequence(TIntermSequence* sequence) { switchSequenceStack.push_back(sequence); }
    void popSwitchSequence() { switchSequenceStack.pop_back(); }

    virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr) override;

    // Apply L-value conversions. E.g., turning a write to a RWTexture into an ImageStore.
    TIntermTyped* handleLvalue(const TSourceLoc&, const char* op, TIntermTyped*& node);
    bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
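    // Illustrative sketch of the conversion handleLvalue performs (hypothetical
    // HLSL, for illustration only; the real work happens in the implementation):
    //     RWTexture2D<float4> tex;
    //     tex[pos] = color;    // an assignment through an image index,
    //                          // rewritten into an image-store style operation
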
    TLayoutFormat getLayoutFromTxType(const TSourceLoc&, const TType&);

    bool handleOutputGeometry(const TSourceLoc&, const TLayoutGeometry& geometry);
    bool handleInputGeometry(const TSourceLoc&, const TLayoutGeometry& geometry);

    // Determine selection control from attributes
    TSelectionControl handleSelectionControl(const TAttributeMap& attributes) const;

    // Determine loop control from attributes
    TLoopControl handleLoopControl(const TAttributeMap& attributes) const;

    // Share struct buffer deep types
    void shareStructBufferType(TType&);

    // Set texture return type of the given sampler. Returns success (not all types are valid).
    bool setTextureReturnType(TSampler& sampler, const TType& retType, const TSourceLoc& loc);

    // Obtain the sampler return type of the given sampler in retType.
    void getTextureReturnType(const TSampler& sampler, TType& retType) const;

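    // Background for the two methods above: HLSL allows a restricted set of user
    // structure types as texture return (template) types; the total vector size
    // of all members may not exceed 4, and all members must share one basic
    // type. Rather than store a whole TType in the TSampler, the structure is
    // kept in a side table and the TSampler holds a small bitfield index into it
    // (structReturnIndex); plain vector returns are handled as before.
    // Illustrative sketch (hypothetical HLSL):
    //     struct ret_t { float2 a; float b; };   // 3 components, one basic type
    //     Texture2D<ret_t> tex;                  // struct texture return type
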
protected:

    struct TFlattenData {
        TFlattenData() : nextBinding(TQualifier::layoutBindingEnd),
                         nextLocation(TQualifier::layoutLocationEnd) { }
        TFlattenData(int nb, int nl) : nextBinding(nb), nextLocation(nl) { }

        TVector<TVariable*> members;  // individual flattened variables
        TVector<int> offsets;         // offset to next tree level
        unsigned int nextBinding;     // next binding to use
        unsigned int nextLocation;    // next location to use
    };

    void fixConstInit(const TSourceLoc&, const TString& identifier, TType& type, TIntermTyped*& initializer);
    void inheritGlobalDefaults(TQualifier& dst) const;
    TVariable* makeInternalVariable(const char* name, const TType&) const;
    TVariable* makeInternalVariable(const TString& name, const TType& type) const {
        return makeInternalVariable(name.c_str(), type);
    }
    TIntermSymbol* makeInternalVariableNode(const TSourceLoc&, const char* name, const TType&) const;
    TVariable* declareNonArray(const TSourceLoc&, const TString& identifier, const TType&, bool track);
    void declareArray(const TSourceLoc&, const TString& identifier, const TType&, TSymbol*&, bool track);
    TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
    TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer, TIntermTyped* scalarInit);
    bool isScalarConstructor(const TIntermNode*);
    TOperator mapAtomicOp(const TSourceLoc& loc, TOperator op, bool isImage);

    // Return true if this node requires L-value conversion (e.g., to an imageStore).
    bool shouldConvertLValue(const TIntermNode*) const;

    // Array and struct flattening
    TIntermTyped* flattenAccess(TIntermTyped* base, int member);
    TIntermTyped* flattenAccess(int uniqueId, int member, TStorageQualifier outerStorage, const TType&, int subset = -1);
    int findSubtreeOffset(const TIntermNode&) const;
    int findSubtreeOffset(const TType&, int subset, const TVector<int>& offsets) const;
    bool shouldFlatten(const TType&, TStorageQualifier, bool topLevel) const;
    bool wasFlattened(const TIntermTyped* node) const;
    bool wasFlattened(int id) const { return flattenMap.find(id) != flattenMap.end(); }
    int addFlattenedMember(const TVariable&, const TType&, TFlattenData&, const TString& name, bool linkage,
                           const TQualifier& outerQualifier, const TArraySizes* builtInArraySizes);

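    // Note on flattening (wasFlattened/addFlattenedMember above, flatten* below):
    // flattening is recursive, so an array of structs of structs can be turned
    // into individual member variables at the shader interface, with names such
    // as "foo[0].samp[1]". It applies to uniforms containing opaque types and to
    // interface struct types; shadow copies bridge the flattened members and the
    // original composite, e.g. when the composite is passed to a function.
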
    // Structure splitting (splits interstage built-in types into their own struct)
    void split(const TVariable&);
    void splitBuiltIn(const TString& baseName, const TType& memberType, const TArraySizes*, const TQualifier&);
    const TType& split(const TType& type, const TString& name, const TQualifier&);
    bool wasSplit(const TIntermTyped* node) const;
    bool wasSplit(int id) const { return splitNonIoVars.find(id) != splitNonIoVars.end(); }
    TVariable* getSplitNonIoVar(int id) const;
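
    // Note on splitting (above): a struct passed between stages is split into a
    // struct holding only the user members plus loose built-in IO variables,
    // since built-in IO may not live inside such a struct in SPIR-V. If a
    // built-in such as Position occurs in an arrayed struct (e.g. a geometry
    // shader input), the array dimension moves to the split-off loose variable.
    // Illustrative sketch (hypothetical HLSL):
    //     struct vs_out_t { float4 pos : SV_Position; float2 uv : TEXCOORD0; };
    // splits into a loose Position output plus a struct containing only uv.
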
    void addPatchConstantInvocation();
    void fixTextureShadowModes();
    TIntermTyped* makeIntegerIndex(TIntermTyped*);
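
    // Note on addPatchConstantInvocation (above): the patch constant function
    // call is synthesized as an entry-point epilogue guarded by a barrier and a
    // test on invocation ID == 0. An existing invocation ID variable is reused
    // when present; otherwise one is added to the linkage. PCF parameters are
    // matched by built-in variable type; user variables in the PCF signature are
    // an error.
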
    void fixBuiltInIoType(TType&);

    void flatten(const TVariable& variable, bool linkage);
    int flatten(const TVariable& variable, const TType&, TFlattenData&, TString name, bool linkage,
                const TQualifier& outerQualifier, const TArraySizes* builtInArraySizes);
    int flattenStruct(const TVariable& variable, const TType&, TFlattenData&, TString name, bool linkage,
                      const TQualifier& outerQualifier, const TArraySizes* builtInArraySizes);
    int flattenArray(const TVariable& variable, const TType&, TFlattenData&, TString name, bool linkage,
                     const TQualifier& outerQualifier);

    bool hasUniform(const TQualifier& qualifier) const;
    void clearUniform(TQualifier& qualifier);
    bool isInputBuiltIn(const TQualifier& qualifier) const;
    bool hasInput(const TQualifier& qualifier) const;
    void correctOutput(TQualifier& qualifier);
    bool isOutputBuiltIn(const TQualifier& qualifier) const;
    bool hasOutput(const TQualifier& qualifier) const;
    void correctInput(TQualifier& qualifier);
    void correctUniform(TQualifier& qualifier);
    void clearUniformInputOutput(TQualifier& qualifier);

    // Test method names
    bool isStructBufferMethod(const TString& name) const;
    void counterBufferType(const TSourceLoc& loc, TType& type);

    // Return standard sample position array
    TIntermConstantUnion* getSamplePosArray(int count);

    TType* getStructBufferContentType(const TType& type) const;
    bool isStructBufferType(const TType& type) const { return getStructBufferContentType(type) != nullptr; }
    TIntermTyped* indexStructBufferContent(const TSourceLoc& loc, TIntermTyped* buffer) const;
    TIntermTyped* getStructBufferCounter(const TSourceLoc& loc, TIntermTyped* buffer);
    TString getStructBuffCounterName(const TString&) const;
    void addStructBuffArguments(const TSourceLoc& loc, TIntermAggregate*&);
    void addStructBufferHiddenCounterParam(const TSourceLoc& loc, TParameter&, TIntermAggregate*&);

    // Return true if this type is a reference. This is not currently a type method
    // in case that's a language-specific answer.
    bool isReference(const TType& type) const { return isStructBufferType(type); }

    // Return true if this is a buffer type that has an associated counter buffer.
    bool hasStructBuffCounter(const TType&) const;

    // Finalization step: remove unused buffer blocks from linkage (we don't know until the
    // shader is entirely compiled)
    void removeUnusedStructBufferCounters();
    static bool isClipOrCullDistance(TBuiltInVariable);
    static bool isClipOrCullDistance(const TQualifier& qual) { return isClipOrCullDistance(qual.builtIn); }
    static bool isClipOrCullDistance(const TType& type) { return isClipOrCullDistance(type.getQualifier()); }

    // Find the patch constant function (issues error, returns nullptr if not found)
    const TFunction* findPatchConstantFunction(const TSourceLoc& loc);

    // Pass through to base class after remembering built-in mappings.
    using TParseContextBase::trackLinkage;
    void trackLinkage(TSymbol& variable) override;

    void finish() override; // post-processing

    // Linkage symbol helpers
    TIntermSymbol* findTessLinkageSymbol(TBuiltInVariable biType) const;

    // Current state of parsing
    int annotationNestingLevel; // 0 if outside all annotations

    HlslParseContext(HlslParseContext&);
    HlslParseContext& operator=(HlslParseContext&);

    static const int maxSamplerIndex = EsdNumDims * (EbtNumTypes * (2 * 2 * 2)); // see computeSamplerTypeIndex()
    TQualifier globalBufferDefaults;
    TQualifier globalUniformDefaults;
    TQualifier globalInputDefaults;
    TQualifier globalOutputDefaults;
    TString currentCaller;      // name of last function body entered (not valid when at global scope)
    TIdSetType inductiveLoopIds;
    TVector<TIntermTyped*> needsIndexLimitationChecking;

//
|
|
|
|
// Geometry shader input arrays:
|
|
|
|
// - array sizing is based on input primitive and/or explicit size
|
|
|
|
//
|
|
|
|
// Tessellation control output arrays:
|
|
|
|
// - array sizing is based on output layout(vertices=...) and/or explicit size
|
|
|
|
//
|
|
|
|
// Both:
|
|
|
|
// - array sizing is retroactive
|
|
|
|
// - built-in block redeclarations interact with this
|
|
|
|
//
|
|
|
|
// Design:
|
|
|
|
// - use a per-context "resize-list", a list of symbols whose array sizes
|
|
|
|
// can be fixed
|
|
|
|
//
|
|
|
|
// - the resize-list starts empty at beginning of user-shader compilation, it does
|
|
|
|
// not have built-ins in it
|
|
|
|
//
|
|
|
|
// - on built-in array use: copyUp() symbol and add it to the resize-list
|
|
|
|
//
|
|
|
|
// - on user array declaration: add it to the resize-list
|
|
|
|
//
|
|
|
|
// - on block redeclaration: copyUp() symbol and add it to the resize-list
|
|
|
|
// * note, that appropriately gives an error if redeclaring a block that
|
|
|
|
// was already used and hence already copied-up
|
|
|
|
//
|
2017-01-06 07:34:48 +00:00
|
|
|
// - on seeing a layout declaration that sizes the array, fix everything in the
|
2016-03-13 03:11:22 +00:00
|
|
|
// resize-list, giving errors for mismatch
|
|
|
|
//
|
|
|
|
// - on seeing an array size declaration, give errors on mismatch between it and previous
|
|
|
|
// array-sizing declarations
|
|
|
|
//
|
|
|
|
TVector<TSymbol*> ioArraySymbolResizeList;
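
    // A minimal sketch of the retroactive fix-up step described above (hypothetical
    // helper; the real member functions and TType query names may differ):
    //
    //     void fixIoArraySizes(int requiredSize)
    //     {
    //         for (TSymbol* symbol : ioArraySymbolResizeList) {
    //             TType& type = symbol->getWritableType();
    //             if (!type.isExplicitlySizedArray())
    //                 type.changeOuterArraySize(requiredSize);  // retroactive sizing
    //             else if (type.getOuterArraySize() != requiredSize)
    //                 ;                                         // report a size-mismatch error
    //         }
    //     }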

    TMap<int, TFlattenData> flattenMap;

    // IO-type map. Maps a pure symbol-table form of a structure-member list into
    // each of the (up to) three kinds of IO, as each has different allowed decorations,
    // but HLSL allows mixing all in the same structure.
    struct tIoKinds {
        TTypeList* input;
        TTypeList* output;
        TTypeList* uniform;
    };

    TMap<const TTypeList*, tIoKinds> ioTypeMap;
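
    // Illustrative lookup (hypothetical code, not the actual parser logic): when a
    // struct type appears at the input interface, the input-specific member list is
    // substituted so that only input-legal decorations apply:
    //
    //     const auto io = ioTypeMap.find(type.getStruct());
    //     if (io != ioTypeMap.end() && io->second.input != nullptr)
    //         type.setStruct(io->second.input);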

    // Structure splitting data:
    TMap<int, TVariable*> splitNonIoVars;  // variables with the built-in interstage IO removed, indexed by unique ID.

    // Structuredbuffer shared types. Typically there are only a few.
    TVector<TType*> structBufferTypes;

    // This tracks texture sample user structure return types. Only a limited number are supported, as
    // may fit in TSampler::structReturnIndex.
    TVector<TTypeList*> textureReturnStruct;
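
    // Restrictions on these texture return structs (checked when one is recorded via
    // setTextureReturnType(), and recovered via getTextureReturnType()): all members
    // must be scalars or vectors of a single basic type, and their components may
    // total at most 4.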

    TMap<TString, bool> structBufferCounter;

    // The built-in interstage IO map considers e.g., EvqPosition on input and output separately, so that we
    // can build the linkage correctly if position appears on both sides. Otherwise, multiple positions
    // are considered identical.
    struct tInterstageIoData {
        tInterstageIoData(TBuiltInVariable bi, TStorageQualifier q) :
            builtIn(bi), storage(q) { }

        TBuiltInVariable  builtIn;
        TStorageQualifier storage;

        // ordering for maps
        bool operator<(const tInterstageIoData d) const {
            return (builtIn != d.builtIn) ? (builtIn < d.builtIn) : (storage < d.storage);
        }
    };

    TMap<tInterstageIoData, TVariable*> splitBuiltIns;  // split built-ins, indexed by built-in type.
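
    // Illustrative lookup (hypothetical code): the same built-in keys two distinct
    // split variables depending on which side of the interface it appears on:
    //
    //     TVariable*& inPos  = splitBuiltIns[tInterstageIoData(EbvPosition, EvqVaryingIn)];
    //     TVariable*& outPos = splitBuiltIns[tInterstageIoData(EbvPosition, EvqVaryingOut)];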

    TVariable* inputPatch;  // input patch is special for PCF: it's the only non-builtin PCF input,
                            // and is handled as a pseudo-builtin.

    unsigned int nextInLocation;
    unsigned int nextOutLocation;

    TFunction* entryPointFunction;
    TIntermNode* entryPointFunctionBody;

    TString patchConstantFunctionName;  // hull shader patch constant function name, from function level attribute.
    TMap<TBuiltInVariable, TSymbol*> builtInTessLinkageSymbols;  // used for tessellation, finding declared built-ins

    TVector<TString> currentTypePrefix;     // current scoping prefix for nested structures
    TVector<TVariable*> implicitThisStack;  // currently active 'this' variables for nested structures

    TVariable* gsStreamOutput;  // geometry shader stream outputs, for emit (Append method)

    TVariable* clipDistanceOutput;  // synthesized clip distance out variable (shader might have >1)
    TVariable* cullDistanceOutput;  // synthesized cull distance out variable (shader might have >1)
    TVariable* clipDistanceInput;   // synthesized clip distance in variable (shader might have >1)
    TVariable* cullDistanceInput;   // synthesized cull distance in variable (shader might have >1)

    static const int maxClipCullRegs = 2;
    std::array<int, maxClipCullRegs> clipSemanticNSizeIn;   // vector, indexed by clip semantic ID
    std::array<int, maxClipCullRegs> cullSemanticNSizeIn;   // vector, indexed by cull semantic ID
    std::array<int, maxClipCullRegs> clipSemanticNSizeOut;  // vector, indexed by clip semantic ID
    std::array<int, maxClipCullRegs> cullSemanticNSizeOut;  // vector, indexed by cull semantic ID
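
    // These sizes record how many components each semantic ID declares, so the
    // synthesized clip/cull array can follow HLSL packing rules: a float3 and a
    // float1 can share one float[4] register, while a second float3 must skip ahead
    // rather than straddle a register boundary. Illustrative values, assuming
    // SV_ClipDistance0 is declared float3 and SV_ClipDistance1 float:
    //
    //     clipSemanticNSizeOut[0] = 3;
    //     clipSemanticNSizeOut[1] = 1;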

    // This tracks the first (mip level) argument to the .mips[][] operator. Since this can be nested as
    // in tx.mips[tx.mips[0][1].x][2], we need a stack. We also track the TSourceLoc for error reporting
    // purposes.
    struct tMipsOperatorData {
        tMipsOperatorData(TSourceLoc l, TIntermTyped* m) : loc(l), mipLevel(m) { }
        TSourceLoc loc;
        TIntermTyped* mipLevel;
    };

    TVector<tMipsOperatorData> mipsOperatorMipArg;
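
    // Illustrative stack discipline (hypothetical code): push the mip-level argument
    // when the first [] of .mips is parsed, pop when the second [] completes, so
    // nested uses like tx.mips[tx.mips[0][1].x][2] resolve correctly:
    //
    //     mipsOperatorMipArg.push_back(tMipsOperatorData(loc, mipExpr)); // first []
    //     // ... the index expression may itself contain a nested .mips[][] ...
    //     TIntermTyped* mip = mipsOperatorMipArg.back().mipLevel;        // second []
    //     mipsOperatorMipArg.pop_back();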

    // A texture object may be used with shadow and non-shadow samplers, but both may not be
    // alive post-DCE in the same shader. We do not know at compilation time which are alive: that's
    // only known post-DCE. If a texture is used both ways, we create two textures, and
    // leave the elimination of one to the optimizer. This maps the shader variant to
    // the shadow variant.
    //
    // This can be removed if and when the texture shadow code in
    // HlslParseContext::handleSamplerTextureCombine is removed.
    struct tShadowTextureSymbols {
        tShadowTextureSymbols() { symId.fill(-1); }

        void set(bool shadow, int id) { symId[int(shadow)] = id; }
        int get(bool shadow) const { return symId[int(shadow)]; }

        // True if this texture has been seen with both shadow and non-shadow modes
        bool overloaded() const { return symId[0] != -1 && symId[1] != -1; }
        bool isShadowId(int id) const { return symId[1] == id; }

    private:
        std::array<int, 2> symId;
    };

    TMap<int, tShadowTextureSymbols*> textureShadowVariant;
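
    // Illustrative bookkeeping (hypothetical code): given a texture's symbol-table
    // unique ID, record each variant as it is encountered:
    //
    //     tShadowTextureSymbols*& variants = textureShadowVariant[textureId];
    //     if (variants == nullptr)
    //         variants = new tShadowTextureSymbols;
    //     variants->set(isShadowUse, variantSymbolId);
    //     // variants->overloaded() is now true if both modes were seen, leaving
    //     // the unused variant for the optimizer to eliminate.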
};

// This is the prefix we use for built-in methods to avoid namespace collisions with
// global scope user functions.
// TODO: this would be better as a nonparseable character, but that would
// require changing the scanner.
#define BUILTIN_PREFIX "__BI_"

} // end namespace glslang

#endif // HLSL_PARSE_INCLUDED_