glslang: Sync with upstream 4fc7a33 for Vulkan SDK 1.2.131
Fixes #36888.
This commit is contained in:
1
thirdparty/glslang/SPIRV/GLSL.ext.EXT.h
vendored
1
thirdparty/glslang/SPIRV/GLSL.ext.EXT.h
vendored
@ -34,5 +34,6 @@ static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shade
|
||||
static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
|
||||
static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
|
||||
static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density";
|
||||
static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation";
|
||||
|
||||
#endif // #ifndef GLSLextEXT_H
|
||||
|
||||
3
thirdparty/glslang/SPIRV/GLSL.ext.KHR.h
vendored
3
thirdparty/glslang/SPIRV/GLSL.ext.KHR.h
vendored
@ -41,5 +41,8 @@ static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_stora
|
||||
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
|
||||
static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";
|
||||
static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer";
|
||||
static const char* const E_SPV_KHR_physical_storage_buffer = "SPV_KHR_physical_storage_buffer";
|
||||
static const char* const E_SPV_EXT_fragment_shader_interlock = "SPV_EXT_fragment_shader_interlock";
|
||||
static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shader_clock";
|
||||
|
||||
#endif // #ifndef GLSLextKHR_H
|
||||
|
||||
3
thirdparty/glslang/SPIRV/GLSL.ext.NV.h
vendored
3
thirdparty/glslang/SPIRV/GLSL.ext.NV.h
vendored
@ -75,4 +75,7 @@ const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate";
|
||||
//SPV_NV_cooperative_matrix
|
||||
const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix";
|
||||
|
||||
//SPV_NV_shader_sm_builtins
|
||||
const char* const E_SPV_NV_shader_sm_builtins = "SPV_NV_shader_sm_builtins";
|
||||
|
||||
#endif // #ifndef GLSLextNV_H
|
||||
|
||||
1689
thirdparty/glslang/SPIRV/GlslangToSpv.cpp
vendored
Normal file → Executable file
1689
thirdparty/glslang/SPIRV/GlslangToSpv.cpp
vendored
Normal file → Executable file
File diff suppressed because it is too large
Load Diff
2
thirdparty/glslang/SPIRV/GlslangToSpv.h
vendored
Normal file → Executable file
2
thirdparty/glslang/SPIRV/GlslangToSpv.h
vendored
Normal file → Executable file
@ -40,7 +40,7 @@
|
||||
#endif
|
||||
|
||||
#include "SpvTools.h"
|
||||
#include "../glslang/Include/intermediate.h"
|
||||
#include "glslang/Include/intermediate.h"
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
44
thirdparty/glslang/SPIRV/InReadableOrder.cpp
vendored
44
thirdparty/glslang/SPIRV/InReadableOrder.cpp
vendored
@ -61,17 +61,22 @@ namespace {
|
||||
// Use by calling visit() on the root block.
|
||||
class ReadableOrderTraverser {
|
||||
public:
|
||||
explicit ReadableOrderTraverser(std::function<void(Block*)> callback) : callback_(callback) {}
|
||||
ReadableOrderTraverser(std::function<void(Block*, spv::ReachReason, Block*)> callback)
|
||||
: callback_(callback) {}
|
||||
// Visits the block if it hasn't been visited already and isn't currently
|
||||
// being delayed. Invokes callback(block), then descends into its
|
||||
// being delayed. Invokes callback(block, why, header), then descends into its
|
||||
// successors. Delays merge-block and continue-block processing until all
|
||||
// the branches have been completed.
|
||||
void visit(Block* block)
|
||||
// the branches have been completed. If |block| is an unreachable merge block or
|
||||
// an unreachable continue target, then |header| is the corresponding header block.
|
||||
void visit(Block* block, spv::ReachReason why, Block* header)
|
||||
{
|
||||
assert(block);
|
||||
if (why == spv::ReachViaControlFlow) {
|
||||
reachableViaControlFlow_.insert(block);
|
||||
}
|
||||
if (visited_.count(block) || delayed_.count(block))
|
||||
return;
|
||||
callback_(block);
|
||||
callback_(block, why, header);
|
||||
visited_.insert(block);
|
||||
Block* mergeBlock = nullptr;
|
||||
Block* continueBlock = nullptr;
|
||||
@ -87,27 +92,40 @@ public:
|
||||
delayed_.insert(continueBlock);
|
||||
}
|
||||
}
|
||||
const auto successors = block->getSuccessors();
|
||||
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
|
||||
visit(*it);
|
||||
if (why == spv::ReachViaControlFlow) {
|
||||
const auto& successors = block->getSuccessors();
|
||||
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
|
||||
visit(*it, why, nullptr);
|
||||
}
|
||||
if (continueBlock) {
|
||||
const spv::ReachReason continueWhy =
|
||||
(reachableViaControlFlow_.count(continueBlock) > 0)
|
||||
? spv::ReachViaControlFlow
|
||||
: spv::ReachDeadContinue;
|
||||
delayed_.erase(continueBlock);
|
||||
visit(continueBlock);
|
||||
visit(continueBlock, continueWhy, block);
|
||||
}
|
||||
if (mergeBlock) {
|
||||
const spv::ReachReason mergeWhy =
|
||||
(reachableViaControlFlow_.count(mergeBlock) > 0)
|
||||
? spv::ReachViaControlFlow
|
||||
: spv::ReachDeadMerge;
|
||||
delayed_.erase(mergeBlock);
|
||||
visit(mergeBlock);
|
||||
visit(mergeBlock, mergeWhy, block);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
std::function<void(Block*)> callback_;
|
||||
std::function<void(Block*, spv::ReachReason, Block*)> callback_;
|
||||
// Whether a block has already been visited or is being delayed.
|
||||
std::unordered_set<Block *> visited_, delayed_;
|
||||
|
||||
// The set of blocks that actually are reached via control flow.
|
||||
std::unordered_set<Block *> reachableViaControlFlow_;
|
||||
};
|
||||
}
|
||||
|
||||
void spv::inReadableOrder(Block* root, std::function<void(Block*)> callback)
|
||||
void spv::inReadableOrder(Block* root, std::function<void(Block*, spv::ReachReason, Block*)> callback)
|
||||
{
|
||||
ReadableOrderTraverser(callback).visit(root);
|
||||
ReadableOrderTraverser(callback).visit(root, spv::ReachViaControlFlow, nullptr);
|
||||
}
|
||||
|
||||
4
thirdparty/glslang/SPIRV/Logger.cpp
vendored
4
thirdparty/glslang/SPIRV/Logger.cpp
vendored
@ -32,6 +32,8 @@
|
||||
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef GLSLANG_WEB
|
||||
|
||||
#include "Logger.h"
|
||||
|
||||
#include <algorithm>
|
||||
@ -66,3 +68,5 @@ std::string SpvBuildLogger::getAllMessages() const {
|
||||
}
|
||||
|
||||
} // end spv namespace
|
||||
|
||||
#endif
|
||||
9
thirdparty/glslang/SPIRV/Logger.h
vendored
9
thirdparty/glslang/SPIRV/Logger.h
vendored
@ -46,6 +46,14 @@ class SpvBuildLogger {
|
||||
public:
|
||||
SpvBuildLogger() {}
|
||||
|
||||
#ifdef GLSLANG_WEB
|
||||
void tbdFunctionality(const std::string& f) { }
|
||||
void missingFunctionality(const std::string& f) { }
|
||||
void warning(const std::string& w) { }
|
||||
void error(const std::string& e) { errors.push_back(e); }
|
||||
std::string getAllMessages() { return ""; }
|
||||
#else
|
||||
|
||||
// Registers a TBD functionality.
|
||||
void tbdFunctionality(const std::string& f);
|
||||
// Registers a missing functionality.
|
||||
@ -59,6 +67,7 @@ public:
|
||||
// Returns all messages accumulated in the order of:
|
||||
// TBD functionalities, missing functionalities, warnings, errors.
|
||||
std::string getAllMessages() const;
|
||||
#endif
|
||||
|
||||
private:
|
||||
SpvBuildLogger(const SpvBuildLogger&);
|
||||
|
||||
2
thirdparty/glslang/SPIRV/SPVRemapper.h
vendored
2
thirdparty/glslang/SPIRV/SPVRemapper.h
vendored
@ -195,7 +195,7 @@ private:
|
||||
// Header access & set methods
|
||||
spirword_t magic() const { return spv[0]; } // return magic number
|
||||
spirword_t bound() const { return spv[3]; } // return Id bound from header
|
||||
spirword_t bound(spirword_t b) { return spv[3] = b; };
|
||||
spirword_t bound(spirword_t b) { return spv[3] = b; }
|
||||
spirword_t genmagic() const { return spv[2]; } // generator magic
|
||||
spirword_t genmagic(spirword_t m) { return spv[2] = m; }
|
||||
spirword_t schemaNum() const { return spv[4]; } // schema number from header
|
||||
|
||||
80
thirdparty/glslang/SPIRV/SpvBuilder.cpp
vendored
80
thirdparty/glslang/SPIRV/SpvBuilder.cpp
vendored
@ -46,7 +46,9 @@
|
||||
|
||||
#include "SpvBuilder.h"
|
||||
|
||||
#ifndef GLSLANG_WEB
|
||||
#include "hex_float.h"
|
||||
#endif
|
||||
|
||||
#ifndef _WIN32
|
||||
#include <cstdio>
|
||||
@ -230,6 +232,11 @@ Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardP
|
||||
|
||||
Id Builder::makeIntegerType(int width, bool hasSign)
|
||||
{
|
||||
#ifdef GLSLANG_WEB
|
||||
assert(width == 32);
|
||||
width = 32;
|
||||
#endif
|
||||
|
||||
// try to find it
|
||||
Instruction* type;
|
||||
for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) {
|
||||
@ -265,6 +272,11 @@ Id Builder::makeIntegerType(int width, bool hasSign)
|
||||
|
||||
Id Builder::makeFloatType(int width)
|
||||
{
|
||||
#ifdef GLSLANG_WEB
|
||||
assert(width == 32);
|
||||
width = 32;
|
||||
#endif
|
||||
|
||||
// try to find it
|
||||
Instruction* type;
|
||||
for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) {
|
||||
@ -516,6 +528,7 @@ Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, boo
|
||||
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
|
||||
module.mapInstruction(type);
|
||||
|
||||
#ifndef GLSLANG_WEB
|
||||
// deal with capabilities
|
||||
switch (dim) {
|
||||
case DimBuffer:
|
||||
@ -561,6 +574,7 @@ Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, boo
|
||||
addCapability(CapabilityImageMSArray);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return type->getResultId();
|
||||
}
|
||||
@ -586,7 +600,7 @@ Id Builder::makeSampledImageType(Id imageType)
|
||||
return type->getResultId();
|
||||
}
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
#ifndef GLSLANG_WEB
|
||||
Id Builder::makeAccelerationStructureNVType()
|
||||
{
|
||||
Instruction *type;
|
||||
@ -602,6 +616,7 @@ Id Builder::makeAccelerationStructureNVType()
|
||||
return type->getResultId();
|
||||
}
|
||||
#endif
|
||||
|
||||
Id Builder::getDerefTypeId(Id resultId) const
|
||||
{
|
||||
Id typeId = getTypeId(resultId);
|
||||
@ -939,6 +954,10 @@ Id Builder::makeFloatConstant(float f, bool specConstant)
|
||||
|
||||
Id Builder::makeDoubleConstant(double d, bool specConstant)
|
||||
{
|
||||
#ifdef GLSLANG_WEB
|
||||
assert(0);
|
||||
return NoResult;
|
||||
#else
|
||||
Op opcode = specConstant ? OpSpecConstant : OpConstant;
|
||||
Id typeId = makeFloatType(64);
|
||||
union { double db; unsigned long long ull; } u;
|
||||
@ -963,10 +982,15 @@ Id Builder::makeDoubleConstant(double d, bool specConstant)
|
||||
module.mapInstruction(c);
|
||||
|
||||
return c->getResultId();
|
||||
#endif
|
||||
}
|
||||
|
||||
Id Builder::makeFloat16Constant(float f16, bool specConstant)
|
||||
{
|
||||
#ifdef GLSLANG_WEB
|
||||
assert(0);
|
||||
return NoResult;
|
||||
#else
|
||||
Op opcode = specConstant ? OpSpecConstant : OpConstant;
|
||||
Id typeId = makeFloatType(16);
|
||||
|
||||
@ -991,25 +1015,33 @@ Id Builder::makeFloat16Constant(float f16, bool specConstant)
|
||||
module.mapInstruction(c);
|
||||
|
||||
return c->getResultId();
|
||||
#endif
|
||||
}
|
||||
|
||||
Id Builder::makeFpConstant(Id type, double d, bool specConstant)
|
||||
{
|
||||
assert(isFloatType(type));
|
||||
#ifdef GLSLANG_WEB
|
||||
const int width = 32;
|
||||
assert(width == getScalarTypeWidth(type));
|
||||
#else
|
||||
const int width = getScalarTypeWidth(type);
|
||||
#endif
|
||||
|
||||
switch (getScalarTypeWidth(type)) {
|
||||
case 16:
|
||||
return makeFloat16Constant((float)d, specConstant);
|
||||
case 32:
|
||||
return makeFloatConstant((float)d, specConstant);
|
||||
case 64:
|
||||
return makeDoubleConstant(d, specConstant);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
assert(isFloatType(type));
|
||||
|
||||
assert(false);
|
||||
return NoResult;
|
||||
switch (width) {
|
||||
case 16:
|
||||
return makeFloat16Constant((float)d, specConstant);
|
||||
case 32:
|
||||
return makeFloatConstant((float)d, specConstant);
|
||||
case 64:
|
||||
return makeDoubleConstant(d, specConstant);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
assert(false);
|
||||
return NoResult;
|
||||
}
|
||||
|
||||
Id Builder::findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps)
|
||||
@ -1825,7 +1857,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
|
||||
if (parameters.component != NoResult)
|
||||
texArgs[numArgs++] = parameters.component;
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
#ifndef GLSLANG_WEB
|
||||
if (parameters.granularity != NoResult)
|
||||
texArgs[numArgs++] = parameters.granularity;
|
||||
if (parameters.coarse != NoResult)
|
||||
@ -1872,6 +1904,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
|
||||
mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask);
|
||||
texArgs[numArgs++] = parameters.offsets;
|
||||
}
|
||||
#ifndef GLSLANG_WEB
|
||||
if (parameters.sample) {
|
||||
mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask);
|
||||
texArgs[numArgs++] = parameters.sample;
|
||||
@ -1889,6 +1922,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
|
||||
if (parameters.volatil) {
|
||||
mask = mask | ImageOperandsVolatileTexelKHRMask;
|
||||
}
|
||||
#endif
|
||||
mask = mask | signExtensionMask;
|
||||
if (mask == ImageOperandsMaskNone)
|
||||
--numArgs; // undo speculative reservation for the mask argument
|
||||
@ -1904,10 +1938,9 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
|
||||
opCode = OpImageSparseFetch;
|
||||
else
|
||||
opCode = OpImageFetch;
|
||||
#ifdef NV_EXTENSIONS
|
||||
#ifndef GLSLANG_WEB
|
||||
} else if (parameters.granularity && parameters.coarse) {
|
||||
opCode = OpImageSampleFootprintNV;
|
||||
#endif
|
||||
} else if (gather) {
|
||||
if (parameters.Dref)
|
||||
if (sparse)
|
||||
@ -1919,6 +1952,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
|
||||
opCode = OpImageSparseGather;
|
||||
else
|
||||
opCode = OpImageGather;
|
||||
#endif
|
||||
} else if (explicitLod) {
|
||||
if (parameters.Dref) {
|
||||
if (proj)
|
||||
@ -2067,11 +2101,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
|
||||
break;
|
||||
}
|
||||
case OpImageQueryLod:
|
||||
#ifdef AMD_EXTENSIONS
|
||||
resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2);
|
||||
#else
|
||||
resultType = makeVectorType(makeFloatType(32), 2);
|
||||
#endif
|
||||
break;
|
||||
case OpImageQueryLevels:
|
||||
case OpImageQuerySamples:
|
||||
@ -2089,6 +2119,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
|
||||
if (parameters.lod)
|
||||
query->addIdOperand(parameters.lod);
|
||||
buildPoint->addInstruction(std::unique_ptr<Instruction>(query));
|
||||
addCapability(CapabilityImageQuery);
|
||||
|
||||
return query->getResultId();
|
||||
}
|
||||
@ -2282,7 +2313,12 @@ Id Builder::createMatrixConstructor(Decoration precision, const std::vector<Id>&
|
||||
int numRows = getTypeNumRows(resultTypeId);
|
||||
|
||||
Instruction* instr = module.getInstruction(componentTypeId);
|
||||
unsigned bitCount = instr->getImmediateOperand(0);
|
||||
#ifdef GLSLANG_WEB
|
||||
const unsigned bitCount = 32;
|
||||
assert(bitCount == instr->getImmediateOperand(0));
|
||||
#else
|
||||
const unsigned bitCount = instr->getImmediateOperand(0);
|
||||
#endif
|
||||
|
||||
// Optimize matrix constructed from a bigger matrix
|
||||
if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) {
|
||||
|
||||
40
thirdparty/glslang/SPIRV/SpvBuilder.h
vendored
40
thirdparty/glslang/SPIRV/SpvBuilder.h
vendored
@ -67,6 +67,7 @@ typedef enum {
|
||||
Spv_1_2 = (1 << 16) | (2 << 8),
|
||||
Spv_1_3 = (1 << 16) | (3 << 8),
|
||||
Spv_1_4 = (1 << 16) | (4 << 8),
|
||||
Spv_1_5 = (1 << 16) | (5 << 8),
|
||||
} SpvVersion;
|
||||
|
||||
class Builder {
|
||||
@ -105,6 +106,20 @@ public:
|
||||
void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
|
||||
void setEmitOpLines() { emitOpLines = true; }
|
||||
void addExtension(const char* ext) { extensions.insert(ext); }
|
||||
void removeExtension(const char* ext)
|
||||
{
|
||||
extensions.erase(ext);
|
||||
}
|
||||
void addIncorporatedExtension(const char* ext, SpvVersion incorporatedVersion)
|
||||
{
|
||||
if (getSpvVersion() < static_cast<unsigned>(incorporatedVersion))
|
||||
addExtension(ext);
|
||||
}
|
||||
void promoteIncorporatedExtension(const char* baseExt, const char* promoExt, SpvVersion incorporatedVersion)
|
||||
{
|
||||
removeExtension(baseExt);
|
||||
addIncorporatedExtension(promoExt, incorporatedVersion);
|
||||
}
|
||||
void addInclude(const std::string& name, const std::string& text)
|
||||
{
|
||||
spv::Id incId = getStringId(name);
|
||||
@ -201,7 +216,11 @@ public:
|
||||
bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; }
|
||||
bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; }
|
||||
bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; }
|
||||
#ifdef GLSLANG_WEB
|
||||
bool isCooperativeMatrixType(Id typeId)const { return false; }
|
||||
#else
|
||||
bool isCooperativeMatrixType(Id typeId)const { return getTypeClass(typeId) == OpTypeCooperativeMatrixNV; }
|
||||
#endif
|
||||
bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId) || isCooperativeMatrixType(typeId); }
|
||||
bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; }
|
||||
bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; }
|
||||
@ -557,6 +576,14 @@ public:
|
||||
|
||||
// Accumulate whether anything in the chain of structures has coherent decorations.
|
||||
struct CoherentFlags {
|
||||
CoherentFlags() { clear(); }
|
||||
#ifdef GLSLANG_WEB
|
||||
void clear() { }
|
||||
bool isVolatile() const { return false; }
|
||||
CoherentFlags operator |=(const CoherentFlags &other) { return *this; }
|
||||
#else
|
||||
bool isVolatile() const { return volatil; }
|
||||
|
||||
unsigned coherent : 1;
|
||||
unsigned devicecoherent : 1;
|
||||
unsigned queuefamilycoherent : 1;
|
||||
@ -577,7 +604,6 @@ public:
|
||||
isImage = 0;
|
||||
}
|
||||
|
||||
CoherentFlags() { clear(); }
|
||||
CoherentFlags operator |=(const CoherentFlags &other) {
|
||||
coherent |= other.coherent;
|
||||
devicecoherent |= other.devicecoherent;
|
||||
@ -589,6 +615,7 @@ public:
|
||||
isImage |= other.isImage;
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
};
|
||||
CoherentFlags coherentFlags;
|
||||
};
|
||||
@ -656,16 +683,21 @@ public:
|
||||
// based on the type of the base and the chain of dereferences.
|
||||
Id accessChainGetInferredType();
|
||||
|
||||
// Add capabilities, extensions, remove unneeded decorations, etc.,
|
||||
// Add capabilities, extensions, remove unneeded decorations, etc.,
|
||||
// based on the resulting SPIR-V.
|
||||
void postProcess();
|
||||
|
||||
// Prune unreachable blocks in the CFG and remove unneeded decorations.
|
||||
void postProcessCFG();
|
||||
|
||||
#ifndef GLSLANG_WEB
|
||||
// Add capabilities, extensions based on instructions in the module.
|
||||
void postProcessFeatures();
|
||||
// Hook to visit each instruction in a block in a function
|
||||
void postProcess(Instruction&);
|
||||
// Hook to visit each instruction in a reachable block in a function.
|
||||
void postProcessReachable(const Instruction&);
|
||||
// Hook to visit each non-32-bit sized float/int operation in a block.
|
||||
void postProcessType(const Instruction&, spv::Id typeId);
|
||||
#endif
|
||||
|
||||
void dump(std::vector<unsigned int>&) const;
|
||||
|
||||
|
||||
78
thirdparty/glslang/SPIRV/SpvPostProcess.cpp
vendored
78
thirdparty/glslang/SPIRV/SpvPostProcess.cpp
vendored
@ -39,6 +39,7 @@
|
||||
#include <cassert>
|
||||
#include <cstdlib>
|
||||
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <algorithm>
|
||||
|
||||
@ -51,16 +52,13 @@ namespace spv {
|
||||
#include "GLSL.std.450.h"
|
||||
#include "GLSL.ext.KHR.h"
|
||||
#include "GLSL.ext.EXT.h"
|
||||
#ifdef AMD_EXTENSIONS
|
||||
#include "GLSL.ext.AMD.h"
|
||||
#endif
|
||||
#ifdef NV_EXTENSIONS
|
||||
#include "GLSL.ext.NV.h"
|
||||
#endif
|
||||
}
|
||||
|
||||
namespace spv {
|
||||
|
||||
#ifndef GLSLANG_WEB
|
||||
// Hook to visit each operand type and result type of an instruction.
|
||||
// Will be called multiple times for one instruction, once for each typed
|
||||
// operand and the result.
|
||||
@ -160,7 +158,6 @@ void Builder::postProcessType(const Instruction& inst, Id typeId)
|
||||
}
|
||||
break;
|
||||
case OpExtInst:
|
||||
#if AMD_EXTENSIONS
|
||||
switch (inst.getImmediateOperand(1)) {
|
||||
case GLSLstd450Frexp:
|
||||
case GLSLstd450FrexpStruct:
|
||||
@ -176,7 +173,6 @@ void Builder::postProcessType(const Instruction& inst, Id typeId)
|
||||
default:
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
if (basicTypeOp == OpTypeFloat && width == 16)
|
||||
@ -222,12 +218,10 @@ void Builder::postProcess(Instruction& inst)
|
||||
addCapability(CapabilityImageQuery);
|
||||
break;
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
case OpGroupNonUniformPartitionNV:
|
||||
addExtension(E_SPV_NV_shader_subgroup_partitioned);
|
||||
addCapability(CapabilityGroupNonUniformPartitionedNV);
|
||||
break;
|
||||
#endif
|
||||
|
||||
case OpLoad:
|
||||
case OpStore:
|
||||
@ -326,17 +320,16 @@ void Builder::postProcess(Instruction& inst)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Called for each instruction in a reachable block.
|
||||
void Builder::postProcessReachable(const Instruction&)
|
||||
{
|
||||
// did have code here, but questionable to do so without deleting the instructions
|
||||
}
|
||||
#endif
|
||||
|
||||
// comment in header
|
||||
void Builder::postProcess()
|
||||
void Builder::postProcessCFG()
|
||||
{
|
||||
// reachableBlocks is the set of blockss reached via control flow, or which are
|
||||
// unreachable continue targert or unreachable merge.
|
||||
std::unordered_set<const Block*> reachableBlocks;
|
||||
std::unordered_map<Block*, Block*> headerForUnreachableContinue;
|
||||
std::unordered_set<Block*> unreachableMerges;
|
||||
std::unordered_set<Id> unreachableDefinitions;
|
||||
// Collect IDs defined in unreachable blocks. For each function, label the
|
||||
// reachable blocks first. Then for each unreachable block, collect the
|
||||
@ -344,16 +337,41 @@ void Builder::postProcess()
|
||||
for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
|
||||
Function* f = *fi;
|
||||
Block* entry = f->getEntryBlock();
|
||||
inReadableOrder(entry, [&reachableBlocks](const Block* b) { reachableBlocks.insert(b); });
|
||||
inReadableOrder(entry,
|
||||
[&reachableBlocks, &unreachableMerges, &headerForUnreachableContinue]
|
||||
(Block* b, ReachReason why, Block* header) {
|
||||
reachableBlocks.insert(b);
|
||||
if (why == ReachDeadContinue) headerForUnreachableContinue[b] = header;
|
||||
if (why == ReachDeadMerge) unreachableMerges.insert(b);
|
||||
});
|
||||
for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
|
||||
Block* b = *bi;
|
||||
if (reachableBlocks.count(b) == 0) {
|
||||
for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
|
||||
if (unreachableMerges.count(b) != 0 || headerForUnreachableContinue.count(b) != 0) {
|
||||
auto ii = b->getInstructions().cbegin();
|
||||
++ii; // Keep potential decorations on the label.
|
||||
for (; ii != b->getInstructions().cend(); ++ii)
|
||||
unreachableDefinitions.insert(ii->get()->getResultId());
|
||||
} else if (reachableBlocks.count(b) == 0) {
|
||||
// The normal case for unreachable code. All definitions are considered dead.
|
||||
for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ++ii)
|
||||
unreachableDefinitions.insert(ii->get()->getResultId());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Modify unreachable merge blocks and unreachable continue targets.
|
||||
// Delete their contents.
|
||||
for (auto mergeIter = unreachableMerges.begin(); mergeIter != unreachableMerges.end(); ++mergeIter) {
|
||||
(*mergeIter)->rewriteAsCanonicalUnreachableMerge();
|
||||
}
|
||||
for (auto continueIter = headerForUnreachableContinue.begin();
|
||||
continueIter != headerForUnreachableContinue.end();
|
||||
++continueIter) {
|
||||
Block* continue_target = continueIter->first;
|
||||
Block* header = continueIter->second;
|
||||
continue_target->rewriteAsCanonicalUnreachableContinue(header);
|
||||
}
|
||||
|
||||
// Remove unneeded decorations, for unreachable instructions
|
||||
decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
|
||||
[&unreachableDefinitions](std::unique_ptr<Instruction>& I) -> bool {
|
||||
@ -361,7 +379,11 @@ void Builder::postProcess()
|
||||
return unreachableDefinitions.count(decoration_id) != 0;
|
||||
}),
|
||||
decorations.end());
|
||||
}
|
||||
|
||||
#ifndef GLSLANG_WEB
|
||||
// comment in header
|
||||
void Builder::postProcessFeatures() {
|
||||
// Add per-instruction capabilities, extensions, etc.,
|
||||
|
||||
// Look for any 8/16 bit type in physical storage buffer class, and set the
|
||||
@ -371,24 +393,17 @@ void Builder::postProcess()
|
||||
Instruction* type = groupedTypes[OpTypePointer][t];
|
||||
if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) {
|
||||
if (containsType(type->getIdOperand(1), OpTypeInt, 8)) {
|
||||
addExtension(spv::E_SPV_KHR_8bit_storage);
|
||||
addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5);
|
||||
addCapability(spv::CapabilityStorageBuffer8BitAccess);
|
||||
}
|
||||
if (containsType(type->getIdOperand(1), OpTypeInt, 16) ||
|
||||
containsType(type->getIdOperand(1), OpTypeFloat, 16)) {
|
||||
addExtension(spv::E_SPV_KHR_16bit_storage);
|
||||
addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3);
|
||||
addCapability(spv::CapabilityStorageBuffer16BitAccess);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// process all reachable instructions...
|
||||
for (auto bi = reachableBlocks.cbegin(); bi != reachableBlocks.cend(); ++bi) {
|
||||
const Block* block = *bi;
|
||||
const auto function = [this](const std::unique_ptr<Instruction>& inst) { postProcessReachable(*inst.get()); };
|
||||
std::for_each(block->getInstructions().begin(), block->getInstructions().end(), function);
|
||||
}
|
||||
|
||||
// process all block-contained instructions
|
||||
for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
|
||||
Function* f = *fi;
|
||||
@ -422,5 +437,14 @@ void Builder::postProcess()
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// comment in header
|
||||
void Builder::postProcess() {
|
||||
postProcessCFG();
|
||||
#ifndef GLSLANG_WEB
|
||||
postProcessFeatures();
|
||||
#endif
|
||||
}
|
||||
|
||||
}; // end spv namespace
|
||||
|
||||
8
thirdparty/glslang/SPIRV/SpvTools.cpp
vendored
8
thirdparty/glslang/SPIRV/SpvTools.cpp
vendored
@ -67,6 +67,8 @@ spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLog
|
||||
logger->missingFunctionality("Target version for SPIRV-Tools validator");
|
||||
return spv_target_env::SPV_ENV_VULKAN_1_1;
|
||||
}
|
||||
case glslang::EShTargetVulkan_1_2:
|
||||
return spv_target_env::SPV_ENV_VULKAN_1_2;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -103,7 +105,7 @@ void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& s
|
||||
|
||||
// Apply the SPIRV-Tools validator to generated SPIR-V.
|
||||
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
|
||||
spv::SpvBuildLogger* logger)
|
||||
spv::SpvBuildLogger* logger, bool prelegalization)
|
||||
{
|
||||
// validate
|
||||
spv_context context = spvContextCreate(MapToSpirvToolsEnv(intermediate.getSpv(), logger));
|
||||
@ -111,6 +113,7 @@ void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<
|
||||
spv_diagnostic diagnostic = nullptr;
|
||||
spv_validator_options options = spvValidatorOptionsCreate();
|
||||
spvValidatorOptionsSetRelaxBlockLayout(options, intermediate.usingHlslOffsets());
|
||||
spvValidatorOptionsSetBeforeHlslLegalization(options, prelegalization);
|
||||
spvValidateWithOptions(context, options, &binary, &diagnostic);
|
||||
|
||||
// report
|
||||
@ -172,6 +175,7 @@ void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>
|
||||
if (options->generateDebugInfo) {
|
||||
optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass());
|
||||
}
|
||||
optimizer.RegisterPass(spvtools::CreateWrapOpKillPass());
|
||||
optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
|
||||
optimizer.RegisterPass(spvtools::CreateMergeReturnPass());
|
||||
optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass());
|
||||
@ -195,8 +199,6 @@ void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>
|
||||
optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
|
||||
if (options->optimizeSize) {
|
||||
optimizer.RegisterPass(spvtools::CreateRedundancyEliminationPass());
|
||||
// TODO(greg-lunarg): Add this when AMD driver issues are resolved
|
||||
// optimizer.RegisterPass(CreateCommonUniformElimPass());
|
||||
}
|
||||
optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
|
||||
optimizer.RegisterPass(spvtools::CreateCFGCleanupPass());
|
||||
|
||||
8
thirdparty/glslang/SPIRV/SpvTools.h
vendored
8
thirdparty/glslang/SPIRV/SpvTools.h
vendored
@ -41,10 +41,12 @@
|
||||
#ifndef GLSLANG_SPV_TOOLS_H
|
||||
#define GLSLANG_SPV_TOOLS_H
|
||||
|
||||
#ifdef ENABLE_OPT
|
||||
#include <vector>
|
||||
#include <ostream>
|
||||
#endif
|
||||
|
||||
#include "../glslang/MachineIndependent/localintermediate.h"
|
||||
#include "glslang/MachineIndependent/localintermediate.h"
|
||||
#include "Logger.h"
|
||||
|
||||
namespace glslang {
|
||||
@ -59,14 +61,14 @@ struct SpvOptions {
|
||||
bool validate;
|
||||
};
|
||||
|
||||
#if ENABLE_OPT
|
||||
#ifdef ENABLE_OPT
|
||||
|
||||
// Use the SPIRV-Tools disassembler to print SPIR-V.
|
||||
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);
|
||||
|
||||
// Apply the SPIRV-Tools validator to generated SPIR-V.
|
||||
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
|
||||
spv::SpvBuildLogger*);
|
||||
spv::SpvBuildLogger*, bool prelegalization);
|
||||
|
||||
// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
|
||||
// legalizing HLSL SPIR-V.
|
||||
|
||||
43
thirdparty/glslang/SPIRV/disassemble.cpp
vendored
43
thirdparty/glslang/SPIRV/disassemble.cpp
vendored
@ -52,26 +52,16 @@ namespace spv {
|
||||
extern "C" {
|
||||
// Include C-based headers that don't have a namespace
|
||||
#include "GLSL.std.450.h"
|
||||
#ifdef AMD_EXTENSIONS
|
||||
#include "GLSL.ext.AMD.h"
|
||||
#endif
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
#include "GLSL.ext.NV.h"
|
||||
#endif
|
||||
}
|
||||
}
|
||||
const char* GlslStd450DebugNames[spv::GLSLstd450Count];
|
||||
|
||||
namespace spv {
|
||||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
|
||||
#endif
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
static const char* GLSLextNVGetDebugNames(const char*, unsigned);
|
||||
#endif
|
||||
|
||||
static void Kill(std::ostream& out, const char* message)
|
||||
{
|
||||
@ -82,15 +72,8 @@ static void Kill(std::ostream& out, const char* message)
|
||||
// used to identify the extended instruction library imported when printing
|
||||
enum ExtInstSet {
|
||||
GLSL450Inst,
|
||||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
GLSLextAMDInst,
|
||||
#endif
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
GLSLextNVInst,
|
||||
#endif
|
||||
|
||||
OpenCLExtInst,
|
||||
};
|
||||
|
||||
@ -499,37 +482,29 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
|
||||
const char* name = idDescriptor[stream[word - 2]].c_str();
|
||||
if (0 == memcmp("OpenCL", name, 6)) {
|
||||
extInstSet = OpenCLExtInst;
|
||||
#ifdef AMD_EXTENSIONS
|
||||
} else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
|
||||
strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
|
||||
strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
|
||||
strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
|
||||
extInstSet = GLSLextAMDInst;
|
||||
#endif
|
||||
#ifdef NV_EXTENSIONS
|
||||
}else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
|
||||
} else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
|
||||
strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 ||
|
||||
strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 ||
|
||||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
|
||||
strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
|
||||
strcmp(spv::E_SPV_NV_mesh_shader, name) == 0) {
|
||||
extInstSet = GLSLextNVInst;
|
||||
#endif
|
||||
}
|
||||
unsigned entrypoint = stream[word - 1];
|
||||
if (extInstSet == GLSL450Inst) {
|
||||
if (entrypoint < GLSLstd450Count) {
|
||||
out << "(" << GlslStd450DebugNames[entrypoint] << ")";
|
||||
}
|
||||
#ifdef AMD_EXTENSIONS
|
||||
} else if (extInstSet == GLSLextAMDInst) {
|
||||
out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
|
||||
#endif
|
||||
#ifdef NV_EXTENSIONS
|
||||
}
|
||||
else if (extInstSet == GLSLextNVInst) {
|
||||
out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")";
|
||||
#endif
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -648,9 +623,11 @@ static void GLSLstd450GetDebugNames(const char** names)
|
||||
names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid";
|
||||
names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample";
|
||||
names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset";
|
||||
names[GLSLstd450NMin] = "NMin";
|
||||
names[GLSLstd450NMax] = "NMax";
|
||||
names[GLSLstd450NClamp] = "NClamp";
|
||||
}
|
||||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
|
||||
{
|
||||
if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
|
||||
@ -692,18 +669,17 @@ static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint
|
||||
|
||||
return "Bad";
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
|
||||
{
|
||||
if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 ||
|
||||
strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 ||
|
||||
strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 ||
|
||||
strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 ||
|
||||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
|
||||
strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
|
||||
strcmp(name, spv::E_SPV_NV_mesh_shader) == 0) {
|
||||
strcmp(name, spv::E_SPV_NVX_multiview_per_view_attributes) == 0 ||
|
||||
strcmp(name, spv::E_SPV_NV_fragment_shader_barycentric) == 0 ||
|
||||
strcmp(name, spv::E_SPV_NV_mesh_shader) == 0 ||
|
||||
strcmp(name, spv::E_SPV_NV_shader_image_footprint) == 0) {
|
||||
switch (entrypoint) {
|
||||
// NV builtins
|
||||
case BuiltInViewportMaskNV: return "ViewportMaskNV";
|
||||
@ -729,6 +705,8 @@ static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
|
||||
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
|
||||
case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
|
||||
case CapabilityMeshShadingNV: return "MeshShadingNV";
|
||||
case CapabilityImageFootprintNV: return "ImageFootprintNV";
|
||||
case CapabilitySampleMaskOverrideCoverageNV:return "SampleMaskOverrideCoverageNV";
|
||||
|
||||
// NV Decorations
|
||||
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
|
||||
@ -745,7 +723,6 @@ static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
|
||||
}
|
||||
return "Bad";
|
||||
}
|
||||
#endif
|
||||
|
||||
void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
|
||||
{
|
||||
|
||||
117
thirdparty/glslang/SPIRV/doc.cpp
vendored
117
thirdparty/glslang/SPIRV/doc.cpp
vendored
@ -50,12 +50,8 @@ namespace spv {
|
||||
// Include C-based headers that don't have a namespace
|
||||
#include "GLSL.ext.KHR.h"
|
||||
#include "GLSL.ext.EXT.h"
|
||||
#ifdef AMD_EXTENSIONS
|
||||
#include "GLSL.ext.AMD.h"
|
||||
#endif
|
||||
#ifdef NV_EXTENSIONS
|
||||
#include "GLSL.ext.NV.h"
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@ -98,22 +94,17 @@ const char* ExecutionModelString(int model)
|
||||
case 4: return "Fragment";
|
||||
case 5: return "GLCompute";
|
||||
case 6: return "Kernel";
|
||||
#ifdef NV_EXTENSIONS
|
||||
case ExecutionModelTaskNV: return "TaskNV";
|
||||
case ExecutionModelMeshNV: return "MeshNV";
|
||||
#endif
|
||||
|
||||
default: return "Bad";
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
case ExecutionModelRayGenerationNV: return "RayGenerationNV";
|
||||
case ExecutionModelIntersectionNV: return "IntersectionNV";
|
||||
case ExecutionModelAnyHitNV: return "AnyHitNV";
|
||||
case ExecutionModelClosestHitNV: return "ClosestHitNV";
|
||||
case ExecutionModelMissNV: return "MissNV";
|
||||
case ExecutionModelCallableNV: return "CallableNV";
|
||||
#endif
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -183,13 +174,18 @@ const char* ExecutionModeString(int mode)
|
||||
|
||||
case 4446: return "PostDepthCoverage";
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
case ExecutionModeOutputLinesNV: return "OutputLinesNV";
|
||||
case ExecutionModeOutputPrimitivesNV: return "OutputPrimitivesNV";
|
||||
case ExecutionModeOutputTrianglesNV: return "OutputTrianglesNV";
|
||||
case ExecutionModeDerivativeGroupQuadsNV: return "DerivativeGroupQuadsNV";
|
||||
case ExecutionModeDerivativeGroupLinearNV: return "DerivativeGroupLinearNV";
|
||||
#endif
|
||||
|
||||
case ExecutionModePixelInterlockOrderedEXT: return "PixelInterlockOrderedEXT";
|
||||
case ExecutionModePixelInterlockUnorderedEXT: return "PixelInterlockUnorderedEXT";
|
||||
case ExecutionModeSampleInterlockOrderedEXT: return "SampleInterlockOrderedEXT";
|
||||
case ExecutionModeSampleInterlockUnorderedEXT: return "SampleInterlockUnorderedEXT";
|
||||
case ExecutionModeShadingRateInterlockOrderedEXT: return "ShadingRateInterlockOrderedEXT";
|
||||
case ExecutionModeShadingRateInterlockUnorderedEXT: return "ShadingRateInterlockUnorderedEXT";
|
||||
|
||||
case ExecutionModeCeiling:
|
||||
default: return "Bad";
|
||||
@ -213,14 +209,12 @@ const char* StorageClassString(int StorageClass)
|
||||
case 11: return "Image";
|
||||
case 12: return "StorageBuffer";
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
case StorageClassRayPayloadNV: return "RayPayloadNV";
|
||||
case StorageClassHitAttributeNV: return "HitAttributeNV";
|
||||
case StorageClassIncomingRayPayloadNV: return "IncomingRayPayloadNV";
|
||||
case StorageClassShaderRecordBufferNV: return "ShaderRecordBufferNV";
|
||||
case StorageClassCallableDataNV: return "CallableDataNV";
|
||||
case StorageClassIncomingCallableDataNV: return "IncomingCallableDataNV";
|
||||
#endif
|
||||
|
||||
case StorageClassPhysicalStorageBufferEXT: return "PhysicalStorageBufferEXT";
|
||||
|
||||
@ -282,10 +276,7 @@ const char* DecorationString(int decoration)
|
||||
case DecorationCeiling:
|
||||
default: return "Bad";
|
||||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
case DecorationExplicitInterpAMD: return "ExplicitInterpAMD";
|
||||
#endif
|
||||
#ifdef NV_EXTENSIONS
|
||||
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
|
||||
case DecorationPassthroughNV: return "PassthroughNV";
|
||||
case DecorationViewportRelativeNV: return "ViewportRelativeNV";
|
||||
@ -294,7 +285,6 @@ const char* DecorationString(int decoration)
|
||||
case DecorationPerViewNV: return "PerViewNV";
|
||||
case DecorationPerTaskNV: return "PerTaskNV";
|
||||
case DecorationPerVertexNV: return "PerVertexNV";
|
||||
#endif
|
||||
|
||||
case DecorationNonUniformEXT: return "DecorationNonUniformEXT";
|
||||
case DecorationHlslCounterBufferGOOGLE: return "DecorationHlslCounterBufferGOOGLE";
|
||||
@ -364,7 +354,6 @@ const char* BuiltInString(int builtIn)
|
||||
case 4426: return "DrawIndex";
|
||||
case 5014: return "FragStencilRefEXT";
|
||||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
case 4992: return "BaryCoordNoPerspAMD";
|
||||
case 4993: return "BaryCoordNoPerspCentroidAMD";
|
||||
case 4994: return "BaryCoordNoPerspSampleAMD";
|
||||
@ -372,9 +361,6 @@ const char* BuiltInString(int builtIn)
|
||||
case 4996: return "BaryCoordSmoothCentroidAMD";
|
||||
case 4997: return "BaryCoordSmoothSampleAMD";
|
||||
case 4998: return "BaryCoordPullModelAMD";
|
||||
#endif
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
case BuiltInLaunchIdNV: return "LaunchIdNV";
|
||||
case BuiltInLaunchSizeNV: return "LaunchSizeNV";
|
||||
case BuiltInWorldRayOriginNV: return "WorldRayOriginNV";
|
||||
@ -398,15 +384,12 @@ const char* BuiltInString(int builtIn)
|
||||
// case BuiltInInvocationsPerPixelNV: return "InvocationsPerPixelNV"; // superseded by BuiltInFragInvocationCountEXT
|
||||
case BuiltInBaryCoordNV: return "BaryCoordNV";
|
||||
case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
|
||||
#endif
|
||||
|
||||
case BuiltInFragSizeEXT: return "FragSizeEXT";
|
||||
case BuiltInFragInvocationCountEXT: return "FragInvocationCountEXT";
|
||||
|
||||
case 5264: return "FullyCoveredEXT";
|
||||
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
case BuiltInTaskCountNV: return "TaskCountNV";
|
||||
case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
|
||||
case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
|
||||
@ -415,7 +398,10 @@ const char* BuiltInString(int builtIn)
|
||||
case BuiltInLayerPerViewNV: return "LayerPerViewNV";
|
||||
case BuiltInMeshViewCountNV: return "MeshViewCountNV";
|
||||
case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
|
||||
#endif
|
||||
case BuiltInWarpsPerSMNV: return "WarpsPerSMNV";
|
||||
case BuiltInSMCountNV: return "SMCountNV";
|
||||
case BuiltInWarpIDNV: return "WarpIDNV";
|
||||
case BuiltInSMIDNV: return "SMIDNV";
|
||||
|
||||
default: return "Bad";
|
||||
}
|
||||
@ -770,11 +756,9 @@ const char* GroupOperationString(int gop)
|
||||
case GroupOperationInclusiveScan: return "InclusiveScan";
|
||||
case GroupOperationExclusiveScan: return "ExclusiveScan";
|
||||
case GroupOperationClusteredReduce: return "ClusteredReduce";
|
||||
#ifdef NV_EXTENSIONS
|
||||
case GroupOperationPartitionedReduceNV: return "PartitionedReduceNV";
|
||||
case GroupOperationPartitionedInclusiveScanNV: return "PartitionedInclusiveScanNV";
|
||||
case GroupOperationPartitionedExclusiveScanNV: return "PartitionedExclusiveScanNV";
|
||||
#endif
|
||||
|
||||
default: return "Bad";
|
||||
}
|
||||
@ -882,26 +866,23 @@ const char* CapabilityString(int info)
|
||||
case CapabilityStoragePushConstant16: return "StoragePushConstant16";
|
||||
case CapabilityStorageInputOutput16: return "StorageInputOutput16";
|
||||
|
||||
case CapabilityStorageBuffer8BitAccess: return "CapabilityStorageBuffer8BitAccess";
|
||||
case CapabilityUniformAndStorageBuffer8BitAccess: return "CapabilityUniformAndStorageBuffer8BitAccess";
|
||||
case CapabilityStoragePushConstant8: return "CapabilityStoragePushConstant8";
|
||||
case CapabilityStorageBuffer8BitAccess: return "StorageBuffer8BitAccess";
|
||||
case CapabilityUniformAndStorageBuffer8BitAccess: return "UniformAndStorageBuffer8BitAccess";
|
||||
case CapabilityStoragePushConstant8: return "StoragePushConstant8";
|
||||
|
||||
case CapabilityDeviceGroup: return "DeviceGroup";
|
||||
case CapabilityMultiView: return "MultiView";
|
||||
|
||||
case CapabilityStencilExportEXT: return "StencilExportEXT";
|
||||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
case CapabilityFloat16ImageAMD: return "Float16ImageAMD";
|
||||
case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD";
|
||||
case CapabilityFragmentMaskAMD: return "FragmentMaskAMD";
|
||||
case CapabilityImageReadWriteLodAMD: return "ImageReadWriteLodAMD";
|
||||
#endif
|
||||
|
||||
case CapabilityAtomicStorageOps: return "AtomicStorageOps";
|
||||
|
||||
case CapabilitySampleMaskPostDepthCoverage: return "SampleMaskPostDepthCoverage";
|
||||
#ifdef NV_EXTENSIONS
|
||||
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
|
||||
case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV";
|
||||
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
|
||||
@ -913,33 +894,44 @@ const char* CapabilityString(int info)
|
||||
case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV";
|
||||
case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
|
||||
case CapabilityMeshShadingNV: return "MeshShadingNV";
|
||||
// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by CapabilityFragmentDensityEXT
|
||||
#endif
|
||||
case CapabilityImageFootprintNV: return "ImageFootprintNV";
|
||||
// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by FragmentDensityEXT
|
||||
case CapabilitySampleMaskOverrideCoverageNV: return "SampleMaskOverrideCoverageNV";
|
||||
case CapabilityFragmentDensityEXT: return "FragmentDensityEXT";
|
||||
|
||||
case CapabilityFragmentFullyCoveredEXT: return "FragmentFullyCoveredEXT";
|
||||
|
||||
case CapabilityShaderNonUniformEXT: return "CapabilityShaderNonUniformEXT";
|
||||
case CapabilityRuntimeDescriptorArrayEXT: return "CapabilityRuntimeDescriptorArrayEXT";
|
||||
case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "CapabilityInputAttachmentArrayDynamicIndexingEXT";
|
||||
case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "CapabilityUniformTexelBufferArrayDynamicIndexingEXT";
|
||||
case CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "CapabilityStorageTexelBufferArrayDynamicIndexingEXT";
|
||||
case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "CapabilityUniformBufferArrayNonUniformIndexingEXT";
|
||||
case CapabilitySampledImageArrayNonUniformIndexingEXT: return "CapabilitySampledImageArrayNonUniformIndexingEXT";
|
||||
case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "CapabilityStorageBufferArrayNonUniformIndexingEXT";
|
||||
case CapabilityStorageImageArrayNonUniformIndexingEXT: return "CapabilityStorageImageArrayNonUniformIndexingEXT";
|
||||
case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "CapabilityInputAttachmentArrayNonUniformIndexingEXT";
|
||||
case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "CapabilityUniformTexelBufferArrayNonUniformIndexingEXT";
|
||||
case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "CapabilityStorageTexelBufferArrayNonUniformIndexingEXT";
|
||||
case CapabilityShaderNonUniformEXT: return "ShaderNonUniformEXT";
|
||||
case CapabilityRuntimeDescriptorArrayEXT: return "RuntimeDescriptorArrayEXT";
|
||||
case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "InputAttachmentArrayDynamicIndexingEXT";
|
||||
case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "UniformTexelBufferArrayDynamicIndexingEXT";
|
||||
case CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "StorageTexelBufferArrayDynamicIndexingEXT";
|
||||
case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "UniformBufferArrayNonUniformIndexingEXT";
|
||||
case CapabilitySampledImageArrayNonUniformIndexingEXT: return "SampledImageArrayNonUniformIndexingEXT";
|
||||
case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "StorageBufferArrayNonUniformIndexingEXT";
|
||||
case CapabilityStorageImageArrayNonUniformIndexingEXT: return "StorageImageArrayNonUniformIndexingEXT";
|
||||
case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "InputAttachmentArrayNonUniformIndexingEXT";
|
||||
case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "UniformTexelBufferArrayNonUniformIndexingEXT";
|
||||
case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "StorageTexelBufferArrayNonUniformIndexingEXT";
|
||||
|
||||
case CapabilityVulkanMemoryModelKHR: return "CapabilityVulkanMemoryModelKHR";
|
||||
case CapabilityVulkanMemoryModelDeviceScopeKHR: return "CapabilityVulkanMemoryModelDeviceScopeKHR";
|
||||
case CapabilityVulkanMemoryModelKHR: return "VulkanMemoryModelKHR";
|
||||
case CapabilityVulkanMemoryModelDeviceScopeKHR: return "VulkanMemoryModelDeviceScopeKHR";
|
||||
|
||||
case CapabilityPhysicalStorageBufferAddressesEXT: return "CapabilityPhysicalStorageBufferAddressesEXT";
|
||||
case CapabilityPhysicalStorageBufferAddressesEXT: return "PhysicalStorageBufferAddressesEXT";
|
||||
|
||||
case CapabilityVariablePointers: return "CapabilityVariablePointers";
|
||||
case CapabilityVariablePointers: return "VariablePointers";
|
||||
|
||||
case CapabilityCooperativeMatrixNV: return "CapabilityCooperativeMatrixNV";
|
||||
case CapabilityCooperativeMatrixNV: return "CooperativeMatrixNV";
|
||||
case CapabilityShaderSMBuiltinsNV: return "ShaderSMBuiltinsNV";
|
||||
|
||||
case CapabilityFragmentShaderSampleInterlockEXT: return "CapabilityFragmentShaderSampleInterlockEXT";
|
||||
case CapabilityFragmentShaderPixelInterlockEXT: return "CapabilityFragmentShaderPixelInterlockEXT";
|
||||
case CapabilityFragmentShaderShadingRateInterlockEXT: return "CapabilityFragmentShaderShadingRateInterlockEXT";
|
||||
|
||||
case CapabilityDemoteToHelperInvocationEXT: return "DemoteToHelperInvocationEXT";
|
||||
case CapabilityShaderClockKHR: return "ShaderClockKHR";
|
||||
|
||||
case CapabilityIntegerFunctions2INTEL: return "CapabilityIntegerFunctions2INTEL";
|
||||
|
||||
default: return "Bad";
|
||||
}
|
||||
@ -1316,7 +1308,6 @@ const char* OpcodeString(int op)
|
||||
case 4430: return "OpSubgroupAllEqualKHR";
|
||||
case 4432: return "OpSubgroupReadInvocationKHR";
|
||||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
case 5000: return "OpGroupIAddNonUniformAMD";
|
||||
case 5001: return "OpGroupFAddNonUniformAMD";
|
||||
case 5002: return "OpGroupFMinNonUniformAMD";
|
||||
@ -1328,12 +1319,12 @@ const char* OpcodeString(int op)
|
||||
|
||||
case 5011: return "OpFragmentMaskFetchAMD";
|
||||
case 5012: return "OpFragmentFetchAMD";
|
||||
#endif
|
||||
|
||||
case OpReadClockKHR: return "OpReadClockKHR";
|
||||
|
||||
case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE";
|
||||
case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE";
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV";
|
||||
case OpReportIntersectionNV: return "OpReportIntersectionNV";
|
||||
case OpIgnoreIntersectionNV: return "OpIgnoreIntersectionNV";
|
||||
@ -1343,13 +1334,17 @@ const char* OpcodeString(int op)
|
||||
case OpExecuteCallableNV: return "OpExecuteCallableNV";
|
||||
case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV";
|
||||
case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV";
|
||||
#endif
|
||||
|
||||
case OpTypeCooperativeMatrixNV: return "OpTypeCooperativeMatrixNV";
|
||||
case OpCooperativeMatrixLoadNV: return "OpCooperativeMatrixLoadNV";
|
||||
case OpCooperativeMatrixStoreNV: return "OpCooperativeMatrixStoreNV";
|
||||
case OpCooperativeMatrixMulAddNV: return "OpCooperativeMatrixMulAddNV";
|
||||
case OpCooperativeMatrixLengthNV: return "OpCooperativeMatrixLengthNV";
|
||||
case OpDemoteToHelperInvocationEXT: return "OpDemoteToHelperInvocationEXT";
|
||||
case OpIsHelperInvocationEXT: return "OpIsHelperInvocationEXT";
|
||||
|
||||
case OpBeginInvocationInterlockEXT: return "OpBeginInvocationInterlockEXT";
|
||||
case OpEndInvocationInterlockEXT: return "OpEndInvocationInterlockEXT";
|
||||
|
||||
default:
|
||||
return "Bad";
|
||||
@ -1464,6 +1459,8 @@ void Parameterize()
|
||||
InstructionDesc[OpModuleProcessed].setResultAndType(false, false);
|
||||
InstructionDesc[OpTypeCooperativeMatrixNV].setResultAndType(true, false);
|
||||
InstructionDesc[OpCooperativeMatrixStoreNV].setResultAndType(false, false);
|
||||
InstructionDesc[OpBeginInvocationInterlockEXT].setResultAndType(false, false);
|
||||
InstructionDesc[OpEndInvocationInterlockEXT].setResultAndType(false, false);
|
||||
|
||||
// Specific additional context-dependent operands
|
||||
|
||||
@ -2656,7 +2653,6 @@ void Parameterize()
|
||||
|
||||
InstructionDesc[OpModuleProcessed].operands.push(OperandLiteralString, "'process'");
|
||||
|
||||
#ifdef AMD_EXTENSIONS
|
||||
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
|
||||
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
|
||||
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'");
|
||||
@ -2695,9 +2691,7 @@ void Parameterize()
|
||||
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'");
|
||||
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'");
|
||||
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'");
|
||||
#endif
|
||||
|
||||
#ifdef NV_EXTENSIONS
|
||||
InstructionDesc[OpGroupNonUniformPartitionNV].operands.push(OperandId, "X");
|
||||
|
||||
InstructionDesc[OpTypeAccelerationStructureNV].setResultAndType(true, false);
|
||||
@ -2735,7 +2729,6 @@ void Parameterize()
|
||||
|
||||
InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Index Offset'");
|
||||
InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Packed Indices'");
|
||||
#endif
|
||||
|
||||
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Component Type'");
|
||||
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Scope'");
|
||||
@ -2762,6 +2755,10 @@ void Parameterize()
|
||||
InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'C'");
|
||||
|
||||
InstructionDesc[OpCooperativeMatrixLengthNV].operands.push(OperandId, "'Type'");
|
||||
|
||||
InstructionDesc[OpDemoteToHelperInvocationEXT].setResultAndType(false, false);
|
||||
|
||||
InstructionDesc[OpReadClockKHR].operands.push(OperandScope, "'Scope'");
|
||||
}
|
||||
|
||||
}; // end spv namespace
|
||||
|
||||
104
thirdparty/glslang/SPIRV/spirv.hpp
vendored
104
thirdparty/glslang/SPIRV/spirv.hpp
vendored
@ -91,6 +91,7 @@ enum AddressingModel {
AddressingModelLogical = 0,
AddressingModelPhysical32 = 1,
AddressingModelPhysical64 = 2,
AddressingModelPhysicalStorageBuffer64 = 5348,
AddressingModelPhysicalStorageBuffer64EXT = 5348,
AddressingModelMax = 0x7fffffff,
};
@ -99,6 +100,7 @@ enum MemoryModel {
MemoryModelSimple = 0,
MemoryModelGLSL450 = 1,
MemoryModelOpenCL = 2,
MemoryModelVulkan = 3,
MemoryModelVulkanKHR = 3,
MemoryModelMax = 0x7fffffff,
};
@ -154,6 +156,12 @@ enum ExecutionMode {
ExecutionModeDerivativeGroupQuadsNV = 5289,
ExecutionModeDerivativeGroupLinearNV = 5290,
ExecutionModeOutputTrianglesNV = 5298,
ExecutionModePixelInterlockOrderedEXT = 5366,
ExecutionModePixelInterlockUnorderedEXT = 5367,
ExecutionModeSampleInterlockOrderedEXT = 5368,
ExecutionModeSampleInterlockUnorderedEXT = 5369,
ExecutionModeShadingRateInterlockOrderedEXT = 5370,
ExecutionModeShadingRateInterlockUnorderedEXT = 5371,
ExecutionModeMax = 0x7fffffff,
};

@ -177,6 +185,7 @@ enum StorageClass {
StorageClassHitAttributeNV = 5339,
StorageClassIncomingRayPayloadNV = 5342,
StorageClassShaderRecordBufferNV = 5343,
StorageClassPhysicalStorageBuffer = 5349,
StorageClassPhysicalStorageBufferEXT = 5349,
StorageClassMax = 0x7fffffff,
};
@ -305,9 +314,13 @@ enum ImageOperandsShift {
ImageOperandsConstOffsetsShift = 5,
ImageOperandsSampleShift = 6,
ImageOperandsMinLodShift = 7,
ImageOperandsMakeTexelAvailableShift = 8,
ImageOperandsMakeTexelAvailableKHRShift = 8,
ImageOperandsMakeTexelVisibleShift = 9,
ImageOperandsMakeTexelVisibleKHRShift = 9,
ImageOperandsNonPrivateTexelShift = 10,
ImageOperandsNonPrivateTexelKHRShift = 10,
ImageOperandsVolatileTexelShift = 11,
ImageOperandsVolatileTexelKHRShift = 11,
ImageOperandsSignExtendShift = 12,
ImageOperandsZeroExtendShift = 13,
@ -324,9 +337,13 @@ enum ImageOperandsMask {
ImageOperandsConstOffsetsMask = 0x00000020,
ImageOperandsSampleMask = 0x00000040,
ImageOperandsMinLodMask = 0x00000080,
ImageOperandsMakeTexelAvailableMask = 0x00000100,
ImageOperandsMakeTexelAvailableKHRMask = 0x00000100,
ImageOperandsMakeTexelVisibleMask = 0x00000200,
ImageOperandsMakeTexelVisibleKHRMask = 0x00000200,
ImageOperandsNonPrivateTexelMask = 0x00000400,
ImageOperandsNonPrivateTexelKHRMask = 0x00000400,
ImageOperandsVolatileTexelMask = 0x00000800,
ImageOperandsVolatileTexelKHRMask = 0x00000800,
ImageOperandsSignExtendMask = 0x00001000,
ImageOperandsZeroExtendMask = 0x00002000,
@ -442,13 +459,17 @@ enum Decoration {
DecorationPerViewNV = 5272,
DecorationPerTaskNV = 5273,
DecorationPerVertexNV = 5285,
DecorationNonUniform = 5300,
DecorationNonUniformEXT = 5300,
DecorationRestrictPointer = 5355,
DecorationRestrictPointerEXT = 5355,
DecorationAliasedPointer = 5356,
DecorationAliasedPointerEXT = 5356,
DecorationCounterBuffer = 5634,
DecorationHlslCounterBufferGOOGLE = 5634,
DecorationHlslSemanticGOOGLE = 5635,
DecorationUserSemantic = 5635,
DecorationUserTypeGOOGLE = 5636,
DecorationMax = 0x7fffffff,
};

@ -551,6 +572,10 @@ enum BuiltIn {
BuiltInHitTNV = 5332,
BuiltInHitKindNV = 5333,
BuiltInIncomingRayFlagsNV = 5351,
BuiltInWarpsPerSMNV = 5374,
BuiltInSMCountNV = 5375,
BuiltInWarpIDNV = 5376,
BuiltInSMIDNV = 5377,
BuiltInMax = 0x7fffffff,
};

@ -619,9 +644,13 @@ enum MemorySemanticsShift {
MemorySemanticsCrossWorkgroupMemoryShift = 9,
MemorySemanticsAtomicCounterMemoryShift = 10,
MemorySemanticsImageMemoryShift = 11,
MemorySemanticsOutputMemoryShift = 12,
MemorySemanticsOutputMemoryKHRShift = 12,
MemorySemanticsMakeAvailableShift = 13,
MemorySemanticsMakeAvailableKHRShift = 13,
MemorySemanticsMakeVisibleShift = 14,
MemorySemanticsMakeVisibleKHRShift = 14,
MemorySemanticsVolatileShift = 15,
MemorySemanticsMax = 0x7fffffff,
};

@ -637,17 +666,24 @@ enum MemorySemanticsMask {
MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
MemorySemanticsAtomicCounterMemoryMask = 0x00000400,
MemorySemanticsImageMemoryMask = 0x00000800,
MemorySemanticsOutputMemoryMask = 0x00001000,
MemorySemanticsOutputMemoryKHRMask = 0x00001000,
MemorySemanticsMakeAvailableMask = 0x00002000,
MemorySemanticsMakeAvailableKHRMask = 0x00002000,
MemorySemanticsMakeVisibleMask = 0x00004000,
MemorySemanticsMakeVisibleKHRMask = 0x00004000,
MemorySemanticsVolatileMask = 0x00008000,
};

enum MemoryAccessShift {
MemoryAccessVolatileShift = 0,
MemoryAccessAlignedShift = 1,
MemoryAccessNontemporalShift = 2,
MemoryAccessMakePointerAvailableShift = 3,
MemoryAccessMakePointerAvailableKHRShift = 3,
MemoryAccessMakePointerVisibleShift = 4,
MemoryAccessMakePointerVisibleKHRShift = 4,
MemoryAccessNonPrivatePointerShift = 5,
MemoryAccessNonPrivatePointerKHRShift = 5,
MemoryAccessMax = 0x7fffffff,
};
@ -657,8 +693,11 @@ enum MemoryAccessMask {
MemoryAccessVolatileMask = 0x00000001,
MemoryAccessAlignedMask = 0x00000002,
MemoryAccessNontemporalMask = 0x00000004,
MemoryAccessMakePointerAvailableMask = 0x00000008,
MemoryAccessMakePointerAvailableKHRMask = 0x00000008,
MemoryAccessMakePointerVisibleMask = 0x00000010,
MemoryAccessMakePointerVisibleKHRMask = 0x00000010,
MemoryAccessNonPrivatePointerMask = 0x00000020,
MemoryAccessNonPrivatePointerKHRMask = 0x00000020,
};

@ -668,6 +707,7 @@ enum Scope {
ScopeWorkgroup = 2,
ScopeSubgroup = 3,
ScopeInvocation = 4,
ScopeQueueFamily = 5,
ScopeQueueFamilyKHR = 5,
ScopeMax = 0x7fffffff,
};
@ -768,6 +808,8 @@ enum Capability {
CapabilityGroupNonUniformShuffleRelative = 66,
CapabilityGroupNonUniformClustered = 67,
CapabilityGroupNonUniformQuad = 68,
CapabilityShaderLayer = 69,
CapabilityShaderViewportIndex = 70,
CapabilitySubgroupBallotKHR = 4423,
CapabilityDrawParameters = 4427,
CapabilitySubgroupVoteKHR = 4431,
@ -796,6 +838,7 @@ enum Capability {
CapabilityFragmentMaskAMD = 5010,
CapabilityStencilExportEXT = 5013,
CapabilityImageReadWriteLodAMD = 5015,
CapabilityShaderClockKHR = 5055,
CapabilitySampleMaskOverrideCoverageNV = 5249,
CapabilityGeometryShaderPassthroughNV = 5251,
CapabilityShaderViewportIndexLayerEXT = 5254,
@ -811,28 +854,49 @@ enum Capability {
CapabilityFragmentDensityEXT = 5291,
CapabilityShadingRateNV = 5291,
CapabilityGroupNonUniformPartitionedNV = 5297,
CapabilityShaderNonUniform = 5301,
CapabilityShaderNonUniformEXT = 5301,
CapabilityRuntimeDescriptorArray = 5302,
CapabilityRuntimeDescriptorArrayEXT = 5302,
CapabilityInputAttachmentArrayDynamicIndexing = 5303,
CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303,
CapabilityUniformTexelBufferArrayDynamicIndexing = 5304,
CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304,
CapabilityStorageTexelBufferArrayDynamicIndexing = 5305,
CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305,
CapabilityUniformBufferArrayNonUniformIndexing = 5306,
CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306,
CapabilitySampledImageArrayNonUniformIndexing = 5307,
CapabilitySampledImageArrayNonUniformIndexingEXT = 5307,
CapabilityStorageBufferArrayNonUniformIndexing = 5308,
CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308,
CapabilityStorageImageArrayNonUniformIndexing = 5309,
CapabilityStorageImageArrayNonUniformIndexingEXT = 5309,
CapabilityInputAttachmentArrayNonUniformIndexing = 5310,
CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310,
CapabilityUniformTexelBufferArrayNonUniformIndexing = 5311,
CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311,
CapabilityStorageTexelBufferArrayNonUniformIndexing = 5312,
CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312,
CapabilityRayTracingNV = 5340,
CapabilityVulkanMemoryModel = 5345,
CapabilityVulkanMemoryModelKHR = 5345,
CapabilityVulkanMemoryModelDeviceScope = 5346,
CapabilityVulkanMemoryModelDeviceScopeKHR = 5346,
CapabilityPhysicalStorageBufferAddresses = 5347,
CapabilityPhysicalStorageBufferAddressesEXT = 5347,
CapabilityComputeDerivativeGroupLinearNV = 5350,
CapabilityCooperativeMatrixNV = 5357,
CapabilityFragmentShaderSampleInterlockEXT = 5363,
CapabilityFragmentShaderShadingRateInterlockEXT = 5372,
CapabilityShaderSMBuiltinsNV = 5373,
CapabilityFragmentShaderPixelInterlockEXT = 5378,
CapabilityDemoteToHelperInvocationEXT = 5379,
CapabilitySubgroupShuffleINTEL = 5568,
CapabilitySubgroupBufferBlockIOINTEL = 5569,
CapabilitySubgroupImageBlockIOINTEL = 5570,
CapabilitySubgroupImageMediaBlockIOINTEL = 5579,
CapabilityIntegerFunctions2INTEL = 5584,
CapabilitySubgroupAvcMotionEstimationINTEL = 5696,
CapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697,
CapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698,
@ -1200,6 +1264,7 @@ enum Op {
OpGroupSMaxNonUniformAMD = 5007,
OpFragmentMaskFetchAMD = 5011,
OpFragmentFetchAMD = 5012,
OpReadClockKHR = 5056,
OpImageSampleFootprintNV = 5283,
OpGroupNonUniformPartitionNV = 5296,
OpWritePackedPrimitiveIndices4x8NV = 5299,
@ -1214,6 +1279,10 @@ enum Op {
OpCooperativeMatrixStoreNV = 5360,
OpCooperativeMatrixMulAddNV = 5361,
OpCooperativeMatrixLengthNV = 5362,
OpBeginInvocationInterlockEXT = 5364,
OpEndInvocationInterlockEXT = 5365,
OpDemoteToHelperInvocationEXT = 5380,
OpIsHelperInvocationEXT = 5381,
OpSubgroupShuffleINTEL = 5571,
OpSubgroupShuffleDownINTEL = 5572,
OpSubgroupShuffleUpINTEL = 5573,
@ -1224,6 +1293,20 @@ enum Op {
OpSubgroupImageBlockWriteINTEL = 5578,
OpSubgroupImageMediaBlockReadINTEL = 5580,
OpSubgroupImageMediaBlockWriteINTEL = 5581,
OpUCountLeadingZerosINTEL = 5585,
OpUCountTrailingZerosINTEL = 5586,
OpAbsISubINTEL = 5587,
OpAbsUSubINTEL = 5588,
OpIAddSatINTEL = 5589,
OpUAddSatINTEL = 5590,
OpIAverageINTEL = 5591,
OpUAverageINTEL = 5592,
OpIAverageRoundedINTEL = 5593,
OpUAverageRoundedINTEL = 5594,
OpISubSatINTEL = 5595,
OpUSubSatINTEL = 5596,
OpIMul32x16INTEL = 5597,
OpUMul32x16INTEL = 5598,
OpDecorateString = 5632,
OpDecorateStringGOOGLE = 5632,
OpMemberDecorateString = 5633,
@ -1714,6 +1797,7 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
case OpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break;
case OpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break;
case OpReadClockKHR: *hasResult = true; *hasResultType = true; break;
case OpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break;
case OpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break;
@ -1728,6 +1812,10 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
case OpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break;
case OpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break;
case OpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break;
case OpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break;
case OpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break;
case OpDemoteToHelperInvocationEXT: *hasResult = false; *hasResultType = false; break;
case OpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break;
case OpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break;
@ -1738,10 +1826,22 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
case OpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
case OpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
case OpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break;
case OpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break;
case OpAbsISubINTEL: *hasResult = true; *hasResultType = true; break;
case OpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break;
case OpIAddSatINTEL: *hasResult = true; *hasResultType = true; break;
case OpUAddSatINTEL: *hasResult = true; *hasResultType = true; break;
case OpIAverageINTEL: *hasResult = true; *hasResultType = true; break;
case OpUAverageINTEL: *hasResult = true; *hasResultType = true; break;
case OpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break;
case OpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break;
case OpISubSatINTEL: *hasResult = true; *hasResultType = true; break;
case OpUSubSatINTEL: *hasResult = true; *hasResultType = true; break;
case OpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break;
case OpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break;
case OpDecorateString: *hasResult = false; *hasResultType = false; break;
case OpDecorateStringGOOGLE: *hasResult = false; *hasResultType = false; break;
case OpMemberDecorateString: *hasResult = false; *hasResultType = false; break;
case OpMemberDecorateStringGOOGLE: *hasResult = false; *hasResultType = false; break;
case OpVmeImageINTEL: *hasResult = true; *hasResultType = true; break;
case OpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break;
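Not part of the diff: a minimal usage sketch of the HasResultAndType() helper extended above, assuming spirv.hpp is on the include path; resultIdOf is a hypothetical name. Per the SPIR-V spec, word 0 of an instruction packs the word count and opcode, and the optional result-type <id> precedes the result <id>.

    #include "spirv.hpp"
    #include <cstdint>

    // Returns the result <id> of the instruction starting at 'words', or 0
    // when the opcode yields no result (e.g. OpDemoteToHelperInvocationEXT).
    inline uint32_t resultIdOf(const uint32_t* words) {
        const spv::Op opcode = static_cast<spv::Op>(words[0] & 0xffffu);
        bool hasResult = false, hasResultType = false;
        spv::HasResultAndType(opcode, &hasResult, &hasResultType);
        if (!hasResult)
            return 0;
        // When a result type is present it occupies word 1,
        // pushing the result <id> to word 2.
        return hasResultType ? words[2] : words[1];
    }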
53
thirdparty/glslang/SPIRV/spvIR.h
vendored
Normal file → Executable file
@ -226,6 +226,36 @@ public:
return nullptr;
}

// Change this block into a canonical dead merge block. Delete instructions
// as necessary. A canonical dead merge block has only an OpLabel and an
// OpUnreachable.
void rewriteAsCanonicalUnreachableMerge() {
assert(localVariables.empty());
// Delete all instructions except for the label.
assert(instructions.size() > 0);
instructions.resize(1);
successors.clear();
Instruction* unreachable = new Instruction(OpUnreachable);
addInstruction(std::unique_ptr<Instruction>(unreachable));
}
// Change this block into a canonical dead continue target branching to the
// given header ID. Delete instructions as necessary. A canonical dead continue
// target has only an OpLabel and an unconditional branch back to the corresponding
// header.
void rewriteAsCanonicalUnreachableContinue(Block* header) {
assert(localVariables.empty());
// Delete all instructions except for the label.
assert(instructions.size() > 0);
instructions.resize(1);
successors.clear();
// Add OpBranch back to the header.
assert(header != nullptr);
Instruction* branch = new Instruction(OpBranch);
branch->addIdOperand(header->getId());
addInstruction(std::unique_ptr<Instruction>(branch));
successors.push_back(header);
}

bool isTerminated() const
{
switch (instructions.back()->getOpCode()) {
@ -235,6 +265,7 @@ public:
case OpKill:
case OpReturn:
case OpReturnValue:
case OpUnreachable:
return true;
default:
return false;
@ -268,10 +299,24 @@ protected:
bool unreachable;
};

// The different reasons for reaching a block in the inReadableOrder traversal.
enum ReachReason {
// Reachable from the entry block via transfers of control, i.e. branches.
ReachViaControlFlow = 0,
// A continue target that is not reachable via control flow.
ReachDeadContinue,
// A merge block that is not reachable via control flow.
ReachDeadMerge
};

// Traverses the control-flow graph rooted at root in an order suited for
// readable code generation. Invokes callback at every node in the traversal
// order.
void inReadableOrder(Block* root, std::function<void(Block*)> callback);
// order. The callback arguments are:
// - the block,
// - the reason we reached the block,
// - if the reason was that block is an unreachable continue or unreachable merge block
// then the last parameter is the corresponding header block.
void inReadableOrder(Block* root, std::function<void(Block*, ReachReason, Block* header)> callback);

//
// SPIR-V IR Function.
@ -321,7 +366,7 @@ public:
parameterInstructions[p]->dump(out);

// Blocks
inReadableOrder(blocks[0], [&out](const Block* b) { b->dump(out); });
inReadableOrder(blocks[0], [&out](const Block* b, ReachReason, Block*) { b->dump(out); });
Instruction end(0, 0, OpFunctionEnd);
end.dump(out);
}
@ -436,6 +481,6 @@ __inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
parent.getParent().mapInstruction(raw_instruction);
}

}; // end spv namespace
} // end spv namespace

#endif // spvIR_H
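Not part of the diff: a minimal sketch, assuming spvIR.h is on the include path, of how a caller adapts to the new three-argument inReadableOrder() callback declared above; dumpBlocksReadably is a hypothetical helper, the real in-tree caller being Function::dump() shown in the hunk above.

    #include "spvIR.h"
    #include <iostream>

    void dumpBlocksReadably(spv::Block* entry) {
        spv::inReadableOrder(entry,
            [](spv::Block* block, spv::ReachReason why, spv::Block* header) {
                // Dead continue/merge targets are reported together with the
                // header that forced them to exist; everything else arrives
                // via ordinary control flow.
                if (why != spv::ReachViaControlFlow && header != nullptr)
                    std::cout << "  (dead block for header " << header->getId() << ")\n";
                std::cout << "block " << block->getId() << "\n";
            });
    }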