diff --git a/dep/CMakeLists.txt b/dep/CMakeLists.txt index d0ec5e0fb..13581d697 100644 --- a/dep/CMakeLists.txt +++ b/dep/CMakeLists.txt @@ -14,6 +14,9 @@ add_subdirectory(libchdr) add_subdirectory(xxhash) add_subdirectory(rapidjson) +add_subdirectory(glslang) +add_subdirectory(vulkan-loader) + if(ENABLE_DISCORD_PRESENCE) add_subdirectory(discord-rpc) endif() diff --git a/dep/glslang/.clang-format b/dep/glslang/.clang-format new file mode 100644 index 000000000..8c73a52fe --- /dev/null +++ b/dep/glslang/.clang-format @@ -0,0 +1,13 @@ +Language: Cpp +IndentWidth: 4 +PointerAlignment: Left +BreakBeforeBraces: Custom +BraceWrapping: { AfterFunction: true, AfterControlStatement: false } +IndentCaseLabels: false +ReflowComments: false +ColumnLimit: 120 +AccessModifierOffset: -4 +AlignTrailingComments: true +AllowShortBlocksOnASingleLine: false +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false diff --git a/dep/glslang/CMakeLists.txt b/dep/glslang/CMakeLists.txt new file mode 100644 index 000000000..30325f46c --- /dev/null +++ b/dep/glslang/CMakeLists.txt @@ -0,0 +1,55 @@ +set(SRCS + glslang/CInterface/glslang_c_interface.cpp + glslang/GenericCodeGen/CodeGen.cpp + glslang/GenericCodeGen/Link.cpp + glslang/MachineIndependent/attribute.cpp + glslang/MachineIndependent/Constant.cpp + glslang/MachineIndependent/glslang_tab.cpp + glslang/MachineIndependent/InfoSink.cpp + glslang/MachineIndependent/Initialize.cpp + glslang/MachineIndependent/Intermediate.cpp + glslang/MachineIndependent/intermOut.cpp + glslang/MachineIndependent/IntermTraverse.cpp + glslang/MachineIndependent/iomapper.cpp + glslang/MachineIndependent/limits.cpp + glslang/MachineIndependent/linkValidate.cpp + glslang/MachineIndependent/parseConst.cpp + glslang/MachineIndependent/ParseContextBase.cpp + glslang/MachineIndependent/ParseHelper.cpp + glslang/MachineIndependent/PoolAlloc.cpp + glslang/MachineIndependent/preprocessor/Pp.cpp + glslang/MachineIndependent/preprocessor/PpAtom.cpp + glslang/MachineIndependent/preprocessor/PpContext.cpp + glslang/MachineIndependent/preprocessor/PpScanner.cpp + glslang/MachineIndependent/preprocessor/PpTokens.cpp + glslang/MachineIndependent/propagateNoContraction.cpp + glslang/MachineIndependent/reflection.cpp + glslang/MachineIndependent/RemoveTree.cpp + glslang/MachineIndependent/Scan.cpp + glslang/MachineIndependent/ShaderLang.cpp + glslang/MachineIndependent/SymbolTable.cpp + glslang/MachineIndependent/Versions.cpp + OGLCompilersDLL/InitializeDll.cpp + SPIRV/disassemble.cpp + SPIRV/doc.cpp + SPIRV/GlslangToSpv.cpp + SPIRV/InReadableOrder.cpp + SPIRV/Logger.cpp + SPIRV/SpvBuilder.cpp + SPIRV/SpvPostProcess.cpp + SPIRV/SPVRemapper.cpp + SPIRV/SpvTools.cpp + StandAlone/ResourceLimits.cpp + StandAlone/resource_limits_c.cpp +) + +add_library(glslang ${SRCS}) + +if(WIN32) + target_sources(glslang PRIVATE glslang/OSDependent/Windows/ossource.cpp) +else() + target_sources(glslang PRIVATE glslang/OSDependent/Unix/ossource.cpp) +endif() + +target_include_directories(glslang PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") + diff --git a/dep/glslang/CODE_OF_CONDUCT.md b/dep/glslang/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..a11610bd3 --- /dev/null +++ b/dep/glslang/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +A reminder that this issue tracker is managed by the Khronos Group. Interactions here should follow the Khronos Code of Conduct (https://www.khronos.org/developers/code-of-conduct), which prohibits aggressive or derogatory language. Please keep the discussion friendly and civil. 
diff --git a/dep/glslang/LICENSE.txt b/dep/glslang/LICENSE.txt
new file mode 100644
index 000000000..a10c0944f
--- /dev/null
+++ b/dep/glslang/LICENSE.txt
@@ -0,0 +1,108 @@
+Here, glslang proper means core GLSL parsing, HLSL parsing, and SPIR-V code
+generation. Glslang proper requires use of two licenses, one that covers
+non-preprocessing and an additional one that covers preprocessing.
+
+Bison was removed long ago. You can build glslang from the source grammar,
+using tools of your choice, without using bison or any bison files.
+
+Other parts, outside of glslang proper, include:
+
+- gl_types.h, only needed for OpenGL-like reflection, and can be left out of
+  a parse and codegen project. See it for its license.
+
+- update_glslang_sources.py, which is not part of the project proper and does
+  not need to be used.
+
+- the SPIR-V "remapper", which is optional, but has the same license as
+  glslang proper
+
+- Google tests and SPIR-V tools, and anything in the external subdirectory
+  are external and optional; see them for their respective licenses.
+
+--------------------------------------------------------------------------------
+
+The core of glslang proper, minus the preprocessor, is licensed as follows:
+
+//
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) <various other dates and companies>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+--------------------------------------------------------------------------------
+
+The preprocessor has the core license stated above, plus an additional license:
+
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+ +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +\****************************************************************************/ diff --git a/dep/glslang/OGLCompilersDLL/InitializeDll.cpp b/dep/glslang/OGLCompilersDLL/InitializeDll.cpp new file mode 100644 index 000000000..abea9108b --- /dev/null +++ b/dep/glslang/OGLCompilersDLL/InitializeDll.cpp @@ -0,0 +1,165 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#define SH_EXPORTING
+
+#include <cassert>
+
+#include "InitializeDll.h"
+#include "../glslang/Include/InitializeGlobals.h"
+#include "../glslang/Public/ShaderLang.h"
+#include "../glslang/Include/PoolAlloc.h"
+
+namespace glslang {
+
+OS_TLSIndex ThreadInitializeIndex = OS_INVALID_TLS_INDEX;
+
+// Per-process initialization.
+// Needs to be called at least once before parsing, etc. is done.
+// Will also do thread initialization for the calling thread; other
+// threads will need to do that explicitly.
+bool InitProcess()
+{
+    glslang::GetGlobalLock();
+
+    if (ThreadInitializeIndex != OS_INVALID_TLS_INDEX) {
+        //
+        // Function is re-entrant.
+        //
+
+        glslang::ReleaseGlobalLock();
+        return true;
+    }
+
+    ThreadInitializeIndex = OS_AllocTLSIndex();
+
+    if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "InitProcess(): Failed to allocate TLS area for init flag");
+
+        glslang::ReleaseGlobalLock();
+        return false;
+    }
+
+    if (! InitializePoolIndex()) {
+        assert(0 && "InitProcess(): Failed to initialize global pool");
+
+        glslang::ReleaseGlobalLock();
+        return false;
+    }
+
+    if (! InitThread()) {
+        assert(0 && "InitProcess(): Failed to initialize thread");
+
+        glslang::ReleaseGlobalLock();
+        return false;
+    }
+
+    glslang::ReleaseGlobalLock();
+    return true;
+}
+
+// Per-thread scoped initialization.
+// Must be called at least once by each new thread sharing the
+// symbol tables, etc., needed to parse.
+bool InitThread()
+{
+    //
+    // This function is re-entrant
+    //
+    if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "InitThread(): Process hasn't been initialized.");
+        return false;
+    }
+
+    if (OS_GetTLSValue(ThreadInitializeIndex) != 0)
+        return true;
+
+    if (! OS_SetTLSValue(ThreadInitializeIndex, (void *)1)) {
+        assert(0 && "InitThread(): Unable to set init flag.");
+        return false;
+    }
+
+    glslang::SetThreadPoolAllocator(nullptr);
+
+    return true;
+}
+
+// Not necessary to call this: InitThread() is reentrant, and the need
+// to do per thread tear down has been removed.
+//
+// This is kept, with memory management removed, to satisfy any existing
+// calls to it that rely on it.
+bool DetachThread()
+{
+    bool success = true;
+
+    if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX)
+        return true;
+
+    //
+    // Function is re-entrant and this thread may not have been initialized.
+    //
+    if (OS_GetTLSValue(ThreadInitializeIndex) != 0) {
+        if (!OS_SetTLSValue(ThreadInitializeIndex, (void *)0)) {
+            assert(0 && "DetachThread(): Unable to clear init flag.");
+            success = false;
+        }
+    }
+
+    return success;
+}
+
+// Not necessary to call this: InitProcess() is reentrant.
+//
+// This is kept, with memory management removed, to satisfy any existing
+// calls to it that rely on it.
+//
+// Users of glslang should call shFinalize() or glslang::FinalizeProcess() for
+// process-scoped memory tear down.
+bool DetachProcess()
+{
+    bool success = true;
+
+    if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX)
+        return true;
+
+    success = DetachThread();
+
+    OS_FreeTLSIndex(ThreadInitializeIndex);
+    ThreadInitializeIndex = OS_INVALID_TLS_INDEX;
+
+    return success;
+}
+
+} // end namespace glslang
diff --git a/dep/glslang/OGLCompilersDLL/InitializeDll.h b/dep/glslang/OGLCompilersDLL/InitializeDll.h
new file mode 100644
index 000000000..661cee4d2
--- /dev/null
+++ b/dep/glslang/OGLCompilersDLL/InitializeDll.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef __INITIALIZEDLL_H
+#define __INITIALIZEDLL_H
+
+#include "../glslang/OSDependent/osinclude.h"
+
+namespace glslang {
+
+bool InitProcess();
+bool InitThread();
+bool DetachThread(); // not called from standalone, perhaps other tools rely on parts of it
+bool DetachProcess(); // not called from standalone, perhaps other tools rely on parts of it
+
+} // end namespace glslang
+
+#endif // __INITIALIZEDLL_H
+
diff --git a/dep/glslang/README-spirv-remap.txt b/dep/glslang/README-spirv-remap.txt
new file mode 100644
index 000000000..3e5288aac
--- /dev/null
+++ b/dep/glslang/README-spirv-remap.txt
@@ -0,0 +1,137 @@
+
+VERSION
+--------------------------------------------------------------------------------
+spirv-remap 0.97
+
+INTRO:
+--------------------------------------------------------------------------------
+spirv-remap is a utility to improve compression of SPIR-V binary files via
+entropy reduction, plus optional stripping of debug information and
+load/store optimization. It transforms SPIR-V to SPIR-V, remapping IDs. The
+resulting modules have an increased ID range (IDs are not as tightly packed
+around zero), but will compress better when multiple modules are compressed
+together, since the compressor's dictionary can find better cross-module
+commonality.
+
+Remapping is accomplished via canonicalization.
Thus, modules can be
+compressed one at a time with no loss of quality relative to operating on
+many modules at once. The command line tool operates on multiple modules
+only in the trivial repetition sense, for ease of use. The remapper API
+only accepts a single module at a time.
+
+There are two modes of use: command line, and a C++11 API. Both are
+described below.
+
+spirv-remap is currently in an alpha state. Although there are no known
+remapping defects, it has only been exercised on one real-world game shader
+workload.
+
+
+FEEDBACK
+--------------------------------------------------------------------------------
+Report defects, enhancement requests, code improvements, etc. to:
+   spvremapper@lunarg.com
+
+
+COMMAND LINE USAGE:
+--------------------------------------------------------------------------------
+Examples are given with a verbosity of one (-v), but more verbosity can be
+had via -vv, -vvv, etc, or an integer parameter to --verbose, such as
+"--verbose 4". With no verbosity, the command is silent and returns 0 on
+success, and a positive integer error on failure.
+
+Pre-built binaries for several OSs are available. Examples presented are
+for Linux. Command line arguments can be provided in any order.
+
+1. Basic ID remapping
+
+Perform ID remapping on all shaders in "*.spv", writing new files with
+the same basenames to /tmp/out_dir.
+
+   spirv-remap -v --map all --input *.spv --output /tmp/out_dir
+
+2. Perform all possible size reductions
+
+   spirv-remap-linux-64 -v --do-everything --input *.spv --output /tmp/out_dir
+
+Note that --do-everything is a synonym for:
+
+   --map all --dce all --opt all --strip all
+
+API USAGE:
+--------------------------------------------------------------------------------
+
+The public interface to the remapper is defined in SPIRV/SPVRemapper.h as follows:
+
+namespace spv {
+
+class spirvbin_t
+{
+public:
+   enum Options { ... };
+   spirvbin_t(int verbose = 0); // construct
+
+   // remap an existing binary in memory
+   void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
+
+   // Type for error/log handler functions
+   typedef std::function<void(const std::string&)> errorfn_t;
+   typedef std::function<void(const std::string&)> logfn_t;
+
+   // Register error/log handling functions (can be c/c++ fn, lambda fn, or functor)
+   static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
+   static void registerLogHandler(logfn_t handler) { logHandler = handler; }
+};
+
+} // namespace spv
+
+The class definition is in SPVRemapper.cpp.
+
+remap() accepts an std::vector of SPIR-V words, modifies them per the
+request given in 'opts', and leaves the 'spv' container with the result.
+It is safe to instantiate one spirvbin_t per thread and process a different
+SPIR-V in each.
+
+The "opts" parameter to remap() accepts a bit mask of desired remapping
+options. See REMAPPING AND OPTIMIZATION OPTIONS.
+
+On error, the function supplied to registerErrorHandler() will be invoked.
+This can be a standard C/C++ function, a lambda function, or a functor.
+The default handler simply calls exit(5). The error handler is a static
+member, so need only be set up once, not once per spirvbin_t instance.
+
+Log messages are supplied to registerLogHandler(). By default, log
+messages are eaten silently. The log handler is also a static member.
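+
+For illustration, a module already loaded into memory could be driven
+through the API along these lines (a minimal sketch only; the include
+path is assumed and file I/O and error handling are elided):
+
+   #include <cstdint>
+   #include <vector>
+   #include "SPIRV/SPVRemapper.h"
+
+   // Remap one SPIR-V module in place, with all remapping, DCE,
+   // and optimization options enabled.
+   void remapModule(std::vector<std::uint32_t>& spirv)
+   {
+       spv::spirvbin_t remapper(0 /* verbosity */);
+       remapper.remap(spirv, spv::spirvbin_t::DO_EVERYTHING);
+   }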
+
+
+BUILD DEPENDENCIES:
+--------------------------------------------------------------------------------
+ 1. C++11 compatible compiler
+ 2. cmake
+ 3. glslang
+
+
+BUILDING
+--------------------------------------------------------------------------------
+The standalone remapper is built alongside glslangValidator through its
+normal build process.
+
+
+REMAPPING AND OPTIMIZATION OPTIONS
+--------------------------------------------------------------------------------
+API:
+   These are bits defined under spv::spirvbin_t::, and can be
+   bitwise or-ed together as desired.
+
+   MAP_TYPES      = canonicalize type IDs
+   MAP_NAMES      = canonicalize named data
+   MAP_FUNCS      = canonicalize function bodies
+   DCE_FUNCS      = remove dead functions
+   DCE_VARS       = remove dead variables
+   DCE_TYPES      = remove dead types
+   OPT_LOADSTORE  = optimize unneeded load/stores
+   MAP_ALL        = (MAP_TYPES | MAP_NAMES | MAP_FUNCS)
+   DCE_ALL        = (DCE_FUNCS | DCE_VARS | DCE_TYPES)
+   OPT_ALL        = (OPT_LOADSTORE)
+   ALL_BUT_STRIP  = (MAP_ALL | DCE_ALL | OPT_ALL)
+   DO_EVERYTHING  = (STRIP | ALL_BUT_STRIP)
+
diff --git a/dep/glslang/README.md b/dep/glslang/README.md
new file mode 100644
index 000000000..ff844c028
--- /dev/null
+++ b/dep/glslang/README.md
@@ -0,0 +1,442 @@
+# News
+
+[![Build Status](https://travis-ci.org/KhronosGroup/glslang.svg?branch=master)](https://travis-ci.org/KhronosGroup/glslang)
+[![Build status](https://ci.appveyor.com/api/projects/status/q6fi9cb0qnhkla68/branch/master?svg=true)](https://ci.appveyor.com/project/Khronoswebmaster/glslang/branch/master)
+
+## Planned Deprecations/Removals
+
+1. **SPIRV Folder, 1-May, 2020.** Glslang, when installed through CMake,
+will install a `SPIRV` folder into `${CMAKE_INSTALL_INCLUDEDIR}`.
+This `SPIRV` folder is being moved to `glslang/SPIRV`.
+During the transition the `SPIRV` folder will be installed into both locations.
+The old install of `SPIRV/` will be removed as a CMake install target no sooner than May 1, 2020.
+See issue #1964.
+
+2. **Visual Studio 2013, 20-July, 2020.** Keeping code compiling for MS Visual Studio 2013 will no longer be
+a goal as of July 20, 2020, the fifth anniversary of the release of Visual Studio 2015.
+
+# Glslang Components and Status
+
+There are several components:
+
+### Reference Validator and GLSL/ESSL -> AST Front End
+
+An OpenGL GLSL and OpenGL|ES GLSL (ESSL) front-end for reference validation and translation of GLSL/ESSL into an internal abstract syntax tree (AST).
+
+**Status**: Virtually complete, with results carrying similar weight to the specifications.
+
+### HLSL -> AST Front End
+
+An HLSL front-end for translation of an approximation of HLSL to glslang's AST form.
+
+**Status**: Partially complete. Semantics are not reference quality and input is not validated.
+This is in contrast to the [DXC project](https://github.com/Microsoft/DirectXShaderCompiler), which receives a much larger investment and attempts to have definitive/reference-level semantics.
+
+See [issue 362](https://github.com/KhronosGroup/glslang/issues/362) and [issue 701](https://github.com/KhronosGroup/glslang/issues/701) for current status.
+
+### AST -> SPIR-V Back End
+
+Translates glslang's AST to the Khronos-specified SPIR-V intermediate language.
+
+**Status**: Virtually complete.
+
+### Reflector
+
+An API for getting reflection information from the AST, reflection types/variables/etc. from the HLL source (not the SPIR-V).
+
+**Status**: There is a large amount of functionality present, but no specification/goal to measure completeness against. It is accurate for the input HLL and AST, but only approximate for what would later be emitted for SPIR-V.
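+
+As a minimal sketch (assuming a `glslang::TProgram` whose shaders have
+already been parsed and linked, per the C++ interface described later in
+this README; error handling elided), the reflection queries can be driven
+like this:
+
+```cxx
+#include "glslang/Public/ShaderLang.h"
+
+#include <cstdio>
+
+// Sketch: print the live uniform variables of a linked program.
+void dumpUniforms(glslang::TProgram& program)
+{
+    if (!program.buildReflection())
+        return;
+    for (int i = 0; i < program.getNumLiveUniformVariables(); ++i)
+        printf("uniform %s (block offset %d)\n",
+               program.getUniformName(i), program.getUniformBufferOffset(i));
+}
+```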
+
+### Standalone Wrapper
+
+`glslangValidator` is a command-line tool for accessing the functionality above.
+
+**Status**: Complete.
+
+Tasks waiting to be done are documented as GitHub issues.
+
+## Other References
+
+Also see the Khronos landing page for glslang as a reference front end:
+
+https://www.khronos.org/opengles/sdk/tools/Reference-Compiler/
+
+The above page, while not kept up to date, includes additional information regarding glslang as a reference validator.
+
+# How to Use Glslang
+
+## Execution of Standalone Wrapper
+
+To use the standalone binary form, execute `glslangValidator`, and it will print
+a usage statement. Basic operation is to give it a file containing a shader,
+and it will print out warnings/errors and optionally an AST.
+
+The applied stage-specific rules are based on the file extension:
+* `.vert` for a vertex shader
+* `.tesc` for a tessellation control shader
+* `.tese` for a tessellation evaluation shader
+* `.geom` for a geometry shader
+* `.frag` for a fragment shader
+* `.comp` for a compute shader
+
+There is also a non-shader extension:
+* `.conf` for a configuration file of limits, see usage statement for example
+
+## Building
+
+Instead of building manually, you can also download the binaries for your
+platform directly from the [master-tot release][master-tot-release] on GitHub.
+Those binaries are automatically uploaded by the buildbots after successful
+testing and they always reflect the current top of the tree of the master
+branch.
+
+### Dependencies
+
+* A C++11 compiler.
+  (For MSVS: 2015 is recommended, 2013 is fully supported/tested, and 2010 support is attempted, but not tested.)
+* [CMake][cmake]: for generating compilation targets.
+* make: _Linux_, ninja is an alternative, if configured.
+* [Python 3.x][python]: for executing SPIRV-Tools scripts. (Optional if not using SPIRV-Tools and the 'External' subdirectory does not exist.)
+* [bison][bison]: _optional_, but needed when changing the grammar (glslang.y).
+* [googletest][googletest]: _optional_, but should be used if making any changes to glslang.
+
+### Build steps
+
+The following steps assume a Bash shell. On Windows, that could be the Git Bash
+shell or some other shell of your choosing.
+
+#### 1) Check-Out this project
+
+```bash
+cd <parent of where you want glslang to be>
+git clone https://github.com/KhronosGroup/glslang.git
+```
+
+#### 2) Check-Out External Projects
+
+```bash
+cd <the directory glslang was cloned to>
+git clone https://github.com/google/googletest.git External/googletest
+```
+
+If you want to use googletest with Visual Studio 2013, you also need to check out an older version:
+
+```bash
+# to use googletest with Visual Studio 2013
+cd External/googletest
+git checkout 440527a61e1c91188195f7de212c63c77e8f0a45
+cd ../..
+```
+
+If you wish to assure that SPIR-V generated from HLSL is legal for Vulkan,
+wish to invoke -Os to reduce SPIR-V size from HLSL or GLSL, or wish to run the
+integrated test suite, install spirv-tools with this:
+
+```bash
+./update_glslang_sources.py
+```
+
+#### 3) Configure
+
+Assume the source directory is `$SOURCE_DIR` and the build directory is
+`$BUILD_DIR`.
First ensure the build directory exists, then navigate into it:
+
+```bash
+mkdir -p $BUILD_DIR
+cd $BUILD_DIR
+```
+
+For building on Linux:
+
+```bash
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="$(pwd)/install" $SOURCE_DIR
+# "Release" (for CMAKE_BUILD_TYPE) could also be "Debug" or "RelWithDebInfo"
+```
+
+For building on Android:
+```bash
+cmake $SOURCE_DIR -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="$(pwd)/install" -DANDROID_ABI=arm64-v8a -DCMAKE_BUILD_TYPE=Release -DANDROID_STL=c++_static -DANDROID_PLATFORM=android-24 -DCMAKE_SYSTEM_NAME=Android -DANDROID_TOOLCHAIN=clang -DANDROID_ARM_MODE=arm -DCMAKE_MAKE_PROGRAM=$ANDROID_NDK_ROOT/prebuilt/linux-x86_64/bin/make -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK_ROOT/build/cmake/android.toolchain.cmake
+# If on Windows will be -DCMAKE_MAKE_PROGRAM=%ANDROID_NDK_ROOT%\prebuilt\windows-x86_64\bin\make.exe
+# -G is needed for building on Windows
+# -DANDROID_ABI can also be armeabi-v7a for 32 bit
+```
+
+For building on Windows:
+
+```bash
+cmake $SOURCE_DIR -DCMAKE_INSTALL_PREFIX="$(pwd)/install"
+# The CMAKE_INSTALL_PREFIX part is for testing (explained later).
+```
+
+The CMake GUI also works for Windows (version 3.4.1 tested).
+
+Also, consider using `git config --global core.fileMode false` (or with `--local`) on Windows
+to prevent the addition of execution permission on files.
+
+#### 4) Build and Install
+
+```bash
+# for Linux:
+make -j4 install
+
+# for Windows:
+cmake --build . --config Release --target install
+# "Release" (for --config) could also be "Debug", "MinSizeRel", or "RelWithDebInfo"
+```
+
+If using MSVC, after running CMake to configure, use the
+Configuration Manager to check the `INSTALL` project.
+
+### If you need to change the GLSL grammar
+
+The grammar in `glslang/MachineIndependent/glslang.y` has to be recompiled with
+bison if it changes; the output files are committed to the repo so that not
+every developer needs to have bison configured, since grammar changes are quite
+infrequent. For Windows you can get binaries from
+[GnuWin32][bison-gnu-win32].
+
+The command to rebuild is:
+
+```bash
+m4 -P MachineIndependent/glslang.m4 > MachineIndependent/glslang.y
+bison --defines=MachineIndependent/glslang_tab.cpp.h \
+      -t MachineIndependent/glslang.y \
+      -o MachineIndependent/glslang_tab.cpp
+```
+
+The above commands are also available in the bash script in `updateGrammar`,
+when executed from the glslang subdirectory of the glslang repository.
+With no arguments it builds the full grammar, and with a "web" argument,
+the web grammar subset (see more about the web subset in the next section).
+
+### Building a standalone JS/WASM library for the Web and Node
+
+Use the steps in [Build Steps](#build-steps), with the following notes/exceptions:
+* `emsdk` needs to be present in your executable search path, *PATH* for
+  Bash-like environments:
+  + [Instructions located here](https://emscripten.org/docs/getting_started/downloads.html#sdk-download-and-install)
+* Wrap cmake call: `emcmake cmake`
+* Set `-DBUILD_TESTING=OFF -DENABLE_OPT=OFF -DINSTALL_GTEST=OFF`.
+* Set `-DENABLE_HLSL=OFF` if HLSL is not needed.
+* For a standalone JS/WASM library, turn on `-DENABLE_GLSLANG_JS=ON`.
+* For building a minimum-size web subset of core glslang:
+  + turn on `-DENABLE_GLSLANG_WEBMIN=ON` (disables HLSL)
+  + execute `updateGrammar web` from the glslang subdirectory
+    (or if using your own scripts, `m4` needs a `-DGLSLANG_WEB` argument)
+  + optionally, for GLSL compilation error messages, turn on
+    `-DENABLE_GLSLANG_WEBMIN_DEVEL=ON`
+* To get a fully minimized build, make sure to use `brotli` to compress the .js
+  and .wasm files
+
+Example:
+
+```sh
+emcmake cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_GLSLANG_JS=ON \
+    -DENABLE_HLSL=OFF -DBUILD_TESTING=OFF -DENABLE_OPT=OFF -DINSTALL_GTEST=OFF ..
+```
+
+## Building glslang - Using vcpkg
+
+You can download and install glslang using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
+
+    git clone https://github.com/Microsoft/vcpkg.git
+    cd vcpkg
+    ./bootstrap-vcpkg.sh
+    ./vcpkg integrate install
+    ./vcpkg install glslang
+
+The glslang port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
+
+## Testing
+
+Right now, there are two test harnesses in glslang: one is [Google
+Test](gtests/), the other is the [`runtests` script](Test/runtests). The former
+runs unit tests and single-shader single-threaded integration tests, while
+the latter runs multiple-shader linking tests and multi-threaded tests.
+
+### Running tests
+
+The [`runtests` script](Test/runtests) requires compiled binaries to be
+installed into `$BUILD_DIR/install`. Please make sure you have supplied the
+correct configuration to CMake (using `-DCMAKE_INSTALL_PREFIX`) when building;
+otherwise, you may want to modify the path in the `runtests` script.
+
+Running Google Test-backed tests:
+
+```bash
+cd $BUILD_DIR
+
+# for Linux:
+ctest
+
+# for Windows:
+ctest -C {Debug|Release|RelWithDebInfo|MinSizeRel}
+
+# or, run the test binary directly
+# (which gives more fine-grained control like filtering):
+$BUILD_DIR/glslangtests
+```
+
+Running `runtests` script-backed tests:
+
+```bash
+cd $SOURCE_DIR/Test && ./runtests
+```
+
+If some tests fail with validation errors, there may be a mismatch between the
+version of `spirv-val` on the system and the version of glslang. In this
+case, it is necessary to run `update_glslang_sources.py`. See "Check-Out
+External Projects" above for more details.
+
+### Contributing tests
+
+Test results should always be included with a pull request that modifies
+functionality.
+
+If you are writing unit tests, please use the Google Test framework and
+place the tests under the `gtests/` directory.
+
+Integration tests are placed in the `Test/` directory. It contains test input
+and a subdirectory `baseResults/` that contains the expected results of the
+tests. Both the tests and `baseResults/` are under source-code control.
+
+Google Test runs those integration tests by reading the test input, compiling
+them, and then comparing the output against the expected results in
+`baseResults/`. The integration tests run via Google Test are registered in
+various `gtests/*.FromFile.cpp` source files. `glslangtests` provides a
+command-line option `--update-mode`, which, if supplied, will overwrite the
+golden files under the `baseResults/` directory with real output from that
+invocation. For more information, please check `gtests/` directory's
+[README](gtests/README.md).
+
+The `runtests` script will generate current results in the `localResults/`
+directory and `diff` them against `baseResults/`. When you want to update
+the tracked test results, they need to be copied from `localResults/` to
+`baseResults/`. This can be done by the `bump` shell script.
+
+You can add your own private list of tests, not tracked publicly, by using
+`localtestlist` to list non-tracked tests. This is automatically read
+by `runtests` and included in the `diff` and `bump` process.
+
+## Programmatic Interfaces
+
+Another piece of software can programmatically translate shaders to an AST
+using one of two different interfaces:
+* A new C++ class-oriented interface, or
+* The original C functional interface
+
+The `main()` in `StandAlone/StandAlone.cpp` shows examples using both styles.
+
+### C++ Class Interface (new, preferred)
+
+This interface is in roughly the last 1/3 of `ShaderLang.h`. It is in the
+glslang namespace and contains the following, here with suggested calls
+for generating SPIR-V:
+
+```cxx
+const char* GetEsslVersionString();
+const char* GetGlslVersionString();
+bool InitializeProcess();
+void FinalizeProcess();
+
+class TShader
+    setStrings(...);
+    setEnvInput(EShSourceHlsl or EShSourceGlsl, stage, EShClientVulkan or EShClientOpenGL, 100);
+    setEnvClient(EShClientVulkan or EShClientOpenGL, EShTargetVulkan_1_0 or EShTargetVulkan_1_1 or EShTargetOpenGL_450);
+    setEnvTarget(EShTargetSpv, EShTargetSpv_1_0 or EShTargetSpv_1_3);
+    bool parse(...);
+    const char* getInfoLog();
+
+class TProgram
+    void addShader(...);
+    bool link(...);
+    const char* getInfoLog();
+    Reflection queries
+```
+
+For just validating (not generating code), substitute these calls:
+
+```cxx
+    setEnvInput(EShSourceHlsl or EShSourceGlsl, stage, EShClientNone, 0);
+    setEnvClient(EShClientNone, 0);
+    setEnvTarget(EShTargetNone, 0);
+```
+
+See `ShaderLang.h` and the usage of it in `StandAlone/StandAlone.cpp` for more
+details. There is a block comment giving more detail above the calls for
+`setEnvInput()`, `setEnvClient()`, and `setEnvTarget()`.
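+
+Putting those calls together, a hedged end-to-end sketch (the resource-limit
+values from `StandAlone/ResourceLimits.h` are one convenient choice, and
+`compileToSpv` is an illustrative helper, not part of the API; error handling
+is elided):
+
+```cxx
+#include "glslang/Public/ShaderLang.h"
+#include "SPIRV/GlslangToSpv.h"
+#include "StandAlone/ResourceLimits.h"
+
+#include <vector>
+
+// Illustrative helper: compile one Vulkan GLSL fragment shader to SPIR-V.
+std::vector<unsigned int> compileToSpv(const char* source)
+{
+    glslang::InitializeProcess();
+
+    glslang::TShader shader(EShLangFragment);
+    shader.setStrings(&source, 1);
+    shader.setEnvInput(glslang::EShSourceGlsl, EShLangFragment, glslang::EShClientVulkan, 100);
+    shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_0);
+    shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_0);
+
+    std::vector<unsigned int> spirv;
+    if (shader.parse(&glslang::DefaultTBuiltInResource, 100, false, EShMsgDefault)) {
+        glslang::TProgram program;
+        program.addShader(&shader);
+        if (program.link(EShMsgDefault))
+            glslang::GlslangToSpv(*program.getIntermediate(EShLangFragment), spirv);
+    }
+
+    glslang::FinalizeProcess();
+    return spirv;
+}
+```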
+
+### C Functional Interface (original)
+
+This interface is in roughly the first 2/3 of `ShaderLang.h`, and referred to
+as the `Sh*()` interface, as all the entry points start with `Sh`.
+
+The `Sh*()` interface takes a "compiler" call-back object, which it calls back
+after building the AST; the callback is passed the AST and can then execute a
+back end on it.
+
+The following is a simplified resulting run-time call stack:
+
+```c
+ShCompile(shader, compiler) -> compiler(AST) -> <back end>
+```
+
+In practice, `ShCompile()` takes shader strings, default version, and
+warning/error and other options for controlling compilation.
+
+## Basic Internal Operation
+
+* Initial lexical analysis is done by the preprocessor in
+  `MachineIndependent/Preprocessor`, and then refined by a GLSL scanner
+  in `MachineIndependent/Scan.cpp`. There is currently no use of flex.
+
+* Code is parsed using bison on `MachineIndependent/glslang.y` with the
+  aid of a symbol table and an AST. The symbol table is not passed on to
+  the back-end; the intermediate representation stands on its own.
+  The tree is built by the grammar productions, many of which are
+  offloaded into `ParseHelper.cpp`, and by `Intermediate.cpp`.
+
+* The intermediate representation is very high-level, and represented
+  as an in-memory tree. This loses no information from the original
+  program, and allows efficient transfer of the result from parsing to
+  the back-end. In the AST, constants are propagated and folded, and a
+  very small amount of dead code is eliminated.
+
+  To aid linking and reflection, the last top-level branch in the AST
+  lists all global symbols.
+
+* The primary algorithm of the back-end compiler is to traverse the
+  tree (high-level intermediate representation), and create an internal
+  object code representation. There is an example of how to do this
+  in `MachineIndependent/intermOut.cpp`.
+
+* Reduction of the tree to a linear byte-code style low-level intermediate
+  representation is likely a good way to generate fully optimized code.
+
+* There is currently some dead old-style linker-type code still lying around.
+
+* Memory pool: parsing uses types derived from C++ `std` types, using a
+  custom allocator that puts them in a memory pool. This makes allocation
+  of individual containers/contents just a few cycles and deallocation free.
+  This pool is popped after the AST is made and processed.
+
+  The use is simple: if you are going to call `new`, there are three cases:
+
+  - the object comes from the pool (its base class has the macro
+    `POOL_ALLOCATOR_NEW_DELETE` in it) and you do not have to call `delete`
+
+  - it is a `TString`, in which case call `NewPoolTString()`, which gets
+    it from the pool, and there is no corresponding `delete`
+
+  - the object does not come from the pool, and you have to do normal
+    C++ memory management of what you `new`
+
+* Features can be protected by version/extension/stage/profile:
+  See the comment in `glslang/MachineIndependent/Versions.cpp`.
+
+[cmake]: https://cmake.org/
+[python]: https://www.python.org/
+[bison]: https://www.gnu.org/software/bison/
+[googletest]: https://github.com/google/googletest
+[bison-gnu-win32]: http://gnuwin32.sourceforge.net/packages/bison.htm
+[master-tot-release]: https://github.com/KhronosGroup/glslang/releases/tag/master-tot
diff --git a/dep/glslang/SPIRV/GLSL.ext.AMD.h b/dep/glslang/SPIRV/GLSL.ext.AMD.h
new file mode 100644
index 000000000..009d2f1cf
--- /dev/null
+++ b/dep/glslang/SPIRV/GLSL.ext.AMD.h
@@ -0,0 +1,108 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/ + +#ifndef GLSLextAMD_H +#define GLSLextAMD_H + +static const int GLSLextAMDVersion = 100; +static const int GLSLextAMDRevision = 7; + +// SPV_AMD_shader_ballot +static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot"; + +enum ShaderBallotAMD { + ShaderBallotBadAMD = 0, // Don't use + + SwizzleInvocationsAMD = 1, + SwizzleInvocationsMaskedAMD = 2, + WriteInvocationAMD = 3, + MbcntAMD = 4, + + ShaderBallotCountAMD +}; + +// SPV_AMD_shader_trinary_minmax +static const char* const E_SPV_AMD_shader_trinary_minmax = "SPV_AMD_shader_trinary_minmax"; + +enum ShaderTrinaryMinMaxAMD { + ShaderTrinaryMinMaxBadAMD = 0, // Don't use + + FMin3AMD = 1, + UMin3AMD = 2, + SMin3AMD = 3, + FMax3AMD = 4, + UMax3AMD = 5, + SMax3AMD = 6, + FMid3AMD = 7, + UMid3AMD = 8, + SMid3AMD = 9, + + ShaderTrinaryMinMaxCountAMD +}; + +// SPV_AMD_shader_explicit_vertex_parameter +static const char* const E_SPV_AMD_shader_explicit_vertex_parameter = "SPV_AMD_shader_explicit_vertex_parameter"; + +enum ShaderExplicitVertexParameterAMD { + ShaderExplicitVertexParameterBadAMD = 0, // Don't use + + InterpolateAtVertexAMD = 1, + + ShaderExplicitVertexParameterCountAMD +}; + +// SPV_AMD_gcn_shader +static const char* const E_SPV_AMD_gcn_shader = "SPV_AMD_gcn_shader"; + +enum GcnShaderAMD { + GcnShaderBadAMD = 0, // Don't use + + CubeFaceIndexAMD = 1, + CubeFaceCoordAMD = 2, + TimeAMD = 3, + + GcnShaderCountAMD +}; + +// SPV_AMD_gpu_shader_half_float +static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float"; + +// SPV_AMD_texture_gather_bias_lod +static const char* const E_SPV_AMD_texture_gather_bias_lod = "SPV_AMD_texture_gather_bias_lod"; + +// SPV_AMD_gpu_shader_int16 +static const char* const E_SPV_AMD_gpu_shader_int16 = "SPV_AMD_gpu_shader_int16"; + +// SPV_AMD_shader_image_load_store_lod +static const char* const E_SPV_AMD_shader_image_load_store_lod = "SPV_AMD_shader_image_load_store_lod"; + +// SPV_AMD_shader_fragment_mask +static const char* const E_SPV_AMD_shader_fragment_mask = "SPV_AMD_shader_fragment_mask"; + +// SPV_AMD_gpu_shader_half_float_fetch +static const char* const E_SPV_AMD_gpu_shader_half_float_fetch = "SPV_AMD_gpu_shader_half_float_fetch"; + +#endif // #ifndef GLSLextAMD_H diff --git a/dep/glslang/SPIRV/GLSL.ext.EXT.h b/dep/glslang/SPIRV/GLSL.ext.EXT.h new file mode 100644 index 000000000..40164b618 --- /dev/null +++ b/dep/glslang/SPIRV/GLSL.ext.EXT.h @@ -0,0 +1,39 @@ +/* +** Copyright (c) 2014-2016 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. 
THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLextEXT_H +#define GLSLextEXT_H + +static const int GLSLextEXTVersion = 100; +static const int GLSLextEXTRevision = 2; + +static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shader_stencil_export"; +static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer"; +static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered"; +static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density"; +static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation"; + +#endif // #ifndef GLSLextEXT_H diff --git a/dep/glslang/SPIRV/GLSL.ext.KHR.h b/dep/glslang/SPIRV/GLSL.ext.KHR.h new file mode 100644 index 000000000..d783a8f29 --- /dev/null +++ b/dep/glslang/SPIRV/GLSL.ext.KHR.h @@ -0,0 +1,51 @@ +/* +** Copyright (c) 2014-2020 The Khronos Group Inc. +** Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. 
+*/ + +#ifndef GLSLextKHR_H +#define GLSLextKHR_H + +static const int GLSLextKHRVersion = 100; +static const int GLSLextKHRRevision = 2; + +static const char* const E_SPV_KHR_shader_ballot = "SPV_KHR_shader_ballot"; +static const char* const E_SPV_KHR_subgroup_vote = "SPV_KHR_subgroup_vote"; +static const char* const E_SPV_KHR_device_group = "SPV_KHR_device_group"; +static const char* const E_SPV_KHR_multiview = "SPV_KHR_multiview"; +static const char* const E_SPV_KHR_shader_draw_parameters = "SPV_KHR_shader_draw_parameters"; +static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit_storage"; +static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage"; +static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class"; +static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage"; +static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model"; +static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer"; +static const char* const E_SPV_KHR_physical_storage_buffer = "SPV_KHR_physical_storage_buffer"; +static const char* const E_SPV_EXT_fragment_shader_interlock = "SPV_EXT_fragment_shader_interlock"; +static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shader_clock"; +static const char* const E_SPV_KHR_non_semantic_info = "SPV_KHR_non_semantic_info"; +static const char* const E_SPV_KHR_ray_tracing = "SPV_KHR_ray_tracing"; +static const char* const E_SPV_KHR_ray_query = "SPV_KHR_ray_query"; +#endif // #ifndef GLSLextKHR_H diff --git a/dep/glslang/SPIRV/GLSL.ext.NV.h b/dep/glslang/SPIRV/GLSL.ext.NV.h new file mode 100644 index 000000000..50146da10 --- /dev/null +++ b/dep/glslang/SPIRV/GLSL.ext.NV.h @@ -0,0 +1,81 @@ +/* +** Copyright (c) 2014-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. 
+*/ + +#ifndef GLSLextNV_H +#define GLSLextNV_H + +enum BuiltIn; +enum Decoration; +enum Op; +enum Capability; + +static const int GLSLextNVVersion = 100; +static const int GLSLextNVRevision = 11; + +//SPV_NV_sample_mask_override_coverage +const char* const E_SPV_NV_sample_mask_override_coverage = "SPV_NV_sample_mask_override_coverage"; + +//SPV_NV_geometry_shader_passthrough +const char* const E_SPV_NV_geometry_shader_passthrough = "SPV_NV_geometry_shader_passthrough"; + +//SPV_NV_viewport_array2 +const char* const E_SPV_NV_viewport_array2 = "SPV_NV_viewport_array2"; +const char* const E_ARB_shader_viewport_layer_array = "SPV_ARB_shader_viewport_layer_array"; + +//SPV_NV_stereo_view_rendering +const char* const E_SPV_NV_stereo_view_rendering = "SPV_NV_stereo_view_rendering"; + +//SPV_NVX_multiview_per_view_attributes +const char* const E_SPV_NVX_multiview_per_view_attributes = "SPV_NVX_multiview_per_view_attributes"; + +//SPV_NV_shader_subgroup_partitioned +const char* const E_SPV_NV_shader_subgroup_partitioned = "SPV_NV_shader_subgroup_partitioned"; + +//SPV_NV_fragment_shader_barycentric +const char* const E_SPV_NV_fragment_shader_barycentric = "SPV_NV_fragment_shader_barycentric"; + +//SPV_NV_compute_shader_derivatives +const char* const E_SPV_NV_compute_shader_derivatives = "SPV_NV_compute_shader_derivatives"; + +//SPV_NV_shader_image_footprint +const char* const E_SPV_NV_shader_image_footprint = "SPV_NV_shader_image_footprint"; + +//SPV_NV_mesh_shader +const char* const E_SPV_NV_mesh_shader = "SPV_NV_mesh_shader"; + +//SPV_NV_raytracing +const char* const E_SPV_NV_ray_tracing = "SPV_NV_ray_tracing"; + +//SPV_NV_shading_rate +const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate"; + +//SPV_NV_cooperative_matrix +const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix"; + +//SPV_NV_shader_sm_builtins +const char* const E_SPV_NV_shader_sm_builtins = "SPV_NV_shader_sm_builtins"; + +#endif // #ifndef GLSLextNV_H diff --git a/dep/glslang/SPIRV/GLSL.std.450.h b/dep/glslang/SPIRV/GLSL.std.450.h new file mode 100644 index 000000000..df31092be --- /dev/null +++ b/dep/glslang/SPIRV/GLSL.std.450.h @@ -0,0 +1,131 @@ +/* +** Copyright (c) 2014-2016 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLstd450_H +#define GLSLstd450_H + +static const int GLSLstd450Version = 100; +static const int GLSLstd450Revision = 1; + +enum GLSLstd450 { + GLSLstd450Bad = 0, // Don't use + + GLSLstd450Round = 1, + GLSLstd450RoundEven = 2, + GLSLstd450Trunc = 3, + GLSLstd450FAbs = 4, + GLSLstd450SAbs = 5, + GLSLstd450FSign = 6, + GLSLstd450SSign = 7, + GLSLstd450Floor = 8, + GLSLstd450Ceil = 9, + GLSLstd450Fract = 10, + + GLSLstd450Radians = 11, + GLSLstd450Degrees = 12, + GLSLstd450Sin = 13, + GLSLstd450Cos = 14, + GLSLstd450Tan = 15, + GLSLstd450Asin = 16, + GLSLstd450Acos = 17, + GLSLstd450Atan = 18, + GLSLstd450Sinh = 19, + GLSLstd450Cosh = 20, + GLSLstd450Tanh = 21, + GLSLstd450Asinh = 22, + GLSLstd450Acosh = 23, + GLSLstd450Atanh = 24, + GLSLstd450Atan2 = 25, + + GLSLstd450Pow = 26, + GLSLstd450Exp = 27, + GLSLstd450Log = 28, + GLSLstd450Exp2 = 29, + GLSLstd450Log2 = 30, + GLSLstd450Sqrt = 31, + GLSLstd450InverseSqrt = 32, + + GLSLstd450Determinant = 33, + GLSLstd450MatrixInverse = 34, + + GLSLstd450Modf = 35, // second operand needs an OpVariable to write to + GLSLstd450ModfStruct = 36, // no OpVariable operand + GLSLstd450FMin = 37, + GLSLstd450UMin = 38, + GLSLstd450SMin = 39, + GLSLstd450FMax = 40, + GLSLstd450UMax = 41, + GLSLstd450SMax = 42, + GLSLstd450FClamp = 43, + GLSLstd450UClamp = 44, + GLSLstd450SClamp = 45, + GLSLstd450FMix = 46, + GLSLstd450IMix = 47, // Reserved + GLSLstd450Step = 48, + GLSLstd450SmoothStep = 49, + + GLSLstd450Fma = 50, + GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to + GLSLstd450FrexpStruct = 52, // no OpVariable operand + GLSLstd450Ldexp = 53, + + GLSLstd450PackSnorm4x8 = 54, + GLSLstd450PackUnorm4x8 = 55, + GLSLstd450PackSnorm2x16 = 56, + GLSLstd450PackUnorm2x16 = 57, + GLSLstd450PackHalf2x16 = 58, + GLSLstd450PackDouble2x32 = 59, + GLSLstd450UnpackSnorm2x16 = 60, + GLSLstd450UnpackUnorm2x16 = 61, + GLSLstd450UnpackHalf2x16 = 62, + GLSLstd450UnpackSnorm4x8 = 63, + GLSLstd450UnpackUnorm4x8 = 64, + GLSLstd450UnpackDouble2x32 = 65, + + GLSLstd450Length = 66, + GLSLstd450Distance = 67, + GLSLstd450Cross = 68, + GLSLstd450Normalize = 69, + GLSLstd450FaceForward = 70, + GLSLstd450Reflect = 71, + GLSLstd450Refract = 72, + + GLSLstd450FindILsb = 73, + GLSLstd450FindSMsb = 74, + GLSLstd450FindUMsb = 75, + + GLSLstd450InterpolateAtCentroid = 76, + GLSLstd450InterpolateAtSample = 77, + GLSLstd450InterpolateAtOffset = 78, + + GLSLstd450NMin = 79, + GLSLstd450NMax = 80, + GLSLstd450NClamp = 81, + + GLSLstd450Count +}; + +#endif // #ifndef GLSLstd450_H diff --git a/dep/glslang/SPIRV/GlslangToSpv.cpp b/dep/glslang/SPIRV/GlslangToSpv.cpp new file mode 100644 index 000000000..e41bb5de6 --- /dev/null +++ b/dep/glslang/SPIRV/GlslangToSpv.cpp @@ -0,0 +1,8727 @@ +// +// Copyright (C) 2014-2016 LunarG, Inc. +// Copyright (C) 2015-2020 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. 
+ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Visit the nodes in the glslang intermediate tree representation to +// translate them to SPIR-V. +// + +#include "spirv.hpp" +#include "GlslangToSpv.h" +#include "SpvBuilder.h" +namespace spv { + #include "GLSL.std.450.h" + #include "GLSL.ext.KHR.h" + #include "GLSL.ext.EXT.h" + #include "GLSL.ext.AMD.h" + #include "GLSL.ext.NV.h" + #include "NonSemanticDebugPrintf.h" +} + +// Glslang includes +#include "../glslang/MachineIndependent/localintermediate.h" +#include "../glslang/MachineIndependent/SymbolTable.h" +#include "../glslang/Include/Common.h" +#include "../glslang/Include/revision.h" + +#include <fstream> +#include <iomanip> +#include <list> +#include <map> +#include <stack> +#include <string> +#include <vector> + +namespace { + +namespace { +class SpecConstantOpModeGuard { +public: + SpecConstantOpModeGuard(spv::Builder* builder) + : builder_(builder) { + previous_flag_ = builder->isInSpecConstCodeGenMode(); + } + ~SpecConstantOpModeGuard() { + previous_flag_ ? builder_->setToSpecConstCodeGenMode() + : builder_->setToNormalCodeGenMode(); + } + void turnOnSpecConstantOpMode() { + builder_->setToSpecConstCodeGenMode(); + } + +private: + spv::Builder* builder_; + bool previous_flag_; +}; + +struct OpDecorations { + public: + OpDecorations(spv::Decoration precision, spv::Decoration noContraction, spv::Decoration nonUniform) : + precision(precision) +#ifndef GLSLANG_WEB + , + noContraction(noContraction), + nonUniform(nonUniform) +#endif + { } + + spv::Decoration precision; + +#ifdef GLSLANG_WEB + void addNoContraction(spv::Builder&, spv::Id) const { } + void addNonUniform(spv::Builder&, spv::Id) const { } +#else + void addNoContraction(spv::Builder& builder, spv::Id t) { builder.addDecoration(t, noContraction); } + void addNonUniform(spv::Builder& builder, spv::Id t) { builder.addDecoration(t, nonUniform); } + protected: + spv::Decoration noContraction; + spv::Decoration nonUniform; +#endif + +}; + +} // namespace + +// +// The main holder of information for translating glslang to SPIR-V. +// +// Derives from the AST walking base class.
+// +class TGlslangToSpvTraverser : public glslang::TIntermTraverser { +public: + TGlslangToSpvTraverser(unsigned int spvVersion, const glslang::TIntermediate*, spv::SpvBuildLogger* logger, + glslang::SpvOptions& options); + virtual ~TGlslangToSpvTraverser() { } + + bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*); + bool visitBinary(glslang::TVisit, glslang::TIntermBinary*); + void visitConstantUnion(glslang::TIntermConstantUnion*); + bool visitSelection(glslang::TVisit, glslang::TIntermSelection*); + bool visitSwitch(glslang::TVisit, glslang::TIntermSwitch*); + void visitSymbol(glslang::TIntermSymbol* symbol); + bool visitUnary(glslang::TVisit, glslang::TIntermUnary*); + bool visitLoop(glslang::TVisit, glslang::TIntermLoop*); + bool visitBranch(glslang::TVisit visit, glslang::TIntermBranch*); + + void finishSpv(); + void dumpSpv(std::vector<unsigned int>& out); + +protected: + TGlslangToSpvTraverser(TGlslangToSpvTraverser&); + TGlslangToSpvTraverser& operator=(TGlslangToSpvTraverser&); + + spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier); + spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier); + spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier); + spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type); + spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags); + spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags); + spv::Scope TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags); + spv::BuiltIn TranslateBuiltInDecoration(glslang::TBuiltInVariable, bool memberDeclaration); + spv::ImageFormat TranslateImageFormat(const glslang::TType& type); + spv::SelectionControlMask TranslateSelectionControl(const glslang::TIntermSelection&) const; + spv::SelectionControlMask TranslateSwitchControl(const glslang::TIntermSwitch&) const; + spv::LoopControlMask TranslateLoopControl(const glslang::TIntermLoop&, std::vector<unsigned int>& operands) const; + spv::StorageClass TranslateStorageClass(const glslang::TType&); + void addIndirectionIndexCapabilities(const glslang::TType& baseType, const glslang::TType& indexType); + spv::Id createSpvVariable(const glslang::TIntermSymbol*, spv::Id forcedType); + spv::Id getSampledType(const glslang::TSampler&); + spv::Id getInvertedSwizzleType(const glslang::TIntermTyped&); + spv::Id createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped&, spv::Id parentResult); + void convertSwizzle(const glslang::TIntermAggregate&, std::vector<spv::Id>& swizzle); + spv::Id convertGlslangToSpvType(const glslang::TType& type, bool forwardReferenceOnly = false); + spv::Id convertGlslangToSpvType(const glslang::TType& type, glslang::TLayoutPacking, const glslang::TQualifier&, + bool lastBufferBlockMember, bool forwardReferenceOnly = false); + bool filterMember(const glslang::TType& member); + spv::Id convertGlslangStructToSpvType(const glslang::TType&, const glslang::TTypeList* glslangStruct, + glslang::TLayoutPacking, const glslang::TQualifier&); + void decorateStructType(const glslang::TType&, const glslang::TTypeList* glslangStruct, glslang::TLayoutPacking, + const glslang::TQualifier&, spv::Id); + spv::Id makeArraySizeId(const glslang::TArraySizes&, int dim); + spv::Id accessChainLoad(const glslang::TType& type); + void accessChainStore(const glslang::TType& type, spv::Id rvalue); + void
multiTypeStore(const glslang::TType&, spv::Id rValue); + glslang::TLayoutPacking getExplicitLayout(const glslang::TType& type) const; + int getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking, glslang::TLayoutMatrix); + int getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking, glslang::TLayoutMatrix); + void updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, int& currentOffset, + int& nextOffset, glslang::TLayoutPacking, glslang::TLayoutMatrix); + void declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember); + + bool isShaderEntryPoint(const glslang::TIntermAggregate* node); + bool writableParam(glslang::TStorageQualifier) const; + bool originalParam(glslang::TStorageQualifier, const glslang::TType&, bool implicitThisParam); + void makeFunctions(const glslang::TIntermSequence&); + void makeGlobalInitializers(const glslang::TIntermSequence&); + void visitFunctions(const glslang::TIntermSequence&); + void handleFunctionEntry(const glslang::TIntermAggregate* node); + void translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments, + spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags); + void translateArguments(glslang::TIntermUnary& node, std::vector<spv::Id>& arguments); + spv::Id createImageTextureFunctionCall(glslang::TIntermOperator* node); + spv::Id handleUserFunctionCall(const glslang::TIntermAggregate*); + + spv::Id createBinaryOperation(glslang::TOperator op, OpDecorations&, spv::Id typeId, spv::Id left, spv::Id right, + glslang::TBasicType typeProxy, bool reduceComparison = true); + spv::Id createBinaryMatrixOperation(spv::Op, OpDecorations&, spv::Id typeId, spv::Id left, spv::Id right); + spv::Id createUnaryOperation(glslang::TOperator op, OpDecorations&, spv::Id typeId, spv::Id operand, + glslang::TBasicType typeProxy, + const spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags); + spv::Id createUnaryMatrixOperation(spv::Op op, OpDecorations&, spv::Id typeId, spv::Id operand, + glslang::TBasicType typeProxy); + spv::Id createConversion(glslang::TOperator op, OpDecorations&, spv::Id destTypeId, spv::Id operand, + glslang::TBasicType typeProxy); + spv::Id createIntWidthConversion(glslang::TOperator op, spv::Id operand, int vectorSize); + spv::Id makeSmearedConstant(spv::Id constant, int vectorSize); + spv::Id createAtomicOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, + std::vector<spv::Id>& operands, glslang::TBasicType typeProxy, + const spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags); + spv::Id createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, + glslang::TBasicType typeProxy); + spv::Id CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation, + spv::Id typeId, std::vector<spv::Id>& operands); + spv::Id createSubgroupOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, + glslang::TBasicType typeProxy); + spv::Id createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, + std::vector<spv::Id>& operands, glslang::TBasicType typeProxy); + spv::Id createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId); + spv::Id getSymbolId(const glslang::TIntermSymbol* node); + void addMeshNVDecoration(spv::Id id, int member, const glslang::TQualifier & qualifier); + spv::Id createSpvConstant(const glslang::TIntermTyped&); + spv::Id createSpvConstantFromConstUnionArray(const glslang::TType& type, const
glslang::TConstUnionArray&, + int& nextConst, bool specConstant); + bool isTrivialLeaf(const glslang::TIntermTyped* node); + bool isTrivial(const glslang::TIntermTyped* node); + spv::Id createShortCircuit(glslang::TOperator, glslang::TIntermTyped& left, glslang::TIntermTyped& right); + spv::Id getExtBuiltins(const char* name); + std::pair<spv::Id, spv::Id> getForcedType(glslang::TBuiltInVariable builtIn, const glslang::TType&); + spv::Id translateForcedType(spv::Id object); + spv::Id createCompositeConstruct(spv::Id typeId, std::vector<spv::Id> constituents); + + glslang::SpvOptions& options; + spv::Function* shaderEntry; + spv::Function* currentFunction; + spv::Instruction* entryPoint; + int sequenceDepth; + + spv::SpvBuildLogger* logger; + + // There is a 1:1 mapping between a spv builder and a module; this is thread safe + spv::Builder builder; + bool inEntryPoint; + bool entryPointTerminated; + bool linkageOnly; // true when visiting the set of objects in the AST present only for + // establishing interface, whether or not they were statically used + std::set<spv::Id> iOSet; // all input/output variables from either static use or declaration of interface + const glslang::TIntermediate* glslangIntermediate; + bool nanMinMaxClamp; // true if use NMin/NMax/NClamp instead of FMin/FMax/FClamp + spv::Id stdBuiltins; + spv::Id nonSemanticDebugPrintf; + std::unordered_map<std::string, spv::Id> extBuiltinMap; + + std::unordered_map<int, spv::Id> symbolValues; + std::unordered_set<int> rValueParameters; // set of formal function parameters passed as rValues, + // rather than a pointer + std::unordered_map<std::string, spv::Function*> functionMap; + std::unordered_map<const glslang::TTypeList*, spv::Id> structMap[glslang::ElpCount][glslang::ElmCount]; + // for mapping glslang block indices to spv indices (e.g., due to hidden members): + std::unordered_map<int, std::vector<int>> memberRemapper; + // for mapping glslang symbol struct to symbol Id + std::unordered_map<const glslang::TType*, int> glslangTypeToIdMap; + std::stack<bool> breakForLoop; // false means break for switch + std::unordered_map<std::string, const glslang::TIntermSymbol*> counterOriginator; + // Map pointee types for EbtReference to their forward pointers + std::map<const glslang::TType*, spv::Id> forwardPointers; + // Type forcing, for when SPIR-V wants a different type than the AST, + // requiring local translation to and from SPIR-V type on every access. + // Maps <variable-id -> AST-required-type-id> + std::unordered_map<spv::Id, spv::Id> forceType; +}; + +// +// Helper functions for translating glslang representations to SPIR-V enumerants. +// + +// Translate glslang profile to SPIR-V source language. +spv::SourceLanguage TranslateSourceLanguage(glslang::EShSource source, EProfile profile) +{ +#ifdef GLSLANG_WEB + return spv::SourceLanguageESSL; +#endif + + switch (source) { + case glslang::EShSourceGlsl: + switch (profile) { + case ENoProfile: + case ECoreProfile: + case ECompatibilityProfile: + return spv::SourceLanguageGLSL; + case EEsProfile: + return spv::SourceLanguageESSL; + default: + return spv::SourceLanguageUnknown; + } + case glslang::EShSourceHlsl: + return spv::SourceLanguageHLSL; + default: + return spv::SourceLanguageUnknown; + } +} + +// Translate glslang language (stage) to SPIR-V execution model.
+spv::ExecutionModel TranslateExecutionModel(EShLanguage stage) +{ + switch (stage) { + case EShLangVertex: return spv::ExecutionModelVertex; + case EShLangFragment: return spv::ExecutionModelFragment; + case EShLangCompute: return spv::ExecutionModelGLCompute; +#ifndef GLSLANG_WEB + case EShLangTessControl: return spv::ExecutionModelTessellationControl; + case EShLangTessEvaluation: return spv::ExecutionModelTessellationEvaluation; + case EShLangGeometry: return spv::ExecutionModelGeometry; + case EShLangRayGen: return spv::ExecutionModelRayGenerationKHR; + case EShLangIntersect: return spv::ExecutionModelIntersectionKHR; + case EShLangAnyHit: return spv::ExecutionModelAnyHitKHR; + case EShLangClosestHit: return spv::ExecutionModelClosestHitKHR; + case EShLangMiss: return spv::ExecutionModelMissKHR; + case EShLangCallable: return spv::ExecutionModelCallableKHR; + case EShLangTaskNV: return spv::ExecutionModelTaskNV; + case EShLangMeshNV: return spv::ExecutionModelMeshNV; +#endif + default: + assert(0); + return spv::ExecutionModelFragment; + } +} + +// Translate glslang sampler type to SPIR-V dimensionality. +spv::Dim TranslateDimensionality(const glslang::TSampler& sampler) +{ + switch (sampler.dim) { + case glslang::Esd1D: return spv::Dim1D; + case glslang::Esd2D: return spv::Dim2D; + case glslang::Esd3D: return spv::Dim3D; + case glslang::EsdCube: return spv::DimCube; + case glslang::EsdRect: return spv::DimRect; + case glslang::EsdBuffer: return spv::DimBuffer; + case glslang::EsdSubpass: return spv::DimSubpassData; + default: + assert(0); + return spv::Dim2D; + } +} + +// Translate glslang precision to SPIR-V precision decorations. +spv::Decoration TranslatePrecisionDecoration(glslang::TPrecisionQualifier glslangPrecision) +{ + switch (glslangPrecision) { + case glslang::EpqLow: return spv::DecorationRelaxedPrecision; + case glslang::EpqMedium: return spv::DecorationRelaxedPrecision; + default: + return spv::NoPrecision; + } +} + +// Translate glslang type to SPIR-V precision decorations. +spv::Decoration TranslatePrecisionDecoration(const glslang::TType& type) +{ + return TranslatePrecisionDecoration(type.getQualifier().precision); +} + +// Translate glslang type to SPIR-V block decorations. +spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useStorageBuffer) +{ + if (type.getBasicType() == glslang::EbtBlock) { + switch (type.getQualifier().storage) { + case glslang::EvqUniform: return spv::DecorationBlock; + case glslang::EvqBuffer: return useStorageBuffer ? spv::DecorationBlock : spv::DecorationBufferBlock; + case glslang::EvqVaryingIn: return spv::DecorationBlock; + case glslang::EvqVaryingOut: return spv::DecorationBlock; +#ifndef GLSLANG_WEB + case glslang::EvqPayload: return spv::DecorationBlock; + case glslang::EvqPayloadIn: return spv::DecorationBlock; + case glslang::EvqHitAttr: return spv::DecorationBlock; + case glslang::EvqCallableData: return spv::DecorationBlock; + case glslang::EvqCallableDataIn: return spv::DecorationBlock; +#endif + default: + assert(0); + break; + } + } + + return spv::DecorationMax; +} + +// Translate glslang type to SPIR-V memory decorations. 
+void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory, + bool useVulkanMemoryModel) +{ + if (!useVulkanMemoryModel) { + if (qualifier.isCoherent()) + memory.push_back(spv::DecorationCoherent); + if (qualifier.isVolatile()) { + memory.push_back(spv::DecorationVolatile); + memory.push_back(spv::DecorationCoherent); + } + } + if (qualifier.isRestrict()) + memory.push_back(spv::DecorationRestrict); + if (qualifier.isReadOnly()) + memory.push_back(spv::DecorationNonWritable); + if (qualifier.isWriteOnly()) + memory.push_back(spv::DecorationNonReadable); +} + +// Translate glslang type to SPIR-V layout decorations. +spv::Decoration TranslateLayoutDecoration(const glslang::TType& type, glslang::TLayoutMatrix matrixLayout) +{ + if (type.isMatrix()) { + switch (matrixLayout) { + case glslang::ElmRowMajor: + return spv::DecorationRowMajor; + case glslang::ElmColumnMajor: + return spv::DecorationColMajor; + default: + // opaque layouts don't need a majorness + return spv::DecorationMax; + } + } else { + switch (type.getBasicType()) { + default: + return spv::DecorationMax; + break; + case glslang::EbtBlock: + switch (type.getQualifier().storage) { + case glslang::EvqUniform: + case glslang::EvqBuffer: + switch (type.getQualifier().layoutPacking) { + case glslang::ElpShared: return spv::DecorationGLSLShared; + case glslang::ElpPacked: return spv::DecorationGLSLPacked; + default: + return spv::DecorationMax; + } + case glslang::EvqVaryingIn: + case glslang::EvqVaryingOut: + if (type.getQualifier().isTaskMemory()) { + switch (type.getQualifier().layoutPacking) { + case glslang::ElpShared: return spv::DecorationGLSLShared; + case glslang::ElpPacked: return spv::DecorationGLSLPacked; + default: break; + } + } else { + assert(type.getQualifier().layoutPacking == glslang::ElpNone); + } + return spv::DecorationMax; +#ifndef GLSLANG_WEB + case glslang::EvqPayload: + case glslang::EvqPayloadIn: + case glslang::EvqHitAttr: + case glslang::EvqCallableData: + case glslang::EvqCallableDataIn: + return spv::DecorationMax; +#endif + default: + assert(0); + return spv::DecorationMax; + } + } + } +} + +// Translate glslang type to SPIR-V interpolation decorations. +// Returns spv::DecorationMax when no decoration +// should be applied. +spv::Decoration TGlslangToSpvTraverser::TranslateInterpolationDecoration(const glslang::TQualifier& qualifier) +{ + if (qualifier.smooth) + // Smooth decoration doesn't exist in SPIR-V 1.0 + return spv::DecorationMax; + else if (qualifier.isNonPerspective()) + return spv::DecorationNoPerspective; + else if (qualifier.flat) + return spv::DecorationFlat; + else if (qualifier.isExplicitInterpolation()) { + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::DecorationExplicitInterpAMD; + } + else + return spv::DecorationMax; +} + +// Translate glslang type to SPIR-V auxiliary storage decorations. +// Returns spv::DecorationMax when no decoration +// should be applied. +spv::Decoration TGlslangToSpvTraverser::TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier) +{ + if (qualifier.centroid) + return spv::DecorationCentroid; +#ifndef GLSLANG_WEB + else if (qualifier.patch) + return spv::DecorationPatch; + else if (qualifier.sample) { + builder.addCapability(spv::CapabilitySampleRateShading); + return spv::DecorationSample; + } +#endif + + return spv::DecorationMax; +} + +// If glslang type is invariant, return SPIR-V invariant decoration.
+spv::Decoration TranslateInvariantDecoration(const glslang::TQualifier& qualifier) +{ + if (qualifier.invariant) + return spv::DecorationInvariant; + else + return spv::DecorationMax; +} + +// If glslang type is noContraction, return SPIR-V NoContraction decoration. +spv::Decoration TranslateNoContractionDecoration(const glslang::TQualifier& qualifier) +{ +#ifndef GLSLANG_WEB + if (qualifier.isNoContraction()) + return spv::DecorationNoContraction; + else +#endif + return spv::DecorationMax; +} + +// If glslang type is nonUniform, return SPIR-V NonUniform decoration. +spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glslang::TQualifier& qualifier) +{ +#ifndef GLSLANG_WEB + if (qualifier.isNonUniform()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderNonUniformEXT); + return spv::DecorationNonUniformEXT; + } else +#endif + return spv::DecorationMax; +} + +spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess( + const spv::Builder::AccessChain::CoherentFlags &coherentFlags) +{ + spv::MemoryAccessMask mask = spv::MemoryAccessMaskNone; + +#ifndef GLSLANG_WEB + if (!glslangIntermediate->usingVulkanMemoryModel() || coherentFlags.isImage) + return mask; + + if (coherentFlags.isVolatile() || coherentFlags.anyCoherent()) { + mask = mask | spv::MemoryAccessMakePointerAvailableKHRMask | + spv::MemoryAccessMakePointerVisibleKHRMask; + } + + if (coherentFlags.nonprivate) { + mask = mask | spv::MemoryAccessNonPrivatePointerKHRMask; + } + if (coherentFlags.volatil) { + mask = mask | spv::MemoryAccessVolatileMask; + } + if (mask != spv::MemoryAccessMaskNone) { + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + } +#endif + + return mask; +} + +spv::ImageOperandsMask TGlslangToSpvTraverser::TranslateImageOperands( + const spv::Builder::AccessChain::CoherentFlags &coherentFlags) +{ + spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone; + +#ifndef GLSLANG_WEB + if (!glslangIntermediate->usingVulkanMemoryModel()) + return mask; + + if (coherentFlags.volatil || + coherentFlags.anyCoherent()) { + mask = mask | spv::ImageOperandsMakeTexelAvailableKHRMask | + spv::ImageOperandsMakeTexelVisibleKHRMask; + } + if (coherentFlags.nonprivate) { + mask = mask | spv::ImageOperandsNonPrivateTexelKHRMask; + } + if (coherentFlags.volatil) { + mask = mask | spv::ImageOperandsVolatileTexelKHRMask; + } + if (mask != spv::ImageOperandsMaskNone) { + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + } +#endif + + return mask; +} + +spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCoherent(const glslang::TType& type) +{ + spv::Builder::AccessChain::CoherentFlags flags = {}; +#ifndef GLSLANG_WEB + flags.coherent = type.getQualifier().coherent; + flags.devicecoherent = type.getQualifier().devicecoherent; + flags.queuefamilycoherent = type.getQualifier().queuefamilycoherent; + // shared variables are implicitly workgroupcoherent in GLSL. 
+ flags.workgroupcoherent = type.getQualifier().workgroupcoherent || + type.getQualifier().storage == glslang::EvqShared; + flags.subgroupcoherent = type.getQualifier().subgroupcoherent; + flags.shadercallcoherent = type.getQualifier().shadercallcoherent; + flags.volatil = type.getQualifier().volatil; + // *coherent variables are implicitly nonprivate in GLSL + flags.nonprivate = type.getQualifier().nonprivate || + flags.anyCoherent() || + flags.volatil; + flags.isImage = type.getBasicType() == glslang::EbtSampler; +#endif + return flags; +} + +spv::Scope TGlslangToSpvTraverser::TranslateMemoryScope( + const spv::Builder::AccessChain::CoherentFlags &coherentFlags) +{ + spv::Scope scope = spv::ScopeMax; + +#ifndef GLSLANG_WEB + if (coherentFlags.volatil || coherentFlags.coherent) { + // coherent defaults to Device scope in the old model, QueueFamilyKHR scope in the new model + scope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice; + } else if (coherentFlags.devicecoherent) { + scope = spv::ScopeDevice; + } else if (coherentFlags.queuefamilycoherent) { + scope = spv::ScopeQueueFamilyKHR; + } else if (coherentFlags.workgroupcoherent) { + scope = spv::ScopeWorkgroup; + } else if (coherentFlags.subgroupcoherent) { + scope = spv::ScopeSubgroup; + } else if (coherentFlags.shadercallcoherent) { + scope = spv::ScopeShaderCallKHR; + } + if (glslangIntermediate->usingVulkanMemoryModel() && scope == spv::ScopeDevice) { + builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR); + } +#endif + + return scope; +} + +// Translate a glslang built-in variable to a SPIR-V built in decoration. Also generate +// associated capabilities when required. For some built-in variables, a capability +// is generated only when using the variable in an executable instruction, but not when +// just declaring a struct member variable with it. This is true for PointSize, +// ClipDistance, and CullDistance. +spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltInVariable builtIn, + bool memberDeclaration) +{ + switch (builtIn) { + case glslang::EbvPointSize: +#ifndef GLSLANG_WEB + // Defer adding the capability until the built-in is actually used. + if (! 
memberDeclaration) { + switch (glslangIntermediate->getStage()) { + case EShLangGeometry: + builder.addCapability(spv::CapabilityGeometryPointSize); + break; + case EShLangTessControl: + case EShLangTessEvaluation: + builder.addCapability(spv::CapabilityTessellationPointSize); + break; + default: + break; + } + } +#endif + return spv::BuiltInPointSize; + + case glslang::EbvPosition: return spv::BuiltInPosition; + case glslang::EbvVertexId: return spv::BuiltInVertexId; + case glslang::EbvInstanceId: return spv::BuiltInInstanceId; + case glslang::EbvVertexIndex: return spv::BuiltInVertexIndex; + case glslang::EbvInstanceIndex: return spv::BuiltInInstanceIndex; + + case glslang::EbvFragCoord: return spv::BuiltInFragCoord; + case glslang::EbvPointCoord: return spv::BuiltInPointCoord; + case glslang::EbvFace: return spv::BuiltInFrontFacing; + case glslang::EbvFragDepth: return spv::BuiltInFragDepth; + + case glslang::EbvNumWorkGroups: return spv::BuiltInNumWorkgroups; + case glslang::EbvWorkGroupSize: return spv::BuiltInWorkgroupSize; + case glslang::EbvWorkGroupId: return spv::BuiltInWorkgroupId; + case glslang::EbvLocalInvocationId: return spv::BuiltInLocalInvocationId; + case glslang::EbvLocalInvocationIndex: return spv::BuiltInLocalInvocationIndex; + case glslang::EbvGlobalInvocationId: return spv::BuiltInGlobalInvocationId; + +#ifndef GLSLANG_WEB + // These *Distance capabilities logically belong here, but if the member is declared and + // then never used, consumers of SPIR-V prefer the capability not be declared. + // They are now generated when used, rather than here when declared. + // Potentially, the specification should be more clear what the minimum + // use needed is to trigger the capability. + // + case glslang::EbvClipDistance: + if (!memberDeclaration) + builder.addCapability(spv::CapabilityClipDistance); + return spv::BuiltInClipDistance; + + case glslang::EbvCullDistance: + if (!memberDeclaration) + builder.addCapability(spv::CapabilityCullDistance); + return spv::BuiltInCullDistance; + + case glslang::EbvViewportIndex: + builder.addCapability(spv::CapabilityMultiViewport); + if (glslangIntermediate->getStage() == EShLangVertex || + glslangIntermediate->getStage() == EShLangTessControl || + glslangIntermediate->getStage() == EShLangTessEvaluation) { + + builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT); + } + return spv::BuiltInViewportIndex; + + case glslang::EbvSampleId: + builder.addCapability(spv::CapabilitySampleRateShading); + return spv::BuiltInSampleId; + + case glslang::EbvSamplePosition: + builder.addCapability(spv::CapabilitySampleRateShading); + return spv::BuiltInSamplePosition; + + case glslang::EbvSampleMask: + return spv::BuiltInSampleMask; + + case glslang::EbvLayer: + if (glslangIntermediate->getStage() == EShLangMeshNV) { + return spv::BuiltInLayer; + } + builder.addCapability(spv::CapabilityGeometry); + if (glslangIntermediate->getStage() == EShLangVertex || + glslangIntermediate->getStage() == EShLangTessControl || + glslangIntermediate->getStage() == EShLangTessEvaluation) { + + builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT); + } + return spv::BuiltInLayer; + + case glslang::EbvBaseVertex: + builder.addIncorporatedExtension(spv::E_SPV_KHR_shader_draw_parameters, spv::Spv_1_3); + builder.addCapability(spv::CapabilityDrawParameters); + 
return spv::BuiltInBaseVertex; + + case glslang::EbvBaseInstance: + builder.addIncorporatedExtension(spv::E_SPV_KHR_shader_draw_parameters, spv::Spv_1_3); + builder.addCapability(spv::CapabilityDrawParameters); + return spv::BuiltInBaseInstance; + + case glslang::EbvDrawId: + builder.addIncorporatedExtension(spv::E_SPV_KHR_shader_draw_parameters, spv::Spv_1_3); + builder.addCapability(spv::CapabilityDrawParameters); + return spv::BuiltInDrawIndex; + + case glslang::EbvPrimitiveId: + if (glslangIntermediate->getStage() == EShLangFragment) + builder.addCapability(spv::CapabilityGeometry); + return spv::BuiltInPrimitiveId; + + case glslang::EbvFragStencilRef: + builder.addExtension(spv::E_SPV_EXT_shader_stencil_export); + builder.addCapability(spv::CapabilityStencilExportEXT); + return spv::BuiltInFragStencilRefEXT; + + case glslang::EbvInvocationId: return spv::BuiltInInvocationId; + case glslang::EbvTessLevelInner: return spv::BuiltInTessLevelInner; + case glslang::EbvTessLevelOuter: return spv::BuiltInTessLevelOuter; + case glslang::EbvTessCoord: return spv::BuiltInTessCoord; + case glslang::EbvPatchVertices: return spv::BuiltInPatchVertices; + case glslang::EbvHelperInvocation: return spv::BuiltInHelperInvocation; + + case glslang::EbvSubGroupSize: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupSize; + + case glslang::EbvSubGroupInvocation: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupLocalInvocationId; + + case glslang::EbvSubGroupEqMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupEqMask; + + case glslang::EbvSubGroupGeMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupGeMask; + + case glslang::EbvSubGroupGtMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupGtMask; + + case glslang::EbvSubGroupLeMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupLeMask; + + case glslang::EbvSubGroupLtMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupLtMask; + + case glslang::EbvNumSubgroups: + builder.addCapability(spv::CapabilityGroupNonUniform); + return spv::BuiltInNumSubgroups; + + case glslang::EbvSubgroupID: + builder.addCapability(spv::CapabilityGroupNonUniform); + return spv::BuiltInSubgroupId; + + case glslang::EbvSubgroupSize2: + builder.addCapability(spv::CapabilityGroupNonUniform); + return spv::BuiltInSubgroupSize; + + case glslang::EbvSubgroupInvocation2: + builder.addCapability(spv::CapabilityGroupNonUniform); + return spv::BuiltInSubgroupLocalInvocationId; + + case glslang::EbvSubgroupEqMask2: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupEqMask; + + case glslang::EbvSubgroupGeMask2: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupGeMask; + + case glslang::EbvSubgroupGtMask2: + 
builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupGtMask; + + case glslang::EbvSubgroupLeMask2: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupLeMask; + + case glslang::EbvSubgroupLtMask2: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupLtMask; + + case glslang::EbvBaryCoordNoPersp: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordNoPerspAMD; + + case glslang::EbvBaryCoordNoPerspCentroid: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordNoPerspCentroidAMD; + + case glslang::EbvBaryCoordNoPerspSample: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordNoPerspSampleAMD; + + case glslang::EbvBaryCoordSmooth: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordSmoothAMD; + + case glslang::EbvBaryCoordSmoothCentroid: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordSmoothCentroidAMD; + + case glslang::EbvBaryCoordSmoothSample: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordSmoothSampleAMD; + + case glslang::EbvBaryCoordPullModel: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordPullModelAMD; + + case glslang::EbvDeviceIndex: + builder.addIncorporatedExtension(spv::E_SPV_KHR_device_group, spv::Spv_1_3); + builder.addCapability(spv::CapabilityDeviceGroup); + return spv::BuiltInDeviceIndex; + + case glslang::EbvViewIndex: + builder.addIncorporatedExtension(spv::E_SPV_KHR_multiview, spv::Spv_1_3); + builder.addCapability(spv::CapabilityMultiView); + return spv::BuiltInViewIndex; + + case glslang::EbvFragSizeEXT: + builder.addExtension(spv::E_SPV_EXT_fragment_invocation_density); + builder.addCapability(spv::CapabilityFragmentDensityEXT); + return spv::BuiltInFragSizeEXT; + + case glslang::EbvFragInvocationCountEXT: + builder.addExtension(spv::E_SPV_EXT_fragment_invocation_density); + builder.addCapability(spv::CapabilityFragmentDensityEXT); + return spv::BuiltInFragInvocationCountEXT; + + case glslang::EbvViewportMaskNV: + if (!memberDeclaration) { + builder.addExtension(spv::E_SPV_NV_viewport_array2); + builder.addCapability(spv::CapabilityShaderViewportMaskNV); + } + return spv::BuiltInViewportMaskNV; + case glslang::EbvSecondaryPositionNV: + if (!memberDeclaration) { + builder.addExtension(spv::E_SPV_NV_stereo_view_rendering); + builder.addCapability(spv::CapabilityShaderStereoViewNV); + } + return spv::BuiltInSecondaryPositionNV; + case glslang::EbvSecondaryViewportMaskNV: + if (!memberDeclaration) { + builder.addExtension(spv::E_SPV_NV_stereo_view_rendering); + builder.addCapability(spv::CapabilityShaderStereoViewNV); + } + return spv::BuiltInSecondaryViewportMaskNV; + case glslang::EbvPositionPerViewNV: + if (!memberDeclaration) { + builder.addExtension(spv::E_SPV_NVX_multiview_per_view_attributes); + builder.addCapability(spv::CapabilityPerViewAttributesNV); + } + return spv::BuiltInPositionPerViewNV; + case glslang::EbvViewportMaskPerViewNV: + if (!memberDeclaration) { + 
builder.addExtension(spv::E_SPV_NVX_multiview_per_view_attributes); + builder.addCapability(spv::CapabilityPerViewAttributesNV); + } + return spv::BuiltInViewportMaskPerViewNV; + case glslang::EbvFragFullyCoveredNV: + builder.addExtension(spv::E_SPV_EXT_fragment_fully_covered); + builder.addCapability(spv::CapabilityFragmentFullyCoveredEXT); + return spv::BuiltInFullyCoveredEXT; + case glslang::EbvFragmentSizeNV: + builder.addExtension(spv::E_SPV_NV_shading_rate); + builder.addCapability(spv::CapabilityShadingRateNV); + return spv::BuiltInFragmentSizeNV; + case glslang::EbvInvocationsPerPixelNV: + builder.addExtension(spv::E_SPV_NV_shading_rate); + builder.addCapability(spv::CapabilityShadingRateNV); + return spv::BuiltInInvocationsPerPixelNV; + + // ray tracing + case glslang::EbvLaunchId: + return spv::BuiltInLaunchIdKHR; + case glslang::EbvLaunchSize: + return spv::BuiltInLaunchSizeKHR; + case glslang::EbvWorldRayOrigin: + return spv::BuiltInWorldRayOriginKHR; + case glslang::EbvWorldRayDirection: + return spv::BuiltInWorldRayDirectionKHR; + case glslang::EbvObjectRayOrigin: + return spv::BuiltInObjectRayOriginKHR; + case glslang::EbvObjectRayDirection: + return spv::BuiltInObjectRayDirectionKHR; + case glslang::EbvRayTmin: + return spv::BuiltInRayTminKHR; + case glslang::EbvRayTmax: + return spv::BuiltInRayTmaxKHR; + case glslang::EbvInstanceCustomIndex: + return spv::BuiltInInstanceCustomIndexKHR; + case glslang::EbvHitT: + return spv::BuiltInHitTKHR; + case glslang::EbvHitKind: + return spv::BuiltInHitKindKHR; + case glslang::EbvObjectToWorld: + case glslang::EbvObjectToWorld3x4: + return spv::BuiltInObjectToWorldKHR; + case glslang::EbvWorldToObject: + case glslang::EbvWorldToObject3x4: + return spv::BuiltInWorldToObjectKHR; + case glslang::EbvIncomingRayFlags: + return spv::BuiltInIncomingRayFlagsKHR; + case glslang::EbvGeometryIndex: + return spv::BuiltInRayGeometryIndexKHR; + + // barycentrics + case glslang::EbvBaryCoordNV: + builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric); + builder.addCapability(spv::CapabilityFragmentBarycentricNV); + return spv::BuiltInBaryCoordNV; + case glslang::EbvBaryCoordNoPerspNV: + builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric); + builder.addCapability(spv::CapabilityFragmentBarycentricNV); + return spv::BuiltInBaryCoordNoPerspNV; + + // mesh shaders + case glslang::EbvTaskCountNV: + return spv::BuiltInTaskCountNV; + case glslang::EbvPrimitiveCountNV: + return spv::BuiltInPrimitiveCountNV; + case glslang::EbvPrimitiveIndicesNV: + return spv::BuiltInPrimitiveIndicesNV; + case glslang::EbvClipDistancePerViewNV: + return spv::BuiltInClipDistancePerViewNV; + case glslang::EbvCullDistancePerViewNV: + return spv::BuiltInCullDistancePerViewNV; + case glslang::EbvLayerPerViewNV: + return spv::BuiltInLayerPerViewNV; + case glslang::EbvMeshViewCountNV: + return spv::BuiltInMeshViewCountNV; + case glslang::EbvMeshViewIndicesNV: + return spv::BuiltInMeshViewIndicesNV; + + // sm builtins + case glslang::EbvWarpsPerSM: + builder.addExtension(spv::E_SPV_NV_shader_sm_builtins); + builder.addCapability(spv::CapabilityShaderSMBuiltinsNV); + return spv::BuiltInWarpsPerSMNV; + case glslang::EbvSMCount: + builder.addExtension(spv::E_SPV_NV_shader_sm_builtins); + builder.addCapability(spv::CapabilityShaderSMBuiltinsNV); + return spv::BuiltInSMCountNV; + case glslang::EbvWarpID: + builder.addExtension(spv::E_SPV_NV_shader_sm_builtins); + builder.addCapability(spv::CapabilityShaderSMBuiltinsNV); + return spv::BuiltInWarpIDNV; + case 
glslang::EbvSMID: + builder.addExtension(spv::E_SPV_NV_shader_sm_builtins); + builder.addCapability(spv::CapabilityShaderSMBuiltinsNV); + return spv::BuiltInSMIDNV; +#endif + + default: + return spv::BuiltInMax; + } +} + +// Translate glslang image layout format to SPIR-V image format. +spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TType& type) +{ + assert(type.getBasicType() == glslang::EbtSampler); + +#ifdef GLSLANG_WEB + return spv::ImageFormatUnknown; +#endif + + // Check for capabilities + switch (type.getQualifier().getFormat()) { + case glslang::ElfRg32f: + case glslang::ElfRg16f: + case glslang::ElfR11fG11fB10f: + case glslang::ElfR16f: + case glslang::ElfRgba16: + case glslang::ElfRgb10A2: + case glslang::ElfRg16: + case glslang::ElfRg8: + case glslang::ElfR16: + case glslang::ElfR8: + case glslang::ElfRgba16Snorm: + case glslang::ElfRg16Snorm: + case glslang::ElfRg8Snorm: + case glslang::ElfR16Snorm: + case glslang::ElfR8Snorm: + + case glslang::ElfRg32i: + case glslang::ElfRg16i: + case glslang::ElfRg8i: + case glslang::ElfR16i: + case glslang::ElfR8i: + + case glslang::ElfRgb10a2ui: + case glslang::ElfRg32ui: + case glslang::ElfRg16ui: + case glslang::ElfRg8ui: + case glslang::ElfR16ui: + case glslang::ElfR8ui: + builder.addCapability(spv::CapabilityStorageImageExtendedFormats); + break; + + default: + break; + } + + // do the translation + switch (type.getQualifier().getFormat()) { + case glslang::ElfNone: return spv::ImageFormatUnknown; + case glslang::ElfRgba32f: return spv::ImageFormatRgba32f; + case glslang::ElfRgba16f: return spv::ImageFormatRgba16f; + case glslang::ElfR32f: return spv::ImageFormatR32f; + case glslang::ElfRgba8: return spv::ImageFormatRgba8; + case glslang::ElfRgba8Snorm: return spv::ImageFormatRgba8Snorm; + case glslang::ElfRg32f: return spv::ImageFormatRg32f; + case glslang::ElfRg16f: return spv::ImageFormatRg16f; + case glslang::ElfR11fG11fB10f: return spv::ImageFormatR11fG11fB10f; + case glslang::ElfR16f: return spv::ImageFormatR16f; + case glslang::ElfRgba16: return spv::ImageFormatRgba16; + case glslang::ElfRgb10A2: return spv::ImageFormatRgb10A2; + case glslang::ElfRg16: return spv::ImageFormatRg16; + case glslang::ElfRg8: return spv::ImageFormatRg8; + case glslang::ElfR16: return spv::ImageFormatR16; + case glslang::ElfR8: return spv::ImageFormatR8; + case glslang::ElfRgba16Snorm: return spv::ImageFormatRgba16Snorm; + case glslang::ElfRg16Snorm: return spv::ImageFormatRg16Snorm; + case glslang::ElfRg8Snorm: return spv::ImageFormatRg8Snorm; + case glslang::ElfR16Snorm: return spv::ImageFormatR16Snorm; + case glslang::ElfR8Snorm: return spv::ImageFormatR8Snorm; + case glslang::ElfRgba32i: return spv::ImageFormatRgba32i; + case glslang::ElfRgba16i: return spv::ImageFormatRgba16i; + case glslang::ElfRgba8i: return spv::ImageFormatRgba8i; + case glslang::ElfR32i: return spv::ImageFormatR32i; + case glslang::ElfRg32i: return spv::ImageFormatRg32i; + case glslang::ElfRg16i: return spv::ImageFormatRg16i; + case glslang::ElfRg8i: return spv::ImageFormatRg8i; + case glslang::ElfR16i: return spv::ImageFormatR16i; + case glslang::ElfR8i: return spv::ImageFormatR8i; + case glslang::ElfRgba32ui: return spv::ImageFormatRgba32ui; + case glslang::ElfRgba16ui: return spv::ImageFormatRgba16ui; + case glslang::ElfRgba8ui: return spv::ImageFormatRgba8ui; + case glslang::ElfR32ui: return spv::ImageFormatR32ui; + case glslang::ElfRg32ui: return spv::ImageFormatRg32ui; + case glslang::ElfRg16ui: return spv::ImageFormatRg16ui; + case 
glslang::ElfRgb10a2ui: return spv::ImageFormatRgb10a2ui; + case glslang::ElfRg8ui: return spv::ImageFormatRg8ui; + case glslang::ElfR16ui: return spv::ImageFormatR16ui; + case glslang::ElfR8ui: return spv::ImageFormatR8ui; + default: return spv::ImageFormatMax; + } +} + +spv::SelectionControlMask TGlslangToSpvTraverser::TranslateSelectionControl( + const glslang::TIntermSelection& selectionNode) const +{ + if (selectionNode.getFlatten()) + return spv::SelectionControlFlattenMask; + if (selectionNode.getDontFlatten()) + return spv::SelectionControlDontFlattenMask; + return spv::SelectionControlMaskNone; +} + +spv::SelectionControlMask TGlslangToSpvTraverser::TranslateSwitchControl(const glslang::TIntermSwitch& switchNode) + const +{ + if (switchNode.getFlatten()) + return spv::SelectionControlFlattenMask; + if (switchNode.getDontFlatten()) + return spv::SelectionControlDontFlattenMask; + return spv::SelectionControlMaskNone; +} + +// return a non-0 dependency if the dependency argument must be set +spv::LoopControlMask TGlslangToSpvTraverser::TranslateLoopControl(const glslang::TIntermLoop& loopNode, + std::vector<unsigned int>& operands) const +{ + spv::LoopControlMask control = spv::LoopControlMaskNone; + + if (loopNode.getDontUnroll()) + control = control | spv::LoopControlDontUnrollMask; + if (loopNode.getUnroll()) + control = control | spv::LoopControlUnrollMask; + if (unsigned(loopNode.getLoopDependency()) == glslang::TIntermLoop::dependencyInfinite) + control = control | spv::LoopControlDependencyInfiniteMask; + else if (loopNode.getLoopDependency() > 0) { + control = control | spv::LoopControlDependencyLengthMask; + operands.push_back((unsigned int)loopNode.getLoopDependency()); + } + if (glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4) { + if (loopNode.getMinIterations() > 0) { + control = control | spv::LoopControlMinIterationsMask; + operands.push_back(loopNode.getMinIterations()); + } + if (loopNode.getMaxIterations() < glslang::TIntermLoop::iterationsInfinite) { + control = control | spv::LoopControlMaxIterationsMask; + operands.push_back(loopNode.getMaxIterations()); + } + if (loopNode.getIterationMultiple() > 1) { + control = control | spv::LoopControlIterationMultipleMask; + operands.push_back(loopNode.getIterationMultiple()); + } + if (loopNode.getPeelCount() > 0) { + control = control | spv::LoopControlPeelCountMask; + operands.push_back(loopNode.getPeelCount()); + } + if (loopNode.getPartialCount() > 0) { + control = control | spv::LoopControlPartialCountMask; + operands.push_back(loopNode.getPartialCount()); + } + } + + return control; +} + +// Translate glslang type to SPIR-V storage class.
+spv::StorageClass TGlslangToSpvTraverser::TranslateStorageClass(const glslang::TType& type) +{ + if (type.getBasicType() == glslang::EbtRayQuery) + return spv::StorageClassFunction; + if (type.getQualifier().isPipeInput()) + return spv::StorageClassInput; + if (type.getQualifier().isPipeOutput()) + return spv::StorageClassOutput; + + if (glslangIntermediate->getSource() != glslang::EShSourceHlsl || + type.getQualifier().storage == glslang::EvqUniform) { + if (type.isAtomic()) + return spv::StorageClassAtomicCounter; + if (type.containsOpaque()) + return spv::StorageClassUniformConstant; + } + + if (type.getQualifier().isUniformOrBuffer() && + type.getQualifier().isShaderRecord()) { + return spv::StorageClassShaderRecordBufferKHR; + } + + if (glslangIntermediate->usingStorageBuffer() && type.getQualifier().storage == glslang::EvqBuffer) { + builder.addIncorporatedExtension(spv::E_SPV_KHR_storage_buffer_storage_class, spv::Spv_1_3); + return spv::StorageClassStorageBuffer; + } + + if (type.getQualifier().isUniformOrBuffer()) { + if (type.getQualifier().isPushConstant()) + return spv::StorageClassPushConstant; + if (type.getBasicType() == glslang::EbtBlock) + return spv::StorageClassUniform; + return spv::StorageClassUniformConstant; + } + + switch (type.getQualifier().storage) { + case glslang::EvqGlobal: return spv::StorageClassPrivate; + case glslang::EvqConstReadOnly: return spv::StorageClassFunction; + case glslang::EvqTemporary: return spv::StorageClassFunction; + case glslang::EvqShared: return spv::StorageClassWorkgroup; +#ifndef GLSLANG_WEB + case glslang::EvqPayload: return spv::StorageClassRayPayloadKHR; + case glslang::EvqPayloadIn: return spv::StorageClassIncomingRayPayloadKHR; + case glslang::EvqHitAttr: return spv::StorageClassHitAttributeKHR; + case glslang::EvqCallableData: return spv::StorageClassCallableDataKHR; + case glslang::EvqCallableDataIn: return spv::StorageClassIncomingCallableDataKHR; +#endif + default: + assert(0); + break; + } + + return spv::StorageClassFunction; +} + +// Add capabilities pertaining to how an array is indexed. 
+void TGlslangToSpvTraverser::addIndirectionIndexCapabilities(const glslang::TType& baseType, + const glslang::TType& indexType) +{ +#ifndef GLSLANG_WEB + if (indexType.getQualifier().isNonUniform()) { + // deal with an asserted non-uniform index + // SPV_EXT_descriptor_indexing already added in TranslateNonUniformDecoration + if (baseType.getBasicType() == glslang::EbtSampler) { + if (baseType.getQualifier().hasAttachment()) + builder.addCapability(spv::CapabilityInputAttachmentArrayNonUniformIndexingEXT); + else if (baseType.isImage() && baseType.getSampler().isBuffer()) + builder.addCapability(spv::CapabilityStorageTexelBufferArrayNonUniformIndexingEXT); + else if (baseType.isTexture() && baseType.getSampler().isBuffer()) + builder.addCapability(spv::CapabilityUniformTexelBufferArrayNonUniformIndexingEXT); + else if (baseType.isImage()) + builder.addCapability(spv::CapabilityStorageImageArrayNonUniformIndexingEXT); + else if (baseType.isTexture()) + builder.addCapability(spv::CapabilitySampledImageArrayNonUniformIndexingEXT); + } else if (baseType.getBasicType() == glslang::EbtBlock) { + if (baseType.getQualifier().storage == glslang::EvqBuffer) + builder.addCapability(spv::CapabilityStorageBufferArrayNonUniformIndexingEXT); + else if (baseType.getQualifier().storage == glslang::EvqUniform) + builder.addCapability(spv::CapabilityUniformBufferArrayNonUniformIndexingEXT); + } + } else { + // assume a dynamically uniform index + if (baseType.getBasicType() == glslang::EbtSampler) { + if (baseType.getQualifier().hasAttachment()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityInputAttachmentArrayDynamicIndexingEXT); + } else if (baseType.isImage() && baseType.getSampler().isBuffer()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityStorageTexelBufferArrayDynamicIndexingEXT); + } else if (baseType.isTexture() && baseType.getSampler().isBuffer()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityUniformTexelBufferArrayDynamicIndexingEXT); + } + } + } +#endif +} + +// Return whether or not the given type is something that should be tied to a +// descriptor set. +bool IsDescriptorResource(const glslang::TType& type) +{ + // uniform and buffer blocks are included, unless it is a push_constant + if (type.getBasicType() == glslang::EbtBlock) + return type.getQualifier().isUniformOrBuffer() && + ! type.getQualifier().isShaderRecord() && + ! type.getQualifier().isPushConstant(); + + // non block... + // basically samplerXXX/subpass/sampler/texture are all included + // if they are the global-scope-class, not the function parameter + // (or local, if they ever exist) class. + if (type.getBasicType() == glslang::EbtSampler) + return type.getQualifier().isUniformOrBuffer(); + + // None of the above. 
+ return false; +} + +void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& parent) +{ + if (child.layoutMatrix == glslang::ElmNone) + child.layoutMatrix = parent.layoutMatrix; + + if (parent.invariant) + child.invariant = true; + if (parent.flat) + child.flat = true; + if (parent.centroid) + child.centroid = true; +#ifndef GLSLANG_WEB + if (parent.nopersp) + child.nopersp = true; + if (parent.explicitInterp) + child.explicitInterp = true; + if (parent.perPrimitiveNV) + child.perPrimitiveNV = true; + if (parent.perViewNV) + child.perViewNV = true; + if (parent.perTaskNV) + child.perTaskNV = true; + if (parent.patch) + child.patch = true; + if (parent.sample) + child.sample = true; + if (parent.coherent) + child.coherent = true; + if (parent.devicecoherent) + child.devicecoherent = true; + if (parent.queuefamilycoherent) + child.queuefamilycoherent = true; + if (parent.workgroupcoherent) + child.workgroupcoherent = true; + if (parent.subgroupcoherent) + child.subgroupcoherent = true; + if (parent.shadercallcoherent) + child.shadercallcoherent = true; + if (parent.nonprivate) + child.nonprivate = true; + if (parent.volatil) + child.volatil = true; + if (parent.restrict) + child.restrict = true; + if (parent.readonly) + child.readonly = true; + if (parent.writeonly) + child.writeonly = true; +#endif +} + +bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier) +{ + // This should list qualifiers that simultaneous satisfy: + // - struct members might inherit from a struct declaration + // (note that non-block structs don't explicitly inherit, + // only implicitly, meaning no decoration involved) + // - affect decorations on the struct members + // (note smooth does not, and expecting something like volatile + // to effect the whole object) + // - are not part of the offset/st430/etc or row/column-major layout + return qualifier.invariant || (qualifier.hasLocation() && type.getBasicType() == glslang::EbtBlock); +} + +// +// Implement the TGlslangToSpvTraverser class. +// + +TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, + const glslang::TIntermediate* glslangIntermediate, + spv::SpvBuildLogger* buildLogger, glslang::SpvOptions& options) : + TIntermTraverser(true, false, true), + options(options), + shaderEntry(nullptr), currentFunction(nullptr), + sequenceDepth(0), logger(buildLogger), + builder(spvVersion, (glslang::GetKhronosToolId() << 16) | glslang::GetSpirvGeneratorVersion(), logger), + inEntryPoint(false), entryPointTerminated(false), linkageOnly(false), + glslangIntermediate(glslangIntermediate), + nanMinMaxClamp(glslangIntermediate->getNanMinMaxClamp()), + nonSemanticDebugPrintf(0) +{ + spv::ExecutionModel executionModel = TranslateExecutionModel(glslangIntermediate->getStage()); + + builder.clearAccessChain(); + builder.setSource(TranslateSourceLanguage(glslangIntermediate->getSource(), glslangIntermediate->getProfile()), + glslangIntermediate->getVersion()); + + if (options.generateDebugInfo) { + builder.setEmitOpLines(); + builder.setSourceFile(glslangIntermediate->getSourceFile()); + + // Set the source shader's text. If for SPV version 1.0, include + // a preamble in comments stating the OpModuleProcessed instructions. + // Otherwise, emit those as actual instructions. 
+ std::string text; + const std::vector<std::string>& processes = glslangIntermediate->getProcesses(); + for (int p = 0; p < (int)processes.size(); ++p) { + if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_1) { + text.append("// OpModuleProcessed "); + text.append(processes[p]); + text.append("\n"); + } else + builder.addModuleProcessed(processes[p]); + } + if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_1 && (int)processes.size() > 0) + text.append("#line 1\n"); + text.append(glslangIntermediate->getSourceText()); + builder.setSourceText(text); + // Pass name and text for all included files + const std::map<std::string, std::string>& include_txt = glslangIntermediate->getIncludeText(); + for (auto iItr = include_txt.begin(); iItr != include_txt.end(); ++iItr) + builder.addInclude(iItr->first, iItr->second); + } + stdBuiltins = builder.import("GLSL.std.450"); + + spv::AddressingModel addressingModel = spv::AddressingModelLogical; + spv::MemoryModel memoryModel = spv::MemoryModelGLSL450; + + if (glslangIntermediate->usingPhysicalStorageBuffer()) { + addressingModel = spv::AddressingModelPhysicalStorageBuffer64EXT; + builder.addIncorporatedExtension(spv::E_SPV_EXT_physical_storage_buffer, spv::Spv_1_5); + builder.addCapability(spv::CapabilityPhysicalStorageBufferAddressesEXT); + } + if (glslangIntermediate->usingVulkanMemoryModel()) { + memoryModel = spv::MemoryModelVulkanKHR; + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + builder.addIncorporatedExtension(spv::E_SPV_KHR_vulkan_memory_model, spv::Spv_1_5); + } + builder.setMemoryModel(addressingModel, memoryModel); + + if (glslangIntermediate->usingVariablePointers()) { + builder.addCapability(spv::CapabilityVariablePointers); + } + + shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str()); + entryPoint = builder.addEntryPoint(executionModel, shaderEntry, glslangIntermediate->getEntryPointName().c_str()); + + // Add the source extensions + const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions(); + for (auto it = sourceExtensions.begin(); it != sourceExtensions.end(); ++it) + builder.addSourceExtension(it->first.c_str()); + + // Add the top-level modes for this shader.
+ + if (glslangIntermediate->getXfbMode()) { + builder.addCapability(spv::CapabilityTransformFeedback); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeXfb); + } + + if (glslangIntermediate->getLayoutPrimitiveCulling()) { + builder.addCapability(spv::CapabilityRayTraversalPrimitiveCullingProvisionalKHR); + } + + unsigned int mode; + switch (glslangIntermediate->getStage()) { + case EShLangVertex: + builder.addCapability(spv::CapabilityShader); + break; + + case EShLangFragment: + builder.addCapability(spv::CapabilityShader); + if (glslangIntermediate->getPixelCenterInteger()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModePixelCenterInteger); + + if (glslangIntermediate->getOriginUpperLeft()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginUpperLeft); + else + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginLowerLeft); + + if (glslangIntermediate->getEarlyFragmentTests()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModeEarlyFragmentTests); + + if (glslangIntermediate->getPostDepthCoverage()) { + builder.addCapability(spv::CapabilitySampleMaskPostDepthCoverage); + builder.addExecutionMode(shaderEntry, spv::ExecutionModePostDepthCoverage); + builder.addExtension(spv::E_SPV_KHR_post_depth_coverage); + } + + if (glslangIntermediate->getDepth() != glslang::EldUnchanged && glslangIntermediate->isDepthReplacing()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModeDepthReplacing); + +#ifndef GLSLANG_WEB + + switch(glslangIntermediate->getDepth()) { + case glslang::EldGreater: mode = spv::ExecutionModeDepthGreater; break; + case glslang::EldLess: mode = spv::ExecutionModeDepthLess; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + switch (glslangIntermediate->getInterlockOrdering()) { + case glslang::EioPixelInterlockOrdered: mode = spv::ExecutionModePixelInterlockOrderedEXT; + break; + case glslang::EioPixelInterlockUnordered: mode = spv::ExecutionModePixelInterlockUnorderedEXT; + break; + case glslang::EioSampleInterlockOrdered: mode = spv::ExecutionModeSampleInterlockOrderedEXT; + break; + case glslang::EioSampleInterlockUnordered: mode = spv::ExecutionModeSampleInterlockUnorderedEXT; + break; + case glslang::EioShadingRateInterlockOrdered: mode = spv::ExecutionModeShadingRateInterlockOrderedEXT; + break; + case glslang::EioShadingRateInterlockUnordered: mode = spv::ExecutionModeShadingRateInterlockUnorderedEXT; + break; + default: mode = spv::ExecutionModeMax; + break; + } + if (mode != spv::ExecutionModeMax) { + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + if (mode == spv::ExecutionModeShadingRateInterlockOrderedEXT || + mode == spv::ExecutionModeShadingRateInterlockUnorderedEXT) { + builder.addCapability(spv::CapabilityFragmentShaderShadingRateInterlockEXT); + } else if (mode == spv::ExecutionModePixelInterlockOrderedEXT || + mode == spv::ExecutionModePixelInterlockUnorderedEXT) { + builder.addCapability(spv::CapabilityFragmentShaderPixelInterlockEXT); + } else { + builder.addCapability(spv::CapabilityFragmentShaderSampleInterlockEXT); + } + builder.addExtension(spv::E_SPV_EXT_fragment_shader_interlock); + } +#endif + break; + + case EShLangCompute: + builder.addCapability(spv::CapabilityShader); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0), + glslangIntermediate->getLocalSize(1), + glslangIntermediate->getLocalSize(2)); + if 
(glslangIntermediate->getLayoutDerivativeModeNone() == glslang::LayoutDerivativeGroupQuads) { + builder.addCapability(spv::CapabilityComputeDerivativeGroupQuadsNV); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeDerivativeGroupQuadsNV); + builder.addExtension(spv::E_SPV_NV_compute_shader_derivatives); + } else if (glslangIntermediate->getLayoutDerivativeModeNone() == glslang::LayoutDerivativeGroupLinear) { + builder.addCapability(spv::CapabilityComputeDerivativeGroupLinearNV); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeDerivativeGroupLinearNV); + builder.addExtension(spv::E_SPV_NV_compute_shader_derivatives); + } + break; +#ifndef GLSLANG_WEB + case EShLangTessEvaluation: + case EShLangTessControl: + builder.addCapability(spv::CapabilityTessellation); + + glslang::TLayoutGeometry primitive; + + if (glslangIntermediate->getStage() == EShLangTessControl) { + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, + glslangIntermediate->getVertices()); + primitive = glslangIntermediate->getOutputPrimitive(); + } else { + primitive = glslangIntermediate->getInputPrimitive(); + } + + switch (primitive) { + case glslang::ElgTriangles: mode = spv::ExecutionModeTriangles; break; + case glslang::ElgQuads: mode = spv::ExecutionModeQuads; break; + case glslang::ElgIsolines: mode = spv::ExecutionModeIsolines; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + + switch (glslangIntermediate->getVertexSpacing()) { + case glslang::EvsEqual: mode = spv::ExecutionModeSpacingEqual; break; + case glslang::EvsFractionalEven: mode = spv::ExecutionModeSpacingFractionalEven; break; + case glslang::EvsFractionalOdd: mode = spv::ExecutionModeSpacingFractionalOdd; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + + switch (glslangIntermediate->getVertexOrder()) { + case glslang::EvoCw: mode = spv::ExecutionModeVertexOrderCw; break; + case glslang::EvoCcw: mode = spv::ExecutionModeVertexOrderCcw; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + + if (glslangIntermediate->getPointMode()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModePointMode); + break; + + case EShLangGeometry: + builder.addCapability(spv::CapabilityGeometry); + switch (glslangIntermediate->getInputPrimitive()) { + case glslang::ElgPoints: mode = spv::ExecutionModeInputPoints; break; + case glslang::ElgLines: mode = spv::ExecutionModeInputLines; break; + case glslang::ElgLinesAdjacency: mode = spv::ExecutionModeInputLinesAdjacency; break; + case glslang::ElgTriangles: mode = spv::ExecutionModeTriangles; break; + case glslang::ElgTrianglesAdjacency: mode = spv::ExecutionModeInputTrianglesAdjacency; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + + builder.addExecutionMode(shaderEntry, spv::ExecutionModeInvocations, glslangIntermediate->getInvocations()); + + switch (glslangIntermediate->getOutputPrimitive()) { + case glslang::ElgPoints: mode = spv::ExecutionModeOutputPoints; break; + case glslang::ElgLineStrip: mode = spv::ExecutionModeOutputLineStrip; break; + case glslang::ElgTriangleStrip: mode = spv::ExecutionModeOutputTriangleStrip; break; 
+ default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices()); + break; + + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + { + auto& extensions = glslangIntermediate->getRequestedExtensions(); + if (extensions.find("GL_NV_ray_tracing") == extensions.end()) { + builder.addCapability(spv::CapabilityRayTracingProvisionalKHR); + builder.addExtension("SPV_KHR_ray_tracing"); + } + else { + builder.addCapability(spv::CapabilityRayTracingNV); + builder.addExtension("SPV_NV_ray_tracing"); + } + break; + } + case EShLangTaskNV: + case EShLangMeshNV: + builder.addCapability(spv::CapabilityMeshShadingNV); + builder.addExtension(spv::E_SPV_NV_mesh_shader); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0), + glslangIntermediate->getLocalSize(1), + glslangIntermediate->getLocalSize(2)); + if (glslangIntermediate->getStage() == EShLangMeshNV) { + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, + glslangIntermediate->getVertices()); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputPrimitivesNV, + glslangIntermediate->getPrimitives()); + + switch (glslangIntermediate->getOutputPrimitive()) { + case glslang::ElgPoints: mode = spv::ExecutionModeOutputPoints; break; + case glslang::ElgLines: mode = spv::ExecutionModeOutputLinesNV; break; + case glslang::ElgTriangles: mode = spv::ExecutionModeOutputTrianglesNV; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + } + break; +#endif + + default: + break; + } +} + +// Finish creating SPV, after the traversal is complete. +void TGlslangToSpvTraverser::finishSpv() +{ + // Finish the entry point function + if (! entryPointTerminated) { + builder.setBuildPoint(shaderEntry->getLastBlock()); + builder.leaveFunction(); + } + + // finish off the entry-point SPV instruction by adding the Input/Output + for (auto it = iOSet.cbegin(); it != iOSet.cend(); ++it) + entryPoint->addIdOperand(*it); + + // Add capabilities, extensions, remove unneeded decorations, etc., + // based on the resulting SPIR-V. + // Note: WebGPU code generation must have the opportunity to aggressively + // prune unreachable merge blocks and continue targets. + builder.postProcess(); +} + +// Write the SPV into 'out'. +void TGlslangToSpvTraverser::dumpSpv(std::vector<unsigned int>& out) +{ + builder.dump(out); +}
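(Once dumpSpv() has filled the word vector, the result can be handed straight to the driver. A sketch assuming the standard Vulkan 1.0 entry points from the loader this PR also vendors; the helper name is hypothetical.)

    #include <vector>
    #include <vulkan/vulkan.h>

    // Wrap the words produced by GlslangToSpv()/dumpSpv() in a VkShaderModule.
    VkShaderModule CreateShaderModule(VkDevice device, const std::vector<unsigned int>& words)
    {
        VkShaderModuleCreateInfo info = {};
        info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
        info.codeSize = words.size() * sizeof(unsigned int); // byte count, not word count
        info.pCode = words.data();

        VkShaderModule module = VK_NULL_HANDLE;
        if (vkCreateShaderModule(device, &info, nullptr, &module) != VK_SUCCESS)
            return VK_NULL_HANDLE;
        return module;
    }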
+ +// +// Implement the traversal functions. +// +// Return true from interior nodes to have the external traversal +// continue on to children. Return false if children were +// already processed. +// + +// +// Symbols can turn into +// - uniform/input reads +// - output writes +// - complex lvalue base setups: foo.bar[3]..., where we see foo and start up an access chain +// - something simple that degenerates into the last bullet +// +void TGlslangToSpvTraverser::visitSymbol(glslang::TIntermSymbol* symbol) +{ + SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder); + if (symbol->getType().isStruct()) + glslangTypeToIdMap[symbol->getType().getStruct()] = symbol->getId(); + + if (symbol->getType().getQualifier().isSpecConstant()) + spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); + + // getSymbolId() will set up all the IO decorations on the first call. + // Formal function parameters were mapped during makeFunctions(). + spv::Id id = getSymbolId(symbol); + + if (builder.isPointer(id)) { + // Include all "static use" and "linkage only" interface variables on the OpEntryPoint instruction + // Consider adding to the OpEntryPoint interface list. + // Only looking at structures if they have at least one member. + if (!symbol->getType().isStruct() || symbol->getType().getStruct()->size() > 0) { + spv::StorageClass sc = builder.getStorageClass(id); + // Before SPIR-V 1.4, we only want to include Input and Output. + // Starting with SPIR-V 1.4, we want all globals. + if ((glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4 && sc != spv::StorageClassFunction) || + (sc == spv::StorageClassInput || sc == spv::StorageClassOutput)) { + iOSet.insert(id); + } + } + + // If the SPIR-V type is required to be different than the AST type + // (for example, SubgroupMasks or 3x4 ObjectToWorld/WorldToObject matrices), + // translate now from the SPIR-V type to the AST type, for the consuming + // operation. + // Note this turns it from an l-value to an r-value. + // Currently, all symbols needing this are inputs; avoid the map lookup when non-input. + if (symbol->getType().getQualifier().storage == glslang::EvqVaryingIn) + id = translateForcedType(id); + } + + // Only process non-linkage-only nodes for generating actual static uses + if (! linkageOnly || symbol->getQualifier().isSpecConstant()) { + // Prepare to generate code for the access + + // L-value chains will be computed left to right. We're on the symbol now, + // which is the left-most part of the access chain, so now is "clear" time, + // followed by setting the base. + builder.clearAccessChain(); + + // For now, we consider all user variables as being in memory, so they are pointers, + // except for + // A) R-Value arguments to a function, which are an intermediate object. + // See comments in handleUserFunctionCall(). + // B) Specialization constants (normal constants don't even come in as a variable), + // These are also pure R-values. + // C) R-Values from type translation, see above call to translateForcedType() + glslang::TQualifier qualifier = symbol->getQualifier(); + if (qualifier.isSpecConstant() || rValueParameters.find(symbol->getId()) != rValueParameters.end() || + !builder.isPointerType(builder.getTypeId(id))) + builder.setAccessChainRValue(id); + else + builder.setAccessChainLValue(id); + } + +#ifdef ENABLE_HLSL + // Process linkage-only nodes for any special additional interface work. + if (linkageOnly) { + if (glslangIntermediate->getHlslFunctionality1()) { + // Map implicit counter buffers to their originating buffers, which should have been + // seen by now, given earlier pruning of unused counters, and preservation of order + // of declaration.
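(The decoration emitted by the branch below, sketched for an assumed HLSL input; "%buf_counter" is a hypothetical name for the implicit counter resource.)

    // HLSL:    RWStructuredBuffer<int> buf;   // buf.IncrementCounter() used somewhere
    // SPIR-V:  OpExtension "SPV_GOOGLE_hlsl_functionality1"
    //          OpDecorateId %buf HlslCounterBufferGOOGLE %buf_counter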
+ if (symbol->getType().getQualifier().isUniformOrBuffer()) { + if (!glslangIntermediate->hasCounterBufferName(symbol->getName())) { + // Save possible originating buffers for counter buffers, keyed by + // making the potential counter-buffer name. + std::string keyName = symbol->getName().c_str(); + keyName = glslangIntermediate->addCounterBufferName(keyName); + counterOriginator[keyName] = symbol; + } else { + // Handle a counter buffer, by finding the saved originating buffer. + std::string keyName = symbol->getName().c_str(); + auto it = counterOriginator.find(keyName); + if (it != counterOriginator.end()) { + id = getSymbolId(it->second); + if (id != spv::NoResult) { + spv::Id counterId = getSymbolId(symbol); + if (counterId != spv::NoResult) { + builder.addExtension("SPV_GOOGLE_hlsl_functionality1"); + builder.addDecorationId(id, spv::DecorationHlslCounterBufferGOOGLE, counterId); + } + } + } + } + } + } + } +#endif +} + +bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::TIntermBinary* node) +{ + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + if (node->getLeft()->getAsSymbolNode() != nullptr && node->getLeft()->getType().isStruct()) { + glslangTypeToIdMap[node->getLeft()->getType().getStruct()] = node->getLeft()->getAsSymbolNode()->getId(); + } + if (node->getRight()->getAsSymbolNode() != nullptr && node->getRight()->getType().isStruct()) { + glslangTypeToIdMap[node->getRight()->getType().getStruct()] = node->getRight()->getAsSymbolNode()->getId(); + } + + SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder); + if (node->getType().getQualifier().isSpecConstant()) + spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); + + // First, handle special cases + switch (node->getOp()) { + case glslang::EOpAssign: + case glslang::EOpAddAssign: + case glslang::EOpSubAssign: + case glslang::EOpMulAssign: + case glslang::EOpVectorTimesMatrixAssign: + case glslang::EOpVectorTimesScalarAssign: + case glslang::EOpMatrixTimesScalarAssign: + case glslang::EOpMatrixTimesMatrixAssign: + case glslang::EOpDivAssign: + case glslang::EOpModAssign: + case glslang::EOpAndAssign: + case glslang::EOpInclusiveOrAssign: + case glslang::EOpExclusiveOrAssign: + case glslang::EOpLeftShiftAssign: + case glslang::EOpRightShiftAssign: + // A bin-op assign "a += b" means the same thing as "a = a + b" + // where a is evaluated before b. For a simple assignment, GLSL + // says to evaluate the left before the right. So, always, left + // node then right node. 
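(Restating the case body that follows for an assumed `a += b`, to make the evaluation order concrete:)

    // 1. traverse 'a' and save its access chain            (the l-value)
    // 2. traverse 'b' and load it                           -> rValue
    // 3. restore 'a's chain and load it                     -> leftRValue (skipped for plain '=')
    // 4. rValue = createBinaryOperation(add, ..., leftRValue, rValue, ...)
    // 5. restore 'a's chain, multiTypeStore(rValue)         (the store)
    // 6. leave rValue as the access-chain r-value, since 'x = (a += b)' is itself an expression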
+ { + // get the left l-value, save it away + builder.clearAccessChain(); + node->getLeft()->traverse(this); + spv::Builder::AccessChain lValue = builder.getAccessChain(); + + // evaluate the right + builder.clearAccessChain(); + node->getRight()->traverse(this); + spv::Id rValue = accessChainLoad(node->getRight()->getType()); + + if (node->getOp() != glslang::EOpAssign) { + // the left is also an r-value + builder.setAccessChain(lValue); + spv::Id leftRValue = accessChainLoad(node->getLeft()->getType()); + + // do the operation + OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()), + TranslateNoContractionDecoration(node->getType().getQualifier()), + TranslateNonUniformDecoration(node->getType().getQualifier()) }; + rValue = createBinaryOperation(node->getOp(), decorations, + convertGlslangToSpvType(node->getType()), leftRValue, rValue, + node->getType().getBasicType()); + + // these all need their counterparts in createBinaryOperation() + assert(rValue != spv::NoResult); + } + + // store the result + builder.setAccessChain(lValue); + multiTypeStore(node->getLeft()->getType(), rValue); + + // assignments are expressions having an rValue after they are evaluated... + builder.clearAccessChain(); + builder.setAccessChainRValue(rValue); + } + return false; + case glslang::EOpIndexDirect: + case glslang::EOpIndexDirectStruct: + { + // Structure, array, matrix, or vector indirection with statically known index. + // Get the left part of the access chain. + node->getLeft()->traverse(this); + + // Add the next element in the chain + + const int glslangIndex = node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (! node->getLeft()->getType().isArray() && + node->getLeft()->getType().isVector() && + node->getOp() == glslang::EOpIndexDirect) { + // This is essentially a hard-coded vector swizzle of size 1, + // so short circuit the access-chain stuff with a swizzle. + std::vector<unsigned> swizzle; + swizzle.push_back(glslangIndex); + int dummySize; + builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()), + TranslateCoherent(node->getLeft()->getType()), + glslangIntermediate->getBaseAlignmentScalar( + node->getLeft()->getType(), dummySize)); + } else { + + // Load through a block reference is performed with a dot operator that + // is mapped to EOpIndexDirectStruct. When we get to the actual reference, + // do a load and reset the access chain. + if (node->getLeft()->isReference() && + !node->getLeft()->getType().isArray() && + node->getOp() == glslang::EOpIndexDirectStruct) + { + spv::Id left = accessChainLoad(node->getLeft()->getType()); + builder.clearAccessChain(); + builder.setAccessChainLValue(left); + } + + int spvIndex = glslangIndex; + if (node->getLeft()->getBasicType() == glslang::EbtBlock && + node->getOp() == glslang::EOpIndexDirectStruct) + { + // This may be, e.g., an anonymous block-member selection, which generally needs + // index remapping due to hidden members in anonymous blocks.
+ int glslangId = glslangTypeToIdMap[node->getLeft()->getType().getStruct()]; + if (memberRemapper.find(glslangId) != memberRemapper.end()) { + std::vector<int>& remapper = memberRemapper[glslangId]; + assert(remapper.size() > 0); + spvIndex = remapper[glslangIndex]; + } + } + + // normal case for indexing array or structure or block + builder.accessChainPush(builder.makeIntConstant(spvIndex), + TranslateCoherent(node->getLeft()->getType()), + node->getLeft()->getType().getBufferReferenceAlignment()); + + // Add capabilities here for accessing PointSize and clip/cull distance. + // We have deferred generation of associated capabilities until now. + if (node->getLeft()->getType().isStruct() && ! node->getLeft()->getType().isArray()) + declareUseOfStructMember(*(node->getLeft()->getType().getStruct()), glslangIndex); + } + } + return false; + case glslang::EOpIndexIndirect: + { + // Array, matrix, or vector indirection with variable index. + // Will use native SPIR-V access-chain for an array indirection; + // matrices are arrays of vectors, so will also work for a matrix. + // Will use the access chain's 'component' for variable index into a vector. + + // This adapter is building access chains left to right. + // Set up the access chain to the left. + node->getLeft()->traverse(this); + + // save it so that computing the right side doesn't trash it + spv::Builder::AccessChain partial = builder.getAccessChain(); + + // compute the next index in the chain + builder.clearAccessChain(); + node->getRight()->traverse(this); + spv::Id index = accessChainLoad(node->getRight()->getType()); + + addIndirectionIndexCapabilities(node->getLeft()->getType(), node->getRight()->getType()); + + // restore the saved access chain + builder.setAccessChain(partial); + + if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector()) { + int dummySize; + builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()), + TranslateCoherent(node->getLeft()->getType()), + glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), + dummySize)); + } else + builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()), + node->getLeft()->getType().getBufferReferenceAlignment()); + } + return false; + case glslang::EOpVectorSwizzle: + { + node->getLeft()->traverse(this); + std::vector<unsigned> swizzle; + convertSwizzle(*node->getRight()->getAsAggregate(), swizzle); + int dummySize; + builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()), + TranslateCoherent(node->getLeft()->getType()), + glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), + dummySize)); + } + return false; + case glslang::EOpMatrixSwizzle: + logger->missingFunctionality("matrix swizzle"); + return true; + case glslang::EOpLogicalOr: + case glslang::EOpLogicalAnd: + { + + // These may require short circuiting, but can sometimes be done as straight + // binary operations. The right operand must be short circuited if it has + // side effects, and should probably be if it is complex. + if (isTrivial(node->getRight()->getAsTyped())) + break; // handle below as a normal binary operation + // otherwise, we need to do dynamic short circuiting on the right operand + spv::Id result = createShortCircuit(node->getOp(), *node->getLeft()->getAsTyped(), + *node->getRight()->getAsTyped()); + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + } + return false; + default: + break; + } + + // Assume generic binary op...
+ + // get left operand + builder.clearAccessChain(); + node->getLeft()->traverse(this); + spv::Id left = accessChainLoad(node->getLeft()->getType()); + + // get right operand + builder.clearAccessChain(); + node->getRight()->traverse(this); + spv::Id right = accessChainLoad(node->getRight()->getType()); + + // get result + OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()), + TranslateNoContractionDecoration(node->getType().getQualifier()), + TranslateNonUniformDecoration(node->getType().getQualifier()) }; + spv::Id result = createBinaryOperation(node->getOp(), decorations, + convertGlslangToSpvType(node->getType()), left, right, + node->getLeft()->getType().getBasicType()); + + builder.clearAccessChain(); + if (! result) { + logger->missingFunctionality("unknown glslang binary operation"); + return true; // pick up a child as the place-holder result + } else { + builder.setAccessChainRValue(result); + return false; + } +} + +// Figure out what, if any, type changes are needed when accessing a specific built-in. +// Returns <the type SPIR-V requires for declaration, the type to translate to on use>. +// Also see comment for 'forceType', regarding tracking SPIR-V-required types. +std::pair<spv::Id, spv::Id> TGlslangToSpvTraverser::getForcedType(glslang::TBuiltInVariable glslangBuiltIn, + const glslang::TType& glslangType) +{ + switch(glslangBuiltIn) + { + case glslang::EbvSubGroupEqMask: + case glslang::EbvSubGroupGeMask: + case glslang::EbvSubGroupGtMask: + case glslang::EbvSubGroupLeMask: + case glslang::EbvSubGroupLtMask: { + // these require changing a 64-bit scalar -> a vector of 32-bit components + if (glslangType.isVector()) + break; + std::pair<spv::Id, spv::Id> ret(builder.makeVectorType(builder.makeUintType(32), 4), + builder.makeUintType(64)); + return ret; + } + // There are no SPIR-V builtins defined for these; they map onto the original non-transposed + // builtins. During visitBinary we insert a transpose + case glslang::EbvWorldToObject3x4: + case glslang::EbvObjectToWorld3x4: { + std::pair<spv::Id, spv::Id> ret(builder.makeMatrixType(builder.makeFloatType(32), 4, 3), + builder.makeMatrixType(builder.makeFloatType(32), 3, 4) + ); + return ret; + } + default: + break; + } + + std::pair<spv::Id, spv::Id> ret(spv::NoType, spv::NoType); + return ret; +}
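(An assumed disassembly of the first case: GL_ARB_shader_ballot exposes the subgroup masks as 64-bit scalars, while the corresponding SPIR-V built-ins are uvec4, so translateForcedType() below loads the forced uvec4 and bitcasts its low two components:)

    // %mask = OpLoad %v4uint %gl_SubGroupEqMaskARB   ; declared with the forced uvec4 type
    // %x    = OpCompositeExtract %uint %mask 0
    // %y    = OpCompositeExtract %uint %mask 1
    // %xy   = OpCompositeConstruct %v2uint %x %y
    // %m64  = OpBitcast %ulong %xy                   ; the 64-bit scalar the AST expects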
+ +// For an object previously identified (see getForcedType() and forceType) +// as needing type translations, do the translation needed for a load, turning +// an L-value into an R-value. +spv::Id TGlslangToSpvTraverser::translateForcedType(spv::Id object) +{ + const auto forceIt = forceType.find(object); + if (forceIt == forceType.end()) + return object; + + spv::Id desiredTypeId = forceIt->second; + spv::Id objectTypeId = builder.getTypeId(object); + assert(builder.isPointerType(objectTypeId)); + objectTypeId = builder.getContainedTypeId(objectTypeId); + if (builder.isVectorType(objectTypeId) && + builder.getScalarTypeWidth(builder.getContainedTypeId(objectTypeId)) == 32) { + if (builder.getScalarTypeWidth(desiredTypeId) == 64) { + // handle 32-bit v.xy* -> 64-bit + builder.clearAccessChain(); + builder.setAccessChainLValue(object); + object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, objectTypeId); + std::vector<spv::Id> components; + components.push_back(builder.createCompositeExtract(object, builder.getContainedTypeId(objectTypeId), 0)); + components.push_back(builder.createCompositeExtract(object, builder.getContainedTypeId(objectTypeId), 1)); + + spv::Id vecType = builder.makeVectorType(builder.getContainedTypeId(objectTypeId), 2); + return builder.createUnaryOp(spv::OpBitcast, desiredTypeId, + builder.createCompositeConstruct(vecType, components)); + } else { + logger->missingFunctionality("forcing 32-bit vector type to non 64-bit scalar"); + } + } else if (builder.isMatrixType(objectTypeId)) { + // There are no SPIR-V builtins defined for 3x4 variants of ObjectToWorld/WorldToObject + // and we insert a transpose after loading the original non-transposed builtins + builder.clearAccessChain(); + builder.setAccessChainLValue(object); + object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, objectTypeId); + return builder.createUnaryOp(spv::OpTranspose, desiredTypeId, object); + + } else { + logger->missingFunctionality("forcing non 32-bit vector type"); + } + + return object; +} + +bool TGlslangToSpvTraverser::visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node) +{ + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + + SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder); + if (node->getType().getQualifier().isSpecConstant()) + spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); + + spv::Id result = spv::NoResult; + + // try texturing first + result = createImageTextureFunctionCall(node); + if (result != spv::NoResult) { + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + + return false; // done with this node + } + + // Non-texturing. + + if (node->getOp() == glslang::EOpArrayLength) { + // Quite special; won't want to evaluate the operand. + + // Currently, the front-end does not allow .length() on an array until it is sized, + // except for the last block member of an SSBO. + // TODO: If this changes, link-time sized arrays might show up here, and need their + // size extracted. + + // Normal .length() would have been constant folded by the front-end. + // So, this has to be block.lastMember.length(). + // SPV wants "block" and member number as the operands, go get them.
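(A sketch of the resulting instructions for an assumed shader; member index 1 names the runtime-sized array:)

    // GLSL:    layout(std430) buffer B { float a; float tail[]; } b;
    //          int n = b.tail.length();
    // SPIR-V:  %len = OpArrayLength %uint %b 1
    //          %n   = OpBitcast %int %len     ; GLSL expects a signed result, see below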
+ + spv::Id length; + if (node->getOperand()->getType().isCoopMat()) { + spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); + + spv::Id typeId = convertGlslangToSpvType(node->getOperand()->getType()); + assert(builder.isCooperativeMatrixType(typeId)); + + length = builder.createCooperativeMatrixLength(typeId); + } else { + glslang::TIntermTyped* block = node->getOperand()->getAsBinaryNode()->getLeft(); + block->traverse(this); + unsigned int member = node->getOperand()->getAsBinaryNode()->getRight()->getAsConstantUnion() + ->getConstArray()[0].getUConst(); + length = builder.createArrayLength(builder.accessChainGetLValue(), member); + } + + // GLSL semantics say the result of .length() is an int, while SPIR-V says + // signedness must be 0. So, convert from SPIR-V unsigned back to GLSL's + // AST expectation of a signed result. + if (glslangIntermediate->getSource() == glslang::EShSourceGlsl) { + if (builder.isInSpecConstCodeGenMode()) { + length = builder.createBinOp(spv::OpIAdd, builder.makeIntType(32), length, builder.makeIntConstant(0)); + } else { + length = builder.createUnaryOp(spv::OpBitcast, builder.makeIntType(32), length); + } + } + + builder.clearAccessChain(); + builder.setAccessChainRValue(length); + + return false; + } + + // Start by evaluating the operand + + // Does it need a swizzle inversion? If so, evaluation is inverted; + // operate first on the swizzle base, then apply the swizzle. + spv::Id invertedType = spv::NoType; + auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? + invertedType : convertGlslangToSpvType(node->getType()); }; + if (node->getOp() == glslang::EOpInterpolateAtCentroid) + invertedType = getInvertedSwizzleType(*node->getOperand()); + + builder.clearAccessChain(); + TIntermNode *operandNode; + if (invertedType != spv::NoType) + operandNode = node->getOperand()->getAsBinaryNode()->getLeft(); + else + operandNode = node->getOperand(); + + operandNode->traverse(this); + + spv::Id operand = spv::NoResult; + + spv::Builder::AccessChain::CoherentFlags lvalueCoherentFlags; + +#ifndef GLSLANG_WEB + if (node->getOp() == glslang::EOpAtomicCounterIncrement || + node->getOp() == glslang::EOpAtomicCounterDecrement || + node->getOp() == glslang::EOpAtomicCounter || + node->getOp() == glslang::EOpInterpolateAtCentroid || + node->getOp() == glslang::EOpRayQueryProceed || + node->getOp() == glslang::EOpRayQueryGetRayTMin || + node->getOp() == glslang::EOpRayQueryGetRayFlags || + node->getOp() == glslang::EOpRayQueryGetWorldRayOrigin || + node->getOp() == glslang::EOpRayQueryGetWorldRayDirection || + node->getOp() == glslang::EOpRayQueryGetIntersectionCandidateAABBOpaque || + node->getOp() == glslang::EOpRayQueryTerminate || + node->getOp() == glslang::EOpRayQueryConfirmIntersection) { + operand = builder.accessChainGetLValue(); // Special case l-value operands + lvalueCoherentFlags = builder.getAccessChain().coherentFlags; + lvalueCoherentFlags |= TranslateCoherent(operandNode->getAsTyped()->getType()); + } else +#endif + { + operand = accessChainLoad(node->getOperand()->getType()); + } + + OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()), + TranslateNoContractionDecoration(node->getType().getQualifier()), + TranslateNonUniformDecoration(node->getType().getQualifier()) }; + + // it could be a conversion + if (! result) + result = createConversion(node->getOp(), decorations, resultType(), operand, + node->getOperand()->getBasicType()); + + // if not, then possibly an operation + if (! 
result) + result = createUnaryOperation(node->getOp(), decorations, resultType(), operand, + node->getOperand()->getBasicType(), lvalueCoherentFlags); + + if (result) { + if (invertedType) { + result = createInvertedSwizzle(decorations.precision, *node->getOperand(), result); + decorations.addNonUniform(builder, result); + } + + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + + return false; // done with this node + } + + // it must be a special case, check... + switch (node->getOp()) { + case glslang::EOpPostIncrement: + case glslang::EOpPostDecrement: + case glslang::EOpPreIncrement: + case glslang::EOpPreDecrement: + { + // we need the integer value "1" or the floating point "1.0" to add/subtract + spv::Id one = 0; + if (node->getBasicType() == glslang::EbtFloat) + one = builder.makeFloatConstant(1.0F); +#ifndef GLSLANG_WEB + else if (node->getBasicType() == glslang::EbtDouble) + one = builder.makeDoubleConstant(1.0); + else if (node->getBasicType() == glslang::EbtFloat16) + one = builder.makeFloat16Constant(1.0F); + else if (node->getBasicType() == glslang::EbtInt8 || node->getBasicType() == glslang::EbtUint8) + one = builder.makeInt8Constant(1); + else if (node->getBasicType() == glslang::EbtInt16 || node->getBasicType() == glslang::EbtUint16) + one = builder.makeInt16Constant(1); + else if (node->getBasicType() == glslang::EbtInt64 || node->getBasicType() == glslang::EbtUint64) + one = builder.makeInt64Constant(1); +#endif + else + one = builder.makeIntConstant(1); + glslang::TOperator op; + if (node->getOp() == glslang::EOpPreIncrement || + node->getOp() == glslang::EOpPostIncrement) + op = glslang::EOpAdd; + else + op = glslang::EOpSub; + + spv::Id result = createBinaryOperation(op, decorations, + convertGlslangToSpvType(node->getType()), operand, one, + node->getType().getBasicType()); + assert(result != spv::NoResult); + + // The result of the operation is always stored; which value the expression + // consumes differs: 'x = ++i' yields the new value, while 'x = i++' yields + // the old one, as decided just below. The consumed result is always an r-value.
+ builder.accessChainStore(result); + builder.clearAccessChain(); + if (node->getOp() == glslang::EOpPreIncrement || + node->getOp() == glslang::EOpPreDecrement) + builder.setAccessChainRValue(result); + else + builder.setAccessChainRValue(operand); + } + + return false; + +#ifndef GLSLANG_WEB + case glslang::EOpEmitStreamVertex: + builder.createNoResultOp(spv::OpEmitStreamVertex, operand); + return false; + case glslang::EOpEndStreamPrimitive: + builder.createNoResultOp(spv::OpEndStreamPrimitive, operand); + return false; + case glslang::EOpRayQueryTerminate: + builder.createNoResultOp(spv::OpRayQueryTerminateKHR, operand); + return false; + case glslang::EOpRayQueryConfirmIntersection: + builder.createNoResultOp(spv::OpRayQueryConfirmIntersectionKHR, operand); + return false; +#endif + + default: + logger->missingFunctionality("unknown glslang unary"); + return true; // pick up operand as placeholder result + } +} + +// Construct a composite object, recursively copying members if their types don't match +spv::Id TGlslangToSpvTraverser::createCompositeConstruct(spv::Id resultTypeId, std::vector<spv::Id> constituents) +{ + for (int c = 0; c < (int)constituents.size(); ++c) { + spv::Id& constituent = constituents[c]; + spv::Id lType = builder.getContainedTypeId(resultTypeId, c); + spv::Id rType = builder.getTypeId(constituent); + if (lType != rType) { + if (glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4) { + constituent = builder.createUnaryOp(spv::OpCopyLogical, lType, constituent); + } else if (builder.isStructType(rType)) { + std::vector<spv::Id> rTypeConstituents; + int numrTypeConstituents = builder.getNumTypeConstituents(rType); + for (int i = 0; i < numrTypeConstituents; ++i) { + rTypeConstituents.push_back(builder.createCompositeExtract(constituent, + builder.getContainedTypeId(rType, i), i)); + } + constituents[c] = createCompositeConstruct(lType, rTypeConstituents); + } else { + assert(builder.isArrayType(rType)); + std::vector<spv::Id> rTypeConstituents; + int numrTypeConstituents = builder.getNumTypeConstituents(rType); + + spv::Id elementRType = builder.getContainedTypeId(rType); + for (int i = 0; i < numrTypeConstituents; ++i) { + rTypeConstituents.push_back(builder.createCompositeExtract(constituent, elementRType, i)); + } + constituents[c] = createCompositeConstruct(lType, rTypeConstituents); + } + } + } + return builder.createCompositeConstruct(resultTypeId, constituents); +}
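(The two paths above, sketched for an assumed struct with a float and a vec3 member, copied between logically identical but nominally distinct types:)

    // SPIR-V 1.4+:  %tmp = OpCopyLogical %DstStruct %src
    // pre-1.4:      %m0  = OpCompositeExtract %float   %src 0
    //               %m1  = OpCompositeExtract %v3float %src 1
    //               %tmp = OpCompositeConstruct %DstStruct %m0 %m1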
+ +bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TIntermAggregate* node) +{ + SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder); + if (node->getType().getQualifier().isSpecConstant()) + spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); + + spv::Id result = spv::NoResult; + spv::Id invertedType = spv::NoType; // to use to override the natural type of the node + std::vector<spv::Builder::AccessChain> complexLvalues; // for holding swizzling l-values too complex for + // SPIR-V, for an out parameter + std::vector<spv::Id> temporaryLvalues; // temporaries to pass, as proxies for complexLValues + + auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? + invertedType : + convertGlslangToSpvType(node->getType()); }; + + // try texturing + result = createImageTextureFunctionCall(node); + if (result != spv::NoResult) { + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + + return false; + } +#ifndef GLSLANG_WEB + else if (node->getOp() == glslang::EOpImageStore || + node->getOp() == glslang::EOpImageStoreLod || + node->getOp() == glslang::EOpImageAtomicStore) { + // "imageStore" is a special case, which has no result + return false; + } +#endif + + glslang::TOperator binOp = glslang::EOpNull; + bool reduceComparison = true; + bool isMatrix = false; + bool noReturnValue = false; + bool atomic = false; + + spv::Builder::AccessChain::CoherentFlags lvalueCoherentFlags; + + assert(node->getOp()); + + spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision()); + + switch (node->getOp()) { + case glslang::EOpSequence: + { + if (preVisit) + ++sequenceDepth; + else + --sequenceDepth; + + if (sequenceDepth == 1) { + // If this is the parent node of all the functions, we want to see them + // early, so all call points have actual SPIR-V functions to reference. + // In all cases, still let the traverser visit the children for us. + makeFunctions(node->getAsAggregate()->getSequence()); + + // Also, we want all global initializers to go into the beginning of the entry point, before + // anything else gets there, so visit out of order, doing them all now. + makeGlobalInitializers(node->getAsAggregate()->getSequence()); + + // Initializers are done, don't want to visit again, but functions and link objects need to be processed, + // so do them manually. + visitFunctions(node->getAsAggregate()->getSequence()); + + return false; + } + + return true; + } + case glslang::EOpLinkerObjects: + { + if (visit == glslang::EvPreVisit) + linkageOnly = true; + else + linkageOnly = false; + + return true; + } + case glslang::EOpComma: + { + // processing from left to right naturally leaves the right-most + // lying around in the access chain + glslang::TIntermSequence& glslangOperands = node->getSequence(); + for (int i = 0; i < (int)glslangOperands.size(); ++i) + glslangOperands[i]->traverse(this); + + return false; + } + case glslang::EOpFunction: + if (visit == glslang::EvPreVisit) { + if (isShaderEntryPoint(node)) { + inEntryPoint = true; + builder.setBuildPoint(shaderEntry->getLastBlock()); + currentFunction = shaderEntry; + } else { + handleFunctionEntry(node); + } + } else { + if (inEntryPoint) + entryPointTerminated = true; + builder.leaveFunction(); + inEntryPoint = false; + } + + return true; + case glslang::EOpParameters: + // Parameters will have been consumed by EOpFunction processing, but not + // the body, so we still visited the function node's children, making this + // child redundant.
+ return false; + case glslang::EOpFunctionCall: + { + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + if (node->isUserDefined()) + result = handleUserFunctionCall(node); + if (result) { + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + } else + logger->missingFunctionality("missing user function; linker needs to catch that"); + + return false; + } + case glslang::EOpConstructMat2x2: + case glslang::EOpConstructMat2x3: + case glslang::EOpConstructMat2x4: + case glslang::EOpConstructMat3x2: + case glslang::EOpConstructMat3x3: + case glslang::EOpConstructMat3x4: + case glslang::EOpConstructMat4x2: + case glslang::EOpConstructMat4x3: + case glslang::EOpConstructMat4x4: + case glslang::EOpConstructDMat2x2: + case glslang::EOpConstructDMat2x3: + case glslang::EOpConstructDMat2x4: + case glslang::EOpConstructDMat3x2: + case glslang::EOpConstructDMat3x3: + case glslang::EOpConstructDMat3x4: + case glslang::EOpConstructDMat4x2: + case glslang::EOpConstructDMat4x3: + case glslang::EOpConstructDMat4x4: + case glslang::EOpConstructIMat2x2: + case glslang::EOpConstructIMat2x3: + case glslang::EOpConstructIMat2x4: + case glslang::EOpConstructIMat3x2: + case glslang::EOpConstructIMat3x3: + case glslang::EOpConstructIMat3x4: + case glslang::EOpConstructIMat4x2: + case glslang::EOpConstructIMat4x3: + case glslang::EOpConstructIMat4x4: + case glslang::EOpConstructUMat2x2: + case glslang::EOpConstructUMat2x3: + case glslang::EOpConstructUMat2x4: + case glslang::EOpConstructUMat3x2: + case glslang::EOpConstructUMat3x3: + case glslang::EOpConstructUMat3x4: + case glslang::EOpConstructUMat4x2: + case glslang::EOpConstructUMat4x3: + case glslang::EOpConstructUMat4x4: + case glslang::EOpConstructBMat2x2: + case glslang::EOpConstructBMat2x3: + case glslang::EOpConstructBMat2x4: + case glslang::EOpConstructBMat3x2: + case glslang::EOpConstructBMat3x3: + case glslang::EOpConstructBMat3x4: + case glslang::EOpConstructBMat4x2: + case glslang::EOpConstructBMat4x3: + case glslang::EOpConstructBMat4x4: + case glslang::EOpConstructF16Mat2x2: + case glslang::EOpConstructF16Mat2x3: + case glslang::EOpConstructF16Mat2x4: + case glslang::EOpConstructF16Mat3x2: + case glslang::EOpConstructF16Mat3x3: + case glslang::EOpConstructF16Mat3x4: + case glslang::EOpConstructF16Mat4x2: + case glslang::EOpConstructF16Mat4x3: + case glslang::EOpConstructF16Mat4x4: + isMatrix = true; + // fall through + case glslang::EOpConstructFloat: + case glslang::EOpConstructVec2: + case glslang::EOpConstructVec3: + case glslang::EOpConstructVec4: + case glslang::EOpConstructDouble: + case glslang::EOpConstructDVec2: + case glslang::EOpConstructDVec3: + case glslang::EOpConstructDVec4: + case glslang::EOpConstructFloat16: + case glslang::EOpConstructF16Vec2: + case glslang::EOpConstructF16Vec3: + case glslang::EOpConstructF16Vec4: + case glslang::EOpConstructBool: + case glslang::EOpConstructBVec2: + case glslang::EOpConstructBVec3: + case glslang::EOpConstructBVec4: + case glslang::EOpConstructInt8: + case glslang::EOpConstructI8Vec2: + case glslang::EOpConstructI8Vec3: + case glslang::EOpConstructI8Vec4: + case glslang::EOpConstructUint8: + case glslang::EOpConstructU8Vec2: + case glslang::EOpConstructU8Vec3: + case glslang::EOpConstructU8Vec4: + case glslang::EOpConstructInt16: + case glslang::EOpConstructI16Vec2: + case glslang::EOpConstructI16Vec3: + case glslang::EOpConstructI16Vec4: + case glslang::EOpConstructUint16: + case glslang::EOpConstructU16Vec2: + case glslang::EOpConstructU16Vec3: + case 
glslang::EOpConstructU16Vec4: + case glslang::EOpConstructInt: + case glslang::EOpConstructIVec2: + case glslang::EOpConstructIVec3: + case glslang::EOpConstructIVec4: + case glslang::EOpConstructUint: + case glslang::EOpConstructUVec2: + case glslang::EOpConstructUVec3: + case glslang::EOpConstructUVec4: + case glslang::EOpConstructInt64: + case glslang::EOpConstructI64Vec2: + case glslang::EOpConstructI64Vec3: + case glslang::EOpConstructI64Vec4: + case glslang::EOpConstructUint64: + case glslang::EOpConstructU64Vec2: + case glslang::EOpConstructU64Vec3: + case glslang::EOpConstructU64Vec4: + case glslang::EOpConstructStruct: + case glslang::EOpConstructTextureSampler: + case glslang::EOpConstructReference: + case glslang::EOpConstructCooperativeMatrix: + { + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + std::vector<spv::Id> arguments; + translateArguments(*node, arguments, lvalueCoherentFlags); + spv::Id constructed; + if (node->getOp() == glslang::EOpConstructTextureSampler) + constructed = builder.createOp(spv::OpSampledImage, resultType(), arguments); + else if (node->getOp() == glslang::EOpConstructStruct || + node->getOp() == glslang::EOpConstructCooperativeMatrix || + node->getType().isArray()) { + std::vector<spv::Id> constituents; + for (int c = 0; c < (int)arguments.size(); ++c) + constituents.push_back(arguments[c]); + constructed = createCompositeConstruct(resultType(), constituents); + } else if (isMatrix) + constructed = builder.createMatrixConstructor(precision, arguments, resultType()); + else + constructed = builder.createConstructor(precision, arguments, resultType()); + + builder.clearAccessChain(); + builder.setAccessChainRValue(constructed); + + return false; + } + + // These six are component-wise compares with component-wise results. + // Forward on to createBinaryOperation(), requesting a vector result.
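(For assumed vec3 operands, the six component-wise builtins keep their vector shape, while whole-vector '==' is reduced to a scalar, hence reduceComparison = false below:)

    // lessThan(a, b):  %r = OpFOrdLessThan %v3bool %a %b     ; stays a bvec3
    // a == b:          %e = OpFOrdEqual    %v3bool %a %b
    //                  %r = OpAll          %bool   %e        ; reduced to scalar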
+ case glslang::EOpLessThan: + case glslang::EOpGreaterThan: + case glslang::EOpLessThanEqual: + case glslang::EOpGreaterThanEqual: + case glslang::EOpVectorEqual: + case glslang::EOpVectorNotEqual: + { + // Map the operation to a binary + binOp = node->getOp(); + reduceComparison = false; + switch (node->getOp()) { + case glslang::EOpVectorEqual: binOp = glslang::EOpVectorEqual; break; + case glslang::EOpVectorNotEqual: binOp = glslang::EOpVectorNotEqual; break; + default: binOp = node->getOp(); break; + } + + break; + } + case glslang::EOpMul: + // component-wise matrix multiply + binOp = glslang::EOpMul; + break; + case glslang::EOpOuterProduct: + // two vectors multiplied to make a matrix + binOp = glslang::EOpOuterProduct; + break; + case glslang::EOpDot: + { + // for scalar dot product, use multiply + glslang::TIntermSequence& glslangOperands = node->getSequence(); + if (glslangOperands[0]->getAsTyped()->getVectorSize() == 1) + binOp = glslang::EOpMul; + break; + } + case glslang::EOpMod: + // when an aggregate, this is the floating-point mod built-in function, + // which can be emitted by the one in createBinaryOperation() + binOp = glslang::EOpMod; + break; + + case glslang::EOpEmitVertex: + case glslang::EOpEndPrimitive: + case glslang::EOpBarrier: + case glslang::EOpMemoryBarrier: + case glslang::EOpMemoryBarrierAtomicCounter: + case glslang::EOpMemoryBarrierBuffer: + case glslang::EOpMemoryBarrierImage: + case glslang::EOpMemoryBarrierShared: + case glslang::EOpGroupMemoryBarrier: + case glslang::EOpDeviceMemoryBarrier: + case glslang::EOpAllMemoryBarrierWithGroupSync: + case glslang::EOpDeviceMemoryBarrierWithGroupSync: + case glslang::EOpWorkgroupMemoryBarrier: + case glslang::EOpWorkgroupMemoryBarrierWithGroupSync: + case glslang::EOpSubgroupBarrier: + case glslang::EOpSubgroupMemoryBarrier: + case glslang::EOpSubgroupMemoryBarrierBuffer: + case glslang::EOpSubgroupMemoryBarrierImage: + case glslang::EOpSubgroupMemoryBarrierShared: + noReturnValue = true; + // These all have 0 operands and will naturally finish up in the code below for 0 operands + break; + + case glslang::EOpAtomicAdd: + case glslang::EOpAtomicMin: + case glslang::EOpAtomicMax: + case glslang::EOpAtomicAnd: + case glslang::EOpAtomicOr: + case glslang::EOpAtomicXor: + case glslang::EOpAtomicExchange: + case glslang::EOpAtomicCompSwap: + atomic = true; + break; + +#ifndef GLSLANG_WEB + case glslang::EOpAtomicStore: + noReturnValue = true; + // fallthrough + case glslang::EOpAtomicLoad: + atomic = true; + break; + + case glslang::EOpAtomicCounterAdd: + case glslang::EOpAtomicCounterSubtract: + case glslang::EOpAtomicCounterMin: + case glslang::EOpAtomicCounterMax: + case glslang::EOpAtomicCounterAnd: + case glslang::EOpAtomicCounterOr: + case glslang::EOpAtomicCounterXor: + case glslang::EOpAtomicCounterExchange: + case glslang::EOpAtomicCounterCompSwap: + builder.addExtension("SPV_KHR_shader_atomic_counter_ops"); + builder.addCapability(spv::CapabilityAtomicStorageOps); + atomic = true; + break; + + case glslang::EOpAbsDifference: + case glslang::EOpAddSaturate: + case glslang::EOpSubSaturate: + case glslang::EOpAverage: + case glslang::EOpAverageRounded: + case glslang::EOpMul32x16: + builder.addCapability(spv::CapabilityIntegerFunctions2INTEL); + builder.addExtension("SPV_INTEL_shader_integer_functions2"); + binOp = node->getOp(); + break; + + case glslang::EOpIgnoreIntersection: + case glslang::EOpTerminateRay: + case glslang::EOpTrace: + case glslang::EOpExecuteCallable: + case 
glslang::EOpWritePackedPrimitiveIndices4x8NV: + noReturnValue = true; + break; + case glslang::EOpRayQueryInitialize: + case glslang::EOpRayQueryTerminate: + case glslang::EOpRayQueryGenerateIntersection: + case glslang::EOpRayQueryConfirmIntersection: + builder.addExtension("SPV_KHR_ray_query"); + builder.addCapability(spv::CapabilityRayQueryProvisionalKHR); + noReturnValue = true; + break; + case glslang::EOpRayQueryProceed: + case glslang::EOpRayQueryGetIntersectionType: + case glslang::EOpRayQueryGetRayTMin: + case glslang::EOpRayQueryGetRayFlags: + case glslang::EOpRayQueryGetIntersectionT: + case glslang::EOpRayQueryGetIntersectionInstanceCustomIndex: + case glslang::EOpRayQueryGetIntersectionInstanceId: + case glslang::EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: + case glslang::EOpRayQueryGetIntersectionGeometryIndex: + case glslang::EOpRayQueryGetIntersectionPrimitiveIndex: + case glslang::EOpRayQueryGetIntersectionBarycentrics: + case glslang::EOpRayQueryGetIntersectionFrontFace: + case glslang::EOpRayQueryGetIntersectionCandidateAABBOpaque: + case glslang::EOpRayQueryGetIntersectionObjectRayDirection: + case glslang::EOpRayQueryGetIntersectionObjectRayOrigin: + case glslang::EOpRayQueryGetWorldRayDirection: + case glslang::EOpRayQueryGetWorldRayOrigin: + case glslang::EOpRayQueryGetIntersectionObjectToWorld: + case glslang::EOpRayQueryGetIntersectionWorldToObject: + builder.addExtension("SPV_KHR_ray_query"); + builder.addCapability(spv::CapabilityRayQueryProvisionalKHR); + break; + case glslang::EOpCooperativeMatrixLoad: + case glslang::EOpCooperativeMatrixStore: + noReturnValue = true; + break; + case glslang::EOpBeginInvocationInterlock: + case glslang::EOpEndInvocationInterlock: + builder.addExtension(spv::E_SPV_EXT_fragment_shader_interlock); + noReturnValue = true; + break; +#endif + + case glslang::EOpDebugPrintf: + noReturnValue = true; + break; + + default: + break; + } + + // + // See if it maps to a regular operation. + // + if (binOp != glslang::EOpNull) { + glslang::TIntermTyped* left = node->getSequence()[0]->getAsTyped(); + glslang::TIntermTyped* right = node->getSequence()[1]->getAsTyped(); + assert(left && right); + + builder.clearAccessChain(); + left->traverse(this); + spv::Id leftId = accessChainLoad(left->getType()); + + builder.clearAccessChain(); + right->traverse(this); + spv::Id rightId = accessChainLoad(right->getType()); + + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + OpDecorations decorations = { precision, + TranslateNoContractionDecoration(node->getType().getQualifier()), + TranslateNonUniformDecoration(node->getType().getQualifier()) }; + result = createBinaryOperation(binOp, decorations, + resultType(), leftId, rightId, + left->getType().getBasicType(), reduceComparison); + + // code above should only make binOp that exists in createBinaryOperation + assert(result != spv::NoResult); + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + + return false; + } + + // + // Create the list of operands. 
+ // + glslang::TIntermSequence& glslangOperands = node->getSequence(); + std::vector<spv::Id> operands; + std::vector<spv::IdImmediate> memoryAccessOperands; + for (int arg = 0; arg < (int)glslangOperands.size(); ++arg) { + // special case l-value operands; there are just a few + bool lvalue = false; + switch (node->getOp()) { + case glslang::EOpModf: + if (arg == 1) + lvalue = true; + break; + + case glslang::EOpRayQueryInitialize: + case glslang::EOpRayQueryTerminate: + case glslang::EOpRayQueryConfirmIntersection: + case glslang::EOpRayQueryProceed: + case glslang::EOpRayQueryGenerateIntersection: + case glslang::EOpRayQueryGetIntersectionType: + case glslang::EOpRayQueryGetIntersectionT: + case glslang::EOpRayQueryGetIntersectionInstanceCustomIndex: + case glslang::EOpRayQueryGetIntersectionInstanceId: + case glslang::EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: + case glslang::EOpRayQueryGetIntersectionGeometryIndex: + case glslang::EOpRayQueryGetIntersectionPrimitiveIndex: + case glslang::EOpRayQueryGetIntersectionBarycentrics: + case glslang::EOpRayQueryGetIntersectionFrontFace: + case glslang::EOpRayQueryGetIntersectionObjectRayDirection: + case glslang::EOpRayQueryGetIntersectionObjectRayOrigin: + case glslang::EOpRayQueryGetIntersectionObjectToWorld: + case glslang::EOpRayQueryGetIntersectionWorldToObject: + if (arg == 0) + lvalue = true; + break; + + case glslang::EOpAtomicAdd: + case glslang::EOpAtomicMin: + case glslang::EOpAtomicMax: + case glslang::EOpAtomicAnd: + case glslang::EOpAtomicOr: + case glslang::EOpAtomicXor: + case glslang::EOpAtomicExchange: + case glslang::EOpAtomicCompSwap: + if (arg == 0) + lvalue = true; + break; + +#ifndef GLSLANG_WEB + case glslang::EOpFrexp: + if (arg == 1) + lvalue = true; + break; + case glslang::EOpInterpolateAtSample: + case glslang::EOpInterpolateAtOffset: + case glslang::EOpInterpolateAtVertex: + if (arg == 0) { + lvalue = true; + + // Does it need a swizzle inversion? If so, evaluation is inverted; + // operate first on the swizzle base, then apply the swizzle.
+ // That is, we transform + // + // interpolate(v.zy) -> interpolate(v).zy + // + if (glslangOperands[0]->getAsOperator() && + glslangOperands[0]->getAsOperator()->getOp() == glslang::EOpVectorSwizzle) + invertedType = convertGlslangToSpvType( + glslangOperands[0]->getAsBinaryNode()->getLeft()->getType()); + } + break; + case glslang::EOpAtomicLoad: + case glslang::EOpAtomicStore: + case glslang::EOpAtomicCounterAdd: + case glslang::EOpAtomicCounterSubtract: + case glslang::EOpAtomicCounterMin: + case glslang::EOpAtomicCounterMax: + case glslang::EOpAtomicCounterAnd: + case glslang::EOpAtomicCounterOr: + case glslang::EOpAtomicCounterXor: + case glslang::EOpAtomicCounterExchange: + case glslang::EOpAtomicCounterCompSwap: + if (arg == 0) + lvalue = true; + break; + case glslang::EOpAddCarry: + case glslang::EOpSubBorrow: + if (arg == 2) + lvalue = true; + break; + case glslang::EOpUMulExtended: + case glslang::EOpIMulExtended: + if (arg >= 2) + lvalue = true; + break; + case glslang::EOpCooperativeMatrixLoad: + if (arg == 0 || arg == 1) + lvalue = true; + break; + case glslang::EOpCooperativeMatrixStore: + if (arg == 1) + lvalue = true; + break; +#endif + default: + break; + } + builder.clearAccessChain(); + if (invertedType != spv::NoType && arg == 0) + glslangOperands[0]->getAsBinaryNode()->getLeft()->traverse(this); + else + glslangOperands[arg]->traverse(this); + +#ifndef GLSLANG_WEB + if (node->getOp() == glslang::EOpCooperativeMatrixLoad || + node->getOp() == glslang::EOpCooperativeMatrixStore) { + + if (arg == 1) { + // fold "element" parameter into the access chain + spv::Builder::AccessChain save = builder.getAccessChain(); + builder.clearAccessChain(); + glslangOperands[2]->traverse(this); + + spv::Id elementId = accessChainLoad(glslangOperands[2]->getAsTyped()->getType()); + + builder.setAccessChain(save); + + // Point to the first element of the array. + builder.accessChainPush(elementId, + TranslateCoherent(glslangOperands[arg]->getAsTyped()->getType()), + glslangOperands[arg]->getAsTyped()->getType().getBufferReferenceAlignment()); + + spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags; + unsigned int alignment = builder.getAccessChain().alignment; + + int memoryAccess = TranslateMemoryAccess(coherentFlags); + if (node->getOp() == glslang::EOpCooperativeMatrixLoad) + memoryAccess &= ~spv::MemoryAccessMakePointerAvailableKHRMask; + if (node->getOp() == glslang::EOpCooperativeMatrixStore) + memoryAccess &= ~spv::MemoryAccessMakePointerVisibleKHRMask; + if (builder.getStorageClass(builder.getAccessChain().base) == + spv::StorageClassPhysicalStorageBufferEXT) { + memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask); + } + + memoryAccessOperands.push_back(spv::IdImmediate(false, memoryAccess)); + + if (memoryAccess & spv::MemoryAccessAlignedMask) { + memoryAccessOperands.push_back(spv::IdImmediate(false, alignment)); + } + + if (memoryAccess & + (spv::MemoryAccessMakePointerAvailableKHRMask | spv::MemoryAccessMakePointerVisibleKHRMask)) { + memoryAccessOperands.push_back(spv::IdImmediate(true, + builder.makeUintConstant(TranslateMemoryScope(coherentFlags)))); + } + } else if (arg == 2) { + continue; + } + } +#endif + + // for l-values, pass the address, for r-values, pass the value + if (lvalue) { + if (invertedType == spv::NoType && !builder.isSpvLvalue()) { + // SPIR-V cannot represent an l-value containing a swizzle that doesn't + // reduce to a simple access chain. 
So, we need a temporary vector to + // receive the result, and must later swizzle that into the original + // l-value. + complexLvalues.push_back(builder.getAccessChain()); + temporaryLvalues.push_back(builder.createVariable(spv::StorageClassFunction, + builder.accessChainGetInferredType(), "swizzleTemp")); + operands.push_back(temporaryLvalues.back()); + } else { + operands.push_back(builder.accessChainGetLValue()); + } + lvalueCoherentFlags = builder.getAccessChain().coherentFlags; + lvalueCoherentFlags |= TranslateCoherent(glslangOperands[arg]->getAsTyped()->getType()); + } else { + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + glslang::TOperator glslangOp = node->getOp(); + if (arg == 1 && + (glslangOp == glslang::EOpRayQueryGetIntersectionType || + glslangOp == glslang::EOpRayQueryGetIntersectionT || + glslangOp == glslang::EOpRayQueryGetIntersectionInstanceCustomIndex || + glslangOp == glslang::EOpRayQueryGetIntersectionInstanceId || + glslangOp == glslang::EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset || + glslangOp == glslang::EOpRayQueryGetIntersectionGeometryIndex || + glslangOp == glslang::EOpRayQueryGetIntersectionPrimitiveIndex || + glslangOp == glslang::EOpRayQueryGetIntersectionBarycentrics || + glslangOp == glslang::EOpRayQueryGetIntersectionFrontFace || + glslangOp == glslang::EOpRayQueryGetIntersectionObjectRayDirection || + glslangOp == glslang::EOpRayQueryGetIntersectionObjectRayOrigin || + glslangOp == glslang::EOpRayQueryGetIntersectionObjectToWorld || + glslangOp == glslang::EOpRayQueryGetIntersectionWorldToObject + )) { + bool cond = glslangOperands[arg]->getAsConstantUnion()->getConstArray()[0].getBConst(); + operands.push_back(builder.makeIntConstant(cond ? 1 : 0)); + } + else { + operands.push_back(accessChainLoad(glslangOperands[arg]->getAsTyped()->getType())); + } + + } + } + + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); +#ifndef GLSLANG_WEB + if (node->getOp() == glslang::EOpCooperativeMatrixLoad) { + std::vector<spv::IdImmediate> idImmOps; + + idImmOps.push_back(spv::IdImmediate(true, operands[1])); // buf + idImmOps.push_back(spv::IdImmediate(true, operands[2])); // stride + idImmOps.push_back(spv::IdImmediate(true, operands[3])); // colMajor + idImmOps.insert(idImmOps.end(), memoryAccessOperands.begin(), memoryAccessOperands.end()); + // get the pointee type + spv::Id typeId = builder.getContainedTypeId(builder.getTypeId(operands[0])); + assert(builder.isCooperativeMatrixType(typeId)); + // do the op + spv::Id result = builder.createOp(spv::OpCooperativeMatrixLoadNV, typeId, idImmOps); + // store the result to the pointer (out param 'm') + builder.createStore(result, operands[0]); + result = 0; + } else if (node->getOp() == glslang::EOpCooperativeMatrixStore) { + std::vector<spv::IdImmediate> idImmOps; + + idImmOps.push_back(spv::IdImmediate(true, operands[1])); // buf + idImmOps.push_back(spv::IdImmediate(true, operands[0])); // object + idImmOps.push_back(spv::IdImmediate(true, operands[2])); // stride + idImmOps.push_back(spv::IdImmediate(true, operands[3])); // colMajor + idImmOps.insert(idImmOps.end(), memoryAccessOperands.begin(), memoryAccessOperands.end()); + + builder.createNoResultOp(spv::OpCooperativeMatrixStoreNV, idImmOps); + result = 0; + } else +#endif + if (atomic) { + // Handle all atomics + result = createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType(), + lvalueCoherentFlags); + } else if (node->getOp() == glslang::EOpDebugPrintf) { + if (!nonSemanticDebugPrintf) { +
nonSemanticDebugPrintf = builder.import("NonSemantic.DebugPrintf");
+ }
+ result = builder.createBuiltinCall(builder.makeVoidType(), nonSemanticDebugPrintf, spv::NonSemanticDebugPrintfDebugPrintf, operands);
+ builder.addExtension(spv::E_SPV_KHR_non_semantic_info);
+ } else {
+ // Pass through to generic operations.
+ switch (glslangOperands.size()) {
+ case 0:
+ result = createNoArgOperation(node->getOp(), precision, resultType());
+ break;
+ case 1:
+ {
+ OpDecorations decorations = { precision,
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ result = createUnaryOperation(
+ node->getOp(), decorations,
+ resultType(), operands.front(),
+ glslangOperands[0]->getAsTyped()->getBasicType(), lvalueCoherentFlags);
+ }
+ break;
+ default:
+ result = createMiscOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+ break;
+ }
+
+ if (invertedType != spv::NoResult)
+ result = createInvertedSwizzle(precision, *glslangOperands[0]->getAsBinaryNode(), result);
+
+ for (unsigned int i = 0; i < temporaryLvalues.size(); ++i) {
+ builder.setAccessChain(complexLvalues[i]);
+ builder.accessChainStore(builder.createLoad(temporaryLvalues[i]));
+ }
+ }
+
+ if (noReturnValue)
+ return false;
+
+ if (! result) {
+ logger->missingFunctionality("unknown glslang aggregate");
+ return true; // pick up a child as a placeholder operand
+ } else {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ return false;
+ }
+}
+
+// This path handles both if-then-else and ?:
+// The if-then-else has a node type of void, while
+// ?: has either a void or a non-void node type
+//
+// Leaving the result, when not void:
+// GLSL only has r-values as the result of a ?:, but
+// if we have an l-value, that can be more efficient if it will
+// become the base of a complex r-value expression, because the
+// next layer copies r-values into memory to use the access-chain mechanism
+bool TGlslangToSpvTraverser::visitSelection(glslang::TVisit /* visit */, glslang::TIntermSelection* node)
+{
+ // see if OpSelect can handle it
+ const auto isOpSelectable = [&]() {
+ if (node->getBasicType() == glslang::EbtVoid)
+ return false;
+ // OpSelect can do all other types starting with SPV 1.4
+ if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_4) {
+ // pre-1.4, only scalars and vectors can be handled
+ if ((!node->getType().isScalar() && !node->getType().isVector()))
+ return false;
+ }
+ return true;
+ };
+
+ // See if it is simple and safe, or required, to execute both sides.
+ // Crucially, side effects must be either semantically required or avoided,
+ // and there are performance trade-offs.
+ // Return true if required or a good idea (and safe) to execute both sides,
+ // false otherwise.
+ const auto bothSidesPolicy = [&]() -> bool {
+ // do we have both sides?
+ if (node->getTrueBlock() == nullptr ||
+ node->getFalseBlock() == nullptr)
+ return false;
+
+ // required? (unless we write additional code to look for side effects
+ // and make performance trade-offs if none are present)
+ if (!node->getShortCircuit())
+ return true;
+
+ // if not required to execute both, decide based on performance/practicality...
+
+ if (!isOpSelectable())
+ return false;
+
+ assert(node->getType() == node->getTrueBlock() ->getAsTyped()->getType() &&
+ node->getType() == node->getFalseBlock()->getAsTyped()->getType());
+
+ // return true if a single operand to ? : is okay for OpSelect
+ const auto operandOkay = [](glslang::TIntermTyped* node) {
+ return node->getAsSymbolNode() || node->getType().getQualifier().isConstant();
+ };
+
+ return operandOkay(node->getTrueBlock() ->getAsTyped()) &&
+ operandOkay(node->getFalseBlock()->getAsTyped());
+ };
+
+ spv::Id result = spv::NoResult; // upcoming result selecting between trueValue and falseValue
+ // emit the condition before doing anything with selection
+ node->getCondition()->traverse(this);
+ spv::Id condition = accessChainLoad(node->getCondition()->getType());
+
+ // Find a way of executing both sides and selecting the right result.
+ const auto executeBothSides = [&]() -> void {
+ // execute both sides
+ node->getTrueBlock()->traverse(this);
+ spv::Id trueValue = accessChainLoad(node->getTrueBlock()->getAsTyped()->getType());
+ node->getFalseBlock()->traverse(this);
+ spv::Id falseValue = accessChainLoad(node->getFalseBlock()->getAsTyped()->getType());
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ // done if void
+ if (node->getBasicType() == glslang::EbtVoid)
+ return;
+
+ // emit code to select between trueValue and falseValue
+
+ // see if OpSelect can handle it
+ if (isOpSelectable()) {
+ // Emit OpSelect for this selection.
+
+ // smear condition to vector, if necessary (AST is always scalar)
+ // Before 1.4, smear like for mix(), starting with 1.4, keep it scalar
+ if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_4 && builder.isVector(trueValue)) {
+ condition = builder.smearScalar(spv::NoPrecision, condition,
+ builder.makeVectorType(builder.makeBoolType(),
+ builder.getNumComponents(trueValue)));
+ }
+
+ // OpSelect
+ result = builder.createTriOp(spv::OpSelect,
+ convertGlslangToSpvType(node->getType()), condition,
+ trueValue, falseValue);
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ } else {
+ // We need control flow to select the result.
+ // TODO: Once SPIR-V OpSelect allows arbitrary types, eliminate this path.
+ result = builder.createVariable(spv::StorageClassFunction, convertGlslangToSpvType(node->getType()));
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSelectionControl(*node);
+
+ // make an "if" based on the value created by the condition
+ spv::Builder::If ifBuilder(condition, control, builder);
+
+ // emit the "then" statement
+ builder.createStore(trueValue, result);
+ ifBuilder.makeBeginElse();
+ // emit the "else" statement
+ builder.createStore(falseValue, result);
+
+ // finish off the control flow
+ ifBuilder.makeEndIf();
+
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(result);
+ }
+ };
+
+ // Execute the one side needed, as per the condition
+ const auto executeOneSide = [&]() {
+ // Always emit control flow.
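+ // (Illustrative example, not from the original comments: for
+ //     int r = cond ? f() : g();
+ // where f() and g() have side effects, only the taken side may execute,
+ // so an if/else is emitted here instead of OpSelect.)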
+ if (node->getBasicType() != glslang::EbtVoid)
+ result = builder.createVariable(spv::StorageClassFunction, convertGlslangToSpvType(node->getType()));
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSelectionControl(*node);
+
+ // make an "if" based on the value created by the condition
+ spv::Builder::If ifBuilder(condition, control, builder);
+
+ // emit the "then" statement
+ if (node->getTrueBlock() != nullptr) {
+ node->getTrueBlock()->traverse(this);
+ if (result != spv::NoResult)
+ builder.createStore(accessChainLoad(node->getTrueBlock()->getAsTyped()->getType()), result);
+ }
+
+ if (node->getFalseBlock() != nullptr) {
+ ifBuilder.makeBeginElse();
+ // emit the "else" statement
+ node->getFalseBlock()->traverse(this);
+ if (result != spv::NoResult)
+ builder.createStore(accessChainLoad(node->getFalseBlock()->getAsTyped()->getType()), result);
+ }
+
+ // finish off the control flow
+ ifBuilder.makeEndIf();
+
+ if (result != spv::NoResult) {
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(result);
+ }
+ };
+
+ // Try for OpSelect (or a requirement to execute both sides)
+ if (bothSidesPolicy()) {
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+ executeBothSides();
+ } else
+ executeOneSide();
+
+ return false;
+}
+
+bool TGlslangToSpvTraverser::visitSwitch(glslang::TVisit /* visit */, glslang::TIntermSwitch* node)
+{
+ // emit and get the condition before doing anything with switch
+ node->getCondition()->traverse(this);
+ spv::Id selector = accessChainLoad(node->getCondition()->getAsTyped()->getType());
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSwitchControl(*node);
+
+ // browse the children to sort out code segments
+ int defaultSegment = -1;
+ std::vector<TIntermNode*> codeSegments;
+ glslang::TIntermSequence& sequence = node->getBody()->getSequence();
+ std::vector<int> caseValues;
+ std::vector<int> valueIndexToSegment(sequence.size()); // note: probably not all are used, it is an overestimate
+ for (glslang::TIntermSequence::iterator c = sequence.begin(); c != sequence.end(); ++c) {
+ TIntermNode* child = *c;
+ if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpDefault)
+ defaultSegment = (int)codeSegments.size();
+ else if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpCase) {
+ valueIndexToSegment[caseValues.size()] = (int)codeSegments.size();
+ caseValues.push_back(child->getAsBranchNode()->getExpression()->getAsConstantUnion()
+ ->getConstArray()[0].getIConst());
+ } else
+ codeSegments.push_back(child);
+ }
+
+ // handle the case where the last code segment is missing, due to no code
+ // statements between the last case and the end of the switch statement
+ if ((caseValues.size() && (int)codeSegments.size() == valueIndexToSegment[caseValues.size() - 1]) ||
+ (int)codeSegments.size() == defaultSegment)
+ codeSegments.push_back(nullptr);
+
+ // make the switch statement
+ std::vector<spv::Block*> segmentBlocks; // returned, as the blocks allocated in the call
+ builder.makeSwitch(selector, control, (int)codeSegments.size(), caseValues, valueIndexToSegment, defaultSegment,
+ segmentBlocks);
+
+ // emit all the code in the segments
+ breakForLoop.push(false);
+ for (unsigned int s = 0; s < codeSegments.size(); ++s) {
+ builder.nextSwitchSegment(segmentBlocks, s);
+ if (codeSegments[s])
+ codeSegments[s]->traverse(this);
+ else
+ builder.addSwitchBreak();
+ }
+ breakForLoop.pop();
+
+ builder.endSwitch(segmentBlocks);
+
+ return false;
+}
+
+void TGlslangToSpvTraverser::visitConstantUnion(glslang::TIntermConstantUnion* node)
+{
+ int nextConst = 0;
+ spv::Id constant = createSpvConstantFromConstUnionArray(node->getType(), node->getConstArray(), nextConst, false);
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(constant);
+}
+
+bool TGlslangToSpvTraverser::visitLoop(glslang::TVisit /* visit */, glslang::TIntermLoop* node)
+{
+ auto blocks = builder.makeNewLoop();
+ builder.createBranch(&blocks.head);
+
+ // Loop control:
+ std::vector<unsigned int> operands;
+ const spv::LoopControlMask control = TranslateLoopControl(*node, operands);
+
+ // Spec requires back edges to target header blocks, and every header block
+ // must dominate its merge block. Make a header block first to ensure these
+ // conditions are met. By definition, it will contain OpLoopMerge, followed
+ // by a block-ending branch. But we don't want to put any other body/test
+ // instructions in it, since the body/test may have arbitrary instructions,
+ // including merges of its own.
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ builder.setBuildPoint(&blocks.head);
+ builder.createLoopMerge(&blocks.merge, &blocks.continue_target, control, operands);
+ if (node->testFirst() && node->getTest()) {
+ spv::Block& test = builder.makeNewBlock();
+ builder.createBranch(&test);
+
+ builder.setBuildPoint(&test);
+ node->getTest()->traverse(this);
+ spv::Id condition = accessChainLoad(node->getTest()->getType());
+ builder.createConditionalBranch(condition, &blocks.body, &blocks.merge);
+
+ builder.setBuildPoint(&blocks.body);
+ breakForLoop.push(true);
+ if (node->getBody())
+ node->getBody()->traverse(this);
+ builder.createBranch(&blocks.continue_target);
+ breakForLoop.pop();
+
+ builder.setBuildPoint(&blocks.continue_target);
+ if (node->getTerminal())
+ node->getTerminal()->traverse(this);
+ builder.createBranch(&blocks.head);
+ } else {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ builder.createBranch(&blocks.body);
+
+ breakForLoop.push(true);
+ builder.setBuildPoint(&blocks.body);
+ if (node->getBody())
+ node->getBody()->traverse(this);
+ builder.createBranch(&blocks.continue_target);
+ breakForLoop.pop();
+
+ builder.setBuildPoint(&blocks.continue_target);
+ if (node->getTerminal())
+ node->getTerminal()->traverse(this);
+ if (node->getTest()) {
+ node->getTest()->traverse(this);
+ spv::Id condition =
+ accessChainLoad(node->getTest()->getType());
+ builder.createConditionalBranch(condition, &blocks.head, &blocks.merge);
+ } else {
+ // TODO: unless there was a break/return/discard instruction
+ // somewhere in the body, this is an infinite loop, so we should
+ // issue a warning.
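+ // (Illustrative, assumed example: a GLSL "for (;;) { ... }" has no test
+ // node, so this path unconditionally branches back to the loop header.)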
+ builder.createBranch(&blocks.head); + } + } + builder.setBuildPoint(&blocks.merge); + builder.closeLoop(); + return false; +} + +bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::TIntermBranch* node) +{ + if (node->getExpression()) + node->getExpression()->traverse(this); + + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + + switch (node->getFlowOp()) { + case glslang::EOpKill: + builder.makeDiscard(); + break; + case glslang::EOpBreak: + if (breakForLoop.top()) + builder.createLoopExit(); + else + builder.addSwitchBreak(); + break; + case glslang::EOpContinue: + builder.createLoopContinue(); + break; + case glslang::EOpReturn: + if (node->getExpression()) { + const glslang::TType& glslangReturnType = node->getExpression()->getType(); + spv::Id returnId = accessChainLoad(glslangReturnType); + if (builder.getTypeId(returnId) != currentFunction->getReturnType()) { + builder.clearAccessChain(); + spv::Id copyId = builder.createVariable(spv::StorageClassFunction, currentFunction->getReturnType()); + builder.setAccessChainLValue(copyId); + multiTypeStore(glslangReturnType, returnId); + returnId = builder.createLoad(copyId); + } + builder.makeReturn(false, returnId); + } else + builder.makeReturn(false); + + builder.clearAccessChain(); + break; + +#ifndef GLSLANG_WEB + case glslang::EOpDemote: + builder.createNoResultOp(spv::OpDemoteToHelperInvocationEXT); + builder.addExtension(spv::E_SPV_EXT_demote_to_helper_invocation); + builder.addCapability(spv::CapabilityDemoteToHelperInvocationEXT); + break; +#endif + + default: + assert(0); + break; + } + + return false; +} + +spv::Id TGlslangToSpvTraverser::createSpvVariable(const glslang::TIntermSymbol* node, spv::Id forcedType) +{ + // First, steer off constants, which are not SPIR-V variables, but + // can still have a mapping to a SPIR-V Id. + // This includes specialization constants. + if (node->getQualifier().isConstant()) { + spv::Id result = createSpvConstant(*node); + if (result != spv::NoResult) + return result; + } + + // Now, handle actual variables + spv::StorageClass storageClass = TranslateStorageClass(node->getType()); + spv::Id spvType = forcedType == spv::NoType ? 
convertGlslangToSpvType(node->getType()) + : forcedType; + + const bool contains16BitType = node->getType().contains16BitFloat() || + node->getType().contains16BitInt(); + if (contains16BitType) { + switch (storageClass) { + case spv::StorageClassInput: + case spv::StorageClassOutput: + builder.addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + builder.addCapability(spv::CapabilityStorageInputOutput16); + break; + case spv::StorageClassUniform: + builder.addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + if (node->getType().getQualifier().storage == glslang::EvqBuffer) + builder.addCapability(spv::CapabilityStorageUniformBufferBlock16); + else + builder.addCapability(spv::CapabilityStorageUniform16); + break; +#ifndef GLSLANG_WEB + case spv::StorageClassPushConstant: + builder.addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + builder.addCapability(spv::CapabilityStoragePushConstant16); + break; + case spv::StorageClassStorageBuffer: + case spv::StorageClassPhysicalStorageBufferEXT: + builder.addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + builder.addCapability(spv::CapabilityStorageUniformBufferBlock16); + break; +#endif + default: + if (node->getType().contains16BitFloat()) + builder.addCapability(spv::CapabilityFloat16); + if (node->getType().contains16BitInt()) + builder.addCapability(spv::CapabilityInt16); + break; + } + } + + if (node->getType().contains8BitInt()) { + if (storageClass == spv::StorageClassPushConstant) { + builder.addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5); + builder.addCapability(spv::CapabilityStoragePushConstant8); + } else if (storageClass == spv::StorageClassUniform) { + builder.addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5); + builder.addCapability(spv::CapabilityUniformAndStorageBuffer8BitAccess); + } else if (storageClass == spv::StorageClassStorageBuffer) { + builder.addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5); + builder.addCapability(spv::CapabilityStorageBuffer8BitAccess); + } else { + builder.addCapability(spv::CapabilityInt8); + } + } + + const char* name = node->getName().c_str(); + if (glslang::IsAnonymous(name)) + name = ""; + + return builder.createVariable(storageClass, spvType, name); +} + +// Return type Id of the sampled type. +spv::Id TGlslangToSpvTraverser::getSampledType(const glslang::TSampler& sampler) +{ + switch (sampler.type) { + case glslang::EbtInt: return builder.makeIntType(32); + case glslang::EbtUint: return builder.makeUintType(32); + case glslang::EbtFloat: return builder.makeFloatType(32); +#ifndef GLSLANG_WEB + case glslang::EbtFloat16: + builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float_fetch); + builder.addCapability(spv::CapabilityFloat16ImageAMD); + return builder.makeFloatType(16); +#endif + default: + assert(0); + return builder.makeFloatType(32); + } +} + +// If node is a swizzle operation, return the type that should be used if +// the swizzle base is first consumed by another operation, before the swizzle +// is applied. +spv::Id TGlslangToSpvTraverser::getInvertedSwizzleType(const glslang::TIntermTyped& node) +{ + if (node.getAsOperator() && + node.getAsOperator()->getOp() == glslang::EOpVectorSwizzle) + return convertGlslangToSpvType(node.getAsBinaryNode()->getLeft()->getType()); + else + return spv::NoType; +} + +// When inverting a swizzle with a parent op, this function +// will apply the swizzle operation to a completed parent operation. 
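+// (Illustrative, hypothetical example: for "interpolateAtCentroid(v.zy)", the
+// parent op is generated on all of "v", and the ".zy" swizzle is then applied
+// here to the parent's result.)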
+spv::Id TGlslangToSpvTraverser::createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped& node,
+ spv::Id parentResult)
+{
+ std::vector<unsigned> swizzle;
+ convertSwizzle(*node.getAsBinaryNode()->getRight()->getAsAggregate(), swizzle);
+ return builder.createRvalueSwizzle(precision, convertGlslangToSpvType(node.getType()), parentResult, swizzle);
+}
+
+// Convert a glslang AST swizzle node to a swizzle vector for building SPIR-V.
+void TGlslangToSpvTraverser::convertSwizzle(const glslang::TIntermAggregate& node, std::vector<unsigned>& swizzle)
+{
+ const glslang::TIntermSequence& swizzleSequence = node.getSequence();
+ for (int i = 0; i < (int)swizzleSequence.size(); ++i)
+ swizzle.push_back(swizzleSequence[i]->getAsConstantUnion()->getConstArray()[0].getIConst());
+}
+
+// Convert from a glslang type to an SPV type, by calling into a
+// recursive version of this function. This establishes the inherited
+// layout state rooted from the top-level type.
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type, bool forwardReferenceOnly)
+{
+ return convertGlslangToSpvType(type, getExplicitLayout(type), type.getQualifier(), false, forwardReferenceOnly);
+}
+
+// Do full recursive conversion of an arbitrary glslang type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangStructToSpvType().
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type,
+ glslang::TLayoutPacking explicitLayout, const glslang::TQualifier& qualifier,
+ bool lastBufferBlockMember, bool forwardReferenceOnly)
+{
+ spv::Id spvType = spv::NoResult;
+
+ switch (type.getBasicType()) {
+ case glslang::EbtVoid:
+ spvType = builder.makeVoidType();
+ assert (! type.isArray());
+ break;
+ case glslang::EbtBool:
+ // "transparent" bool doesn't exist in SPIR-V. The GLSL convention is
+ // a 32-bit int where non-0 means true.
+ if (explicitLayout != glslang::ElpNone)
+ spvType = builder.makeUintType(32);
+ else
+ spvType = builder.makeBoolType();
+ break;
+ case glslang::EbtInt:
+ spvType = builder.makeIntType(32);
+ break;
+ case glslang::EbtUint:
+ spvType = builder.makeUintType(32);
+ break;
+ case glslang::EbtFloat:
+ spvType = builder.makeFloatType(32);
+ break;
+#ifndef GLSLANG_WEB
+ case glslang::EbtDouble:
+ spvType = builder.makeFloatType(64);
+ break;
+ case glslang::EbtFloat16:
+ spvType = builder.makeFloatType(16);
+ break;
+ case glslang::EbtInt8:
+ spvType = builder.makeIntType(8);
+ break;
+ case glslang::EbtUint8:
+ spvType = builder.makeUintType(8);
+ break;
+ case glslang::EbtInt16:
+ spvType = builder.makeIntType(16);
+ break;
+ case glslang::EbtUint16:
+ spvType = builder.makeUintType(16);
+ break;
+ case glslang::EbtInt64:
+ spvType = builder.makeIntType(64);
+ break;
+ case glslang::EbtUint64:
+ spvType = builder.makeUintType(64);
+ break;
+ case glslang::EbtAtomicUint:
+ builder.addCapability(spv::CapabilityAtomicStorage);
+ spvType = builder.makeUintType(32);
+ break;
+ case glslang::EbtAccStruct:
+ spvType = builder.makeAccelerationStructureType();
+ break;
+ case glslang::EbtRayQuery:
+ spvType = builder.makeRayQueryType();
+ break;
+ case glslang::EbtReference:
+ {
+ // Make the forward pointer, then recurse to convert the structure type, then
+ // patch up the forward pointer with a real pointer type.
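+ // (Illustrative: a GLSL "layout(buffer_reference) buffer Node { Node next; }"
+ // is self-referential, so an OpTypeForwardPointer must exist before the
+ // struct's members can be converted.)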
+ if (forwardPointers.find(type.getReferentType()) == forwardPointers.end()) { + spv::Id forwardId = builder.makeForwardPointer(spv::StorageClassPhysicalStorageBufferEXT); + forwardPointers[type.getReferentType()] = forwardId; + } + spvType = forwardPointers[type.getReferentType()]; + if (!forwardReferenceOnly) { + spv::Id referentType = convertGlslangToSpvType(*type.getReferentType()); + builder.makePointerFromForwardPointer(spv::StorageClassPhysicalStorageBufferEXT, + forwardPointers[type.getReferentType()], + referentType); + } + } + break; +#endif + case glslang::EbtSampler: + { + const glslang::TSampler& sampler = type.getSampler(); + if (sampler.isPureSampler()) { + spvType = builder.makeSamplerType(); + } else { + // an image is present, make its type + spvType = builder.makeImageType(getSampledType(sampler), TranslateDimensionality(sampler), + sampler.isShadow(), sampler.isArrayed(), sampler.isMultiSample(), + sampler.isImageClass() ? 2 : 1, TranslateImageFormat(type)); + if (sampler.isCombined()) { + // already has both image and sampler, make the combined type + spvType = builder.makeSampledImageType(spvType); + } + } + } + break; + case glslang::EbtStruct: + case glslang::EbtBlock: + { + // If we've seen this struct type, return it + const glslang::TTypeList* glslangMembers = type.getStruct(); + + // Try to share structs for different layouts, but not yet for other + // kinds of qualification (primarily not yet including interpolant qualification). + if (! HasNonLayoutQualifiers(type, qualifier)) + spvType = structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers]; + if (spvType != spv::NoResult) + break; + + // else, we haven't seen it... + if (type.getBasicType() == glslang::EbtBlock) + memberRemapper[glslangTypeToIdMap[glslangMembers]].resize(glslangMembers->size()); + spvType = convertGlslangStructToSpvType(type, glslangMembers, explicitLayout, qualifier); + } + break; + case glslang::EbtString: + // no type used for OpString + return 0; + default: + assert(0); + break; + } + + if (type.isMatrix()) + spvType = builder.makeMatrixType(spvType, type.getMatrixCols(), type.getMatrixRows()); + else { + // If this variable has a vector element count greater than 1, create a SPIR-V vector + if (type.getVectorSize() > 1) + spvType = builder.makeVectorType(spvType, type.getVectorSize()); + } + + if (type.isCoopMat()) { + builder.addCapability(spv::CapabilityCooperativeMatrixNV); + builder.addExtension(spv::E_SPV_NV_cooperative_matrix); + if (type.getBasicType() == glslang::EbtFloat16) + builder.addCapability(spv::CapabilityFloat16); + if (type.getBasicType() == glslang::EbtUint8 || + type.getBasicType() == glslang::EbtInt8) { + builder.addCapability(spv::CapabilityInt8); + } + + spv::Id scope = makeArraySizeId(*type.getTypeParameters(), 1); + spv::Id rows = makeArraySizeId(*type.getTypeParameters(), 2); + spv::Id cols = makeArraySizeId(*type.getTypeParameters(), 3); + + spvType = builder.makeCooperativeMatrixType(spvType, scope, rows, cols); + } + + if (type.isArray()) { + int stride = 0; // keep this 0 unless doing an explicit layout; 0 will mean no decoration, no stride + + // Do all but the outer dimension + if (type.getArraySizes()->getNumDims() > 1) { + // We need to decorate array strides for types needing explicit layout, except blocks. + if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock) { + // Use a dummy glslang type for querying internal strides of + // arrays of arrays, but using just a one-dimensional array. 
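+ // (Illustrative, assumed numbers: for "float a[3][5]" under std140, the
+ // inner dimension gets an ArrayStride of 16, and the outer dimension's
+ // stride then becomes 5 * 16 = 80.)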
+ glslang::TType simpleArrayType(type, 0); // dereference type of the array
+ while (simpleArrayType.getArraySizes()->getNumDims() > 1)
+ simpleArrayType.getArraySizes()->dereference();
+
+ // Will compute the higher-order strides here, rather than making a whole
+ // pile of types and doing repetitive recursion on their contents.
+ stride = getArrayStride(simpleArrayType, explicitLayout, qualifier.layoutMatrix);
+ }
+
+ // make the arrays
+ for (int dim = type.getArraySizes()->getNumDims() - 1; dim > 0; --dim) {
+ spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), dim), stride);
+ if (stride > 0)
+ builder.addDecoration(spvType, spv::DecorationArrayStride, stride);
+ stride *= type.getArraySizes()->getDimSize(dim);
+ }
+ } else {
+ // single-dimensional array, and don't yet have stride
+
+ // We need to decorate array strides for types needing explicit layout, except blocks.
+ if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock)
+ stride = getArrayStride(type, explicitLayout, qualifier.layoutMatrix);
+ }
+
+ // Do the outer dimension, which might not be known for a runtime-sized array.
+ // (Unsized arrays that survive through linking will be runtime-sized arrays)
+ if (type.isSizedArray())
+ spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), 0), stride);
+ else {
+#ifndef GLSLANG_WEB
+ if (!lastBufferBlockMember) {
+ builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5);
+ builder.addCapability(spv::CapabilityRuntimeDescriptorArrayEXT);
+ }
+#endif
+ spvType = builder.makeRuntimeArray(spvType);
+ }
+ if (stride > 0)
+ builder.addDecoration(spvType, spv::DecorationArrayStride, stride);
+ }
+
+ return spvType;
+}
+
+// TODO: this functionality should exist at a higher level, in creating the AST
+//
+// Identify interface members that don't have their required extension turned on.
+//
+bool TGlslangToSpvTraverser::filterMember(const glslang::TType& member)
+{
+#ifndef GLSLANG_WEB
+ auto& extensions = glslangIntermediate->getRequestedExtensions();
+
+ if (member.getFieldName() == "gl_SecondaryViewportMaskNV" &&
+ extensions.find("GL_NV_stereo_view_rendering") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_SecondaryPositionNV" &&
+ extensions.find("GL_NV_stereo_view_rendering") == extensions.end())
+ return true;
+
+ if (glslangIntermediate->getStage() != EShLangMeshNV) {
+ if (member.getFieldName() == "gl_ViewportMask" &&
+ extensions.find("GL_NV_viewport_array2") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_PositionPerViewNV" &&
+ extensions.find("GL_NVX_multiview_per_view_attributes") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_ViewportMaskPerViewNV" &&
+ extensions.find("GL_NVX_multiview_per_view_attributes") == extensions.end())
+ return true;
+ }
+#endif
+
+ return false;
+};
+
+// Do full recursive conversion of a glslang structure (or block) type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangToSpvType().
+spv::Id TGlslangToSpvTraverser::convertGlslangStructToSpvType(const glslang::TType& type,
+ const glslang::TTypeList* glslangMembers,
+ glslang::TLayoutPacking explicitLayout,
+ const glslang::TQualifier& qualifier)
+{
+ // Create a vector of struct types for SPIR-V to consume
+ std::vector<spv::Id> spvMembers;
+ int memberDelta = 0; // how much the member's index changes from glslang to SPIR-V, normally 0,
+ // except sometimes for blocks
+ std::vector<std::pair<glslang::TType*, glslang::TQualifier> > deferredForwardPointers;
+ for (int i = 0; i < (int)glslangMembers->size(); i++) {
+ glslang::TType& glslangMember = *(*glslangMembers)[i].type;
+ if (glslangMember.hiddenMember()) {
+ ++memberDelta;
+ if (type.getBasicType() == glslang::EbtBlock)
+ memberRemapper[glslangTypeToIdMap[glslangMembers]][i] = -1;
+ } else {
+ if (type.getBasicType() == glslang::EbtBlock) {
+ if (filterMember(glslangMember)) {
+ memberDelta++;
+ memberRemapper[glslangTypeToIdMap[glslangMembers]][i] = -1;
+ continue;
+ }
+ memberRemapper[glslangTypeToIdMap[glslangMembers]][i] = i - memberDelta;
+ }
+ // modify just this child's view of the qualifier
+ glslang::TQualifier memberQualifier = glslangMember.getQualifier();
+ InheritQualifiers(memberQualifier, qualifier);
+
+ // manually inherit location
+ if (! memberQualifier.hasLocation() && qualifier.hasLocation())
+ memberQualifier.layoutLocation = qualifier.layoutLocation;
+
+ // recurse
+ bool lastBufferBlockMember = qualifier.storage == glslang::EvqBuffer &&
+ i == (int)glslangMembers->size() - 1;
+
+ // Make forward pointers for any pointer members, and create a list of members to
+ // convert to spirv types after creating the struct.
+ if (glslangMember.isReference()) {
+ if (forwardPointers.find(glslangMember.getReferentType()) == forwardPointers.end()) {
+ deferredForwardPointers.push_back(std::make_pair(&glslangMember, memberQualifier));
+ }
+ spvMembers.push_back(
+ convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier, lastBufferBlockMember,
+ true));
+ } else {
+ spvMembers.push_back(
+ convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier, lastBufferBlockMember,
+ false));
+ }
+ }
+ }
+
+ // Make the SPIR-V type
+ spv::Id spvType = builder.makeStructType(spvMembers, type.getTypeName().c_str());
+ if (! HasNonLayoutQualifiers(type, qualifier))
+ structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers] = spvType;
+
+ // Decorate it
+ decorateStructType(type, glslangMembers, explicitLayout, qualifier, spvType);
+
+ for (int i = 0; i < (int)deferredForwardPointers.size(); ++i) {
+ auto it = deferredForwardPointers[i];
+ convertGlslangToSpvType(*it.first, explicitLayout, it.second, false);
+ }
+
+ return spvType;
+}
+
+void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
+ const glslang::TTypeList* glslangMembers,
+ glslang::TLayoutPacking explicitLayout,
+ const glslang::TQualifier& qualifier,
+ spv::Id spvType)
+{
+ // Name and decorate the non-hidden members
+ int offset = -1;
+ int locationOffset = 0; // for use within the members of this struct
+ for (int i = 0; i < (int)glslangMembers->size(); i++) {
+ glslang::TType& glslangMember = *(*glslangMembers)[i].type;
+ int member = i;
+ if (type.getBasicType() == glslang::EbtBlock) {
+ member = memberRemapper[glslangTypeToIdMap[glslangMembers]][i];
+ if (filterMember(glslangMember))
+ continue;
+ }
+
+ // modify just this child's view of the qualifier
+ glslang::TQualifier memberQualifier = glslangMember.getQualifier();
+ InheritQualifiers(memberQualifier, qualifier);
+
+ // using -1 above to indicate a hidden member
+ if (member < 0)
+ continue;
+
+ builder.addMemberName(spvType, member, glslangMember.getFieldName().c_str());
+ builder.addMemberDecoration(spvType, member,
+ TranslateLayoutDecoration(glslangMember, memberQualifier.layoutMatrix));
+ builder.addMemberDecoration(spvType, member, TranslatePrecisionDecoration(glslangMember));
+ // Add interpolation and auxiliary storage decorations only to
+ // top-level members of Input and Output storage classes
+ if (type.getQualifier().storage == glslang::EvqVaryingIn ||
+ type.getQualifier().storage == glslang::EvqVaryingOut) {
+ if (type.getBasicType() == glslang::EbtBlock ||
+ glslangIntermediate->getSource() == glslang::EShSourceHlsl) {
+ builder.addMemberDecoration(spvType, member, TranslateInterpolationDecoration(memberQualifier));
+ builder.addMemberDecoration(spvType, member, TranslateAuxiliaryStorageDecoration(memberQualifier));
+#ifndef GLSLANG_WEB
+ addMeshNVDecoration(spvType, member, memberQualifier);
+#endif
+ }
+ }
+ builder.addMemberDecoration(spvType, member, TranslateInvariantDecoration(memberQualifier));
+
+#ifndef GLSLANG_WEB
+ if (type.getBasicType() == glslang::EbtBlock &&
+ qualifier.storage == glslang::EvqBuffer) {
+ // Add memory decorations only to top-level members of shader storage block
+ std::vector<spv::Decoration> memory;
+ TranslateMemoryDecoration(memberQualifier, memory, glslangIntermediate->usingVulkanMemoryModel());
+ for (unsigned int i = 0; i < memory.size(); ++i)
+ builder.addMemberDecoration(spvType, member, memory[i]);
+ }
+
+#endif
+
+ // Location assignment was already completed correctly by the front end,
+ // just track whether a member needs to be decorated.
+ // Ignore member locations if the container is an array, as that's
+ // ill-specified and decisions have been made to not allow this.
+ if (! 
type.isArray() && memberQualifier.hasLocation()) + builder.addMemberDecoration(spvType, member, spv::DecorationLocation, memberQualifier.layoutLocation); + + if (qualifier.hasLocation()) // track for upcoming inheritance + locationOffset += glslangIntermediate->computeTypeLocationSize( + glslangMember, glslangIntermediate->getStage()); + + // component, XFB, others + if (glslangMember.getQualifier().hasComponent()) + builder.addMemberDecoration(spvType, member, spv::DecorationComponent, + glslangMember.getQualifier().layoutComponent); + if (glslangMember.getQualifier().hasXfbOffset()) + builder.addMemberDecoration(spvType, member, spv::DecorationOffset, + glslangMember.getQualifier().layoutXfbOffset); + else if (explicitLayout != glslang::ElpNone) { + // figure out what to do with offset, which is accumulating + int nextOffset; + updateMemberOffset(type, glslangMember, offset, nextOffset, explicitLayout, memberQualifier.layoutMatrix); + if (offset >= 0) + builder.addMemberDecoration(spvType, member, spv::DecorationOffset, offset); + offset = nextOffset; + } + + if (glslangMember.isMatrix() && explicitLayout != glslang::ElpNone) + builder.addMemberDecoration(spvType, member, spv::DecorationMatrixStride, + getMatrixStride(glslangMember, explicitLayout, memberQualifier.layoutMatrix)); + + // built-in variable decorations + spv::BuiltIn builtIn = TranslateBuiltInDecoration(glslangMember.getQualifier().builtIn, true); + if (builtIn != spv::BuiltInMax) + builder.addMemberDecoration(spvType, member, spv::DecorationBuiltIn, (int)builtIn); + +#ifndef GLSLANG_WEB + // nonuniform + builder.addMemberDecoration(spvType, member, TranslateNonUniformDecoration(glslangMember.getQualifier())); + + if (glslangIntermediate->getHlslFunctionality1() && memberQualifier.semanticName != nullptr) { + builder.addExtension("SPV_GOOGLE_hlsl_functionality1"); + builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationHlslSemanticGOOGLE, + memberQualifier.semanticName); + } + + if (builtIn == spv::BuiltInLayer) { + // SPV_NV_viewport_array2 extension + if (glslangMember.getQualifier().layoutViewportRelative){ + builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationViewportRelativeNV); + builder.addCapability(spv::CapabilityShaderViewportMaskNV); + builder.addExtension(spv::E_SPV_NV_viewport_array2); + } + if (glslangMember.getQualifier().layoutSecondaryViewportRelativeOffset != -2048){ + builder.addMemberDecoration(spvType, member, + (spv::Decoration)spv::DecorationSecondaryViewportRelativeNV, + glslangMember.getQualifier().layoutSecondaryViewportRelativeOffset); + builder.addCapability(spv::CapabilityShaderStereoViewNV); + builder.addExtension(spv::E_SPV_NV_stereo_view_rendering); + } + } + if (glslangMember.getQualifier().layoutPassthrough) { + builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationPassthroughNV); + builder.addCapability(spv::CapabilityGeometryShaderPassthroughNV); + builder.addExtension(spv::E_SPV_NV_geometry_shader_passthrough); + } +#endif + } + + // Decorate the structure + builder.addDecoration(spvType, TranslateLayoutDecoration(type, qualifier.layoutMatrix)); + builder.addDecoration(spvType, TranslateBlockDecoration(type, glslangIntermediate->usingStorageBuffer())); +} + +// Turn the expression forming the array size into an id. +// This is not quite trivial, because of specialization constants. +// Sometimes, a raw constant is turned into an Id, and sometimes +// a specialization constant expression is. 
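+// (Illustrative example: with "layout(constant_id = 1) const int N = 4;" and
+// "float a[N];", the dimension is sized by a specialization-constant node,
+// which is traversed and loaded below instead of folded to a raw constant.)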
+spv::Id TGlslangToSpvTraverser::makeArraySizeId(const glslang::TArraySizes& arraySizes, int dim) +{ + // First, see if this is sized with a node, meaning a specialization constant: + glslang::TIntermTyped* specNode = arraySizes.getDimNode(dim); + if (specNode != nullptr) { + builder.clearAccessChain(); + specNode->traverse(this); + return accessChainLoad(specNode->getAsTyped()->getType()); + } + + // Otherwise, need a compile-time (front end) size, get it: + int size = arraySizes.getDimSize(dim); + assert(size > 0); + return builder.makeUintConstant(size); +} + +// Wrap the builder's accessChainLoad to: +// - localize handling of RelaxedPrecision +// - use the SPIR-V inferred type instead of another conversion of the glslang type +// (avoids unnecessary work and possible type punning for structures) +// - do conversion of concrete to abstract type +spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type) +{ + spv::Id nominalTypeId = builder.accessChainGetInferredType(); + + spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags; + coherentFlags |= TranslateCoherent(type); + + unsigned int alignment = builder.getAccessChain().alignment; + alignment |= type.getBufferReferenceAlignment(); + + spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type), + TranslateNonUniformDecoration(type.getQualifier()), + nominalTypeId, + spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask), + TranslateMemoryScope(coherentFlags), + alignment); + + // Need to convert to abstract types when necessary + if (type.getBasicType() == glslang::EbtBool) { + if (builder.isScalarType(nominalTypeId)) { + // Conversion for bool + spv::Id boolType = builder.makeBoolType(); + if (nominalTypeId != boolType) + loadedId = builder.createBinOp(spv::OpINotEqual, boolType, loadedId, builder.makeUintConstant(0)); + } else if (builder.isVectorType(nominalTypeId)) { + // Conversion for bvec + int vecSize = builder.getNumTypeComponents(nominalTypeId); + spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), vecSize); + if (nominalTypeId != bvecType) + loadedId = builder.createBinOp(spv::OpINotEqual, bvecType, loadedId, + makeSmearedConstant(builder.makeUintConstant(0), vecSize)); + } + } + + return loadedId; +} + +// Wrap the builder's accessChainStore to: +// - do conversion of concrete to abstract type +// +// Implicitly uses the existing builder.accessChain as the storage target. 
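+// (Illustrative: storing to a bool member of an std430 buffer block, which is
+// laid out as a 32-bit uint, goes through the OpSelect between 1 and 0 below
+// rather than a direct store of the bool.)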
+void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::Id rvalue)
+{
+ // Need to convert to abstract types when necessary
+ if (type.getBasicType() == glslang::EbtBool) {
+ spv::Id nominalTypeId = builder.accessChainGetInferredType();
+
+ if (builder.isScalarType(nominalTypeId)) {
+ // Conversion for bool
+ spv::Id boolType = builder.makeBoolType();
+ if (nominalTypeId != boolType) {
+ // keep these outside arguments, for deterministic order of evaluation
+ spv::Id one = builder.makeUintConstant(1);
+ spv::Id zero = builder.makeUintConstant(0);
+ rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, one, zero);
+ } else if (builder.getTypeId(rvalue) != boolType)
+ rvalue = builder.createBinOp(spv::OpINotEqual, boolType, rvalue, builder.makeUintConstant(0));
+ } else if (builder.isVectorType(nominalTypeId)) {
+ // Conversion for bvec
+ int vecSize = builder.getNumTypeComponents(nominalTypeId);
+ spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), vecSize);
+ if (nominalTypeId != bvecType) {
+ // keep these outside arguments, for deterministic order of evaluation
+ spv::Id one = makeSmearedConstant(builder.makeUintConstant(1), vecSize);
+ spv::Id zero = makeSmearedConstant(builder.makeUintConstant(0), vecSize);
+ rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, one, zero);
+ } else if (builder.getTypeId(rvalue) != bvecType)
+ rvalue = builder.createBinOp(spv::OpINotEqual, bvecType, rvalue,
+ makeSmearedConstant(builder.makeUintConstant(0), vecSize));
+ }
+ }
+
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
+ coherentFlags |= TranslateCoherent(type);
+
+ unsigned int alignment = builder.getAccessChain().alignment;
+ alignment |= type.getBufferReferenceAlignment();
+
+ builder.accessChainStore(rvalue,
+ spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) &
+ ~spv::MemoryAccessMakePointerVisibleKHRMask),
+ TranslateMemoryScope(coherentFlags), alignment);
+}
+
+// For storing when types match at the glslang level, but might not match at the
+// SPIR-V level.
+//
+// This especially happens when a single glslang type expands to multiple
+// SPIR-V types, like a struct that is used in a member-undecorated way as well
+// as in a member-decorated way.
+//
+// NOTE: This function can handle any store request; if it's not special it
+// simplifies to a simple OpStore.
+//
+// Implicitly uses the existing builder.accessChain as the storage target.
+void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id rValue)
+{
+ // we only do the complex path here if it's an aggregate
+ if (! type.isStruct() && ! type.isArray()) {
+ accessChainStore(type, rValue);
+ return;
+ }
+
+ // and, it has to be a case of type aliasing
+ spv::Id rType = builder.getTypeId(rValue);
+ spv::Id lValue = builder.accessChainGetLValue();
+ spv::Id lType = builder.getContainedTypeId(builder.getTypeId(lValue));
+ if (lType == rType) {
+ accessChainStore(type, rValue);
+ return;
+ }
+
+ // Recursively (as needed) copy an aggregate type to a different aggregate type,
+ // where the two types were the same type in GLSL. This requires member
+ // by member copy, recursively.
+
+ // SPIR-V 1.4 added an instruction to help do this.
+ if (glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4) {
+ // However, bool in uniform space is changed to int, so
+ // OpCopyLogical does not work for that.
+ // TODO: It would be more robust to do a full recursive verification of the types satisfying SPIR-V rules. + bool rBool = builder.containsType(builder.getTypeId(rValue), spv::OpTypeBool, 0); + bool lBool = builder.containsType(lType, spv::OpTypeBool, 0); + if (lBool == rBool) { + spv::Id logicalCopy = builder.createUnaryOp(spv::OpCopyLogical, lType, rValue); + accessChainStore(type, logicalCopy); + return; + } + } + + // If an array, copy element by element. + if (type.isArray()) { + glslang::TType glslangElementType(type, 0); + spv::Id elementRType = builder.getContainedTypeId(rType); + for (int index = 0; index < type.getOuterArraySize(); ++index) { + // get the source member + spv::Id elementRValue = builder.createCompositeExtract(rValue, elementRType, index); + + // set up the target storage + builder.clearAccessChain(); + builder.setAccessChainLValue(lValue); + builder.accessChainPush(builder.makeIntConstant(index), TranslateCoherent(type), + type.getBufferReferenceAlignment()); + + // store the member + multiTypeStore(glslangElementType, elementRValue); + } + } else { + assert(type.isStruct()); + + // loop over structure members + const glslang::TTypeList& members = *type.getStruct(); + for (int m = 0; m < (int)members.size(); ++m) { + const glslang::TType& glslangMemberType = *members[m].type; + + // get the source member + spv::Id memberRType = builder.getContainedTypeId(rType, m); + spv::Id memberRValue = builder.createCompositeExtract(rValue, memberRType, m); + + // set up the target storage + builder.clearAccessChain(); + builder.setAccessChainLValue(lValue); + builder.accessChainPush(builder.makeIntConstant(m), TranslateCoherent(type), + type.getBufferReferenceAlignment()); + + // store the member + multiTypeStore(glslangMemberType, memberRValue); + } + } +} + +// Decide whether or not this type should be +// decorated with offsets and strides, and if so +// whether std140 or std430 rules should be applied. 
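+// (Illustrative: a "layout(std140) uniform UBO { ... }" block returns
+// ElpStd140 here, while a non-block type such as "in vec4 v" returns ElpNone
+// and receives no offset/stride decorations.)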
+glslang::TLayoutPacking TGlslangToSpvTraverser::getExplicitLayout(const glslang::TType& type) const +{ + // has to be a block + if (type.getBasicType() != glslang::EbtBlock) + return glslang::ElpNone; + + // has to be a uniform or buffer block or task in/out blocks + if (type.getQualifier().storage != glslang::EvqUniform && + type.getQualifier().storage != glslang::EvqBuffer && + !type.getQualifier().isTaskMemory()) + return glslang::ElpNone; + + // return the layout to use + switch (type.getQualifier().layoutPacking) { + case glslang::ElpStd140: + case glslang::ElpStd430: + case glslang::ElpScalar: + return type.getQualifier().layoutPacking; + default: + return glslang::ElpNone; + } +} + +// Given an array type, returns the integer stride required for that array +int TGlslangToSpvTraverser::getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking explicitLayout, + glslang::TLayoutMatrix matrixLayout) +{ + int size; + int stride; + glslangIntermediate->getMemberAlignment(arrayType, size, stride, explicitLayout, + matrixLayout == glslang::ElmRowMajor); + + return stride; +} + +// Given a matrix type, or array (of array) of matrixes type, returns the integer stride required for that matrix +// when used as a member of an interface block +int TGlslangToSpvTraverser::getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking explicitLayout, + glslang::TLayoutMatrix matrixLayout) +{ + glslang::TType elementType; + elementType.shallowCopy(matrixType); + elementType.clearArraySizes(); + + int size; + int stride; + glslangIntermediate->getMemberAlignment(elementType, size, stride, explicitLayout, + matrixLayout == glslang::ElmRowMajor); + + return stride; +} + +// Given a member type of a struct, realign the current offset for it, and compute +// the next (not yet aligned) offset for the next member, which will get aligned +// on the next call. +// 'currentOffset' should be passed in already initialized, ready to modify, and reflecting +// the migration of data from nextOffset -> currentOffset. It should be -1 on the first call. +// -1 means a non-forced member offset (no decoration needed). +void TGlslangToSpvTraverser::updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, + int& currentOffset, int& nextOffset, glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout) +{ + // this will get a positive value when deemed necessary + nextOffset = -1; + + // override anything in currentOffset with user-set offset + if (memberType.getQualifier().hasOffset()) + currentOffset = memberType.getQualifier().layoutOffset; + + // It could be that current linker usage in glslang updated all the layoutOffset, + // in which case the following code does not matter. But, that's not quite right + // once cross-compilation unit GLSL validation is done, as the original user + // settings are needed in layoutOffset, and then the following will come into play. + + if (explicitLayout == glslang::ElpNone) { + if (! memberType.getQualifier().hasOffset()) + currentOffset = -1; + + return; + } + + // Getting this far means we need explicit offsets + if (currentOffset < 0) + currentOffset = 0; + + // Now, currentOffset is valid (either 0, or from a previous nextOffset), + // but possibly not yet correctly aligned. 
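+ // (Illustrative, assumed numbers: under std140 a "vec3" member has 16-byte
+ // alignment, so an accumulated currentOffset of 20 is rounded up to 32 by
+ // RoundToPow2 below.)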
+
+ int memberSize;
+ int dummyStride;
+ int memberAlignment = glslangIntermediate->getMemberAlignment(memberType, memberSize, dummyStride, explicitLayout,
+ matrixLayout == glslang::ElmRowMajor);
+
+ // Adjust alignment for HLSL rules
+ // TODO: make this consistent in early phases of code:
+ // adjusting this late means inconsistencies with earlier code, which for reflection is an issue
+ // Until reflection is brought in sync with these adjustments, don't apply to $Global,
+ // which is the most likely to rely on reflection, and least likely to rely on implicit layouts
+ if (glslangIntermediate->usingHlslOffsets() &&
+ ! memberType.isArray() && memberType.isVector() && structType.getTypeName().compare("$Global") != 0) {
+ int dummySize;
+ int componentAlignment = glslangIntermediate->getBaseAlignmentScalar(memberType, dummySize);
+ if (componentAlignment <= 4)
+ memberAlignment = componentAlignment;
+ }
+
+ // Bump up to member alignment
+ glslang::RoundToPow2(currentOffset, memberAlignment);
+
+ // Bump up to vec4 if there is a bad straddle
+ if (explicitLayout != glslang::ElpScalar && glslangIntermediate->improperStraddle(memberType, memberSize,
+ currentOffset))
+ glslang::RoundToPow2(currentOffset, 16);
+
+ nextOffset = currentOffset + memberSize;
+}
+
+void TGlslangToSpvTraverser::declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember)
+{
+ const glslang::TBuiltInVariable glslangBuiltIn = members[glslangMember].type->getQualifier().builtIn;
+ switch (glslangBuiltIn)
+ {
+ case glslang::EbvPointSize:
+#ifndef GLSLANG_WEB
+ case glslang::EbvClipDistance:
+ case glslang::EbvCullDistance:
+ case glslang::EbvViewportMaskNV:
+ case glslang::EbvSecondaryPositionNV:
+ case glslang::EbvSecondaryViewportMaskNV:
+ case glslang::EbvPositionPerViewNV:
+ case glslang::EbvViewportMaskPerViewNV:
+ case glslang::EbvTaskCountNV:
+ case glslang::EbvPrimitiveCountNV:
+ case glslang::EbvPrimitiveIndicesNV:
+ case glslang::EbvClipDistancePerViewNV:
+ case glslang::EbvCullDistancePerViewNV:
+ case glslang::EbvLayerPerViewNV:
+ case glslang::EbvMeshViewCountNV:
+ case glslang::EbvMeshViewIndicesNV:
+#endif
+ // Generate the associated capability. Delegate to TranslateBuiltInDecoration.
+ // Alternately, we could just call this for any glslang built-in, since the
+ // capability already guards against duplicates.
+ TranslateBuiltInDecoration(glslangBuiltIn, false);
+ break;
+ default:
+ // Capabilities were already generated when the struct was declared.
+ break;
+ }
+}
+
+bool TGlslangToSpvTraverser::isShaderEntryPoint(const glslang::TIntermAggregate* node)
+{
+ return node->getName().compare(glslangIntermediate->getEntryPointMangledName().c_str()) == 0;
+}
+
+// Does parameter need a place to keep writes, separate from the original?
+// Assumes called after originalParam(), which filters out block/buffer/opaque-based
+// qualifiers such that we should have only in/out/inout/constreadonly here.
+bool TGlslangToSpvTraverser::writableParam(glslang::TStorageQualifier qualifier) const
+{
+ assert(qualifier == glslang::EvqIn ||
+ qualifier == glslang::EvqOut ||
+ qualifier == glslang::EvqInOut ||
+ qualifier == glslang::EvqConstReadOnly);
+ return qualifier != glslang::EvqConstReadOnly;
+}
+
+// Is parameter pass-by-original?
+bool TGlslangToSpvTraverser::originalParam(glslang::TStorageQualifier qualifier, const glslang::TType& paramType,
+ bool implicitThisParam)
+{
+ if (implicitThisParam) // implicit this
+ return true;
+ if (glslangIntermediate->getSource() == glslang::EShSourceHlsl)
+ return paramType.getBasicType() == glslang::EbtBlock;
+ return paramType.containsOpaque() || // sampler, etc.
+ (paramType.getBasicType() == glslang::EbtBlock && qualifier == glslang::EvqBuffer); // SSBO
+}
+
+// Make all the functions, skeletally, without actually visiting their bodies.
+void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+ const auto getParamDecorations = [&](std::vector<spv::Decoration>& decorations, const glslang::TType& type,
+ bool useVulkanMemoryModel) {
+ spv::Decoration paramPrecision = TranslatePrecisionDecoration(type);
+ if (paramPrecision != spv::NoPrecision)
+ decorations.push_back(paramPrecision);
+ TranslateMemoryDecoration(type.getQualifier(), decorations, useVulkanMemoryModel);
+ if (type.isReference()) {
+ // Original and non-writable params pass the pointer directly and
+ // use restrict/aliased, others are stored to a pointer in Function
+ // memory and use RestrictPointer/AliasedPointer.
+ if (originalParam(type.getQualifier().storage, type, false) ||
+ !writableParam(type.getQualifier().storage)) {
+ decorations.push_back(type.getQualifier().isRestrict() ? spv::DecorationRestrict :
+ spv::DecorationAliased);
+ } else {
+ decorations.push_back(type.getQualifier().isRestrict() ? spv::DecorationRestrictPointerEXT :
+ spv::DecorationAliasedPointerEXT);
+ }
+ }
+ };
+
+ for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+ glslang::TIntermAggregate* glslFunction = glslFunctions[f]->getAsAggregate();
+ if (! glslFunction || glslFunction->getOp() != glslang::EOpFunction || isShaderEntryPoint(glslFunction))
+ continue;
+
+ // We're on a user function. Set up the basic interface for the function now,
+ // so that it's available to call. Translating the body will happen later.
+ //
+ // Typically (except for a "const in" parameter), an address will be passed to the
+ // function. What it is an address of varies:
+ //
+ // - "in" parameters not marked as "const" can be written to without modifying the calling
+ // argument so that write needs to be to a copy, hence the address of a copy works.
+ //
+ // - "const in" parameters can just be the r-value, as no writes need occur.
+ //
+ // - "out" and "inout" arguments can't be done as pointers to the calling argument, because
+ // GLSL has copy-in/copy-out semantics. They can be handled, though, with a pointer to a copy.
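+ // (Illustrative, hypothetical example: for "void f(inout vec4 v)", the
+ // parameter below becomes a pointer to Function storage; the caller copies
+ // the argument in before the call and copies the result back out afterward.)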
+
+ std::vector<spv::Id> paramTypes;
+ std::vector<std::vector<spv::Decoration>> paramDecorations; // list of decorations per parameter
+ glslang::TIntermSequence& parameters = glslFunction->getSequence()[0]->getAsAggregate()->getSequence();
+
+#ifdef ENABLE_HLSL
+ bool implicitThis = (int)parameters.size() > 0 && parameters[0]->getAsSymbolNode()->getName() ==
+ glslangIntermediate->implicitThisName;
+#else
+ bool implicitThis = false;
+#endif
+
+ paramDecorations.resize(parameters.size());
+ for (int p = 0; p < (int)parameters.size(); ++p) {
+ const glslang::TType& paramType = parameters[p]->getAsTyped()->getType();
+ spv::Id typeId = convertGlslangToSpvType(paramType);
+ if (originalParam(paramType.getQualifier().storage, paramType, implicitThis && p == 0))
+ typeId = builder.makePointer(TranslateStorageClass(paramType), typeId);
+ else if (writableParam(paramType.getQualifier().storage))
+ typeId = builder.makePointer(spv::StorageClassFunction, typeId);
+ else
+ rValueParameters.insert(parameters[p]->getAsSymbolNode()->getId());
+ getParamDecorations(paramDecorations[p], paramType, glslangIntermediate->usingVulkanMemoryModel());
+ paramTypes.push_back(typeId);
+ }
+
+ spv::Block* functionBlock;
+ spv::Function *function = builder.makeFunctionEntry(TranslatePrecisionDecoration(glslFunction->getType()),
+ convertGlslangToSpvType(glslFunction->getType()),
+ glslFunction->getName().c_str(), paramTypes,
+ paramDecorations, &functionBlock);
+ if (implicitThis)
+ function->setImplicitThis();
+
+ // Track function to emit/call later
+ functionMap[glslFunction->getName().c_str()] = function;
+
+ // Set the parameter id's
+ for (int p = 0; p < (int)parameters.size(); ++p) {
+ symbolValues[parameters[p]->getAsSymbolNode()->getId()] = function->getParamId(p);
+ // give a name too
+ builder.addName(function->getParamId(p), parameters[p]->getAsSymbolNode()->getName().c_str());
+
+ const glslang::TType& paramType = parameters[p]->getAsTyped()->getType();
+ if (paramType.contains8BitInt())
+ builder.addCapability(spv::CapabilityInt8);
+ if (paramType.contains16BitInt())
+ builder.addCapability(spv::CapabilityInt16);
+ if (paramType.contains16BitFloat())
+ builder.addCapability(spv::CapabilityFloat16);
+ }
+ }
+}
+
+// Process all the initializers, while skipping the functions and link objects
+void TGlslangToSpvTraverser::makeGlobalInitializers(const glslang::TIntermSequence& initializers)
+{
+ builder.setBuildPoint(shaderEntry->getLastBlock());
+ for (int i = 0; i < (int)initializers.size(); ++i) {
+ glslang::TIntermAggregate* initializer = initializers[i]->getAsAggregate();
+ if (initializer && initializer->getOp() != glslang::EOpFunction && initializer->getOp() !=
+ glslang::EOpLinkerObjects) {
+
+ // We're on a top-level node that's not a function. Treat as an initializer, whose
+ // code goes into the beginning of the entry point.
+ initializer->traverse(this);
+ }
+ }
+}
+
+// Process all the functions, while skipping initializers.
+void TGlslangToSpvTraverser::visitFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+ for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+ glslang::TIntermAggregate* node = glslFunctions[f]->getAsAggregate();
+ if (node && (node->getOp() == glslang::EOpFunction || node->getOp() == glslang::EOpLinkerObjects))
+ node->traverse(this);
+ }
+}
+
+void TGlslangToSpvTraverser::handleFunctionEntry(const glslang::TIntermAggregate* node)
+{
+ // SPIR-V functions should already be in the functionMap from the prepass
+ // that called makeFunctions().
+    currentFunction = functionMap[node->getName().c_str()];
+    spv::Block* functionBlock = currentFunction->getEntryBlock();
+    builder.setBuildPoint(functionBlock);
+}
+
+void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments,
+                                                spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags)
+{
+    const glslang::TIntermSequence& glslangArguments = node.getSequence();
+
+    glslang::TSampler sampler = {};
+    bool cubeCompare = false;
+#ifndef GLSLANG_WEB
+    bool f16ShadowCompare = false;
+#endif
+    if (node.isTexture() || node.isImage()) {
+        sampler = glslangArguments[0]->getAsTyped()->getType().getSampler();
+        cubeCompare = sampler.dim == glslang::EsdCube && sampler.arrayed && sampler.shadow;
+#ifndef GLSLANG_WEB
+        f16ShadowCompare = sampler.shadow &&
+            glslangArguments[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16;
+#endif
+    }
+
+    for (int i = 0; i < (int)glslangArguments.size(); ++i) {
+        builder.clearAccessChain();
+        glslangArguments[i]->traverse(this);
+
+#ifndef GLSLANG_WEB
+        // Special case l-value operands
+        bool lvalue = false;
+        switch (node.getOp()) {
+        case glslang::EOpImageAtomicAdd:
+        case glslang::EOpImageAtomicMin:
+        case glslang::EOpImageAtomicMax:
+        case glslang::EOpImageAtomicAnd:
+        case glslang::EOpImageAtomicOr:
+        case glslang::EOpImageAtomicXor:
+        case glslang::EOpImageAtomicExchange:
+        case glslang::EOpImageAtomicCompSwap:
+        case glslang::EOpImageAtomicLoad:
+        case glslang::EOpImageAtomicStore:
+            if (i == 0)
+                lvalue = true;
+            break;
+        case glslang::EOpSparseImageLoad:
+            if ((sampler.ms && i == 3) || (! sampler.ms && i == 2))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTexture:
+            if (((cubeCompare || f16ShadowCompare) && i == 3) || (! (cubeCompare || f16ShadowCompare) && i == 2))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureClamp:
+            if (((cubeCompare || f16ShadowCompare) && i == 4) || (! (cubeCompare || f16ShadowCompare) && i == 3))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureLod:
+        case glslang::EOpSparseTextureOffset:
+            if ((f16ShadowCompare && i == 4) || (! f16ShadowCompare && i == 3))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureFetch:
+            if ((sampler.dim != glslang::EsdRect && i == 3) || (sampler.dim == glslang::EsdRect && i == 2))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureFetchOffset:
+            if ((sampler.dim != glslang::EsdRect && i == 4) || (sampler.dim == glslang::EsdRect && i == 3))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureLodOffset:
+        case glslang::EOpSparseTextureGrad:
+        case glslang::EOpSparseTextureOffsetClamp:
+            if ((f16ShadowCompare && i == 5) || (! f16ShadowCompare && i == 4))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGradOffset:
+        case glslang::EOpSparseTextureGradClamp:
+            if ((f16ShadowCompare && i == 6) || (! f16ShadowCompare && i == 5))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGradOffsetClamp:
+            if ((f16ShadowCompare && i == 7) || (! f16ShadowCompare && i == 6))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGather:
+            if ((sampler.shadow && i == 3) || (! sampler.shadow && i == 2))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGatherOffset:
+        case glslang::EOpSparseTextureGatherOffsets:
+            if ((sampler.shadow && i == 4) || (! sampler.shadow && i == 3))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGatherLod:
+            if (i == 3)
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGatherLodOffset:
+        case glslang::EOpSparseTextureGatherLodOffsets:
+            if (i == 4)
+                lvalue = true;
+            break;
+        case glslang::EOpSparseImageLoadLod:
+            if (i == 3)
+                lvalue = true;
+            break;
+        case glslang::EOpImageSampleFootprintNV:
+            if (i == 4)
+                lvalue = true;
+            break;
+        case glslang::EOpImageSampleFootprintClampNV:
+        case glslang::EOpImageSampleFootprintLodNV:
+            if (i == 5)
+                lvalue = true;
+            break;
+        case glslang::EOpImageSampleFootprintGradNV:
+            if (i == 6)
+                lvalue = true;
+            break;
+        case glslang::EOpImageSampleFootprintGradClampNV:
+            if (i == 7)
+                lvalue = true;
+            break;
+        default:
+            break;
+        }
+
+        if (lvalue) {
+            arguments.push_back(builder.accessChainGetLValue());
+            lvalueCoherentFlags = builder.getAccessChain().coherentFlags;
+            lvalueCoherentFlags |= TranslateCoherent(glslangArguments[i]->getAsTyped()->getType());
+        } else
+#endif
+            arguments.push_back(accessChainLoad(glslangArguments[i]->getAsTyped()->getType()));
+    }
+}
+
+void TGlslangToSpvTraverser::translateArguments(glslang::TIntermUnary& node, std::vector<spv::Id>& arguments)
+{
+    builder.clearAccessChain();
+    node.getOperand()->traverse(this);
+    arguments.push_back(accessChainLoad(node.getOperand()->getType()));
+}
+
+spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermOperator* node)
+{
+    if (! node->isImage() && ! node->isTexture())
+        return spv::NoResult;
+
+    builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+    // Process a GLSL texturing op (will be SPV image)
+
+    const glslang::TType &imageType = node->getAsAggregate()
+        ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType()
+        : node->getAsUnaryNode()->getOperand()->getAsTyped()->getType();
+    const glslang::TSampler sampler = imageType.getSampler();
+#ifdef GLSLANG_WEB
+    const bool f16ShadowCompare = false;
+#else
+    bool f16ShadowCompare = (sampler.shadow && node->getAsAggregate())
+        ? node->getAsAggregate()->getSequence()[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16
+        : false;
+#endif
+
+    const auto signExtensionMask = [&]() {
+        if (builder.getSpvVersion() >= spv::Spv_1_4) {
+            if (sampler.type == glslang::EbtUint)
+                return spv::ImageOperandsZeroExtendMask;
+            else if (sampler.type == glslang::EbtInt)
+                return spv::ImageOperandsSignExtendMask;
+        }
+        return spv::ImageOperandsMaskNone;
+    };
+
+    spv::Builder::AccessChain::CoherentFlags lvalueCoherentFlags;
+
+    std::vector<spv::Id> arguments;
+    if (node->getAsAggregate())
+        translateArguments(*node->getAsAggregate(), arguments, lvalueCoherentFlags);
+    else
+        translateArguments(*node->getAsUnaryNode(), arguments);
+    spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision());
+
+    spv::Builder::TextureParameters params = { };
+    params.sampler = arguments[0];
+
+    glslang::TCrackedTextureOp cracked;
+    node->crackTexture(sampler, cracked);
+
+    const bool isUnsignedResult = node->getType().getBasicType() == glslang::EbtUint;
+
+    // Check for queries
+    if (cracked.query) {
+        // OpImageQueryLod works on a sampled image, for other queries the image has to be extracted first
+        if (node->getOp() != glslang::EOpTextureQueryLod && builder.isSampledImage(params.sampler))
+            params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+
+        switch (node->getOp()) {
+        case glslang::EOpImageQuerySize:
+        case glslang::EOpTextureQuerySize:
+            if (arguments.size() > 1) {
+                params.lod = arguments[1];
+                return builder.createTextureQueryCall(spv::OpImageQuerySizeLod, params, isUnsignedResult);
+            } else
+                return builder.createTextureQueryCall(spv::OpImageQuerySize, params, isUnsignedResult);
+#ifndef GLSLANG_WEB
+        case glslang::EOpImageQuerySamples:
+        case glslang::EOpTextureQuerySamples:
+            return builder.createTextureQueryCall(spv::OpImageQuerySamples, params, isUnsignedResult);
+        case glslang::EOpTextureQueryLod:
+            params.coords = arguments[1];
+            return builder.createTextureQueryCall(spv::OpImageQueryLod, params, isUnsignedResult);
+        case glslang::EOpTextureQueryLevels:
+            return builder.createTextureQueryCall(spv::OpImageQueryLevels, params, isUnsignedResult);
+        case glslang::EOpSparseTexelsResident:
+            return builder.createUnaryOp(spv::OpImageSparseTexelsResident, builder.makeBoolType(), arguments[0]);
+#endif
+        default:
+            assert(0);
+            break;
+        }
+    }
+
+    int components = node->getType().getVectorSize();
+
+    if (node->getOp() == glslang::EOpTextureFetch) {
+        // These must produce 4 components, per SPIR-V spec.  We'll add a conversion constructor if needed.
+        // This will only happen through the HLSL path for operator[], so we do not have to handle e.g.
+        // the EOpTexture/Proj/Lod/etc. family.  It would be harmless to do so, but would need more logic
+        // here around e.g. which ones return scalars or other types.
+        components = 4;
+    }
+
+    glslang::TType returnType(node->getType().getBasicType(), glslang::EvqTemporary, components);
+
+    auto resultType = [&returnType,this]{ return convertGlslangToSpvType(returnType); };
+
+    // Check for image functions other than queries
+    if (node->isImage()) {
+        std::vector<spv::IdImmediate> operands;
+        auto opIt = arguments.begin();
+        spv::IdImmediate image = { true, *(opIt++) };
+        operands.push_back(image);
+
+        // Handle subpass operations
+        // TODO: GLSL should change to have the "MS" only on the type rather than the
+        // built-in function.
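+        // (Editorial note) Subpass inputs carry no coordinate in GLSL; they are read
+        // relative to the current fragment's position, which is why a constant (0,0)
+        // coordinate is synthesized below for the OpImageRead.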
+
+        if (cracked.subpass) {
+            // add on the (0,0) coordinate
+            spv::Id zero = builder.makeIntConstant(0);
+            std::vector<spv::Id> comps;
+            comps.push_back(zero);
+            comps.push_back(zero);
+            spv::IdImmediate coord = { true,
+                builder.makeCompositeConstant(builder.makeVectorType(builder.makeIntType(32), 2), comps) };
+            operands.push_back(coord);
+            spv::IdImmediate imageOperands = { false, spv::ImageOperandsMaskNone };
+            imageOperands.word = imageOperands.word | signExtensionMask();
+            if (sampler.isMultiSample()) {
+                imageOperands.word = imageOperands.word | spv::ImageOperandsSampleMask;
+            }
+            if (imageOperands.word != spv::ImageOperandsMaskNone) {
+                operands.push_back(imageOperands);
+                if (sampler.isMultiSample()) {
+                    spv::IdImmediate imageOperand = { true, *(opIt++) };
+                    operands.push_back(imageOperand);
+                }
+            }
+            spv::Id result = builder.createOp(spv::OpImageRead, resultType(), operands);
+            builder.setPrecision(result, precision);
+            return result;
+        }
+
+        spv::IdImmediate coord = { true, *(opIt++) };
+        operands.push_back(coord);
+        if (node->getOp() == glslang::EOpImageLoad || node->getOp() == glslang::EOpImageLoadLod) {
+            spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+            if (sampler.isMultiSample()) {
+                mask = mask | spv::ImageOperandsSampleMask;
+            }
+            if (cracked.lod) {
+                builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
+                builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
+                mask = mask | spv::ImageOperandsLodMask;
+            }
+            mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
+            mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
+            mask = mask | signExtensionMask();
+            if (mask != spv::ImageOperandsMaskNone) {
+                spv::IdImmediate imageOperands = { false, (unsigned int)mask };
+                operands.push_back(imageOperands);
+            }
+            if (mask & spv::ImageOperandsSampleMask) {
+                spv::IdImmediate imageOperand = { true, *opIt++ };
+                operands.push_back(imageOperand);
+            }
+            if (mask & spv::ImageOperandsLodMask) {
+                spv::IdImmediate imageOperand = { true, *opIt++ };
+                operands.push_back(imageOperand);
+            }
+            if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
+                spv::IdImmediate imageOperand = { true,
+                    builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
+                operands.push_back(imageOperand);
+            }
+
+            if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
+                builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
+
+            std::vector<spv::Id> result(1, builder.createOp(spv::OpImageRead, resultType(), operands));
+            builder.setPrecision(result[0], precision);
+
+            // If needed, add a conversion constructor to the proper size.
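+            // (Editorial note) This mirrors the EOpTextureFetch handling above: when the
+            // op was forced to produce more components than the GLSL result type has, the
+            // constructor below narrows the result back down.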
+ if (components != node->getType().getVectorSize()) + result[0] = builder.createConstructor(precision, result, convertGlslangToSpvType(node->getType())); + + return result[0]; + } else if (node->getOp() == glslang::EOpImageStore || node->getOp() == glslang::EOpImageStoreLod) { + + // Push the texel value before the operands + if (sampler.isMultiSample() || cracked.lod) { + spv::IdImmediate texel = { true, *(opIt + 1) }; + operands.push_back(texel); + } else { + spv::IdImmediate texel = { true, *opIt }; + operands.push_back(texel); + } + + spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone; + if (sampler.isMultiSample()) { + mask = mask | spv::ImageOperandsSampleMask; + } + if (cracked.lod) { + builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod); + builder.addCapability(spv::CapabilityImageReadWriteLodAMD); + mask = mask | spv::ImageOperandsLodMask; + } + mask = mask | TranslateImageOperands(TranslateCoherent(imageType)); + mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelVisibleKHRMask); + mask = mask | signExtensionMask(); + if (mask != spv::ImageOperandsMaskNone) { + spv::IdImmediate imageOperands = { false, (unsigned int)mask }; + operands.push_back(imageOperands); + } + if (mask & spv::ImageOperandsSampleMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsLodMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsMakeTexelAvailableKHRMask) { + spv::IdImmediate imageOperand = { true, + builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) }; + operands.push_back(imageOperand); + } + + builder.createNoResultOp(spv::OpImageWrite, operands); + if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown) + builder.addCapability(spv::CapabilityStorageImageWriteWithoutFormat); + return spv::NoResult; + } else if (node->getOp() == glslang::EOpSparseImageLoad || + node->getOp() == glslang::EOpSparseImageLoadLod) { + builder.addCapability(spv::CapabilitySparseResidency); + if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown) + builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat); + + spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone; + if (sampler.isMultiSample()) { + mask = mask | spv::ImageOperandsSampleMask; + } + if (cracked.lod) { + builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod); + builder.addCapability(spv::CapabilityImageReadWriteLodAMD); + + mask = mask | spv::ImageOperandsLodMask; + } + mask = mask | TranslateImageOperands(TranslateCoherent(imageType)); + mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask); + mask = mask | signExtensionMask(); + if (mask != spv::ImageOperandsMaskNone) { + spv::IdImmediate imageOperands = { false, (unsigned int)mask }; + operands.push_back(imageOperands); + } + if (mask & spv::ImageOperandsSampleMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsLodMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) { + spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope( + TranslateCoherent(imageType))) }; + operands.push_back(imageOperand); + } + + // Create the return type 
that was a special structure
+        spv::Id texelOut = *opIt;
+        spv::Id typeId0 = resultType();
+        spv::Id typeId1 = builder.getDerefTypeId(texelOut);
+        spv::Id resultTypeId = builder.makeStructResultType(typeId0, typeId1);
+
+        spv::Id resultId = builder.createOp(spv::OpImageSparseRead, resultTypeId, operands);
+
+        // Decode the return type
+        builder.createStore(builder.createCompositeExtract(resultId, typeId1, 1), texelOut);
+        return builder.createCompositeExtract(resultId, typeId0, 0);
+    } else {
+        // Process image atomic operations
+
+        // GLSL "IMAGE_PARAMS" is involved in constructing an image texel pointer, which,
+        // as the first source operand, is required by SPIR-V atomic operations.
+        // For non-MS, the sample value should be 0
+        spv::IdImmediate sample = { true, sampler.isMultiSample() ? *(opIt++) : builder.makeUintConstant(0) };
+        operands.push_back(sample);
+
+        spv::Id resultTypeId;
+        // imageAtomicStore has a void return type so base the pointer type on
+        // the type of the value operand.
+        if (node->getOp() == glslang::EOpImageAtomicStore) {
+            resultTypeId = builder.makePointer(spv::StorageClassImage, builder.getTypeId(*opIt));
+        } else {
+            resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
+        }
+        spv::Id pointer = builder.createOp(spv::OpImageTexelPointer, resultTypeId, operands);
+        if (imageType.getQualifier().nonUniform) {
+            builder.addDecoration(pointer, spv::DecorationNonUniformEXT);
+        }
+
+        std::vector<spv::Id> operands;
+        operands.push_back(pointer);
+        for (; opIt != arguments.end(); ++opIt)
+            operands.push_back(*opIt);
+
+        return createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType(),
+            lvalueCoherentFlags);
+    }
+    }
+
+#ifndef GLSLANG_WEB
+    // Check for fragment mask functions other than queries
+    if (cracked.fragMask) {
+        assert(sampler.ms);
+
+        auto opIt = arguments.begin();
+        std::vector<spv::Id> operands;
+
+        // Extract the image if necessary
+        if (builder.isSampledImage(params.sampler))
+            params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+
+        operands.push_back(params.sampler);
+        ++opIt;
+
+        if (sampler.isSubpass()) {
+            // add on the (0,0) coordinate
+            spv::Id zero = builder.makeIntConstant(0);
+            std::vector<spv::Id> comps;
+            comps.push_back(zero);
+            comps.push_back(zero);
+            operands.push_back(builder.makeCompositeConstant(
+                builder.makeVectorType(builder.makeIntType(32), 2), comps));
+        }
+
+        for (; opIt != arguments.end(); ++opIt)
+            operands.push_back(*opIt);
+
+        spv::Op fragMaskOp = spv::OpNop;
+        if (node->getOp() == glslang::EOpFragmentMaskFetch)
+            fragMaskOp = spv::OpFragmentMaskFetchAMD;
+        else if (node->getOp() == glslang::EOpFragmentFetch)
+            fragMaskOp = spv::OpFragmentFetchAMD;
+
+        builder.addExtension(spv::E_SPV_AMD_shader_fragment_mask);
+        builder.addCapability(spv::CapabilityFragmentMaskAMD);
+        return builder.createOp(fragMaskOp, resultType(), operands);
+    }
+#endif
+
+    // Check for texture functions other than queries
+    bool sparse = node->isSparseTexture();
+    bool imageFootprint = node->isImageFootprint();
+    bool cubeCompare = sampler.dim == glslang::EsdCube && sampler.isArrayed() && sampler.isShadow();
+
+    // check for bias argument
+    bool bias = false;
+    if (! cracked.lod && ! cracked.grad && ! cracked.fetch && ! cubeCompare) {
+        int nonBiasArgCount = 2;
+        if (cracked.gather)
+            ++nonBiasArgCount; // comp argument should be present when bias argument is present
+
+        if (f16ShadowCompare)
+            ++nonBiasArgCount;
+        if (cracked.offset)
+            ++nonBiasArgCount;
+        else if (cracked.offsets)
+            ++nonBiasArgCount;
+        if (cracked.grad)
+            nonBiasArgCount += 2;
+        if (cracked.lodClamp)
+            ++nonBiasArgCount;
+        if (sparse)
+            ++nonBiasArgCount;
+        if (imageFootprint)
+            // Following three extra arguments:
+            //   int granularity, bool coarse, out gl_TextureFootprint2DNV footprint
+            nonBiasArgCount += 3;
+        if ((int)arguments.size() > nonBiasArgCount)
+            bias = true;
+    }
+
+    // See if the sampler param should really be just the SPV image part
+    if (cracked.fetch) {
+        // a fetch needs to have the image extracted first
+        if (builder.isSampledImage(params.sampler))
+            params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+    }
+
+#ifndef GLSLANG_WEB
+    if (cracked.gather) {
+        const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions();
+        if (bias || cracked.lod ||
+            sourceExtensions.find(glslang::E_GL_AMD_texture_gather_bias_lod) != sourceExtensions.end()) {
+            builder.addExtension(spv::E_SPV_AMD_texture_gather_bias_lod);
+            builder.addCapability(spv::CapabilityImageGatherBiasLodAMD);
+        }
+    }
+#endif
+
+    // set the rest of the arguments
+
+    params.coords = arguments[1];
+    int extraArgs = 0;
+    bool noImplicitLod = false;
+
+    // sort out where Dref is coming from
+    if (cubeCompare || f16ShadowCompare) {
+        params.Dref = arguments[2];
+        ++extraArgs;
+    } else if (sampler.shadow && cracked.gather) {
+        params.Dref = arguments[2];
+        ++extraArgs;
+    } else if (sampler.shadow) {
+        std::vector<unsigned int> indexes;
+        int dRefComp;
+        if (cracked.proj)
+            dRefComp = 2;  // "The resulting 3rd component of P in the shadow forms is used as Dref"
+        else
+            dRefComp = builder.getNumComponents(params.coords) - 1;
+        indexes.push_back(dRefComp);
+        params.Dref = builder.createCompositeExtract(params.coords,
+            builder.getScalarTypeId(builder.getTypeId(params.coords)), indexes);
+    }
+
+    // lod
+    if (cracked.lod) {
+        params.lod = arguments[2 + extraArgs];
+        ++extraArgs;
+    } else if (glslangIntermediate->getStage() != EShLangFragment &&
+               !(glslangIntermediate->getStage() == EShLangCompute &&
+                 glslangIntermediate->hasLayoutDerivativeModeNone())) {
+        // we need to invent the default lod for an explicit lod instruction for a non-fragment stage
+        noImplicitLod = true;
+    }
+
+    // multisample
+    if (sampler.isMultiSample()) {
+        params.sample = arguments[2 + extraArgs]; // For MS, "sample" should be specified
+        ++extraArgs;
+    }
+
+    // gradient
+    if (cracked.grad) {
+        params.gradX = arguments[2 + extraArgs];
+        params.gradY = arguments[3 + extraArgs];
+        extraArgs += 2;
+    }
+
+    // offset and offsets
+    if (cracked.offset) {
+        params.offset = arguments[2 + extraArgs];
+        ++extraArgs;
+    } else if (cracked.offsets) {
+        params.offsets = arguments[2 + extraArgs];
+        ++extraArgs;
+    }
+
+#ifndef GLSLANG_WEB
+    // lod clamp
+    if (cracked.lodClamp) {
+        params.lodClamp = arguments[2 + extraArgs];
+        ++extraArgs;
+    }
+    // sparse
+    if (sparse) {
+        params.texelOut = arguments[2 + extraArgs];
+        ++extraArgs;
+    }
+    // gather component
+    if (cracked.gather && ! sampler.shadow) {
+        // default component is 0, if missing, otherwise an argument
+        if (2 + extraArgs < (int)arguments.size()) {
+            params.component = arguments[2 + extraArgs];
+            ++extraArgs;
+        } else
+            params.component = builder.makeIntConstant(0);
+    }
+    spv::Id resultStruct = spv::NoResult;
+    if (imageFootprint) {
+        // Following three extra arguments:
+        //   int granularity, bool coarse, out gl_TextureFootprint2DNV footprint
+        params.granularity = arguments[2 + extraArgs];
+        params.coarse = arguments[3 + extraArgs];
+        resultStruct = arguments[4 + extraArgs];
+        extraArgs += 3;
+    }
+#endif
+    // bias
+    if (bias) {
+        params.bias = arguments[2 + extraArgs];
+        ++extraArgs;
+    }
+
+#ifndef GLSLANG_WEB
+    if (imageFootprint) {
+        builder.addExtension(spv::E_SPV_NV_shader_image_footprint);
+        builder.addCapability(spv::CapabilityImageFootprintNV);
+
+        // resultStructType (OpenGL type) contains 5 elements:
+        //   struct gl_TextureFootprint2DNV {
+        //       uvec2 anchor;
+        //       uvec2 offset;
+        //       uvec2 mask;
+        //       uint  lod;
+        //       uint  granularity;
+        //   };
+        // or
+        //   struct gl_TextureFootprint3DNV {
+        //       uvec3 anchor;
+        //       uvec3 offset;
+        //       uvec2 mask;
+        //       uint  lod;
+        //       uint  granularity;
+        //   };
+        spv::Id resultStructType = builder.getContainedTypeId(builder.getTypeId(resultStruct));
+        assert(builder.isStructType(resultStructType));
+
+        // resType (SPIR-V type) contains 6 elements:
+        //   Member 0 must be a Boolean type scalar (LOD),
+        //   Member 1 must be a vector of integer type, whose Signedness operand is 0 (anchor),
+        //   Member 2 must be a vector of integer type, whose Signedness operand is 0 (offset),
+        //   Member 3 must be a vector of integer type, whose Signedness operand is 0 (mask),
+        //   Member 4 must be a scalar of integer type, whose Signedness operand is 0 (lod),
+        //   Member 5 must be a scalar of integer type, whose Signedness operand is 0 (granularity).
+        std::vector<spv::Id> members;
+        members.push_back(resultType());
+        for (int i = 0; i < 5; i++) {
+            members.push_back(builder.getContainedTypeId(resultStructType, i));
+        }
+        spv::Id resType = builder.makeStructType(members, "ResType");
+
+        // call ImageFootprintNV
+        spv::Id res = builder.createTextureCall(precision, resType, sparse, cracked.fetch, cracked.proj,
+                                                cracked.gather, noImplicitLod, params, signExtensionMask());
+
+        // copy resType (SPIR-V type) to resultStructType (OpenGL type)
+        for (int i = 0; i < 5; i++) {
+            builder.clearAccessChain();
+            builder.setAccessChainLValue(resultStruct);
+
+            // Accessing a struct we created; no coherent flag is set
+            spv::Builder::AccessChain::CoherentFlags flags;
+            flags.clear();
+
+            builder.accessChainPush(builder.makeIntConstant(i), flags, 0);
+            builder.accessChainStore(builder.createCompositeExtract(res, builder.getContainedTypeId(resType, i+1),
+                i+1));
+        }
+        return builder.createCompositeExtract(res, resultType(), 0);
+    }
+#endif
+
+    // projective component (might not need to move)
+    // GLSL: "The texture coordinates consumed from P, not including the last component of P,
+    //        are divided by the last component of P."
+    // SPIR-V: "... (u [, v] [, w], q)... It may be a vector larger than needed, but all
+    //          unused components will appear after all used components."
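+    // (Editorial note) Illustrative example: for textureProj(sampler2D, vec4 P), GLSL
+    // takes q from the last component (P.w), while SPIR-V expects q right after the
+    // used coordinates (component 2 for 2D), hence the component move below.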
+
+    if (cracked.proj) {
+        int projSourceComp = builder.getNumComponents(params.coords) - 1;
+        int projTargetComp;
+        switch (sampler.dim) {
+        case glslang::Esd1D:   projTargetComp = 1;              break;
+        case glslang::Esd2D:   projTargetComp = 2;              break;
+        case glslang::EsdRect: projTargetComp = 2;              break;
+        default:               projTargetComp = projSourceComp; break;
+        }
+        // copy the projective coordinate if we have to
+        if (projTargetComp != projSourceComp) {
+            spv::Id projComp = builder.createCompositeExtract(params.coords,
+                builder.getScalarTypeId(builder.getTypeId(params.coords)), projSourceComp);
+            params.coords = builder.createCompositeInsert(projComp, params.coords,
+                builder.getTypeId(params.coords), projTargetComp);
+        }
+    }
+
+#ifndef GLSLANG_WEB
+    // nonprivate
+    if (imageType.getQualifier().nonprivate) {
+        params.nonprivate = true;
+    }
+
+    // volatile
+    if (imageType.getQualifier().volatil) {
+        params.volatil = true;
+    }
+#endif
+
+    std::vector<spv::Id> result( 1,
+        builder.createTextureCall(precision, resultType(), sparse, cracked.fetch, cracked.proj, cracked.gather,
+                                  noImplicitLod, params, signExtensionMask())
+    );
+
+    if (components != node->getType().getVectorSize())
+        result[0] = builder.createConstructor(precision, result, convertGlslangToSpvType(node->getType()));
+
+    return result[0];
+}
+
+spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAggregate* node)
+{
+    // Grab the function's pointer from the previously created function
+    spv::Function* function = functionMap[node->getName().c_str()];
+    if (! function)
+        return 0;
+
+    const glslang::TIntermSequence& glslangArgs = node->getSequence();
+    const glslang::TQualifierList& qualifiers = node->getQualifierList();
+
+    // See comments in makeFunctions() for details about the semantics for parameter passing.
+    //
+    // These imply we need a four-step process:
+    // 1. Evaluate the arguments
+    // 2. Allocate and make copies of in, out, and inout arguments
+    // 3. Make the call
+    // 4. Copy back the results
+
+    // 1. Evaluate the arguments and their types
+    std::vector<spv::Builder::AccessChain> lValues;
+    std::vector<spv::Id> rValues;
+    std::vector<const glslang::TType*> argTypes;
+    for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+        argTypes.push_back(&glslangArgs[a]->getAsTyped()->getType());
+        // build l-value
+        builder.clearAccessChain();
+        glslangArgs[a]->traverse(this);
+        // keep outputs and pass-by-originals as l-values, evaluate others as r-values
+        if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0) ||
+            writableParam(qualifiers[a])) {
+            // save l-value
+            lValues.push_back(builder.getAccessChain());
+        } else {
+            // process r-value
+            rValues.push_back(accessChainLoad(*argTypes.back()));
+        }
+    }
+
+    // 2. Allocate space for anything needing a copy, and if it's "in" or "inout",
+    //    copy the original into that space.
+    //
+    // Also, build up the list of actual arguments to pass in for the call
+    int lValueCount = 0;
+    int rValueCount = 0;
+    std::vector<spv::Id> spvArgs;
+    for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+        spv::Id arg;
+        if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0)) {
+            builder.setAccessChain(lValues[lValueCount]);
+            arg = builder.accessChainGetLValue();
+            ++lValueCount;
+        } else if (writableParam(qualifiers[a])) {
+            // need space to hold the copy
+            arg = builder.createVariable(spv::StorageClassFunction,
+                builder.getContainedTypeId(function->getParamType(a)), "param");
+            if (qualifiers[a] == glslang::EvqIn || qualifiers[a] == glslang::EvqInOut) {
+                // need to copy the input into output space
+                builder.setAccessChain(lValues[lValueCount]);
+                spv::Id copy = accessChainLoad(*argTypes[a]);
+                builder.clearAccessChain();
+                builder.setAccessChainLValue(arg);
+                multiTypeStore(*argTypes[a], copy);
+            }
+            ++lValueCount;
+        } else {
+            // process r-value, which involves a copy for a type mismatch
+            if (function->getParamType(a) != convertGlslangToSpvType(*argTypes[a])) {
+                spv::Id argCopy = builder.createVariable(spv::StorageClassFunction, function->getParamType(a), "arg");
+                builder.clearAccessChain();
+                builder.setAccessChainLValue(argCopy);
+                multiTypeStore(*argTypes[a], rValues[rValueCount]);
+                arg = builder.createLoad(argCopy);
+            } else
+                arg = rValues[rValueCount];
+            ++rValueCount;
+        }
+        spvArgs.push_back(arg);
+    }
+
+    // 3. Make the call.
+    spv::Id result = builder.createFunctionCall(function, spvArgs);
+    builder.setPrecision(result, TranslatePrecisionDecoration(node->getType()));
+
+    // 4. Copy back out the "out" arguments.
+    lValueCount = 0;
+    for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+        if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0))
+            ++lValueCount;
+        else if (writableParam(qualifiers[a])) {
+            if (qualifiers[a] == glslang::EvqOut || qualifiers[a] == glslang::EvqInOut) {
+                spv::Id copy = builder.createLoad(spvArgs[a]);
+                builder.setAccessChain(lValues[lValueCount]);
+                multiTypeStore(*argTypes[a], copy);
+            }
+            ++lValueCount;
+        }
+    }
+
+    return result;
+}
+
+// Translate AST operation to SPV operation, already having SPV-based operands/types.
+spv::Id TGlslangToSpvTraverser::createBinaryOperation(glslang::TOperator op, OpDecorations& decorations,
+                                                      spv::Id typeId, spv::Id left, spv::Id right,
+                                                      glslang::TBasicType typeProxy, bool reduceComparison)
+{
+    bool isUnsigned = isTypeUnsignedInt(typeProxy);
+    bool isFloat = isTypeFloat(typeProxy);
+    bool isBool = typeProxy == glslang::EbtBool;
+
+    spv::Op binOp = spv::OpNop;
+    bool needMatchingVectors = true;  // for non-matrix ops, would a scalar need to smear to match a vector?
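+    // (Editorial note) "Matching vectors" means e.g. vec3 + float: the scalar operand
+    // is smeared to a vec3 via builder.promoteScalar() below before the OpFAdd is emitted.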
+ bool comparison = false; + + switch (op) { + case glslang::EOpAdd: + case glslang::EOpAddAssign: + if (isFloat) + binOp = spv::OpFAdd; + else + binOp = spv::OpIAdd; + break; + case glslang::EOpSub: + case glslang::EOpSubAssign: + if (isFloat) + binOp = spv::OpFSub; + else + binOp = spv::OpISub; + break; + case glslang::EOpMul: + case glslang::EOpMulAssign: + if (isFloat) + binOp = spv::OpFMul; + else + binOp = spv::OpIMul; + break; + case glslang::EOpVectorTimesScalar: + case glslang::EOpVectorTimesScalarAssign: + if (isFloat && (builder.isVector(left) || builder.isVector(right))) { + if (builder.isVector(right)) + std::swap(left, right); + assert(builder.isScalar(right)); + needMatchingVectors = false; + binOp = spv::OpVectorTimesScalar; + } else if (isFloat) + binOp = spv::OpFMul; + else + binOp = spv::OpIMul; + break; + case glslang::EOpVectorTimesMatrix: + case glslang::EOpVectorTimesMatrixAssign: + binOp = spv::OpVectorTimesMatrix; + break; + case glslang::EOpMatrixTimesVector: + binOp = spv::OpMatrixTimesVector; + break; + case glslang::EOpMatrixTimesScalar: + case glslang::EOpMatrixTimesScalarAssign: + binOp = spv::OpMatrixTimesScalar; + break; + case glslang::EOpMatrixTimesMatrix: + case glslang::EOpMatrixTimesMatrixAssign: + binOp = spv::OpMatrixTimesMatrix; + break; + case glslang::EOpOuterProduct: + binOp = spv::OpOuterProduct; + needMatchingVectors = false; + break; + + case glslang::EOpDiv: + case glslang::EOpDivAssign: + if (isFloat) + binOp = spv::OpFDiv; + else if (isUnsigned) + binOp = spv::OpUDiv; + else + binOp = spv::OpSDiv; + break; + case glslang::EOpMod: + case glslang::EOpModAssign: + if (isFloat) + binOp = spv::OpFMod; + else if (isUnsigned) + binOp = spv::OpUMod; + else + binOp = spv::OpSMod; + break; + case glslang::EOpRightShift: + case glslang::EOpRightShiftAssign: + if (isUnsigned) + binOp = spv::OpShiftRightLogical; + else + binOp = spv::OpShiftRightArithmetic; + break; + case glslang::EOpLeftShift: + case glslang::EOpLeftShiftAssign: + binOp = spv::OpShiftLeftLogical; + break; + case glslang::EOpAnd: + case glslang::EOpAndAssign: + binOp = spv::OpBitwiseAnd; + break; + case glslang::EOpLogicalAnd: + needMatchingVectors = false; + binOp = spv::OpLogicalAnd; + break; + case glslang::EOpInclusiveOr: + case glslang::EOpInclusiveOrAssign: + binOp = spv::OpBitwiseOr; + break; + case glslang::EOpLogicalOr: + needMatchingVectors = false; + binOp = spv::OpLogicalOr; + break; + case glslang::EOpExclusiveOr: + case glslang::EOpExclusiveOrAssign: + binOp = spv::OpBitwiseXor; + break; + case glslang::EOpLogicalXor: + needMatchingVectors = false; + binOp = spv::OpLogicalNotEqual; + break; + + case glslang::EOpAbsDifference: + binOp = isUnsigned ? spv::OpAbsUSubINTEL : spv::OpAbsISubINTEL; + break; + + case glslang::EOpAddSaturate: + binOp = isUnsigned ? spv::OpUAddSatINTEL : spv::OpIAddSatINTEL; + break; + + case glslang::EOpSubSaturate: + binOp = isUnsigned ? spv::OpUSubSatINTEL : spv::OpISubSatINTEL; + break; + + case glslang::EOpAverage: + binOp = isUnsigned ? spv::OpUAverageINTEL : spv::OpIAverageINTEL; + break; + + case glslang::EOpAverageRounded: + binOp = isUnsigned ? spv::OpUAverageRoundedINTEL : spv::OpIAverageRoundedINTEL; + break; + + case glslang::EOpMul32x16: + binOp = isUnsigned ? 
spv::OpUMul32x16INTEL : spv::OpIMul32x16INTEL; + break; + + case glslang::EOpLessThan: + case glslang::EOpGreaterThan: + case glslang::EOpLessThanEqual: + case glslang::EOpGreaterThanEqual: + case glslang::EOpEqual: + case glslang::EOpNotEqual: + case glslang::EOpVectorEqual: + case glslang::EOpVectorNotEqual: + comparison = true; + break; + default: + break; + } + + // handle mapped binary operations (should be non-comparison) + if (binOp != spv::OpNop) { + assert(comparison == false); + if (builder.isMatrix(left) || builder.isMatrix(right) || + builder.isCooperativeMatrix(left) || builder.isCooperativeMatrix(right)) + return createBinaryMatrixOperation(binOp, decorations, typeId, left, right); + + // No matrix involved; make both operands be the same number of components, if needed + if (needMatchingVectors) + builder.promoteScalar(decorations.precision, left, right); + + spv::Id result = builder.createBinOp(binOp, typeId, left, right); + decorations.addNoContraction(builder, result); + decorations.addNonUniform(builder, result); + return builder.setPrecision(result, decorations.precision); + } + + if (! comparison) + return 0; + + // Handle comparison instructions + + if (reduceComparison && (op == glslang::EOpEqual || op == glslang::EOpNotEqual) + && (builder.isVector(left) || builder.isMatrix(left) || builder.isAggregate(left))) { + spv::Id result = builder.createCompositeCompare(decorations.precision, left, right, op == glslang::EOpEqual); + decorations.addNonUniform(builder, result); + return result; + } + + switch (op) { + case glslang::EOpLessThan: + if (isFloat) + binOp = spv::OpFOrdLessThan; + else if (isUnsigned) + binOp = spv::OpULessThan; + else + binOp = spv::OpSLessThan; + break; + case glslang::EOpGreaterThan: + if (isFloat) + binOp = spv::OpFOrdGreaterThan; + else if (isUnsigned) + binOp = spv::OpUGreaterThan; + else + binOp = spv::OpSGreaterThan; + break; + case glslang::EOpLessThanEqual: + if (isFloat) + binOp = spv::OpFOrdLessThanEqual; + else if (isUnsigned) + binOp = spv::OpULessThanEqual; + else + binOp = spv::OpSLessThanEqual; + break; + case glslang::EOpGreaterThanEqual: + if (isFloat) + binOp = spv::OpFOrdGreaterThanEqual; + else if (isUnsigned) + binOp = spv::OpUGreaterThanEqual; + else + binOp = spv::OpSGreaterThanEqual; + break; + case glslang::EOpEqual: + case glslang::EOpVectorEqual: + if (isFloat) + binOp = spv::OpFOrdEqual; + else if (isBool) + binOp = spv::OpLogicalEqual; + else + binOp = spv::OpIEqual; + break; + case glslang::EOpNotEqual: + case glslang::EOpVectorNotEqual: + if (isFloat) + binOp = spv::OpFOrdNotEqual; + else if (isBool) + binOp = spv::OpLogicalNotEqual; + else + binOp = spv::OpINotEqual; + break; + default: + break; + } + + if (binOp != spv::OpNop) { + spv::Id result = builder.createBinOp(binOp, typeId, left, right); + decorations.addNoContraction(builder, result); + decorations.addNonUniform(builder, result); + return builder.setPrecision(result, decorations.precision); + } + + return 0; +} + +// +// Translate AST matrix operation to SPV operation, already having SPV-based operands/types. 
+// These can be any of:
+//
+//   matrix * scalar
+//   scalar * matrix
+//   matrix * matrix     linear algebraic
+//   matrix * vector
+//   vector * matrix
+//   matrix * matrix     componentwise
+//   matrix op matrix    op in {+, -, /}
+//   matrix op scalar    op in {+, -, /}
+//   scalar op matrix    op in {+, -, /}
+//
+spv::Id TGlslangToSpvTraverser::createBinaryMatrixOperation(spv::Op op, OpDecorations& decorations, spv::Id typeId,
+                                                            spv::Id left, spv::Id right)
+{
+    bool firstClass = true;
+
+    // First, handle first-class matrix operations (* and matrix/scalar)
+    switch (op) {
+    case spv::OpFDiv:
+        if (builder.isMatrix(left) && builder.isScalar(right)) {
+            // turn matrix / scalar into a multiply...
+            spv::Id resultType = builder.getTypeId(right);
+            right = builder.createBinOp(spv::OpFDiv, resultType, builder.makeFpConstant(resultType, 1.0), right);
+            op = spv::OpMatrixTimesScalar;
+        } else
+            firstClass = false;
+        break;
+    case spv::OpMatrixTimesScalar:
+        if (builder.isMatrix(right) || builder.isCooperativeMatrix(right))
+            std::swap(left, right);
+        assert(builder.isScalar(right));
+        break;
+    case spv::OpVectorTimesMatrix:
+        assert(builder.isVector(left));
+        assert(builder.isMatrix(right));
+        break;
+    case spv::OpMatrixTimesVector:
+        assert(builder.isMatrix(left));
+        assert(builder.isVector(right));
+        break;
+    case spv::OpMatrixTimesMatrix:
+        assert(builder.isMatrix(left));
+        assert(builder.isMatrix(right));
+        break;
+    default:
+        firstClass = false;
+        break;
+    }
+
+    if (builder.isCooperativeMatrix(left) || builder.isCooperativeMatrix(right))
+        firstClass = true;
+
+    if (firstClass) {
+        spv::Id result = builder.createBinOp(op, typeId, left, right);
+        decorations.addNoContraction(builder, result);
+        decorations.addNonUniform(builder, result);
+        return builder.setPrecision(result, decorations.precision);
+    }
+
+    // Handle component-wise +, -, *, %, and / for all combinations of type.
+    // The result type of all of them is the same type as the (a) matrix operand.
+    // The algorithm is to:
+    //   - break the matrix(es) into vectors
+    //   - smear any scalar to a vector
+    //   - do vector operations
+    //   - make a matrix out of the vector results
+    switch (op) {
+    case spv::OpFAdd:
+    case spv::OpFSub:
+    case spv::OpFDiv:
+    case spv::OpFMod:
+    case spv::OpFMul:
+    {
+        // one time set up...
+        bool leftMat = builder.isMatrix(left);
+        bool rightMat = builder.isMatrix(right);
+        unsigned int numCols = leftMat ? builder.getNumColumns(left) : builder.getNumColumns(right);
+        int numRows = leftMat ? builder.getNumRows(left) : builder.getNumRows(right);
+        spv::Id scalarType = builder.getScalarTypeId(typeId);
+        spv::Id vecType = builder.makeVectorType(scalarType, numRows);
+        std::vector<spv::Id> results;
+        spv::Id smearVec = spv::NoResult;
+        if (builder.isScalar(left))
+            smearVec = builder.smearScalar(decorations.precision, left, vecType);
+        else if (builder.isScalar(right))
+            smearVec = builder.smearScalar(decorations.precision, right, vecType);
+
+        // do each vector op
+        for (unsigned int c = 0; c < numCols; ++c) {
+            std::vector<unsigned int> indexes;
+            indexes.push_back(c);
+            spv::Id leftVec  = leftMat  ? builder.createCompositeExtract(left, vecType, indexes) : smearVec;
+            spv::Id rightVec = rightMat ?
builder.createCompositeExtract(right, vecType, indexes) : smearVec; + spv::Id result = builder.createBinOp(op, vecType, leftVec, rightVec); + decorations.addNoContraction(builder, result); + decorations.addNonUniform(builder, result); + results.push_back(builder.setPrecision(result, decorations.precision)); + } + + // put the pieces together + spv::Id result = builder.setPrecision(builder.createCompositeConstruct(typeId, results), decorations.precision); + decorations.addNonUniform(builder, result); + return result; + } + default: + assert(0); + return spv::NoResult; + } +} + +spv::Id TGlslangToSpvTraverser::createUnaryOperation(glslang::TOperator op, OpDecorations& decorations, spv::Id typeId, + spv::Id operand, glslang::TBasicType typeProxy, const spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags) +{ + spv::Op unaryOp = spv::OpNop; + int extBuiltins = -1; + int libCall = -1; + bool isUnsigned = isTypeUnsignedInt(typeProxy); + bool isFloat = isTypeFloat(typeProxy); + + switch (op) { + case glslang::EOpNegative: + if (isFloat) { + unaryOp = spv::OpFNegate; + if (builder.isMatrixType(typeId)) + return createUnaryMatrixOperation(unaryOp, decorations, typeId, operand, typeProxy); + } else + unaryOp = spv::OpSNegate; + break; + + case glslang::EOpLogicalNot: + case glslang::EOpVectorLogicalNot: + unaryOp = spv::OpLogicalNot; + break; + case glslang::EOpBitwiseNot: + unaryOp = spv::OpNot; + break; + + case glslang::EOpDeterminant: + libCall = spv::GLSLstd450Determinant; + break; + case glslang::EOpMatrixInverse: + libCall = spv::GLSLstd450MatrixInverse; + break; + case glslang::EOpTranspose: + unaryOp = spv::OpTranspose; + break; + + case glslang::EOpRadians: + libCall = spv::GLSLstd450Radians; + break; + case glslang::EOpDegrees: + libCall = spv::GLSLstd450Degrees; + break; + case glslang::EOpSin: + libCall = spv::GLSLstd450Sin; + break; + case glslang::EOpCos: + libCall = spv::GLSLstd450Cos; + break; + case glslang::EOpTan: + libCall = spv::GLSLstd450Tan; + break; + case glslang::EOpAcos: + libCall = spv::GLSLstd450Acos; + break; + case glslang::EOpAsin: + libCall = spv::GLSLstd450Asin; + break; + case glslang::EOpAtan: + libCall = spv::GLSLstd450Atan; + break; + + case glslang::EOpAcosh: + libCall = spv::GLSLstd450Acosh; + break; + case glslang::EOpAsinh: + libCall = spv::GLSLstd450Asinh; + break; + case glslang::EOpAtanh: + libCall = spv::GLSLstd450Atanh; + break; + case glslang::EOpTanh: + libCall = spv::GLSLstd450Tanh; + break; + case glslang::EOpCosh: + libCall = spv::GLSLstd450Cosh; + break; + case glslang::EOpSinh: + libCall = spv::GLSLstd450Sinh; + break; + + case glslang::EOpLength: + libCall = spv::GLSLstd450Length; + break; + case glslang::EOpNormalize: + libCall = spv::GLSLstd450Normalize; + break; + + case glslang::EOpExp: + libCall = spv::GLSLstd450Exp; + break; + case glslang::EOpLog: + libCall = spv::GLSLstd450Log; + break; + case glslang::EOpExp2: + libCall = spv::GLSLstd450Exp2; + break; + case glslang::EOpLog2: + libCall = spv::GLSLstd450Log2; + break; + case glslang::EOpSqrt: + libCall = spv::GLSLstd450Sqrt; + break; + case glslang::EOpInverseSqrt: + libCall = spv::GLSLstd450InverseSqrt; + break; + + case glslang::EOpFloor: + libCall = spv::GLSLstd450Floor; + break; + case glslang::EOpTrunc: + libCall = spv::GLSLstd450Trunc; + break; + case glslang::EOpRound: + libCall = spv::GLSLstd450Round; + break; + case glslang::EOpRoundEven: + libCall = spv::GLSLstd450RoundEven; + break; + case glslang::EOpCeil: + libCall = spv::GLSLstd450Ceil; + break; + case 
glslang::EOpFract: + libCall = spv::GLSLstd450Fract; + break; + + case glslang::EOpIsNan: + unaryOp = spv::OpIsNan; + break; + case glslang::EOpIsInf: + unaryOp = spv::OpIsInf; + break; + case glslang::EOpIsFinite: + unaryOp = spv::OpIsFinite; + break; + + case glslang::EOpFloatBitsToInt: + case glslang::EOpFloatBitsToUint: + case glslang::EOpIntBitsToFloat: + case glslang::EOpUintBitsToFloat: + case glslang::EOpDoubleBitsToInt64: + case glslang::EOpDoubleBitsToUint64: + case glslang::EOpInt64BitsToDouble: + case glslang::EOpUint64BitsToDouble: + case glslang::EOpFloat16BitsToInt16: + case glslang::EOpFloat16BitsToUint16: + case glslang::EOpInt16BitsToFloat16: + case glslang::EOpUint16BitsToFloat16: + unaryOp = spv::OpBitcast; + break; + + case glslang::EOpPackSnorm2x16: + libCall = spv::GLSLstd450PackSnorm2x16; + break; + case glslang::EOpUnpackSnorm2x16: + libCall = spv::GLSLstd450UnpackSnorm2x16; + break; + case glslang::EOpPackUnorm2x16: + libCall = spv::GLSLstd450PackUnorm2x16; + break; + case glslang::EOpUnpackUnorm2x16: + libCall = spv::GLSLstd450UnpackUnorm2x16; + break; + case glslang::EOpPackHalf2x16: + libCall = spv::GLSLstd450PackHalf2x16; + break; + case glslang::EOpUnpackHalf2x16: + libCall = spv::GLSLstd450UnpackHalf2x16; + break; +#ifndef GLSLANG_WEB + case glslang::EOpPackSnorm4x8: + libCall = spv::GLSLstd450PackSnorm4x8; + break; + case glslang::EOpUnpackSnorm4x8: + libCall = spv::GLSLstd450UnpackSnorm4x8; + break; + case glslang::EOpPackUnorm4x8: + libCall = spv::GLSLstd450PackUnorm4x8; + break; + case glslang::EOpUnpackUnorm4x8: + libCall = spv::GLSLstd450UnpackUnorm4x8; + break; + case glslang::EOpPackDouble2x32: + libCall = spv::GLSLstd450PackDouble2x32; + break; + case glslang::EOpUnpackDouble2x32: + libCall = spv::GLSLstd450UnpackDouble2x32; + break; +#endif + + case glslang::EOpPackInt2x32: + case glslang::EOpUnpackInt2x32: + case glslang::EOpPackUint2x32: + case glslang::EOpUnpackUint2x32: + case glslang::EOpPack16: + case glslang::EOpPack32: + case glslang::EOpPack64: + case glslang::EOpUnpack32: + case glslang::EOpUnpack16: + case glslang::EOpUnpack8: + case glslang::EOpPackInt2x16: + case glslang::EOpUnpackInt2x16: + case glslang::EOpPackUint2x16: + case glslang::EOpUnpackUint2x16: + case glslang::EOpPackInt4x16: + case glslang::EOpUnpackInt4x16: + case glslang::EOpPackUint4x16: + case glslang::EOpUnpackUint4x16: + case glslang::EOpPackFloat2x16: + case glslang::EOpUnpackFloat2x16: + unaryOp = spv::OpBitcast; + break; + + case glslang::EOpDPdx: + unaryOp = spv::OpDPdx; + break; + case glslang::EOpDPdy: + unaryOp = spv::OpDPdy; + break; + case glslang::EOpFwidth: + unaryOp = spv::OpFwidth; + break; + + case glslang::EOpAny: + unaryOp = spv::OpAny; + break; + case glslang::EOpAll: + unaryOp = spv::OpAll; + break; + + case glslang::EOpAbs: + if (isFloat) + libCall = spv::GLSLstd450FAbs; + else + libCall = spv::GLSLstd450SAbs; + break; + case glslang::EOpSign: + if (isFloat) + libCall = spv::GLSLstd450FSign; + else + libCall = spv::GLSLstd450SSign; + break; + +#ifndef GLSLANG_WEB + case glslang::EOpDPdxFine: + unaryOp = spv::OpDPdxFine; + break; + case glslang::EOpDPdyFine: + unaryOp = spv::OpDPdyFine; + break; + case glslang::EOpFwidthFine: + unaryOp = spv::OpFwidthFine; + break; + case glslang::EOpDPdxCoarse: + unaryOp = spv::OpDPdxCoarse; + break; + case glslang::EOpDPdyCoarse: + unaryOp = spv::OpDPdyCoarse; + break; + case glslang::EOpFwidthCoarse: + unaryOp = spv::OpFwidthCoarse; + break; + case glslang::EOpRayQueryProceed: + unaryOp = 
spv::OpRayQueryProceedKHR;
+        break;
+    case glslang::EOpRayQueryGetRayTMin:
+        unaryOp = spv::OpRayQueryGetRayTMinKHR;
+        break;
+    case glslang::EOpRayQueryGetRayFlags:
+        unaryOp = spv::OpRayQueryGetRayFlagsKHR;
+        break;
+    case glslang::EOpRayQueryGetWorldRayOrigin:
+        unaryOp = spv::OpRayQueryGetWorldRayOriginKHR;
+        break;
+    case glslang::EOpRayQueryGetWorldRayDirection:
+        unaryOp = spv::OpRayQueryGetWorldRayDirectionKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionCandidateAABBOpaque:
+        unaryOp = spv::OpRayQueryGetIntersectionCandidateAABBOpaqueKHR;
+        break;
+    case glslang::EOpInterpolateAtCentroid:
+        if (typeProxy == glslang::EbtFloat16)
+            builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+        libCall = spv::GLSLstd450InterpolateAtCentroid;
+        break;
+    case glslang::EOpAtomicCounterIncrement:
+    case glslang::EOpAtomicCounterDecrement:
+    case glslang::EOpAtomicCounter:
+    {
+        // Handle all of the atomics in one place, in createAtomicOperation()
+        std::vector<spv::Id> operands;
+        operands.push_back(operand);
+        return createAtomicOperation(op, decorations.precision, typeId, operands, typeProxy, lvalueCoherentFlags);
+    }
+
+    case glslang::EOpBitFieldReverse:
+        unaryOp = spv::OpBitReverse;
+        break;
+    case glslang::EOpBitCount:
+        unaryOp = spv::OpBitCount;
+        break;
+    case glslang::EOpFindLSB:
+        libCall = spv::GLSLstd450FindILsb;
+        break;
+    case glslang::EOpFindMSB:
+        if (isUnsigned)
+            libCall = spv::GLSLstd450FindUMsb;
+        else
+            libCall = spv::GLSLstd450FindSMsb;
+        break;
+
+    case glslang::EOpCountLeadingZeros:
+        builder.addCapability(spv::CapabilityIntegerFunctions2INTEL);
+        builder.addExtension("SPV_INTEL_shader_integer_functions2");
+        unaryOp = spv::OpUCountLeadingZerosINTEL;
+        break;
+
+    case glslang::EOpCountTrailingZeros:
+        builder.addCapability(spv::CapabilityIntegerFunctions2INTEL);
+        builder.addExtension("SPV_INTEL_shader_integer_functions2");
+        unaryOp = spv::OpUCountTrailingZerosINTEL;
+        break;
+
+    case glslang::EOpBallot:
+    case glslang::EOpReadFirstInvocation:
+    case glslang::EOpAnyInvocation:
+    case glslang::EOpAllInvocations:
+    case glslang::EOpAllInvocationsEqual:
+    case glslang::EOpMinInvocations:
+    case glslang::EOpMaxInvocations:
+    case glslang::EOpAddInvocations:
+    case glslang::EOpMinInvocationsNonUniform:
+    case glslang::EOpMaxInvocationsNonUniform:
+    case glslang::EOpAddInvocationsNonUniform:
+    case glslang::EOpMinInvocationsInclusiveScan:
+    case glslang::EOpMaxInvocationsInclusiveScan:
+    case glslang::EOpAddInvocationsInclusiveScan:
+    case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+    case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+    case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+    case glslang::EOpMinInvocationsExclusiveScan:
+    case glslang::EOpMaxInvocationsExclusiveScan:
+    case glslang::EOpAddInvocationsExclusiveScan:
+    case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+    case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+    case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+    {
+        std::vector<spv::Id> operands;
+        operands.push_back(operand);
+        return createInvocationsOperation(op, typeId, operands, typeProxy);
+    }
+    case glslang::EOpSubgroupAll:
+    case glslang::EOpSubgroupAny:
+    case glslang::EOpSubgroupAllEqual:
+    case glslang::EOpSubgroupBroadcastFirst:
+    case glslang::EOpSubgroupBallot:
+    case glslang::EOpSubgroupInverseBallot:
+    case glslang::EOpSubgroupBallotBitCount:
+    case glslang::EOpSubgroupBallotInclusiveBitCount:
+    case glslang::EOpSubgroupBallotExclusiveBitCount:
+    case glslang::EOpSubgroupBallotFindLSB:
+    case glslang::EOpSubgroupBallotFindMSB:
+    case glslang::EOpSubgroupAdd:
+    case glslang::EOpSubgroupMul:
+    case glslang::EOpSubgroupMin:
+    case glslang::EOpSubgroupMax:
+    case glslang::EOpSubgroupAnd:
+    case glslang::EOpSubgroupOr:
+    case glslang::EOpSubgroupXor:
+    case glslang::EOpSubgroupInclusiveAdd:
+    case glslang::EOpSubgroupInclusiveMul:
+    case glslang::EOpSubgroupInclusiveMin:
+    case glslang::EOpSubgroupInclusiveMax:
+    case glslang::EOpSubgroupInclusiveAnd:
+    case glslang::EOpSubgroupInclusiveOr:
+    case glslang::EOpSubgroupInclusiveXor:
+    case glslang::EOpSubgroupExclusiveAdd:
+    case glslang::EOpSubgroupExclusiveMul:
+    case glslang::EOpSubgroupExclusiveMin:
+    case glslang::EOpSubgroupExclusiveMax:
+    case glslang::EOpSubgroupExclusiveAnd:
+    case glslang::EOpSubgroupExclusiveOr:
+    case glslang::EOpSubgroupExclusiveXor:
+    case glslang::EOpSubgroupQuadSwapHorizontal:
+    case glslang::EOpSubgroupQuadSwapVertical:
+    case glslang::EOpSubgroupQuadSwapDiagonal: {
+        std::vector<spv::Id> operands;
+        operands.push_back(operand);
+        return createSubgroupOperation(op, typeId, operands, typeProxy);
+    }
+    case glslang::EOpMbcnt:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+        libCall = spv::MbcntAMD;
+        break;
+
+    case glslang::EOpCubeFaceIndex:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader);
+        libCall = spv::CubeFaceIndexAMD;
+        break;
+
+    case glslang::EOpCubeFaceCoord:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader);
+        libCall = spv::CubeFaceCoordAMD;
+        break;
+    case glslang::EOpSubgroupPartition:
+        unaryOp = spv::OpGroupNonUniformPartitionNV;
+        break;
+    case glslang::EOpConstructReference:
+        unaryOp = spv::OpBitcast;
+        break;
+#endif
+
+    case glslang::EOpCopyObject:
+        unaryOp = spv::OpCopyObject;
+        break;
+
+    default:
+        return 0;
+    }
+
+    spv::Id id;
+    if (libCall >= 0) {
+        std::vector<spv::Id> args;
+        args.push_back(operand);
+        id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, args);
+    } else {
+        id = builder.createUnaryOp(unaryOp, typeId, operand);
+    }
+
+    decorations.addNoContraction(builder, id);
+    decorations.addNonUniform(builder, id);
+    return builder.setPrecision(id, decorations.precision);
+}
+
+// Create a unary operation on a matrix
+spv::Id TGlslangToSpvTraverser::createUnaryMatrixOperation(spv::Op op, OpDecorations& decorations, spv::Id typeId,
+                                                           spv::Id operand, glslang::TBasicType /* typeProxy */)
+{
+    // Handle unary operations vector by vector.
+    // The result type is the same type as the original type.
+    // The algorithm is to:
+    //   - break the matrix into vectors
+    //   - apply the operation to each vector
+    //   - make a matrix out of the vector results
+
+    // get the types sorted out
+    int numCols = builder.getNumColumns(operand);
+    int numRows = builder.getNumRows(operand);
+    spv::Id srcVecType  = builder.makeVectorType(builder.getScalarTypeId(builder.getTypeId(operand)), numRows);
+    spv::Id destVecType = builder.makeVectorType(builder.getScalarTypeId(typeId), numRows);
+    std::vector<spv::Id> results;
+
+    // do each vector op
+    for (int c = 0; c < numCols; ++c) {
+        std::vector<unsigned int> indexes;
+        indexes.push_back(c);
+        spv::Id srcVec  = builder.createCompositeExtract(operand, srcVecType, indexes);
+        spv::Id destVec = builder.createUnaryOp(op, destVecType, srcVec);
+        decorations.addNoContraction(builder, destVec);
+        decorations.addNonUniform(builder, destVec);
+        results.push_back(builder.setPrecision(destVec, decorations.precision));
+    }
+
+    // put the pieces together
+    spv::Id result = builder.setPrecision(builder.createCompositeConstruct(typeId, results), decorations.precision);
+    decorations.addNonUniform(builder, result);
+    return result;
+}
+
+// For converting integers where both the bitwidth and the signedness could
+// change, but only do the width change here. The caller is still responsible
+// for the signedness conversion.
+spv::Id TGlslangToSpvTraverser::createIntWidthConversion(glslang::TOperator op, spv::Id operand, int vectorSize)
+{
+    // Get the result type width, based on the type to convert to.
+    int width = 32;
+    switch(op) {
+    case glslang::EOpConvInt16ToUint8:
+    case glslang::EOpConvIntToUint8:
+    case glslang::EOpConvInt64ToUint8:
+    case glslang::EOpConvUint16ToInt8:
+    case glslang::EOpConvUintToInt8:
+    case glslang::EOpConvUint64ToInt8:
+        width = 8;
+        break;
+    case glslang::EOpConvInt8ToUint16:
+    case glslang::EOpConvIntToUint16:
+    case glslang::EOpConvInt64ToUint16:
+    case glslang::EOpConvUint8ToInt16:
+    case glslang::EOpConvUintToInt16:
+    case glslang::EOpConvUint64ToInt16:
+        width = 16;
+        break;
+    case glslang::EOpConvInt8ToUint:
+    case glslang::EOpConvInt16ToUint:
+    case glslang::EOpConvInt64ToUint:
+    case glslang::EOpConvUint8ToInt:
+    case glslang::EOpConvUint16ToInt:
+    case glslang::EOpConvUint64ToInt:
+        width = 32;
+        break;
+    case glslang::EOpConvInt8ToUint64:
+    case glslang::EOpConvInt16ToUint64:
+    case glslang::EOpConvIntToUint64:
+    case glslang::EOpConvUint8ToInt64:
+    case glslang::EOpConvUint16ToInt64:
+    case glslang::EOpConvUintToInt64:
+        width = 64;
+        break;
+
+    default:
+        assert(false && "Default missing");
+        break;
+    }
+
+    // Get the conversion operation and result type,
+    // based on the target width, but the source type.
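+    // (Editorial note) E.g. for EOpConvInt16ToUint this emits OpSConvert to a 32-bit
+    // signed int (the width changes, the signedness still follows the source); the
+    // caller then finishes with an OpBitcast to the unsigned destination type.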
+    spv::Id type = spv::NoType;
+    spv::Op convOp = spv::OpNop;
+    switch(op) {
+    case glslang::EOpConvInt8ToUint16:
+    case glslang::EOpConvInt8ToUint:
+    case glslang::EOpConvInt8ToUint64:
+    case glslang::EOpConvInt16ToUint8:
+    case glslang::EOpConvInt16ToUint:
+    case glslang::EOpConvInt16ToUint64:
+    case glslang::EOpConvIntToUint8:
+    case glslang::EOpConvIntToUint16:
+    case glslang::EOpConvIntToUint64:
+    case glslang::EOpConvInt64ToUint8:
+    case glslang::EOpConvInt64ToUint16:
+    case glslang::EOpConvInt64ToUint:
+        convOp = spv::OpSConvert;
+        type = builder.makeIntType(width);
+        break;
+    default:
+        convOp = spv::OpUConvert;
+        type = builder.makeUintType(width);
+        break;
+    }
+
+    if (vectorSize > 0)
+        type = builder.makeVectorType(type, vectorSize);
+
+    return builder.createUnaryOp(convOp, type, operand);
+}
+
+spv::Id TGlslangToSpvTraverser::createConversion(glslang::TOperator op, OpDecorations& decorations, spv::Id destType,
+    spv::Id operand, glslang::TBasicType typeProxy)
+{
+    spv::Op convOp = spv::OpNop;
+    spv::Id zero = 0;
+    spv::Id one = 0;
+
+    int vectorSize = builder.isVectorType(destType) ? builder.getNumTypeComponents(destType) : 0;
+
+    switch (op) {
+    case glslang::EOpConvIntToBool:
+    case glslang::EOpConvUintToBool:
+        zero = builder.makeUintConstant(0);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+    case glslang::EOpConvFloatToBool:
+        zero = builder.makeFloatConstant(0.0F);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+    case glslang::EOpConvBoolToFloat:
+        convOp = spv::OpSelect;
+        zero = builder.makeFloatConstant(0.0F);
+        one = builder.makeFloatConstant(1.0F);
+        break;
+
+    case glslang::EOpConvBoolToInt:
+    case glslang::EOpConvBoolToInt64:
+#ifndef GLSLANG_WEB
+        if (op == glslang::EOpConvBoolToInt64) {
+            zero = builder.makeInt64Constant(0);
+            one = builder.makeInt64Constant(1);
+        } else
+#endif
+        {
+            zero = builder.makeIntConstant(0);
+            one = builder.makeIntConstant(1);
+        }
+
+        convOp = spv::OpSelect;
+        break;
+
+    case glslang::EOpConvBoolToUint:
+    case glslang::EOpConvBoolToUint64:
+#ifndef GLSLANG_WEB
+        if (op == glslang::EOpConvBoolToUint64) {
+            zero = builder.makeUint64Constant(0);
+            one = builder.makeUint64Constant(1);
+        } else
+#endif
+        {
+            zero = builder.makeUintConstant(0);
+            one = builder.makeUintConstant(1);
+        }
+
+        convOp = spv::OpSelect;
+        break;
+
+    case glslang::EOpConvInt8ToFloat16:
+    case glslang::EOpConvInt8ToFloat:
+    case glslang::EOpConvInt8ToDouble:
+    case glslang::EOpConvInt16ToFloat16:
+    case glslang::EOpConvInt16ToFloat:
+    case glslang::EOpConvInt16ToDouble:
+    case glslang::EOpConvIntToFloat16:
+    case glslang::EOpConvIntToFloat:
+    case glslang::EOpConvIntToDouble:
+    case glslang::EOpConvInt64ToFloat:
+    case glslang::EOpConvInt64ToDouble:
+    case glslang::EOpConvInt64ToFloat16:
+        convOp = spv::OpConvertSToF;
+        break;
+
+    case glslang::EOpConvUint8ToFloat16:
+    case glslang::EOpConvUint8ToFloat:
+    case glslang::EOpConvUint8ToDouble:
+    case glslang::EOpConvUint16ToFloat16:
+    case glslang::EOpConvUint16ToFloat:
+    case glslang::EOpConvUint16ToDouble:
+    case glslang::EOpConvUintToFloat16:
+    case glslang::EOpConvUintToFloat:
+    case glslang::EOpConvUintToDouble:
+    case glslang::EOpConvUint64ToFloat:
+    case glslang::EOpConvUint64ToDouble:
+    case glslang::EOpConvUint64ToFloat16:
+        convOp = spv::OpConvertUToF;
+        break;
+
+    case glslang::EOpConvFloat16ToInt8:
+    case glslang::EOpConvFloatToInt8:
+    case glslang::EOpConvDoubleToInt8:
+    case glslang::EOpConvFloat16ToInt16:
+    case glslang::EOpConvFloatToInt16:
+    case glslang::EOpConvDoubleToInt16:
+    case glslang::EOpConvFloat16ToInt:
+    case glslang::EOpConvFloatToInt:
+    case glslang::EOpConvDoubleToInt:
+    case glslang::EOpConvFloat16ToInt64:
+    case glslang::EOpConvFloatToInt64:
+    case glslang::EOpConvDoubleToInt64:
+        convOp = spv::OpConvertFToS;
+        break;
+
+    case glslang::EOpConvUint8ToInt8:
+    case glslang::EOpConvInt8ToUint8:
+    case glslang::EOpConvUint16ToInt16:
+    case glslang::EOpConvInt16ToUint16:
+    case glslang::EOpConvUintToInt:
+    case glslang::EOpConvIntToUint:
+    case glslang::EOpConvUint64ToInt64:
+    case glslang::EOpConvInt64ToUint64:
+        if (builder.isInSpecConstCodeGenMode()) {
+            // Build zero scalar or vector for OpIAdd.
+#ifndef GLSLANG_WEB
+            if(op == glslang::EOpConvUint8ToInt8 || op == glslang::EOpConvInt8ToUint8) {
+                zero = builder.makeUint8Constant(0);
+            } else if (op == glslang::EOpConvUint16ToInt16 || op == glslang::EOpConvInt16ToUint16) {
+                zero = builder.makeUint16Constant(0);
+            } else if (op == glslang::EOpConvUint64ToInt64 || op == glslang::EOpConvInt64ToUint64) {
+                zero = builder.makeUint64Constant(0);
+            } else
+#endif
+            {
+                zero = builder.makeUintConstant(0);
+            }
+            zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd, instead of OpBitcast to do the conversion when
+            // generating for OpSpecConstantOp instruction.
+            return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+        }
+        // For normal run-time conversion instruction, use OpBitcast.
+        convOp = spv::OpBitcast;
+        break;
+
+    case glslang::EOpConvFloat16ToUint8:
+    case glslang::EOpConvFloatToUint8:
+    case glslang::EOpConvDoubleToUint8:
+    case glslang::EOpConvFloat16ToUint16:
+    case glslang::EOpConvFloatToUint16:
+    case glslang::EOpConvDoubleToUint16:
+    case glslang::EOpConvFloat16ToUint:
+    case glslang::EOpConvFloatToUint:
+    case glslang::EOpConvDoubleToUint:
+    case glslang::EOpConvFloatToUint64:
+    case glslang::EOpConvDoubleToUint64:
+    case glslang::EOpConvFloat16ToUint64:
+        convOp = spv::OpConvertFToU;
+        break;
+
+#ifndef GLSLANG_WEB
+    case glslang::EOpConvInt8ToBool:
+    case glslang::EOpConvUint8ToBool:
+        zero = builder.makeUint8Constant(0);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+    case glslang::EOpConvInt16ToBool:
+    case glslang::EOpConvUint16ToBool:
+        zero = builder.makeUint16Constant(0);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+    case glslang::EOpConvInt64ToBool:
+    case glslang::EOpConvUint64ToBool:
+        zero = builder.makeUint64Constant(0);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+    case glslang::EOpConvDoubleToBool:
+        zero = builder.makeDoubleConstant(0.0);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+    case glslang::EOpConvFloat16ToBool:
+        zero = builder.makeFloat16Constant(0.0F);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+    case glslang::EOpConvBoolToDouble:
+        convOp = spv::OpSelect;
+        zero = builder.makeDoubleConstant(0.0);
+        one = builder.makeDoubleConstant(1.0);
+        break;
+    case glslang::EOpConvBoolToFloat16:
+        convOp = spv::OpSelect;
+        zero = builder.makeFloat16Constant(0.0F);
+        one = builder.makeFloat16Constant(1.0F);
+        break;
+    case glslang::EOpConvBoolToInt8:
+        zero = builder.makeInt8Constant(0);
+        one = builder.makeInt8Constant(1);
+        convOp = spv::OpSelect;
+        break;
+    case glslang::EOpConvBoolToUint8:
+        zero = builder.makeUint8Constant(0);
+        one = builder.makeUint8Constant(1);
+        convOp = spv::OpSelect;
+        break;
+    case glslang::EOpConvBoolToInt16:
+        zero = builder.makeInt16Constant(0);
+        one = builder.makeInt16Constant(1);
+        convOp = spv::OpSelect;
+        break;
+    case glslang::EOpConvBoolToUint16:
+        zero = builder.makeUint16Constant(0);
+        one = builder.makeUint16Constant(1);
+        convOp = spv::OpSelect;
+        break;
+    case glslang::EOpConvDoubleToFloat:
+    case glslang::EOpConvFloatToDouble:
+    case glslang::EOpConvDoubleToFloat16:
+    case glslang::EOpConvFloat16ToDouble:
+    case glslang::EOpConvFloatToFloat16:
+    case glslang::EOpConvFloat16ToFloat:
+        convOp = spv::OpFConvert;
+        if (builder.isMatrixType(destType))
+            return createUnaryMatrixOperation(convOp, decorations, destType, operand, typeProxy);
+        break;
+
+    case glslang::EOpConvInt8ToInt16:
+    case glslang::EOpConvInt8ToInt:
+    case glslang::EOpConvInt8ToInt64:
+    case glslang::EOpConvInt16ToInt8:
+    case glslang::EOpConvInt16ToInt:
+    case glslang::EOpConvInt16ToInt64:
+    case glslang::EOpConvIntToInt8:
+    case glslang::EOpConvIntToInt16:
+    case glslang::EOpConvIntToInt64:
+    case glslang::EOpConvInt64ToInt8:
+    case glslang::EOpConvInt64ToInt16:
+    case glslang::EOpConvInt64ToInt:
+        convOp = spv::OpSConvert;
+        break;
+
+    case glslang::EOpConvUint8ToUint16:
+    case glslang::EOpConvUint8ToUint:
+    case glslang::EOpConvUint8ToUint64:
+    case glslang::EOpConvUint16ToUint8:
+    case glslang::EOpConvUint16ToUint:
+    case glslang::EOpConvUint16ToUint64:
+    case glslang::EOpConvUintToUint8:
+    case glslang::EOpConvUintToUint16:
+    case glslang::EOpConvUintToUint64:
+    case glslang::EOpConvUint64ToUint8:
+    case glslang::EOpConvUint64ToUint16:
+    case glslang::EOpConvUint64ToUint:
+        convOp = spv::OpUConvert;
+        break;
+
+    case glslang::EOpConvInt8ToUint16:
+    case glslang::EOpConvInt8ToUint:
+    case glslang::EOpConvInt8ToUint64:
+    case glslang::EOpConvInt16ToUint8:
+    case glslang::EOpConvInt16ToUint:
+    case glslang::EOpConvInt16ToUint64:
+    case glslang::EOpConvIntToUint8:
+    case glslang::EOpConvIntToUint16:
+    case glslang::EOpConvIntToUint64:
+    case glslang::EOpConvInt64ToUint8:
+    case glslang::EOpConvInt64ToUint16:
+    case glslang::EOpConvInt64ToUint:
+    case glslang::EOpConvUint8ToInt16:
+    case glslang::EOpConvUint8ToInt:
+    case glslang::EOpConvUint8ToInt64:
+    case glslang::EOpConvUint16ToInt8:
+    case glslang::EOpConvUint16ToInt:
+    case glslang::EOpConvUint16ToInt64:
+    case glslang::EOpConvUintToInt8:
+    case glslang::EOpConvUintToInt16:
+    case glslang::EOpConvUintToInt64:
+    case glslang::EOpConvUint64ToInt8:
+    case glslang::EOpConvUint64ToInt16:
+    case glslang::EOpConvUint64ToInt:
+        // OpSConvert/OpUConvert + OpBitCast
+        operand = createIntWidthConversion(op, operand, vectorSize);
+
+        if (builder.isInSpecConstCodeGenMode()) {
+            // Build zero scalar or vector for OpIAdd.
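+            // OpBitcast is not a valid OpSpecConstantOp opcode for shaders, so
+            // in spec-constant mode the bit-preserving conversion is expressed
+            // as an IAdd of the (width-converted) operand and a zero constant.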
+            switch(op) {
+            case glslang::EOpConvInt16ToUint8:
+            case glslang::EOpConvIntToUint8:
+            case glslang::EOpConvInt64ToUint8:
+            case glslang::EOpConvUint16ToInt8:
+            case glslang::EOpConvUintToInt8:
+            case glslang::EOpConvUint64ToInt8:
+                zero = builder.makeUint8Constant(0);
+                break;
+            case glslang::EOpConvInt8ToUint16:
+            case glslang::EOpConvIntToUint16:
+            case glslang::EOpConvInt64ToUint16:
+            case glslang::EOpConvUint8ToInt16:
+            case glslang::EOpConvUintToInt16:
+            case glslang::EOpConvUint64ToInt16:
+                zero = builder.makeUint16Constant(0);
+                break;
+            case glslang::EOpConvInt8ToUint:
+            case glslang::EOpConvInt16ToUint:
+            case glslang::EOpConvInt64ToUint:
+            case glslang::EOpConvUint8ToInt:
+            case glslang::EOpConvUint16ToInt:
+            case glslang::EOpConvUint64ToInt:
+                zero = builder.makeUintConstant(0);
+                break;
+            case glslang::EOpConvInt8ToUint64:
+            case glslang::EOpConvInt16ToUint64:
+            case glslang::EOpConvIntToUint64:
+            case glslang::EOpConvUint8ToInt64:
+            case glslang::EOpConvUint16ToInt64:
+            case glslang::EOpConvUintToInt64:
+                zero = builder.makeUint64Constant(0);
+                break;
+            default:
+                assert(false && "Default missing");
+                break;
+            }
+            zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd, instead of OpBitcast to do the conversion when
+            // generating for OpSpecConstantOp instruction.
+            return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+        }
+        // For normal run-time conversion instruction, use OpBitcast.
+        convOp = spv::OpBitcast;
+        break;
+    case glslang::EOpConvUint64ToPtr:
+        convOp = spv::OpConvertUToPtr;
+        break;
+    case glslang::EOpConvPtrToUint64:
+        convOp = spv::OpConvertPtrToU;
+        break;
+    case glslang::EOpConvPtrToUvec2:
+    case glslang::EOpConvUvec2ToPtr:
+        if (builder.isVector(operand))
+            builder.promoteIncorporatedExtension(spv::E_SPV_EXT_physical_storage_buffer,
+                spv::E_SPV_KHR_physical_storage_buffer, spv::Spv_1_5);
+        convOp = spv::OpBitcast;
+        break;
+#endif
+
+    default:
+        break;
+    }
+
+    spv::Id result = 0;
+    if (convOp == spv::OpNop)
+        return result;
+
+    if (convOp == spv::OpSelect) {
+        zero = makeSmearedConstant(zero, vectorSize);
+        one = makeSmearedConstant(one, vectorSize);
+        result = builder.createTriOp(convOp, destType, operand, one, zero);
+    } else
+        result = builder.createUnaryOp(convOp, destType, operand);
+
+    result = builder.setPrecision(result, decorations.precision);
+    decorations.addNonUniform(builder, result);
+    return result;
+}
+
+spv::Id TGlslangToSpvTraverser::makeSmearedConstant(spv::Id constant, int vectorSize)
+{
+    if (vectorSize == 0)
+        return constant;
+
+    spv::Id vectorTypeId = builder.makeVectorType(builder.getTypeId(constant), vectorSize);
+    std::vector<spv::Id> components;
+    for (int c = 0; c < vectorSize; ++c)
+        components.push_back(constant);
+    return builder.makeCompositeConstant(vectorTypeId, components);
+}
+
+// For glslang ops that map to SPV atomic opCodes
+spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv::Decoration /*precision*/,
+    spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy,
+    const spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags)
+{
+    spv::Op opCode = spv::OpNop;
+
+    switch (op) {
+    case glslang::EOpAtomicAdd:
+    case glslang::EOpImageAtomicAdd:
+    case glslang::EOpAtomicCounterAdd:
+        opCode = spv::OpAtomicIAdd;
+        break;
+    case glslang::EOpAtomicCounterSubtract:
+        opCode = spv::OpAtomicISub;
+        break;
+    case glslang::EOpAtomicMin:
+    case glslang::EOpImageAtomicMin:
+    case glslang::EOpAtomicCounterMin:
+        opCode = (typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64) ?
+            spv::OpAtomicUMin : spv::OpAtomicSMin;
+        break;
+    case glslang::EOpAtomicMax:
+    case glslang::EOpImageAtomicMax:
+    case glslang::EOpAtomicCounterMax:
+        opCode = (typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64) ?
+            spv::OpAtomicUMax : spv::OpAtomicSMax;
+        break;
+    case glslang::EOpAtomicAnd:
+    case glslang::EOpImageAtomicAnd:
+    case glslang::EOpAtomicCounterAnd:
+        opCode = spv::OpAtomicAnd;
+        break;
+    case glslang::EOpAtomicOr:
+    case glslang::EOpImageAtomicOr:
+    case glslang::EOpAtomicCounterOr:
+        opCode = spv::OpAtomicOr;
+        break;
+    case glslang::EOpAtomicXor:
+    case glslang::EOpImageAtomicXor:
+    case glslang::EOpAtomicCounterXor:
+        opCode = spv::OpAtomicXor;
+        break;
+    case glslang::EOpAtomicExchange:
+    case glslang::EOpImageAtomicExchange:
+    case glslang::EOpAtomicCounterExchange:
+        opCode = spv::OpAtomicExchange;
+        break;
+    case glslang::EOpAtomicCompSwap:
+    case glslang::EOpImageAtomicCompSwap:
+    case glslang::EOpAtomicCounterCompSwap:
+        opCode = spv::OpAtomicCompareExchange;
+        break;
+    case glslang::EOpAtomicCounterIncrement:
+        opCode = spv::OpAtomicIIncrement;
+        break;
+    case glslang::EOpAtomicCounterDecrement:
+        opCode = spv::OpAtomicIDecrement;
+        break;
+    case glslang::EOpAtomicCounter:
+    case glslang::EOpImageAtomicLoad:
+    case glslang::EOpAtomicLoad:
+        opCode = spv::OpAtomicLoad;
+        break;
+    case glslang::EOpAtomicStore:
+    case glslang::EOpImageAtomicStore:
+        opCode = spv::OpAtomicStore;
+        break;
+    default:
+        assert(0);
+        break;
+    }
+
+    if (typeProxy == glslang::EbtInt64 || typeProxy == glslang::EbtUint64)
+        builder.addCapability(spv::CapabilityInt64Atomics);
+
+    // Sort out the operands
+    // - mapping from glslang -> SPV
+    // - there are extra SPV operands that are optional in glslang
+    // - compare-exchange swaps the value and comparator
+    // - compare-exchange has an extra memory semantics
+    // - EOpAtomicCounterDecrement needs a post decrement
+    spv::Id pointerId = 0, compareId = 0, valueId = 0;
+    // scope defaults to Device in the old model, QueueFamilyKHR in the new model
+    spv::Id scopeId;
+    if (glslangIntermediate->usingVulkanMemoryModel()) {
+        scopeId = builder.makeUintConstant(spv::ScopeQueueFamilyKHR);
+    } else {
+        scopeId = builder.makeUintConstant(spv::ScopeDevice);
+    }
+    // semantics default to relaxed
+    spv::Id semanticsId = builder.makeUintConstant(lvalueCoherentFlags.isVolatile() &&
+        glslangIntermediate->usingVulkanMemoryModel() ?
+        spv::MemorySemanticsVolatileMask :
+        spv::MemorySemanticsMaskNone);
+    spv::Id semanticsId2 = semanticsId;
+
+    pointerId = operands[0];
+    if (opCode == spv::OpAtomicIIncrement || opCode == spv::OpAtomicIDecrement) {
+        // no additional operands
+    } else if (opCode == spv::OpAtomicCompareExchange) {
+        compareId = operands[1];
+        valueId = operands[2];
+        if (operands.size() > 3) {
+            scopeId = operands[3];
+            semanticsId = builder.makeUintConstant(
+                builder.getConstantScalar(operands[4]) | builder.getConstantScalar(operands[5]));
+            semanticsId2 = builder.makeUintConstant(
+                builder.getConstantScalar(operands[6]) | builder.getConstantScalar(operands[7]));
+        }
+    } else if (opCode == spv::OpAtomicLoad) {
+        if (operands.size() > 1) {
+            scopeId = operands[1];
+            semanticsId = builder.makeUintConstant(
+                builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]));
+        }
+    } else {
+        // atomic store or RMW
+        valueId = operands[1];
+        if (operands.size() > 2) {
+            scopeId = operands[2];
+            semanticsId = builder.makeUintConstant(
+                builder.getConstantScalar(operands[3]) | builder.getConstantScalar(operands[4]));
+        }
+    }
+
+    // Check for capabilities
+    unsigned semanticsImmediate = builder.getConstantScalar(semanticsId) | builder.getConstantScalar(semanticsId2);
+    if (semanticsImmediate & (spv::MemorySemanticsMakeAvailableKHRMask |
+                              spv::MemorySemanticsMakeVisibleKHRMask |
+                              spv::MemorySemanticsOutputMemoryKHRMask |
+                              spv::MemorySemanticsVolatileMask)) {
+        builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+    }
+
+    if (glslangIntermediate->usingVulkanMemoryModel() && builder.getConstantScalar(scopeId) == spv::ScopeDevice) {
+        builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+    }
+
+    std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
+    spvAtomicOperands.push_back(pointerId);
+    spvAtomicOperands.push_back(scopeId);
+    spvAtomicOperands.push_back(semanticsId);
+    if (opCode == spv::OpAtomicCompareExchange) {
+        spvAtomicOperands.push_back(semanticsId2);
+        spvAtomicOperands.push_back(valueId);
+        spvAtomicOperands.push_back(compareId);
+    } else if (opCode != spv::OpAtomicLoad && opCode != spv::OpAtomicIIncrement && opCode != spv::OpAtomicIDecrement) {
+        spvAtomicOperands.push_back(valueId);
+    }
+
+    if (opCode == spv::OpAtomicStore) {
+        builder.createNoResultOp(opCode, spvAtomicOperands);
+        return 0;
+    } else {
+        spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
+
+        // GLSL and HLSL atomic-counter decrement return post-decrement value,
+        // while SPIR-V returns pre-decrement value. Translate between these semantics.
+        if (op == glslang::EOpAtomicCounterDecrement)
+            resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
+
+        return resultId;
+    }
+}
+
+// Create group invocation operations.
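+// These lower the ARB_shader_ballot / ARB_shader_group_vote style built-ins
+// (e.g. ballotARB(), readInvocationARB(), anyInvocationARB()) and the
+// AMD_shader_ballot reductions and scans onto OpSubgroup*KHR and OpGroup* opcodes.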
+spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op, spv::Id typeId,
+    std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+    bool isUnsigned = isTypeUnsignedInt(typeProxy);
+    bool isFloat = isTypeFloat(typeProxy);
+
+    spv::Op opCode = spv::OpNop;
+    std::vector<spv::IdImmediate> spvGroupOperands;
+    spv::GroupOperation groupOperation = spv::GroupOperationMax;
+
+    if (op == glslang::EOpBallot || op == glslang::EOpReadFirstInvocation ||
+        op == glslang::EOpReadInvocation) {
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+    } else if (op == glslang::EOpAnyInvocation ||
+               op == glslang::EOpAllInvocations ||
+               op == glslang::EOpAllInvocationsEqual) {
+        builder.addExtension(spv::E_SPV_KHR_subgroup_vote);
+        builder.addCapability(spv::CapabilitySubgroupVoteKHR);
+    } else {
+        builder.addCapability(spv::CapabilityGroups);
+        if (op == glslang::EOpMinInvocationsNonUniform ||
+            op == glslang::EOpMaxInvocationsNonUniform ||
+            op == glslang::EOpAddInvocationsNonUniform ||
+            op == glslang::EOpMinInvocationsInclusiveScanNonUniform ||
+            op == glslang::EOpMaxInvocationsInclusiveScanNonUniform ||
+            op == glslang::EOpAddInvocationsInclusiveScanNonUniform ||
+            op == glslang::EOpMinInvocationsExclusiveScanNonUniform ||
+            op == glslang::EOpMaxInvocationsExclusiveScanNonUniform ||
+            op == glslang::EOpAddInvocationsExclusiveScanNonUniform)
+            builder.addExtension(spv::E_SPV_AMD_shader_ballot);
+
+        switch (op) {
+        case glslang::EOpMinInvocations:
+        case glslang::EOpMaxInvocations:
+        case glslang::EOpAddInvocations:
+        case glslang::EOpMinInvocationsNonUniform:
+        case glslang::EOpMaxInvocationsNonUniform:
+        case glslang::EOpAddInvocationsNonUniform:
+            groupOperation = spv::GroupOperationReduce;
+            break;
+        case glslang::EOpMinInvocationsInclusiveScan:
+        case glslang::EOpMaxInvocationsInclusiveScan:
+        case glslang::EOpAddInvocationsInclusiveScan:
+        case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+        case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+        case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+            groupOperation = spv::GroupOperationInclusiveScan;
+            break;
+        case glslang::EOpMinInvocationsExclusiveScan:
+        case glslang::EOpMaxInvocationsExclusiveScan:
+        case glslang::EOpAddInvocationsExclusiveScan:
+        case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+        case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+        case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+            groupOperation = spv::GroupOperationExclusiveScan;
+            break;
+        default:
+            break;
+        }
+        spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+        spvGroupOperands.push_back(scope);
+        if (groupOperation != spv::GroupOperationMax) {
+            spv::IdImmediate groupOp = { false, (unsigned)groupOperation };
+            spvGroupOperands.push_back(groupOp);
+        }
+    }
+
+    for (auto opIt = operands.begin(); opIt != operands.end(); ++opIt) {
+        spv::IdImmediate op = { true, *opIt };
+        spvGroupOperands.push_back(op);
+    }
+
+    switch (op) {
+    case glslang::EOpAnyInvocation:
+        opCode = spv::OpSubgroupAnyKHR;
+        break;
+    case glslang::EOpAllInvocations:
+        opCode = spv::OpSubgroupAllKHR;
+        break;
+    case glslang::EOpAllInvocationsEqual:
+        opCode = spv::OpSubgroupAllEqualKHR;
+        break;
+    case glslang::EOpReadInvocation:
+        opCode = spv::OpSubgroupReadInvocationKHR;
+        if (builder.isVectorType(typeId))
+            return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+        break;
+    case glslang::EOpReadFirstInvocation:
+        opCode = spv::OpSubgroupFirstInvocationKHR;
+        break;
+    case glslang::EOpBallot:
+    {
+        // NOTE: According to the spec, the result type of "OpSubgroupBallotKHR" must be a 4 component vector of 32
+        // bit integer types. The GLSL built-in function "ballotARB()" assumes the maximum number of invocations in
+        // a subgroup is 64. Thus, we have to convert uvec4.xy to uint64_t as follows:
+        //
+        //     result = Bitcast(SubgroupBallotKHR(Predicate).xy)
+        //
+        spv::Id uintType = builder.makeUintType(32);
+        spv::Id uvec4Type = builder.makeVectorType(uintType, 4);
+        spv::Id result = builder.createOp(spv::OpSubgroupBallotKHR, uvec4Type, spvGroupOperands);
+
+        std::vector<spv::Id> components;
+        components.push_back(builder.createCompositeExtract(result, uintType, 0));
+        components.push_back(builder.createCompositeExtract(result, uintType, 1));
+
+        spv::Id uvec2Type = builder.makeVectorType(uintType, 2);
+        return builder.createUnaryOp(spv::OpBitcast, typeId,
+            builder.createCompositeConstruct(uvec2Type, components));
+    }
+
+    case glslang::EOpMinInvocations:
+    case glslang::EOpMaxInvocations:
+    case glslang::EOpAddInvocations:
+    case glslang::EOpMinInvocationsInclusiveScan:
+    case glslang::EOpMaxInvocationsInclusiveScan:
+    case glslang::EOpAddInvocationsInclusiveScan:
+    case glslang::EOpMinInvocationsExclusiveScan:
+    case glslang::EOpMaxInvocationsExclusiveScan:
+    case glslang::EOpAddInvocationsExclusiveScan:
+        if (op == glslang::EOpMinInvocations ||
+            op == glslang::EOpMinInvocationsInclusiveScan ||
+            op == glslang::EOpMinInvocationsExclusiveScan) {
+            if (isFloat)
+                opCode = spv::OpGroupFMin;
+            else {
+                if (isUnsigned)
+                    opCode = spv::OpGroupUMin;
+                else
+                    opCode = spv::OpGroupSMin;
+            }
+        } else if (op == glslang::EOpMaxInvocations ||
+                   op == glslang::EOpMaxInvocationsInclusiveScan ||
+                   op == glslang::EOpMaxInvocationsExclusiveScan) {
+            if (isFloat)
+                opCode = spv::OpGroupFMax;
+            else {
+                if (isUnsigned)
+                    opCode = spv::OpGroupUMax;
+                else
+                    opCode = spv::OpGroupSMax;
+            }
+        } else {
+            if (isFloat)
+                opCode = spv::OpGroupFAdd;
+            else
+                opCode = spv::OpGroupIAdd;
+        }
+
+        if (builder.isVectorType(typeId))
+            return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+
+        break;
+    case glslang::EOpMinInvocationsNonUniform:
+    case glslang::EOpMaxInvocationsNonUniform:
+    case glslang::EOpAddInvocationsNonUniform:
+    case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+    case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+    case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+    case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+    case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+    case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+        if (op == glslang::EOpMinInvocationsNonUniform ||
+            op == glslang::EOpMinInvocationsInclusiveScanNonUniform ||
+            op == glslang::EOpMinInvocationsExclusiveScanNonUniform) {
+            if (isFloat)
+                opCode = spv::OpGroupFMinNonUniformAMD;
+            else {
+                if (isUnsigned)
+                    opCode = spv::OpGroupUMinNonUniformAMD;
+                else
+                    opCode = spv::OpGroupSMinNonUniformAMD;
+            }
+        }
+        else if (op == glslang::EOpMaxInvocationsNonUniform ||
+                 op == glslang::EOpMaxInvocationsInclusiveScanNonUniform ||
+                 op == glslang::EOpMaxInvocationsExclusiveScanNonUniform) {
+            if (isFloat)
+                opCode = spv::OpGroupFMaxNonUniformAMD;
+            else {
+                if (isUnsigned)
+                    opCode = spv::OpGroupUMaxNonUniformAMD;
+                else
+                    opCode = spv::OpGroupSMaxNonUniformAMD;
+            }
+        }
+        else {
+            if (isFloat)
+                opCode = spv::OpGroupFAddNonUniformAMD;
+            else
+                opCode = spv::OpGroupIAddNonUniformAMD;
+        }
+
+        if (builder.isVectorType(typeId))
+            return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+
+        break;
+    default:
+        logger->missingFunctionality("invocation operation");
+        return spv::NoResult;
+    }
+
+    assert(opCode != spv::OpNop);
+    return builder.createOp(opCode, typeId, spvGroupOperands);
+}
+
+// Create group invocation operations on a vector
+spv::Id TGlslangToSpvTraverser::CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation,
+    spv::Id typeId, std::vector<spv::Id>& operands)
+{
+    assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin ||
+           op == spv::OpGroupFMax || op == spv::OpGroupUMax || op == spv::OpGroupSMax ||
+           op == spv::OpGroupFAdd || op == spv::OpGroupIAdd || op == spv::OpGroupBroadcast ||
+           op == spv::OpSubgroupReadInvocationKHR ||
+           op == spv::OpGroupFMinNonUniformAMD || op == spv::OpGroupUMinNonUniformAMD ||
+           op == spv::OpGroupSMinNonUniformAMD ||
+           op == spv::OpGroupFMaxNonUniformAMD || op == spv::OpGroupUMaxNonUniformAMD ||
+           op == spv::OpGroupSMaxNonUniformAMD ||
+           op == spv::OpGroupFAddNonUniformAMD || op == spv::OpGroupIAddNonUniformAMD);
+
+    // Handle group invocation operations scalar by scalar.
+    // The result type is the same type as the original type.
+    // The algorithm is to:
+    // - break the vector into scalars
+    // - apply the operation to each scalar
+    // - make a vector out of the scalar results
+
+    // get the types sorted out
+    int numComponents = builder.getNumComponents(operands[0]);
+    spv::Id scalarType = builder.getScalarTypeId(builder.getTypeId(operands[0]));
+    std::vector<spv::Id> results;
+
+    // do each scalar op
+    for (int comp = 0; comp < numComponents; ++comp) {
+        std::vector<unsigned int> indexes;
+        indexes.push_back(comp);
+        spv::IdImmediate scalar = { true, builder.createCompositeExtract(operands[0], scalarType, indexes) };
+        std::vector<spv::IdImmediate> spvGroupOperands;
+        if (op == spv::OpSubgroupReadInvocationKHR) {
+            spvGroupOperands.push_back(scalar);
+            spv::IdImmediate operand = { true, operands[1] };
+            spvGroupOperands.push_back(operand);
+        } else if (op == spv::OpGroupBroadcast) {
+            spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+            spvGroupOperands.push_back(scope);
+            spvGroupOperands.push_back(scalar);
+            spv::IdImmediate operand = { true, operands[1] };
+            spvGroupOperands.push_back(operand);
+        } else {
+            spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+            spvGroupOperands.push_back(scope);
+            spv::IdImmediate groupOp = { false, (unsigned)groupOperation };
+            spvGroupOperands.push_back(groupOp);
+            spvGroupOperands.push_back(scalar);
+        }
+
+        results.push_back(builder.createOp(op, scalarType, spvGroupOperands));
+    }
+
+    // put the pieces together
+    return builder.createCompositeConstruct(typeId, results);
+}
+
+// Create subgroup invocation operations.
+spv::Id TGlslangToSpvTraverser::createSubgroupOperation(glslang::TOperator op, spv::Id typeId,
+    std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+    // Add the required capabilities.
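+    // e.g. a GLSL subgroupAdd() (EOpSubgroupAdd) must declare both
+    // GroupNonUniform and GroupNonUniformArithmetic before the
+    // OpGroupNonUniformIAdd/FAdd chosen further down is legal.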
+    switch (op) {
+    case glslang::EOpSubgroupElect:
+        builder.addCapability(spv::CapabilityGroupNonUniform);
+        break;
+    case glslang::EOpSubgroupAll:
+    case glslang::EOpSubgroupAny:
+    case glslang::EOpSubgroupAllEqual:
+        builder.addCapability(spv::CapabilityGroupNonUniform);
+        builder.addCapability(spv::CapabilityGroupNonUniformVote);
+        break;
+    case glslang::EOpSubgroupBroadcast:
+    case glslang::EOpSubgroupBroadcastFirst:
+    case glslang::EOpSubgroupBallot:
+    case glslang::EOpSubgroupInverseBallot:
+    case glslang::EOpSubgroupBallotBitExtract:
+    case glslang::EOpSubgroupBallotBitCount:
+    case glslang::EOpSubgroupBallotInclusiveBitCount:
+    case glslang::EOpSubgroupBallotExclusiveBitCount:
+    case glslang::EOpSubgroupBallotFindLSB:
+    case glslang::EOpSubgroupBallotFindMSB:
+        builder.addCapability(spv::CapabilityGroupNonUniform);
+        builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+        break;
+    case glslang::EOpSubgroupShuffle:
+    case glslang::EOpSubgroupShuffleXor:
+        builder.addCapability(spv::CapabilityGroupNonUniform);
+        builder.addCapability(spv::CapabilityGroupNonUniformShuffle);
+        break;
+    case glslang::EOpSubgroupShuffleUp:
+    case glslang::EOpSubgroupShuffleDown:
+        builder.addCapability(spv::CapabilityGroupNonUniform);
+        builder.addCapability(spv::CapabilityGroupNonUniformShuffleRelative);
+        break;
+    case glslang::EOpSubgroupAdd:
+    case glslang::EOpSubgroupMul:
+    case glslang::EOpSubgroupMin:
+    case glslang::EOpSubgroupMax:
+    case glslang::EOpSubgroupAnd:
+    case glslang::EOpSubgroupOr:
+    case glslang::EOpSubgroupXor:
+    case glslang::EOpSubgroupInclusiveAdd:
+    case glslang::EOpSubgroupInclusiveMul:
+    case glslang::EOpSubgroupInclusiveMin:
+    case glslang::EOpSubgroupInclusiveMax:
+    case glslang::EOpSubgroupInclusiveAnd:
+    case glslang::EOpSubgroupInclusiveOr:
+    case glslang::EOpSubgroupInclusiveXor:
+    case glslang::EOpSubgroupExclusiveAdd:
+    case glslang::EOpSubgroupExclusiveMul:
+    case glslang::EOpSubgroupExclusiveMin:
+    case glslang::EOpSubgroupExclusiveMax:
+    case glslang::EOpSubgroupExclusiveAnd:
+    case glslang::EOpSubgroupExclusiveOr:
+    case glslang::EOpSubgroupExclusiveXor:
+        builder.addCapability(spv::CapabilityGroupNonUniform);
+        builder.addCapability(spv::CapabilityGroupNonUniformArithmetic);
+        break;
+    case glslang::EOpSubgroupClusteredAdd:
+    case glslang::EOpSubgroupClusteredMul:
+    case glslang::EOpSubgroupClusteredMin:
+    case glslang::EOpSubgroupClusteredMax:
+    case glslang::EOpSubgroupClusteredAnd:
+    case glslang::EOpSubgroupClusteredOr:
+    case glslang::EOpSubgroupClusteredXor:
+        builder.addCapability(spv::CapabilityGroupNonUniform);
+        builder.addCapability(spv::CapabilityGroupNonUniformClustered);
+        break;
+    case glslang::EOpSubgroupQuadBroadcast:
+    case glslang::EOpSubgroupQuadSwapHorizontal:
+    case glslang::EOpSubgroupQuadSwapVertical:
+    case glslang::EOpSubgroupQuadSwapDiagonal:
+        builder.addCapability(spv::CapabilityGroupNonUniform);
+        builder.addCapability(spv::CapabilityGroupNonUniformQuad);
+        break;
+    case glslang::EOpSubgroupPartitionedAdd:
+    case glslang::EOpSubgroupPartitionedMul:
+    case glslang::EOpSubgroupPartitionedMin:
+    case glslang::EOpSubgroupPartitionedMax:
+    case glslang::EOpSubgroupPartitionedAnd:
+    case glslang::EOpSubgroupPartitionedOr:
+    case glslang::EOpSubgroupPartitionedXor:
+    case glslang::EOpSubgroupPartitionedInclusiveAdd:
+    case glslang::EOpSubgroupPartitionedInclusiveMul:
+    case glslang::EOpSubgroupPartitionedInclusiveMin:
+    case glslang::EOpSubgroupPartitionedInclusiveMax:
+    case glslang::EOpSubgroupPartitionedInclusiveAnd:
+    case glslang::EOpSubgroupPartitionedInclusiveOr:
+    case glslang::EOpSubgroupPartitionedInclusiveXor:
+    case glslang::EOpSubgroupPartitionedExclusiveAdd:
+    case glslang::EOpSubgroupPartitionedExclusiveMul:
+    case glslang::EOpSubgroupPartitionedExclusiveMin:
+    case glslang::EOpSubgroupPartitionedExclusiveMax:
+    case glslang::EOpSubgroupPartitionedExclusiveAnd:
+    case glslang::EOpSubgroupPartitionedExclusiveOr:
+    case glslang::EOpSubgroupPartitionedExclusiveXor:
+        builder.addExtension(spv::E_SPV_NV_shader_subgroup_partitioned);
+        builder.addCapability(spv::CapabilityGroupNonUniformPartitionedNV);
+        break;
+    default: assert(0 && "Unhandled subgroup operation!");
+    }
+
+    const bool isUnsigned = isTypeUnsignedInt(typeProxy);
+    const bool isFloat = isTypeFloat(typeProxy);
+    const bool isBool = typeProxy == glslang::EbtBool;
+
+    spv::Op opCode = spv::OpNop;
+
+    // Figure out which opcode to use.
+    switch (op) {
+    case glslang::EOpSubgroupElect: opCode = spv::OpGroupNonUniformElect; break;
+    case glslang::EOpSubgroupAll: opCode = spv::OpGroupNonUniformAll; break;
+    case glslang::EOpSubgroupAny: opCode = spv::OpGroupNonUniformAny; break;
+    case glslang::EOpSubgroupAllEqual: opCode = spv::OpGroupNonUniformAllEqual; break;
+    case glslang::EOpSubgroupBroadcast: opCode = spv::OpGroupNonUniformBroadcast; break;
+    case glslang::EOpSubgroupBroadcastFirst: opCode = spv::OpGroupNonUniformBroadcastFirst; break;
+    case glslang::EOpSubgroupBallot: opCode = spv::OpGroupNonUniformBallot; break;
+    case glslang::EOpSubgroupInverseBallot: opCode = spv::OpGroupNonUniformInverseBallot; break;
+    case glslang::EOpSubgroupBallotBitExtract: opCode = spv::OpGroupNonUniformBallotBitExtract; break;
+    case glslang::EOpSubgroupBallotBitCount:
+    case glslang::EOpSubgroupBallotInclusiveBitCount:
+    case glslang::EOpSubgroupBallotExclusiveBitCount: opCode = spv::OpGroupNonUniformBallotBitCount; break;
+    case glslang::EOpSubgroupBallotFindLSB: opCode = spv::OpGroupNonUniformBallotFindLSB; break;
+    case glslang::EOpSubgroupBallotFindMSB: opCode = spv::OpGroupNonUniformBallotFindMSB; break;
+    case glslang::EOpSubgroupShuffle: opCode = spv::OpGroupNonUniformShuffle; break;
+    case glslang::EOpSubgroupShuffleXor: opCode = spv::OpGroupNonUniformShuffleXor; break;
+    case glslang::EOpSubgroupShuffleUp: opCode = spv::OpGroupNonUniformShuffleUp; break;
+    case glslang::EOpSubgroupShuffleDown: opCode = spv::OpGroupNonUniformShuffleDown; break;
+    case glslang::EOpSubgroupAdd:
+    case glslang::EOpSubgroupInclusiveAdd:
+    case glslang::EOpSubgroupExclusiveAdd:
+    case glslang::EOpSubgroupClusteredAdd:
+    case glslang::EOpSubgroupPartitionedAdd:
+    case glslang::EOpSubgroupPartitionedInclusiveAdd:
+    case glslang::EOpSubgroupPartitionedExclusiveAdd:
+        if (isFloat) {
+            opCode = spv::OpGroupNonUniformFAdd;
+        } else {
+            opCode = spv::OpGroupNonUniformIAdd;
+        }
+        break;
+    case glslang::EOpSubgroupMul:
+    case glslang::EOpSubgroupInclusiveMul:
+    case glslang::EOpSubgroupExclusiveMul:
+    case glslang::EOpSubgroupClusteredMul:
+    case glslang::EOpSubgroupPartitionedMul:
+    case glslang::EOpSubgroupPartitionedInclusiveMul:
+    case glslang::EOpSubgroupPartitionedExclusiveMul:
+        if (isFloat) {
+            opCode = spv::OpGroupNonUniformFMul;
+        } else {
+            opCode = spv::OpGroupNonUniformIMul;
+        }
+        break;
+    case glslang::EOpSubgroupMin:
+    case glslang::EOpSubgroupInclusiveMin:
+    case glslang::EOpSubgroupExclusiveMin:
+    case glslang::EOpSubgroupClusteredMin:
+    case glslang::EOpSubgroupPartitionedMin:
+    case glslang::EOpSubgroupPartitionedInclusiveMin:
+    case glslang::EOpSubgroupPartitionedExclusiveMin:
+        if (isFloat) {
+            opCode = spv::OpGroupNonUniformFMin;
+        } else if (isUnsigned) {
+            opCode = spv::OpGroupNonUniformUMin;
+        } else {
+            opCode = spv::OpGroupNonUniformSMin;
+        }
+        break;
+    case glslang::EOpSubgroupMax:
+    case glslang::EOpSubgroupInclusiveMax:
+    case glslang::EOpSubgroupExclusiveMax:
+    case glslang::EOpSubgroupClusteredMax:
+    case glslang::EOpSubgroupPartitionedMax:
+    case glslang::EOpSubgroupPartitionedInclusiveMax:
+    case glslang::EOpSubgroupPartitionedExclusiveMax:
+        if (isFloat) {
+            opCode = spv::OpGroupNonUniformFMax;
+        } else if (isUnsigned) {
+            opCode = spv::OpGroupNonUniformUMax;
+        } else {
+            opCode = spv::OpGroupNonUniformSMax;
+        }
+        break;
+    case glslang::EOpSubgroupAnd:
+    case glslang::EOpSubgroupInclusiveAnd:
+    case glslang::EOpSubgroupExclusiveAnd:
+    case glslang::EOpSubgroupClusteredAnd:
+    case glslang::EOpSubgroupPartitionedAnd:
+    case glslang::EOpSubgroupPartitionedInclusiveAnd:
+    case glslang::EOpSubgroupPartitionedExclusiveAnd:
+        if (isBool) {
+            opCode = spv::OpGroupNonUniformLogicalAnd;
+        } else {
+            opCode = spv::OpGroupNonUniformBitwiseAnd;
+        }
+        break;
+    case glslang::EOpSubgroupOr:
+    case glslang::EOpSubgroupInclusiveOr:
+    case glslang::EOpSubgroupExclusiveOr:
+    case glslang::EOpSubgroupClusteredOr:
+    case glslang::EOpSubgroupPartitionedOr:
+    case glslang::EOpSubgroupPartitionedInclusiveOr:
+    case glslang::EOpSubgroupPartitionedExclusiveOr:
+        if (isBool) {
+            opCode = spv::OpGroupNonUniformLogicalOr;
+        } else {
+            opCode = spv::OpGroupNonUniformBitwiseOr;
+        }
+        break;
+    case glslang::EOpSubgroupXor:
+    case glslang::EOpSubgroupInclusiveXor:
+    case glslang::EOpSubgroupExclusiveXor:
+    case glslang::EOpSubgroupClusteredXor:
+    case glslang::EOpSubgroupPartitionedXor:
+    case glslang::EOpSubgroupPartitionedInclusiveXor:
+    case glslang::EOpSubgroupPartitionedExclusiveXor:
+        if (isBool) {
+            opCode = spv::OpGroupNonUniformLogicalXor;
+        } else {
+            opCode = spv::OpGroupNonUniformBitwiseXor;
+        }
+        break;
+    case glslang::EOpSubgroupQuadBroadcast: opCode = spv::OpGroupNonUniformQuadBroadcast; break;
+    case glslang::EOpSubgroupQuadSwapHorizontal:
+    case glslang::EOpSubgroupQuadSwapVertical:
+    case glslang::EOpSubgroupQuadSwapDiagonal: opCode = spv::OpGroupNonUniformQuadSwap; break;
+    default: assert(0 && "Unhandled subgroup operation!");
+    }
+
+    // get the right Group Operation
+    spv::GroupOperation groupOperation = spv::GroupOperationMax;
+    switch (op) {
+    default:
+        break;
+    case glslang::EOpSubgroupBallotBitCount:
+    case glslang::EOpSubgroupAdd:
+    case glslang::EOpSubgroupMul:
+    case glslang::EOpSubgroupMin:
+    case glslang::EOpSubgroupMax:
+    case glslang::EOpSubgroupAnd:
+    case glslang::EOpSubgroupOr:
+    case glslang::EOpSubgroupXor:
+        groupOperation = spv::GroupOperationReduce;
+        break;
+    case glslang::EOpSubgroupBallotInclusiveBitCount:
+    case glslang::EOpSubgroupInclusiveAdd:
+    case glslang::EOpSubgroupInclusiveMul:
+    case glslang::EOpSubgroupInclusiveMin:
+    case glslang::EOpSubgroupInclusiveMax:
+    case glslang::EOpSubgroupInclusiveAnd:
+    case glslang::EOpSubgroupInclusiveOr:
+    case glslang::EOpSubgroupInclusiveXor:
+        groupOperation = spv::GroupOperationInclusiveScan;
+        break;
+    case glslang::EOpSubgroupBallotExclusiveBitCount:
+    case glslang::EOpSubgroupExclusiveAdd:
+    case glslang::EOpSubgroupExclusiveMul:
+    case glslang::EOpSubgroupExclusiveMin:
+    case glslang::EOpSubgroupExclusiveMax:
+    case glslang::EOpSubgroupExclusiveAnd:
+    case glslang::EOpSubgroupExclusiveOr:
+    case glslang::EOpSubgroupExclusiveXor:
+        groupOperation = spv::GroupOperationExclusiveScan;
+        break;
+    case glslang::EOpSubgroupClusteredAdd:
+    case glslang::EOpSubgroupClusteredMul:
+    case glslang::EOpSubgroupClusteredMin:
+    case glslang::EOpSubgroupClusteredMax:
+    case glslang::EOpSubgroupClusteredAnd:
+    case glslang::EOpSubgroupClusteredOr:
+    case glslang::EOpSubgroupClusteredXor:
+        groupOperation = spv::GroupOperationClusteredReduce;
+        break;
+    case glslang::EOpSubgroupPartitionedAdd:
+    case glslang::EOpSubgroupPartitionedMul:
+    case glslang::EOpSubgroupPartitionedMin:
+    case glslang::EOpSubgroupPartitionedMax:
+    case glslang::EOpSubgroupPartitionedAnd:
+    case glslang::EOpSubgroupPartitionedOr:
+    case glslang::EOpSubgroupPartitionedXor:
+        groupOperation = spv::GroupOperationPartitionedReduceNV;
+        break;
+    case glslang::EOpSubgroupPartitionedInclusiveAdd:
+    case glslang::EOpSubgroupPartitionedInclusiveMul:
+    case glslang::EOpSubgroupPartitionedInclusiveMin:
+    case glslang::EOpSubgroupPartitionedInclusiveMax:
+    case glslang::EOpSubgroupPartitionedInclusiveAnd:
+    case glslang::EOpSubgroupPartitionedInclusiveOr:
+    case glslang::EOpSubgroupPartitionedInclusiveXor:
+        groupOperation = spv::GroupOperationPartitionedInclusiveScanNV;
+        break;
+    case glslang::EOpSubgroupPartitionedExclusiveAdd:
+    case glslang::EOpSubgroupPartitionedExclusiveMul:
+    case glslang::EOpSubgroupPartitionedExclusiveMin:
+    case glslang::EOpSubgroupPartitionedExclusiveMax:
+    case glslang::EOpSubgroupPartitionedExclusiveAnd:
+    case glslang::EOpSubgroupPartitionedExclusiveOr:
+    case glslang::EOpSubgroupPartitionedExclusiveXor:
+        groupOperation = spv::GroupOperationPartitionedExclusiveScanNV;
+        break;
+    }
+
+    // build the instruction
+    std::vector<spv::IdImmediate> spvGroupOperands;
+
+    // Every operation begins with the Execution Scope operand.
+    spv::IdImmediate executionScope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+    spvGroupOperands.push_back(executionScope);
+
+    // Next, for all operations that use a Group Operation, push that as an operand.
+    if (groupOperation != spv::GroupOperationMax) {
+        spv::IdImmediate groupOperand = { false, (unsigned)groupOperation };
+        spvGroupOperands.push_back(groupOperand);
+    }
+
+    // Push back the operands next.
+    for (auto opIt = operands.cbegin(); opIt != operands.cend(); ++opIt) {
+        spv::IdImmediate operand = { true, *opIt };
+        spvGroupOperands.push_back(operand);
+    }
+
+    // Some opcodes have additional operands.
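+    // Currently that is only the quad-swap family: OpGroupNonUniformQuadSwap
+    // takes an extra Direction operand (0 = horizontal, 1 = vertical,
+    // 2 = diagonal), built below as a constant id.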
+    spv::Id directionId = spv::NoResult;
+    switch (op) {
+    default: break;
+    case glslang::EOpSubgroupQuadSwapHorizontal: directionId = builder.makeUintConstant(0); break;
+    case glslang::EOpSubgroupQuadSwapVertical: directionId = builder.makeUintConstant(1); break;
+    case glslang::EOpSubgroupQuadSwapDiagonal: directionId = builder.makeUintConstant(2); break;
+    }
+    if (directionId != spv::NoResult) {
+        spv::IdImmediate direction = { true, directionId };
+        spvGroupOperands.push_back(direction);
+    }
+
+    return builder.createOp(opCode, typeId, spvGroupOperands);
+}
+
+spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::Decoration precision,
+    spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+    bool isUnsigned = isTypeUnsignedInt(typeProxy);
+    bool isFloat = isTypeFloat(typeProxy);
+
+    spv::Op opCode = spv::OpNop;
+    int extBuiltins = -1;
+    int libCall = -1;
+    size_t consumedOperands = operands.size();
+    spv::Id typeId0 = 0;
+    if (consumedOperands > 0)
+        typeId0 = builder.getTypeId(operands[0]);
+    spv::Id typeId1 = 0;
+    if (consumedOperands > 1)
+        typeId1 = builder.getTypeId(operands[1]);
+    spv::Id frexpIntType = 0;
+
+    switch (op) {
+    case glslang::EOpMin:
+        if (isFloat)
+            libCall = nanMinMaxClamp ? spv::GLSLstd450NMin : spv::GLSLstd450FMin;
+        else if (isUnsigned)
+            libCall = spv::GLSLstd450UMin;
+        else
+            libCall = spv::GLSLstd450SMin;
+        builder.promoteScalar(precision, operands.front(), operands.back());
+        break;
+    case glslang::EOpModf:
+        libCall = spv::GLSLstd450Modf;
+        break;
+    case glslang::EOpMax:
+        if (isFloat)
+            libCall = nanMinMaxClamp ? spv::GLSLstd450NMax : spv::GLSLstd450FMax;
+        else if (isUnsigned)
+            libCall = spv::GLSLstd450UMax;
+        else
+            libCall = spv::GLSLstd450SMax;
+        builder.promoteScalar(precision, operands.front(), operands.back());
+        break;
+    case glslang::EOpPow:
+        libCall = spv::GLSLstd450Pow;
+        break;
+    case glslang::EOpDot:
+        opCode = spv::OpDot;
+        break;
+    case glslang::EOpAtan:
+        libCall = spv::GLSLstd450Atan2;
+        break;
+
+    case glslang::EOpClamp:
+        if (isFloat)
+            libCall = nanMinMaxClamp ? spv::GLSLstd450NClamp : spv::GLSLstd450FClamp;
+        else if (isUnsigned)
+            libCall = spv::GLSLstd450UClamp;
+        else
+            libCall = spv::GLSLstd450SClamp;
+        builder.promoteScalar(precision, operands.front(), operands[1]);
+        builder.promoteScalar(precision, operands.front(), operands[2]);
+        break;
+    case glslang::EOpMix:
+        if (! builder.isBoolType(builder.getScalarTypeId(builder.getTypeId(operands.back())))) {
+            assert(isFloat);
+            libCall = spv::GLSLstd450FMix;
+        } else {
+            opCode = spv::OpSelect;
+            std::swap(operands.front(), operands.back());
+        }
+        builder.promoteScalar(precision, operands.front(), operands.back());
+        break;
+    case glslang::EOpStep:
+        libCall = spv::GLSLstd450Step;
+        builder.promoteScalar(precision, operands.front(), operands.back());
+        break;
+    case glslang::EOpSmoothStep:
+        libCall = spv::GLSLstd450SmoothStep;
+        builder.promoteScalar(precision, operands[0], operands[2]);
+        builder.promoteScalar(precision, operands[1], operands[2]);
+        break;
+
+    case glslang::EOpDistance:
+        libCall = spv::GLSLstd450Distance;
+        break;
+    case glslang::EOpCross:
+        libCall = spv::GLSLstd450Cross;
+        break;
+    case glslang::EOpFaceForward:
+        libCall = spv::GLSLstd450FaceForward;
+        break;
+    case glslang::EOpReflect:
+        libCall = spv::GLSLstd450Reflect;
+        break;
+    case glslang::EOpRefract:
+        libCall = spv::GLSLstd450Refract;
+        break;
+    case glslang::EOpBarrier:
+        {
+            // This is for the extended controlBarrier function, with four operands.
+            // The unextended barrier() goes through createNoArgOperation.
+            assert(operands.size() == 4);
+            unsigned int executionScope = builder.getConstantScalar(operands[0]);
+            unsigned int memoryScope = builder.getConstantScalar(operands[1]);
+            unsigned int semantics = builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]);
+            builder.createControlBarrier((spv::Scope)executionScope, (spv::Scope)memoryScope,
+                (spv::MemorySemanticsMask)semantics);
+            if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask |
+                             spv::MemorySemanticsMakeVisibleKHRMask |
+                             spv::MemorySemanticsOutputMemoryKHRMask |
+                             spv::MemorySemanticsVolatileMask)) {
+                builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+            }
+            if (glslangIntermediate->usingVulkanMemoryModel() && (executionScope == spv::ScopeDevice ||
+                memoryScope == spv::ScopeDevice)) {
+                builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+            }
+            return 0;
+        }
+        break;
+    case glslang::EOpMemoryBarrier:
+        {
+            // This is for the extended memoryBarrier function, with three operands.
+            // The unextended memoryBarrier() goes through createNoArgOperation.
+            assert(operands.size() == 3);
+            unsigned int memoryScope = builder.getConstantScalar(operands[0]);
+            unsigned int semantics = builder.getConstantScalar(operands[1]) | builder.getConstantScalar(operands[2]);
+            builder.createMemoryBarrier((spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
+            if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask |
+                             spv::MemorySemanticsMakeVisibleKHRMask |
+                             spv::MemorySemanticsOutputMemoryKHRMask |
+                             spv::MemorySemanticsVolatileMask)) {
+                builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+            }
+            if (glslangIntermediate->usingVulkanMemoryModel() && memoryScope == spv::ScopeDevice) {
+                builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+            }
+            return 0;
+        }
+        break;
+
+#ifndef GLSLANG_WEB
+    case glslang::EOpInterpolateAtSample:
+        if (typeProxy == glslang::EbtFloat16)
+            builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+        libCall = spv::GLSLstd450InterpolateAtSample;
+        break;
+    case glslang::EOpInterpolateAtOffset:
+        if (typeProxy == glslang::EbtFloat16)
+            builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+        libCall = spv::GLSLstd450InterpolateAtOffset;
+        break;
+    case glslang::EOpAddCarry:
+        opCode = spv::OpIAddCarry;
+        typeId = builder.makeStructResultType(typeId0, typeId0);
+        consumedOperands = 2;
+        break;
+    case glslang::EOpSubBorrow:
+        opCode = spv::OpISubBorrow;
+        typeId = builder.makeStructResultType(typeId0, typeId0);
+        consumedOperands = 2;
+        break;
+    case glslang::EOpUMulExtended:
+        opCode = spv::OpUMulExtended;
+        typeId = builder.makeStructResultType(typeId0, typeId0);
+        consumedOperands = 2;
+        break;
+    case glslang::EOpIMulExtended:
+        opCode = spv::OpSMulExtended;
+        typeId = builder.makeStructResultType(typeId0, typeId0);
+        consumedOperands = 2;
+        break;
+    case glslang::EOpBitfieldExtract:
+        if (isUnsigned)
+            opCode = spv::OpBitFieldUExtract;
+        else
+            opCode = spv::OpBitFieldSExtract;
+        break;
+    case glslang::EOpBitfieldInsert:
+        opCode = spv::OpBitFieldInsert;
+        break;
+
+    case glslang::EOpFma:
+        libCall = spv::GLSLstd450Fma;
+        break;
+    case glslang::EOpFrexp:
+        {
+            libCall = spv::GLSLstd450FrexpStruct;
+            assert(builder.isPointerType(typeId1));
+            typeId1 = builder.getContainedTypeId(typeId1);
+            int width = builder.getScalarTypeWidth(typeId1);
+            if (width == 16)
+                // Using 16-bit exp operand, enable extension SPV_AMD_gpu_shader_int16
+                builder.addExtension(spv::E_SPV_AMD_gpu_shader_int16);
+            if (builder.getNumComponents(operands[0]) == 1)
+                frexpIntType = builder.makeIntegerType(width, true);
+            else
+                frexpIntType = builder.makeVectorType(builder.makeIntegerType(width, true),
+                    builder.getNumComponents(operands[0]));
+            typeId = builder.makeStructResultType(typeId0, frexpIntType);
+            consumedOperands = 1;
+        }
+        break;
+    case glslang::EOpLdexp:
+        libCall = spv::GLSLstd450Ldexp;
+        break;
+
+    case glslang::EOpReadInvocation:
+        return createInvocationsOperation(op, typeId, operands, typeProxy);
+
+    case glslang::EOpSubgroupBroadcast:
+    case glslang::EOpSubgroupBallotBitExtract:
+    case glslang::EOpSubgroupShuffle:
+    case glslang::EOpSubgroupShuffleXor:
+    case glslang::EOpSubgroupShuffleUp:
+    case glslang::EOpSubgroupShuffleDown:
+    case glslang::EOpSubgroupClusteredAdd:
+    case glslang::EOpSubgroupClusteredMul:
+    case glslang::EOpSubgroupClusteredMin:
+    case glslang::EOpSubgroupClusteredMax:
+    case glslang::EOpSubgroupClusteredAnd:
+    case glslang::EOpSubgroupClusteredOr:
+    case glslang::EOpSubgroupClusteredXor:
+    case glslang::EOpSubgroupQuadBroadcast:
+    case glslang::EOpSubgroupPartitionedAdd:
+    case glslang::EOpSubgroupPartitionedMul:
+    case glslang::EOpSubgroupPartitionedMin:
+    case glslang::EOpSubgroupPartitionedMax:
+    case glslang::EOpSubgroupPartitionedAnd:
+    case glslang::EOpSubgroupPartitionedOr:
+    case glslang::EOpSubgroupPartitionedXor:
+    case glslang::EOpSubgroupPartitionedInclusiveAdd:
+    case glslang::EOpSubgroupPartitionedInclusiveMul:
+    case glslang::EOpSubgroupPartitionedInclusiveMin:
+    case glslang::EOpSubgroupPartitionedInclusiveMax:
+    case glslang::EOpSubgroupPartitionedInclusiveAnd:
+    case glslang::EOpSubgroupPartitionedInclusiveOr:
+    case glslang::EOpSubgroupPartitionedInclusiveXor:
+    case glslang::EOpSubgroupPartitionedExclusiveAdd:
+    case glslang::EOpSubgroupPartitionedExclusiveMul:
+    case glslang::EOpSubgroupPartitionedExclusiveMin:
+    case glslang::EOpSubgroupPartitionedExclusiveMax:
+    case glslang::EOpSubgroupPartitionedExclusiveAnd:
+    case glslang::EOpSubgroupPartitionedExclusiveOr:
+    case glslang::EOpSubgroupPartitionedExclusiveXor:
+        return createSubgroupOperation(op, typeId, operands, typeProxy);
+
+    case glslang::EOpSwizzleInvocations:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+        libCall = spv::SwizzleInvocationsAMD;
+        break;
+    case glslang::EOpSwizzleInvocationsMasked:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+        libCall = spv::SwizzleInvocationsMaskedAMD;
+        break;
+    case glslang::EOpWriteInvocation:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+        libCall = spv::WriteInvocationAMD;
+        break;
+
+    case glslang::EOpMin3:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+        if (isFloat)
+            libCall = spv::FMin3AMD;
+        else {
+            if (isUnsigned)
+                libCall = spv::UMin3AMD;
+            else
+                libCall = spv::SMin3AMD;
+        }
+        break;
+    case glslang::EOpMax3:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+        if (isFloat)
+            libCall = spv::FMax3AMD;
+        else {
+            if (isUnsigned)
+                libCall = spv::UMax3AMD;
+            else
+                libCall = spv::SMax3AMD;
+        }
+        break;
+    case glslang::EOpMid3:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+        if (isFloat)
+            libCall = spv::FMid3AMD;
+        else {
+            if (isUnsigned)
+                libCall = spv::UMid3AMD;
+            else
+                libCall = spv::SMid3AMD;
+        }
+        break;
+
+    case glslang::EOpInterpolateAtVertex:
+        if (typeProxy == glslang::EbtFloat16)
+            builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        libCall = spv::InterpolateAtVertexAMD;
+        break;
+
+    case glslang::EOpReportIntersection:
+        typeId = builder.makeBoolType();
+        opCode = spv::OpReportIntersectionKHR;
+        break;
+    case glslang::EOpTrace:
+        builder.createNoResultOp(spv::OpTraceRayKHR, operands);
+        return 0;
+    case glslang::EOpExecuteCallable:
+        builder.createNoResultOp(spv::OpExecuteCallableKHR, operands);
+        return 0;
+
+    case glslang::EOpRayQueryInitialize:
+        builder.createNoResultOp(spv::OpRayQueryInitializeKHR, operands);
+        return 0;
+    case glslang::EOpRayQueryTerminate:
+        builder.createNoResultOp(spv::OpRayQueryTerminateKHR, operands);
+        return 0;
+    case glslang::EOpRayQueryGenerateIntersection:
+        builder.createNoResultOp(spv::OpRayQueryGenerateIntersectionKHR, operands);
+        return 0;
+    case glslang::EOpRayQueryConfirmIntersection:
+        builder.createNoResultOp(spv::OpRayQueryConfirmIntersectionKHR, operands);
+        return 0;
+    case glslang::EOpRayQueryProceed:
+        typeId = builder.makeBoolType();
+        opCode = spv::OpRayQueryProceedKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionType:
+        typeId = builder.makeUintType(32);
+        opCode = spv::OpRayQueryGetIntersectionTypeKHR;
+        break;
+    case glslang::EOpRayQueryGetRayTMin:
+        typeId = builder.makeFloatType(32);
+        opCode = spv::OpRayQueryGetRayTMinKHR;
+        break;
+    case glslang::EOpRayQueryGetRayFlags:
+        typeId = builder.makeIntType(32);
+        opCode = spv::OpRayQueryGetRayFlagsKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionT:
+        typeId = builder.makeFloatType(32);
+        opCode = spv::OpRayQueryGetIntersectionTKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionInstanceCustomIndex:
+        typeId = builder.makeIntType(32);
+        opCode = spv::OpRayQueryGetIntersectionInstanceCustomIndexKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionInstanceId:
+        typeId = builder.makeIntType(32);
+        opCode = spv::OpRayQueryGetIntersectionInstanceIdKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset:
+        typeId = builder.makeIntType(32);
+        opCode = spv::OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionGeometryIndex:
+        typeId = builder.makeIntType(32);
+        opCode = spv::OpRayQueryGetIntersectionGeometryIndexKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionPrimitiveIndex:
+        typeId = builder.makeIntType(32);
+        opCode = spv::OpRayQueryGetIntersectionPrimitiveIndexKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionBarycentrics:
+        typeId = builder.makeVectorType(builder.makeFloatType(32), 2);
+        opCode = spv::OpRayQueryGetIntersectionBarycentricsKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionFrontFace:
+        typeId = builder.makeBoolType();
+        opCode = spv::OpRayQueryGetIntersectionFrontFaceKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionCandidateAABBOpaque:
+        typeId = builder.makeBoolType();
+        opCode = spv::OpRayQueryGetIntersectionCandidateAABBOpaqueKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionObjectRayDirection:
+        typeId = builder.makeVectorType(builder.makeFloatType(32), 3);
+        opCode = spv::OpRayQueryGetIntersectionObjectRayDirectionKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionObjectRayOrigin:
+        typeId = builder.makeVectorType(builder.makeFloatType(32), 3);
+        opCode = spv::OpRayQueryGetIntersectionObjectRayOriginKHR;
+        break;
+    case glslang::EOpRayQueryGetWorldRayDirection:
+        typeId = builder.makeVectorType(builder.makeFloatType(32), 3);
+        opCode = spv::OpRayQueryGetWorldRayDirectionKHR;
+        break;
+    case glslang::EOpRayQueryGetWorldRayOrigin:
+        typeId = builder.makeVectorType(builder.makeFloatType(32), 3);
+        opCode = spv::OpRayQueryGetWorldRayOriginKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionObjectToWorld:
+        typeId = builder.makeMatrixType(builder.makeFloatType(32), 4, 3);
+        opCode = spv::OpRayQueryGetIntersectionObjectToWorldKHR;
+        break;
+    case glslang::EOpRayQueryGetIntersectionWorldToObject:
+        typeId = builder.makeMatrixType(builder.makeFloatType(32), 4, 3);
+        opCode = spv::OpRayQueryGetIntersectionWorldToObjectKHR;
+        break;
+    case glslang::EOpWritePackedPrimitiveIndices4x8NV:
+        builder.createNoResultOp(spv::OpWritePackedPrimitiveIndices4x8NV, operands);
+        return 0;
+    case glslang::EOpCooperativeMatrixMulAdd:
+        opCode = spv::OpCooperativeMatrixMulAddNV;
+        break;
+#endif // GLSLANG_WEB
+    default:
+        return 0;
+    }
+
+    spv::Id id = 0;
+    if (libCall >= 0) {
+        // Use an extended instruction from the standard library.
+        // Construct the call arguments, without modifying the original operands vector.
+        // We might need the remaining arguments, e.g. in the EOpFrexp case.
+        std::vector<spv::Id> callArguments(operands.begin(), operands.begin() + consumedOperands);
+        id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, callArguments);
+    } else if (opCode == spv::OpDot && !isFloat) {
+        // int dot(int, int)
+        // NOTE: never called for scalar/vector1, this is turned into simple mul before this can be reached
+        const int componentCount = builder.getNumComponents(operands[0]);
+        spv::Id mulOp = builder.createBinOp(spv::OpIMul, builder.getTypeId(operands[0]), operands[0], operands[1]);
+        builder.setPrecision(mulOp, precision);
+        id = builder.createCompositeExtract(mulOp, typeId, 0);
+        for (int i = 1; i < componentCount; ++i) {
+            builder.setPrecision(id, precision);
+            id = builder.createBinOp(spv::OpIAdd, typeId, id, builder.createCompositeExtract(mulOp, typeId, i));
+        }
+    } else {
+        switch (consumedOperands) {
+        case 0:
+            // should all be handled by visitAggregate and createNoArgOperation
+            assert(0);
+            return 0;
+        case 1:
+            // should all be handled by createUnaryOperation
+            assert(0);
+            return 0;
+        case 2:
+            id = builder.createBinOp(opCode, typeId, operands[0], operands[1]);
+            break;
+        default:
+            // anything 3 or over doesn't have l-value operands, so all should be consumed
+            assert(consumedOperands == operands.size());
+            id = builder.createOp(opCode, typeId, operands);
+            break;
+        }
+    }
+
+#ifndef GLSLANG_WEB
+    // Decode the return types that were structures
+    switch (op) {
+    case glslang::EOpAddCarry:
+    case glslang::EOpSubBorrow:
+        builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]);
+        id = builder.createCompositeExtract(id, typeId0, 0);
+        break;
+    case glslang::EOpUMulExtended:
+    case glslang::EOpIMulExtended:
+        builder.createStore(builder.createCompositeExtract(id, typeId0, 0), operands[3]);
+        builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]);
+        break;
+    case glslang::EOpFrexp:
+        {
+            assert(operands.size() == 2);
+            if (builder.isFloatType(builder.getScalarTypeId(typeId1))) {
+                // "exp" is floating-point type (from HLSL intrinsic)
+                spv::Id member1 = builder.createCompositeExtract(id, frexpIntType, 1);
+                member1 = builder.createUnaryOp(spv::OpConvertSToF, typeId1, member1);
+                builder.createStore(member1, operands[1]);
+            } else
+                // "exp" is integer type (from GLSL built-in function)
+                builder.createStore(builder.createCompositeExtract(id, frexpIntType, 1), operands[1]);
+            id = builder.createCompositeExtract(id, typeId0, 0);
+        }
+        break;
+    default:
+        break;
+    }
+#endif
+
+    return builder.setPrecision(id, precision);
+}
+
+// Intrinsics with no arguments (or no return value, and no precision).
+spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId)
+{
+    // GLSL memory barriers use queue-family scope in the new model, device scope in the old model
+    spv::Scope memoryBarrierScope = glslangIntermediate->usingVulkanMemoryModel() ?
+        spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
+
+    switch (op) {
+    case glslang::EOpBarrier:
+        if (glslangIntermediate->getStage() == EShLangTessControl) {
+            if (glslangIntermediate->usingVulkanMemoryModel()) {
+                builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+                                             spv::MemorySemanticsOutputMemoryKHRMask |
+                                             spv::MemorySemanticsAcquireReleaseMask);
+                builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+            } else {
+                builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
+            }
+        } else {
+            builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+                                         spv::MemorySemanticsWorkgroupMemoryMask |
+                                         spv::MemorySemanticsAcquireReleaseMask);
+        }
+        return 0;
+    case glslang::EOpMemoryBarrier:
+        builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAllMemory |
+                                                        spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpMemoryBarrierBuffer:
+        builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsUniformMemoryMask |
+                                                        spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpMemoryBarrierShared:
+        builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsWorkgroupMemoryMask |
+                                                        spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpGroupMemoryBarrier:
+        builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsAllMemory |
+                                                         spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+#ifndef GLSLANG_WEB
+    case glslang::EOpMemoryBarrierAtomicCounter:
+        builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAtomicCounterMemoryMask |
+                                                        spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpMemoryBarrierImage:
+        builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsImageMemoryMask |
+                                                        spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpAllMemoryBarrierWithGroupSync:
+        builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice,
+                                     spv::MemorySemanticsAllMemory |
+                                     spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpDeviceMemoryBarrier:
+        builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
+                                                      spv::MemorySemanticsImageMemoryMask |
+                                                      spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpDeviceMemoryBarrierWithGroupSync:
+        builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
+                                                                            spv::MemorySemanticsImageMemoryMask |
+                                                                            spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpWorkgroupMemoryBarrier:
+        builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsWorkgroupMemoryMask |
+                                                         spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpWorkgroupMemoryBarrierWithGroupSync:
+        builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+                                     spv::MemorySemanticsWorkgroupMemoryMask |
+                                     spv::MemorySemanticsAcquireReleaseMask);
+        return 0;
+    case glslang::EOpSubgroupBarrier:
        builder.createControlBarrier(spv::ScopeSubgroup, spv::ScopeSubgroup, spv::MemorySemanticsAllMemory |
+            spv::MemorySemanticsAcquireReleaseMask);
+        return spv::NoResult;
+    case glslang::EOpSubgroupMemoryBarrier:
+        builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsAllMemory |
+            spv::MemorySemanticsAcquireReleaseMask);
+        return spv::NoResult;
+    case glslang::EOpSubgroupMemoryBarrierBuffer:
+        builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsUniformMemoryMask |
+            spv::MemorySemanticsAcquireReleaseMask);
+        return spv::NoResult;
+    case glslang::EOpSubgroupMemoryBarrierImage:
+        builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsImageMemoryMask |
+            spv::MemorySemanticsAcquireReleaseMask);
+        return spv::NoResult;
+    case glslang::EOpSubgroupMemoryBarrierShared:
+        builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsWorkgroupMemoryMask |
+            spv::MemorySemanticsAcquireReleaseMask);
+        return spv::NoResult;
+
+    case glslang::EOpEmitVertex:
+        builder.createNoResultOp(spv::OpEmitVertex);
+        return 0;
+    case glslang::EOpEndPrimitive:
+        builder.createNoResultOp(spv::OpEndPrimitive);
+        return 0;
+
+    case glslang::EOpSubgroupElect: {
+        std::vector<spv::Id> operands;
+        return createSubgroupOperation(op, typeId, operands, glslang::EbtVoid);
+    }
+    case glslang::EOpTime:
+    {
+        std::vector<spv::Id> args; // Dummy arguments
+        spv::Id id = builder.createBuiltinCall(typeId, getExtBuiltins(spv::E_SPV_AMD_gcn_shader), spv::TimeAMD, args);
+        return builder.setPrecision(id, precision);
+    }
+    case glslang::EOpIgnoreIntersection:
+        builder.createNoResultOp(spv::OpIgnoreIntersectionKHR);
+        return 0;
+    case glslang::EOpTerminateRay:
+        builder.createNoResultOp(spv::OpTerminateRayKHR);
+        return 0;
+    case glslang::EOpRayQueryInitialize:
+        builder.createNoResultOp(spv::OpRayQueryInitializeKHR);
+        return 0;
+    case glslang::EOpRayQueryTerminate:
+        builder.createNoResultOp(spv::OpRayQueryTerminateKHR);
+        return 0;
+    case glslang::EOpRayQueryGenerateIntersection:
+        builder.createNoResultOp(spv::OpRayQueryGenerateIntersectionKHR);
+        return 0;
+    case glslang::EOpRayQueryConfirmIntersection:
+        builder.createNoResultOp(spv::OpRayQueryConfirmIntersectionKHR);
+        return 0;
+    case glslang::EOpBeginInvocationInterlock:
+        builder.createNoResultOp(spv::OpBeginInvocationInterlockEXT);
+        return 0;
+    case glslang::EOpEndInvocationInterlock:
+        builder.createNoResultOp(spv::OpEndInvocationInterlockEXT);
+        return 0;
+
+    case glslang::EOpIsHelperInvocation:
+    {
+        std::vector<spv::Id> args; // Dummy arguments
+        builder.addExtension(spv::E_SPV_EXT_demote_to_helper_invocation);
+        builder.addCapability(spv::CapabilityDemoteToHelperInvocationEXT);
+        return builder.createOp(spv::OpIsHelperInvocationEXT, typeId, args);
+    }
+
+    case glslang::EOpReadClockSubgroupKHR: {
+        std::vector<spv::Id> args;
+        args.push_back(builder.makeUintConstant(spv::ScopeSubgroup));
+        builder.addExtension(spv::E_SPV_KHR_shader_clock);
+        builder.addCapability(spv::CapabilityShaderClockKHR);
+        return builder.createOp(spv::OpReadClockKHR, typeId, args);
+    }
+
+    case glslang::EOpReadClockDeviceKHR: {
+        std::vector<spv::Id> args;
+        args.push_back(builder.makeUintConstant(spv::ScopeDevice));
+        builder.addExtension(spv::E_SPV_KHR_shader_clock);
+        builder.addCapability(spv::CapabilityShaderClockKHR);
+        return builder.createOp(spv::OpReadClockKHR, typeId, args);
+    }
+#endif
+    default:
+        break;
+    }
+
+    logger->missingFunctionality("unknown operation with no arguments");
+
+    return 0;
+}
+
+spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol)
+{ + auto iter = symbolValues.find(symbol->getId()); + spv::Id id; + if (symbolValues.end() != iter) { + id = iter->second; + return id; + } + + // it was not found, create it + spv::BuiltIn builtIn = TranslateBuiltInDecoration(symbol->getQualifier().builtIn, false); + auto forcedType = getForcedType(symbol->getQualifier().builtIn, symbol->getType()); + id = createSpvVariable(symbol, forcedType.first); + symbolValues[symbol->getId()] = id; + if (forcedType.second != spv::NoType) + forceType[id] = forcedType.second; + + if (symbol->getBasicType() != glslang::EbtBlock) { + builder.addDecoration(id, TranslatePrecisionDecoration(symbol->getType())); + builder.addDecoration(id, TranslateInterpolationDecoration(symbol->getType().getQualifier())); + builder.addDecoration(id, TranslateAuxiliaryStorageDecoration(symbol->getType().getQualifier())); +#ifndef GLSLANG_WEB + addMeshNVDecoration(id, /*member*/ -1, symbol->getType().getQualifier()); + if (symbol->getQualifier().hasComponent()) + builder.addDecoration(id, spv::DecorationComponent, symbol->getQualifier().layoutComponent); + if (symbol->getQualifier().hasIndex()) + builder.addDecoration(id, spv::DecorationIndex, symbol->getQualifier().layoutIndex); +#endif + if (symbol->getType().getQualifier().hasSpecConstantId()) + builder.addDecoration(id, spv::DecorationSpecId, symbol->getType().getQualifier().layoutSpecConstantId); + // atomic counters use this: + if (symbol->getQualifier().hasOffset()) + builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutOffset); + } + + if (symbol->getQualifier().hasLocation()) + builder.addDecoration(id, spv::DecorationLocation, symbol->getQualifier().layoutLocation); + builder.addDecoration(id, TranslateInvariantDecoration(symbol->getType().getQualifier())); + if (symbol->getQualifier().hasStream() && glslangIntermediate->isMultiStream()) { + builder.addCapability(spv::CapabilityGeometryStreams); + builder.addDecoration(id, spv::DecorationStream, symbol->getQualifier().layoutStream); + } + if (symbol->getQualifier().hasSet()) + builder.addDecoration(id, spv::DecorationDescriptorSet, symbol->getQualifier().layoutSet); + else if (IsDescriptorResource(symbol->getType())) { + // default to 0 + builder.addDecoration(id, spv::DecorationDescriptorSet, 0); + } + if (symbol->getQualifier().hasBinding()) + builder.addDecoration(id, spv::DecorationBinding, symbol->getQualifier().layoutBinding); + else if (IsDescriptorResource(symbol->getType())) { + // default to 0 + builder.addDecoration(id, spv::DecorationBinding, 0); + } + if (symbol->getQualifier().hasAttachment()) + builder.addDecoration(id, spv::DecorationInputAttachmentIndex, symbol->getQualifier().layoutAttachment); + if (glslangIntermediate->getXfbMode()) { + builder.addCapability(spv::CapabilityTransformFeedback); + if (symbol->getQualifier().hasXfbBuffer()) { + builder.addDecoration(id, spv::DecorationXfbBuffer, symbol->getQualifier().layoutXfbBuffer); + unsigned stride = glslangIntermediate->getXfbStride(symbol->getQualifier().layoutXfbBuffer); + if (stride != glslang::TQualifier::layoutXfbStrideEnd) + builder.addDecoration(id, spv::DecorationXfbStride, stride); + } + if (symbol->getQualifier().hasXfbOffset()) + builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutXfbOffset); + } + + // add built-in variable decoration + if (builtIn != spv::BuiltInMax) { + builder.addDecoration(id, spv::DecorationBuiltIn, (int)builtIn); + } + +#ifndef GLSLANG_WEB + if (symbol->getType().isImage()) { + std::vector memory; + 
TranslateMemoryDecoration(symbol->getType().getQualifier(), memory, + glslangIntermediate->usingVulkanMemoryModel()); + for (unsigned int i = 0; i < memory.size(); ++i) + builder.addDecoration(id, memory[i]); + } + + // nonuniform + builder.addDecoration(id, TranslateNonUniformDecoration(symbol->getType().getQualifier())); + + if (builtIn == spv::BuiltInSampleMask) { + spv::Decoration decoration; + // GL_NV_sample_mask_override_coverage extension + if (glslangIntermediate->getLayoutOverrideCoverage()) + decoration = (spv::Decoration)spv::DecorationOverrideCoverageNV; + else + decoration = (spv::Decoration)spv::DecorationMax; + builder.addDecoration(id, decoration); + if (decoration != spv::DecorationMax) { + builder.addCapability(spv::CapabilitySampleMaskOverrideCoverageNV); + builder.addExtension(spv::E_SPV_NV_sample_mask_override_coverage); + } + } + else if (builtIn == spv::BuiltInLayer) { + // SPV_NV_viewport_array2 extension + if (symbol->getQualifier().layoutViewportRelative) { + builder.addDecoration(id, (spv::Decoration)spv::DecorationViewportRelativeNV); + builder.addCapability(spv::CapabilityShaderViewportMaskNV); + builder.addExtension(spv::E_SPV_NV_viewport_array2); + } + if (symbol->getQualifier().layoutSecondaryViewportRelativeOffset != -2048) { + builder.addDecoration(id, (spv::Decoration)spv::DecorationSecondaryViewportRelativeNV, + symbol->getQualifier().layoutSecondaryViewportRelativeOffset); + builder.addCapability(spv::CapabilityShaderStereoViewNV); + builder.addExtension(spv::E_SPV_NV_stereo_view_rendering); + } + } + + if (symbol->getQualifier().layoutPassthrough) { + builder.addDecoration(id, spv::DecorationPassthroughNV); + builder.addCapability(spv::CapabilityGeometryShaderPassthroughNV); + builder.addExtension(spv::E_SPV_NV_geometry_shader_passthrough); + } + if (symbol->getQualifier().pervertexNV) { + builder.addDecoration(id, spv::DecorationPerVertexNV); + builder.addCapability(spv::CapabilityFragmentBarycentricNV); + builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric); + } + + if (glslangIntermediate->getHlslFunctionality1() && symbol->getType().getQualifier().semanticName != nullptr) { + builder.addExtension("SPV_GOOGLE_hlsl_functionality1"); + builder.addDecoration(id, (spv::Decoration)spv::DecorationHlslSemanticGOOGLE, + symbol->getType().getQualifier().semanticName); + } + + if (symbol->isReference()) { + builder.addDecoration(id, symbol->getType().getQualifier().restrict ? + spv::DecorationRestrictPointerEXT : spv::DecorationAliasedPointerEXT); + } +#endif + + return id; +} + +#ifndef GLSLANG_WEB +// add per-primitive, per-view. per-task decorations to a struct member (member >= 0) or an object +void TGlslangToSpvTraverser::addMeshNVDecoration(spv::Id id, int member, const glslang::TQualifier& qualifier) +{ + if (member >= 0) { + if (qualifier.perPrimitiveNV) { + // Need to add capability/extension for fragment shader. + // Mesh shader already adds this by default. + if (glslangIntermediate->getStage() == EShLangFragment) { + builder.addCapability(spv::CapabilityMeshShadingNV); + builder.addExtension(spv::E_SPV_NV_mesh_shader); + } + builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerPrimitiveNV); + } + if (qualifier.perViewNV) + builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerViewNV); + if (qualifier.perTaskNV) + builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerTaskNV); + } else { + if (qualifier.perPrimitiveNV) { + // Need to add capability/extension for fragment shader. 
+            // Mesh shader already adds this by default.
+            if (glslangIntermediate->getStage() == EShLangFragment) {
+                builder.addCapability(spv::CapabilityMeshShadingNV);
+                builder.addExtension(spv::E_SPV_NV_mesh_shader);
+            }
+            builder.addDecoration(id, spv::DecorationPerPrimitiveNV);
+        }
+        if (qualifier.perViewNV)
+            builder.addDecoration(id, spv::DecorationPerViewNV);
+        if (qualifier.perTaskNV)
+            builder.addDecoration(id, spv::DecorationPerTaskNV);
+    }
+}
+#endif
+
+// Make a full tree of instructions to build a SPIR-V specialization constant,
+// or regular constant if possible.
+//
+// TBD: this is not yet done, nor verified to be the best design; it does handle the leaf symbols, though.
+//
+// Recursively walk the nodes. The nodes form a tree whose leaves are
+// regular constants, which themselves are trees that createSpvConstant()
+// recursively walks. So, this function walks the "top" of the tree:
+// - emit specialization constant-building instructions for specConstant
+// - when running into a non-spec-constant, switch to createSpvConstant()
+spv::Id TGlslangToSpvTraverser::createSpvConstant(const glslang::TIntermTyped& node)
+{
+    assert(node.getQualifier().isConstant());
+
+    // Handle front-end constants first (non-specialization constants).
+    if (! node.getQualifier().specConstant) {
+        // hand off to the non-spec-constant path
+        assert(node.getAsConstantUnion() != nullptr || node.getAsSymbolNode() != nullptr);
+        int nextConst = 0;
+        return createSpvConstantFromConstUnionArray(node.getType(), node.getAsConstantUnion() ?
+            node.getAsConstantUnion()->getConstArray() : node.getAsSymbolNode()->getConstArray(),
+            nextConst, false);
+    }
+
+    // We now know we have a specialization constant to build
+
+    // gl_WorkGroupSize is a special case until the front-end handles hierarchical specialization constants;
+    // even then, its specialization IDs are handled by special-case syntax in GLSL: layout(local_size_x = ...
+    if (node.getType().getQualifier().builtIn == glslang::EbvWorkGroupSize) {
+        std::vector<spv::Id> dimConstId;
+        for (int dim = 0; dim < 3; ++dim) {
+            bool specConst = (glslangIntermediate->getLocalSizeSpecId(dim) != glslang::TQualifier::layoutNotSet);
+            dimConstId.push_back(builder.makeUintConstant(glslangIntermediate->getLocalSize(dim), specConst));
+            if (specConst) {
+                builder.addDecoration(dimConstId.back(), spv::DecorationSpecId,
+                                      glslangIntermediate->getLocalSizeSpecId(dim));
+            }
+        }
+        return builder.makeCompositeConstant(builder.makeVectorType(builder.makeUintType(32), 3), dimConstId, true);
+    }
+
+    // An AST node labelled as specialization constant should be a symbol node.
+    // Its initializer should either be a sub tree with constant nodes, or a constant union array.
+    if (auto* sn = node.getAsSymbolNode()) {
+        spv::Id result;
+        if (auto* sub_tree = sn->getConstSubtree()) {
+            // Traverse the constant constructor sub tree like generating normal run-time instructions.
+            // During the AST traversal, if the node is marked as 'specConstant', SpecConstantOpModeGuard
+            // will set the builder into spec constant op instruction generating mode.
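+            // Editor's illustration (hypothetical GLSL, not part of the upstream
+            // comment): given
+            //     layout(constant_id = 0) const int N = 4;
+            //     const int M = N * 2;
+            // M's initializer is such a constant sub tree, and traversing it in
+            // spec-constant-op mode emits an OpSpecConstantOp multiply instead of
+            // folding M to 8 at compile time.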
+            sub_tree->traverse(this);
+            result = accessChainLoad(sub_tree->getType());
+        } else if (auto* const_union_array = &sn->getConstArray()) {
+            int nextConst = 0;
+            result = createSpvConstantFromConstUnionArray(sn->getType(), *const_union_array, nextConst, true);
+        } else {
+            logger->missingFunctionality("Invalid initializer for spec constant.");
+            return spv::NoResult;
+        }
+        builder.addName(result, sn->getName().c_str());
+        return result;
+    }
+
+    // Neither a front-end constant node, nor a specialization constant node with constant union array or
+    // constant sub tree as initializer.
+    logger->missingFunctionality("Neither a front-end constant nor a spec constant.");
+    return spv::NoResult;
+}
+
+// Use 'consts' as the flattened glslang source of scalar constants to recursively
+// build the aggregate SPIR-V constant.
+//
+// If there are not enough elements present in 'consts', 0 will be substituted;
+// an empty 'consts' can be used to create a fully zeroed SPIR-V constant.
+//
+spv::Id TGlslangToSpvTraverser::createSpvConstantFromConstUnionArray(const glslang::TType& glslangType,
+    const glslang::TConstUnionArray& consts, int& nextConst, bool specConstant)
+{
+    // vector of constants for SPIR-V
+    std::vector<spv::Id> spvConsts;
+
+    // Type is used for struct and array constants
+    spv::Id typeId = convertGlslangToSpvType(glslangType);
+
+    if (glslangType.isArray()) {
+        glslang::TType elementType(glslangType, 0);
+        for (int i = 0; i < glslangType.getOuterArraySize(); ++i)
+            spvConsts.push_back(createSpvConstantFromConstUnionArray(elementType, consts, nextConst, false));
+    } else if (glslangType.isMatrix()) {
+        glslang::TType vectorType(glslangType, 0);
+        for (int col = 0; col < glslangType.getMatrixCols(); ++col)
+            spvConsts.push_back(createSpvConstantFromConstUnionArray(vectorType, consts, nextConst, false));
+    } else if (glslangType.isCoopMat()) {
+        glslang::TType componentType(glslangType.getBasicType());
+        spvConsts.push_back(createSpvConstantFromConstUnionArray(componentType, consts, nextConst, false));
+    } else if (glslangType.isStruct()) {
+        glslang::TVector<glslang::TTypeLoc>::const_iterator iter;
+        for (iter = glslangType.getStruct()->begin(); iter != glslangType.getStruct()->end(); ++iter)
+            spvConsts.push_back(createSpvConstantFromConstUnionArray(*iter->type, consts, nextConst, false));
+    } else if (glslangType.getVectorSize() > 1) {
+        for (unsigned int i = 0; i < (unsigned int)glslangType.getVectorSize(); ++i) {
+            bool zero = nextConst >= consts.size();
+            switch (glslangType.getBasicType()) {
+            case glslang::EbtInt:
+                spvConsts.push_back(builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst()));
+                break;
+            case glslang::EbtUint:
+                spvConsts.push_back(builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst()));
+                break;
+            case glslang::EbtFloat:
+                spvConsts.push_back(builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst()));
+                break;
+            case glslang::EbtBool:
+                spvConsts.push_back(builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst()));
+                break;
+#ifndef GLSLANG_WEB
+            case glslang::EbtInt8:
+                spvConsts.push_back(builder.makeInt8Constant(zero ? 0 : consts[nextConst].getI8Const()));
+                break;
+            case glslang::EbtUint8:
+                spvConsts.push_back(builder.makeUint8Constant(zero ? 0 : consts[nextConst].getU8Const()));
+                break;
+            case glslang::EbtInt16:
+                spvConsts.push_back(builder.makeInt16Constant(zero ? 0 : consts[nextConst].getI16Const()));
+                break;
+            case glslang::EbtUint16:
+                spvConsts.push_back(builder.makeUint16Constant(zero ?
0 : consts[nextConst].getU16Const())); + break; + case glslang::EbtInt64: + spvConsts.push_back(builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const())); + break; + case glslang::EbtUint64: + spvConsts.push_back(builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const())); + break; + case glslang::EbtDouble: + spvConsts.push_back(builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst())); + break; + case glslang::EbtFloat16: + spvConsts.push_back(builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst())); + break; +#endif + default: + assert(0); + break; + } + ++nextConst; + } + } else { + // we have a non-aggregate (scalar) constant + bool zero = nextConst >= consts.size(); + spv::Id scalar = 0; + switch (glslangType.getBasicType()) { + case glslang::EbtInt: + scalar = builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst(), specConstant); + break; + case glslang::EbtUint: + scalar = builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst(), specConstant); + break; + case glslang::EbtFloat: + scalar = builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant); + break; + case glslang::EbtBool: + scalar = builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst(), specConstant); + break; +#ifndef GLSLANG_WEB + case glslang::EbtInt8: + scalar = builder.makeInt8Constant(zero ? 0 : consts[nextConst].getI8Const(), specConstant); + break; + case glslang::EbtUint8: + scalar = builder.makeUint8Constant(zero ? 0 : consts[nextConst].getU8Const(), specConstant); + break; + case glslang::EbtInt16: + scalar = builder.makeInt16Constant(zero ? 0 : consts[nextConst].getI16Const(), specConstant); + break; + case glslang::EbtUint16: + scalar = builder.makeUint16Constant(zero ? 0 : consts[nextConst].getU16Const(), specConstant); + break; + case glslang::EbtInt64: + scalar = builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const(), specConstant); + break; + case glslang::EbtUint64: + scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant); + break; + case glslang::EbtDouble: + scalar = builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst(), specConstant); + break; + case glslang::EbtFloat16: + scalar = builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant); + break; + case glslang::EbtReference: + scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant); + scalar = builder.createUnaryOp(spv::OpBitcast, typeId, scalar); + break; +#endif + case glslang::EbtString: + scalar = builder.getStringId(consts[nextConst].getSConst()->c_str()); + break; + default: + assert(0); + break; + } + ++nextConst; + return scalar; + } + + return builder.makeCompositeConstant(typeId, spvConsts); +} + +// Return true if the node is a constant or symbol whose reading has no +// non-trivial observable cost or effect. 
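+// Editor's illustration (hypothetical GLSL, judged by the rules coded below):
+//   uniform int u;         // trivial leaf: reading u has no side effects
+//   buffer B { int b; };   // not trivial: buffer storage is absent from the
+//                          // storage-qualifier list below, since another
+//                          // invocation may be writing it concurrently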
+bool TGlslangToSpvTraverser::isTrivialLeaf(const glslang::TIntermTyped* node) +{ + // don't know what this is + if (node == nullptr) + return false; + + // a constant is safe + if (node->getAsConstantUnion() != nullptr) + return true; + + // not a symbol means non-trivial + if (node->getAsSymbolNode() == nullptr) + return false; + + // a symbol, depends on what's being read + switch (node->getType().getQualifier().storage) { + case glslang::EvqTemporary: + case glslang::EvqGlobal: + case glslang::EvqIn: + case glslang::EvqInOut: + case glslang::EvqConst: + case glslang::EvqConstReadOnly: + case glslang::EvqUniform: + return true; + default: + return false; + } +} + +// A node is trivial if it is a single operation with no side effects. +// HLSL (and/or vectors) are always trivial, as it does not short circuit. +// Otherwise, error on the side of saying non-trivial. +// Return true if trivial. +bool TGlslangToSpvTraverser::isTrivial(const glslang::TIntermTyped* node) +{ + if (node == nullptr) + return false; + + // count non scalars as trivial, as well as anything coming from HLSL + if (! node->getType().isScalarOrVec1() || glslangIntermediate->getSource() == glslang::EShSourceHlsl) + return true; + + // symbols and constants are trivial + if (isTrivialLeaf(node)) + return true; + + // otherwise, it needs to be a simple operation or one or two leaf nodes + + // not a simple operation + const glslang::TIntermBinary* binaryNode = node->getAsBinaryNode(); + const glslang::TIntermUnary* unaryNode = node->getAsUnaryNode(); + if (binaryNode == nullptr && unaryNode == nullptr) + return false; + + // not on leaf nodes + if (binaryNode && (! isTrivialLeaf(binaryNode->getLeft()) || ! isTrivialLeaf(binaryNode->getRight()))) + return false; + + if (unaryNode && ! isTrivialLeaf(unaryNode->getOperand())) { + return false; + } + + switch (node->getAsOperator()->getOp()) { + case glslang::EOpLogicalNot: + case glslang::EOpConvIntToBool: + case glslang::EOpConvUintToBool: + case glslang::EOpConvFloatToBool: + case glslang::EOpConvDoubleToBool: + case glslang::EOpEqual: + case glslang::EOpNotEqual: + case glslang::EOpLessThan: + case glslang::EOpGreaterThan: + case glslang::EOpLessThanEqual: + case glslang::EOpGreaterThanEqual: + case glslang::EOpIndexDirect: + case glslang::EOpIndexDirectStruct: + case glslang::EOpLogicalXor: + case glslang::EOpAny: + case glslang::EOpAll: + return true; + default: + return false; + } +} + +// Emit short-circuiting code, where 'right' is never evaluated unless +// the left side is true (for &&) or false (for ||). +spv::Id TGlslangToSpvTraverser::createShortCircuit(glslang::TOperator op, glslang::TIntermTyped& left, + glslang::TIntermTyped& right) +{ + spv::Id boolTypeId = builder.makeBoolType(); + + // emit left operand + builder.clearAccessChain(); + left.traverse(this); + spv::Id leftId = accessChainLoad(left.getType()); + + // Operands to accumulate OpPhi operands + std::vector phiOperands; + // accumulate left operand's phi information + phiOperands.push_back(leftId); + phiOperands.push_back(builder.getBuildPoint()->getId()); + + // Make the two kinds of operation symmetric with a "!" + // || => emit "if (! 
left) result = right" + // && => emit "if ( left) result = right" + // + // TODO: this runtime "not" for || could be avoided by adding functionality + // to 'builder' to have an "else" without an "then" + if (op == glslang::EOpLogicalOr) + leftId = builder.createUnaryOp(spv::OpLogicalNot, boolTypeId, leftId); + + // make an "if" based on the left value + spv::Builder::If ifBuilder(leftId, spv::SelectionControlMaskNone, builder); + + // emit right operand as the "then" part of the "if" + builder.clearAccessChain(); + right.traverse(this); + spv::Id rightId = accessChainLoad(right.getType()); + + // accumulate left operand's phi information + phiOperands.push_back(rightId); + phiOperands.push_back(builder.getBuildPoint()->getId()); + + // finish the "if" + ifBuilder.makeEndIf(); + + // phi together the two results + return builder.createOp(spv::OpPhi, boolTypeId, phiOperands); +} + +#ifndef GLSLANG_WEB +// Return type Id of the imported set of extended instructions corresponds to the name. +// Import this set if it has not been imported yet. +spv::Id TGlslangToSpvTraverser::getExtBuiltins(const char* name) +{ + if (extBuiltinMap.find(name) != extBuiltinMap.end()) + return extBuiltinMap[name]; + else { + builder.addExtension(name); + spv::Id extBuiltins = builder.import(name); + extBuiltinMap[name] = extBuiltins; + return extBuiltins; + } +} +#endif + +}; // end anonymous namespace + +namespace glslang { + +void GetSpirvVersion(std::string& version) +{ + const int bufSize = 100; + char buf[bufSize]; + snprintf(buf, bufSize, "0x%08x, Revision %d", spv::Version, spv::Revision); + version = buf; +} + +// For low-order part of the generator's magic number. Bump up +// when there is a change in the style (e.g., if SSA form changes, +// or a different instruction sequence to do something gets used). +int GetSpirvGeneratorVersion() +{ + // return 1; // start + // return 2; // EOpAtomicCounterDecrement gets a post decrement, to map between GLSL -> SPIR-V + // return 3; // change/correct barrier-instruction operands, to match memory model group decisions + // return 4; // some deeper access chains: for dynamic vector component, and local Boolean component + // return 5; // make OpArrayLength result type be an int with signedness of 0 + // return 6; // revert version 5 change, which makes a different (new) kind of incorrect code, + // versions 4 and 6 each generate OpArrayLength as it has long been done + // return 7; // GLSL volatile keyword maps to both SPIR-V decorations Volatile and Coherent + return 8; // switch to new dead block eliminator; use OpUnreachable +} + +// Write SPIR-V out to a binary file +void OutputSpvBin(const std::vector& spirv, const char* baseName) +{ + std::ofstream out; + out.open(baseName, std::ios::binary | std::ios::out); + if (out.fail()) + printf("ERROR: Failed to open file: %s\n", baseName); + for (int i = 0; i < (int)spirv.size(); ++i) { + unsigned int word = spirv[i]; + out.write((const char*)&word, 4); + } + out.close(); +} + +// Write SPIR-V out to a text file with 32-bit hexadecimal words +void OutputSpvHex(const std::vector& spirv, const char* baseName, const char* varName) +{ +#ifndef GLSLANG_WEB + std::ofstream out; + out.open(baseName, std::ios::binary | std::ios::out); + if (out.fail()) + printf("ERROR: Failed to open file: %s\n", baseName); + out << "\t// " << + GetSpirvGeneratorVersion() << "." << GLSLANG_MINOR_VERSION << "." 
<< GLSLANG_PATCH_LEVEL << + std::endl; + if (varName != nullptr) { + out << "\t #pragma once" << std::endl; + out << "const uint32_t " << varName << "[] = {" << std::endl; + } + const int WORDS_PER_LINE = 8; + for (int i = 0; i < (int)spirv.size(); i += WORDS_PER_LINE) { + out << "\t"; + for (int j = 0; j < WORDS_PER_LINE && i + j < (int)spirv.size(); ++j) { + const unsigned int word = spirv[i + j]; + out << "0x" << std::hex << std::setw(8) << std::setfill('0') << word; + if (i + j + 1 < (int)spirv.size()) { + out << ","; + } + } + out << std::endl; + } + if (varName != nullptr) { + out << "};"; + } + out.close(); +#endif +} + +// +// Set up the glslang traversal +// +void GlslangToSpv(const TIntermediate& intermediate, std::vector& spirv, SpvOptions* options) +{ + spv::SpvBuildLogger logger; + GlslangToSpv(intermediate, spirv, &logger, options); +} + +void GlslangToSpv(const TIntermediate& intermediate, std::vector& spirv, + spv::SpvBuildLogger* logger, SpvOptions* options) +{ + TIntermNode* root = intermediate.getTreeRoot(); + + if (root == 0) + return; + + SpvOptions defaultOptions; + if (options == nullptr) + options = &defaultOptions; + + GetThreadPoolAllocator().push(); + + TGlslangToSpvTraverser it(intermediate.getSpv().spv, &intermediate, logger, *options); + root->traverse(&it); + it.finishSpv(); + it.dumpSpv(spirv); + +#if ENABLE_OPT + // If from HLSL, run spirv-opt to "legalize" the SPIR-V for Vulkan + // eg. forward and remove memory writes of opaque types. + bool prelegalization = intermediate.getSource() == EShSourceHlsl; + if ((intermediate.getSource() == EShSourceHlsl || options->optimizeSize) && !options->disableOptimizer) { + SpirvToolsLegalize(intermediate, spirv, logger, options); + prelegalization = false; + } + + if (options->validate) + SpirvToolsValidate(intermediate, spirv, logger, prelegalization); + + if (options->disassemble) + SpirvToolsDisassemble(std::cout, spirv); + +#endif + + GetThreadPoolAllocator().pop(); +} + +}; // end namespace glslang diff --git a/dep/glslang/SPIRV/GlslangToSpv.h b/dep/glslang/SPIRV/GlslangToSpv.h new file mode 100644 index 000000000..3907be43b --- /dev/null +++ b/dep/glslang/SPIRV/GlslangToSpv.h @@ -0,0 +1,61 @@ +// +// Copyright (C) 2014 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#if defined(_MSC_VER) && _MSC_VER >= 1900 + #pragma warning(disable : 4464) // relative include path contains '..' +#endif + +#include "SpvTools.h" +#include "glslang/Include/intermediate.h" + +#include +#include + +#include "Logger.h" + +namespace glslang { + +void GetSpirvVersion(std::string&); +int GetSpirvGeneratorVersion(); +void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector& spirv, + SpvOptions* options = nullptr); +void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector& spirv, + spv::SpvBuildLogger* logger, SpvOptions* options = nullptr); +void OutputSpvBin(const std::vector& spirv, const char* baseName); +void OutputSpvHex(const std::vector& spirv, const char* baseName, const char* varName); + +} diff --git a/dep/glslang/SPIRV/InReadableOrder.cpp b/dep/glslang/SPIRV/InReadableOrder.cpp new file mode 100644 index 000000000..9d9410be9 --- /dev/null +++ b/dep/glslang/SPIRV/InReadableOrder.cpp @@ -0,0 +1,131 @@ +// +// Copyright (C) 2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// The SPIR-V spec requires code blocks to appear in an order satisfying the +// dominator-tree direction (ie, dominator before the dominated). This is, +// actually, easy to achieve: any pre-order CFG traversal algorithm will do it. +// Because such algorithms visit a block only after traversing some path to it +// from the root, they necessarily visit the block's idom first. 
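+//
+// Editor's illustration (hypothetical CFG, not part of the upstream comment):
+//
+//     entry -> then -> merge
+//     entry -> else -> merge
+//
+// "entry" dominates every other block here, so any pre-order walk from the
+// root emits it before "then", "else", and "merge".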
+//
+// But not every graph-traversal algorithm outputs blocks in an order that
+// appears logical to human readers. The problem is that unrelated branches may
+// be interspersed with each other, and merge blocks may come before some of the
+// branches being merged.
+//
+// A good, human-readable order of blocks may be achieved by performing
+// depth-first search but delaying merge nodes until after all their branches
+// have been visited. This is implemented below by the inReadableOrder()
+// function.
+
+#include "spvIR.h"
+
+#include <cassert>
+#include <unordered_set>
+
+using spv::Block;
+using spv::Id;
+
+namespace {
+// Traverses CFG in a readable order, invoking a pre-set callback on each block.
+// Use it by calling visit() on the root block.
+class ReadableOrderTraverser {
+public:
+    ReadableOrderTraverser(std::function<void(Block*, spv::ReachReason, Block*)> callback)
+      : callback_(callback) {}
+    // Visits the block if it hasn't been visited already and isn't currently
+    // being delayed. Invokes callback(block, why, header), then descends into its
+    // successors. Delays merge-block and continue-block processing until all
+    // the branches have been completed. If |block| is an unreachable merge block or
+    // an unreachable continue target, then |header| is the corresponding header block.
+    void visit(Block* block, spv::ReachReason why, Block* header)
+    {
+        assert(block);
+        if (why == spv::ReachViaControlFlow) {
+            reachableViaControlFlow_.insert(block);
+        }
+        if (visited_.count(block) || delayed_.count(block))
+            return;
+        callback_(block, why, header);
+        visited_.insert(block);
+        Block* mergeBlock = nullptr;
+        Block* continueBlock = nullptr;
+        auto mergeInst = block->getMergeInstruction();
+        if (mergeInst) {
+            Id mergeId = mergeInst->getIdOperand(0);
+            mergeBlock = block->getParent().getParent().getInstruction(mergeId)->getBlock();
+            delayed_.insert(mergeBlock);
+            if (mergeInst->getOpCode() == spv::OpLoopMerge) {
+                Id continueId = mergeInst->getIdOperand(1);
+                continueBlock =
+                    block->getParent().getParent().getInstruction(continueId)->getBlock();
+                delayed_.insert(continueBlock);
+            }
+        }
+        if (why == spv::ReachViaControlFlow) {
+            const auto& successors = block->getSuccessors();
+            for (auto it = successors.cbegin(); it != successors.cend(); ++it)
+                visit(*it, why, nullptr);
+        }
+        if (continueBlock) {
+            const spv::ReachReason continueWhy =
+                (reachableViaControlFlow_.count(continueBlock) > 0)
+                    ? spv::ReachViaControlFlow
+                    : spv::ReachDeadContinue;
+            delayed_.erase(continueBlock);
+            visit(continueBlock, continueWhy, block);
+        }
+        if (mergeBlock) {
+            const spv::ReachReason mergeWhy =
+                (reachableViaControlFlow_.count(mergeBlock) > 0)
+                    ? spv::ReachViaControlFlow
+                    : spv::ReachDeadMerge;
+            delayed_.erase(mergeBlock);
+            visit(mergeBlock, mergeWhy, block);
+        }
+    }
+
+private:
+    std::function<void(Block*, spv::ReachReason, Block*)> callback_;
+    // Whether a block has already been visited or is being delayed.
+    std::unordered_set<Block*> visited_, delayed_;
+
+    // The set of blocks that actually are reached via control flow.
+    std::unordered_set<Block*> reachableViaControlFlow_;
+};
+}
+
+void spv::inReadableOrder(Block* root, std::function<void(Block*, spv::ReachReason, Block*)> callback)
+{
+    ReadableOrderTraverser(callback).visit(root, spv::ReachViaControlFlow, nullptr);
+}
diff --git a/dep/glslang/SPIRV/Logger.cpp b/dep/glslang/SPIRV/Logger.cpp
new file mode 100644
index 000000000..cdc8469c4
--- /dev/null
+++ b/dep/glslang/SPIRV/Logger.cpp
@@ -0,0 +1,72 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GLSLANG_WEB
+
+#include "Logger.h"
+
+#include <algorithm>
+#include <iterator>
+#include <sstream>
+
+namespace spv {
+
+void SpvBuildLogger::tbdFunctionality(const std::string& f)
+{
+    if (std::find(std::begin(tbdFeatures), std::end(tbdFeatures), f) == std::end(tbdFeatures))
+        tbdFeatures.push_back(f);
+}
+
+void SpvBuildLogger::missingFunctionality(const std::string& f)
+{
+    if (std::find(std::begin(missingFeatures), std::end(missingFeatures), f) == std::end(missingFeatures))
+        missingFeatures.push_back(f);
+}
+
+std::string SpvBuildLogger::getAllMessages() const {
+    std::ostringstream messages;
+    for (auto it = tbdFeatures.cbegin(); it != tbdFeatures.cend(); ++it)
+        messages << "TBD functionality: " << *it << "\n";
+    for (auto it = missingFeatures.cbegin(); it != missingFeatures.cend(); ++it)
+        messages << "Missing functionality: " << *it << "\n";
+    for (auto it = warnings.cbegin(); it != warnings.cend(); ++it)
+        messages << "warning: " << *it << "\n";
+    for (auto it = errors.cbegin(); it != errors.cend(); ++it)
+        messages << "error: " << *it << "\n";
+    return messages.str();
+}
+
+} // end spv namespace
+
+#endif
diff --git a/dep/glslang/SPIRV/Logger.h b/dep/glslang/SPIRV/Logger.h
new file mode 100644
index 000000000..411367c03
--- /dev/null
+++ b/dep/glslang/SPIRV/Logger.h
@@ -0,0 +1,83 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GLSLANG_SPIRV_LOGGER_H
+#define GLSLANG_SPIRV_LOGGER_H
+
+#include <string>
+#include <vector>
+
+namespace spv {
+
+// A class for holding all SPIR-V build status messages, including
+// missing/TBD functionalities, warnings, and errors.
+class SpvBuildLogger {
+public:
+    SpvBuildLogger() {}
+
+#ifdef GLSLANG_WEB
+    void tbdFunctionality(const std::string& f) { }
+    void missingFunctionality(const std::string& f) { }
+    void warning(const std::string& w) { }
+    void error(const std::string& e) { errors.push_back(e); }
+    std::string getAllMessages() { return ""; }
+#else
+
+    // Registers a TBD functionality.
+    void tbdFunctionality(const std::string& f);
+    // Registers a missing functionality.
+    void missingFunctionality(const std::string& f);
+
+    // Logs a warning.
+    void warning(const std::string& w) { warnings.push_back(w); }
+    // Logs an error.
+    void error(const std::string& e) { errors.push_back(e); }
+
+    // Returns all messages accumulated in the order of:
+    // TBD functionalities, missing functionalities, warnings, errors.
+    std::string getAllMessages() const;
+#endif
+
+private:
+    SpvBuildLogger(const SpvBuildLogger&);
+
+    std::vector<std::string> tbdFeatures;
+    std::vector<std::string> missingFeatures;
+    std::vector<std::string> warnings;
+    std::vector<std::string> errors;
+};
+
+} // end spv namespace
+
+#endif // GLSLANG_SPIRV_LOGGER_H
diff --git a/dep/glslang/SPIRV/NonSemanticDebugPrintf.h b/dep/glslang/SPIRV/NonSemanticDebugPrintf.h
new file mode 100644
index 000000000..83796d75e
--- /dev/null
+++ b/dep/glslang/SPIRV/NonSemanticDebugPrintf.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2020 The Khronos Group Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and/or associated documentation files (the
+// "Materials"), to deal in the Materials without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Materials, and to
+// permit persons to whom the Materials are furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be included
+// in all copies or substantial portions of the Materials.
+//
+// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS
+// KHRONOS STANDARDS.
THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS +// SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT +// https://www.khronos.org/registry/ +// +// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +// MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +// + +#ifndef SPIRV_UNIFIED1_NonSemanticDebugPrintf_H_ +#define SPIRV_UNIFIED1_NonSemanticDebugPrintf_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +enum { + NonSemanticDebugPrintfRevision = 1, + NonSemanticDebugPrintfRevision_BitWidthPadding = 0x7fffffff +}; + +enum NonSemanticDebugPrintfInstructions { + NonSemanticDebugPrintfDebugPrintf = 1, + NonSemanticDebugPrintfInstructionsMax = 0x7fffffff +}; + + +#ifdef __cplusplus +} +#endif + +#endif // SPIRV_UNIFIED1_NonSemanticDebugPrintf_H_ diff --git a/dep/glslang/SPIRV/SPVRemapper.cpp b/dep/glslang/SPIRV/SPVRemapper.cpp new file mode 100644 index 000000000..fd0bb8950 --- /dev/null +++ b/dep/glslang/SPIRV/SPVRemapper.cpp @@ -0,0 +1,1487 @@ +// +// Copyright (C) 2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "SPVRemapper.h" +#include "doc.h" + +#if !defined (use_cpp11) +// ... not supported before C++11 +#else // defined (use_cpp11) + +#include +#include +#include "../glslang/Include/Common.h" + +namespace spv { + + // By default, just abort on error. Can be overridden via RegisterErrorHandler + spirvbin_t::errorfn_t spirvbin_t::errorHandler = [](const std::string&) { exit(5); }; + // By default, eat log messages. 
Can be overridden via RegisterLogHandler + spirvbin_t::logfn_t spirvbin_t::logHandler = [](const std::string&) { }; + + // This can be overridden to provide other message behavior if needed + void spirvbin_t::msg(int minVerbosity, int indent, const std::string& txt) const + { + if (verbose >= minVerbosity) + logHandler(std::string(indent, ' ') + txt); + } + + // hash opcode, with special handling for OpExtInst + std::uint32_t spirvbin_t::asOpCodeHash(unsigned word) + { + const spv::Op opCode = asOpCode(word); + + std::uint32_t offset = 0; + + switch (opCode) { + case spv::OpExtInst: + offset += asId(word + 4); break; + default: + break; + } + + return opCode * 19 + offset; // 19 = small prime + } + + spirvbin_t::range_t spirvbin_t::literalRange(spv::Op opCode) const + { + static const int maxCount = 1<<30; + + switch (opCode) { + case spv::OpTypeFloat: // fall through... + case spv::OpTypePointer: return range_t(2, 3); + case spv::OpTypeInt: return range_t(2, 4); + // TODO: case spv::OpTypeImage: + // TODO: case spv::OpTypeSampledImage: + case spv::OpTypeSampler: return range_t(3, 8); + case spv::OpTypeVector: // fall through + case spv::OpTypeMatrix: // ... + case spv::OpTypePipe: return range_t(3, 4); + case spv::OpConstant: return range_t(3, maxCount); + default: return range_t(0, 0); + } + } + + spirvbin_t::range_t spirvbin_t::typeRange(spv::Op opCode) const + { + static const int maxCount = 1<<30; + + if (isConstOp(opCode)) + return range_t(1, 2); + + switch (opCode) { + case spv::OpTypeVector: // fall through + case spv::OpTypeMatrix: // ... + case spv::OpTypeSampler: // ... + case spv::OpTypeArray: // ... + case spv::OpTypeRuntimeArray: // ... + case spv::OpTypePipe: return range_t(2, 3); + case spv::OpTypeStruct: // fall through + case spv::OpTypeFunction: return range_t(2, maxCount); + case spv::OpTypePointer: return range_t(3, 4); + default: return range_t(0, 0); + } + } + + spirvbin_t::range_t spirvbin_t::constRange(spv::Op opCode) const + { + static const int maxCount = 1<<30; + + switch (opCode) { + case spv::OpTypeArray: // fall through... + case spv::OpTypeRuntimeArray: return range_t(3, 4); + case spv::OpConstantComposite: return range_t(3, maxCount); + default: return range_t(0, 0); + } + } + + // Return the size of a type in 32-bit words. This currently only + // handles ints and floats, and is only invoked by queries which must be + // integer types. If ever needed, it can be generalized. + unsigned spirvbin_t::typeSizeInWords(spv::Id id) const + { + const unsigned typeStart = idPos(id); + const spv::Op opCode = asOpCode(typeStart); + + if (errorLatch) + return 0; + + switch (opCode) { + case spv::OpTypeInt: // fall through... + case spv::OpTypeFloat: return (spv[typeStart+2]+31)/32; + default: + return 0; + } + } + + // Looks up the type of a given const or variable ID, and + // returns its size in 32-bit words. + unsigned spirvbin_t::idTypeSizeInWords(spv::Id id) const + { + const auto tid_it = idTypeSizeMap.find(id); + if (tid_it == idTypeSizeMap.end()) { + error("type size for ID not found"); + return 0; + } + + return tid_it->second; + } + + // Is this an opcode we should remove when using --strip? 
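+    // (Editor's note: these are the debug-information instructions; e.g. an
+    //  OpName %5 "main" record names an ID for tooling only, so dropping it
+    //  cannot change what the module executes.)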
+ bool spirvbin_t::isStripOp(spv::Op opCode) const + { + switch (opCode) { + case spv::OpSource: + case spv::OpSourceExtension: + case spv::OpName: + case spv::OpMemberName: + case spv::OpLine: return true; + default: return false; + } + } + + // Return true if this opcode is flow control + bool spirvbin_t::isFlowCtrl(spv::Op opCode) const + { + switch (opCode) { + case spv::OpBranchConditional: + case spv::OpBranch: + case spv::OpSwitch: + case spv::OpLoopMerge: + case spv::OpSelectionMerge: + case spv::OpLabel: + case spv::OpFunction: + case spv::OpFunctionEnd: return true; + default: return false; + } + } + + // Return true if this opcode defines a type + bool spirvbin_t::isTypeOp(spv::Op opCode) const + { + switch (opCode) { + case spv::OpTypeVoid: + case spv::OpTypeBool: + case spv::OpTypeInt: + case spv::OpTypeFloat: + case spv::OpTypeVector: + case spv::OpTypeMatrix: + case spv::OpTypeImage: + case spv::OpTypeSampler: + case spv::OpTypeArray: + case spv::OpTypeRuntimeArray: + case spv::OpTypeStruct: + case spv::OpTypeOpaque: + case spv::OpTypePointer: + case spv::OpTypeFunction: + case spv::OpTypeEvent: + case spv::OpTypeDeviceEvent: + case spv::OpTypeReserveId: + case spv::OpTypeQueue: + case spv::OpTypeSampledImage: + case spv::OpTypePipe: return true; + default: return false; + } + } + + // Return true if this opcode defines a constant + bool spirvbin_t::isConstOp(spv::Op opCode) const + { + switch (opCode) { + case spv::OpConstantSampler: + error("unimplemented constant type"); + return true; + + case spv::OpConstantNull: + case spv::OpConstantTrue: + case spv::OpConstantFalse: + case spv::OpConstantComposite: + case spv::OpConstant: + return true; + + default: + return false; + } + } + + const auto inst_fn_nop = [](spv::Op, unsigned) { return false; }; + const auto op_fn_nop = [](spv::Id&) { }; + + // g++ doesn't like these defined in the class proper in an anonymous namespace. + // Dunno why. Also MSVC doesn't like the constexpr keyword. Also dunno why. + // Defining them externally seems to please both compilers, so, here they are. + const spv::Id spirvbin_t::unmapped = spv::Id(-10000); + const spv::Id spirvbin_t::unused = spv::Id(-10001); + const int spirvbin_t::header_size = 5; + + spv::Id spirvbin_t::nextUnusedId(spv::Id id) + { + while (isNewIdMapped(id)) // search for an unused ID + ++id; + + return id; + } + + spv::Id spirvbin_t::localId(spv::Id id, spv::Id newId) + { + //assert(id != spv::NoResult && newId != spv::NoResult); + + if (id > bound()) { + error(std::string("ID out of range: ") + std::to_string(id)); + return spirvbin_t::unused; + } + + if (id >= idMapL.size()) + idMapL.resize(id+1, unused); + + if (newId != unmapped && newId != unused) { + if (isOldIdUnused(id)) { + error(std::string("ID unused in module: ") + std::to_string(id)); + return spirvbin_t::unused; + } + + if (!isOldIdUnmapped(id)) { + error(std::string("ID already mapped: ") + std::to_string(id) + " -> " + + std::to_string(localId(id))); + + return spirvbin_t::unused; + } + + if (isNewIdMapped(newId)) { + error(std::string("ID already used in module: ") + std::to_string(newId)); + return spirvbin_t::unused; + } + + msg(4, 4, std::string("map: ") + std::to_string(id) + " -> " + std::to_string(newId)); + setMapped(newId); + largestNewId = std::max(largestNewId, newId); + } + + return idMapL[id] = newId; + } + + // Parse a literal string from the SPIR binary and return it as an std::string + // Due to C++11 RValue references, this doesn't copy the result string. 
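+    // (Editor's note on the encoding this relies on: SPIR-V packs literal
+    //  strings as nul-terminated UTF-8, low byte first within each 32-bit
+    //  word, so e.g. "abc" occupies the single word 0x00636261 and the word
+    //  stream can be read back directly as a C string.)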
+ std::string spirvbin_t::literalString(unsigned word) const + { + std::string literal; + + literal.reserve(16); + + const char* bytes = reinterpret_cast(spv.data() + word); + + while (bytes && *bytes) + literal += *bytes++; + + return literal; + } + + void spirvbin_t::applyMap() + { + msg(3, 2, std::string("Applying map: ")); + + // Map local IDs through the ID map + process(inst_fn_nop, // ignore instructions + [this](spv::Id& id) { + id = localId(id); + + if (errorLatch) + return; + + assert(id != unused && id != unmapped); + } + ); + } + + // Find free IDs for anything we haven't mapped + void spirvbin_t::mapRemainder() + { + msg(3, 2, std::string("Remapping remainder: ")); + + spv::Id unusedId = 1; // can't use 0: that's NoResult + spirword_t maxBound = 0; + + for (spv::Id id = 0; id < idMapL.size(); ++id) { + if (isOldIdUnused(id)) + continue; + + // Find a new mapping for any used but unmapped IDs + if (isOldIdUnmapped(id)) { + localId(id, unusedId = nextUnusedId(unusedId)); + if (errorLatch) + return; + } + + if (isOldIdUnmapped(id)) { + error(std::string("old ID not mapped: ") + std::to_string(id)); + return; + } + + // Track max bound + maxBound = std::max(maxBound, localId(id) + 1); + + if (errorLatch) + return; + } + + bound(maxBound); // reset header ID bound to as big as it now needs to be + } + + // Mark debug instructions for stripping + void spirvbin_t::stripDebug() + { + // Strip instructions in the stripOp set: debug info. + process( + [&](spv::Op opCode, unsigned start) { + // remember opcodes we want to strip later + if (isStripOp(opCode)) + stripInst(start); + return true; + }, + op_fn_nop); + } + + // Mark instructions that refer to now-removed IDs for stripping + void spirvbin_t::stripDeadRefs() + { + process( + [&](spv::Op opCode, unsigned start) { + // strip opcodes pointing to removed data + switch (opCode) { + case spv::OpName: + case spv::OpMemberName: + case spv::OpDecorate: + case spv::OpMemberDecorate: + if (idPosR.find(asId(start+1)) == idPosR.end()) + stripInst(start); + break; + default: + break; // leave it alone + } + + return true; + }, + op_fn_nop); + + strip(); + } + + // Update local maps of ID, type, etc positions + void spirvbin_t::buildLocalMaps() + { + msg(2, 2, std::string("build local maps: ")); + + mapped.clear(); + idMapL.clear(); +// preserve nameMap, so we don't clear that. 
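+        // (Editor's note: nameMap is populated from OpName instructions in the
+        //  pass below, so preserving it keeps name-based ID lookups stable
+        //  across rebuilds.)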
+ fnPos.clear(); + fnCalls.clear(); + typeConstPos.clear(); + idPosR.clear(); + entryPoint = spv::NoResult; + largestNewId = 0; + + idMapL.resize(bound(), unused); + + int fnStart = 0; + spv::Id fnRes = spv::NoResult; + + // build local Id and name maps + process( + [&](spv::Op opCode, unsigned start) { + unsigned word = start+1; + spv::Id typeId = spv::NoResult; + + if (spv::InstructionDesc[opCode].hasType()) + typeId = asId(word++); + + // If there's a result ID, remember the size of its type + if (spv::InstructionDesc[opCode].hasResult()) { + const spv::Id resultId = asId(word++); + idPosR[resultId] = start; + + if (typeId != spv::NoResult) { + const unsigned idTypeSize = typeSizeInWords(typeId); + + if (errorLatch) + return false; + + if (idTypeSize != 0) + idTypeSizeMap[resultId] = idTypeSize; + } + } + + if (opCode == spv::Op::OpName) { + const spv::Id target = asId(start+1); + const std::string name = literalString(start+2); + nameMap[name] = target; + + } else if (opCode == spv::Op::OpFunctionCall) { + ++fnCalls[asId(start + 3)]; + } else if (opCode == spv::Op::OpEntryPoint) { + entryPoint = asId(start + 2); + } else if (opCode == spv::Op::OpFunction) { + if (fnStart != 0) { + error("nested function found"); + return false; + } + + fnStart = start; + fnRes = asId(start + 2); + } else if (opCode == spv::Op::OpFunctionEnd) { + assert(fnRes != spv::NoResult); + if (fnStart == 0) { + error("function end without function start"); + return false; + } + + fnPos[fnRes] = range_t(fnStart, start + asWordCount(start)); + fnStart = 0; + } else if (isConstOp(opCode)) { + if (errorLatch) + return false; + + assert(asId(start + 2) != spv::NoResult); + typeConstPos.insert(start); + } else if (isTypeOp(opCode)) { + assert(asId(start + 1) != spv::NoResult); + typeConstPos.insert(start); + } + + return false; + }, + + [this](spv::Id& id) { localId(id, unmapped); } + ); + } + + // Validate the SPIR header + void spirvbin_t::validate() const + { + msg(2, 2, std::string("validating: ")); + + if (spv.size() < header_size) { + error("file too short: "); + return; + } + + if (magic() != spv::MagicNumber) { + error("bad magic number"); + return; + } + + // field 1 = version + // field 2 = generator magic + // field 3 = result bound + + if (schemaNum() != 0) { + error("bad schema, must be 0"); + return; + } + } + + int spirvbin_t::processInstruction(unsigned word, instfn_t instFn, idfn_t idFn) + { + const auto instructionStart = word; + const unsigned wordCount = asWordCount(instructionStart); + const int nextInst = word++ + wordCount; + spv::Op opCode = asOpCode(instructionStart); + + if (nextInst > int(spv.size())) { + error("spir instruction terminated too early"); + return -1; + } + + // Base for computing number of operands; will be updated as more is learned + unsigned numOperands = wordCount - 1; + + if (instFn(opCode, instructionStart)) + return nextInst; + + // Read type and result ID from instruction desc table + if (spv::InstructionDesc[opCode].hasType()) { + idFn(asId(word++)); + --numOperands; + } + + if (spv::InstructionDesc[opCode].hasResult()) { + idFn(asId(word++)); + --numOperands; + } + + // Extended instructions: currently, assume everything is an ID. 
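+        // (Editor's note: per the SPIR-V spec, OpExtInst is laid out as result
+        //  type, result, set <id>, a literal instruction number, and then the
+        //  operand <id>s -- hence the skip of two words below before treating
+        //  the remaining operands as IDs.)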
+ // TODO: add whatever data we need for exceptions to that
+ if (opCode == spv::OpExtInst) {
+ word += 2; // instruction set, and instruction from set
+ numOperands -= 2;
+
+ for (unsigned op=0; op < numOperands; ++op)
+ idFn(asId(word++)); // ID
+
+ return nextInst;
+ }
+
+ // Circular buffer so we can look back at previous unmapped values during the mapping pass.
+ static const unsigned idBufferSize = 4;
+ spv::Id idBuffer[idBufferSize];
+ unsigned idBufferPos = 0;
+
+ // Store IDs from instruction in our map
+ for (int op = 0; numOperands > 0; ++op, --numOperands) {
+ // SpecConstantOp is special: it includes the operands of another opcode which is
+ // given as a literal in the 3rd word. We will switch over to pretending that the
+ // opcode being processed is the literal opcode value of the SpecConstantOp. See the
+ // SPIRV spec for details. This way we will handle IDs and literals as appropriate for
+ // the embedded op.
+ if (opCode == spv::OpSpecConstantOp) {
+ if (op == 0) {
+ opCode = asOpCode(word++); // this is the opcode embedded in the SpecConstantOp.
+ --numOperands;
+ }
+ }
+
+ switch (spv::InstructionDesc[opCode].operands.getClass(op)) {
+ case spv::OperandId:
+ case spv::OperandScope:
+ case spv::OperandMemorySemantics:
+ idBuffer[idBufferPos] = asId(word);
+ idBufferPos = (idBufferPos + 1) % idBufferSize;
+ idFn(asId(word++));
+ break;
+
+ case spv::OperandVariableIds:
+ for (unsigned i = 0; i < numOperands; ++i)
+ idFn(asId(word++));
+ return nextInst;
+
+ case spv::OperandVariableLiterals:
+ // for clarity
+ // if (opCode == spv::OpDecorate && asDecoration(word - 1) == spv::DecorationBuiltIn) {
+ // ++word;
+ // --numOperands;
+ // }
+ // word += numOperands;
+ return nextInst;
+
+ case spv::OperandVariableLiteralId: {
+ if (opCode == OpSwitch) {
+ // word-2 is the position of the selector ID. OpSwitch Literals match its type.
+ // In case the IDs are currently being remapped, we get the word[-2] ID from
+ // the circular idBuffer.
+ const unsigned literalSizePos = (idBufferPos+idBufferSize-2) % idBufferSize;
+ const unsigned literalSize = idTypeSizeInWords(idBuffer[literalSizePos]);
+ const unsigned numLiteralIdPairs = (nextInst-word) / (1+literalSize);
+
+ if (errorLatch)
+ return -1;
+
+ for (unsigned arg=0; arg<numLiteralIdPairs; ++arg) {
+ word += literalSize; // literal
+ idFn(asId(word++)); // label
+ }
+ } else {
+ assert(0); // currently, only OpSwitch uses OperandVariableLiteralId
+ }
+
+ return nextInst;
+ }
+
+ case spv::OperandLiteralString: {
+ const int stringWordCount = literalStringWords(literalString(word));
+ word += stringWordCount;
+ numOperands -= (stringWordCount-1); // -1 because for() header post-decrements
+ break;
+ }
+
+ // Execution mode might have extra literal operands. Skip them.
+ case spv::OperandExecutionMode:
+ return nextInst;
+
+ // Single word operands we simply ignore, as they hold no IDs
+ case spv::OperandLiteralNumber:
+ case spv::OperandSource:
+ case spv::OperandExecutionModel:
+ case spv::OperandAddressing:
+ case spv::OperandMemory:
+ case spv::OperandStorage:
+ case spv::OperandDimensionality:
+ case spv::OperandSamplerAddressingMode:
+ case spv::OperandSamplerFilterMode:
+ case spv::OperandSamplerImageFormat:
+ case spv::OperandImageChannelOrder:
+ case spv::OperandImageChannelDataType:
+ case spv::OperandImageOperands:
+ case spv::OperandFPFastMath:
+ case spv::OperandFPRoundingMode:
+ case spv::OperandLinkageType:
+ case spv::OperandAccessQualifier:
+ case spv::OperandFuncParamAttr:
+ case spv::OperandDecoration:
+ case spv::OperandBuiltIn:
+ case spv::OperandSelect:
+ case spv::OperandLoop:
+ case spv::OperandFunction:
+ case spv::OperandMemoryAccess:
+ case spv::OperandGroupOperation:
+ case spv::OperandKernelEnqueueFlags:
+ case spv::OperandKernelProfilingInfo:
+ case spv::OperandCapability:
+ ++word;
+ break;
+
+ default:
+ assert(0 && "Unhandled Operand Class");
+ break;
+ }
+ }
+
+ return nextInst;
+ }
+
+ // Make a pass over all the instructions and process them given appropriate functions
+ spirvbin_t& spirvbin_t::process(instfn_t instFn, idfn_t idFn, unsigned begin, unsigned end)
+ {
+ // For efficiency, reserve name map space. It can grow if needed.
+ nameMap.reserve(32);
+
+ // If begin or end == 0, use defaults
+ begin = (begin == 0 ? header_size : begin);
+ end = (end == 0 ? unsigned(spv.size()) : end);
+
+ unsigned nextInst = unsigned(spv.size());
+
+ for (unsigned word = begin; word < end; word = nextInst) {
+ nextInst = processInstruction(word, instFn, idFn);
+
+ if (errorLatch)
+ return *this;
+ }
+
+ return *this;
+ }
+
+ // Apply global name mapping to a single module
+ void spirvbin_t::mapNames()
+ {
+ static const std::uint32_t softTypeIdLimit = 3011; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 3019; // offset into ID space
+
+ for (const auto& name : nameMap) {
+ std::uint32_t hashval = 1911;
+ for (const char c : name.first)
+ hashval = hashval * 1009 + c;
+
+ if (isOldIdUnmapped(name.second)) {
+ localId(name.second, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+ }
+ }
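+ // Note on the mapping strategy used by mapNames() above and the mappers that
+ // follow: each derives a deterministic hash from something stable (a name, a
+ // type shape, nearby opcodes), then probes from that hash for a free slot:
+ //
+ //     spv::Id newId = nextUnusedId(hashval % softTypeIdLimit + firstMappedID);
+ //
+ // so two modules containing the same construct tend to agree on its new ID,
+ // which is what makes remapped binaries diff- and dedup-friendly.
+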
+ // Map function bodies to IDs of similar functions in other modules
+ void spirvbin_t::mapFnBodies()
+ {
+ static const std::uint32_t softTypeIdLimit = 19071; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 6203; // offset into ID space
+
+ spv::Id fnId = spv::NoResult;
+ std::vector<unsigned> instPos;
+ instPos.reserve(unsigned(spv.size()) / 16); // initial estimate; can grow if needed.
+
+ // Build local table of instruction start positions
+ process(
+ [&](spv::Op, unsigned start) { instPos.push_back(start); return true; },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ // Window size for context-sensitive canonicalization values
+ // Empirical best size from a single data set. TODO: Would be a good tunable.
+ // We essentially perform a little convolution around each instruction,
+ // to capture the flavor of nearby code, to hopefully match to similar
+ // code in other modules.
+ static const unsigned windowSize = 2;
+
+ for (unsigned entry = 0; entry < unsigned(instPos.size()); ++entry) {
+ const unsigned start = instPos[entry];
+ const spv::Op opCode = asOpCode(start);
+
+ if (opCode == spv::OpFunction)
+ fnId = asId(start + 2);
+
+ if (opCode == spv::OpFunctionEnd)
+ fnId = spv::NoResult;
+
+ if (fnId != spv::NoResult) { // if inside a function
+ if (spv::InstructionDesc[opCode].hasResult()) {
+ const unsigned word = start + (spv::InstructionDesc[opCode].hasType() ? 2 : 1);
+ const spv::Id resId = asId(word);
+ std::uint32_t hashval = fnId * 17; // small prime
+
+ for (unsigned i = entry-1; i >= entry-windowSize; --i) {
+ if (asOpCode(instPos[i]) == spv::OpFunction)
+ break;
+ hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
+ }
+
+ for (unsigned i = entry; i <= entry + windowSize; ++i) {
+ if (asOpCode(instPos[i]) == spv::OpFunctionEnd)
+ break;
+ hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
+ }
+
+ if (isOldIdUnmapped(resId)) {
+ localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+
+ }
+ }
+ }
+
+ spv::Op thisOpCode(spv::OpNop);
+ std::unordered_map<spv::Op, int, std::hash<int>> opCounter;
+ int idCounter(0);
+ fnId = spv::NoResult;
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ switch (opCode) {
+ case spv::OpFunction:
+ // Reset counters at each function
+ idCounter = 0;
+ opCounter.clear();
+ fnId = asId(start + 2);
+ break;
+
+ case spv::OpImageSampleImplicitLod:
+ case spv::OpImageSampleExplicitLod:
+ case spv::OpImageSampleDrefImplicitLod:
+ case spv::OpImageSampleDrefExplicitLod:
+ case spv::OpImageSampleProjImplicitLod:
+ case spv::OpImageSampleProjExplicitLod:
+ case spv::OpImageSampleProjDrefImplicitLod:
+ case spv::OpImageSampleProjDrefExplicitLod:
+ case spv::OpDot:
+ case spv::OpCompositeExtract:
+ case spv::OpCompositeInsert:
+ case spv::OpVectorShuffle:
+ case spv::OpLabel:
+ case spv::OpVariable:
+
+ case spv::OpAccessChain:
+ case spv::OpLoad:
+ case spv::OpStore:
+ case spv::OpCompositeConstruct:
+ case spv::OpFunctionCall:
+ ++opCounter[opCode];
+ idCounter = 0;
+ thisOpCode = opCode;
+ break;
+ default:
+ thisOpCode = spv::OpNop;
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) {
+ if (thisOpCode != spv::OpNop) {
+ ++idCounter;
+ const std::uint32_t hashval = opCounter[thisOpCode] * thisOpCode * 50047 + idCounter + fnId * 117;
+
+ if (isOldIdUnmapped(id))
+ localId(id, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ }
+ });
+ }
+
+ // EXPERIMENTAL: forward IO and uniform load/stores into operands
+ // This produces invalid Schema-0 SPIRV
+ void spirvbin_t::forwardLoadStores()
+ {
+ idset_t fnLocalVars; // set of function local vars
+ idmap_t idMap; // Map of load result IDs to what they load
+
+ // EXPERIMENTAL: Forward input and access chain loads into consumptions
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // Add inputs and uniforms to the map
+ if ((opCode == spv::OpVariable && asWordCount(start) == 4) &&
+ (spv[start+3] == spv::StorageClassUniform ||
+ spv[start+3] == spv::StorageClassUniformConstant ||
+ spv[start+3] == spv::StorageClassInput))
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpAccessChain && fnLocalVars.count(asId(start+3)) > 0)
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) {
+ idMap[asId(start+2)] = asId(start+3);
+ stripInst(start);
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // EXPERIMENTAL: Implicit output stores
+ fnLocalVars.clear();
+ idMap.clear();
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // Add inputs and uniforms to the map
+ if ((opCode == spv::OpVariable && asWordCount(start) == 4) &&
+ (spv[start+3] == spv::StorageClassOutput))
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) {
+ idMap[asId(start+2)] = asId(start+1);
+ stripInst(start);
+ }
+
+ return false;
+ },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ process(
+ inst_fn_nop,
+ [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ strip(); // strip out data we decided to eliminate
+ }
+
+ // optimize loads and stores
+ void spirvbin_t::optLoadStore()
+ {
+ idset_t fnLocalVars; // candidates for removal (only locals)
+ idmap_t idMap; // Map of load result IDs to what they load
+ blockmap_t blockMap; // Map of IDs to blocks they first appear in
+ int blockNum = 0; // block count, to avoid crossing flow control
+
+ // Find all the function local pointers stored at most once, and not via access chains
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ const int wordCount = asWordCount(start);
+
+ // Count blocks, so we can avoid crossing flow control
+ if (isFlowCtrl(opCode))
+ ++blockNum;
+
+ // Add local variables to the map
+ if ((opCode == spv::OpVariable && spv[start+3] == spv::StorageClassFunction && asWordCount(start) == 4)) {
+ fnLocalVars.insert(asId(start+2));
+ return true;
+ }
+
+ // Ignore process vars referenced via access chain
+ if ((opCode == spv::OpAccessChain || opCode == spv::OpInBoundsAccessChain) && fnLocalVars.count(asId(start+3)) > 0) {
+ fnLocalVars.erase(asId(start+3));
+ idMap.erase(asId(start+3));
+ return true;
+ }
+
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) {
+ const spv::Id varId = asId(start+3);
+
+ // Avoid loads before stores
+ if (idMap.find(varId) == idMap.end()) {
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // don't do for volatile references
+ if (wordCount > 4 && (spv[start+4] & spv::MemoryAccessVolatileMask)) {
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // Handle flow control
+ if (blockMap.find(varId) == blockMap.end()) {
+ blockMap[varId] = blockNum; // track block we found it in.
+ } else if (blockMap[varId] != blockNum) {
+ fnLocalVars.erase(varId); // Ignore if crosses flow control
+ idMap.erase(varId);
+ }
+
+ return true;
+ }
+
+ if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) {
+ const spv::Id varId = asId(start+1);
+
+ if (idMap.find(varId) == idMap.end()) {
+ idMap[varId] = asId(start+2);
+ } else {
+ // Remove if it has more than one store to the same pointer
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // don't do for volatile references
+ if (wordCount > 3 && (spv[start+3] & spv::MemoryAccessVolatileMask)) {
+ fnLocalVars.erase(asId(start+3));
+ idMap.erase(asId(start+3));
+ }
+
+ // Handle flow control
+ if (blockMap.find(varId) == blockMap.end()) {
+ blockMap[varId] = blockNum; // track block we found it in.
+ } else if (blockMap[varId] != blockNum) {
+ fnLocalVars.erase(varId); // Ignore if crosses flow control
+ idMap.erase(varId);
+ }
+
+ return true;
+ }
+
+ return false;
+ },
+
+ // If local var id used anywhere else, don't eliminate
+ [&](spv::Id& id) {
+ if (fnLocalVars.count(id) > 0) {
+ fnLocalVars.erase(id);
+ idMap.erase(id);
+ }
+ }
+ );
+
+ if (errorLatch)
+ return;
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0)
+ idMap[asId(start+2)] = idMap[asId(start+3)];
+ return false;
+ },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ // Chase replacements to their origins, in case there is a chain such as:
+ // 2 = store 1
+ // 3 = load 2
+ // 4 = store 3
+ // 5 = load 4
+ // We want to replace uses of 5 with 1.
+ for (const auto& idPair : idMap) {
+ spv::Id id = idPair.first;
+ while (idMap.find(id) != idMap.end()) // Chase to end of chain
+ id = idMap[id];
+
+ idMap[idPair.first] = id; // replace with final result
+ }
+
+ // Remove the load/store/variables for the ones we've discovered
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if ((opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) ||
+ (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) ||
+ (opCode == spv::OpVariable && fnLocalVars.count(asId(start+2)) > 0)) {
+
+ stripInst(start);
+ return true;
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) {
+ if (idMap.find(id) != idMap.end()) id = idMap[id];
+ }
+ );
+
+ if (errorLatch)
+ return;
+
+ strip(); // strip out data we decided to eliminate
+ }
+
+ // remove bodies of uncalled functions
+ void spirvbin_t::dceFuncs()
+ {
+ msg(3, 2, std::string("Removing Dead Functions: "));
+
+ // TODO: There are more efficient ways to do this.
+ bool changed = true;
+
+ while (changed) {
+ changed = false;
+
+ for (auto fn = fnPos.begin(); fn != fnPos.end(); ) {
+ if (fn->first == entryPoint) { // don't DCE away the entry point!
+ ++fn;
+ continue;
+ }
+
+ const auto call_it = fnCalls.find(fn->first);
+
+ if (call_it == fnCalls.end() || call_it->second == 0) {
+ changed = true;
+ stripRange.push_back(fn->second);
+
+ // decrease counts of called functions
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::Op::OpFunctionCall) {
+ const auto call_it = fnCalls.find(asId(start + 3));
+ if (call_it != fnCalls.end()) {
+ if (--call_it->second <= 0)
+ fnCalls.erase(call_it);
+ }
+ }
+
+ return true;
+ },
+ op_fn_nop,
+ fn->second.first,
+ fn->second.second);
+
+ if (errorLatch)
+ return;
+
+ fn = fnPos.erase(fn);
+ } else ++fn;
+ }
+ }
+ }
+
+ // remove unused function variables + decorations
+ void spirvbin_t::dceVars()
+ {
+ msg(3, 2, std::string("DCE Vars: "));
+
+ std::unordered_map<spv::Id, int> varUseCount;
+
+ // Count function variable use
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::OpVariable) {
+ ++varUseCount[asId(start+2)];
+ return true;
+ } else if (opCode == spv::OpEntryPoint) {
+ const int wordCount = asWordCount(start);
+ for (int i = 4; i < wordCount; i++) {
+ ++varUseCount[asId(start+i)];
+ }
+ return true;
+ } else
+ return false;
+ },
+
+ [&](spv::Id& id) { if (varUseCount[id]) ++varUseCount[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // Remove single-use function variables + associated decorations and names
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ spv::Id id = spv::NoResult;
+ if (opCode == spv::OpVariable)
+ id = asId(start+2);
+ if (opCode == spv::OpDecorate || opCode == spv::OpName)
+ id = asId(start+1);
+
+ if (id != spv::NoResult && varUseCount[id] == 1)
+ stripInst(start);
+
+ return true;
+ },
+ op_fn_nop);
+ }
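+ // Note on the use counts in dceVars() above: a variable's count starts at 1
+ // for its own OpVariable declaration, so a final count of exactly 1 means no
+ // load, store, or entry-point interface ever referenced it. For example:
+ //
+ //     %v = OpVariable %ptr Function   ; counted once (the declaration itself)
+ //     ...no other use of %v...        ; count stays 1 -> stripped, along with
+ //                                     ; any OpName/OpDecorate that targets %v
+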
+ // remove unused types
+ void spirvbin_t::dceTypes()
+ {
+ std::vector<bool> isType(bound(), false);
+
+ // for speed, make O(1) way to get to type query (map is log(n))
+ for (const auto typeStart : typeConstPos)
+ isType[asTypeConstId(typeStart)] = true;
+
+ std::unordered_map<spv::Id, int> typeUseCount;
+
+ // This is not the most efficient algorithm, but this is an offline tool, and
+ // it's easy to write this way. Can be improved opportunistically if needed.
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ strip();
+ typeUseCount.clear();
+
+ // Count total type usage
+ process(inst_fn_nop,
+ [&](spv::Id& id) { if (isType[id]) ++typeUseCount[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // Remove single reference types
+ for (const auto typeStart : typeConstPos) {
+ const spv::Id typeId = asTypeConstId(typeStart);
+ if (typeUseCount[typeId] == 1) {
+ changed = true;
+ --typeUseCount[typeId];
+ stripInst(typeStart);
+ }
+ }
+
+ if (errorLatch)
+ return;
+ }
+ }
+
+#ifdef NOTDEF
+ bool spirvbin_t::matchType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const
+ {
+ // Find the local type id "lt" and global type id "gt"
+ const auto lt_it = typeConstPosR.find(lt);
+ if (lt_it == typeConstPosR.end())
+ return false;
+
+ const auto typeStart = lt_it->second;
+
+ // Search for entry in global table
+ const auto gtype = globalTypes.find(gt);
+ if (gtype == globalTypes.end())
+ return false;
+
+ const auto& gdata = gtype->second;
+
+ // local wordcount and opcode
+ const int wordCount = asWordCount(typeStart);
+ const spv::Op opCode = asOpCode(typeStart);
+
+ // no type match if opcodes don't match, or operand count doesn't match
+ if (opCode != opOpCode(gdata[0]) || wordCount != opWordCount(gdata[0]))
+ return false;
+
+ const unsigned numOperands = wordCount - 2; // all types have a result
+
+ const auto cmpIdRange = [&](range_t range) {
+ for (int x = range.first; x < std::min(int(range.second), int(numOperands)); ++x)
+ if (!matchType(globalTypes, asId(typeStart+x), gdata[x]))
+ return false;
+ return true;
+ };
+
+ return cmpIdRange(typeRange(opCode)) && cmpIdRange(constRange(opCode)) && cmpIdRange(literalRange(opCode));
+ }
+
+ // Look for an equivalent type in the globalTypes map
+ spv::Id spirvbin_t::findType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt) const
+ {
+ // Try a recursive type match on each in turn, and return a match if we find one
+ for (const auto& gt : globalTypes)
+ if (matchType(globalTypes, lt, gt.first))
+ return gt.first;
+
+ return spv::NoType;
+ }
+#endif // NOTDEF
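+ // Worked example for hashType() below (values follow directly from the case
+ // arms): a 32-bit float hashes to 5, so a 4-component float vector hashes to
+ // 6 + 5 * (4 - 1) = 21. Any module declaring the equivalent of 'vec4' thus
+ // lands on the same canonical value, which is what lets mapTypeConst() give
+ // structurally identical types the same ID across modules.
+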
+ // Hash types to canonical values. This can return ID collisions (it's a bit
+ // inevitable): it's up to the caller to handle that gracefully.
+ std::uint32_t spirvbin_t::hashType(unsigned typeStart) const
+ {
+ const unsigned wordCount = asWordCount(typeStart);
+ const spv::Op opCode = asOpCode(typeStart);
+
+ switch (opCode) {
+ case spv::OpTypeVoid: return 0;
+ case spv::OpTypeBool: return 1;
+ case spv::OpTypeInt: return 3 + (spv[typeStart+3]);
+ case spv::OpTypeFloat: return 5;
+ case spv::OpTypeVector:
+ return 6 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
+ case spv::OpTypeMatrix:
+ return 30 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
+ case spv::OpTypeImage:
+ return 120 + hashType(idPos(spv[typeStart+2])) +
+ spv[typeStart+3] + // dimensionality
+ spv[typeStart+4] * 8 * 16 + // depth
+ spv[typeStart+5] * 4 * 16 + // arrayed
+ spv[typeStart+6] * 2 * 16 + // multisampled
+ spv[typeStart+7] * 1 * 16; // format
+ case spv::OpTypeSampler:
+ return 500;
+ case spv::OpTypeSampledImage:
+ return 502;
+ case spv::OpTypeArray:
+ return 501 + hashType(idPos(spv[typeStart+2])) * spv[typeStart+3];
+ case spv::OpTypeRuntimeArray:
+ return 5000 + hashType(idPos(spv[typeStart+2]));
+ case spv::OpTypeStruct:
+ {
+ std::uint32_t hash = 10000;
+ for (unsigned w=2; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+
+ case spv::OpTypeOpaque: return 6000 + spv[typeStart+2];
+ case spv::OpTypePointer: return 100000 + hashType(idPos(spv[typeStart+3]));
+ case spv::OpTypeFunction:
+ {
+ std::uint32_t hash = 200000;
+ for (unsigned w=2; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+
+ case spv::OpTypeEvent: return 300000;
+ case spv::OpTypeDeviceEvent: return 300001;
+ case spv::OpTypeReserveId: return 300002;
+ case spv::OpTypeQueue: return 300003;
+ case spv::OpTypePipe: return 300004;
+ case spv::OpConstantTrue: return 300007;
+ case spv::OpConstantFalse: return 300008;
+ case spv::OpConstantComposite:
+ {
+ std::uint32_t hash = 300011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+ case spv::OpConstant:
+ {
+ std::uint32_t hash = 400011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * spv[typeStart+w];
+ return hash;
+ }
+ case spv::OpConstantNull:
+ {
+ std::uint32_t hash = 500009 + hashType(idPos(spv[typeStart+1]));
+ return hash;
+ }
+ case spv::OpConstantSampler:
+ {
+ std::uint32_t hash = 600011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * spv[typeStart+w];
+ return hash;
+ }
+
+ default:
+ error("unknown type opcode");
+ return 0;
+ }
+ }
+
+ void spirvbin_t::mapTypeConst()
+ {
+ globaltypes_t globalTypeMap;
+
+ msg(3, 2, std::string("Remapping Consts & Types: "));
+
+ static const std::uint32_t softTypeIdLimit = 3011; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 8; // offset into ID space
+
+ for (auto& typeStart : typeConstPos) {
+ const spv::Id resId = asTypeConstId(typeStart);
+ const std::uint32_t hashval = hashType(typeStart);
+
+ if (errorLatch)
+ return;
+
+ if (isOldIdUnmapped(resId)) {
+ localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+ }
+ }
+
+ // Strip a single binary by removing ranges given in stripRange
+ void spirvbin_t::strip()
+ {
+ if (stripRange.empty()) // nothing to do
+ return;
+
+ // Sort strip ranges in order of traversal
+ std::sort(stripRange.begin(), stripRange.end());
+
+ // Allocate a new binary big enough to hold old binary
+ // We'll step this iterator through the strip ranges as we go through the binary
+ auto strip_it = stripRange.begin();
+
+ int strippedPos = 0;
+ for (unsigned word = 0; word < unsigned(spv.size()); ++word) {
+ while (strip_it != stripRange.end() && word >= strip_it->second)
+ ++strip_it;
+
+ if (strip_it == stripRange.end() || word < strip_it->first || word >= strip_it->second)
+ spv[strippedPos++] = spv[word];
+ }
+
+ spv.resize(strippedPos);
+ stripRange.clear();
+
+ buildLocalMaps();
+ }
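+ // A minimal usage sketch of the two remap() overloads below, assuming the
+ // caller already holds the module as words in memory (the 'loadSpv' helper
+ // here is hypothetical, not part of this file):
+ //
+ //     std::vector<std::uint32_t> words = loadSpv("shader.spv");
+ //     spv::spirvbin_t(0).remap(words, spv::spirvbin_t::DO_EVERYTHING);
+ //     // 'words' now holds the stripped, DCE'd, remapped module in place.
+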
+ // Remap a single binary, according to the options in 'opts'
+ void spirvbin_t::remap(std::uint32_t opts)
+ {
+ options = opts;
+
+ // Set up opcode tables from SpvDoc
+ spv::Parameterize();
+
+ validate(); // validate header
+ buildLocalMaps(); // build ID maps
+
+ msg(3, 4, std::string("ID bound: ") + std::to_string(bound()));
+
+ if (options & STRIP) stripDebug();
+ if (errorLatch) return;
+
+ strip(); // strip out data we decided to eliminate
+ if (errorLatch) return;
+
+ if (options & OPT_LOADSTORE) optLoadStore();
+ if (errorLatch) return;
+
+ if (options & OPT_FWD_LS) forwardLoadStores();
+ if (errorLatch) return;
+
+ if (options & DCE_FUNCS) dceFuncs();
+ if (errorLatch) return;
+
+ if (options & DCE_VARS) dceVars();
+ if (errorLatch) return;
+
+ if (options & DCE_TYPES) dceTypes();
+ if (errorLatch) return;
+
+ strip(); // strip out data we decided to eliminate
+ if (errorLatch) return;
+
+ stripDeadRefs(); // remove references to things we DCEed
+ if (errorLatch) return;
+
+ // after the last strip, we must clean any debug info referring to now-deleted data
+
+ if (options & MAP_TYPES) mapTypeConst();
+ if (errorLatch) return;
+
+ if (options & MAP_NAMES) mapNames();
+ if (errorLatch) return;
+
+ if (options & MAP_FUNCS) mapFnBodies();
+ if (errorLatch) return;
+
+ if (options & MAP_ALL) {
+ mapRemainder(); // map any unmapped IDs
+ if (errorLatch) return;
+
+ applyMap(); // Now remap each shader to the new IDs we've come up with
+ if (errorLatch) return;
+ }
+ }
+
+ // remap from a memory image
+ void spirvbin_t::remap(std::vector<std::uint32_t>& in_spv, std::uint32_t opts)
+ {
+ spv.swap(in_spv);
+ remap(opts);
+ spv.swap(in_spv);
+ }
+
+} // namespace SPV
+
+#endif // defined (use_cpp11)
+
diff --git a/dep/glslang/SPIRV/SPVRemapper.h b/dep/glslang/SPIRV/SPVRemapper.h
new file mode 100644
index 000000000..d6b9c346d
--- /dev/null
+++ b/dep/glslang/SPIRV/SPVRemapper.h
@@ -0,0 +1,304 @@
+//
+// Copyright (C) 2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef SPIRVREMAPPER_H
+#define SPIRVREMAPPER_H
+
+#include <string>
+#include <vector>
+#include <cstdlib>
+#include <exception>
+
+namespace spv {
+
+// MSVC defines __cplusplus as an older value, even when it supports almost all of 11.
+// We handle that here by making our own symbol.
+#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1700)
+# define use_cpp11 1
+#endif
+
+class spirvbin_base_t
+{
+public:
+ enum Options {
+ NONE = 0,
+ STRIP = (1<<0),
+ MAP_TYPES = (1<<1),
+ MAP_NAMES = (1<<2),
+ MAP_FUNCS = (1<<3),
+ DCE_FUNCS = (1<<4),
+ DCE_VARS = (1<<5),
+ DCE_TYPES = (1<<6),
+ OPT_LOADSTORE = (1<<7),
+ OPT_FWD_LS = (1<<8), // EXPERIMENTAL: PRODUCES INVALID SCHEMA-0 SPIRV
+ MAP_ALL = (MAP_TYPES | MAP_NAMES | MAP_FUNCS),
+ DCE_ALL = (DCE_FUNCS | DCE_VARS | DCE_TYPES),
+ OPT_ALL = (OPT_LOADSTORE),
+
+ ALL_BUT_STRIP = (MAP_ALL | DCE_ALL | OPT_ALL),
+ DO_EVERYTHING = (STRIP | ALL_BUT_STRIP)
+ };
+};
+
+} // namespace SPV
+
+#if !defined (use_cpp11)
+#include <cstdio>
+#include <cstdint>
+
+namespace spv {
+class spirvbin_t : public spirvbin_base_t
+{
+public:
+ spirvbin_t(int /*verbose = 0*/) { }
+
+ void remap(std::vector<std::uint32_t>& /*spv*/, unsigned int /*opts = 0*/)
+ {
+ printf("Tool not compiled for C++11, which is required for SPIR-V remapping.\n");
+ exit(5);
+ }
+};
+
+} // namespace SPV
+
+#else // defined (use_cpp11)
+
+#include <functional>
+#include <cstdint>
+#include <unordered_map>
+#include <unordered_set>
+#include <map>
+#include <set>
+#include <cassert>
+
+#include "spirv.hpp"
+#include "spvIR.h"
+
+namespace spv {
+
+// class to hold SPIR-V binary data for remapping, DCE, and debug stripping
+class spirvbin_t : public spirvbin_base_t
+{
+public:
+ spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose), errorLatch(false)
+ { }
+
+ virtual ~spirvbin_t() { }
+
+ // remap on an existing binary in memory
+ void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
+
+ // Type for error/log handler functions
+ typedef std::function<void(const std::string&)> errorfn_t;
+ typedef std::function<void(const std::string&)> logfn_t;
+
+ // Register error/log handling functions (can be lambda fn / functor / etc)
+ static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
+ static void registerLogHandler(logfn_t handler) { logHandler = handler; }
+
+protected:
+ // This can be overridden to provide other message behavior if needed
+ virtual void msg(int minVerbosity, int indent, const std::string& txt) const;
+
+private:
+ // Local to global, or global to local ID map
+ typedef std::unordered_map<spv::Id, spv::Id> idmap_t;
+ typedef std::unordered_set<spv::Id> idset_t;
+ typedef std::unordered_map<spv::Id, int> blockmap_t;
+
+ void remap(std::uint32_t opts = DO_EVERYTHING);
+
+ // Map of names to IDs
+ typedef std::unordered_map<std::string, spv::Id> namemap_t;
+
+ typedef std::uint32_t spirword_t;
+
+ typedef std::pair<unsigned, unsigned> range_t;
+ typedef std::function<void(spv::Id&)> idfn_t;
+ typedef std::function<bool(spv::Op, unsigned start)> instfn_t;
+
+ // Special Values for ID map:
+ static const spv::Id unmapped; // unchanged from default value
+ static const spv::Id unused; // unused ID
+ static const int header_size; // SPIR header = 5 words
+
+ class id_iterator_t;
+
+ // For mapping type entries between different shaders
+ typedef std::vector<spirword_t> typeentry_t;
+ typedef std::map<spv::Id, typeentry_t> globaltypes_t;
+
+ // A set that preserves position order, and a reverse map
+ typedef std::set<unsigned> posmap_t;
+ typedef std::unordered_map<spv::Id, unsigned> posmap_rev_t;
+
+ // Maps an ID to the size of its base type, if known.
+ typedef std::unordered_map<spv::Id, unsigned> typesize_map_t;
+
+ // handle error
+ void error(const std::string& txt) const { errorLatch = true; errorHandler(txt); }
+
+ bool isConstOp(spv::Op opCode) const;
+ bool isTypeOp(spv::Op opCode) const;
+ bool isStripOp(spv::Op opCode) const;
+ bool isFlowCtrl(spv::Op opCode) const;
+ range_t literalRange(spv::Op opCode) const;
+ range_t typeRange(spv::Op opCode) const;
+ range_t constRange(spv::Op opCode) const;
+ unsigned typeSizeInWords(spv::Id id) const;
+ unsigned idTypeSizeInWords(spv::Id id) const;
+
+ spv::Id& asId(unsigned word) { return spv[word]; }
+ const spv::Id& asId(unsigned word) const { return spv[word]; }
+ spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); }
+ std::uint32_t asOpCodeHash(unsigned word);
+ spv::Decoration asDecoration(unsigned word) const { return spv::Decoration(spv[word]); }
+ unsigned asWordCount(unsigned word) const { return opWordCount(spv[word]); }
+ spv::Id asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); }
+ unsigned idPos(spv::Id id) const;
+
+ static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
+ static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); }
+
+ // Header access & set methods
+ spirword_t magic() const { return spv[0]; } // return magic number
+ spirword_t bound() const { return spv[3]; } // return Id bound from header
+ spirword_t bound(spirword_t b) { return spv[3] = b; }
+ spirword_t genmagic() const { return spv[2]; } // generator magic
+ spirword_t genmagic(spirword_t m) { return spv[2] = m; }
+ spirword_t schemaNum() const { return spv[4]; } // schema number from header
+
+ // Mapping fns: get
+ spv::Id localId(spv::Id id) const { return idMapL[id]; }
+
+ // Mapping fns: set
+ inline spv::Id localId(spv::Id id, spv::Id newId);
+ void countIds(spv::Id id);
+
+ // Return next unused new local ID.
+ // NOTE: boost::dynamic_bitset would be more efficient due to find_next(),
+ // which std::vector<bool> doesn't have.
+ inline spv::Id nextUnusedId(spv::Id id);
+
+ void buildLocalMaps();
+ std::string literalString(unsigned word) const; // Return literal as a std::string
+ int literalStringWords(const std::string& str) const { return (int(str.size())+4)/4; }
+
+ bool isNewIdMapped(spv::Id newId) const { return isMapped(newId); }
+ bool isOldIdUnmapped(spv::Id oldId) const { return localId(oldId) == unmapped; }
+ bool isOldIdUnused(spv::Id oldId) const { return localId(oldId) == unused; }
+ bool isOldIdMapped(spv::Id oldId) const { return !isOldIdUnused(oldId) && !isOldIdUnmapped(oldId); }
+ bool isFunction(spv::Id oldId) const { return fnPos.find(oldId) != fnPos.end(); }
+
+ // bool matchType(const globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const;
+ // spv::Id findType(const globaltypes_t& globalTypes, spv::Id lt) const;
+ std::uint32_t hashType(unsigned typeStart) const;
+
+ spirvbin_t& process(instfn_t, idfn_t, unsigned begin = 0, unsigned end = 0);
+ int processInstruction(unsigned word, instfn_t, idfn_t);
+
+ void validate() const;
+ void mapTypeConst();
+ void mapFnBodies();
+ void optLoadStore();
+ void dceFuncs();
+ void dceVars();
+ void dceTypes();
+ void mapNames();
+ void foldIds(); // fold IDs to smallest space
+ void forwardLoadStores(); // load store forwarding (EXPERIMENTAL)
+ void offsetIds(); // create relative offset IDs
+
+ void applyMap(); // remap per local name map
+ void mapRemainder(); // map any IDs we haven't touched yet
+ void stripDebug(); // strip all debug info
+ void stripDeadRefs(); // strips debug info for now-dead references after DCE
+ void strip(); // remove debug symbols
+
+ std::vector<spirword_t> spv; // SPIR words
+
+ namemap_t nameMap; // ID names from OpName
+
+ // Since we want to also do binary ops, we can't use std::vector<bool>. we could use
+ // boost::dynamic_bitset, but we're trying to avoid a boost dependency.
+ typedef std::uint64_t bits_t;
+ std::vector<bits_t> mapped; // which new IDs have been mapped
+ static const int mBits = sizeof(bits_t) * 4;
+
+ bool isMapped(spv::Id id) const { return id < maxMappedId() && ((mapped[id/mBits] & (1LL<<(id%mBits))) != 0); }
+ void setMapped(spv::Id id) { resizeMapped(id); mapped[id/mBits] |= (1LL<<(id%mBits)); }
+ void resizeMapped(spv::Id id) { if (id >= maxMappedId()) mapped.resize(id/mBits+1, 0); }
+ size_t maxMappedId() const { return mapped.size() * mBits; }
+
+ // Add a strip range for a given instruction starting at 'start'
+ // Note: avoiding brace initializers to please older versions of MSVC.
+ void stripInst(unsigned start) { stripRange.push_back(range_t(start, start + asWordCount(start))); }
+
+ // Function start and end. use unordered_map because we'll have
+ // many fewer functions than IDs.
+ std::unordered_map<spv::Id, range_t> fnPos;
+
+ // Which functions are called, anywhere in the module, with a call count
+ std::unordered_map<spv::Id, int> fnCalls;
+
+ posmap_t typeConstPos; // word positions that define types & consts (ordered)
+ posmap_rev_t idPosR; // reverse map from IDs to positions
+ typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.
+
+ std::vector<spv::Id> idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs
+
+ spv::Id entryPoint; // module entry point
+ spv::Id largestNewId; // biggest new ID we have mapped anything to
+
+ // Sections of the binary to strip, given as [begin,end)
+ std::vector<range_t> stripRange;
+
+ // processing options:
+ std::uint32_t options;
+ int verbose; // verbosity level
+
+ // Error latch: this is set if the error handler is ever executed. It would be better to
+ // use a try/catch block and throw, but that's not desired for certain environments, so
+ // this is the alternative.
+ mutable bool errorLatch;
+
+ static errorfn_t errorHandler;
+ static logfn_t logHandler;
+};
+
+} // namespace SPV
+
+#endif // defined (use_cpp11)
+#endif // SPIRVREMAPPER_H
diff --git a/dep/glslang/SPIRV/SpvBuilder.cpp b/dep/glslang/SPIRV/SpvBuilder.cpp
new file mode 100644
index 000000000..6cf70a12d
--- /dev/null
+++ b/dep/glslang/SPIRV/SpvBuilder.cpp
@@ -0,0 +1,3127 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Helper for making SPIR-V IR. Generally, this is documented in the header
+// SpvBuilder.h.
+//
+
+#include <cassert>
+#include <cstdlib>
+
+#include <unordered_set>
+#include <algorithm>
+
+#include "SpvBuilder.h"
+
+#ifndef GLSLANG_WEB
+#include "hex_float.h"
+#endif
+
+#ifndef _WIN32
+ #include <cstdio>
+#endif
+
+namespace spv {
+
+Builder::Builder(unsigned int spvVersion, unsigned int magicNumber, SpvBuildLogger* buildLogger) :
+ spvVersion(spvVersion),
+ source(SourceLanguageUnknown),
+ sourceVersion(0),
+ sourceFileStringId(NoResult),
+ currentLine(0),
+ currentFile(nullptr),
+ emitOpLines(false),
+ addressModel(AddressingModelLogical),
+ memoryModel(MemoryModelGLSL450),
+ builderNumber(magicNumber),
+ buildPoint(0),
+ uniqueId(0),
+ entryPointFunction(0),
+ generatingOpCodeForSpecConst(false),
+ logger(buildLogger)
+{
+ clearAccessChain();
+}
+
+Builder::~Builder()
+{
+}
+
+Id Builder::import(const char* name)
+{
+ Instruction* import = new Instruction(getUniqueId(), NoType, OpExtInstImport);
+ import->addStringOperand(name);
+ module.mapInstruction(import);
+
+ imports.push_back(std::unique_ptr<Instruction>(import));
+ return import->getResultId();
+}
+
+// Emit instruction for non-filename-based #line directives (ie. no filename
+// seen yet): emit an OpLine if we've been asked to emit OpLines and the line
+// number has changed since the last time, and is a valid line number.
+void Builder::setLine(int lineNum)
+{
+ if (lineNum != 0 && lineNum != currentLine) {
+ currentLine = lineNum;
+ if (emitOpLines)
+ addLine(sourceFileStringId, currentLine, 0);
+ }
+}
+
+// If no filename, do non-filename-based #line emit. Else do filename-based emit.
+// Emit OpLine if we've been asked to emit OpLines and the line number or filename
+// has changed since the last time, and line number is valid.
+void Builder::setLine(int lineNum, const char* filename)
+{
+ if (filename == nullptr) {
+ setLine(lineNum);
+ return;
+ }
+ if ((lineNum != 0 && lineNum != currentLine) || currentFile == nullptr ||
+ strncmp(filename, currentFile, strlen(currentFile) + 1) != 0) {
+ currentLine = lineNum;
+ currentFile = filename;
+ if (emitOpLines) {
+ spv::Id strId = getStringId(filename);
+ addLine(strId, currentLine, 0);
+ }
+ }
+}
+
+void Builder::addLine(Id fileName, int lineNum, int column)
+{
+ Instruction* line = new Instruction(OpLine);
+ line->addIdOperand(fileName);
+ line->addImmediateOperand(lineNum);
+ line->addImmediateOperand(column);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(line));
+}
+
+// For creating new groupedTypes (will return old type if the requested one was already made).
+Id Builder::makeVoidType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeVoid].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeVoid);
+ groupedTypes[OpTypeVoid].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeVoid].back();
+
+ return type->getResultId();
+}
+
+Id Builder::makeBoolType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeBool].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeBool);
+ groupedTypes[OpTypeBool].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeBool].back();
+
+ return type->getResultId();
+}
+
+Id Builder::makeSamplerType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeSampler].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeSampler);
+ groupedTypes[OpTypeSampler].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeSampler].back();
+
+ return type->getResultId();
+}
+
+Id Builder::makePointer(StorageClass storageClass, Id pointee)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
+ type = groupedTypes[OpTypePointer][t];
+ if (type->getImmediateOperand(0) == (unsigned)storageClass &&
+ type->getIdOperand(1) == pointee)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypePointer);
+ type->addImmediateOperand(storageClass);
+ type->addIdOperand(pointee);
+ groupedTypes[OpTypePointer].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
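+// The find-or-create pattern above (shared by all the make*Type() methods)
+// is what upholds SPIR-V's rule that equivalent non-aggregate types must be
+// declared only once. An illustrative consequence, assuming a Builder
+// instance named 'builder':
+//
+//     Id a = builder.makePointer(StorageClassFunction, builder.makeBoolType());
+//     Id b = builder.makePointer(StorageClassFunction, builder.makeBoolType());
+//     // a == b: the second call found the cached OpTypePointer
+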
+Id Builder::makeForwardPointer(StorageClass storageClass)
+{
+ // Caching/uniquifying doesn't work here, because we don't know the
+ // pointee type and there can be multiple forward pointers of the same
+ // storage type. Somebody higher up in the stack must keep track.
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeForwardPointer);
+ type->addImmediateOperand(storageClass);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardPointerType, Id pointee)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
+ type = groupedTypes[OpTypePointer][t];
+ if (type->getImmediateOperand(0) == (unsigned)storageClass &&
+ type->getIdOperand(1) == pointee)
+ return type->getResultId();
+ }
+
+ type = new Instruction(forwardPointerType, NoType, OpTypePointer);
+ type->addImmediateOperand(storageClass);
+ type->addIdOperand(pointee);
+ groupedTypes[OpTypePointer].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeIntegerType(int width, bool hasSign)
+{
+#ifdef GLSLANG_WEB
+ assert(width == 32);
+ width = 32;
+#endif
+
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) {
+ type = groupedTypes[OpTypeInt][t];
+ if (type->getImmediateOperand(0) == (unsigned)width &&
+ type->getImmediateOperand(1) == (hasSign ? 1u : 0u))
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeInt);
+ type->addImmediateOperand(width);
+ type->addImmediateOperand(hasSign ? 1 : 0);
+ groupedTypes[OpTypeInt].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ // deal with capabilities
+ switch (width) {
+ case 8:
+ case 16:
+ // these are currently handled by storage-type declarations and post processing
+ break;
+ case 64:
+ addCapability(CapabilityInt64);
+ break;
+ default:
+ break;
+ }
+
+ return type->getResultId();
+}
+
+Id Builder::makeFloatType(int width)
+{
+#ifdef GLSLANG_WEB
+ assert(width == 32);
+ width = 32;
+#endif
+
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) {
+ type = groupedTypes[OpTypeFloat][t];
+ if (type->getImmediateOperand(0) == (unsigned)width)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeFloat);
+ type->addImmediateOperand(width);
+ groupedTypes[OpTypeFloat].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ // deal with capabilities
+ switch (width) {
+ case 16:
+ // currently handled by storage-type declarations and post processing
+ break;
+ case 64:
+ addCapability(CapabilityFloat64);
+ break;
+ default:
+ break;
+ }
+
+ return type->getResultId();
+}
+// Make a struct without checking for duplication.
+// See makeStructResultType() for non-decorated structs
+// needed as the result of some instructions, which does
+// check for duplicates.
+Id Builder::makeStructType(const std::vector<Id>& members, const char* name)
+{
+ // Don't look for previous one, because in the general case,
+ // structs can be duplicated except for decorations.
+
+ // not found, make it
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeStruct);
+ for (int op = 0; op < (int)members.size(); ++op)
+ type->addIdOperand(members[op]);
+ groupedTypes[OpTypeStruct].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ addName(type->getResultId(), name);
+
+ return type->getResultId();
+}
+
+// Make a struct for the simple results of several instructions,
+// checking for duplication.
+Id Builder::makeStructResultType(Id type0, Id type1)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeStruct].size(); ++t) {
+ type = groupedTypes[OpTypeStruct][t];
+ if (type->getNumOperands() != 2)
+ continue;
+ if (type->getIdOperand(0) != type0 ||
+ type->getIdOperand(1) != type1)
+ continue;
+ return type->getResultId();
+ }
+
+ // not found, make it
+ std::vector<Id> members;
+ members.push_back(type0);
+ members.push_back(type1);
+
+ return makeStructType(members, "ResType");
+}
+
+Id Builder::makeVectorType(Id component, int size)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeVector].size(); ++t) {
+ type = groupedTypes[OpTypeVector][t];
+ if (type->getIdOperand(0) == component &&
+ type->getImmediateOperand(1) == (unsigned)size)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeVector);
+ type->addIdOperand(component);
+ type->addImmediateOperand(size);
+ groupedTypes[OpTypeVector].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeMatrixType(Id component, int cols, int rows)
+{
+ assert(cols <= maxMatrixSize && rows <= maxMatrixSize);
+
+ Id column = makeVectorType(component, rows);
+
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeMatrix].size(); ++t) {
+ type = groupedTypes[OpTypeMatrix][t];
+ if (type->getIdOperand(0) == column &&
+ type->getImmediateOperand(1) == (unsigned)cols)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeMatrix);
+ type->addIdOperand(column);
+ type->addImmediateOperand(cols);
+ groupedTypes[OpTypeMatrix].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeCooperativeMatrixNV].size(); ++t) {
+ type = groupedTypes[OpTypeCooperativeMatrixNV][t];
+ if (type->getIdOperand(0) == component &&
+ type->getIdOperand(1) == scope &&
+ type->getIdOperand(2) == rows &&
+ type->getIdOperand(3) == cols)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeCooperativeMatrixNV);
+ type->addIdOperand(component);
+ type->addIdOperand(scope);
+ type->addIdOperand(rows);
+ type->addIdOperand(cols);
+ groupedTypes[OpTypeCooperativeMatrixNV].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
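+// Unlike vectors and matrices, an array's size operand is itself the <id> of
+// a constant instruction, so building the equivalent of 'float x[8]' is a
+// two-step affair (illustrative, assuming a Builder instance 'builder'):
+//
+//     Id sizeId = builder.makeIntConstant(builder.makeIntegerType(32, false), 8, false);
+//     Id arrTy  = builder.makeArrayType(builder.makeFloatType(32), sizeId, 0);
+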
+// TODO: performance: track arrays per stride
+// If a stride is supplied (non-zero) make an array.
+// If no stride (0), reuse previous array types.
+// 'size' is an Id of a constant or specialization constant of the array size
+Id Builder::makeArrayType(Id element, Id sizeId, int stride)
+{
+ Instruction* type;
+ if (stride == 0) {
+ // try to find existing type
+ for (int t = 0; t < (int)groupedTypes[OpTypeArray].size(); ++t) {
+ type = groupedTypes[OpTypeArray][t];
+ if (type->getIdOperand(0) == element &&
+ type->getIdOperand(1) == sizeId)
+ return type->getResultId();
+ }
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeArray);
+ type->addIdOperand(element);
+ type->addIdOperand(sizeId);
+ groupedTypes[OpTypeArray].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeRuntimeArray(Id element)
+{
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeRuntimeArray);
+ type->addIdOperand(element);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeFunctionType(Id returnType, const std::vector<Id>& paramTypes)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeFunction].size(); ++t) {
+ type = groupedTypes[OpTypeFunction][t];
+ if (type->getIdOperand(0) != returnType || (int)paramTypes.size() != type->getNumOperands() - 1)
+ continue;
+ bool mismatch = false;
+ for (int p = 0; p < (int)paramTypes.size(); ++p) {
+ if (paramTypes[p] != type->getIdOperand(p + 1)) {
+ mismatch = true;
+ break;
+ }
+ }
+ if (! mismatch)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeFunction);
+ type->addIdOperand(returnType);
+ for (int p = 0; p < (int)paramTypes.size(); ++p)
+ type->addIdOperand(paramTypes[p]);
+ groupedTypes[OpTypeFunction].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
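+// makeFunctionType() above compares the whole signature, so repeated requests
+// for the same signature coalesce while different ones stay distinct
+// (illustrative, assuming a Builder instance 'builder'):
+//
+//     std::vector<Id> params { builder.makeFloatType(32) };
+//     Id f1 = builder.makeFunctionType(builder.makeVoidType(), params);
+//     Id f2 = builder.makeFunctionType(builder.makeVoidType(), params);
+//     // f1 == f2: same return type and parameter list
+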
+Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, bool ms, unsigned sampled,
+ ImageFormat format)
+{
+ assert(sampled == 1 || sampled == 2);
+
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeImage].size(); ++t) {
+ type = groupedTypes[OpTypeImage][t];
+ if (type->getIdOperand(0) == sampledType &&
+ type->getImmediateOperand(1) == (unsigned int)dim &&
+ type->getImmediateOperand(2) == ( depth ? 1u : 0u) &&
+ type->getImmediateOperand(3) == (arrayed ? 1u : 0u) &&
+ type->getImmediateOperand(4) == ( ms ? 1u : 0u) &&
+ type->getImmediateOperand(5) == sampled &&
+ type->getImmediateOperand(6) == (unsigned int)format)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeImage);
+ type->addIdOperand(sampledType);
+ type->addImmediateOperand( dim);
+ type->addImmediateOperand( depth ? 1 : 0);
+ type->addImmediateOperand(arrayed ? 1 : 0);
+ type->addImmediateOperand( ms ? 1 : 0);
+ type->addImmediateOperand(sampled);
+ type->addImmediateOperand((unsigned int)format);
+
+ groupedTypes[OpTypeImage].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+#ifndef GLSLANG_WEB
+ // deal with capabilities
+ switch (dim) {
+ case DimBuffer:
+ if (sampled == 1)
+ addCapability(CapabilitySampledBuffer);
+ else
+ addCapability(CapabilityImageBuffer);
+ break;
+ case Dim1D:
+ if (sampled == 1)
+ addCapability(CapabilitySampled1D);
+ else
+ addCapability(CapabilityImage1D);
+ break;
+ case DimCube:
+ if (arrayed) {
+ if (sampled == 1)
+ addCapability(CapabilitySampledCubeArray);
+ else
+ addCapability(CapabilityImageCubeArray);
+ }
+ break;
+ case DimRect:
+ if (sampled == 1)
+ addCapability(CapabilitySampledRect);
+ else
+ addCapability(CapabilityImageRect);
+ break;
+ case DimSubpassData:
+ addCapability(CapabilityInputAttachment);
+ break;
+ default:
+ break;
+ }
+
+ if (ms) {
+ if (sampled == 2) {
+ // Images used with subpass data are not storage
+ // images, so don't require the capability for them.
+ if (dim != Dim::DimSubpassData)
+ addCapability(CapabilityStorageImageMultisample);
+ if (arrayed)
+ addCapability(CapabilityImageMSArray);
+ }
+ }
+#endif
+
+ return type->getResultId();
+}
+
+Id Builder::makeSampledImageType(Id imageType)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeSampledImage].size(); ++t) {
+ type = groupedTypes[OpTypeSampledImage][t];
+ if (type->getIdOperand(0) == imageType)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeSampledImage);
+ type->addIdOperand(imageType);
+
+ groupedTypes[OpTypeSampledImage].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+#ifndef GLSLANG_WEB
+Id Builder::makeAccelerationStructureType()
+{
+ Instruction *type;
+ if (groupedTypes[OpTypeAccelerationStructureKHR].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeAccelerationStructureKHR);
+ groupedTypes[OpTypeAccelerationStructureKHR].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else {
+ type = groupedTypes[OpTypeAccelerationStructureKHR].back();
+ }
+
+ return type->getResultId();
+}
+
+Id Builder::makeRayQueryType()
+{
+ Instruction *type;
+ if (groupedTypes[OpTypeRayQueryProvisionalKHR].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeRayQueryProvisionalKHR);
+ groupedTypes[OpTypeRayQueryProvisionalKHR].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else {
+ type = groupedTypes[OpTypeRayQueryProvisionalKHR].back();
+ }
+
+ return type->getResultId();
+}
+#endif
+
+Id Builder::getDerefTypeId(Id resultId) const
+{
+ Id typeId = getTypeId(resultId);
+ assert(isPointerType(typeId));
+
+ return module.getInstruction(typeId)->getIdOperand(1);
+}
+
+Op Builder::getMostBasicTypeClass(Id typeId) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ Op typeClass = instr->getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ return getMostBasicTypeClass(instr->getIdOperand(0));
+ case OpTypePointer:
+ return getMostBasicTypeClass(instr->getIdOperand(1));
+ default:
+ return typeClass;
+ }
+}
+
+int Builder::getNumTypeConstituents(Id typeId) const
+{
+ Instruction*
instr = module.getInstruction(typeId); + + switch (instr->getOpCode()) + { + case OpTypeBool: + case OpTypeInt: + case OpTypeFloat: + case OpTypePointer: + return 1; + case OpTypeVector: + case OpTypeMatrix: + return instr->getImmediateOperand(1); + case OpTypeArray: + { + Id lengthId = instr->getIdOperand(1); + return module.getInstruction(lengthId)->getImmediateOperand(0); + } + case OpTypeStruct: + return instr->getNumOperands(); + case OpTypeCooperativeMatrixNV: + // has only one constituent when used with OpCompositeConstruct. + return 1; + default: + assert(0); + return 1; + } +} + +// Return the lowest-level type of scalar that an homogeneous composite is made out of. +// Typically, this is just to find out if something is made out of ints or floats. +// However, it includes returning a structure, if say, it is an array of structure. +Id Builder::getScalarTypeId(Id typeId) const +{ + Instruction* instr = module.getInstruction(typeId); + + Op typeClass = instr->getOpCode(); + switch (typeClass) + { + case OpTypeVoid: + case OpTypeBool: + case OpTypeInt: + case OpTypeFloat: + case OpTypeStruct: + return instr->getResultId(); + case OpTypeVector: + case OpTypeMatrix: + case OpTypeArray: + case OpTypeRuntimeArray: + case OpTypePointer: + return getScalarTypeId(getContainedTypeId(typeId)); + default: + assert(0); + return NoResult; + } +} + +// Return the type of 'member' of a composite. +Id Builder::getContainedTypeId(Id typeId, int member) const +{ + Instruction* instr = module.getInstruction(typeId); + + Op typeClass = instr->getOpCode(); + switch (typeClass) + { + case OpTypeVector: + case OpTypeMatrix: + case OpTypeArray: + case OpTypeRuntimeArray: + case OpTypeCooperativeMatrixNV: + return instr->getIdOperand(0); + case OpTypePointer: + return instr->getIdOperand(1); + case OpTypeStruct: + return instr->getIdOperand(member); + default: + assert(0); + return NoResult; + } +} + +// Return the immediately contained type of a given composite type. +Id Builder::getContainedTypeId(Id typeId) const +{ + return getContainedTypeId(typeId, 0); +} + +// Returns true if 'typeId' is or contains a scalar type declared with 'typeOp' +// of width 'width'. The 'width' is only consumed for int and float types. +// Returns false otherwise. +bool Builder::containsType(Id typeId, spv::Op typeOp, unsigned int width) const +{ + const Instruction& instr = *module.getInstruction(typeId); + + Op typeClass = instr.getOpCode(); + switch (typeClass) + { + case OpTypeInt: + case OpTypeFloat: + return typeClass == typeOp && instr.getImmediateOperand(0) == width; + case OpTypeStruct: + for (int m = 0; m < instr.getNumOperands(); ++m) { + if (containsType(instr.getIdOperand(m), typeOp, width)) + return true; + } + return false; + case OpTypePointer: + return false; + case OpTypeVector: + case OpTypeMatrix: + case OpTypeArray: + case OpTypeRuntimeArray: + return containsType(getContainedTypeId(typeId), typeOp, width); + default: + return typeClass == typeOp; + } +} + +// return true if the type is a pointer to PhysicalStorageBufferEXT or an +// array of such pointers. These require restrict/aliased decorations. 
+bool Builder::containsPhysicalStorageBufferOrArray(Id typeId) const +{ + const Instruction& instr = *module.getInstruction(typeId); + + Op typeClass = instr.getOpCode(); + switch (typeClass) + { + case OpTypePointer: + return getTypeStorageClass(typeId) == StorageClassPhysicalStorageBufferEXT; + case OpTypeArray: + return containsPhysicalStorageBufferOrArray(getContainedTypeId(typeId)); + default: + return false; + } +} + +// See if a scalar constant of this type has already been created, so it +// can be reused rather than duplicated. (Required by the specification). +Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value) +{ + Instruction* constant; + for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) { + constant = groupedConstants[typeClass][i]; + if (constant->getOpCode() == opcode && + constant->getTypeId() == typeId && + constant->getImmediateOperand(0) == value) + return constant->getResultId(); + } + + return 0; +} + +// Version of findScalarConstant (see above) for scalars that take two operands (e.g. a 'double' or 'int64'). +Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2) +{ + Instruction* constant; + for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) { + constant = groupedConstants[typeClass][i]; + if (constant->getOpCode() == opcode && + constant->getTypeId() == typeId && + constant->getImmediateOperand(0) == v1 && + constant->getImmediateOperand(1) == v2) + return constant->getResultId(); + } + + return 0; +} + +// Return true if consuming 'opcode' means consuming a constant. +// "constant" here means after final transform to executable code, +// the value consumed will be a constant, so includes specialization. +bool Builder::isConstantOpCode(Op opcode) const +{ + switch (opcode) { + case OpUndef: + case OpConstantTrue: + case OpConstantFalse: + case OpConstant: + case OpConstantComposite: + case OpConstantSampler: + case OpConstantNull: + case OpSpecConstantTrue: + case OpSpecConstantFalse: + case OpSpecConstant: + case OpSpecConstantComposite: + case OpSpecConstantOp: + return true; + default: + return false; + } +} + +// Return true if consuming 'opcode' means consuming a specialization constant. +bool Builder::isSpecConstantOpCode(Op opcode) const +{ + switch (opcode) { + case OpSpecConstantTrue: + case OpSpecConstantFalse: + case OpSpecConstant: + case OpSpecConstantComposite: + case OpSpecConstantOp: + return true; + default: + return false; + } +} + +Id Builder::makeBoolConstant(bool b, bool specConstant) +{ + Id typeId = makeBoolType(); + Instruction* constant; + Op opcode = specConstant ? (b ? OpSpecConstantTrue : OpSpecConstantFalse) : (b ? OpConstantTrue : OpConstantFalse); + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! 
specConstant) { + Id existing = 0; + for (int i = 0; i < (int)groupedConstants[OpTypeBool].size(); ++i) { + constant = groupedConstants[OpTypeBool][i]; + if (constant->getTypeId() == typeId && constant->getOpCode() == opcode) + existing = constant->getResultId(); + } + + if (existing) + return existing; + } + + // Make it + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeBool].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Id Builder::makeIntConstant(Id typeId, unsigned value, bool specConstant) +{ + Op opcode = specConstant ? OpSpecConstant : OpConstant; + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! specConstant) { + Id existing = findScalarConstant(OpTypeInt, opcode, typeId, value); + if (existing) + return existing; + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + c->addImmediateOperand(value); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeInt].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Id Builder::makeInt64Constant(Id typeId, unsigned long long value, bool specConstant) +{ + Op opcode = specConstant ? OpSpecConstant : OpConstant; + + unsigned op1 = value & 0xFFFFFFFF; + unsigned op2 = value >> 32; + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! specConstant) { + Id existing = findScalarConstant(OpTypeInt, opcode, typeId, op1, op2); + if (existing) + return existing; + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + c->addImmediateOperand(op1); + c->addImmediateOperand(op2); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeInt].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Id Builder::makeFloatConstant(float f, bool specConstant) +{ + Op opcode = specConstant ? OpSpecConstant : OpConstant; + Id typeId = makeFloatType(32); + union { float fl; unsigned int ui; } u; + u.fl = f; + unsigned value = u.ui; + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! specConstant) { + Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value); + if (existing) + return existing; + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + c->addImmediateOperand(value); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeFloat].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Id Builder::makeDoubleConstant(double d, bool specConstant) +{ +#ifdef GLSLANG_WEB + assert(0); + return NoResult; +#else + Op opcode = specConstant ? OpSpecConstant : OpConstant; + Id typeId = makeFloatType(64); + union { double db; unsigned long long ull; } u; + u.db = d; + unsigned long long value = u.ull; + unsigned op1 = value & 0xFFFFFFFF; + unsigned op2 = value >> 32; + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! 
specConstant) {
+        Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, op1, op2);
+        if (existing)
+            return existing;
+    }
+
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    c->addImmediateOperand(op1);
+    c->addImmediateOperand(op2);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[OpTypeFloat].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+#endif
+}
+
+Id Builder::makeFloat16Constant(float f16, bool specConstant)
+{
+#ifdef GLSLANG_WEB
+    assert(0);
+    return NoResult;
+#else
+    Op opcode = specConstant ? OpSpecConstant : OpConstant;
+    Id typeId = makeFloatType(16);
+
+    spvutils::HexFloat<spvutils::FloatProxy<float>> fVal(f16);
+    spvutils::HexFloat<spvutils::FloatProxy<spvutils::Float16>> f16Val(0);
+    fVal.castTo(f16Val, spvutils::kRoundToZero);
+
+    unsigned value = f16Val.value().getAsFloat().get_value();
+
+    // See if we already made it. Applies only to regular constants, because specialization constants
+    // must remain distinct for the purpose of applying a SpecId decoration.
+    if (!specConstant) {
+        Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value);
+        if (existing)
+            return existing;
+    }
+
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    c->addImmediateOperand(value);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[OpTypeFloat].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+#endif
+}
+
+Id Builder::makeFpConstant(Id type, double d, bool specConstant)
+{
+#ifdef GLSLANG_WEB
+    const int width = 32;
+    assert(width == getScalarTypeWidth(type));
+#else
+    const int width = getScalarTypeWidth(type);
+#endif
+
+    assert(isFloatType(type));
+
+    switch (width) {
+    case 16:
+        return makeFloat16Constant((float)d, specConstant);
+    case 32:
+        return makeFloatConstant((float)d, specConstant);
+    case 64:
+        return makeDoubleConstant(d, specConstant);
+    default:
+        break;
+    }
+
+    assert(false);
+    return NoResult;
+}
+
+Id Builder::findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps)
+{
+    Instruction* constant = 0;
+    bool found = false;
+    for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+        constant = groupedConstants[typeClass][i];
+
+        if (constant->getTypeId() != typeId)
+            continue;
+
+        // same contents?
+        bool mismatch = false;
+        for (int op = 0; op < constant->getNumOperands(); ++op) {
+            if (constant->getIdOperand(op) != comps[op]) {
+                mismatch = true;
+                break;
+            }
+        }
+        if (! mismatch) {
+            found = true;
+            break;
+        }
+    }
+
+    return found ? constant->getResultId() : NoResult;
+}
+
+Id Builder::findStructConstant(Id typeId, const std::vector<Id>& comps)
+{
+    Instruction* constant = 0;
+    bool found = false;
+    for (int i = 0; i < (int)groupedStructConstants[typeId].size(); ++i) {
+        constant = groupedStructConstants[typeId][i];
+
+        // same contents?
+        bool mismatch = false;
+        for (int op = 0; op < constant->getNumOperands(); ++op) {
+            if (constant->getIdOperand(op) != comps[op]) {
+                mismatch = true;
+                break;
+            }
+        }
+        if (! mismatch) {
+            found = true;
+            break;
+        }
+    }
+
+    return found ? constant->getResultId() : NoResult;
+}
+
+// Comments in header
+Id Builder::makeCompositeConstant(Id typeId, const std::vector<Id>& members, bool specConstant)
+{
+    Op opcode = specConstant ? OpSpecConstantComposite : OpConstantComposite;
+    assert(typeId);
+    Op typeClass = getTypeClass(typeId);
+
+    switch (typeClass) {
+    case OpTypeVector:
+    case OpTypeArray:
+    case OpTypeMatrix:
+    case OpTypeCooperativeMatrixNV:
+        if (!
specConstant) { + Id existing = findCompositeConstant(typeClass, typeId, members); + if (existing) + return existing; + } + break; + case OpTypeStruct: + if (! specConstant) { + Id existing = findStructConstant(typeId, members); + if (existing) + return existing; + } + break; + default: + assert(0); + return makeFloatConstant(0.0); + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + for (int op = 0; op < (int)members.size(); ++op) + c->addIdOperand(members[op]); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + if (typeClass == OpTypeStruct) + groupedStructConstants[typeId].push_back(c); + else + groupedConstants[typeClass].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Instruction* Builder::addEntryPoint(ExecutionModel model, Function* function, const char* name) +{ + Instruction* entryPoint = new Instruction(OpEntryPoint); + entryPoint->addImmediateOperand(model); + entryPoint->addIdOperand(function->getId()); + entryPoint->addStringOperand(name); + + entryPoints.push_back(std::unique_ptr(entryPoint)); + + return entryPoint; +} + +// Currently relying on the fact that all 'value' of interest are small non-negative values. +void Builder::addExecutionMode(Function* entryPoint, ExecutionMode mode, int value1, int value2, int value3) +{ + Instruction* instr = new Instruction(OpExecutionMode); + instr->addIdOperand(entryPoint->getId()); + instr->addImmediateOperand(mode); + if (value1 >= 0) + instr->addImmediateOperand(value1); + if (value2 >= 0) + instr->addImmediateOperand(value2); + if (value3 >= 0) + instr->addImmediateOperand(value3); + + executionModes.push_back(std::unique_ptr(instr)); +} + +void Builder::addName(Id id, const char* string) +{ + Instruction* name = new Instruction(OpName); + name->addIdOperand(id); + name->addStringOperand(string); + + names.push_back(std::unique_ptr(name)); +} + +void Builder::addMemberName(Id id, int memberNumber, const char* string) +{ + Instruction* name = new Instruction(OpMemberName); + name->addIdOperand(id); + name->addImmediateOperand(memberNumber); + name->addStringOperand(string); + + names.push_back(std::unique_ptr(name)); +} + +void Builder::addDecoration(Id id, Decoration decoration, int num) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorate); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + if (num >= 0) + dec->addImmediateOperand(num); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addDecoration(Id id, Decoration decoration, const char* s) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorateStringGOOGLE); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + dec->addStringOperand(s); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addDecorationId(Id id, Decoration decoration, Id idDecoration) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorateId); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + dec->addIdOperand(idDecoration); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, int num) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpMemberDecorate); + dec->addIdOperand(id); + dec->addImmediateOperand(member); + dec->addImmediateOperand(decoration); + if (num >= 0) + dec->addImmediateOperand(num); + + 
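
// Sketch of how the bookkeeping helpers above are typically driven when
// declaring a compute shader (illustrative only; `mainFn` and `ssbo` are
// assumed to have been created elsewhere):
//
//     builder.addEntryPoint(spv::ExecutionModelGLCompute, mainFn, "main");
//     builder.addExecutionMode(mainFn, spv::ExecutionModeLocalSize, 8, 8, 1);
//     builder.addName(ssbo, "params");                            // OpName debug info
//     builder.addDecoration(ssbo, spv::DecorationDescriptorSet, 0);
//     builder.addDecoration(ssbo, spv::DecorationBinding, 0);
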
decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const char *s) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpMemberDecorateStringGOOGLE); + dec->addIdOperand(id); + dec->addImmediateOperand(member); + dec->addImmediateOperand(decoration); + dec->addStringOperand(s); + + decorations.push_back(std::unique_ptr(dec)); +} + +// Comments in header +Function* Builder::makeEntryPoint(const char* entryPoint) +{ + assert(! entryPointFunction); + + Block* entry; + std::vector params; + std::vector> decorations; + + entryPointFunction = makeFunctionEntry(NoPrecision, makeVoidType(), entryPoint, params, decorations, &entry); + + return entryPointFunction; +} + +// Comments in header +Function* Builder::makeFunctionEntry(Decoration precision, Id returnType, const char* name, + const std::vector& paramTypes, + const std::vector>& decorations, Block **entry) +{ + // Make the function and initial instructions in it + Id typeId = makeFunctionType(returnType, paramTypes); + Id firstParamId = paramTypes.size() == 0 ? 0 : getUniqueIds((int)paramTypes.size()); + Function* function = new Function(getUniqueId(), returnType, typeId, firstParamId, module); + + // Set up the precisions + setPrecision(function->getId(), precision); + for (unsigned p = 0; p < (unsigned)decorations.size(); ++p) { + for (int d = 0; d < (int)decorations[p].size(); ++d) + addDecoration(firstParamId + p, decorations[p][d]); + } + + // CFG + if (entry) { + *entry = new Block(getUniqueId(), *function); + function->addBlock(*entry); + setBuildPoint(*entry); + } + + if (name) + addName(function->getId(), name); + + functions.push_back(std::unique_ptr(function)); + + return function; +} + +// Comments in header +void Builder::makeReturn(bool implicit, Id retVal) +{ + if (retVal) { + Instruction* inst = new Instruction(NoResult, NoType, OpReturnValue); + inst->addIdOperand(retVal); + buildPoint->addInstruction(std::unique_ptr(inst)); + } else + buildPoint->addInstruction(std::unique_ptr(new Instruction(NoResult, NoType, OpReturn))); + + if (! implicit) + createAndSetNoPredecessorBlock("post-return"); +} + +// Comments in header +void Builder::leaveFunction() +{ + Block* block = buildPoint; + Function& function = buildPoint->getParent(); + assert(block); + + // If our function did not contain a return, add a return void now. + if (! 
block->isTerminated()) { + if (function.getReturnType() == makeVoidType()) + makeReturn(true); + else { + makeReturn(true, createUndefined(function.getReturnType())); + } + } +} + +// Comments in header +void Builder::makeDiscard() +{ + buildPoint->addInstruction(std::unique_ptr(new Instruction(OpKill))); + createAndSetNoPredecessorBlock("post-discard"); +} + +// Comments in header +Id Builder::createVariable(StorageClass storageClass, Id type, const char* name, Id initializer) +{ + Id pointerType = makePointer(storageClass, type); + Instruction* inst = new Instruction(getUniqueId(), pointerType, OpVariable); + inst->addImmediateOperand(storageClass); + if (initializer != NoResult) + inst->addIdOperand(initializer); + + switch (storageClass) { + case StorageClassFunction: + // Validation rules require the declaration in the entry block + buildPoint->getParent().addLocalVariable(std::unique_ptr(inst)); + break; + + default: + constantsTypesGlobals.push_back(std::unique_ptr(inst)); + module.mapInstruction(inst); + break; + } + + if (name) + addName(inst->getResultId(), name); + + return inst->getResultId(); +} + +// Comments in header +Id Builder::createUndefined(Id type) +{ + Instruction* inst = new Instruction(getUniqueId(), type, OpUndef); + buildPoint->addInstruction(std::unique_ptr(inst)); + return inst->getResultId(); +} + +// av/vis/nonprivate are unnecessary and illegal for some storage classes. +spv::MemoryAccessMask Builder::sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc) + const +{ + switch (sc) { + case spv::StorageClassUniform: + case spv::StorageClassWorkgroup: + case spv::StorageClassStorageBuffer: + case spv::StorageClassPhysicalStorageBufferEXT: + break; + default: + memoryAccess = spv::MemoryAccessMask(memoryAccess & + ~(spv::MemoryAccessMakePointerAvailableKHRMask | + spv::MemoryAccessMakePointerVisibleKHRMask | + spv::MemoryAccessNonPrivatePointerKHRMask)); + break; + } + return memoryAccess; +} + +// Comments in header +void Builder::createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, + unsigned int alignment) +{ + Instruction* store = new Instruction(OpStore); + store->addIdOperand(lValue); + store->addIdOperand(rValue); + + memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue)); + + if (memoryAccess != MemoryAccessMaskNone) { + store->addImmediateOperand(memoryAccess); + if (memoryAccess & spv::MemoryAccessAlignedMask) { + store->addImmediateOperand(alignment); + } + if (memoryAccess & spv::MemoryAccessMakePointerAvailableKHRMask) { + store->addIdOperand(makeUintConstant(scope)); + } + } + + buildPoint->addInstruction(std::unique_ptr(store)); +} + +// Comments in header +Id Builder::createLoad(Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment) +{ + Instruction* load = new Instruction(getUniqueId(), getDerefTypeId(lValue), OpLoad); + load->addIdOperand(lValue); + + memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue)); + + if (memoryAccess != MemoryAccessMaskNone) { + load->addImmediateOperand(memoryAccess); + if (memoryAccess & spv::MemoryAccessAlignedMask) { + load->addImmediateOperand(alignment); + } + if (memoryAccess & spv::MemoryAccessMakePointerVisibleKHRMask) { + load->addIdOperand(makeUintConstant(scope)); + } + } + + buildPoint->addInstruction(std::unique_ptr(load)); + + return load->getResultId(); +} + +// Comments in header +Id Builder::createAccessChain(StorageClass 
storageClass, Id base, const std::vector& offsets) +{ + // Figure out the final resulting type. + spv::Id typeId = getTypeId(base); + assert(isPointerType(typeId) && offsets.size() > 0); + typeId = getContainedTypeId(typeId); + for (int i = 0; i < (int)offsets.size(); ++i) { + if (isStructType(typeId)) { + assert(isConstantScalar(offsets[i])); + typeId = getContainedTypeId(typeId, getConstantScalar(offsets[i])); + } else + typeId = getContainedTypeId(typeId, offsets[i]); + } + typeId = makePointer(storageClass, typeId); + + // Make the instruction + Instruction* chain = new Instruction(getUniqueId(), typeId, OpAccessChain); + chain->addIdOperand(base); + for (int i = 0; i < (int)offsets.size(); ++i) + chain->addIdOperand(offsets[i]); + buildPoint->addInstruction(std::unique_ptr(chain)); + + return chain->getResultId(); +} + +Id Builder::createArrayLength(Id base, unsigned int member) +{ + spv::Id intType = makeUintType(32); + Instruction* length = new Instruction(getUniqueId(), intType, OpArrayLength); + length->addIdOperand(base); + length->addImmediateOperand(member); + buildPoint->addInstruction(std::unique_ptr(length)); + + return length->getResultId(); +} + +Id Builder::createCooperativeMatrixLength(Id type) +{ + spv::Id intType = makeUintType(32); + + // Generate code for spec constants if in spec constant operation + // generation mode. + if (generatingOpCodeForSpecConst) { + return createSpecConstantOp(OpCooperativeMatrixLengthNV, intType, std::vector(1, type), std::vector()); + } + + Instruction* length = new Instruction(getUniqueId(), intType, OpCooperativeMatrixLengthNV); + length->addIdOperand(type); + buildPoint->addInstruction(std::unique_ptr(length)); + + return length->getResultId(); +} + +Id Builder::createCompositeExtract(Id composite, Id typeId, unsigned index) +{ + // Generate code for spec constants if in spec constant operation + // generation mode. + if (generatingOpCodeForSpecConst) { + return createSpecConstantOp(OpCompositeExtract, typeId, std::vector(1, composite), + std::vector(1, index)); + } + Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract); + extract->addIdOperand(composite); + extract->addImmediateOperand(index); + buildPoint->addInstruction(std::unique_ptr(extract)); + + return extract->getResultId(); +} + +Id Builder::createCompositeExtract(Id composite, Id typeId, const std::vector& indexes) +{ + // Generate code for spec constants if in spec constant operation + // generation mode. 
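
// Worked example for createAccessChain above: with a base pointing at a
// uniform struct { vec4 color; }, offsets {0, 2} walk member 0 and then
// component 2, so the chain's type is pointer-to-float in the same storage
// class. Struct offsets must be constant scalars; vector/array offsets may be
// dynamic. Illustrative call (assumes `basePtr` exists):
//
//     std::vector<spv::Id> offsets;
//     offsets.push_back(builder.makeIntConstant(0)); // member index, must be constant
//     offsets.push_back(builder.makeIntConstant(2)); // component index
//     spv::Id ptr = builder.createAccessChain(spv::StorageClassUniform, basePtr, offsets);
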
+ if (generatingOpCodeForSpecConst) { + return createSpecConstantOp(OpCompositeExtract, typeId, std::vector(1, composite), indexes); + } + Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract); + extract->addIdOperand(composite); + for (int i = 0; i < (int)indexes.size(); ++i) + extract->addImmediateOperand(indexes[i]); + buildPoint->addInstruction(std::unique_ptr(extract)); + + return extract->getResultId(); +} + +Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, unsigned index) +{ + Instruction* insert = new Instruction(getUniqueId(), typeId, OpCompositeInsert); + insert->addIdOperand(object); + insert->addIdOperand(composite); + insert->addImmediateOperand(index); + buildPoint->addInstruction(std::unique_ptr(insert)); + + return insert->getResultId(); +} + +Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, const std::vector& indexes) +{ + Instruction* insert = new Instruction(getUniqueId(), typeId, OpCompositeInsert); + insert->addIdOperand(object); + insert->addIdOperand(composite); + for (int i = 0; i < (int)indexes.size(); ++i) + insert->addImmediateOperand(indexes[i]); + buildPoint->addInstruction(std::unique_ptr(insert)); + + return insert->getResultId(); +} + +Id Builder::createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex) +{ + Instruction* extract = new Instruction(getUniqueId(), typeId, OpVectorExtractDynamic); + extract->addIdOperand(vector); + extract->addIdOperand(componentIndex); + buildPoint->addInstruction(std::unique_ptr(extract)); + + return extract->getResultId(); +} + +Id Builder::createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex) +{ + Instruction* insert = new Instruction(getUniqueId(), typeId, OpVectorInsertDynamic); + insert->addIdOperand(vector); + insert->addIdOperand(component); + insert->addIdOperand(componentIndex); + buildPoint->addInstruction(std::unique_ptr(insert)); + + return insert->getResultId(); +} + +// An opcode that has no operands, no result id, and no type +void Builder::createNoResultOp(Op opCode) +{ + Instruction* op = new Instruction(opCode); + buildPoint->addInstruction(std::unique_ptr(op)); +} + +// An opcode that has one id operand, no result id, and no type +void Builder::createNoResultOp(Op opCode, Id operand) +{ + Instruction* op = new Instruction(opCode); + op->addIdOperand(operand); + buildPoint->addInstruction(std::unique_ptr(op)); +} + +// An opcode that has one or more operands, no result id, and no type +void Builder::createNoResultOp(Op opCode, const std::vector& operands) +{ + Instruction* op = new Instruction(opCode); + for (auto it = operands.cbegin(); it != operands.cend(); ++it) { + op->addIdOperand(*it); + } + buildPoint->addInstruction(std::unique_ptr(op)); +} + +// An opcode that has multiple operands, no result id, and no type +void Builder::createNoResultOp(Op opCode, const std::vector& operands) +{ + Instruction* op = new Instruction(opCode); + for (auto it = operands.cbegin(); it != operands.cend(); ++it) { + if (it->isId) + op->addIdOperand(it->word); + else + op->addImmediateOperand(it->word); + } + buildPoint->addInstruction(std::unique_ptr(op)); +} + +void Builder::createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask semantics) +{ + Instruction* op = new Instruction(OpControlBarrier); + op->addIdOperand(makeUintConstant(execution)); + op->addIdOperand(makeUintConstant(memory)); + op->addIdOperand(makeUintConstant(semantics)); + buildPoint->addInstruction(std::unique_ptr(op)); +} + +void 
Builder::createMemoryBarrier(unsigned executionScope, unsigned memorySemantics) +{ + Instruction* op = new Instruction(OpMemoryBarrier); + op->addIdOperand(makeUintConstant(executionScope)); + op->addIdOperand(makeUintConstant(memorySemantics)); + buildPoint->addInstruction(std::unique_ptr(op)); +} + +// An opcode that has one operands, a result id, and a type +Id Builder::createUnaryOp(Op opCode, Id typeId, Id operand) +{ + // Generate code for spec constants if in spec constant operation + // generation mode. + if (generatingOpCodeForSpecConst) { + return createSpecConstantOp(opCode, typeId, std::vector(1, operand), std::vector()); + } + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + op->addIdOperand(operand); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createBinOp(Op opCode, Id typeId, Id left, Id right) +{ + // Generate code for spec constants if in spec constant operation + // generation mode. + if (generatingOpCodeForSpecConst) { + std::vector operands(2); + operands[0] = left; operands[1] = right; + return createSpecConstantOp(opCode, typeId, operands, std::vector()); + } + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + op->addIdOperand(left); + op->addIdOperand(right); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createTriOp(Op opCode, Id typeId, Id op1, Id op2, Id op3) +{ + // Generate code for spec constants if in spec constant operation + // generation mode. + if (generatingOpCodeForSpecConst) { + std::vector operands(3); + operands[0] = op1; + operands[1] = op2; + operands[2] = op3; + return createSpecConstantOp( + opCode, typeId, operands, std::vector()); + } + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + op->addIdOperand(op1); + op->addIdOperand(op2); + op->addIdOperand(op3); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createOp(Op opCode, Id typeId, const std::vector& operands) +{ + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + for (auto it = operands.cbegin(); it != operands.cend(); ++it) + op->addIdOperand(*it); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createOp(Op opCode, Id typeId, const std::vector& operands) +{ + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + for (auto it = operands.cbegin(); it != operands.cend(); ++it) { + if (it->isId) + op->addIdOperand(it->word); + else + op->addImmediateOperand(it->word); + } + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createSpecConstantOp(Op opCode, Id typeId, const std::vector& operands, + const std::vector& literals) +{ + Instruction* op = new Instruction(getUniqueId(), typeId, OpSpecConstantOp); + op->addImmediateOperand((unsigned) opCode); + for (auto it = operands.cbegin(); it != operands.cend(); ++it) + op->addIdOperand(*it); + for (auto it = literals.cbegin(); it != literals.cend(); ++it) + op->addImmediateOperand(*it); + module.mapInstruction(op); + constantsTypesGlobals.push_back(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createFunctionCall(spv::Function* function, const std::vector& args) +{ + Instruction* op = new Instruction(getUniqueId(), function->getReturnType(), OpFunctionCall); + op->addIdOperand(function->getId()); + for (int a = 0; a < (int)args.size(); ++a) + op->addIdOperand(args[a]); + 
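
// Sketch of the common arithmetic path above (illustrative; `lhs` and `rhs`
// are assumed float-typed ids): in normal mode this appends an instruction to
// the current block, while in spec-constant-op generation mode the same call
// is transparently rerouted into an OpSpecConstantOp in the global section.
//
//     spv::Id sum = builder.createBinOp(spv::OpFAdd, builder.makeFloatType(32), lhs, rhs);
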
buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +// Comments in header +Id Builder::createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector& channels) +{ + if (channels.size() == 1) + return setPrecision(createCompositeExtract(source, typeId, channels.front()), precision); + + if (generatingOpCodeForSpecConst) { + std::vector operands(2); + operands[0] = operands[1] = source; + return setPrecision(createSpecConstantOp(OpVectorShuffle, typeId, operands, channels), precision); + } + Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle); + assert(isVector(source)); + swizzle->addIdOperand(source); + swizzle->addIdOperand(source); + for (int i = 0; i < (int)channels.size(); ++i) + swizzle->addImmediateOperand(channels[i]); + buildPoint->addInstruction(std::unique_ptr(swizzle)); + + return setPrecision(swizzle->getResultId(), precision); +} + +// Comments in header +Id Builder::createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector& channels) +{ + if (channels.size() == 1 && getNumComponents(source) == 1) + return createCompositeInsert(source, target, typeId, channels.front()); + + Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle); + + assert(isVector(target)); + swizzle->addIdOperand(target); + + assert(getNumComponents(source) == (int)channels.size()); + assert(isVector(source)); + swizzle->addIdOperand(source); + + // Set up an identity shuffle from the base value to the result value + unsigned int components[4]; + int numTargetComponents = getNumComponents(target); + for (int i = 0; i < numTargetComponents; ++i) + components[i] = i; + + // Punch in the l-value swizzle + for (int i = 0; i < (int)channels.size(); ++i) + components[channels[i]] = numTargetComponents + i; + + // finish the instruction with these components selectors + for (int i = 0; i < numTargetComponents; ++i) + swizzle->addImmediateOperand(components[i]); + buildPoint->addInstruction(std::unique_ptr(swizzle)); + + return swizzle->getResultId(); +} + +// Comments in header +void Builder::promoteScalar(Decoration precision, Id& left, Id& right) +{ + int direction = getNumComponents(right) - getNumComponents(left); + + if (direction > 0) + left = smearScalar(precision, left, makeVectorType(getTypeId(left), getNumComponents(right))); + else if (direction < 0) + right = smearScalar(precision, right, makeVectorType(getTypeId(right), getNumComponents(left))); + + return; +} + +// Comments in header +Id Builder::smearScalar(Decoration precision, Id scalar, Id vectorType) +{ + assert(getNumComponents(scalar) == 1); + assert(getTypeId(scalar) == getScalarTypeId(vectorType)); + + int numComponents = getNumTypeComponents(vectorType); + if (numComponents == 1) + return scalar; + + Instruction* smear = nullptr; + if (generatingOpCodeForSpecConst) { + auto members = std::vector(numComponents, scalar); + // Sometime even in spec-constant-op mode, the temporary vector created by + // promoting a scalar might not be a spec constant. This should depend on + // the scalar. + // e.g.: + // const vec2 spec_const_result = a_spec_const_vec2 + a_front_end_const_scalar; + // In such cases, the temporary vector created from a_front_end_const_scalar + // is not a spec constant vector, even though the binary operation node is marked + // as 'specConstant' and we are in spec-constant-op mode. 
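
// Worked example for the identity-shuffle trick in createLvalueSwizzle above:
// storing to `v.zx` on a vec3 target builds OpVectorShuffle(v, src) whose
// selectors start as the identity {0, 1, 2} over the target; the swizzled
// slots are then redirected into the source (numTargetComponents + i):
//
//     channels = {2, 0}  =>  components[2] = 3 + 0, components[0] = 3 + 1
//     final selectors    =>  {4, 1, 3}
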
+ auto result_id = makeCompositeConstant(vectorType, members, isSpecConstant(scalar)); + smear = module.getInstruction(result_id); + } else { + smear = new Instruction(getUniqueId(), vectorType, OpCompositeConstruct); + for (int c = 0; c < numComponents; ++c) + smear->addIdOperand(scalar); + buildPoint->addInstruction(std::unique_ptr(smear)); + } + + return setPrecision(smear->getResultId(), precision); +} + +// Comments in header +Id Builder::createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector& args) +{ + Instruction* inst = new Instruction(getUniqueId(), resultType, OpExtInst); + inst->addIdOperand(builtins); + inst->addImmediateOperand(entryPoint); + for (int arg = 0; arg < (int)args.size(); ++arg) + inst->addIdOperand(args[arg]); + + buildPoint->addInstruction(std::unique_ptr(inst)); + + return inst->getResultId(); +} + +// Accept all parameters needed to create a texture instruction. +// Create the correct instruction based on the inputs, and make the call. +Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, + bool noImplicitLod, const TextureParameters& parameters, ImageOperandsMask signExtensionMask) +{ + static const int maxTextureArgs = 10; + Id texArgs[maxTextureArgs] = {}; + + // + // Set up the fixed arguments + // + int numArgs = 0; + bool explicitLod = false; + texArgs[numArgs++] = parameters.sampler; + texArgs[numArgs++] = parameters.coords; + if (parameters.Dref != NoResult) + texArgs[numArgs++] = parameters.Dref; + if (parameters.component != NoResult) + texArgs[numArgs++] = parameters.component; + +#ifndef GLSLANG_WEB + if (parameters.granularity != NoResult) + texArgs[numArgs++] = parameters.granularity; + if (parameters.coarse != NoResult) + texArgs[numArgs++] = parameters.coarse; +#endif + + // + // Set up the optional arguments + // + int optArgNum = numArgs; // track which operand, if it exists, is the mask of optional arguments + ++numArgs; // speculatively make room for the mask operand + ImageOperandsMask mask = ImageOperandsMaskNone; // the mask operand + if (parameters.bias) { + mask = (ImageOperandsMask)(mask | ImageOperandsBiasMask); + texArgs[numArgs++] = parameters.bias; + } + if (parameters.lod) { + mask = (ImageOperandsMask)(mask | ImageOperandsLodMask); + texArgs[numArgs++] = parameters.lod; + explicitLod = true; + } else if (parameters.gradX) { + mask = (ImageOperandsMask)(mask | ImageOperandsGradMask); + texArgs[numArgs++] = parameters.gradX; + texArgs[numArgs++] = parameters.gradY; + explicitLod = true; + } else if (noImplicitLod && ! fetch && ! 
gather) { + // have to explicitly use lod of 0 if not allowed to have them be implicit, and + // we would otherwise be about to issue an implicit instruction + mask = (ImageOperandsMask)(mask | ImageOperandsLodMask); + texArgs[numArgs++] = makeFloatConstant(0.0); + explicitLod = true; + } + if (parameters.offset) { + if (isConstant(parameters.offset)) + mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetMask); + else { + addCapability(CapabilityImageGatherExtended); + mask = (ImageOperandsMask)(mask | ImageOperandsOffsetMask); + } + texArgs[numArgs++] = parameters.offset; + } + if (parameters.offsets) { + addCapability(CapabilityImageGatherExtended); + mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask); + texArgs[numArgs++] = parameters.offsets; + } +#ifndef GLSLANG_WEB + if (parameters.sample) { + mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask); + texArgs[numArgs++] = parameters.sample; + } + if (parameters.lodClamp) { + // capability if this bit is used + addCapability(CapabilityMinLod); + + mask = (ImageOperandsMask)(mask | ImageOperandsMinLodMask); + texArgs[numArgs++] = parameters.lodClamp; + } + if (parameters.nonprivate) { + mask = mask | ImageOperandsNonPrivateTexelKHRMask; + } + if (parameters.volatil) { + mask = mask | ImageOperandsVolatileTexelKHRMask; + } +#endif + mask = mask | signExtensionMask; + if (mask == ImageOperandsMaskNone) + --numArgs; // undo speculative reservation for the mask argument + else + texArgs[optArgNum] = mask; + + // + // Set up the instruction + // + Op opCode = OpNop; // All paths below need to set this + if (fetch) { + if (sparse) + opCode = OpImageSparseFetch; + else + opCode = OpImageFetch; +#ifndef GLSLANG_WEB + } else if (parameters.granularity && parameters.coarse) { + opCode = OpImageSampleFootprintNV; + } else if (gather) { + if (parameters.Dref) + if (sparse) + opCode = OpImageSparseDrefGather; + else + opCode = OpImageDrefGather; + else + if (sparse) + opCode = OpImageSparseGather; + else + opCode = OpImageGather; +#endif + } else if (explicitLod) { + if (parameters.Dref) { + if (proj) + if (sparse) + opCode = OpImageSparseSampleProjDrefExplicitLod; + else + opCode = OpImageSampleProjDrefExplicitLod; + else + if (sparse) + opCode = OpImageSparseSampleDrefExplicitLod; + else + opCode = OpImageSampleDrefExplicitLod; + } else { + if (proj) + if (sparse) + opCode = OpImageSparseSampleProjExplicitLod; + else + opCode = OpImageSampleProjExplicitLod; + else + if (sparse) + opCode = OpImageSparseSampleExplicitLod; + else + opCode = OpImageSampleExplicitLod; + } + } else { + if (parameters.Dref) { + if (proj) + if (sparse) + opCode = OpImageSparseSampleProjDrefImplicitLod; + else + opCode = OpImageSampleProjDrefImplicitLod; + else + if (sparse) + opCode = OpImageSparseSampleDrefImplicitLod; + else + opCode = OpImageSampleDrefImplicitLod; + } else { + if (proj) + if (sparse) + opCode = OpImageSparseSampleProjImplicitLod; + else + opCode = OpImageSampleProjImplicitLod; + else + if (sparse) + opCode = OpImageSparseSampleImplicitLod; + else + opCode = OpImageSampleImplicitLod; + } + } + + // See if the result type is expecting a smeared result. + // This happens when a legacy shadow*() call is made, which + // gets a vec4 back instead of a float. + Id smearedType = resultType; + if (! 
isScalarType(resultType)) { + switch (opCode) { + case OpImageSampleDrefImplicitLod: + case OpImageSampleDrefExplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + resultType = getScalarTypeId(resultType); + break; + default: + break; + } + } + + Id typeId0 = 0; + Id typeId1 = 0; + + if (sparse) { + typeId0 = resultType; + typeId1 = getDerefTypeId(parameters.texelOut); + resultType = makeStructResultType(typeId0, typeId1); + } + + // Build the SPIR-V instruction + Instruction* textureInst = new Instruction(getUniqueId(), resultType, opCode); + for (int op = 0; op < optArgNum; ++op) + textureInst->addIdOperand(texArgs[op]); + if (optArgNum < numArgs) + textureInst->addImmediateOperand(texArgs[optArgNum]); + for (int op = optArgNum + 1; op < numArgs; ++op) + textureInst->addIdOperand(texArgs[op]); + setPrecision(textureInst->getResultId(), precision); + buildPoint->addInstruction(std::unique_ptr(textureInst)); + + Id resultId = textureInst->getResultId(); + + if (sparse) { + // set capability + addCapability(CapabilitySparseResidency); + + // Decode the return type that was a special structure + createStore(createCompositeExtract(resultId, typeId1, 1), parameters.texelOut); + resultId = createCompositeExtract(resultId, typeId0, 0); + setPrecision(resultId, precision); + } else { + // When a smear is needed, do it, as per what was computed + // above when resultType was changed to a scalar type. + if (resultType != smearedType) + resultId = smearScalar(precision, resultId, smearedType); + } + + return resultId; +} + +// Comments in header +Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameters, bool isUnsignedResult) +{ + // Figure out the result type + Id resultType = 0; + switch (opCode) { + case OpImageQuerySize: + case OpImageQuerySizeLod: + { + int numComponents = 0; + switch (getTypeDimensionality(getImageType(parameters.sampler))) { + case Dim1D: + case DimBuffer: + numComponents = 1; + break; + case Dim2D: + case DimCube: + case DimRect: + case DimSubpassData: + numComponents = 2; + break; + case Dim3D: + numComponents = 3; + break; + + default: + assert(0); + break; + } + if (isArrayedImageType(getImageType(parameters.sampler))) + ++numComponents; + + Id intType = isUnsignedResult ? makeUintType(32) : makeIntType(32); + if (numComponents == 1) + resultType = intType; + else + resultType = makeVectorType(intType, numComponents); + + break; + } + case OpImageQueryLod: + resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2); + break; + case OpImageQueryLevels: + case OpImageQuerySamples: + resultType = isUnsignedResult ? makeUintType(32) : makeIntType(32); + break; + default: + assert(0); + break; + } + + Instruction* query = new Instruction(getUniqueId(), resultType, opCode); + query->addIdOperand(parameters.sampler); + if (parameters.coords) + query->addIdOperand(parameters.coords); + if (parameters.lod) + query->addIdOperand(parameters.lod); + buildPoint->addInstruction(std::unique_ptr(query)); + addCapability(CapabilityImageQuery); + + return query->getResultId(); +} + +// External comments in header. +// Operates recursively to visit the composite's hierarchy. 
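
// Sketch of a query call as sized above (illustrative; assumes `params` is a
// TextureParameters whose sampler is an arrayed 2D image): the base Dim2D
// contributes two components and the arrayed-ness one more, so the result of
// OpImageQuerySizeLod here is a uvec3.
//
//     spv::Id size = builder.createTextureQueryCall(spv::OpImageQuerySizeLod, params,
//                                                   /*isUnsignedResult*/ true);
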
+Id Builder::createCompositeCompare(Decoration precision, Id value1, Id value2, bool equal) +{ + Id boolType = makeBoolType(); + Id valueType = getTypeId(value1); + + Id resultId = NoResult; + + int numConstituents = getNumTypeConstituents(valueType); + + // Scalars and Vectors + + if (isScalarType(valueType) || isVectorType(valueType)) { + assert(valueType == getTypeId(value2)); + // These just need a single comparison, just have + // to figure out what it is. + Op op; + switch (getMostBasicTypeClass(valueType)) { + case OpTypeFloat: + op = equal ? OpFOrdEqual : OpFOrdNotEqual; + break; + case OpTypeInt: + default: + op = equal ? OpIEqual : OpINotEqual; + break; + case OpTypeBool: + op = equal ? OpLogicalEqual : OpLogicalNotEqual; + precision = NoPrecision; + break; + } + + if (isScalarType(valueType)) { + // scalar + resultId = createBinOp(op, boolType, value1, value2); + } else { + // vector + resultId = createBinOp(op, makeVectorType(boolType, numConstituents), value1, value2); + setPrecision(resultId, precision); + // reduce vector compares... + resultId = createUnaryOp(equal ? OpAll : OpAny, boolType, resultId); + } + + return setPrecision(resultId, precision); + } + + // Only structs, arrays, and matrices should be left. + // They share in common the reduction operation across their constituents. + assert(isAggregateType(valueType) || isMatrixType(valueType)); + + // Compare each pair of constituents + for (int constituent = 0; constituent < numConstituents; ++constituent) { + std::vector indexes(1, constituent); + Id constituentType1 = getContainedTypeId(getTypeId(value1), constituent); + Id constituentType2 = getContainedTypeId(getTypeId(value2), constituent); + Id constituent1 = createCompositeExtract(value1, constituentType1, indexes); + Id constituent2 = createCompositeExtract(value2, constituentType2, indexes); + + Id subResultId = createCompositeCompare(precision, constituent1, constituent2, equal); + + if (constituent == 0) + resultId = subResultId; + else + resultId = setPrecision(createBinOp(equal ? OpLogicalAnd : OpLogicalOr, boolType, resultId, subResultId), + precision); + } + + return resultId; +} + +// OpCompositeConstruct +Id Builder::createCompositeConstruct(Id typeId, const std::vector& constituents) +{ + assert(isAggregateType(typeId) || (getNumTypeConstituents(typeId) > 1 && + getNumTypeConstituents(typeId) == (int)constituents.size())); + + if (generatingOpCodeForSpecConst) { + // Sometime, even in spec-constant-op mode, the constant composite to be + // constructed may not be a specialization constant. + // e.g.: + // const mat2 m2 = mat2(a_spec_const, a_front_end_const, another_front_end_const, third_front_end_const); + // The first column vector should be a spec constant one, as a_spec_const is a spec constant. + // The second column vector should NOT be spec constant, as it does not contain any spec constants. + // To handle such cases, we check the constituents of the constant vector to determine whether this + // vector should be created as a spec constant. 
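
// Sketch for createCompositeCompare above (illustrative; `a` and `b` are
// values of the same struct type): each member pair is extracted and compared
// recursively, then the per-member booleans are folded with OpLogicalAnd
// (equality) or OpLogicalOr (inequality).
//
//     spv::Id same = builder.createCompositeCompare(spv::NoPrecision, a, b, /*equal*/ true);
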
+ return makeCompositeConstant(typeId, constituents, + std::any_of(constituents.begin(), constituents.end(), + [&](spv::Id id) { return isSpecConstant(id); })); + } + + Instruction* op = new Instruction(getUniqueId(), typeId, OpCompositeConstruct); + for (int c = 0; c < (int)constituents.size(); ++c) + op->addIdOperand(constituents[c]); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +// Vector or scalar constructor +Id Builder::createConstructor(Decoration precision, const std::vector& sources, Id resultTypeId) +{ + Id result = NoResult; + unsigned int numTargetComponents = getNumTypeComponents(resultTypeId); + unsigned int targetComponent = 0; + + // Special case: when calling a vector constructor with a single scalar + // argument, smear the scalar + if (sources.size() == 1 && isScalar(sources[0]) && numTargetComponents > 1) + return smearScalar(precision, sources[0], resultTypeId); + + // accumulate the arguments for OpCompositeConstruct + std::vector constituents; + Id scalarTypeId = getScalarTypeId(resultTypeId); + + // lambda to store the result of visiting an argument component + const auto latchResult = [&](Id comp) { + if (numTargetComponents > 1) + constituents.push_back(comp); + else + result = comp; + ++targetComponent; + }; + + // lambda to visit a vector argument's components + const auto accumulateVectorConstituents = [&](Id sourceArg) { + unsigned int sourceSize = getNumComponents(sourceArg); + unsigned int sourcesToUse = sourceSize; + if (sourcesToUse + targetComponent > numTargetComponents) + sourcesToUse = numTargetComponents - targetComponent; + + for (unsigned int s = 0; s < sourcesToUse; ++s) { + std::vector swiz; + swiz.push_back(s); + latchResult(createRvalueSwizzle(precision, scalarTypeId, sourceArg, swiz)); + } + }; + + // lambda to visit a matrix argument's components + const auto accumulateMatrixConstituents = [&](Id sourceArg) { + unsigned int sourceSize = getNumColumns(sourceArg) * getNumRows(sourceArg); + unsigned int sourcesToUse = sourceSize; + if (sourcesToUse + targetComponent > numTargetComponents) + sourcesToUse = numTargetComponents - targetComponent; + + int col = 0; + int row = 0; + for (unsigned int s = 0; s < sourcesToUse; ++s) { + if (row >= getNumRows(sourceArg)) { + row = 0; + col++; + } + std::vector indexes; + indexes.push_back(col); + indexes.push_back(row); + latchResult(createCompositeExtract(sourceArg, scalarTypeId, indexes)); + row++; + } + }; + + // Go through the source arguments, each one could have either + // a single or multiple components to contribute. + for (unsigned int i = 0; i < sources.size(); ++i) { + + if (isScalar(sources[i]) || isPointer(sources[i])) + latchResult(sources[i]); + else if (isVector(sources[i])) + accumulateVectorConstituents(sources[i]); + else if (isMatrix(sources[i])) + accumulateMatrixConstituents(sources[i]); + else + assert(0); + + if (targetComponent >= numTargetComponents) + break; + } + + // If the result is a vector, make it from the gathered constituents. 
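
// Worked example for the accumulation above: vec4(v2, a, b) visits v2 and
// latches its two components via single-channel swizzles, then latches the
// scalars a and b, ending in OpCompositeConstruct(%vec4, v2.x, v2.y, a, b);
// source components beyond the target's size are silently dropped.
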
+ if (constituents.size() > 0) + result = createCompositeConstruct(resultTypeId, constituents); + + return setPrecision(result, precision); +} + +// Comments in header +Id Builder::createMatrixConstructor(Decoration precision, const std::vector& sources, Id resultTypeId) +{ + Id componentTypeId = getScalarTypeId(resultTypeId); + int numCols = getTypeNumColumns(resultTypeId); + int numRows = getTypeNumRows(resultTypeId); + + Instruction* instr = module.getInstruction(componentTypeId); +#ifdef GLSLANG_WEB + const unsigned bitCount = 32; + assert(bitCount == instr->getImmediateOperand(0)); +#else + const unsigned bitCount = instr->getImmediateOperand(0); +#endif + + // Optimize matrix constructed from a bigger matrix + if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) { + // To truncate the matrix to a smaller number of rows/columns, we need to: + // 1. For each column, extract the column and truncate it to the required size using shuffle + // 2. Assemble the resulting matrix from all columns + Id matrix = sources[0]; + Id columnTypeId = getContainedTypeId(resultTypeId); + Id sourceColumnTypeId = getContainedTypeId(getTypeId(matrix)); + + std::vector channels; + for (int row = 0; row < numRows; ++row) + channels.push_back(row); + + std::vector matrixColumns; + for (int col = 0; col < numCols; ++col) { + std::vector indexes; + indexes.push_back(col); + Id colv = createCompositeExtract(matrix, sourceColumnTypeId, indexes); + setPrecision(colv, precision); + + if (numRows != getNumRows(matrix)) { + matrixColumns.push_back(createRvalueSwizzle(precision, columnTypeId, colv, channels)); + } else { + matrixColumns.push_back(colv); + } + } + + return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision); + } + + // Otherwise, will use a two step process + // 1. make a compile-time 2D array of values + // 2. construct a matrix from that array + + // Step 1. + + // initialize the array to the identity matrix + Id ids[maxMatrixSize][maxMatrixSize]; + Id one = (bitCount == 64 ? makeDoubleConstant(1.0) : makeFloatConstant(1.0)); + Id zero = (bitCount == 64 ? 
makeDoubleConstant(0.0) : makeFloatConstant(0.0)); + for (int col = 0; col < 4; ++col) { + for (int row = 0; row < 4; ++row) { + if (col == row) + ids[col][row] = one; + else + ids[col][row] = zero; + } + } + + // modify components as dictated by the arguments + if (sources.size() == 1 && isScalar(sources[0])) { + // a single scalar; resets the diagonals + for (int col = 0; col < 4; ++col) + ids[col][col] = sources[0]; + } else if (isMatrix(sources[0])) { + // constructing from another matrix; copy over the parts that exist in both the argument and constructee + Id matrix = sources[0]; + int minCols = std::min(numCols, getNumColumns(matrix)); + int minRows = std::min(numRows, getNumRows(matrix)); + for (int col = 0; col < minCols; ++col) { + std::vector indexes; + indexes.push_back(col); + for (int row = 0; row < minRows; ++row) { + indexes.push_back(row); + ids[col][row] = createCompositeExtract(matrix, componentTypeId, indexes); + indexes.pop_back(); + setPrecision(ids[col][row], precision); + } + } + } else { + // fill in the matrix in column-major order with whatever argument components are available + int row = 0; + int col = 0; + + for (int arg = 0; arg < (int)sources.size(); ++arg) { + Id argComp = sources[arg]; + for (int comp = 0; comp < getNumComponents(sources[arg]); ++comp) { + if (getNumComponents(sources[arg]) > 1) { + argComp = createCompositeExtract(sources[arg], componentTypeId, comp); + setPrecision(argComp, precision); + } + ids[col][row++] = argComp; + if (row == numRows) { + row = 0; + col++; + } + } + } + } + + // Step 2: Construct a matrix from that array. + // First make the column vectors, then make the matrix. + + // make the column vectors + Id columnTypeId = getContainedTypeId(resultTypeId); + std::vector matrixColumns; + for (int col = 0; col < numCols; ++col) { + std::vector vectorComponents; + for (int row = 0; row < numRows; ++row) + vectorComponents.push_back(ids[col][row]); + Id column = createCompositeConstruct(columnTypeId, vectorComponents); + setPrecision(column, precision); + matrixColumns.push_back(column); + } + + // make the matrix + return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision); +} + +// Comments in header +Builder::If::If(Id cond, unsigned int ctrl, Builder& gb) : + builder(gb), + condition(cond), + control(ctrl), + elseBlock(0) +{ + function = &builder.getBuildPoint()->getParent(); + + // make the blocks, but only put the then-block into the function, + // the else-block and merge-block will be added later, in order, after + // earlier code is emitted + thenBlock = new Block(builder.getUniqueId(), *function); + mergeBlock = new Block(builder.getUniqueId(), *function); + + // Save the current block, so that we can add in the flow control split when + // makeEndIf is called. 
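
// Typical use of this helper from generating code (illustrative):
//
//     Builder::If ifBuilder(condition, spv::SelectionControlMaskNone, builder);
//     // ... emit then-side instructions ...
//     ifBuilder.makeBeginElse();
//     // ... emit else-side instructions ...
//     ifBuilder.makeEndIf(); // emits OpSelectionMerge + OpBranchConditional in the header
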
+ headerBlock = builder.getBuildPoint(); + + function->addBlock(thenBlock); + builder.setBuildPoint(thenBlock); +} + +// Comments in header +void Builder::If::makeBeginElse() +{ + // Close out the "then" by having it jump to the mergeBlock + builder.createBranch(mergeBlock); + + // Make the first else block and add it to the function + elseBlock = new Block(builder.getUniqueId(), *function); + function->addBlock(elseBlock); + + // Start building the else block + builder.setBuildPoint(elseBlock); +} + +// Comments in header +void Builder::If::makeEndIf() +{ + // jump to the merge block + builder.createBranch(mergeBlock); + + // Go back to the headerBlock and make the flow control split + builder.setBuildPoint(headerBlock); + builder.createSelectionMerge(mergeBlock, control); + if (elseBlock) + builder.createConditionalBranch(condition, thenBlock, elseBlock); + else + builder.createConditionalBranch(condition, thenBlock, mergeBlock); + + // add the merge block to the function + function->addBlock(mergeBlock); + builder.setBuildPoint(mergeBlock); +} + +// Comments in header +void Builder::makeSwitch(Id selector, unsigned int control, int numSegments, const std::vector& caseValues, + const std::vector& valueIndexToSegment, int defaultSegment, + std::vector& segmentBlocks) +{ + Function& function = buildPoint->getParent(); + + // make all the blocks + for (int s = 0; s < numSegments; ++s) + segmentBlocks.push_back(new Block(getUniqueId(), function)); + + Block* mergeBlock = new Block(getUniqueId(), function); + + // make and insert the switch's selection-merge instruction + createSelectionMerge(mergeBlock, control); + + // make the switch instruction + Instruction* switchInst = new Instruction(NoResult, NoType, OpSwitch); + switchInst->addIdOperand(selector); + auto defaultOrMerge = (defaultSegment >= 0) ? segmentBlocks[defaultSegment] : mergeBlock; + switchInst->addIdOperand(defaultOrMerge->getId()); + defaultOrMerge->addPredecessor(buildPoint); + for (int i = 0; i < (int)caseValues.size(); ++i) { + switchInst->addImmediateOperand(caseValues[i]); + switchInst->addIdOperand(segmentBlocks[valueIndexToSegment[i]]->getId()); + segmentBlocks[valueIndexToSegment[i]]->addPredecessor(buildPoint); + } + buildPoint->addInstruction(std::unique_ptr(switchInst)); + + // push the merge block + switchMerges.push(mergeBlock); +} + +// Comments in header +void Builder::addSwitchBreak() +{ + // branch to the top of the merge block stack + createBranch(switchMerges.top()); + createAndSetNoPredecessorBlock("post-switch-break"); +} + +// Comments in header +void Builder::nextSwitchSegment(std::vector& segmentBlock, int nextSegment) +{ + int lastSegment = nextSegment - 1; + if (lastSegment >= 0) { + // Close out previous segment by jumping, if necessary, to next segment + if (! buildPoint->isTerminated()) + createBranch(segmentBlock[nextSegment]); + } + Block* block = segmentBlock[nextSegment]; + block->getParent().addBlock(block); + setBuildPoint(block); +} + +// Comments in header +void Builder::endSwitch(std::vector& /*segmentBlock*/) +{ + // Close out previous segment by jumping, if necessary, to next segment + if (! 
buildPoint->isTerminated()) + addSwitchBreak(); + + switchMerges.top()->getParent().addBlock(switchMerges.top()); + setBuildPoint(switchMerges.top()); + + switchMerges.pop(); +} + +Block& Builder::makeNewBlock() +{ + Function& function = buildPoint->getParent(); + auto block = new Block(getUniqueId(), function); + function.addBlock(block); + return *block; +} + +Builder::LoopBlocks& Builder::makeNewLoop() +{ + // This verbosity is needed to simultaneously get the same behavior + // everywhere (id's in the same order), have a syntax that works + // across lots of versions of C++, have no warnings from pedantic + // compilation modes, and leave the rest of the code alone. + Block& head = makeNewBlock(); + Block& body = makeNewBlock(); + Block& merge = makeNewBlock(); + Block& continue_target = makeNewBlock(); + LoopBlocks blocks(head, body, merge, continue_target); + loops.push(blocks); + return loops.top(); +} + +void Builder::createLoopContinue() +{ + createBranch(&loops.top().continue_target); + // Set up a block for dead code. + createAndSetNoPredecessorBlock("post-loop-continue"); +} + +void Builder::createLoopExit() +{ + createBranch(&loops.top().merge); + // Set up a block for dead code. + createAndSetNoPredecessorBlock("post-loop-break"); +} + +void Builder::closeLoop() +{ + loops.pop(); +} + +void Builder::clearAccessChain() +{ + accessChain.base = NoResult; + accessChain.indexChain.clear(); + accessChain.instr = NoResult; + accessChain.swizzle.clear(); + accessChain.component = NoResult; + accessChain.preSwizzleBaseType = NoType; + accessChain.isRValue = false; + accessChain.coherentFlags.clear(); + accessChain.alignment = 0; +} + +// Comments in header +void Builder::accessChainPushSwizzle(std::vector& swizzle, Id preSwizzleBaseType, + AccessChain::CoherentFlags coherentFlags, unsigned int alignment) +{ + accessChain.coherentFlags |= coherentFlags; + accessChain.alignment |= alignment; + + // swizzles can be stacked in GLSL, but simplified to a single + // one here; the base type doesn't change + if (accessChain.preSwizzleBaseType == NoType) + accessChain.preSwizzleBaseType = preSwizzleBaseType; + + // if needed, propagate the swizzle for the current access chain + if (accessChain.swizzle.size() > 0) { + std::vector oldSwizzle = accessChain.swizzle; + accessChain.swizzle.resize(0); + for (unsigned int i = 0; i < swizzle.size(); ++i) { + assert(swizzle[i] < oldSwizzle.size()); + accessChain.swizzle.push_back(oldSwizzle[swizzle[i]]); + } + } else + accessChain.swizzle = swizzle; + + // determine if we need to track this swizzle anymore + simplifyAccessChainSwizzle(); +} + +// Comments in header +void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment) +{ + assert(accessChain.isRValue == false); + + transferAccessChainSwizzle(true); + Id base = collapseAccessChain(); + Id source = rvalue; + + // dynamic component should be gone + assert(accessChain.component == NoResult); + + // If swizzle still exists, it is out-of-order or not full, we must load the target vector, + // extract and insert elements to perform writeMask and/or swizzle. 
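
// Sketch of the masked-store path described above, e.g. GLSL `v.yx = rhs;`:
//   1. collapseAccessChain() yields a pointer to the whole vector v
//   2. the vector is loaded, and createLvalueSwizzle() merges rhs into the
//      loaded value using an OpVectorShuffle that acts as the write mask
//   3. the merged vector is stored back through the same pointer
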
+ if (accessChain.swizzle.size() > 0) { + Id tempBaseId = createLoad(base); + source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, source, accessChain.swizzle); + } + + // take LSB of alignment + alignment = alignment & ~(alignment & (alignment-1)); + if (getStorageClass(base) == StorageClassPhysicalStorageBufferEXT) { + memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask); + } + + createStore(source, base, memoryAccess, scope, alignment); +} + +// Comments in header +Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, + spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment) +{ + Id id; + + if (accessChain.isRValue) { + // transfer access chain, but try to stay in registers + transferAccessChainSwizzle(false); + if (accessChain.indexChain.size() > 0) { + Id swizzleBase = accessChain.preSwizzleBaseType != NoType ? accessChain.preSwizzleBaseType : resultType; + + // if all the accesses are constants, we can use OpCompositeExtract + std::vector indexes; + bool constant = true; + for (int i = 0; i < (int)accessChain.indexChain.size(); ++i) { + if (isConstantScalar(accessChain.indexChain[i])) + indexes.push_back(getConstantScalar(accessChain.indexChain[i])); + else { + constant = false; + break; + } + } + + if (constant) { + id = createCompositeExtract(accessChain.base, swizzleBase, indexes); + } else { + Id lValue = NoResult; + if (spvVersion >= Spv_1_4) { + // make a new function variable for this r-value, using an initializer, + // and mark it as NonWritable so that downstream it can be detected as a lookup + // table + lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable", + accessChain.base); + addDecoration(lValue, DecorationNonWritable); + } else { + lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable"); + // store into it + createStore(accessChain.base, lValue); + } + // move base to the new variable + accessChain.base = lValue; + accessChain.isRValue = false; + + // load through the access chain + id = createLoad(collapseAccessChain()); + } + setPrecision(id, precision); + } else + id = accessChain.base; // no precision, it was set when this was defined + } else { + transferAccessChainSwizzle(true); + + // take LSB of alignment + alignment = alignment & ~(alignment & (alignment-1)); + if (getStorageClass(accessChain.base) == StorageClassPhysicalStorageBufferEXT) { + memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask); + } + + // load through the access chain + id = collapseAccessChain(); + // Apply nonuniform both to the access chain and the loaded value. + // Buffer accesses need the access chain decorated, and this is where + // loaded image types get decorated. TODO: This should maybe move to + // createImageTextureFunctionCall. 
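
// Note on the r-value path above: when every index is a front-end constant,
// the whole chain folds into a single OpCompositeExtract. A dynamic index
// instead spills the r-value into a Function-storage variable ("indexable")
// so that a real OpAccessChain + OpLoad can be emitted; under SPIR-V 1.4+ the
// spill uses an initializer and a NonWritable decoration so downstream passes
// can recognize it as a lookup table.
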
+ addDecoration(id, nonUniform); + id = createLoad(id, memoryAccess, scope, alignment); + setPrecision(id, precision); + addDecoration(id, nonUniform); + } + + // Done, unless there are swizzles to do + if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult) + return id; + + // Do remaining swizzling + + // Do the basic swizzle + if (accessChain.swizzle.size() > 0) { + Id swizzledType = getScalarTypeId(getTypeId(id)); + if (accessChain.swizzle.size() > 1) + swizzledType = makeVectorType(swizzledType, (int)accessChain.swizzle.size()); + id = createRvalueSwizzle(precision, swizzledType, id, accessChain.swizzle); + } + + // Do the dynamic component + if (accessChain.component != NoResult) + id = setPrecision(createVectorExtractDynamic(id, resultType, accessChain.component), precision); + + addDecoration(id, nonUniform); + return id; +} + +Id Builder::accessChainGetLValue() +{ + assert(accessChain.isRValue == false); + + transferAccessChainSwizzle(true); + Id lvalue = collapseAccessChain(); + + // If swizzle exists, it is out-of-order or not full, we must load the target vector, + // extract and insert elements to perform writeMask and/or swizzle. This does not + // go with getting a direct l-value pointer. + assert(accessChain.swizzle.size() == 0); + assert(accessChain.component == NoResult); + + return lvalue; +} + +// comment in header +Id Builder::accessChainGetInferredType() +{ + // anything to operate on? + if (accessChain.base == NoResult) + return NoType; + Id type = getTypeId(accessChain.base); + + // do initial dereference + if (! accessChain.isRValue) + type = getContainedTypeId(type); + + // dereference each index + for (auto it = accessChain.indexChain.cbegin(); it != accessChain.indexChain.cend(); ++it) { + if (isStructType(type)) + type = getContainedTypeId(type, getConstantScalar(*it)); + else + type = getContainedTypeId(type); + } + + // dereference swizzle + if (accessChain.swizzle.size() == 1) + type = getContainedTypeId(type); + else if (accessChain.swizzle.size() > 1) + type = makeVectorType(getContainedTypeId(type), (int)accessChain.swizzle.size()); + + // dereference component selection + if (accessChain.component) + type = getContainedTypeId(type); + + return type; +} + +void Builder::dump(std::vector& out) const +{ + // Header, before first instructions: + out.push_back(MagicNumber); + out.push_back(spvVersion); + out.push_back(builderNumber); + out.push_back(uniqueId + 1); + out.push_back(0); + + // Capabilities + for (auto it = capabilities.cbegin(); it != capabilities.cend(); ++it) { + Instruction capInst(0, 0, OpCapability); + capInst.addImmediateOperand(*it); + capInst.dump(out); + } + + for (auto it = extensions.cbegin(); it != extensions.cend(); ++it) { + Instruction extInst(0, 0, OpExtension); + extInst.addStringOperand(it->c_str()); + extInst.dump(out); + } + + dumpInstructions(out, imports); + Instruction memInst(0, 0, OpMemoryModel); + memInst.addImmediateOperand(addressModel); + memInst.addImmediateOperand(memoryModel); + memInst.dump(out); + + // Instructions saved up while building: + dumpInstructions(out, entryPoints); + dumpInstructions(out, executionModes); + + // Debug instructions + dumpInstructions(out, strings); + dumpSourceInstructions(out); + for (int e = 0; e < (int)sourceExtensions.size(); ++e) { + Instruction sourceExtInst(0, 0, OpSourceExtension); + sourceExtInst.addStringOperand(sourceExtensions[e]); + sourceExtInst.dump(out); + } + dumpInstructions(out, names); + dumpModuleProcesses(out); + + // Annotation instructions + 
dumpInstructions(out, decorations); + + dumpInstructions(out, constantsTypesGlobals); + dumpInstructions(out, externals); + + // The functions + module.dump(out); +} + +// +// Protected methods. +// + +// Turn the described access chain in 'accessChain' into an instruction(s) +// computing its address. This *cannot* include complex swizzles, which must +// be handled after this is called. +// +// Can generate code. +Id Builder::collapseAccessChain() +{ + assert(accessChain.isRValue == false); + + // did we already emit an access chain for this? + if (accessChain.instr != NoResult) + return accessChain.instr; + + // If we have a dynamic component, we can still transfer + // that into a final operand to the access chain. We need to remap the + // dynamic component through the swizzle to get a new dynamic component to + // update. + // + // This was not done in transferAccessChainSwizzle() because it might + // generate code. + remapDynamicSwizzle(); + if (accessChain.component != NoResult) { + // transfer the dynamic component to the access chain + accessChain.indexChain.push_back(accessChain.component); + accessChain.component = NoResult; + } + + // note that non-trivial swizzling is left pending + + // do we have an access chain? + if (accessChain.indexChain.size() == 0) + return accessChain.base; + + // emit the access chain + StorageClass storageClass = (StorageClass)module.getStorageClass(getTypeId(accessChain.base)); + accessChain.instr = createAccessChain(storageClass, accessChain.base, accessChain.indexChain); + + return accessChain.instr; +} + +// For a dynamic component selection of a swizzle. +// +// Turn the swizzle and dynamic component into just a dynamic component. +// +// Generates code. +void Builder::remapDynamicSwizzle() +{ + // do we have a swizzle to remap a dynamic component through? + if (accessChain.component != NoResult && accessChain.swizzle.size() > 1) { + // build a vector of the swizzle for the component to map into + std::vector components; + for (int c = 0; c < (int)accessChain.swizzle.size(); ++c) + components.push_back(makeUintConstant(accessChain.swizzle[c])); + Id mapType = makeVectorType(makeUintType(32), (int)accessChain.swizzle.size()); + Id map = makeCompositeConstant(mapType, components); + + // use it + accessChain.component = createVectorExtractDynamic(map, makeUintType(32), accessChain.component); + accessChain.swizzle.clear(); + } +} + +// clear out swizzle if it is redundant, that is reselecting the same components +// that would be present without the swizzle. +void Builder::simplifyAccessChainSwizzle() +{ + // If the swizzle has fewer components than the vector, it is subsetting, and must stay + // to preserve that fact. + if (getNumTypeComponents(accessChain.preSwizzleBaseType) > (int)accessChain.swizzle.size()) + return; + + // if components are out of order, it is a swizzle + for (unsigned int i = 0; i < accessChain.swizzle.size(); ++i) { + if (i != accessChain.swizzle[i]) + return; + } + + // otherwise, there is no need to track this swizzle + accessChain.swizzle.clear(); + if (accessChain.component == NoResult) + accessChain.preSwizzleBaseType = NoType; +} + +// To the extent any swizzling can become part of the chain +// of accesses instead of a post operation, make it so. +// If 'dynamic' is true, include transferring the dynamic component, +// otherwise, leave it pending. +// +// Does not generate code. just updates the access chain. +void Builder::transferAccessChainSwizzle(bool dynamic) +{ + // non existent? 
+    if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
+        return;
+
+    // too complex?
+    // (this requires either a swizzle, or generating code for a dynamic component)
+    if (accessChain.swizzle.size() > 1)
+        return;
+
+    // single component, either in the swizzle and/or dynamic component
+    if (accessChain.swizzle.size() == 1) {
+        assert(accessChain.component == NoResult);
+        // handle static component selection
+        accessChain.indexChain.push_back(makeUintConstant(accessChain.swizzle.front()));
+        accessChain.swizzle.clear();
+        accessChain.preSwizzleBaseType = NoType;
+    } else if (dynamic && accessChain.component != NoResult) {
+        assert(accessChain.swizzle.size() == 0);
+        // handle dynamic component
+        accessChain.indexChain.push_back(accessChain.component);
+        accessChain.preSwizzleBaseType = NoType;
+        accessChain.component = NoResult;
+    }
+}
+
+// Utility method for creating a new block and setting the insert point to
+// be in it. This is useful for flow-control operations that need a "dummy"
+// block following them (e.g. instructions after a discard, etc.).
+void Builder::createAndSetNoPredecessorBlock(const char* /*name*/)
+{
+    Block* block = new Block(getUniqueId(), buildPoint->getParent());
+    block->setUnreachable();
+    buildPoint->getParent().addBlock(block);
+    setBuildPoint(block);
+
+    // if (name)
+    //    addName(block->getId(), name);
+}
+
+// Comments in header
+void Builder::createBranch(Block* block)
+{
+    Instruction* branch = new Instruction(OpBranch);
+    branch->addIdOperand(block->getId());
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(branch));
+    block->addPredecessor(buildPoint);
+}
+
+void Builder::createSelectionMerge(Block* mergeBlock, unsigned int control)
+{
+    Instruction* merge = new Instruction(OpSelectionMerge);
+    merge->addIdOperand(mergeBlock->getId());
+    merge->addImmediateOperand(control);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(merge));
+}
+
+void Builder::createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control,
+                              const std::vector<unsigned int>& operands)
+{
+    Instruction* merge = new Instruction(OpLoopMerge);
+    merge->addIdOperand(mergeBlock->getId());
+    merge->addIdOperand(continueBlock->getId());
+    merge->addImmediateOperand(control);
+    for (int op = 0; op < (int)operands.size(); ++op)
+        merge->addImmediateOperand(operands[op]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(merge));
+}
+
+void Builder::createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock)
+{
+    Instruction* branch = new Instruction(OpBranchConditional);
+    branch->addIdOperand(condition);
+    branch->addIdOperand(thenBlock->getId());
+    branch->addIdOperand(elseBlock->getId());
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(branch));
+    thenBlock->addPredecessor(buildPoint);
+    elseBlock->addPredecessor(buildPoint);
+}
+
+// OpSource
+// [OpSourceContinued]
+// ...
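+// For illustration: an instruction's word count is a 16-bit field, so a large source
+// string cannot ride in a single OpSource; e.g. a 1 MB GLSL source is emitted as one
+// OpSource holding the first ~256 KB chunk (nonNullBytesPerInstruction bytes) plus a
+// chain of OpSourceContinued instructions for the rest, as the loop below implements.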
+void Builder::dumpSourceInstructions(const spv::Id fileId, const std::string& text,
+                                     std::vector<unsigned int>& out) const
+{
+    const int maxWordCount = 0xFFFF;
+    const int opSourceWordCount = 4;
+    const int nonNullBytesPerInstruction = 4 * (maxWordCount - opSourceWordCount) - 1;
+
+    if (source != SourceLanguageUnknown) {
+        // OpSource Language Version File Source
+        Instruction sourceInst(NoResult, NoType, OpSource);
+        sourceInst.addImmediateOperand(source);
+        sourceInst.addImmediateOperand(sourceVersion);
+        // File operand
+        if (fileId != NoResult) {
+            sourceInst.addIdOperand(fileId);
+            // Source operand
+            if (text.size() > 0) {
+                int nextByte = 0;
+                std::string subString;
+                while ((int)text.size() - nextByte > 0) {
+                    subString = text.substr(nextByte, nonNullBytesPerInstruction);
+                    if (nextByte == 0) {
+                        // OpSource
+                        sourceInst.addStringOperand(subString.c_str());
+                        sourceInst.dump(out);
+                    } else {
+                        // OpSourceContinued
+                        Instruction sourceContinuedInst(OpSourceContinued);
+                        sourceContinuedInst.addStringOperand(subString.c_str());
+                        sourceContinuedInst.dump(out);
+                    }
+                    nextByte += nonNullBytesPerInstruction;
+                }
+            } else
+                sourceInst.dump(out);
+        } else
+            sourceInst.dump(out);
+    }
+}
+
+// Dump an OpSource[Continued] sequence for the source and every include file
+void Builder::dumpSourceInstructions(std::vector<unsigned int>& out) const
+{
+    dumpSourceInstructions(sourceFileStringId, sourceText, out);
+    for (auto iItr = includeFiles.begin(); iItr != includeFiles.end(); ++iItr)
+        dumpSourceInstructions(iItr->first, *iItr->second, out);
+}
+
+void Builder::dumpInstructions(std::vector<unsigned int>& out,
+    const std::vector<std::unique_ptr<Instruction> >& instructions) const
+{
+    for (int i = 0; i < (int)instructions.size(); ++i) {
+        instructions[i]->dump(out);
+    }
+}
+
+void Builder::dumpModuleProcesses(std::vector<unsigned int>& out) const
+{
+    for (int i = 0; i < (int)moduleProcesses.size(); ++i) {
+        Instruction moduleProcessed(OpModuleProcessed);
+        moduleProcessed.addStringOperand(moduleProcesses[i]);
+        moduleProcessed.dump(out);
+    }
+}
+
+}; // end spv namespace
diff --git a/dep/glslang/SPIRV/SpvBuilder.h b/dep/glslang/SPIRV/SpvBuilder.h
new file mode 100644
index 000000000..71b90d609
--- /dev/null
+++ b/dep/glslang/SPIRV/SpvBuilder.h
@@ -0,0 +1,838 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+// Copyright (C) 2015-2020 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// "Builder" is an interface to fully build SPIR-V IR. Allocate one of
+// these to build (a thread safe) internal SPIR-V representation (IR),
+// and then dump it as a binary stream according to the SPIR-V specification.
+//
+// A Builder has a 1:1 relationship with a SPIR-V module.
+//
+
+#pragma once
+#ifndef SpvBuilder_H
+#define SpvBuilder_H
+
+#include "Logger.h"
+#include "spirv.hpp"
+#include "spvIR.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <sstream>
+#include <stack>
+#include <unordered_map>
+#include <map>
+
+namespace spv {
+
+typedef enum {
+    Spv_1_0 = (1 << 16),
+    Spv_1_1 = (1 << 16) | (1 << 8),
+    Spv_1_2 = (1 << 16) | (2 << 8),
+    Spv_1_3 = (1 << 16) | (3 << 8),
+    Spv_1_4 = (1 << 16) | (4 << 8),
+    Spv_1_5 = (1 << 16) | (5 << 8),
+} SpvVersion;
+
+class Builder {
+public:
+    Builder(unsigned int spvVersion, unsigned int userNumber, SpvBuildLogger* logger);
+    virtual ~Builder();
+
+    static const int maxMatrixSize = 4;
+
+    unsigned int getSpvVersion() const { return spvVersion; }
+
+    void setSource(spv::SourceLanguage lang, int version)
+    {
+        source = lang;
+        sourceVersion = version;
+    }
+    spv::Id getStringId(const std::string& str)
+    {
+        auto sItr = stringIds.find(str);
+        if (sItr != stringIds.end())
+            return sItr->second;
+        spv::Id strId = getUniqueId();
+        Instruction* fileString = new Instruction(strId, NoType, OpString);
+        const char* file_c_str = str.c_str();
+        fileString->addStringOperand(file_c_str);
+        strings.push_back(std::unique_ptr<Instruction>(fileString));
+        module.mapInstruction(fileString);
+        stringIds[file_c_str] = strId;
+        return strId;
+    }
+    void setSourceFile(const std::string& file)
+    {
+        sourceFileStringId = getStringId(file);
+    }
+    void setSourceText(const std::string& text) { sourceText = text; }
+    void addSourceExtension(const char* ext) { sourceExtensions.push_back(ext); }
+    void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
+    void setEmitOpLines() { emitOpLines = true; }
+    void addExtension(const char* ext) { extensions.insert(ext); }
+    void removeExtension(const char* ext)
+    {
+        extensions.erase(ext);
+    }
+    void addIncorporatedExtension(const char* ext, SpvVersion incorporatedVersion)
+    {
+        if (getSpvVersion() < static_cast<unsigned>(incorporatedVersion))
+            addExtension(ext);
+    }
+    void promoteIncorporatedExtension(const char* baseExt, const char* promoExt, SpvVersion incorporatedVersion)
+    {
+        removeExtension(baseExt);
+        addIncorporatedExtension(promoExt, incorporatedVersion);
+    }
+    void addInclude(const std::string& name, const std::string& text)
+    {
+        spv::Id incId = getStringId(name);
+        includeFiles[incId] = &text;
+    }
+    Id import(const char*);
+    void setMemoryModel(spv::AddressingModel addr, spv::MemoryModel mem)
+    {
+        addressModel = addr;
+        memoryModel = mem;
+    }
+
+    void addCapability(spv::Capability cap) { capabilities.insert(cap); }
+
+    // To get a new <id> for anything needing a new one.
+    Id getUniqueId() { return ++uniqueId; }
+
+    // To get a set of new <id>s, e.g., for a set of function parameters
+    Id getUniqueIds(int numIds)
+    {
+        Id id = uniqueId + 1;
+        uniqueId += numIds;
+        return id;
+    }
+
+    // Generate OpLine for non-filename-based #line directives (i.e. no filename
+    // seen yet): Log the current line, and if different than the last one,
+    // issue a new OpLine using the new line and current source file name.
+    void setLine(int line);
+
+    // If filename null, generate OpLine for non-filename-based line directives,
+    // else do filename-based: Log the current line and file, and if different
+    // than the last one, issue a new OpLine using the new line and file
+    // name.
+    void setLine(int line, const char* filename);
+    // Low-level OpLine. See setLine() for a layered helper.
+    void addLine(Id fileName, int line, int column);
+
+    // For creating new types (will return old type if the requested one was already made).
+    Id makeVoidType();
+    Id makeBoolType();
+    Id makePointer(StorageClass, Id pointee);
+    Id makeForwardPointer(StorageClass);
+    Id makePointerFromForwardPointer(StorageClass, Id forwardPointerType, Id pointee);
+    Id makeIntegerType(int width, bool hasSign); // generic
+    Id makeIntType(int width) { return makeIntegerType(width, true); }
+    Id makeUintType(int width) { return makeIntegerType(width, false); }
+    Id makeFloatType(int width);
+    Id makeStructType(const std::vector<Id>& members, const char*);
+    Id makeStructResultType(Id type0, Id type1);
+    Id makeVectorType(Id component, int size);
+    Id makeMatrixType(Id component, int cols, int rows);
+    Id makeArrayType(Id element, Id sizeId, int stride); // 0 stride means no stride decoration
+    Id makeRuntimeArray(Id element);
+    Id makeFunctionType(Id returnType, const std::vector<Id>& paramTypes);
+    Id makeImageType(Id sampledType, Dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format);
+    Id makeSamplerType();
+    Id makeSampledImageType(Id imageType);
+    Id makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols);
+
+    // accelerationStructureNV type
+    Id makeAccelerationStructureType();
+    // rayQueryEXT type
+    Id makeRayQueryType();
+
+    // For querying about types.
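+    // For illustration (names from this header): given
+    //     Id v4 = makeVectorType(makeFloatType(32), 4);
+    // the queries below behave as
+    //     isVectorType(v4)         -> true
+    //     getNumTypeComponents(v4) -> 4
+    //     getScalarTypeId(v4)      -> the float32 type <id>
+    // and a second makeVectorType() call with the same arguments returns the same <id>.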
+ Id getTypeId(Id resultId) const { return module.getTypeId(resultId); } + Id getDerefTypeId(Id resultId) const; + Op getOpCode(Id id) const { return module.getInstruction(id)->getOpCode(); } + Op getTypeClass(Id typeId) const { return getOpCode(typeId); } + Op getMostBasicTypeClass(Id typeId) const; + int getNumComponents(Id resultId) const { return getNumTypeComponents(getTypeId(resultId)); } + int getNumTypeConstituents(Id typeId) const; + int getNumTypeComponents(Id typeId) const { return getNumTypeConstituents(typeId); } + Id getScalarTypeId(Id typeId) const; + Id getContainedTypeId(Id typeId) const; + Id getContainedTypeId(Id typeId, int) const; + StorageClass getTypeStorageClass(Id typeId) const { return module.getStorageClass(typeId); } + ImageFormat getImageTypeFormat(Id typeId) const + { return (ImageFormat)module.getInstruction(typeId)->getImmediateOperand(6); } + + bool isPointer(Id resultId) const { return isPointerType(getTypeId(resultId)); } + bool isScalar(Id resultId) const { return isScalarType(getTypeId(resultId)); } + bool isVector(Id resultId) const { return isVectorType(getTypeId(resultId)); } + bool isMatrix(Id resultId) const { return isMatrixType(getTypeId(resultId)); } + bool isCooperativeMatrix(Id resultId)const { return isCooperativeMatrixType(getTypeId(resultId)); } + bool isAggregate(Id resultId) const { return isAggregateType(getTypeId(resultId)); } + bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); } + + bool isBoolType(Id typeId) + { return groupedTypes[OpTypeBool].size() > 0 && typeId == groupedTypes[OpTypeBool].back()->getResultId(); } + bool isIntType(Id typeId) const + { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) != 0; } + bool isUintType(Id typeId) const + { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) == 0; } + bool isFloatType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat; } + bool isPointerType(Id typeId) const { return getTypeClass(typeId) == OpTypePointer; } + bool isScalarType(Id typeId) const + { return getTypeClass(typeId) == OpTypeFloat || getTypeClass(typeId) == OpTypeInt || + getTypeClass(typeId) == OpTypeBool; } + bool isVectorType(Id typeId) const { return getTypeClass(typeId) == OpTypeVector; } + bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; } + bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; } + bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; } +#ifdef GLSLANG_WEB + bool isCooperativeMatrixType(Id typeId)const { return false; } +#else + bool isCooperativeMatrixType(Id typeId)const { return getTypeClass(typeId) == OpTypeCooperativeMatrixNV; } +#endif + bool isAggregateType(Id typeId) const + { return isArrayType(typeId) || isStructType(typeId) || isCooperativeMatrixType(typeId); } + bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; } + bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; } + bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; } + bool containsType(Id typeId, Op typeOp, unsigned int width) const; + bool containsPhysicalStorageBufferOrArray(Id typeId) const; + + bool isConstantOpCode(Op opcode) const; + bool isSpecConstantOpCode(Op opcode) const; + bool isConstant(Id resultId) const { return isConstantOpCode(getOpCode(resultId)); } + bool isConstantScalar(Id 
resultId) const { return getOpCode(resultId) == OpConstant; } + bool isSpecConstant(Id resultId) const { return isSpecConstantOpCode(getOpCode(resultId)); } + unsigned int getConstantScalar(Id resultId) const + { return module.getInstruction(resultId)->getImmediateOperand(0); } + StorageClass getStorageClass(Id resultId) const { return getTypeStorageClass(getTypeId(resultId)); } + + int getScalarTypeWidth(Id typeId) const + { + Id scalarTypeId = getScalarTypeId(typeId); + assert(getTypeClass(scalarTypeId) == OpTypeInt || getTypeClass(scalarTypeId) == OpTypeFloat); + return module.getInstruction(scalarTypeId)->getImmediateOperand(0); + } + + int getTypeNumColumns(Id typeId) const + { + assert(isMatrixType(typeId)); + return getNumTypeConstituents(typeId); + } + int getNumColumns(Id resultId) const { return getTypeNumColumns(getTypeId(resultId)); } + int getTypeNumRows(Id typeId) const + { + assert(isMatrixType(typeId)); + return getNumTypeComponents(getContainedTypeId(typeId)); + } + int getNumRows(Id resultId) const { return getTypeNumRows(getTypeId(resultId)); } + + Dim getTypeDimensionality(Id typeId) const + { + assert(isImageType(typeId)); + return (Dim)module.getInstruction(typeId)->getImmediateOperand(1); + } + Id getImageType(Id resultId) const + { + Id typeId = getTypeId(resultId); + assert(isImageType(typeId) || isSampledImageType(typeId)); + return isSampledImageType(typeId) ? module.getInstruction(typeId)->getIdOperand(0) : typeId; + } + bool isArrayedImageType(Id typeId) const + { + assert(isImageType(typeId)); + return module.getInstruction(typeId)->getImmediateOperand(3) != 0; + } + + // For making new constants (will return old constant if the requested one was already made). + Id makeBoolConstant(bool b, bool specConstant = false); + Id makeInt8Constant(int i, bool specConstant = false) + { return makeIntConstant(makeIntType(8), (unsigned)i, specConstant); } + Id makeUint8Constant(unsigned u, bool specConstant = false) + { return makeIntConstant(makeUintType(8), u, specConstant); } + Id makeInt16Constant(int i, bool specConstant = false) + { return makeIntConstant(makeIntType(16), (unsigned)i, specConstant); } + Id makeUint16Constant(unsigned u, bool specConstant = false) + { return makeIntConstant(makeUintType(16), u, specConstant); } + Id makeIntConstant(int i, bool specConstant = false) + { return makeIntConstant(makeIntType(32), (unsigned)i, specConstant); } + Id makeUintConstant(unsigned u, bool specConstant = false) + { return makeIntConstant(makeUintType(32), u, specConstant); } + Id makeInt64Constant(long long i, bool specConstant = false) + { return makeInt64Constant(makeIntType(64), (unsigned long long)i, specConstant); } + Id makeUint64Constant(unsigned long long u, bool specConstant = false) + { return makeInt64Constant(makeUintType(64), u, specConstant); } + Id makeFloatConstant(float f, bool specConstant = false); + Id makeDoubleConstant(double d, bool specConstant = false); + Id makeFloat16Constant(float f16, bool specConstant = false); + Id makeFpConstant(Id type, double d, bool specConstant = false); + + // Turn the array of constants into a proper spv constant of the requested type. + Id makeCompositeConstant(Id type, const std::vector& comps, bool specConst = false); + + // Methods for adding information outside the CFG. 
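+    // For illustration (methods declared just below): typical non-CFG additions are
+    //     addName(resultId, "color");                          // OpName debug info
+    //     addDecoration(resultId, DecorationRelaxedPrecision);
+    //     addMemberDecoration(structTypeId, 0, DecorationOffset, 0);
+    // each of which appends to the debug/annotation sections emitted by dump().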
+    Instruction* addEntryPoint(ExecutionModel, Function*, const char* name);
+    void addExecutionMode(Function*, ExecutionMode mode, int value1 = -1, int value2 = -1, int value3 = -1);
+    void addName(Id, const char* name);
+    void addMemberName(Id, int member, const char* name);
+    void addDecoration(Id, Decoration, int num = -1);
+    void addDecoration(Id, Decoration, const char*);
+    void addDecorationId(Id id, Decoration, Id idDecoration);
+    void addMemberDecoration(Id, unsigned int member, Decoration, int num = -1);
+    void addMemberDecoration(Id, unsigned int member, Decoration, const char*);
+
+    // At the end of what block do the next create*() instructions go?
+    void setBuildPoint(Block* bp) { buildPoint = bp; }
+    Block* getBuildPoint() const { return buildPoint; }
+
+    // Make the entry-point function. The returned pointer is only valid
+    // for the lifetime of this builder.
+    Function* makeEntryPoint(const char*);
+
+    // Make a shader-style function, and create its entry block if entry is non-zero.
+    // Return the function, pass back the entry.
+    // The returned pointer is only valid for the lifetime of this builder.
+    Function* makeFunctionEntry(Decoration precision, Id returnType, const char* name,
+        const std::vector<Id>& paramTypes, const std::vector<std::vector<Decoration>>& precisions, Block **entry = 0);
+
+    // Create a return. An 'implicit' return is one not appearing in the source
+    // code. In the case of an implicit return, no post-return block is inserted.
+    void makeReturn(bool implicit, Id retVal = 0);
+
+    // Generate all the code needed to finish up a function.
+    void leaveFunction();
+
+    // Create a discard.
+    void makeDiscard();
+
+    // Create a global or function local or IO variable.
+    Id createVariable(StorageClass, Id type, const char* name = 0, Id initializer = NoResult);
+
+    // Create an intermediate with an undefined value.
+    Id createUndefined(Id type);
+
+    // Store into an Id and return the l-value
+    void createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone,
+        spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+    // Load from an Id and return it
+    Id createLoad(Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone,
+        spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+    // Create an OpAccessChain instruction
+    Id createAccessChain(StorageClass, Id base, const std::vector<Id>& offsets);
+
+    // Create an OpArrayLength instruction
+    Id createArrayLength(Id base, unsigned int member);
+
+    // Create an OpCooperativeMatrixLengthNV instruction
+    Id createCooperativeMatrixLength(Id type);
+
+    // Create an OpCompositeExtract instruction
+    Id createCompositeExtract(Id composite, Id typeId, unsigned index);
+    Id createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes);
+    Id createCompositeInsert(Id object, Id composite, Id typeId, unsigned index);
+    Id createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes);
+
+    Id createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex);
+    Id createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex);
+
+    void createNoResultOp(Op);
+    void createNoResultOp(Op, Id operand);
+    void createNoResultOp(Op, const std::vector<Id>& operands);
+    void createNoResultOp(Op, const std::vector<IdImmediate>& operands);
+    void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask);
+    void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics);
+    Id createUnaryOp(Op, Id typeId, Id operand);
+    Id createBinOp(Op, Id typeId, Id operand1, Id operand2);
+    Id createTriOp(Op, Id typeId, Id operand1, Id operand2, Id operand3);
+    Id createOp(Op, Id typeId, const std::vector<Id>& operands);
+    Id createOp(Op, Id typeId, const std::vector<IdImmediate>& operands);
+    Id createFunctionCall(spv::Function*, const std::vector<spv::Id>&);
+    Id createSpecConstantOp(Op, Id typeId, const std::vector<Id>& operands, const std::vector<unsigned>& literals);
+
+    // Take an rvalue (source) and a set of channels to extract from it to
+    // make a new rvalue, which is returned.
+    Id createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector<unsigned>& channels);
+
+    // Take a copy of an lvalue (target) and a source of components, and set the
+    // source components into the lvalue where the 'channels' say to put them.
+    // An updated version of the target is returned.
+    // (No true lvalue or stores are used.)
+    Id createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector<unsigned>& channels);
+
+    // If both the id and precision are valid, the id
+    // gets tagged with the requested precision.
+    // The passed in id is always the returned id, to simplify use patterns.
+    Id setPrecision(Id id, Decoration precision)
+    {
+        if (precision != NoPrecision && id != NoResult)
+            addDecoration(id, precision);
+
+        return id;
+    }
+
+    // Can smear a scalar to a vector for the following forms:
+    //   - promoteScalar(scalar, vector)  // smear scalar to width of vector
+    //   - promoteScalar(vector, scalar)  // smear scalar to width of vector
+    //   - promoteScalar(pointer, scalar) // smear scalar to width of what pointer points to
+    //   - promoteScalar(scalar, scalar)  // do nothing
+    // Other forms are not allowed.
+    //
+    // Generally, the type of 'scalar' does not need to be the same type as the components in 'vector'.
+ // The type of the created vector is a vector of components of the same type as the scalar. + // + // Note: One of the arguments will change, with the result coming back that way rather than + // through the return value. + void promoteScalar(Decoration precision, Id& left, Id& right); + + // Make a value by smearing the scalar to fill the type. + // vectorType should be the correct type for making a vector of scalarVal. + // (No conversions are done.) + Id smearScalar(Decoration precision, Id scalarVal, Id vectorType); + + // Create a call to a built-in function. + Id createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector& args); + + // List of parameters used to create a texture operation + struct TextureParameters { + Id sampler; + Id coords; + Id bias; + Id lod; + Id Dref; + Id offset; + Id offsets; + Id gradX; + Id gradY; + Id sample; + Id component; + Id texelOut; + Id lodClamp; + Id granularity; + Id coarse; + bool nonprivate; + bool volatil; + }; + + // Select the correct texture operation based on all inputs, and emit the correct instruction + Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, + bool noImplicit, const TextureParameters&, ImageOperandsMask); + + // Emit the OpTextureQuery* instruction that was passed in. + // Figure out the right return value and type, and return it. + Id createTextureQueryCall(Op, const TextureParameters&, bool isUnsignedResult); + + Id createSamplePositionCall(Decoration precision, Id, Id); + + Id createBitFieldExtractCall(Decoration precision, Id, Id, Id, bool isSigned); + Id createBitFieldInsertCall(Decoration precision, Id, Id, Id, Id); + + // Reduction comparison for composites: For equal and not-equal resulting in a scalar. + Id createCompositeCompare(Decoration precision, Id, Id, bool /* true if for equal, false if for not-equal */); + + // OpCompositeConstruct + Id createCompositeConstruct(Id typeId, const std::vector& constituents); + + // vector or scalar constructor + Id createConstructor(Decoration precision, const std::vector& sources, Id resultTypeId); + + // matrix constructor + Id createMatrixConstructor(Decoration precision, const std::vector& sources, Id constructee); + + // Helper to use for building nested control flow with if-then-else. + class If { + public: + If(Id condition, unsigned int ctrl, Builder& builder); + ~If() {} + + void makeBeginElse(); + void makeEndIf(); + + private: + If(const If&); + If& operator=(If&); + + Builder& builder; + Id condition; + unsigned int control; + Function* function; + Block* headerBlock; + Block* thenBlock; + Block* elseBlock; + Block* mergeBlock; + }; + + // Make a switch statement. A switch has 'numSegments' of pieces of code, not containing + // any case/default labels, all separated by one or more case/default labels. Each possible + // case value v is a jump to the caseValues[v] segment. The defaultSegment is also in this + // number space. How to compute the value is given by 'condition', as in switch(condition). + // + // The SPIR-V Builder will maintain the stack of post-switch merge blocks for nested switches. + // + // Use a defaultSegment < 0 if there is no default segment (to branch to post switch). + // + // Returns the right set of basic blocks to start each code segment with, so that the caller's + // recursion stack can hold the memory for it. 
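+    // For illustration: for GLSL
+    //     switch (k) { case 3: ...; case 5: ...; default: ... }
+    // with segments ordered (0: case 3, 1: case 5, 2: default), a caller would pass
+    // numSegments = 3, caseValues = {3, 5}, valueToSegment = {0, 1}, defaultSegment = 2,
+    // then emit each segment between nextSwitchSegment() calls and finish with endSwitch().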
+    //
+    void makeSwitch(Id condition, unsigned int control, int numSegments, const std::vector<int>& caseValues,
+                    const std::vector<int>& valueToSegment, int defaultSegment, std::vector<Block*>& segmentBB);
+
+    // Add a branch to the innermost switch's merge block.
+    void addSwitchBreak();
+
+    // Move to the next code segment, passing in the return argument in makeSwitch()
+    void nextSwitchSegment(std::vector<Block*>& segmentBB, int segment);
+
+    // Finish off the innermost switch.
+    void endSwitch(std::vector<Block*>& segmentBB);
+
+    struct LoopBlocks {
+        LoopBlocks(Block& head, Block& body, Block& merge, Block& continue_target) :
+            head(head), body(body), merge(merge), continue_target(continue_target) { }
+        Block &head, &body, &merge, &continue_target;
+    private:
+        LoopBlocks();
+        LoopBlocks& operator=(const LoopBlocks&) = delete;
+    };
+
+    // Start a new loop and prepare the builder to generate code for it. Until
+    // closeLoop() is called for this loop, createLoopContinue() and
+    // createLoopExit() will target its corresponding blocks.
+    LoopBlocks& makeNewLoop();
+
+    // Create a new block in the function containing the build point. Memory is
+    // owned by the function object.
+    Block& makeNewBlock();
+
+    // Add a branch to the continue_target of the current (innermost) loop.
+    void createLoopContinue();
+
+    // Add an exit (e.g. "break") from the innermost loop that we're currently
+    // in.
+    void createLoopExit();
+
+    // Close the innermost loop that you're in.
+    void closeLoop();
+
+    //
+    // Access chain design for an R-Value vs. L-Value:
+    //
+    // There is a single access chain the builder is building at
+    // any particular time. Such a chain can be used either to do a load or
+    // a store, when desired.
+    //
+    // Expressions can be r-values, l-values, or both, or only r-values:
+    //    a[b.c].d = ....  // l-value
+    //    ... = a[b.c].d;  // r-value, that also looks like an l-value
+    //    ++a[b.c].d;      // r-value and l-value
+    //    (x + y)[2];      // r-value only, can't possibly be l-value
+    //
+    // Computing an r-value means generating code. Hence,
+    // r-values should only be computed when they are needed, not speculatively.
+    //
+    // Computing an l-value means saving away information for later use in the compiler,
+    // no code is generated until the l-value is later dereferenced. It is okay
+    // to speculatively generate an l-value, just not okay to speculatively dereference it.
+    //
+    // The base of the access chain (the left-most variable or expression
+    // from which everything is based) can be set either as an l-value
+    // or as an r-value. Most efficient would be to set an l-value if one
+    // is available. If an expression was evaluated, the resulting r-value
+    // can be set as the chain base.
+    //
+    // The users of this single access chain can save and restore if they
+    // want to nest or manage multiple chains.
+    //
+
+    struct AccessChain {
+        Id base;                       // for l-values, pointer to the base object, for r-values, the base object
+        std::vector<Id> indexChain;
+        Id instr;                      // cache the instruction that generates this access chain
+        std::vector<unsigned> swizzle; // each std::vector element selects the next GLSL component number
+        Id component;                  // a dynamic component index, can coexist with a swizzle,
+                                       // done after the swizzle, NoResult if not present
+        Id preSwizzleBaseType;         // dereferenced type, before swizzle or component is applied;
+                                       // NoType unless a swizzle or component is present
+        bool isRValue;                 // true if 'base' is an r-value, otherwise, base is an l-value
+        unsigned int alignment;        // bitwise OR of alignment values passed in.
Accumulates worst alignment. + // Only tracks base and (optional) component selection alignment. + + // Accumulate whether anything in the chain of structures has coherent decorations. + struct CoherentFlags { + CoherentFlags() { clear(); } +#ifdef GLSLANG_WEB + void clear() { } + bool isVolatile() const { return false; } + CoherentFlags operator |=(const CoherentFlags &other) { return *this; } +#else + bool isVolatile() const { return volatil; } + bool anyCoherent() const { + return coherent || devicecoherent || queuefamilycoherent || workgroupcoherent || + subgroupcoherent || shadercallcoherent; + } + + unsigned coherent : 1; + unsigned devicecoherent : 1; + unsigned queuefamilycoherent : 1; + unsigned workgroupcoherent : 1; + unsigned subgroupcoherent : 1; + unsigned shadercallcoherent : 1; + unsigned nonprivate : 1; + unsigned volatil : 1; + unsigned isImage : 1; + + void clear() { + coherent = 0; + devicecoherent = 0; + queuefamilycoherent = 0; + workgroupcoherent = 0; + subgroupcoherent = 0; + shadercallcoherent = 0; + nonprivate = 0; + volatil = 0; + isImage = 0; + } + + CoherentFlags operator |=(const CoherentFlags &other) { + coherent |= other.coherent; + devicecoherent |= other.devicecoherent; + queuefamilycoherent |= other.queuefamilycoherent; + workgroupcoherent |= other.workgroupcoherent; + subgroupcoherent |= other.subgroupcoherent; + shadercallcoherent |= other.shadercallcoherent; + nonprivate |= other.nonprivate; + volatil |= other.volatil; + isImage |= other.isImage; + return *this; + } +#endif + }; + CoherentFlags coherentFlags; + }; + + // + // the SPIR-V builder maintains a single active chain that + // the following methods operate on + // + + // for external save and restore + AccessChain getAccessChain() { return accessChain; } + void setAccessChain(AccessChain newChain) { accessChain = newChain; } + + // clear accessChain + void clearAccessChain(); + + // set new base as an l-value base + void setAccessChainLValue(Id lValue) + { + assert(isPointer(lValue)); + accessChain.base = lValue; + } + + // set new base value as an r-value + void setAccessChainRValue(Id rValue) + { + accessChain.isRValue = true; + accessChain.base = rValue; + } + + // push offset onto the end of the chain + void accessChainPush(Id offset, AccessChain::CoherentFlags coherentFlags, unsigned int alignment) + { + accessChain.indexChain.push_back(offset); + accessChain.coherentFlags |= coherentFlags; + accessChain.alignment |= alignment; + } + + // push new swizzle onto the end of any existing swizzle, merging into a single swizzle + void accessChainPushSwizzle(std::vector& swizzle, Id preSwizzleBaseType, + AccessChain::CoherentFlags coherentFlags, unsigned int alignment); + + // push a dynamic component selection onto the access chain, only applicable with a + // non-trivial swizzle or no swizzle + void accessChainPushComponent(Id component, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, + unsigned int alignment) + { + if (accessChain.swizzle.size() != 1) { + accessChain.component = component; + if (accessChain.preSwizzleBaseType == NoType) + accessChain.preSwizzleBaseType = preSwizzleBaseType; + } + accessChain.coherentFlags |= coherentFlags; + accessChain.alignment |= alignment; + } + + // use accessChain and swizzle to store value + void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, + spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0); + + // use accessChain and swizzle to load an r-value + Id accessChainLoad(Decoration 
precision, Decoration nonUniform, Id ResultType,
+        spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax,
+        unsigned int alignment = 0);
+
+    // Return whether or not the access chain can be represented in SPIR-V
+    // as an l-value.
+    // E.g., a[3].yx cannot be, while a[3].y and a[3].y[x] can be.
+    bool isSpvLvalue() const { return accessChain.swizzle.size() <= 1; }
+
+    // get the direct pointer for an l-value
+    Id accessChainGetLValue();
+
+    // Get the inferred SPIR-V type of the result of the current access chain,
+    // based on the type of the base and the chain of dereferences.
+    Id accessChainGetInferredType();
+
+    // Add capabilities, extensions, remove unneeded decorations, etc.,
+    // based on the resulting SPIR-V.
+    void postProcess();
+
+    // Prune unreachable blocks in the CFG and remove unneeded decorations.
+    void postProcessCFG();
+
+#ifndef GLSLANG_WEB
+    // Add capabilities, extensions based on instructions in the module.
+    void postProcessFeatures();
+    // Hook to visit each instruction in a block in a function
+    void postProcess(Instruction&);
+    // Hook to visit each non-32-bit sized float/int operation in a block.
+    void postProcessType(const Instruction&, spv::Id typeId);
+#endif
+
+    void dump(std::vector<unsigned int>&) const;
+
+    void createBranch(Block* block);
+    void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock);
+    void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control,
+                         const std::vector<unsigned int>& operands);
+
+    // Sets to generate opcode for specialization constants.
+    void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; }
+    // Sets to generate opcode for non-specialization constants (normal mode).
+    void setToNormalCodeGenMode() { generatingOpCodeForSpecConst = false; }
+    // Check if the builder is generating code for spec constants.
+    bool isInSpecConstCodeGenMode() { return generatingOpCodeForSpecConst; }
+
+ protected:
+    Id makeIntConstant(Id typeId, unsigned value, bool specConstant);
+    Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant);
+    Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value);
+    Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2);
+    Id findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps);
+    Id findStructConstant(Id typeId, const std::vector<Id>& comps);
+    Id collapseAccessChain();
+    void remapDynamicSwizzle();
+    void transferAccessChainSwizzle(bool dynamic);
+    void simplifyAccessChainSwizzle();
+    void createAndSetNoPredecessorBlock(const char*);
+    void createSelectionMerge(Block* mergeBlock, unsigned int control);
+    void dumpSourceInstructions(std::vector<unsigned int>&) const;
+    void dumpSourceInstructions(const spv::Id fileId, const std::string& text, std::vector<unsigned int>&) const;
+    void dumpInstructions(std::vector<unsigned int>&, const std::vector<std::unique_ptr<Instruction> >&) const;
+    void dumpModuleProcesses(std::vector<unsigned int>&) const;
+    spv::MemoryAccessMask sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc)
+        const;
+
+    unsigned int spvVersion; // the version of SPIR-V to emit in the header
+    SourceLanguage source;
+    int sourceVersion;
+    spv::Id sourceFileStringId;
+    std::string sourceText;
+    int currentLine;
+    const char* currentFile;
+    bool emitOpLines;
+    std::set<std::string> extensions;
+    std::vector<const char*> sourceExtensions;
+    std::vector<const char*> moduleProcesses;
+    AddressingModel addressModel;
+    MemoryModel memoryModel;
+    std::set<spv::Capability> capabilities;
+    int builderNumber;
+    Module module;
+    Block* buildPoint;
+    Id uniqueId;
+    Function* entryPointFunction;
+    bool generatingOpCodeForSpecConst;
+    AccessChain accessChain;
+
+    // special blocks of instructions for output
+    std::vector<std::unique_ptr<Instruction> > strings;
+    std::vector<std::unique_ptr<Instruction> > imports;
+    std::vector<std::unique_ptr<Instruction> > entryPoints;
+    std::vector<std::unique_ptr<Instruction> > executionModes;
+    std::vector<std::unique_ptr<Instruction> > names;
+    std::vector<std::unique_ptr<Instruction> > decorations;
+    std::vector<std::unique_ptr<Instruction> > constantsTypesGlobals;
+    std::vector<std::unique_ptr<Instruction> > externals;
+    std::vector<std::unique_ptr<Instruction> > functions;
+
+    // not output, internally used for quick & dirty canonical (unique) creation
+
+    // map type opcodes to constant inst.
+    std::unordered_map<unsigned int, std::vector<Instruction*>> groupedConstants;
+    // map struct-id to constant instructions
+    std::unordered_map<unsigned int, std::vector<Instruction*>> groupedStructConstants;
+    // map type opcodes to type instructions
+    std::unordered_map<unsigned int, std::vector<Instruction*>> groupedTypes;
+
+    // stack of switches
+    std::stack<Block*> switchMerges;
+
+    // Our loop stack.
+    std::stack<LoopBlocks> loops;
+
+    // map from strings to their string ids
+    std::unordered_map<std::string, spv::Id> stringIds;
+
+    // map from include file name ids to their contents
+    std::map<spv::Id, const std::string*> includeFiles;
+
+    // The stream for outputting warnings and errors.
+    SpvBuildLogger* logger;
+}; // end Builder class
+
+}; // end spv namespace
+
+#endif // SpvBuilder_H
diff --git a/dep/glslang/SPIRV/SpvPostProcess.cpp b/dep/glslang/SPIRV/SpvPostProcess.cpp
new file mode 100644
index 000000000..d40174d17
--- /dev/null
+++ b/dep/glslang/SPIRV/SpvPostProcess.cpp
@@ -0,0 +1,450 @@
+//
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Post-processing for SPIR-V IR, in internal form, not standard binary form. +// + +#include +#include + +#include +#include +#include + +#include "SpvBuilder.h" + +#include "spirv.hpp" +#include "GlslangToSpv.h" +#include "SpvBuilder.h" +namespace spv { + #include "GLSL.std.450.h" + #include "GLSL.ext.KHR.h" + #include "GLSL.ext.EXT.h" + #include "GLSL.ext.AMD.h" + #include "GLSL.ext.NV.h" +} + +namespace spv { + +#ifndef GLSLANG_WEB +// Hook to visit each operand type and result type of an instruction. +// Will be called multiple times for one instruction, once for each typed +// operand and the result. +void Builder::postProcessType(const Instruction& inst, Id typeId) +{ + // Characterize the type being questioned + Id basicTypeOp = getMostBasicTypeClass(typeId); + int width = 0; + if (basicTypeOp == OpTypeFloat || basicTypeOp == OpTypeInt) + width = getScalarTypeWidth(typeId); + + // Do opcode-specific checks + switch (inst.getOpCode()) { + case OpLoad: + case OpStore: + if (basicTypeOp == OpTypeStruct) { + if (containsType(typeId, OpTypeInt, 8)) + addCapability(CapabilityInt8); + if (containsType(typeId, OpTypeInt, 16)) + addCapability(CapabilityInt16); + if (containsType(typeId, OpTypeFloat, 16)) + addCapability(CapabilityFloat16); + } else { + StorageClass storageClass = getStorageClass(inst.getIdOperand(0)); + if (width == 8) { + switch (storageClass) { + case StorageClassPhysicalStorageBufferEXT: + case StorageClassUniform: + case StorageClassStorageBuffer: + case StorageClassPushConstant: + break; + default: + addCapability(CapabilityInt8); + break; + } + } else if (width == 16) { + switch (storageClass) { + case StorageClassPhysicalStorageBufferEXT: + case StorageClassUniform: + case StorageClassStorageBuffer: + case StorageClassPushConstant: + case StorageClassInput: + case StorageClassOutput: + break; + default: + if (basicTypeOp == OpTypeInt) + addCapability(CapabilityInt16); + if (basicTypeOp == OpTypeFloat) + addCapability(CapabilityFloat16); + break; + } + } + } + break; + case OpAccessChain: + case OpPtrAccessChain: + case OpCopyObject: + break; + case OpFConvert: + case OpSConvert: + case OpUConvert: + // Look for any 8/16-bit storage capabilities. If there are none, assume that + // the convert instruction requires the Float16/Int8/16 capability. 
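+        // For illustration: a module whose only 16-bit use is an arithmetic conversion,
+        // e.g. 'float16_t h = float16_t(f);', declares no 16-bit storage capability, so
+        // the scan below finds none and adds CapabilityFloat16 for the OpFConvert itself.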
+ if (containsType(typeId, OpTypeFloat, 16) || containsType(typeId, OpTypeInt, 16)) { + bool foundStorage = false; + for (auto it = capabilities.begin(); it != capabilities.end(); ++it) { + spv::Capability cap = *it; + if (cap == spv::CapabilityStorageInputOutput16 || + cap == spv::CapabilityStoragePushConstant16 || + cap == spv::CapabilityStorageUniformBufferBlock16 || + cap == spv::CapabilityStorageUniform16) { + foundStorage = true; + break; + } + } + if (!foundStorage) { + if (containsType(typeId, OpTypeFloat, 16)) + addCapability(CapabilityFloat16); + if (containsType(typeId, OpTypeInt, 16)) + addCapability(CapabilityInt16); + } + } + if (containsType(typeId, OpTypeInt, 8)) { + bool foundStorage = false; + for (auto it = capabilities.begin(); it != capabilities.end(); ++it) { + spv::Capability cap = *it; + if (cap == spv::CapabilityStoragePushConstant8 || + cap == spv::CapabilityUniformAndStorageBuffer8BitAccess || + cap == spv::CapabilityStorageBuffer8BitAccess) { + foundStorage = true; + break; + } + } + if (!foundStorage) { + addCapability(CapabilityInt8); + } + } + break; + case OpExtInst: + switch (inst.getImmediateOperand(1)) { + case GLSLstd450Frexp: + case GLSLstd450FrexpStruct: + if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeInt, 16)) + addExtension(spv::E_SPV_AMD_gpu_shader_int16); + break; + case GLSLstd450InterpolateAtCentroid: + case GLSLstd450InterpolateAtSample: + case GLSLstd450InterpolateAtOffset: + if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeFloat, 16)) + addExtension(spv::E_SPV_AMD_gpu_shader_half_float); + break; + default: + break; + } + break; + default: + if (basicTypeOp == OpTypeFloat && width == 16) + addCapability(CapabilityFloat16); + if (basicTypeOp == OpTypeInt && width == 16) + addCapability(CapabilityInt16); + if (basicTypeOp == OpTypeInt && width == 8) + addCapability(CapabilityInt8); + break; + } +} + +// Called for each instruction that resides in a block. +void Builder::postProcess(Instruction& inst) +{ + // Add capabilities based simply on the opcode. + switch (inst.getOpCode()) { + case OpExtInst: + switch (inst.getImmediateOperand(1)) { + case GLSLstd450InterpolateAtCentroid: + case GLSLstd450InterpolateAtSample: + case GLSLstd450InterpolateAtOffset: + addCapability(CapabilityInterpolationFunction); + break; + default: + break; + } + break; + case OpDPdxFine: + case OpDPdyFine: + case OpFwidthFine: + case OpDPdxCoarse: + case OpDPdyCoarse: + case OpFwidthCoarse: + addCapability(CapabilityDerivativeControl); + break; + + case OpImageQueryLod: + case OpImageQuerySize: + case OpImageQuerySizeLod: + case OpImageQuerySamples: + case OpImageQueryLevels: + addCapability(CapabilityImageQuery); + break; + + case OpGroupNonUniformPartitionNV: + addExtension(E_SPV_NV_shader_subgroup_partitioned); + addCapability(CapabilityGroupNonUniformPartitionedNV); + break; + + case OpLoad: + case OpStore: + { + // For any load/store to a PhysicalStorageBufferEXT, walk the accesschain + // index list to compute the misalignment. The pre-existing alignment value + // (set via Builder::AccessChain::alignment) only accounts for the base of + // the reference type and any scalar component selection in the accesschain, + // and this function computes the rest from the SPIR-V Offset decorations. 
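+        // For illustration: accessing 'b.m[i].y' through a physical-storage-buffer pointer,
+        // where member 'm' has Offset 8 and the array has ArrayStride 12, ORs 8 and 12 into
+        // the running misalignment; the least-significant set bit of the accumulated OR is
+        // the provable alignment that ends up in the Aligned memory-access operand.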
+ Instruction *accessChain = module.getInstruction(inst.getIdOperand(0)); + if (accessChain->getOpCode() == OpAccessChain) { + Instruction *base = module.getInstruction(accessChain->getIdOperand(0)); + // Get the type of the base of the access chain. It must be a pointer type. + Id typeId = base->getTypeId(); + Instruction *type = module.getInstruction(typeId); + assert(type->getOpCode() == OpTypePointer); + if (type->getImmediateOperand(0) != StorageClassPhysicalStorageBufferEXT) { + break; + } + // Get the pointee type. + typeId = type->getIdOperand(1); + type = module.getInstruction(typeId); + // Walk the index list for the access chain. For each index, find any + // misalignment that can apply when accessing the member/element via + // Offset/ArrayStride/MatrixStride decorations, and bitwise OR them all + // together. + int alignment = 0; + for (int i = 1; i < accessChain->getNumOperands(); ++i) { + Instruction *idx = module.getInstruction(accessChain->getIdOperand(i)); + if (type->getOpCode() == OpTypeStruct) { + assert(idx->getOpCode() == OpConstant); + unsigned int c = idx->getImmediateOperand(0); + + const auto function = [&](const std::unique_ptr& decoration) { + if (decoration.get()->getOpCode() == OpMemberDecorate && + decoration.get()->getIdOperand(0) == typeId && + decoration.get()->getImmediateOperand(1) == c && + (decoration.get()->getImmediateOperand(2) == DecorationOffset || + decoration.get()->getImmediateOperand(2) == DecorationMatrixStride)) { + alignment |= decoration.get()->getImmediateOperand(3); + } + }; + std::for_each(decorations.begin(), decorations.end(), function); + // get the next member type + typeId = type->getIdOperand(c); + type = module.getInstruction(typeId); + } else if (type->getOpCode() == OpTypeArray || + type->getOpCode() == OpTypeRuntimeArray) { + const auto function = [&](const std::unique_ptr& decoration) { + if (decoration.get()->getOpCode() == OpDecorate && + decoration.get()->getIdOperand(0) == typeId && + decoration.get()->getImmediateOperand(1) == DecorationArrayStride) { + alignment |= decoration.get()->getImmediateOperand(2); + } + }; + std::for_each(decorations.begin(), decorations.end(), function); + // Get the element type + typeId = type->getIdOperand(0); + type = module.getInstruction(typeId); + } else { + // Once we get to any non-aggregate type, we're done. + break; + } + } + assert(inst.getNumOperands() >= 3); + unsigned int memoryAccess = inst.getImmediateOperand((inst.getOpCode() == OpStore) ? 2 : 1); + assert(memoryAccess & MemoryAccessAlignedMask); + static_cast(memoryAccess); + // Compute the index of the alignment operand. + int alignmentIdx = 2; + if (inst.getOpCode() == OpStore) + alignmentIdx++; + // Merge new and old (mis)alignment + alignment |= inst.getImmediateOperand(alignmentIdx); + // Pick the LSB + alignment = alignment & ~(alignment & (alignment-1)); + // update the Aligned operand + inst.setImmediateOperand(alignmentIdx, alignment); + } + break; + } + + default: + break; + } + + // Checks based on type + if (inst.getTypeId() != NoType) + postProcessType(inst, inst.getTypeId()); + for (int op = 0; op < inst.getNumOperands(); ++op) { + if (inst.isIdOperand(op)) { + // In blocks, these are always result ids, but we are relying on + // getTypeId() to return NoType for things like OpLabel. 
+ if (getTypeId(inst.getIdOperand(op)) != NoType) + postProcessType(inst, getTypeId(inst.getIdOperand(op))); + } + } +} +#endif + +// comment in header +void Builder::postProcessCFG() +{ + // reachableBlocks is the set of blocks reached via control flow, or which are + // unreachable continue targets or unreachable merges. + std::unordered_set<Block*> reachableBlocks; + std::unordered_map<Block*, Block*> headerForUnreachableContinue; + std::unordered_set<Block*> unreachableMerges; + std::unordered_set<Id> unreachableDefinitions; + // Collect IDs defined in unreachable blocks. For each function, label the + // reachable blocks first. Then for each unreachable block, collect the + // result IDs of the instructions in it. + for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) { + Function* f = *fi; + Block* entry = f->getEntryBlock(); + inReadableOrder(entry, + [&reachableBlocks, &unreachableMerges, &headerForUnreachableContinue] + (Block* b, ReachReason why, Block* header) { + reachableBlocks.insert(b); + if (why == ReachDeadContinue) headerForUnreachableContinue[b] = header; + if (why == ReachDeadMerge) unreachableMerges.insert(b); + }); + for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) { + Block* b = *bi; + if (unreachableMerges.count(b) != 0 || headerForUnreachableContinue.count(b) != 0) { + auto ii = b->getInstructions().cbegin(); + ++ii; // Keep potential decorations on the label. + for (; ii != b->getInstructions().cend(); ++ii) + unreachableDefinitions.insert(ii->get()->getResultId()); + } else if (reachableBlocks.count(b) == 0) { + // The normal case for unreachable code. All definitions are considered dead. + for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ++ii) + unreachableDefinitions.insert(ii->get()->getResultId()); + } + } + } + + // Modify unreachable merge blocks and unreachable continue targets. + // Delete their contents. + for (auto mergeIter = unreachableMerges.begin(); mergeIter != unreachableMerges.end(); ++mergeIter) { + (*mergeIter)->rewriteAsCanonicalUnreachableMerge(); + } + for (auto continueIter = headerForUnreachableContinue.begin(); + continueIter != headerForUnreachableContinue.end(); + ++continueIter) { + Block* continue_target = continueIter->first; + Block* header = continueIter->second; + continue_target->rewriteAsCanonicalUnreachableContinue(header); + } + + // Remove unneeded decorations, for unreachable instructions + decorations.erase(std::remove_if(decorations.begin(), decorations.end(), + [&unreachableDefinitions](std::unique_ptr<Instruction>& I) -> bool { + Id decoration_id = I.get()->getIdOperand(0); + return unreachableDefinitions.count(decoration_id) != 0; + }), + decorations.end()); +} + +#ifndef GLSLANG_WEB +// comment in header +void Builder::postProcessFeatures() { + // Add per-instruction capabilities, extensions, etc., + + // Look for any 8/16 bit type in physical storage buffer class, and set the + // appropriate capability. This happens in createSpvVariable for other storage + // classes, but there isn't always a variable for physical storage buffer.
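+ // Illustrative case (hypothetical GLSL source, not taken from this patch): a block such as + // layout(buffer_reference) buffer B { int8_t v; }; + // produces an OpTypePointer in the PhysicalStorageBufferEXT storage class whose pointee + // contains an 8-bit integer, so the scan below has to add SPV_KHR_8bit_storage and + // StorageBuffer8BitAccess even though no OpVariable is ever created for it.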
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) { + Instruction* type = groupedTypes[OpTypePointer][t]; + if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) { + if (containsType(type->getIdOperand(1), OpTypeInt, 8)) { + addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5); + addCapability(spv::CapabilityStorageBuffer8BitAccess); + } + if (containsType(type->getIdOperand(1), OpTypeInt, 16) || + containsType(type->getIdOperand(1), OpTypeFloat, 16)) { + addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + addCapability(spv::CapabilityStorageBuffer16BitAccess); + } + } + } + + // process all block-contained instructions + for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) { + Function* f = *fi; + for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) { + Block* b = *bi; + for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++) + postProcess(*ii->get()); + + // For all local variables that contain pointers to PhysicalStorageBufferEXT, check whether + // there is an existing restrict/aliased decoration. If we don't find one, add Aliased as the + // default. + for (auto vi = b->getLocalVariables().cbegin(); vi != b->getLocalVariables().cend(); vi++) { + const Instruction& inst = *vi->get(); + Id resultId = inst.getResultId(); + if (containsPhysicalStorageBufferOrArray(getDerefTypeId(resultId))) { + bool foundDecoration = false; + const auto function = [&](const std::unique_ptr<Instruction>& decoration) { + if (decoration.get()->getIdOperand(0) == resultId && + decoration.get()->getOpCode() == OpDecorate && + (decoration.get()->getImmediateOperand(1) == spv::DecorationAliasedPointerEXT || + decoration.get()->getImmediateOperand(1) == spv::DecorationRestrictPointerEXT)) { + foundDecoration = true; + } + }; + std::for_each(decorations.begin(), decorations.end(), function); + if (!foundDecoration) { + addDecoration(resultId, spv::DecorationAliasedPointerEXT); + } + } + } + } + } +} +#endif + +// comment in header +void Builder::postProcess() { + postProcessCFG(); +#ifndef GLSLANG_WEB + postProcessFeatures(); +#endif +} + +}; // end spv namespace diff --git a/dep/glslang/SPIRV/SpvTools.cpp b/dep/glslang/SPIRV/SpvTools.cpp new file mode 100644 index 000000000..1e968ba54 --- /dev/null +++ b/dep/glslang/SPIRV/SpvTools.cpp @@ -0,0 +1,217 @@ +// +// Copyright (C) 2014-2016 LunarG, Inc. +// Copyright (C) 2018-2020 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Call into SPIRV-Tools to disassemble, validate, and optimize. +// + +#if ENABLE_OPT + +#include <cstdio> +#include <iostream> + +#include "SpvTools.h" +#include "spirv-tools/optimizer.hpp" +#include "spirv-tools/libspirv.h" + +namespace glslang { + +// Translate glslang's view of target versioning to what SPIRV-Tools uses. +spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLogger* logger) +{ + switch (spvVersion.vulkan) { + case glslang::EShTargetVulkan_1_0: + return spv_target_env::SPV_ENV_VULKAN_1_0; + case glslang::EShTargetVulkan_1_1: + switch (spvVersion.spv) { + case EShTargetSpv_1_0: + case EShTargetSpv_1_1: + case EShTargetSpv_1_2: + case EShTargetSpv_1_3: + return spv_target_env::SPV_ENV_VULKAN_1_1; + case EShTargetSpv_1_4: + return spv_target_env::SPV_ENV_VULKAN_1_1_SPIRV_1_4; + default: + logger->missingFunctionality("Target version for SPIRV-Tools validator"); + return spv_target_env::SPV_ENV_VULKAN_1_1; + } + case glslang::EShTargetVulkan_1_2: + return spv_target_env::SPV_ENV_VULKAN_1_2; + default: + break; + } + + if (spvVersion.openGl > 0) + return spv_target_env::SPV_ENV_OPENGL_4_5; + + logger->missingFunctionality("Target version for SPIRV-Tools validator"); + return spv_target_env::SPV_ENV_UNIVERSAL_1_0; +} + + +// Use the SPIRV-Tools disassembler to print SPIR-V. +void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv) +{ + // disassemble + spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3); + spv_text text; + spv_diagnostic diagnostic = nullptr; + spvBinaryToText(context, spirv.data(), spirv.size(), + SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT, + &text, &diagnostic); + + // dump + if (diagnostic == nullptr) + out << text->str; + else + spvDiagnosticPrint(diagnostic); + + // teardown + spvDiagnosticDestroy(diagnostic); + spvContextDestroy(context); +} + +// Apply the SPIRV-Tools validator to generated SPIR-V. +void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, + spv::SpvBuildLogger* logger, bool prelegalization) +{ + // validate + spv_context context = spvContextCreate(MapToSpirvToolsEnv(intermediate.getSpv(), logger)); + spv_const_binary_t binary = { spirv.data(), spirv.size() }; + spv_diagnostic diagnostic = nullptr; + spv_validator_options options = spvValidatorOptionsCreate(); + spvValidatorOptionsSetRelaxBlockLayout(options, intermediate.usingHlslOffsets()); + spvValidatorOptionsSetBeforeHlslLegalization(options, prelegalization); + spvValidateWithOptions(context, options, &binary, &diagnostic); + + // report + if (diagnostic != nullptr) { + logger->error("SPIRV-Tools Validation Errors"); + logger->error(diagnostic->error); + } + + // tear down + spvValidatorOptionsDestroy(options); + spvDiagnosticDestroy(diagnostic); + spvContextDestroy(context); +} + +// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of +// legalizing HLSL SPIR-V.
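+// Typical call sequence (editorial sketch; the option settings are assumptions, not taken +// from this patch): +// glslang::SpvOptions opts; +// opts.disableOptimizer = false; // SpvOptions defaults disableOptimizer to true +// spv::SpvBuildLogger logger; +// glslang::SpirvToolsValidate(intermediate, spirv, &logger, /*prelegalization*/ true); +// glslang::SpirvToolsLegalize(intermediate, spirv, &logger, &opts);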
+void SpirvToolsLegalize(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, + spv::SpvBuildLogger* logger, const SpvOptions* options) +{ + spv_target_env target_env = SPV_ENV_UNIVERSAL_1_2; + + spvtools::Optimizer optimizer(target_env); + optimizer.SetMessageConsumer( + [](spv_message_level_t level, const char *source, const spv_position_t &position, const char *message) { + auto &out = std::cerr; + switch (level) + { + case SPV_MSG_FATAL: + case SPV_MSG_INTERNAL_ERROR: + case SPV_MSG_ERROR: + out << "error: "; + break; + case SPV_MSG_WARNING: + out << "warning: "; + break; + case SPV_MSG_INFO: + case SPV_MSG_DEBUG: + out << "info: "; + break; + default: + break; + } + if (source) + { + out << source << ":"; + } + out << position.line << ":" << position.column << ":" << position.index << ":"; + if (message) + { + out << " " << message; + } + out << std::endl; + }); + + // If debug (specifically source line info) is being generated, propagate + // line information into all SPIR-V instructions. This avoids loss of + // information when instructions are deleted or moved. Later, remove + // redundant information to minimize final SPIR-V size. + if (options->generateDebugInfo) { + optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass()); + } + optimizer.RegisterPass(spvtools::CreateWrapOpKillPass()); + optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass()); + optimizer.RegisterPass(spvtools::CreateMergeReturnPass()); + optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass()); + optimizer.RegisterPass(spvtools::CreateEliminateDeadFunctionsPass()); + optimizer.RegisterPass(spvtools::CreateScalarReplacementPass()); + optimizer.RegisterPass(spvtools::CreateLocalAccessChainConvertPass()); + optimizer.RegisterPass(spvtools::CreateLocalSingleBlockLoadStoreElimPass()); + optimizer.RegisterPass(spvtools::CreateLocalSingleStoreElimPass()); + optimizer.RegisterPass(spvtools::CreateSimplificationPass()); + optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); + optimizer.RegisterPass(spvtools::CreateVectorDCEPass()); + optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass()); + optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); + optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass()); + optimizer.RegisterPass(spvtools::CreateBlockMergePass()); + optimizer.RegisterPass(spvtools::CreateLocalMultiStoreElimPass()); + optimizer.RegisterPass(spvtools::CreateIfConversionPass()); + optimizer.RegisterPass(spvtools::CreateSimplificationPass()); + optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); + optimizer.RegisterPass(spvtools::CreateVectorDCEPass()); + optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass()); + if (options->optimizeSize) { + optimizer.RegisterPass(spvtools::CreateRedundancyEliminationPass()); + } + optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); + optimizer.RegisterPass(spvtools::CreateCFGCleanupPass()); + if (options->generateDebugInfo) { + optimizer.RegisterPass(spvtools::CreateRedundantLineInfoElimPass()); + } + + spvtools::OptimizerOptions spvOptOptions; + optimizer.SetTargetEnv(MapToSpirvToolsEnv(intermediate.getSpv(), logger)); + spvOptOptions.set_run_validator(false); // The validator may run as a separate step later on + optimizer.Run(spirv.data(), spirv.size(), &spirv, spvOptOptions); +} + +}; // end namespace glslang + +#endif diff --git a/dep/glslang/SPIRV/SpvTools.h b/dep/glslang/SPIRV/SpvTools.h new file mode 100644 index 000000000..59c914da0 --- /dev/null +++ b/dep/glslang/SPIRV/SpvTools.h @@
-0,0 +1,82 @@ +// +// Copyright (C) 2014-2016 LunarG, Inc. +// Copyright (C) 2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Call into SPIRV-Tools to disassemble, validate, and optimize. +// + +#pragma once +#ifndef GLSLANG_SPV_TOOLS_H +#define GLSLANG_SPV_TOOLS_H + +#ifdef ENABLE_OPT +#include <vector> +#include <ostream> +#endif + +#include "glslang/MachineIndependent/localintermediate.h" +#include "Logger.h" + +namespace glslang { + +struct SpvOptions { + SpvOptions() : generateDebugInfo(false), disableOptimizer(true), + optimizeSize(false), disassemble(false), validate(false) { } + bool generateDebugInfo; + bool disableOptimizer; + bool optimizeSize; + bool disassemble; + bool validate; +}; + +#ifdef ENABLE_OPT + +// Use the SPIRV-Tools disassembler to print SPIR-V. +void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv); + +// Apply the SPIRV-Tools validator to generated SPIR-V. +void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, + spv::SpvBuildLogger*, bool prelegalization); + +// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of +// legalizing HLSL SPIR-V. +void SpirvToolsLegalize(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, + spv::SpvBuildLogger*, const SpvOptions*); + +#endif + +} // end namespace glslang + +#endif // GLSLANG_SPV_TOOLS_H diff --git a/dep/glslang/SPIRV/bitutils.h b/dep/glslang/SPIRV/bitutils.h new file mode 100644 index 000000000..22e44cec2 --- /dev/null +++ b/dep/glslang/SPIRV/bitutils.h @@ -0,0 +1,81 @@ +// Copyright (c) 2015-2016 The Khronos Group Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIBSPIRV_UTIL_BITUTILS_H_ +#define LIBSPIRV_UTIL_BITUTILS_H_ + +#include <cstdint> +#include <cstring> + +namespace spvutils { + +// Performs a bitwise copy of source to the destination type Dest. +template <typename Dest, typename Src> +Dest BitwiseCast(Src source) { + Dest dest; + static_assert(sizeof(source) == sizeof(dest), + "BitwiseCast: Source and destination must have the same size"); + std::memcpy(static_cast<void*>(&dest), &source, sizeof(dest)); + return dest; +} + +// SetBits<T, First, Num> returns an integer of type <T> with bits set +// for position <First> through <First + Num - 1>, counting from the least +// significant bit. In particular when Num == 0, no positions are set to 1. +// A static assert will be triggered if First + Num > sizeof(T) * 8, that is, +// a bit that will not fit in the underlying type is set. +template <typename T, size_t First = 0, size_t Num = 0> +struct SetBits { + static_assert(First < sizeof(T) * 8, + "Tried to set a bit that is shifted too far."); + const static T get = (T(1) << First) | SetBits<T, First + 1, Num - 1>::get; +}; + +template <typename T, size_t Last> +struct SetBits<T, Last, 0> { + const static T get = T(0); +}; + +// This is all compile-time so we can put our tests right here. +static_assert(SetBits<uint32_t, 0, 0>::get == uint32_t(0x00000000), + "SetBits failed"); +static_assert(SetBits<uint32_t, 0, 1>::get == uint32_t(0x00000001), + "SetBits failed"); +static_assert(SetBits<uint32_t, 31, 1>::get == uint32_t(0x80000000), + "SetBits failed"); +static_assert(SetBits<uint32_t, 1, 2>::get == uint32_t(0x00000006), + "SetBits failed"); +static_assert(SetBits<uint32_t, 30, 2>::get == uint32_t(0xc0000000), + "SetBits failed"); +static_assert(SetBits<uint32_t, 0, 31>::get == uint32_t(0x7FFFFFFF), + "SetBits failed"); +static_assert(SetBits<uint32_t, 0, 32>::get == uint32_t(0xFFFFFFFF), + "SetBits failed"); +static_assert(SetBits<uint32_t, 16, 16>::get == uint32_t(0xFFFF0000), + "SetBits failed"); + +static_assert(SetBits<uint64_t, 0, 1>::get == uint64_t(0x0000000000000001LL), + "SetBits failed"); +static_assert(SetBits<uint64_t, 63, 1>::get == uint64_t(0x8000000000000000LL), + "SetBits failed"); +static_assert(SetBits<uint64_t, 62, 2>::get == uint64_t(0xc000000000000000LL), + "SetBits failed"); +static_assert(SetBits<uint64_t, 31, 1>::get == uint64_t(0x0000000080000000LL), + "SetBits failed"); +static_assert(SetBits<uint64_t, 16, 16>::get == uint64_t(0x00000000FFFF0000LL), + "SetBits failed"); + +} // namespace spvutils + +#endif // LIBSPIRV_UTIL_BITUTILS_H_ diff --git a/dep/glslang/SPIRV/disassemble.cpp b/dep/glslang/SPIRV/disassemble.cpp new file mode 100644 index 000000000..4faa89ea3 --- /dev/null +++ b/dep/glslang/SPIRV/disassemble.cpp @@ -0,0 +1,743 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd.
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Disassembler for SPIR-V. +// + +#include <cstdlib> +#include <cstring> +#include <cassert> +#include <iomanip> +#include <stack> +#include <sstream> +#include <cstring> + +#include "disassemble.h" +#include "doc.h" +#include "SpvTools.h" + +namespace spv { + extern "C" { + // Include C-based headers that don't have a namespace + #include "GLSL.std.450.h" + #include "GLSL.ext.AMD.h" + #include "GLSL.ext.NV.h" + } +} +const char* GlslStd450DebugNames[spv::GLSLstd450Count]; + +namespace spv { + +static const char* GLSLextAMDGetDebugNames(const char*, unsigned); +static const char* GLSLextNVGetDebugNames(const char*, unsigned); + +static void Kill(std::ostream& out, const char* message) +{ + out << std::endl << "Disassembly failed: " << message << std::endl; + exit(1); +} + +// used to identify the extended instruction library imported when printing +enum ExtInstSet { + GLSL450Inst, + GLSLextAMDInst, + GLSLextNVInst, + OpenCLExtInst, + NonSemanticDebugPrintfExtInst, +}; + +// Container class for a single instance of a SPIR-V stream, with methods for disassembly. +class SpirvStream { +public: + SpirvStream(std::ostream& out, const std::vector<unsigned int>& stream) : out(out), stream(stream), word(0), nextNestedControl(0) { } + virtual ~SpirvStream() { } + + void validate(); + void processInstructions(); + +protected: + SpirvStream(const SpirvStream&); + SpirvStream& operator=(const SpirvStream&); + Op getOpCode(int id) const { return idInstruction[id] ?
(Op)(stream[idInstruction[id]] & OpCodeMask) : OpNop; } + + // Output methods + void outputIndent(); + void formatId(Id id, std::stringstream&); + void outputResultId(Id id); + void outputTypeId(Id id); + void outputId(Id id); + void outputMask(OperandClass operandClass, unsigned mask); + void disassembleImmediates(int numOperands); + void disassembleIds(int numOperands); + int disassembleString(); + void disassembleInstruction(Id resultId, Id typeId, Op opCode, int numOperands); + + // Data + std::ostream& out; // where to write the disassembly + const std::vector<unsigned int>& stream; // the actual word stream + int size; // the size of the word stream + int word; // the next word of the stream to read + + // map each <id> to the instruction that created it + Id bound; + std::vector<unsigned int> idInstruction; // the word offset into the stream where the instruction for result [id] starts; 0 if not yet seen (forward reference or function parameter) + + std::vector<std::string> idDescriptor; // the best text string known for explaining the <id> + + // schema + unsigned int schema; + + // stack of structured-merge points + std::stack<Id> nestedControl; + Id nextNestedControl; // need a slight delay for when we are nested +}; + +void SpirvStream::validate() +{ + size = (int)stream.size(); + if (size < 4) + Kill(out, "stream is too short"); + + // Magic number + if (stream[word++] != MagicNumber) { + out << "Bad magic number"; + return; + } + + // Version + out << "// Module Version " << std::hex << stream[word++] << std::endl; + + // Generator's magic number + out << "// Generated by (magic number): " << std::hex << stream[word++] << std::dec << std::endl; + + // Result bound + bound = stream[word++]; + idInstruction.resize(bound); + idDescriptor.resize(bound); + out << "// Id's are bound by " << bound << std::endl; + out << std::endl; + + // Reserved schema, must be 0 for now + schema = stream[word++]; + if (schema != 0) + Kill(out, "bad schema, must be 0"); +} + +// Loop over all the instructions, in order, processing each. +// Boilerplate for each is handled here directly; the rest is dispatched. +void SpirvStream::processInstructions() +{ + // Instructions + while (word < size) { + int instructionStart = word; + + // Instruction wordCount and opcode + unsigned int firstWord = stream[word]; + unsigned wordCount = firstWord >> WordCountShift; + Op opCode = (Op)(firstWord & OpCodeMask); + int nextInst = word + wordCount; + ++word; + + // Presence of full instruction + if (nextInst > size) + Kill(out, "stream instruction terminated too early"); + + // Base for computing number of operands; will be updated as more is learned + unsigned numOperands = wordCount - 1; + + // Type + Id typeId = 0; + if (InstructionDesc[opCode].hasType()) { + typeId = stream[word++]; + --numOperands; + } + + // Result + Id resultId = 0; + if (InstructionDesc[opCode].hasResult()) { + resultId = stream[word++]; + --numOperands; + + // save instruction for future reference + idInstruction[resultId] = instructionStart; + } + + outputResultId(resultId); + outputTypeId(typeId); + outputIndent(); + + // Hand off the Op and all its operands + disassembleInstruction(resultId, typeId, opCode, numOperands); + if (word != nextInst) { + out << " ERROR, incorrect number of operands consumed.
At " << word << " instead of " << nextInst << " instruction start was " << instructionStart; + word = nextInst; + } + out << std::endl; + } +} + +void SpirvStream::outputIndent() +{ + for (int i = 0; i < (int)nestedControl.size(); ++i) + out << " "; +} + +void SpirvStream::formatId(Id id, std::stringstream& idStream) +{ + if (id != 0) { + // On instructions with no IDs, this is called with "0", which does not + // have to be within ID bounds on null shaders. + if (id >= bound) + Kill(out, "Bad "); + + idStream << id; + if (idDescriptor[id].size() > 0) + idStream << "(" << idDescriptor[id] << ")"; + } +} + +void SpirvStream::outputResultId(Id id) +{ + const int width = 16; + std::stringstream idStream; + formatId(id, idStream); + out << std::setw(width) << std::right << idStream.str(); + if (id != 0) + out << ":"; + else + out << " "; + + if (nestedControl.size() && id == nestedControl.top()) + nestedControl.pop(); +} + +void SpirvStream::outputTypeId(Id id) +{ + const int width = 12; + std::stringstream idStream; + formatId(id, idStream); + out << std::setw(width) << std::right << idStream.str() << " "; +} + +void SpirvStream::outputId(Id id) +{ + if (id >= bound) + Kill(out, "Bad "); + + out << id; + if (idDescriptor[id].size() > 0) + out << "(" << idDescriptor[id] << ")"; +} + +void SpirvStream::outputMask(OperandClass operandClass, unsigned mask) +{ + if (mask == 0) + out << "None"; + else { + for (int m = 0; m < OperandClassParams[operandClass].ceiling; ++m) { + if (mask & (1 << m)) + out << OperandClassParams[operandClass].getName(m) << " "; + } + } +} + +void SpirvStream::disassembleImmediates(int numOperands) +{ + for (int i = 0; i < numOperands; ++i) { + out << stream[word++]; + if (i < numOperands - 1) + out << " "; + } +} + +void SpirvStream::disassembleIds(int numOperands) +{ + for (int i = 0; i < numOperands; ++i) { + outputId(stream[word++]); + if (i < numOperands - 1) + out << " "; + } +} + +// return the number of operands consumed by the string +int SpirvStream::disassembleString() +{ + int startWord = word; + + out << " \""; + + const char* wordString; + bool done = false; + do { + unsigned int content = stream[word]; + wordString = (const char*)&content; + for (int charCount = 0; charCount < 4; ++charCount) { + if (*wordString == 0) { + done = true; + break; + } + out << *(wordString++); + } + ++word; + } while (! 
done); + + out << "\""; + + return word - startWord; +} + +void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode, int numOperands) +{ + // Process the opcode + + out << (OpcodeString(opCode) + 2); // leave out the "Op" + + if (opCode == OpLoopMerge || opCode == OpSelectionMerge) + nextNestedControl = stream[word]; + else if (opCode == OpBranchConditional || opCode == OpSwitch) { + if (nextNestedControl) { + nestedControl.push(nextNestedControl); + nextNestedControl = 0; + } + } else if (opCode == OpExtInstImport) { + idDescriptor[resultId] = (const char*)(&stream[word]); + } + else { + if (resultId != 0 && idDescriptor[resultId].size() == 0) { + switch (opCode) { + case OpTypeInt: + switch (stream[word]) { + case 8: idDescriptor[resultId] = "int8_t"; break; + case 16: idDescriptor[resultId] = "int16_t"; break; + default: assert(0); // fallthrough + case 32: idDescriptor[resultId] = "int"; break; + case 64: idDescriptor[resultId] = "int64_t"; break; + } + break; + case OpTypeFloat: + switch (stream[word]) { + case 16: idDescriptor[resultId] = "float16_t"; break; + default: assert(0); // fallthrough + case 32: idDescriptor[resultId] = "float"; break; + case 64: idDescriptor[resultId] = "float64_t"; break; + } + break; + case OpTypeBool: + idDescriptor[resultId] = "bool"; + break; + case OpTypeStruct: + idDescriptor[resultId] = "struct"; + break; + case OpTypePointer: + idDescriptor[resultId] = "ptr"; + break; + case OpTypeVector: + if (idDescriptor[stream[word]].size() > 0) { + idDescriptor[resultId].append(idDescriptor[stream[word]].begin(), idDescriptor[stream[word]].begin() + 1); + if (strstr(idDescriptor[stream[word]].c_str(), "8")) { + idDescriptor[resultId].append("8"); + } + if (strstr(idDescriptor[stream[word]].c_str(), "16")) { + idDescriptor[resultId].append("16"); + } + if (strstr(idDescriptor[stream[word]].c_str(), "64")) { + idDescriptor[resultId].append("64"); + } + } + idDescriptor[resultId].append("vec"); + switch (stream[word + 1]) { + case 2: idDescriptor[resultId].append("2"); break; + case 3: idDescriptor[resultId].append("3"); break; + case 4: idDescriptor[resultId].append("4"); break; + case 8: idDescriptor[resultId].append("8"); break; + case 16: idDescriptor[resultId].append("16"); break; + case 32: idDescriptor[resultId].append("32"); break; + default: break; + } + break; + default: + break; + } + } + } + + // Process the operands. Note, a new context-dependent set could be + // swapped in mid-traversal. + + // Handle images specially, so can put out helpful strings. + if (opCode == OpTypeImage) { + out << " "; + disassembleIds(1); + out << " " << DimensionString((Dim)stream[word++]); + out << (stream[word++] != 0 ? " depth" : ""); + out << (stream[word++] != 0 ? " array" : ""); + out << (stream[word++] != 0 ? 
" multi-sampled" : ""); + switch (stream[word++]) { + case 0: out << " runtime"; break; + case 1: out << " sampled"; break; + case 2: out << " nonsampled"; break; + } + out << " format:" << ImageFormatString((ImageFormat)stream[word++]); + + if (numOperands == 8) { + out << " " << AccessQualifierString(stream[word++]); + } + return; + } + + // Handle all the parameterized operands + for (int op = 0; op < InstructionDesc[opCode].operands.getNum() && numOperands > 0; ++op) { + out << " "; + OperandClass operandClass = InstructionDesc[opCode].operands.getClass(op); + switch (operandClass) { + case OperandId: + case OperandScope: + case OperandMemorySemantics: + disassembleIds(1); + --numOperands; + // Get names for printing "(XXX)" for readability, *after* this id + if (opCode == OpName) + idDescriptor[stream[word - 1]] = (const char*)(&stream[word]); + break; + case OperandVariableIds: + disassembleIds(numOperands); + return; + case OperandImageOperands: + outputMask(OperandImageOperands, stream[word++]); + --numOperands; + disassembleIds(numOperands); + return; + case OperandOptionalLiteral: + case OperandVariableLiterals: + if ((opCode == OpDecorate && stream[word - 1] == DecorationBuiltIn) || + (opCode == OpMemberDecorate && stream[word - 1] == DecorationBuiltIn)) { + out << BuiltInString(stream[word++]); + --numOperands; + ++op; + } + disassembleImmediates(numOperands); + return; + case OperandVariableIdLiteral: + while (numOperands > 0) { + out << std::endl; + outputResultId(0); + outputTypeId(0); + outputIndent(); + out << " Type "; + disassembleIds(1); + out << ", member "; + disassembleImmediates(1); + numOperands -= 2; + } + return; + case OperandVariableLiteralId: + while (numOperands > 0) { + out << std::endl; + outputResultId(0); + outputTypeId(0); + outputIndent(); + out << " case "; + disassembleImmediates(1); + out << ": "; + disassembleIds(1); + numOperands -= 2; + } + return; + case OperandLiteralNumber: + disassembleImmediates(1); + --numOperands; + if (opCode == OpExtInst) { + ExtInstSet extInstSet = GLSL450Inst; + const char* name = idDescriptor[stream[word - 2]].c_str(); + if (strcmp("OpenCL.std", name) == 0) { + extInstSet = OpenCLExtInst; + } else if (strcmp("OpenCL.DebugInfo.100", name) == 0) { + extInstSet = OpenCLExtInst; + } else if (strcmp("NonSemantic.DebugPrintf", name) == 0) { + extInstSet = NonSemanticDebugPrintfExtInst; + } else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 || + strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 || + strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 || + strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) { + extInstSet = GLSLextAMDInst; + } else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 || + strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 || + strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 || + strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 || + strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 || + strcmp(spv::E_SPV_NV_mesh_shader, name) == 0) { + extInstSet = GLSLextNVInst; + } + unsigned entrypoint = stream[word - 1]; + if (extInstSet == GLSL450Inst) { + if (entrypoint < GLSLstd450Count) { + out << "(" << GlslStd450DebugNames[entrypoint] << ")"; + } + } else if (extInstSet == GLSLextAMDInst) { + out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")"; + } + else if (extInstSet == GLSLextNVInst) { + out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")"; + } else if (extInstSet == NonSemanticDebugPrintfExtInst) { + out 
<< "(DebugPrintf)"; + } + } + break; + case OperandOptionalLiteralString: + case OperandLiteralString: + numOperands -= disassembleString(); + break; + case OperandMemoryAccess: + outputMask(OperandMemoryAccess, stream[word++]); + --numOperands; + // Aligned is the only memory access operand that uses an immediate + // value, and it is also the first operand that uses a value at all. + if (stream[word-1] & MemoryAccessAlignedMask) { + disassembleImmediates(1); + numOperands--; + if (numOperands) + out << " "; + } + disassembleIds(numOperands); + return; + default: + assert(operandClass >= OperandSource && operandClass < OperandOpcode); + + if (OperandClassParams[operandClass].bitmask) + outputMask(operandClass, stream[word++]); + else + out << OperandClassParams[operandClass].getName(stream[word++]); + --numOperands; + + break; + } + } + + return; +} + +static void GLSLstd450GetDebugNames(const char** names) +{ + for (int i = 0; i < GLSLstd450Count; ++i) + names[i] = "Unknown"; + + names[GLSLstd450Round] = "Round"; + names[GLSLstd450RoundEven] = "RoundEven"; + names[GLSLstd450Trunc] = "Trunc"; + names[GLSLstd450FAbs] = "FAbs"; + names[GLSLstd450SAbs] = "SAbs"; + names[GLSLstd450FSign] = "FSign"; + names[GLSLstd450SSign] = "SSign"; + names[GLSLstd450Floor] = "Floor"; + names[GLSLstd450Ceil] = "Ceil"; + names[GLSLstd450Fract] = "Fract"; + names[GLSLstd450Radians] = "Radians"; + names[GLSLstd450Degrees] = "Degrees"; + names[GLSLstd450Sin] = "Sin"; + names[GLSLstd450Cos] = "Cos"; + names[GLSLstd450Tan] = "Tan"; + names[GLSLstd450Asin] = "Asin"; + names[GLSLstd450Acos] = "Acos"; + names[GLSLstd450Atan] = "Atan"; + names[GLSLstd450Sinh] = "Sinh"; + names[GLSLstd450Cosh] = "Cosh"; + names[GLSLstd450Tanh] = "Tanh"; + names[GLSLstd450Asinh] = "Asinh"; + names[GLSLstd450Acosh] = "Acosh"; + names[GLSLstd450Atanh] = "Atanh"; + names[GLSLstd450Atan2] = "Atan2"; + names[GLSLstd450Pow] = "Pow"; + names[GLSLstd450Exp] = "Exp"; + names[GLSLstd450Log] = "Log"; + names[GLSLstd450Exp2] = "Exp2"; + names[GLSLstd450Log2] = "Log2"; + names[GLSLstd450Sqrt] = "Sqrt"; + names[GLSLstd450InverseSqrt] = "InverseSqrt"; + names[GLSLstd450Determinant] = "Determinant"; + names[GLSLstd450MatrixInverse] = "MatrixInverse"; + names[GLSLstd450Modf] = "Modf"; + names[GLSLstd450ModfStruct] = "ModfStruct"; + names[GLSLstd450FMin] = "FMin"; + names[GLSLstd450SMin] = "SMin"; + names[GLSLstd450UMin] = "UMin"; + names[GLSLstd450FMax] = "FMax"; + names[GLSLstd450SMax] = "SMax"; + names[GLSLstd450UMax] = "UMax"; + names[GLSLstd450FClamp] = "FClamp"; + names[GLSLstd450SClamp] = "SClamp"; + names[GLSLstd450UClamp] = "UClamp"; + names[GLSLstd450FMix] = "FMix"; + names[GLSLstd450Step] = "Step"; + names[GLSLstd450SmoothStep] = "SmoothStep"; + names[GLSLstd450Fma] = "Fma"; + names[GLSLstd450Frexp] = "Frexp"; + names[GLSLstd450FrexpStruct] = "FrexpStruct"; + names[GLSLstd450Ldexp] = "Ldexp"; + names[GLSLstd450PackSnorm4x8] = "PackSnorm4x8"; + names[GLSLstd450PackUnorm4x8] = "PackUnorm4x8"; + names[GLSLstd450PackSnorm2x16] = "PackSnorm2x16"; + names[GLSLstd450PackUnorm2x16] = "PackUnorm2x16"; + names[GLSLstd450PackHalf2x16] = "PackHalf2x16"; + names[GLSLstd450PackDouble2x32] = "PackDouble2x32"; + names[GLSLstd450UnpackSnorm2x16] = "UnpackSnorm2x16"; + names[GLSLstd450UnpackUnorm2x16] = "UnpackUnorm2x16"; + names[GLSLstd450UnpackHalf2x16] = "UnpackHalf2x16"; + names[GLSLstd450UnpackSnorm4x8] = "UnpackSnorm4x8"; + names[GLSLstd450UnpackUnorm4x8] = "UnpackUnorm4x8"; + names[GLSLstd450UnpackDouble2x32] = "UnpackDouble2x32"; + 
names[GLSLstd450Length] = "Length"; + names[GLSLstd450Distance] = "Distance"; + names[GLSLstd450Cross] = "Cross"; + names[GLSLstd450Normalize] = "Normalize"; + names[GLSLstd450FaceForward] = "FaceForward"; + names[GLSLstd450Reflect] = "Reflect"; + names[GLSLstd450Refract] = "Refract"; + names[GLSLstd450FindILsb] = "FindILsb"; + names[GLSLstd450FindSMsb] = "FindSMsb"; + names[GLSLstd450FindUMsb] = "FindUMsb"; + names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid"; + names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample"; + names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset"; + names[GLSLstd450NMin] = "NMin"; + names[GLSLstd450NMax] = "NMax"; + names[GLSLstd450NClamp] = "NClamp"; +} + +static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint) +{ + if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) { + switch (entrypoint) { + case SwizzleInvocationsAMD: return "SwizzleInvocationsAMD"; + case SwizzleInvocationsMaskedAMD: return "SwizzleInvocationsMaskedAMD"; + case WriteInvocationAMD: return "WriteInvocationAMD"; + case MbcntAMD: return "MbcntAMD"; + default: return "Bad"; + } + } else if (strcmp(name, spv::E_SPV_AMD_shader_trinary_minmax) == 0) { + switch (entrypoint) { + case FMin3AMD: return "FMin3AMD"; + case UMin3AMD: return "UMin3AMD"; + case SMin3AMD: return "SMin3AMD"; + case FMax3AMD: return "FMax3AMD"; + case UMax3AMD: return "UMax3AMD"; + case SMax3AMD: return "SMax3AMD"; + case FMid3AMD: return "FMid3AMD"; + case UMid3AMD: return "UMid3AMD"; + case SMid3AMD: return "SMid3AMD"; + default: return "Bad"; + } + } else if (strcmp(name, spv::E_SPV_AMD_shader_explicit_vertex_parameter) == 0) { + switch (entrypoint) { + case InterpolateAtVertexAMD: return "InterpolateAtVertexAMD"; + default: return "Bad"; + } + } + else if (strcmp(name, spv::E_SPV_AMD_gcn_shader) == 0) { + switch (entrypoint) { + case CubeFaceIndexAMD: return "CubeFaceIndexAMD"; + case CubeFaceCoordAMD: return "CubeFaceCoordAMD"; + case TimeAMD: return "TimeAMD"; + default: + break; + } + } + + return "Bad"; +} + +static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint) +{ + if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 || + strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 || + strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 || + strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 || + strcmp(name, spv::E_SPV_NVX_multiview_per_view_attributes) == 0 || + strcmp(name, spv::E_SPV_NV_fragment_shader_barycentric) == 0 || + strcmp(name, spv::E_SPV_NV_mesh_shader) == 0 || + strcmp(name, spv::E_SPV_NV_shader_image_footprint) == 0) { + switch (entrypoint) { + // NV builtins + case BuiltInViewportMaskNV: return "ViewportMaskNV"; + case BuiltInSecondaryPositionNV: return "SecondaryPositionNV"; + case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV"; + case BuiltInPositionPerViewNV: return "PositionPerViewNV"; + case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV"; + case BuiltInBaryCoordNV: return "BaryCoordNV"; + case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV"; + case BuiltInTaskCountNV: return "TaskCountNV"; + case BuiltInPrimitiveCountNV: return "PrimitiveCountNV"; + case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV"; + case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV"; + case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV"; + case BuiltInLayerPerViewNV: return "LayerPerViewNV"; + case BuiltInMeshViewCountNV: return "MeshViewCountNV"; + 
case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV"; + + // NV Capabilities + case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV"; + case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV"; + case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV"; + case CapabilityPerViewAttributesNV: return "PerViewAttributesNV"; + case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV"; + case CapabilityMeshShadingNV: return "MeshShadingNV"; + case CapabilityImageFootprintNV: return "ImageFootprintNV"; + case CapabilitySampleMaskOverrideCoverageNV:return "SampleMaskOverrideCoverageNV"; + + // NV Decorations + case DecorationOverrideCoverageNV: return "OverrideCoverageNV"; + case DecorationPassthroughNV: return "PassthroughNV"; + case DecorationViewportRelativeNV: return "ViewportRelativeNV"; + case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV"; + case DecorationPerVertexNV: return "PerVertexNV"; + case DecorationPerPrimitiveNV: return "PerPrimitiveNV"; + case DecorationPerViewNV: return "PerViewNV"; + case DecorationPerTaskNV: return "PerTaskNV"; + + default: return "Bad"; + } + } + return "Bad"; +} + +void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream) +{ + SpirvStream SpirvStream(out, stream); + spv::Parameterize(); + GLSLstd450GetDebugNames(GlslStd450DebugNames); + SpirvStream.validate(); + SpirvStream.processInstructions(); +} + +}; // end namespace spv diff --git a/dep/glslang/SPIRV/disassemble.h b/dep/glslang/SPIRV/disassemble.h new file mode 100644 index 000000000..b6a463577 --- /dev/null +++ b/dep/glslang/SPIRV/disassemble.h @@ -0,0 +1,53 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Disassembler for SPIR-V.
+// + +#pragma once +#ifndef disassembler_H +#define disassembler_H + +#include <iostream> +#include <vector> + +namespace spv { + + // disassemble with glslang custom disassembler + void Disassemble(std::ostream& out, const std::vector<unsigned int>&); + +} // end namespace spv + +#endif // disassembler_H diff --git a/dep/glslang/SPIRV/doc.cpp b/dep/glslang/SPIRV/doc.cpp new file mode 100644 index 000000000..b1f2b820d --- /dev/null +++ b/dep/glslang/SPIRV/doc.cpp @@ -0,0 +1,2888 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// 1) Programmatically fill in instruction/operand information. +// This can be used for disassembly, printing documentation, etc. +// +// 2) Print documentation from this parameterization. +// + +#include "doc.h" + +#include <cstdio> +#include <cstring> +#include <algorithm> + +namespace spv { + extern "C" { + // Include C-based headers that don't have a namespace + #include "GLSL.ext.KHR.h" + #include "GLSL.ext.EXT.h" + #include "GLSL.ext.AMD.h" + #include "GLSL.ext.NV.h" + } +} + +namespace spv { + +// +// Whole set of functions that translate enumerants to their text strings for +// the specification (or their sanitized versions for auto-generating the +// spirv headers). +// +// Also, for masks the ceilings are declared next to these, to help keep them in sync.
+// Ceilings should be +// - one more than the maximum value an enumerant takes on, for non-mask enumerants +// (for non-sparse enums, this is the number of enumerants) +// - the number of bits consumed by the set of masks +// (for non-sparse mask enums, this is the number of enumerants) +// + +const char* SourceString(int source) +{ + switch (source) { + case 0: return "Unknown"; + case 1: return "ESSL"; + case 2: return "GLSL"; + case 3: return "OpenCL_C"; + case 4: return "OpenCL_CPP"; + case 5: return "HLSL"; + + default: return "Bad"; + } +} + +const char* ExecutionModelString(int model) +{ + switch (model) { + case 0: return "Vertex"; + case 1: return "TessellationControl"; + case 2: return "TessellationEvaluation"; + case 3: return "Geometry"; + case 4: return "Fragment"; + case 5: return "GLCompute"; + case 6: return "Kernel"; + case ExecutionModelTaskNV: return "TaskNV"; + case ExecutionModelMeshNV: return "MeshNV"; + + default: return "Bad"; + + case ExecutionModelRayGenerationKHR: return "RayGenerationKHR"; + case ExecutionModelIntersectionKHR: return "IntersectionKHR"; + case ExecutionModelAnyHitKHR: return "AnyHitKHR"; + case ExecutionModelClosestHitKHR: return "ClosestHitKHR"; + case ExecutionModelMissKHR: return "MissKHR"; + case ExecutionModelCallableKHR: return "CallableKHR"; + } +} + +const char* AddressingString(int addr) +{ + switch (addr) { + case 0: return "Logical"; + case 1: return "Physical32"; + case 2: return "Physical64"; + + case AddressingModelPhysicalStorageBuffer64EXT: return "PhysicalStorageBuffer64EXT"; + + default: return "Bad"; + } +} + +const char* MemoryString(int mem) +{ + switch (mem) { + case MemoryModelSimple: return "Simple"; + case MemoryModelGLSL450: return "GLSL450"; + case MemoryModelOpenCL: return "OpenCL"; + case MemoryModelVulkanKHR: return "VulkanKHR"; + + default: return "Bad"; + } +} + +const int ExecutionModeCeiling = 33; + +const char* ExecutionModeString(int mode) +{ + switch (mode) { + case 0: return "Invocations"; + case 1: return "SpacingEqual"; + case 2: return "SpacingFractionalEven"; + case 3: return "SpacingFractionalOdd"; + case 4: return "VertexOrderCw"; + case 5: return "VertexOrderCcw"; + case 6: return "PixelCenterInteger"; + case 7: return "OriginUpperLeft"; + case 8: return "OriginLowerLeft"; + case 9: return "EarlyFragmentTests"; + case 10: return "PointMode"; + case 11: return "Xfb"; + case 12: return "DepthReplacing"; + case 13: return "Bad"; + case 14: return "DepthGreater"; + case 15: return "DepthLess"; + case 16: return "DepthUnchanged"; + case 17: return "LocalSize"; + case 18: return "LocalSizeHint"; + case 19: return "InputPoints"; + case 20: return "InputLines"; + case 21: return "InputLinesAdjacency"; + case 22: return "Triangles"; + case 23: return "InputTrianglesAdjacency"; + case 24: return "Quads"; + case 25: return "Isolines"; + case 26: return "OutputVertices"; + case 27: return "OutputPoints"; + case 28: return "OutputLineStrip"; + case 29: return "OutputTriangleStrip"; + case 30: return "VecTypeHint"; + case 31: return "ContractionOff"; + case 32: return "Bad"; + + case 4446: return "PostDepthCoverage"; + + case ExecutionModeOutputLinesNV: return "OutputLinesNV"; + case ExecutionModeOutputPrimitivesNV: return "OutputPrimitivesNV"; + case ExecutionModeOutputTrianglesNV: return "OutputTrianglesNV"; + case ExecutionModeDerivativeGroupQuadsNV: return "DerivativeGroupQuadsNV"; + case ExecutionModeDerivativeGroupLinearNV: return "DerivativeGroupLinearNV"; + + case ExecutionModePixelInterlockOrderedEXT: 
return "PixelInterlockOrderedEXT"; + case ExecutionModePixelInterlockUnorderedEXT: return "PixelInterlockUnorderedEXT"; + case ExecutionModeSampleInterlockOrderedEXT: return "SampleInterlockOrderedEXT"; + case ExecutionModeSampleInterlockUnorderedEXT: return "SampleInterlockUnorderedEXT"; + case ExecutionModeShadingRateInterlockOrderedEXT: return "ShadingRateInterlockOrderedEXT"; + case ExecutionModeShadingRateInterlockUnorderedEXT: return "ShadingRateInterlockUnorderedEXT"; + + case ExecutionModeCeiling: + default: return "Bad"; + } +} + +const char* StorageClassString(int StorageClass) +{ + switch (StorageClass) { + case 0: return "UniformConstant"; + case 1: return "Input"; + case 2: return "Uniform"; + case 3: return "Output"; + case 4: return "Workgroup"; + case 5: return "CrossWorkgroup"; + case 6: return "Private"; + case 7: return "Function"; + case 8: return "Generic"; + case 9: return "PushConstant"; + case 10: return "AtomicCounter"; + case 11: return "Image"; + case 12: return "StorageBuffer"; + + case StorageClassRayPayloadKHR: return "RayPayloadKHR"; + case StorageClassHitAttributeKHR: return "HitAttributeKHR"; + case StorageClassIncomingRayPayloadKHR: return "IncomingRayPayloadKHR"; + case StorageClassShaderRecordBufferKHR: return "ShaderRecordBufferKHR"; + case StorageClassCallableDataKHR: return "CallableDataKHR"; + case StorageClassIncomingCallableDataKHR: return "IncomingCallableDataKHR"; + + case StorageClassPhysicalStorageBufferEXT: return "PhysicalStorageBufferEXT"; + + default: return "Bad"; + } +} + +const int DecorationCeiling = 45; + +const char* DecorationString(int decoration) +{ + switch (decoration) { + case 0: return "RelaxedPrecision"; + case 1: return "SpecId"; + case 2: return "Block"; + case 3: return "BufferBlock"; + case 4: return "RowMajor"; + case 5: return "ColMajor"; + case 6: return "ArrayStride"; + case 7: return "MatrixStride"; + case 8: return "GLSLShared"; + case 9: return "GLSLPacked"; + case 10: return "CPacked"; + case 11: return "BuiltIn"; + case 12: return "Bad"; + case 13: return "NoPerspective"; + case 14: return "Flat"; + case 15: return "Patch"; + case 16: return "Centroid"; + case 17: return "Sample"; + case 18: return "Invariant"; + case 19: return "Restrict"; + case 20: return "Aliased"; + case 21: return "Volatile"; + case 22: return "Constant"; + case 23: return "Coherent"; + case 24: return "NonWritable"; + case 25: return "NonReadable"; + case 26: return "Uniform"; + case 27: return "Bad"; + case 28: return "SaturatedConversion"; + case 29: return "Stream"; + case 30: return "Location"; + case 31: return "Component"; + case 32: return "Index"; + case 33: return "Binding"; + case 34: return "DescriptorSet"; + case 35: return "Offset"; + case 36: return "XfbBuffer"; + case 37: return "XfbStride"; + case 38: return "FuncParamAttr"; + case 39: return "FP Rounding Mode"; + case 40: return "FP Fast Math Mode"; + case 41: return "Linkage Attributes"; + case 42: return "NoContraction"; + case 43: return "InputAttachmentIndex"; + case 44: return "Alignment"; + + case DecorationCeiling: + default: return "Bad"; + + case DecorationExplicitInterpAMD: return "ExplicitInterpAMD"; + case DecorationOverrideCoverageNV: return "OverrideCoverageNV"; + case DecorationPassthroughNV: return "PassthroughNV"; + case DecorationViewportRelativeNV: return "ViewportRelativeNV"; + case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV"; + case DecorationPerPrimitiveNV: return "PerPrimitiveNV"; + case DecorationPerViewNV: return 
"PerViewNV"; + case DecorationPerTaskNV: return "PerTaskNV"; + case DecorationPerVertexNV: return "PerVertexNV"; + + case DecorationNonUniformEXT: return "DecorationNonUniformEXT"; + case DecorationHlslCounterBufferGOOGLE: return "DecorationHlslCounterBufferGOOGLE"; + case DecorationHlslSemanticGOOGLE: return "DecorationHlslSemanticGOOGLE"; + case DecorationRestrictPointerEXT: return "DecorationRestrictPointerEXT"; + case DecorationAliasedPointerEXT: return "DecorationAliasedPointerEXT"; + } +} + +const char* BuiltInString(int builtIn) +{ + switch (builtIn) { + case 0: return "Position"; + case 1: return "PointSize"; + case 2: return "Bad"; + case 3: return "ClipDistance"; + case 4: return "CullDistance"; + case 5: return "VertexId"; + case 6: return "InstanceId"; + case 7: return "PrimitiveId"; + case 8: return "InvocationId"; + case 9: return "Layer"; + case 10: return "ViewportIndex"; + case 11: return "TessLevelOuter"; + case 12: return "TessLevelInner"; + case 13: return "TessCoord"; + case 14: return "PatchVertices"; + case 15: return "FragCoord"; + case 16: return "PointCoord"; + case 17: return "FrontFacing"; + case 18: return "SampleId"; + case 19: return "SamplePosition"; + case 20: return "SampleMask"; + case 21: return "Bad"; + case 22: return "FragDepth"; + case 23: return "HelperInvocation"; + case 24: return "NumWorkgroups"; + case 25: return "WorkgroupSize"; + case 26: return "WorkgroupId"; + case 27: return "LocalInvocationId"; + case 28: return "GlobalInvocationId"; + case 29: return "LocalInvocationIndex"; + case 30: return "WorkDim"; + case 31: return "GlobalSize"; + case 32: return "EnqueuedWorkgroupSize"; + case 33: return "GlobalOffset"; + case 34: return "GlobalLinearId"; + case 35: return "Bad"; + case 36: return "SubgroupSize"; + case 37: return "SubgroupMaxSize"; + case 38: return "NumSubgroups"; + case 39: return "NumEnqueuedSubgroups"; + case 40: return "SubgroupId"; + case 41: return "SubgroupLocalInvocationId"; + case 42: return "VertexIndex"; // TBD: put next to VertexId? + case 43: return "InstanceIndex"; // TBD: put next to InstanceId? 
+ + case 4416: return "SubgroupEqMaskKHR"; + case 4417: return "SubgroupGeMaskKHR"; + case 4418: return "SubgroupGtMaskKHR"; + case 4419: return "SubgroupLeMaskKHR"; + case 4420: return "SubgroupLtMaskKHR"; + case 4438: return "DeviceIndex"; + case 4440: return "ViewIndex"; + case 4424: return "BaseVertex"; + case 4425: return "BaseInstance"; + case 4426: return "DrawIndex"; + case 5014: return "FragStencilRefEXT"; + + case 4992: return "BaryCoordNoPerspAMD"; + case 4993: return "BaryCoordNoPerspCentroidAMD"; + case 4994: return "BaryCoordNoPerspSampleAMD"; + case 4995: return "BaryCoordSmoothAMD"; + case 4996: return "BaryCoordSmoothCentroidAMD"; + case 4997: return "BaryCoordSmoothSampleAMD"; + case 4998: return "BaryCoordPullModelAMD"; + case BuiltInLaunchIdKHR: return "LaunchIdKHR"; + case BuiltInLaunchSizeKHR: return "LaunchSizeKHR"; + case BuiltInWorldRayOriginKHR: return "WorldRayOriginKHR"; + case BuiltInWorldRayDirectionKHR: return "WorldRayDirectionKHR"; + case BuiltInObjectRayOriginKHR: return "ObjectRayOriginKHR"; + case BuiltInObjectRayDirectionKHR: return "ObjectRayDirectionKHR"; + case BuiltInRayTminKHR: return "RayTminKHR"; + case BuiltInRayTmaxKHR: return "RayTmaxKHR"; + case BuiltInInstanceCustomIndexKHR: return "InstanceCustomIndexKHR"; + case BuiltInRayGeometryIndexKHR: return "RayGeometryIndexKHR"; + case BuiltInObjectToWorldKHR: return "ObjectToWorldKHR"; + case BuiltInWorldToObjectKHR: return "WorldToObjectKHR"; + case BuiltInHitTKHR: return "HitTKHR"; + case BuiltInHitKindKHR: return "HitKindKHR"; + case BuiltInIncomingRayFlagsKHR: return "IncomingRayFlagsKHR"; + case BuiltInViewportMaskNV: return "ViewportMaskNV"; + case BuiltInSecondaryPositionNV: return "SecondaryPositionNV"; + case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV"; + case BuiltInPositionPerViewNV: return "PositionPerViewNV"; + case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV"; +// case BuiltInFragmentSizeNV: return "FragmentSizeNV"; // superseded by BuiltInFragSizeEXT +// case BuiltInInvocationsPerPixelNV: return "InvocationsPerPixelNV"; // superseded by BuiltInFragInvocationCountEXT + case BuiltInBaryCoordNV: return "BaryCoordNV"; + case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV"; + + case BuiltInFragSizeEXT: return "FragSizeEXT"; + case BuiltInFragInvocationCountEXT: return "FragInvocationCountEXT"; + + case 5264: return "FullyCoveredEXT"; + + case BuiltInTaskCountNV: return "TaskCountNV"; + case BuiltInPrimitiveCountNV: return "PrimitiveCountNV"; + case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV"; + case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV"; + case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV"; + case BuiltInLayerPerViewNV: return "LayerPerViewNV"; + case BuiltInMeshViewCountNV: return "MeshViewCountNV"; + case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV"; + case BuiltInWarpsPerSMNV: return "WarpsPerSMNV"; + case BuiltInSMCountNV: return "SMCountNV"; + case BuiltInWarpIDNV: return "WarpIDNV"; + case BuiltInSMIDNV: return "SMIDNV"; + + default: return "Bad"; + } +} + +const char* DimensionString(int dim) +{ + switch (dim) { + case 0: return "1D"; + case 1: return "2D"; + case 2: return "3D"; + case 3: return "Cube"; + case 4: return "Rect"; + case 5: return "Buffer"; + case 6: return "SubpassData"; + + default: return "Bad"; + } +} + +const char* SamplerAddressingModeString(int mode) +{ + switch (mode) { + case 0: return "None"; + case 1: return "ClampToEdge"; + case 2: return "Clamp"; + case 
3: return "Repeat"; + case 4: return "RepeatMirrored"; + + default: return "Bad"; + } +} + +const char* SamplerFilterModeString(int mode) +{ + switch (mode) { + case 0: return "Nearest"; + case 1: return "Linear"; + + default: return "Bad"; + } +} + +const char* ImageFormatString(int format) +{ + switch (format) { + case 0: return "Unknown"; + + // ES/Desktop float + case 1: return "Rgba32f"; + case 2: return "Rgba16f"; + case 3: return "R32f"; + case 4: return "Rgba8"; + case 5: return "Rgba8Snorm"; + + // Desktop float + case 6: return "Rg32f"; + case 7: return "Rg16f"; + case 8: return "R11fG11fB10f"; + case 9: return "R16f"; + case 10: return "Rgba16"; + case 11: return "Rgb10A2"; + case 12: return "Rg16"; + case 13: return "Rg8"; + case 14: return "R16"; + case 15: return "R8"; + case 16: return "Rgba16Snorm"; + case 17: return "Rg16Snorm"; + case 18: return "Rg8Snorm"; + case 19: return "R16Snorm"; + case 20: return "R8Snorm"; + + // ES/Desktop int + case 21: return "Rgba32i"; + case 22: return "Rgba16i"; + case 23: return "Rgba8i"; + case 24: return "R32i"; + + // Desktop int + case 25: return "Rg32i"; + case 26: return "Rg16i"; + case 27: return "Rg8i"; + case 28: return "R16i"; + case 29: return "R8i"; + + // ES/Desktop uint + case 30: return "Rgba32ui"; + case 31: return "Rgba16ui"; + case 32: return "Rgba8ui"; + case 33: return "R32ui"; + + // Desktop uint + case 34: return "Rgb10a2ui"; + case 35: return "Rg32ui"; + case 36: return "Rg16ui"; + case 37: return "Rg8ui"; + case 38: return "R16ui"; + case 39: return "R8ui"; + + default: + return "Bad"; + } +} + +const char* ImageChannelOrderString(int format) +{ + switch (format) { + case 0: return "R"; + case 1: return "A"; + case 2: return "RG"; + case 3: return "RA"; + case 4: return "RGB"; + case 5: return "RGBA"; + case 6: return "BGRA"; + case 7: return "ARGB"; + case 8: return "Intensity"; + case 9: return "Luminance"; + case 10: return "Rx"; + case 11: return "RGx"; + case 12: return "RGBx"; + case 13: return "Depth"; + case 14: return "DepthStencil"; + case 15: return "sRGB"; + case 16: return "sRGBx"; + case 17: return "sRGBA"; + case 18: return "sBGRA"; + + default: + return "Bad"; + } +} + +const char* ImageChannelDataTypeString(int type) +{ + switch (type) + { + case 0: return "SnormInt8"; + case 1: return "SnormInt16"; + case 2: return "UnormInt8"; + case 3: return "UnormInt16"; + case 4: return "UnormShort565"; + case 5: return "UnormShort555"; + case 6: return "UnormInt101010"; + case 7: return "SignedInt8"; + case 8: return "SignedInt16"; + case 9: return "SignedInt32"; + case 10: return "UnsignedInt8"; + case 11: return "UnsignedInt16"; + case 12: return "UnsignedInt32"; + case 13: return "HalfFloat"; + case 14: return "Float"; + case 15: return "UnormInt24"; + case 16: return "UnormInt101010_2"; + + default: + return "Bad"; + } +} + +const int ImageOperandsCeiling = 14; + +const char* ImageOperandsString(int format) +{ + switch (format) { + case ImageOperandsBiasShift: return "Bias"; + case ImageOperandsLodShift: return "Lod"; + case ImageOperandsGradShift: return "Grad"; + case ImageOperandsConstOffsetShift: return "ConstOffset"; + case ImageOperandsOffsetShift: return "Offset"; + case ImageOperandsConstOffsetsShift: return "ConstOffsets"; + case ImageOperandsSampleShift: return "Sample"; + case ImageOperandsMinLodShift: return "MinLod"; + case ImageOperandsMakeTexelAvailableKHRShift: return "MakeTexelAvailableKHR"; + case ImageOperandsMakeTexelVisibleKHRShift: return "MakeTexelVisibleKHR"; + case 
ImageOperandsNonPrivateTexelKHRShift: return "NonPrivateTexelKHR"; + case ImageOperandsVolatileTexelKHRShift: return "VolatileTexelKHR"; + case ImageOperandsSignExtendShift: return "SignExtend"; + case ImageOperandsZeroExtendShift: return "ZeroExtend"; + + case ImageOperandsCeiling: + default: + return "Bad"; + } +} + +const char* FPFastMathString(int mode) +{ + switch (mode) { + case 0: return "NotNaN"; + case 1: return "NotInf"; + case 2: return "NSZ"; + case 3: return "AllowRecip"; + case 4: return "Fast"; + + default: return "Bad"; + } +} + +const char* FPRoundingModeString(int mode) +{ + switch (mode) { + case 0: return "RTE"; + case 1: return "RTZ"; + case 2: return "RTP"; + case 3: return "RTN"; + + default: return "Bad"; + } +} + +const char* LinkageTypeString(int type) +{ + switch (type) { + case 0: return "Export"; + case 1: return "Import"; + + default: return "Bad"; + } +} + +const char* FuncParamAttrString(int attr) +{ + switch (attr) { + case 0: return "Zext"; + case 1: return "Sext"; + case 2: return "ByVal"; + case 3: return "Sret"; + case 4: return "NoAlias"; + case 5: return "NoCapture"; + case 6: return "NoWrite"; + case 7: return "NoReadWrite"; + + default: return "Bad"; + } +} + +const char* AccessQualifierString(int attr) +{ + switch (attr) { + case 0: return "ReadOnly"; + case 1: return "WriteOnly"; + case 2: return "ReadWrite"; + + default: return "Bad"; + } +} + +const int SelectControlCeiling = 2; + +const char* SelectControlString(int cont) +{ + switch (cont) { + case 0: return "Flatten"; + case 1: return "DontFlatten"; + + case SelectControlCeiling: + default: return "Bad"; + } +} + +const int LoopControlCeiling = LoopControlPartialCountShift + 1; + +const char* LoopControlString(int cont) +{ + switch (cont) { + case LoopControlUnrollShift: return "Unroll"; + case LoopControlDontUnrollShift: return "DontUnroll"; + case LoopControlDependencyInfiniteShift: return "DependencyInfinite"; + case LoopControlDependencyLengthShift: return "DependencyLength"; + case LoopControlMinIterationsShift: return "MinIterations"; + case LoopControlMaxIterationsShift: return "MaxIterations"; + case LoopControlIterationMultipleShift: return "IterationMultiple"; + case LoopControlPeelCountShift: return "PeelCount"; + case LoopControlPartialCountShift: return "PartialCount"; + + case LoopControlCeiling: + default: return "Bad"; + } +} + +const int FunctionControlCeiling = 4; + +const char* FunctionControlString(int cont) +{ + switch (cont) { + case 0: return "Inline"; + case 1: return "DontInline"; + case 2: return "Pure"; + case 3: return "Const"; + + case FunctionControlCeiling: + default: return "Bad"; + } +} + +const char* MemorySemanticsString(int mem) +{ + // Note: No bits set (None) means "Relaxed" + switch (mem) { + case 0: return "Bad"; // Note: this is a placeholder for 'Consume' + case 1: return "Acquire"; + case 2: return "Release"; + case 3: return "AcquireRelease"; + case 4: return "SequentiallyConsistent"; + case 5: return "Bad"; // Note: reserved for future expansion + case 6: return "UniformMemory"; + case 7: return "SubgroupMemory"; + case 8: return "WorkgroupMemory"; + case 9: return "CrossWorkgroupMemory"; + case 10: return "AtomicCounterMemory"; + case 11: return "ImageMemory"; + + default: return "Bad"; + } +} + +const int MemoryAccessCeiling = 6; + +const char* MemoryAccessString(int mem) +{ + switch (mem) { + case MemoryAccessVolatileShift: return "Volatile"; + case MemoryAccessAlignedShift: return "Aligned"; + case MemoryAccessNontemporalShift: return 
"Nontemporal"; + case MemoryAccessMakePointerAvailableKHRShift: return "MakePointerAvailableKHR"; + case MemoryAccessMakePointerVisibleKHRShift: return "MakePointerVisibleKHR"; + case MemoryAccessNonPrivatePointerKHRShift: return "NonPrivatePointerKHR"; + + default: return "Bad"; + } +} + +const char* ScopeString(int mem) +{ + switch (mem) { + case 0: return "CrossDevice"; + case 1: return "Device"; + case 2: return "Workgroup"; + case 3: return "Subgroup"; + case 4: return "Invocation"; + + default: return "Bad"; + } +} + +const char* GroupOperationString(int gop) +{ + + switch (gop) + { + case GroupOperationReduce: return "Reduce"; + case GroupOperationInclusiveScan: return "InclusiveScan"; + case GroupOperationExclusiveScan: return "ExclusiveScan"; + case GroupOperationClusteredReduce: return "ClusteredReduce"; + case GroupOperationPartitionedReduceNV: return "PartitionedReduceNV"; + case GroupOperationPartitionedInclusiveScanNV: return "PartitionedInclusiveScanNV"; + case GroupOperationPartitionedExclusiveScanNV: return "PartitionedExclusiveScanNV"; + + default: return "Bad"; + } +} + +const char* KernelEnqueueFlagsString(int flag) +{ + switch (flag) + { + case 0: return "NoWait"; + case 1: return "WaitKernel"; + case 2: return "WaitWorkGroup"; + + default: return "Bad"; + } +} + +const char* KernelProfilingInfoString(int info) +{ + switch (info) + { + case 0: return "CmdExecTime"; + + default: return "Bad"; + } +} + +const char* CapabilityString(int info) +{ + switch (info) + { + case 0: return "Matrix"; + case 1: return "Shader"; + case 2: return "Geometry"; + case 3: return "Tessellation"; + case 4: return "Addresses"; + case 5: return "Linkage"; + case 6: return "Kernel"; + case 7: return "Vector16"; + case 8: return "Float16Buffer"; + case 9: return "Float16"; + case 10: return "Float64"; + case 11: return "Int64"; + case 12: return "Int64Atomics"; + case 13: return "ImageBasic"; + case 14: return "ImageReadWrite"; + case 15: return "ImageMipmap"; + case 16: return "Bad"; + case 17: return "Pipes"; + case 18: return "Groups"; + case 19: return "DeviceEnqueue"; + case 20: return "LiteralSampler"; + case 21: return "AtomicStorage"; + case 22: return "Int16"; + case 23: return "TessellationPointSize"; + case 24: return "GeometryPointSize"; + case 25: return "ImageGatherExtended"; + case 26: return "Bad"; + case 27: return "StorageImageMultisample"; + case 28: return "UniformBufferArrayDynamicIndexing"; + case 29: return "SampledImageArrayDynamicIndexing"; + case 30: return "StorageBufferArrayDynamicIndexing"; + case 31: return "StorageImageArrayDynamicIndexing"; + case 32: return "ClipDistance"; + case 33: return "CullDistance"; + case 34: return "ImageCubeArray"; + case 35: return "SampleRateShading"; + case 36: return "ImageRect"; + case 37: return "SampledRect"; + case 38: return "GenericPointer"; + case 39: return "Int8"; + case 40: return "InputAttachment"; + case 41: return "SparseResidency"; + case 42: return "MinLod"; + case 43: return "Sampled1D"; + case 44: return "Image1D"; + case 45: return "SampledCubeArray"; + case 46: return "SampledBuffer"; + case 47: return "ImageBuffer"; + case 48: return "ImageMSArray"; + case 49: return "StorageImageExtendedFormats"; + case 50: return "ImageQuery"; + case 51: return "DerivativeControl"; + case 52: return "InterpolationFunction"; + case 53: return "TransformFeedback"; + case 54: return "GeometryStreams"; + case 55: return "StorageImageReadWithoutFormat"; + case 56: return "StorageImageWriteWithoutFormat"; + case 57: return 
"MultiViewport"; + case 61: return "GroupNonUniform"; + case 62: return "GroupNonUniformVote"; + case 63: return "GroupNonUniformArithmetic"; + case 64: return "GroupNonUniformBallot"; + case 65: return "GroupNonUniformShuffle"; + case 66: return "GroupNonUniformShuffleRelative"; + case 67: return "GroupNonUniformClustered"; + case 68: return "GroupNonUniformQuad"; + + case CapabilitySubgroupBallotKHR: return "SubgroupBallotKHR"; + case CapabilityDrawParameters: return "DrawParameters"; + case CapabilitySubgroupVoteKHR: return "SubgroupVoteKHR"; + + case CapabilityStorageUniformBufferBlock16: return "StorageUniformBufferBlock16"; + case CapabilityStorageUniform16: return "StorageUniform16"; + case CapabilityStoragePushConstant16: return "StoragePushConstant16"; + case CapabilityStorageInputOutput16: return "StorageInputOutput16"; + + case CapabilityStorageBuffer8BitAccess: return "StorageBuffer8BitAccess"; + case CapabilityUniformAndStorageBuffer8BitAccess: return "UniformAndStorageBuffer8BitAccess"; + case CapabilityStoragePushConstant8: return "StoragePushConstant8"; + + case CapabilityDeviceGroup: return "DeviceGroup"; + case CapabilityMultiView: return "MultiView"; + + case CapabilityStencilExportEXT: return "StencilExportEXT"; + + case CapabilityFloat16ImageAMD: return "Float16ImageAMD"; + case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD"; + case CapabilityFragmentMaskAMD: return "FragmentMaskAMD"; + case CapabilityImageReadWriteLodAMD: return "ImageReadWriteLodAMD"; + + case CapabilityAtomicStorageOps: return "AtomicStorageOps"; + + case CapabilitySampleMaskPostDepthCoverage: return "SampleMaskPostDepthCoverage"; + case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV"; + case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV"; + case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV"; + case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV"; + case CapabilityPerViewAttributesNV: return "PerViewAttributesNV"; + case CapabilityGroupNonUniformPartitionedNV: return "GroupNonUniformPartitionedNV"; + case CapabilityRayTracingNV: return "RayTracingNV"; + case CapabilityRayTracingProvisionalKHR: return "RayTracingProvisionalKHR"; + case CapabilityRayQueryProvisionalKHR: return "RayQueryProvisionalKHR"; + case CapabilityRayTraversalPrimitiveCullingProvisionalKHR: return "RayTraversalPrimitiveCullingProvisionalKHR"; + case CapabilityComputeDerivativeGroupQuadsNV: return "ComputeDerivativeGroupQuadsNV"; + case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV"; + case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV"; + case CapabilityMeshShadingNV: return "MeshShadingNV"; + case CapabilityImageFootprintNV: return "ImageFootprintNV"; +// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by FragmentDensityEXT + case CapabilitySampleMaskOverrideCoverageNV: return "SampleMaskOverrideCoverageNV"; + case CapabilityFragmentDensityEXT: return "FragmentDensityEXT"; + + case CapabilityFragmentFullyCoveredEXT: return "FragmentFullyCoveredEXT"; + + case CapabilityShaderNonUniformEXT: return "ShaderNonUniformEXT"; + case CapabilityRuntimeDescriptorArrayEXT: return "RuntimeDescriptorArrayEXT"; + case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "InputAttachmentArrayDynamicIndexingEXT"; + case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "UniformTexelBufferArrayDynamicIndexingEXT"; + case 
CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "StorageTexelBufferArrayDynamicIndexingEXT"; + case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "UniformBufferArrayNonUniformIndexingEXT"; + case CapabilitySampledImageArrayNonUniformIndexingEXT: return "SampledImageArrayNonUniformIndexingEXT"; + case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "StorageBufferArrayNonUniformIndexingEXT"; + case CapabilityStorageImageArrayNonUniformIndexingEXT: return "StorageImageArrayNonUniformIndexingEXT"; + case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "InputAttachmentArrayNonUniformIndexingEXT"; + case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "UniformTexelBufferArrayNonUniformIndexingEXT"; + case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "StorageTexelBufferArrayNonUniformIndexingEXT"; + + case CapabilityVulkanMemoryModelKHR: return "VulkanMemoryModelKHR"; + case CapabilityVulkanMemoryModelDeviceScopeKHR: return "VulkanMemoryModelDeviceScopeKHR"; + + case CapabilityPhysicalStorageBufferAddressesEXT: return "PhysicalStorageBufferAddressesEXT"; + + case CapabilityVariablePointers: return "VariablePointers"; + + case CapabilityCooperativeMatrixNV: return "CooperativeMatrixNV"; + case CapabilityShaderSMBuiltinsNV: return "ShaderSMBuiltinsNV"; + + case CapabilityFragmentShaderSampleInterlockEXT: return "CapabilityFragmentShaderSampleInterlockEXT"; + case CapabilityFragmentShaderPixelInterlockEXT: return "CapabilityFragmentShaderPixelInterlockEXT"; + case CapabilityFragmentShaderShadingRateInterlockEXT: return "CapabilityFragmentShaderShadingRateInterlockEXT"; + + case CapabilityDemoteToHelperInvocationEXT: return "DemoteToHelperInvocationEXT"; + case CapabilityShaderClockKHR: return "ShaderClockKHR"; + + case CapabilityIntegerFunctions2INTEL: return "CapabilityIntegerFunctions2INTEL"; + + default: return "Bad"; + } +} + +const char* OpcodeString(int op) +{ + switch (op) { + case 0: return "OpNop"; + case 1: return "OpUndef"; + case 2: return "OpSourceContinued"; + case 3: return "OpSource"; + case 4: return "OpSourceExtension"; + case 5: return "OpName"; + case 6: return "OpMemberName"; + case 7: return "OpString"; + case 8: return "OpLine"; + case 9: return "Bad"; + case 10: return "OpExtension"; + case 11: return "OpExtInstImport"; + case 12: return "OpExtInst"; + case 13: return "Bad"; + case 14: return "OpMemoryModel"; + case 15: return "OpEntryPoint"; + case 16: return "OpExecutionMode"; + case 17: return "OpCapability"; + case 18: return "Bad"; + case 19: return "OpTypeVoid"; + case 20: return "OpTypeBool"; + case 21: return "OpTypeInt"; + case 22: return "OpTypeFloat"; + case 23: return "OpTypeVector"; + case 24: return "OpTypeMatrix"; + case 25: return "OpTypeImage"; + case 26: return "OpTypeSampler"; + case 27: return "OpTypeSampledImage"; + case 28: return "OpTypeArray"; + case 29: return "OpTypeRuntimeArray"; + case 30: return "OpTypeStruct"; + case 31: return "OpTypeOpaque"; + case 32: return "OpTypePointer"; + case 33: return "OpTypeFunction"; + case 34: return "OpTypeEvent"; + case 35: return "OpTypeDeviceEvent"; + case 36: return "OpTypeReserveId"; + case 37: return "OpTypeQueue"; + case 38: return "OpTypePipe"; + case 39: return "OpTypeForwardPointer"; + case 40: return "Bad"; + case 41: return "OpConstantTrue"; + case 42: return "OpConstantFalse"; + case 43: return "OpConstant"; + case 44: return "OpConstantComposite"; + case 45: return "OpConstantSampler"; + case 46: return "OpConstantNull"; + 
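+ // (Throughout this switch, "Bad" marks opcode values the table leaves unnamed,
+ // i.e. gaps or reserved numbers in the instruction set.)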
case 47: return "Bad"; + case 48: return "OpSpecConstantTrue"; + case 49: return "OpSpecConstantFalse"; + case 50: return "OpSpecConstant"; + case 51: return "OpSpecConstantComposite"; + case 52: return "OpSpecConstantOp"; + case 53: return "Bad"; + case 54: return "OpFunction"; + case 55: return "OpFunctionParameter"; + case 56: return "OpFunctionEnd"; + case 57: return "OpFunctionCall"; + case 58: return "Bad"; + case 59: return "OpVariable"; + case 60: return "OpImageTexelPointer"; + case 61: return "OpLoad"; + case 62: return "OpStore"; + case 63: return "OpCopyMemory"; + case 64: return "OpCopyMemorySized"; + case 65: return "OpAccessChain"; + case 66: return "OpInBoundsAccessChain"; + case 67: return "OpPtrAccessChain"; + case 68: return "OpArrayLength"; + case 69: return "OpGenericPtrMemSemantics"; + case 70: return "OpInBoundsPtrAccessChain"; + case 71: return "OpDecorate"; + case 72: return "OpMemberDecorate"; + case 73: return "OpDecorationGroup"; + case 74: return "OpGroupDecorate"; + case 75: return "OpGroupMemberDecorate"; + case 76: return "Bad"; + case 77: return "OpVectorExtractDynamic"; + case 78: return "OpVectorInsertDynamic"; + case 79: return "OpVectorShuffle"; + case 80: return "OpCompositeConstruct"; + case 81: return "OpCompositeExtract"; + case 82: return "OpCompositeInsert"; + case 83: return "OpCopyObject"; + case 84: return "OpTranspose"; + case OpCopyLogical: return "OpCopyLogical"; + case 85: return "Bad"; + case 86: return "OpSampledImage"; + case 87: return "OpImageSampleImplicitLod"; + case 88: return "OpImageSampleExplicitLod"; + case 89: return "OpImageSampleDrefImplicitLod"; + case 90: return "OpImageSampleDrefExplicitLod"; + case 91: return "OpImageSampleProjImplicitLod"; + case 92: return "OpImageSampleProjExplicitLod"; + case 93: return "OpImageSampleProjDrefImplicitLod"; + case 94: return "OpImageSampleProjDrefExplicitLod"; + case 95: return "OpImageFetch"; + case 96: return "OpImageGather"; + case 97: return "OpImageDrefGather"; + case 98: return "OpImageRead"; + case 99: return "OpImageWrite"; + case 100: return "OpImage"; + case 101: return "OpImageQueryFormat"; + case 102: return "OpImageQueryOrder"; + case 103: return "OpImageQuerySizeLod"; + case 104: return "OpImageQuerySize"; + case 105: return "OpImageQueryLod"; + case 106: return "OpImageQueryLevels"; + case 107: return "OpImageQuerySamples"; + case 108: return "Bad"; + case 109: return "OpConvertFToU"; + case 110: return "OpConvertFToS"; + case 111: return "OpConvertSToF"; + case 112: return "OpConvertUToF"; + case 113: return "OpUConvert"; + case 114: return "OpSConvert"; + case 115: return "OpFConvert"; + case 116: return "OpQuantizeToF16"; + case 117: return "OpConvertPtrToU"; + case 118: return "OpSatConvertSToU"; + case 119: return "OpSatConvertUToS"; + case 120: return "OpConvertUToPtr"; + case 121: return "OpPtrCastToGeneric"; + case 122: return "OpGenericCastToPtr"; + case 123: return "OpGenericCastToPtrExplicit"; + case 124: return "OpBitcast"; + case 125: return "Bad"; + case 126: return "OpSNegate"; + case 127: return "OpFNegate"; + case 128: return "OpIAdd"; + case 129: return "OpFAdd"; + case 130: return "OpISub"; + case 131: return "OpFSub"; + case 132: return "OpIMul"; + case 133: return "OpFMul"; + case 134: return "OpUDiv"; + case 135: return "OpSDiv"; + case 136: return "OpFDiv"; + case 137: return "OpUMod"; + case 138: return "OpSRem"; + case 139: return "OpSMod"; + case 140: return "OpFRem"; + case 141: return "OpFMod"; + case 142: return "OpVectorTimesScalar"; + case 
143: return "OpMatrixTimesScalar"; + case 144: return "OpVectorTimesMatrix"; + case 145: return "OpMatrixTimesVector"; + case 146: return "OpMatrixTimesMatrix"; + case 147: return "OpOuterProduct"; + case 148: return "OpDot"; + case 149: return "OpIAddCarry"; + case 150: return "OpISubBorrow"; + case 151: return "OpUMulExtended"; + case 152: return "OpSMulExtended"; + case 153: return "Bad"; + case 154: return "OpAny"; + case 155: return "OpAll"; + case 156: return "OpIsNan"; + case 157: return "OpIsInf"; + case 158: return "OpIsFinite"; + case 159: return "OpIsNormal"; + case 160: return "OpSignBitSet"; + case 161: return "OpLessOrGreater"; + case 162: return "OpOrdered"; + case 163: return "OpUnordered"; + case 164: return "OpLogicalEqual"; + case 165: return "OpLogicalNotEqual"; + case 166: return "OpLogicalOr"; + case 167: return "OpLogicalAnd"; + case 168: return "OpLogicalNot"; + case 169: return "OpSelect"; + case 170: return "OpIEqual"; + case 171: return "OpINotEqual"; + case 172: return "OpUGreaterThan"; + case 173: return "OpSGreaterThan"; + case 174: return "OpUGreaterThanEqual"; + case 175: return "OpSGreaterThanEqual"; + case 176: return "OpULessThan"; + case 177: return "OpSLessThan"; + case 178: return "OpULessThanEqual"; + case 179: return "OpSLessThanEqual"; + case 180: return "OpFOrdEqual"; + case 181: return "OpFUnordEqual"; + case 182: return "OpFOrdNotEqual"; + case 183: return "OpFUnordNotEqual"; + case 184: return "OpFOrdLessThan"; + case 185: return "OpFUnordLessThan"; + case 186: return "OpFOrdGreaterThan"; + case 187: return "OpFUnordGreaterThan"; + case 188: return "OpFOrdLessThanEqual"; + case 189: return "OpFUnordLessThanEqual"; + case 190: return "OpFOrdGreaterThanEqual"; + case 191: return "OpFUnordGreaterThanEqual"; + case 192: return "Bad"; + case 193: return "Bad"; + case 194: return "OpShiftRightLogical"; + case 195: return "OpShiftRightArithmetic"; + case 196: return "OpShiftLeftLogical"; + case 197: return "OpBitwiseOr"; + case 198: return "OpBitwiseXor"; + case 199: return "OpBitwiseAnd"; + case 200: return "OpNot"; + case 201: return "OpBitFieldInsert"; + case 202: return "OpBitFieldSExtract"; + case 203: return "OpBitFieldUExtract"; + case 204: return "OpBitReverse"; + case 205: return "OpBitCount"; + case 206: return "Bad"; + case 207: return "OpDPdx"; + case 208: return "OpDPdy"; + case 209: return "OpFwidth"; + case 210: return "OpDPdxFine"; + case 211: return "OpDPdyFine"; + case 212: return "OpFwidthFine"; + case 213: return "OpDPdxCoarse"; + case 214: return "OpDPdyCoarse"; + case 215: return "OpFwidthCoarse"; + case 216: return "Bad"; + case 217: return "Bad"; + case 218: return "OpEmitVertex"; + case 219: return "OpEndPrimitive"; + case 220: return "OpEmitStreamVertex"; + case 221: return "OpEndStreamPrimitive"; + case 222: return "Bad"; + case 223: return "Bad"; + case 224: return "OpControlBarrier"; + case 225: return "OpMemoryBarrier"; + case 226: return "Bad"; + case 227: return "OpAtomicLoad"; + case 228: return "OpAtomicStore"; + case 229: return "OpAtomicExchange"; + case 230: return "OpAtomicCompareExchange"; + case 231: return "OpAtomicCompareExchangeWeak"; + case 232: return "OpAtomicIIncrement"; + case 233: return "OpAtomicIDecrement"; + case 234: return "OpAtomicIAdd"; + case 235: return "OpAtomicISub"; + case 236: return "OpAtomicSMin"; + case 237: return "OpAtomicUMin"; + case 238: return "OpAtomicSMax"; + case 239: return "OpAtomicUMax"; + case 240: return "OpAtomicAnd"; + case 241: return "OpAtomicOr"; + case 242: return 
"OpAtomicXor"; + case 243: return "Bad"; + case 244: return "Bad"; + case 245: return "OpPhi"; + case 246: return "OpLoopMerge"; + case 247: return "OpSelectionMerge"; + case 248: return "OpLabel"; + case 249: return "OpBranch"; + case 250: return "OpBranchConditional"; + case 251: return "OpSwitch"; + case 252: return "OpKill"; + case 253: return "OpReturn"; + case 254: return "OpReturnValue"; + case 255: return "OpUnreachable"; + case 256: return "OpLifetimeStart"; + case 257: return "OpLifetimeStop"; + case 258: return "Bad"; + case 259: return "OpGroupAsyncCopy"; + case 260: return "OpGroupWaitEvents"; + case 261: return "OpGroupAll"; + case 262: return "OpGroupAny"; + case 263: return "OpGroupBroadcast"; + case 264: return "OpGroupIAdd"; + case 265: return "OpGroupFAdd"; + case 266: return "OpGroupFMin"; + case 267: return "OpGroupUMin"; + case 268: return "OpGroupSMin"; + case 269: return "OpGroupFMax"; + case 270: return "OpGroupUMax"; + case 271: return "OpGroupSMax"; + case 272: return "Bad"; + case 273: return "Bad"; + case 274: return "OpReadPipe"; + case 275: return "OpWritePipe"; + case 276: return "OpReservedReadPipe"; + case 277: return "OpReservedWritePipe"; + case 278: return "OpReserveReadPipePackets"; + case 279: return "OpReserveWritePipePackets"; + case 280: return "OpCommitReadPipe"; + case 281: return "OpCommitWritePipe"; + case 282: return "OpIsValidReserveId"; + case 283: return "OpGetNumPipePackets"; + case 284: return "OpGetMaxPipePackets"; + case 285: return "OpGroupReserveReadPipePackets"; + case 286: return "OpGroupReserveWritePipePackets"; + case 287: return "OpGroupCommitReadPipe"; + case 288: return "OpGroupCommitWritePipe"; + case 289: return "Bad"; + case 290: return "Bad"; + case 291: return "OpEnqueueMarker"; + case 292: return "OpEnqueueKernel"; + case 293: return "OpGetKernelNDrangeSubGroupCount"; + case 294: return "OpGetKernelNDrangeMaxSubGroupSize"; + case 295: return "OpGetKernelWorkGroupSize"; + case 296: return "OpGetKernelPreferredWorkGroupSizeMultiple"; + case 297: return "OpRetainEvent"; + case 298: return "OpReleaseEvent"; + case 299: return "OpCreateUserEvent"; + case 300: return "OpIsValidEvent"; + case 301: return "OpSetUserEventStatus"; + case 302: return "OpCaptureEventProfilingInfo"; + case 303: return "OpGetDefaultQueue"; + case 304: return "OpBuildNDRange"; + case 305: return "OpImageSparseSampleImplicitLod"; + case 306: return "OpImageSparseSampleExplicitLod"; + case 307: return "OpImageSparseSampleDrefImplicitLod"; + case 308: return "OpImageSparseSampleDrefExplicitLod"; + case 309: return "OpImageSparseSampleProjImplicitLod"; + case 310: return "OpImageSparseSampleProjExplicitLod"; + case 311: return "OpImageSparseSampleProjDrefImplicitLod"; + case 312: return "OpImageSparseSampleProjDrefExplicitLod"; + case 313: return "OpImageSparseFetch"; + case 314: return "OpImageSparseGather"; + case 315: return "OpImageSparseDrefGather"; + case 316: return "OpImageSparseTexelsResident"; + case 317: return "OpNoLine"; + case 318: return "OpAtomicFlagTestAndSet"; + case 319: return "OpAtomicFlagClear"; + case 320: return "OpImageSparseRead"; + + case OpModuleProcessed: return "OpModuleProcessed"; + case OpDecorateId: return "OpDecorateId"; + + case 333: return "OpGroupNonUniformElect"; + case 334: return "OpGroupNonUniformAll"; + case 335: return "OpGroupNonUniformAny"; + case 336: return "OpGroupNonUniformAllEqual"; + case 337: return "OpGroupNonUniformBroadcast"; + case 338: return "OpGroupNonUniformBroadcastFirst"; + case 339: return 
"OpGroupNonUniformBallot"; + case 340: return "OpGroupNonUniformInverseBallot"; + case 341: return "OpGroupNonUniformBallotBitExtract"; + case 342: return "OpGroupNonUniformBallotBitCount"; + case 343: return "OpGroupNonUniformBallotFindLSB"; + case 344: return "OpGroupNonUniformBallotFindMSB"; + case 345: return "OpGroupNonUniformShuffle"; + case 346: return "OpGroupNonUniformShuffleXor"; + case 347: return "OpGroupNonUniformShuffleUp"; + case 348: return "OpGroupNonUniformShuffleDown"; + case 349: return "OpGroupNonUniformIAdd"; + case 350: return "OpGroupNonUniformFAdd"; + case 351: return "OpGroupNonUniformIMul"; + case 352: return "OpGroupNonUniformFMul"; + case 353: return "OpGroupNonUniformSMin"; + case 354: return "OpGroupNonUniformUMin"; + case 355: return "OpGroupNonUniformFMin"; + case 356: return "OpGroupNonUniformSMax"; + case 357: return "OpGroupNonUniformUMax"; + case 358: return "OpGroupNonUniformFMax"; + case 359: return "OpGroupNonUniformBitwiseAnd"; + case 360: return "OpGroupNonUniformBitwiseOr"; + case 361: return "OpGroupNonUniformBitwiseXor"; + case 362: return "OpGroupNonUniformLogicalAnd"; + case 363: return "OpGroupNonUniformLogicalOr"; + case 364: return "OpGroupNonUniformLogicalXor"; + case 365: return "OpGroupNonUniformQuadBroadcast"; + case 366: return "OpGroupNonUniformQuadSwap"; + + case 4421: return "OpSubgroupBallotKHR"; + case 4422: return "OpSubgroupFirstInvocationKHR"; + case 4428: return "OpSubgroupAllKHR"; + case 4429: return "OpSubgroupAnyKHR"; + case 4430: return "OpSubgroupAllEqualKHR"; + case 4432: return "OpSubgroupReadInvocationKHR"; + + case 5000: return "OpGroupIAddNonUniformAMD"; + case 5001: return "OpGroupFAddNonUniformAMD"; + case 5002: return "OpGroupFMinNonUniformAMD"; + case 5003: return "OpGroupUMinNonUniformAMD"; + case 5004: return "OpGroupSMinNonUniformAMD"; + case 5005: return "OpGroupFMaxNonUniformAMD"; + case 5006: return "OpGroupUMaxNonUniformAMD"; + case 5007: return "OpGroupSMaxNonUniformAMD"; + + case 5011: return "OpFragmentMaskFetchAMD"; + case 5012: return "OpFragmentFetchAMD"; + + case OpReadClockKHR: return "OpReadClockKHR"; + + case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE"; + case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE"; + + case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV"; + case OpReportIntersectionKHR: return "OpReportIntersectionKHR"; + case OpIgnoreIntersectionKHR: return "OpIgnoreIntersectionKHR"; + case OpTerminateRayKHR: return "OpTerminateRayKHR"; + case OpTraceRayKHR: return "OpTraceRayKHR"; + case OpTypeAccelerationStructureKHR: return "OpTypeAccelerationStructureKHR"; + case OpExecuteCallableKHR: return "OpExecuteCallableKHR"; + case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV"; + case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV"; + + case OpTypeRayQueryProvisionalKHR: return "OpTypeRayQueryProvisionalKHR"; + case OpRayQueryInitializeKHR: return "OpRayQueryInitializeKHR"; + case OpRayQueryTerminateKHR: return "OpRayQueryTerminateKHR"; + case OpRayQueryGenerateIntersectionKHR: return "OpRayQueryGenerateIntersectionKHR"; + case OpRayQueryConfirmIntersectionKHR: return "OpRayQueryConfirmIntersectionKHR"; + case OpRayQueryProceedKHR: return "OpRayQueryProceedKHR"; + case OpRayQueryGetIntersectionTypeKHR: return "OpRayQueryGetIntersectionTypeKHR"; + case OpRayQueryGetRayTMinKHR: return "OpRayQueryGetRayTMinKHR"; + case OpRayQueryGetRayFlagsKHR: return "OpRayQueryGetRayFlagsKHR"; + case 
OpRayQueryGetIntersectionTKHR: return "OpRayQueryGetIntersectionTKHR"; + case OpRayQueryGetIntersectionInstanceCustomIndexKHR: return "OpRayQueryGetIntersectionInstanceCustomIndexKHR"; + case OpRayQueryGetIntersectionInstanceIdKHR: return "OpRayQueryGetIntersectionInstanceIdKHR"; + case OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR: return "OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR"; + case OpRayQueryGetIntersectionGeometryIndexKHR: return "OpRayQueryGetIntersectionGeometryIndexKHR"; + case OpRayQueryGetIntersectionPrimitiveIndexKHR: return "OpRayQueryGetIntersectionPrimitiveIndexKHR"; + case OpRayQueryGetIntersectionBarycentricsKHR: return "OpRayQueryGetIntersectionBarycentricsKHR"; + case OpRayQueryGetIntersectionFrontFaceKHR: return "OpRayQueryGetIntersectionFrontFaceKHR"; + case OpRayQueryGetIntersectionCandidateAABBOpaqueKHR: return "OpRayQueryGetIntersectionCandidateAABBOpaqueKHR"; + case OpRayQueryGetIntersectionObjectRayDirectionKHR: return "OpRayQueryGetIntersectionObjectRayDirectionKHR"; + case OpRayQueryGetIntersectionObjectRayOriginKHR: return "OpRayQueryGetIntersectionObjectRayOriginKHR"; + case OpRayQueryGetWorldRayDirectionKHR: return "OpRayQueryGetWorldRayDirectionKHR"; + case OpRayQueryGetWorldRayOriginKHR: return "OpRayQueryGetWorldRayOriginKHR"; + case OpRayQueryGetIntersectionObjectToWorldKHR: return "OpRayQueryGetIntersectionObjectToWorldKHR"; + case OpRayQueryGetIntersectionWorldToObjectKHR: return "OpRayQueryGetIntersectionWorldToObjectKHR"; + + case OpTypeCooperativeMatrixNV: return "OpTypeCooperativeMatrixNV"; + case OpCooperativeMatrixLoadNV: return "OpCooperativeMatrixLoadNV"; + case OpCooperativeMatrixStoreNV: return "OpCooperativeMatrixStoreNV"; + case OpCooperativeMatrixMulAddNV: return "OpCooperativeMatrixMulAddNV"; + case OpCooperativeMatrixLengthNV: return "OpCooperativeMatrixLengthNV"; + case OpDemoteToHelperInvocationEXT: return "OpDemoteToHelperInvocationEXT"; + case OpIsHelperInvocationEXT: return "OpIsHelperInvocationEXT"; + + case OpBeginInvocationInterlockEXT: return "OpBeginInvocationInterlockEXT"; + case OpEndInvocationInterlockEXT: return "OpEndInvocationInterlockEXT"; + + default: + return "Bad"; + } +} + +// The set of objects that hold all the instruction/operand +// parameterization information. +InstructionParameters InstructionDesc[OpCodeMask + 1]; +OperandParameters ExecutionModeOperands[ExecutionModeCeiling]; +OperandParameters DecorationOperands[DecorationCeiling]; + +EnumDefinition OperandClassParams[OperandCount]; +EnumParameters ExecutionModeParams[ExecutionModeCeiling]; +EnumParameters ImageOperandsParams[ImageOperandsCeiling]; +EnumParameters DecorationParams[DecorationCeiling]; +EnumParameters LoopControlParams[LoopControlCeiling]; +EnumParameters SelectionControlParams[SelectControlCeiling]; +EnumParameters FunctionControlParams[FunctionControlCeiling]; +EnumParameters MemoryAccessParams[MemoryAccessCeiling]; + +// Set up all the parameterizing descriptions of the opcodes, operands, etc. +void Parameterize() +{ + // only do this once. + static bool initialized = false; + if (initialized) + return; + initialized = true; + + // Exceptions to having a result and a resulting type. + // (Everything is initialized to have both).
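+
+ // A brief usage sketch (illustrative only; hasResult()/hasType() are the
+ // accessors doc.h is assumed to expose for these flags):
+ //
+ //     Parameterize();
+ //     assert(!InstructionDesc[OpStore].hasResult()); // OpStore yields no result <id>
+ //     assert(InstructionDesc[OpLoad].hasResult());   // OpLoad yields a typed result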
+ + InstructionDesc[OpNop].setResultAndType(false, false); + InstructionDesc[OpSource].setResultAndType(false, false); + InstructionDesc[OpSourceContinued].setResultAndType(false, false); + InstructionDesc[OpSourceExtension].setResultAndType(false, false); + InstructionDesc[OpExtension].setResultAndType(false, false); + InstructionDesc[OpExtInstImport].setResultAndType(true, false); + InstructionDesc[OpCapability].setResultAndType(false, false); + InstructionDesc[OpMemoryModel].setResultAndType(false, false); + InstructionDesc[OpEntryPoint].setResultAndType(false, false); + InstructionDesc[OpExecutionMode].setResultAndType(false, false); + InstructionDesc[OpTypeVoid].setResultAndType(true, false); + InstructionDesc[OpTypeBool].setResultAndType(true, false); + InstructionDesc[OpTypeInt].setResultAndType(true, false); + InstructionDesc[OpTypeFloat].setResultAndType(true, false); + InstructionDesc[OpTypeVector].setResultAndType(true, false); + InstructionDesc[OpTypeMatrix].setResultAndType(true, false); + InstructionDesc[OpTypeImage].setResultAndType(true, false); + InstructionDesc[OpTypeSampler].setResultAndType(true, false); + InstructionDesc[OpTypeSampledImage].setResultAndType(true, false); + InstructionDesc[OpTypeArray].setResultAndType(true, false); + InstructionDesc[OpTypeRuntimeArray].setResultAndType(true, false); + InstructionDesc[OpTypeStruct].setResultAndType(true, false); + InstructionDesc[OpTypeOpaque].setResultAndType(true, false); + InstructionDesc[OpTypePointer].setResultAndType(true, false); + InstructionDesc[OpTypeForwardPointer].setResultAndType(false, false); + InstructionDesc[OpTypeFunction].setResultAndType(true, false); + InstructionDesc[OpTypeEvent].setResultAndType(true, false); + InstructionDesc[OpTypeDeviceEvent].setResultAndType(true, false); + InstructionDesc[OpTypeReserveId].setResultAndType(true, false); + InstructionDesc[OpTypeQueue].setResultAndType(true, false); + InstructionDesc[OpTypePipe].setResultAndType(true, false); + InstructionDesc[OpFunctionEnd].setResultAndType(false, false); + InstructionDesc[OpStore].setResultAndType(false, false); + InstructionDesc[OpImageWrite].setResultAndType(false, false); + InstructionDesc[OpDecorationGroup].setResultAndType(true, false); + InstructionDesc[OpDecorate].setResultAndType(false, false); + InstructionDesc[OpDecorateId].setResultAndType(false, false); + InstructionDesc[OpDecorateStringGOOGLE].setResultAndType(false, false); + InstructionDesc[OpMemberDecorate].setResultAndType(false, false); + InstructionDesc[OpMemberDecorateStringGOOGLE].setResultAndType(false, false); + InstructionDesc[OpGroupDecorate].setResultAndType(false, false); + InstructionDesc[OpGroupMemberDecorate].setResultAndType(false, false); + InstructionDesc[OpName].setResultAndType(false, false); + InstructionDesc[OpMemberName].setResultAndType(false, false); + InstructionDesc[OpString].setResultAndType(true, false); + InstructionDesc[OpLine].setResultAndType(false, false); + InstructionDesc[OpNoLine].setResultAndType(false, false); + InstructionDesc[OpCopyMemory].setResultAndType(false, false); + InstructionDesc[OpCopyMemorySized].setResultAndType(false, false); + InstructionDesc[OpEmitVertex].setResultAndType(false, false); + InstructionDesc[OpEndPrimitive].setResultAndType(false, false); + InstructionDesc[OpEmitStreamVertex].setResultAndType(false, false); + InstructionDesc[OpEndStreamPrimitive].setResultAndType(false, false); + InstructionDesc[OpControlBarrier].setResultAndType(false, false); + 
InstructionDesc[OpMemoryBarrier].setResultAndType(false, false); + InstructionDesc[OpAtomicStore].setResultAndType(false, false); + InstructionDesc[OpLoopMerge].setResultAndType(false, false); + InstructionDesc[OpSelectionMerge].setResultAndType(false, false); + InstructionDesc[OpLabel].setResultAndType(true, false); + InstructionDesc[OpBranch].setResultAndType(false, false); + InstructionDesc[OpBranchConditional].setResultAndType(false, false); + InstructionDesc[OpSwitch].setResultAndType(false, false); + InstructionDesc[OpKill].setResultAndType(false, false); + InstructionDesc[OpReturn].setResultAndType(false, false); + InstructionDesc[OpReturnValue].setResultAndType(false, false); + InstructionDesc[OpUnreachable].setResultAndType(false, false); + InstructionDesc[OpLifetimeStart].setResultAndType(false, false); + InstructionDesc[OpLifetimeStop].setResultAndType(false, false); + InstructionDesc[OpCommitReadPipe].setResultAndType(false, false); + InstructionDesc[OpCommitWritePipe].setResultAndType(false, false); + InstructionDesc[OpGroupCommitWritePipe].setResultAndType(false, false); + InstructionDesc[OpGroupCommitReadPipe].setResultAndType(false, false); + InstructionDesc[OpCaptureEventProfilingInfo].setResultAndType(false, false); + InstructionDesc[OpSetUserEventStatus].setResultAndType(false, false); + InstructionDesc[OpRetainEvent].setResultAndType(false, false); + InstructionDesc[OpReleaseEvent].setResultAndType(false, false); + InstructionDesc[OpGroupWaitEvents].setResultAndType(false, false); + InstructionDesc[OpAtomicFlagClear].setResultAndType(false, false); + InstructionDesc[OpModuleProcessed].setResultAndType(false, false); + InstructionDesc[OpTypeCooperativeMatrixNV].setResultAndType(true, false); + InstructionDesc[OpCooperativeMatrixStoreNV].setResultAndType(false, false); + InstructionDesc[OpBeginInvocationInterlockEXT].setResultAndType(false, false); + InstructionDesc[OpEndInvocationInterlockEXT].setResultAndType(false, false); + + // Specific additional context-dependent operands + + ExecutionModeOperands[ExecutionModeInvocations].push(OperandLiteralNumber, "'Number of <>'"); + + ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'x size'"); + ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'y size'"); + ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'z size'"); + + ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'x size'"); + ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'y size'"); + ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'z size'"); + + ExecutionModeOperands[ExecutionModeOutputVertices].push(OperandLiteralNumber, "'Vertex count'"); + ExecutionModeOperands[ExecutionModeVecTypeHint].push(OperandLiteralNumber, "'Vector type'"); + + DecorationOperands[DecorationStream].push(OperandLiteralNumber, "'Stream Number'"); + DecorationOperands[DecorationLocation].push(OperandLiteralNumber, "'Location'"); + DecorationOperands[DecorationComponent].push(OperandLiteralNumber, "'Component'"); + DecorationOperands[DecorationIndex].push(OperandLiteralNumber, "'Index'"); + DecorationOperands[DecorationBinding].push(OperandLiteralNumber, "'Binding Point'"); + DecorationOperands[DecorationDescriptorSet].push(OperandLiteralNumber, "'Descriptor Set'"); + DecorationOperands[DecorationOffset].push(OperandLiteralNumber, "'Byte Offset'"); + DecorationOperands[DecorationXfbBuffer].push(OperandLiteralNumber, "'XFB Buffer 
Number'"); + DecorationOperands[DecorationXfbStride].push(OperandLiteralNumber, "'XFB Stride'"); + DecorationOperands[DecorationArrayStride].push(OperandLiteralNumber, "'Array Stride'"); + DecorationOperands[DecorationMatrixStride].push(OperandLiteralNumber, "'Matrix Stride'"); + DecorationOperands[DecorationBuiltIn].push(OperandLiteralNumber, "See <>"); + DecorationOperands[DecorationFPRoundingMode].push(OperandFPRoundingMode, "'Floating-Point Rounding Mode'"); + DecorationOperands[DecorationFPFastMathMode].push(OperandFPFastMath, "'Fast-Math Mode'"); + DecorationOperands[DecorationLinkageAttributes].push(OperandLiteralString, "'Name'"); + DecorationOperands[DecorationLinkageAttributes].push(OperandLinkageType, "'Linkage Type'"); + DecorationOperands[DecorationFuncParamAttr].push(OperandFuncParamAttr, "'Function Parameter Attribute'"); + DecorationOperands[DecorationSpecId].push(OperandLiteralNumber, "'Specialization Constant ID'"); + DecorationOperands[DecorationInputAttachmentIndex].push(OperandLiteralNumber, "'Attachment Index'"); + DecorationOperands[DecorationAlignment].push(OperandLiteralNumber, "'Alignment'"); + + OperandClassParams[OperandSource].set(0, SourceString, 0); + OperandClassParams[OperandExecutionModel].set(0, ExecutionModelString, nullptr); + OperandClassParams[OperandAddressing].set(0, AddressingString, nullptr); + OperandClassParams[OperandMemory].set(0, MemoryString, nullptr); + OperandClassParams[OperandExecutionMode].set(ExecutionModeCeiling, ExecutionModeString, ExecutionModeParams); + OperandClassParams[OperandExecutionMode].setOperands(ExecutionModeOperands); + OperandClassParams[OperandStorage].set(0, StorageClassString, nullptr); + OperandClassParams[OperandDimensionality].set(0, DimensionString, nullptr); + OperandClassParams[OperandSamplerAddressingMode].set(0, SamplerAddressingModeString, nullptr); + OperandClassParams[OperandSamplerFilterMode].set(0, SamplerFilterModeString, nullptr); + OperandClassParams[OperandSamplerImageFormat].set(0, ImageFormatString, nullptr); + OperandClassParams[OperandImageChannelOrder].set(0, ImageChannelOrderString, nullptr); + OperandClassParams[OperandImageChannelDataType].set(0, ImageChannelDataTypeString, nullptr); + OperandClassParams[OperandImageOperands].set(ImageOperandsCeiling, ImageOperandsString, ImageOperandsParams, true); + OperandClassParams[OperandFPFastMath].set(0, FPFastMathString, nullptr, true); + OperandClassParams[OperandFPRoundingMode].set(0, FPRoundingModeString, nullptr); + OperandClassParams[OperandLinkageType].set(0, LinkageTypeString, nullptr); + OperandClassParams[OperandFuncParamAttr].set(0, FuncParamAttrString, nullptr); + OperandClassParams[OperandAccessQualifier].set(0, AccessQualifierString, nullptr); + OperandClassParams[OperandDecoration].set(DecorationCeiling, DecorationString, DecorationParams); + OperandClassParams[OperandDecoration].setOperands(DecorationOperands); + OperandClassParams[OperandBuiltIn].set(0, BuiltInString, nullptr); + OperandClassParams[OperandSelect].set(SelectControlCeiling, SelectControlString, SelectionControlParams, true); + OperandClassParams[OperandLoop].set(LoopControlCeiling, LoopControlString, LoopControlParams, true); + OperandClassParams[OperandFunction].set(FunctionControlCeiling, FunctionControlString, FunctionControlParams, true); + OperandClassParams[OperandMemorySemantics].set(0, MemorySemanticsString, nullptr, true); + OperandClassParams[OperandMemoryAccess].set(MemoryAccessCeiling, MemoryAccessString, MemoryAccessParams, true); + 
OperandClassParams[OperandScope].set(0, ScopeString, nullptr); + OperandClassParams[OperandGroupOperation].set(0, GroupOperationString, nullptr); + OperandClassParams[OperandKernelEnqueueFlags].set(0, KernelEnqueueFlagsString, nullptr); + OperandClassParams[OperandKernelProfilingInfo].set(0, KernelProfilingInfoString, nullptr, true); + OperandClassParams[OperandCapability].set(0, CapabilityString, nullptr); + OperandClassParams[OperandOpcode].set(OpCodeMask + 1, OpcodeString, nullptr); + + // set name of operator, an initial set of <id> style operands, and the description + + InstructionDesc[OpSource].operands.push(OperandSource, ""); + InstructionDesc[OpSource].operands.push(OperandLiteralNumber, "'Version'"); + InstructionDesc[OpSource].operands.push(OperandId, "'File'", true); + InstructionDesc[OpSource].operands.push(OperandLiteralString, "'Source'", true); + + InstructionDesc[OpSourceContinued].operands.push(OperandLiteralString, "'Continued Source'"); + + InstructionDesc[OpSourceExtension].operands.push(OperandLiteralString, "'Extension'"); + + InstructionDesc[OpName].operands.push(OperandId, "'Target'"); + InstructionDesc[OpName].operands.push(OperandLiteralString, "'Name'"); + + InstructionDesc[OpMemberName].operands.push(OperandId, "'Type'"); + InstructionDesc[OpMemberName].operands.push(OperandLiteralNumber, "'Member'"); + InstructionDesc[OpMemberName].operands.push(OperandLiteralString, "'Name'"); + + InstructionDesc[OpString].operands.push(OperandLiteralString, "'String'"); + + InstructionDesc[OpLine].operands.push(OperandId, "'File'"); + InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Line'"); + InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Column'"); + + InstructionDesc[OpExtension].operands.push(OperandLiteralString, "'Name'"); + + InstructionDesc[OpExtInstImport].operands.push(OperandLiteralString, "'Name'"); + + InstructionDesc[OpCapability].operands.push(OperandCapability, "'Capability'"); + + InstructionDesc[OpMemoryModel].operands.push(OperandAddressing, ""); + InstructionDesc[OpMemoryModel].operands.push(OperandMemory, ""); + + InstructionDesc[OpEntryPoint].operands.push(OperandExecutionModel, ""); + InstructionDesc[OpEntryPoint].operands.push(OperandId, "'Entry Point'"); + InstructionDesc[OpEntryPoint].operands.push(OperandLiteralString, "'Name'"); + InstructionDesc[OpEntryPoint].operands.push(OperandVariableIds, "'Interface'"); + + InstructionDesc[OpExecutionMode].operands.push(OperandId, "'Entry Point'"); + InstructionDesc[OpExecutionMode].operands.push(OperandExecutionMode, "'Mode'"); + InstructionDesc[OpExecutionMode].operands.push(OperandOptionalLiteral, "See <>"); + + InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Width'"); + InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Signedness'"); + + InstructionDesc[OpTypeFloat].operands.push(OperandLiteralNumber, "'Width'"); + + InstructionDesc[OpTypeVector].operands.push(OperandId, "'Component Type'"); + InstructionDesc[OpTypeVector].operands.push(OperandLiteralNumber, "'Component Count'"); + + InstructionDesc[OpTypeMatrix].operands.push(OperandId, "'Column Type'"); + InstructionDesc[OpTypeMatrix].operands.push(OperandLiteralNumber, "'Column Count'"); + + InstructionDesc[OpTypeImage].operands.push(OperandId, "'Sampled Type'"); + InstructionDesc[OpTypeImage].operands.push(OperandDimensionality, ""); + InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Depth'"); + InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Arrayed'"); +
InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'MS'"); + InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Sampled'"); + InstructionDesc[OpTypeImage].operands.push(OperandSamplerImageFormat, ""); + InstructionDesc[OpTypeImage].operands.push(OperandAccessQualifier, "", true); + + InstructionDesc[OpTypeSampledImage].operands.push(OperandId, "'Image Type'"); + + InstructionDesc[OpTypeArray].operands.push(OperandId, "'Element Type'"); + InstructionDesc[OpTypeArray].operands.push(OperandId, "'Length'"); + + InstructionDesc[OpTypeRuntimeArray].operands.push(OperandId, "'Element Type'"); + + InstructionDesc[OpTypeStruct].operands.push(OperandVariableIds, "'Member 0 type', +\n'member 1 type', +\n..."); + + InstructionDesc[OpTypeOpaque].operands.push(OperandLiteralString, "The name of the opaque type."); + + InstructionDesc[OpTypePointer].operands.push(OperandStorage, ""); + InstructionDesc[OpTypePointer].operands.push(OperandId, "'Type'"); + + InstructionDesc[OpTypeForwardPointer].operands.push(OperandId, "'Pointer Type'"); + InstructionDesc[OpTypeForwardPointer].operands.push(OperandStorage, ""); + + InstructionDesc[OpTypePipe].operands.push(OperandAccessQualifier, "'Qualifier'"); + + InstructionDesc[OpTypeFunction].operands.push(OperandId, "'Return Type'"); + InstructionDesc[OpTypeFunction].operands.push(OperandVariableIds, "'Parameter 0 Type', +\n'Parameter 1 Type', +\n..."); + + InstructionDesc[OpConstant].operands.push(OperandVariableLiterals, "'Value'"); + + InstructionDesc[OpConstantComposite].operands.push(OperandVariableIds, "'Constituents'"); + + InstructionDesc[OpConstantSampler].operands.push(OperandSamplerAddressingMode, ""); + InstructionDesc[OpConstantSampler].operands.push(OperandLiteralNumber, "'Param'"); + InstructionDesc[OpConstantSampler].operands.push(OperandSamplerFilterMode, ""); + + InstructionDesc[OpSpecConstant].operands.push(OperandVariableLiterals, "'Value'"); + + InstructionDesc[OpSpecConstantComposite].operands.push(OperandVariableIds, "'Constituents'"); + + InstructionDesc[OpSpecConstantOp].operands.push(OperandLiteralNumber, "'Opcode'"); + InstructionDesc[OpSpecConstantOp].operands.push(OperandVariableIds, "'Operands'"); + + InstructionDesc[OpVariable].operands.push(OperandStorage, ""); + InstructionDesc[OpVariable].operands.push(OperandId, "'Initializer'", true); + + InstructionDesc[OpFunction].operands.push(OperandFunction, ""); + InstructionDesc[OpFunction].operands.push(OperandId, "'Function Type'"); + + InstructionDesc[OpFunctionCall].operands.push(OperandId, "'Function'"); + InstructionDesc[OpFunctionCall].operands.push(OperandVariableIds, "'Argument 0', +\n'Argument 1', +\n..."); + + InstructionDesc[OpExtInst].operands.push(OperandId, "'Set'"); + InstructionDesc[OpExtInst].operands.push(OperandLiteralNumber, "'Instruction'"); + InstructionDesc[OpExtInst].operands.push(OperandVariableIds, "'Operand 1', +\n'Operand 2', +\n..."); + + InstructionDesc[OpLoad].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpLoad].operands.push(OperandMemoryAccess, "", true); + InstructionDesc[OpLoad].operands.push(OperandLiteralNumber, "", true); + InstructionDesc[OpLoad].operands.push(OperandId, "", true); + + InstructionDesc[OpStore].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpStore].operands.push(OperandId, "'Object'"); + InstructionDesc[OpStore].operands.push(OperandMemoryAccess, "", true); + InstructionDesc[OpStore].operands.push(OperandLiteralNumber, "", true); + InstructionDesc[OpStore].operands.push(OperandId, 
"", true); + + InstructionDesc[OpPhi].operands.push(OperandVariableIds, "'Variable, Parent, ...'"); + + InstructionDesc[OpDecorate].operands.push(OperandId, "'Target'"); + InstructionDesc[OpDecorate].operands.push(OperandDecoration, ""); + InstructionDesc[OpDecorate].operands.push(OperandVariableLiterals, "See <>."); + + InstructionDesc[OpDecorateId].operands.push(OperandId, "'Target'"); + InstructionDesc[OpDecorateId].operands.push(OperandDecoration, ""); + InstructionDesc[OpDecorateId].operands.push(OperandVariableIds, "See <>."); + + InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandId, "'Target'"); + InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandDecoration, ""); + InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandLiteralString, "'Literal String'"); + + InstructionDesc[OpMemberDecorate].operands.push(OperandId, "'Structure Type'"); + InstructionDesc[OpMemberDecorate].operands.push(OperandLiteralNumber, "'Member'"); + InstructionDesc[OpMemberDecorate].operands.push(OperandDecoration, ""); + InstructionDesc[OpMemberDecorate].operands.push(OperandVariableLiterals, "See <>."); + + InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandId, "'Structure Type'"); + InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandLiteralNumber, "'Member'"); + InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandDecoration, ""); + InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandLiteralString, "'Literal String'"); + + InstructionDesc[OpGroupDecorate].operands.push(OperandId, "'Decoration Group'"); + InstructionDesc[OpGroupDecorate].operands.push(OperandVariableIds, "'Targets'"); + + InstructionDesc[OpGroupMemberDecorate].operands.push(OperandId, "'Decoration Group'"); + InstructionDesc[OpGroupMemberDecorate].operands.push(OperandVariableIdLiteral, "'Targets'"); + + InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Vector'"); + InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Index'"); + + InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Vector'"); + InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Component'"); + InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Index'"); + + InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 1'"); + InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 2'"); + InstructionDesc[OpVectorShuffle].operands.push(OperandVariableLiterals, "'Components'"); + + InstructionDesc[OpCompositeConstruct].operands.push(OperandVariableIds, "'Constituents'"); + + InstructionDesc[OpCompositeExtract].operands.push(OperandId, "'Composite'"); + InstructionDesc[OpCompositeExtract].operands.push(OperandVariableLiterals, "'Indexes'"); + + InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Object'"); + InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Composite'"); + InstructionDesc[OpCompositeInsert].operands.push(OperandVariableLiterals, "'Indexes'"); + + InstructionDesc[OpCopyObject].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Target'"); + InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Source'"); + InstructionDesc[OpCopyMemory].operands.push(OperandMemoryAccess, "", true); + + InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Target'"); + InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Source'"); + InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Size'"); + 
InstructionDesc[OpCopyMemorySized].operands.push(OperandMemoryAccess, "", true); + + InstructionDesc[OpSampledImage].operands.push(OperandId, "'Image'"); + InstructionDesc[OpSampledImage].operands.push(OperandId, "'Sampler'"); + + InstructionDesc[OpImage].operands.push(OperandId, "'Sampled Image'"); + + InstructionDesc[OpImageRead].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageRead].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageRead].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageRead].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageWrite].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageWrite].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageWrite].operands.push(OperandId, "'Texel'"); + InstructionDesc[OpImageWrite].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageWrite].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'"); + 
InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageFetch].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageFetch].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageFetch].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageFetch].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageGather].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageGather].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageGather].operands.push(OperandId, "'Component'"); + InstructionDesc[OpImageGather].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageGather].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageDrefGather].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageDrefGather].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true); + 
InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseFetch].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseFetch].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Component'"); + InstructionDesc[OpImageSparseGather].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseGather].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseRead].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseRead].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseTexelsResident].operands.push(OperandId, "'Resident Code'"); + + 
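Every sampling, fetch, and gather entry above closes with the same optional tail: an OperandImageOperands mask and an OperandVariableIds run, each pushed with the trailing optional flag set to true. A minimal sketch of how such a table can drive operand decoding — using hypothetical stand-in types, not glslang's actual classes:

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-ins for the operand machinery these calls populate.
enum OperandClass { OperandId, OperandImageOperands, OperandVariableIds };

struct Operand {
    OperandClass cls;
    std::string name;
    bool optional;
};

// Walk one instruction's operand words against its table entry:
// required operands must be present; an absent optional tail is fine.
static bool readOperands(const std::vector<Operand>& table, const std::vector<unsigned>& words)
{
    size_t w = 0;
    for (const Operand& op : table) {
        if (w >= words.size()) {
            if (!op.optional)
                return false;   // a required operand is missing
            break;              // optional tail absent: done
        }
        if (op.cls == OperandVariableIds)
            w = words.size();   // variable-length run swallows the remaining words
        else
            ++w;                // every other class consumes exactly one word here
    }
    return w == words.size();   // reject leftover words the table cannot explain
}

int main()
{
    // Mirrors the OpImageSampleImplicitLod entry: two required ids,
    // then an optional image-operands mask plus variable ids.
    std::vector<Operand> table = {
        { OperandId, "'Sampled Image'", false },
        { OperandId, "'Coordinate'", false },
        { OperandImageOperands, "", true },
        { OperandVariableIds, "", true },
    };
    std::printf("%d\n", (int)readOperands(table, { 5, 6 }));       // 1: optional tail absent
    std::printf("%d\n", (int)readOperands(table, { 5, 6, 2, 7 })); // 1: mask plus one extra id
    std::printf("%d\n", (int)readOperands(table, { 5 }));          // 0: coordinate missing
}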
InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Level of Detail'"); + + InstructionDesc[OpImageQuerySize].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Coordinate'"); + + InstructionDesc[OpImageQueryLevels].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpImageQuerySamples].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpImageQueryFormat].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpImageQueryOrder].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpAccessChain].operands.push(OperandId, "'Base'"); + InstructionDesc[OpAccessChain].operands.push(OperandVariableIds, "'Indexes'"); + + InstructionDesc[OpInBoundsAccessChain].operands.push(OperandId, "'Base'"); + InstructionDesc[OpInBoundsAccessChain].operands.push(OperandVariableIds, "'Indexes'"); + + InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Base'"); + InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Element'"); + InstructionDesc[OpPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'"); + + InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Base'"); + InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Element'"); + InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'"); + + InstructionDesc[OpSNegate].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpFNegate].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpNot].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpAny].operands.push(OperandId, "'Vector'"); + + InstructionDesc[OpAll].operands.push(OperandId, "'Vector'"); + + InstructionDesc[OpConvertFToU].operands.push(OperandId, "'Float Value'"); + + InstructionDesc[OpConvertFToS].operands.push(OperandId, "'Float Value'"); + + InstructionDesc[OpConvertSToF].operands.push(OperandId, "'Signed Value'"); + + InstructionDesc[OpConvertUToF].operands.push(OperandId, "'Unsigned Value'"); + + InstructionDesc[OpUConvert].operands.push(OperandId, "'Unsigned Value'"); + + InstructionDesc[OpSConvert].operands.push(OperandId, "'Signed Value'"); + + InstructionDesc[OpFConvert].operands.push(OperandId, "'Float Value'"); + + InstructionDesc[OpSatConvertSToU].operands.push(OperandId, "'Signed Value'"); + + InstructionDesc[OpSatConvertUToS].operands.push(OperandId, "'Unsigned Value'"); + + InstructionDesc[OpConvertPtrToU].operands.push(OperandId, "'Pointer'"); + + InstructionDesc[OpConvertUToPtr].operands.push(OperandId, "'Integer Value'"); + + InstructionDesc[OpPtrCastToGeneric].operands.push(OperandId, "'Pointer'"); + + InstructionDesc[OpGenericCastToPtr].operands.push(OperandId, "'Pointer'"); + + InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandStorage, "'Storage'"); + + InstructionDesc[OpGenericPtrMemSemantics].operands.push(OperandId, "'Pointer'"); + + InstructionDesc[OpBitcast].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpQuantizeToF16].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpTranspose].operands.push(OperandId, "'Matrix'"); + + InstructionDesc[OpCopyLogical].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpIsNan].operands.push(OperandId, "'x'"); + + InstructionDesc[OpIsInf].operands.push(OperandId, "'x'"); + + 
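Each push call appends one operand description in the order the operands appear in the binary encoding, so InstructionDesc reduces to an opcode-indexed array of ordered operand lists. A rough reconstruction of the shape these calls imply — hypothetical names, since the real declarations live elsewhere in the vendored SPIRV sources:

#include <string>
#include <vector>

enum OperandClass { OperandId, OperandLiteralNumber, OperandVariableIds };

// One instruction's ordered operand list; push mirrors the calls above,
// with the optional flag defaulting to false as in the listing.
class OperandParameters {
public:
    void push(OperandClass cls, const std::string& name, bool optional = false)
    {
        classes.push_back(cls);
        names.push_back(name);
        optionality.push_back(optional);
    }
    int getNum() const { return static_cast<int>(classes.size()); }

    std::vector<OperandClass> classes;
    std::vector<std::string> names;
    std::vector<bool> optionality;
};

struct InstructionParameters {
    OperandParameters operands;
};

enum Op { OpAccessChain, OpCount };
static InstructionParameters InstructionDesc[OpCount];

int main()
{
    // Same idiom as the listing: 'Base' first, then a variable-length index run.
    InstructionDesc[OpAccessChain].operands.push(OperandId, "'Base'");
    InstructionDesc[OpAccessChain].operands.push(OperandVariableIds, "'Indexes'");
    return InstructionDesc[OpAccessChain].operands.getNum() == 2 ? 0 : 1;
}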
InstructionDesc[OpIsFinite].operands.push(OperandId, "'x'"); + + InstructionDesc[OpIsNormal].operands.push(OperandId, "'x'"); + + InstructionDesc[OpSignBitSet].operands.push(OperandId, "'x'"); + + InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'x'"); + InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'y'"); + + InstructionDesc[OpOrdered].operands.push(OperandId, "'x'"); + InstructionDesc[OpOrdered].operands.push(OperandId, "'y'"); + + InstructionDesc[OpUnordered].operands.push(OperandId, "'x'"); + InstructionDesc[OpUnordered].operands.push(OperandId, "'y'"); + + InstructionDesc[OpArrayLength].operands.push(OperandId, "'Structure'"); + InstructionDesc[OpArrayLength].operands.push(OperandLiteralNumber, "'Array member'"); + + InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpISub].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpISub].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Vector'"); + InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Scalar'"); + + InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Matrix'"); + InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Scalar'"); + + InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Vector'"); + InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Matrix'"); + + InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Matrix'"); + InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Vector'"); + + InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'LeftMatrix'"); + InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'RightMatrix'"); + + InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 1'"); + 
InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 2'"); + + InstructionDesc[OpDot].operands.push(OperandId, "'Vector 1'"); + InstructionDesc[OpDot].operands.push(OperandId, "'Vector 2'"); + + InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Base'"); + InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Shift'"); + + InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Base'"); + InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Shift'"); + + InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Base'"); + InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Shift'"); + + InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpLogicalNot].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Base'"); + InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Insert'"); + InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Offset'"); + InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Count'"); + + InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Base'"); + InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Offset'"); + InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Count'"); + + InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Base'"); + InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Offset'"); + InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Count'"); + + InstructionDesc[OpBitReverse].operands.push(OperandId, "'Base'"); + + InstructionDesc[OpBitCount].operands.push(OperandId, "'Base'"); + + InstructionDesc[OpSelect].operands.push(OperandId, "'Condition'"); + InstructionDesc[OpSelect].operands.push(OperandId, "'Object 1'"); + InstructionDesc[OpSelect].operands.push(OperandId, "'Object 2'"); + + InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 1'"); + 
InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpDPdx].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdy].operands.push(OperandId, "'P'"); + + InstructionDesc[OpFwidth].operands.push(OperandId, "'P'"); + + 
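The arithmetic and relational entries in this stretch are completely uniform: two required id operands named 'Operand 1' and 'Operand 2'. The upstream file spells each pair out; a loop over the uniform opcodes would populate the same table, as this hypothetical condensation shows (counting pushes only, to keep the sketch short):

#include <initializer_list>
#include <string>

enum OperandClass { OperandId };
enum Op { OpIAdd, OpFAdd, OpISub, OpFSub, OpIMul, OpFMul, OpCount };

// A stub operand list that only records how many operands were pushed.
struct OperandList {
    void push(OperandClass, const std::string&) { ++num; }
    int num = 0;
};

struct Entry {
    OperandList operands;
};

static Entry InstructionDesc[OpCount];

int main()
{
    // The listing repeats the same two pushes for every binary opcode;
    // a loop over the uniform ones is behaviorally identical.
    for (Op op : { OpIAdd, OpFAdd, OpISub, OpFSub, OpIMul, OpFMul }) {
        InstructionDesc[op].operands.push(OperandId, "'Operand 1'");
        InstructionDesc[op].operands.push(OperandId, "'Operand 2'");
    }
    return InstructionDesc[OpFMul].operands.num == 2 ? 0 : 1;
}

Spelling each pair out, as upstream does, keeps every entry independently greppable; the loop form trades that for brevity.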
InstructionDesc[OpDPdxFine].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdyFine].operands.push(OperandId, "'P'"); + + InstructionDesc[OpFwidthFine].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdxCoarse].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdyCoarse].operands.push(OperandId, "'P'"); + + InstructionDesc[OpFwidthCoarse].operands.push(OperandId, "'P'"); + + InstructionDesc[OpEmitStreamVertex].operands.push(OperandId, "'Stream'"); + + InstructionDesc[OpEndStreamPrimitive].operands.push(OperandId, "'Stream'"); + + InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Memory'"); + InstructionDesc[OpControlBarrier].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpMemoryBarrier].operands.push(OperandScope, "'Memory'"); + InstructionDesc[OpMemoryBarrier].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Sample'"); + + InstructionDesc[OpAtomicLoad].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicLoad].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicLoad].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicStore].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicStore].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicExchange].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicExchange].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Equal'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Unequal'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Value'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Comparator'"); + + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Equal'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Unequal'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Value'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Comparator'"); + + InstructionDesc[OpAtomicIIncrement].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicIIncrement].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicIIncrement].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpAtomicIDecrement].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicIDecrement].operands.push(OperandScope, "'Scope'"); + 
InstructionDesc[OpAtomicIDecrement].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicIAdd].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicIAdd].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicISub].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicISub].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicUMin].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicUMin].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicUMax].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicUMax].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicSMin].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicSMin].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicSMax].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicSMax].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicAnd].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicAnd].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicOr].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicOr].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicXor].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicXor].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpAtomicFlagClear].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicFlagClear].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicFlagClear].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Merge Block'"); + InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Continue Target'"); + InstructionDesc[OpLoopMerge].operands.push(OperandLoop, ""); + 
InstructionDesc[OpLoopMerge].operands.push(OperandOptionalLiteral, ""); + + InstructionDesc[OpSelectionMerge].operands.push(OperandId, "'Merge Block'"); + InstructionDesc[OpSelectionMerge].operands.push(OperandSelect, ""); + + InstructionDesc[OpBranch].operands.push(OperandId, "'Target Label'"); + + InstructionDesc[OpBranchConditional].operands.push(OperandId, "'Condition'"); + InstructionDesc[OpBranchConditional].operands.push(OperandId, "'True Label'"); + InstructionDesc[OpBranchConditional].operands.push(OperandId, "'False Label'"); + InstructionDesc[OpBranchConditional].operands.push(OperandVariableLiterals, "'Branch weights'"); + + InstructionDesc[OpSwitch].operands.push(OperandId, "'Selector'"); + InstructionDesc[OpSwitch].operands.push(OperandId, "'Default'"); + InstructionDesc[OpSwitch].operands.push(OperandVariableLiteralId, "'Target'"); + + + InstructionDesc[OpReturnValue].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpLifetimeStart].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpLifetimeStart].operands.push(OperandLiteralNumber, "'Size'"); + + InstructionDesc[OpLifetimeStop].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpLifetimeStop].operands.push(OperandLiteralNumber, "'Size'"); + + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Destination'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Source'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Num Elements'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Stride'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Event'"); + + InstructionDesc[OpGroupWaitEvents].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Num Events'"); + InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Events List'"); + + InstructionDesc[OpGroupAll].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupAll].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpGroupAny].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupAny].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpGroupBroadcast].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'Value'"); + InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'LocalId'"); + + InstructionDesc[OpGroupIAdd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupIAdd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupIAdd].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupFAdd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFAdd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFAdd].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupUMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupUMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupUMin].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupSMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupSMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupSMin].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupFMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFMin].operands.push(OperandGroupOperation, "'Operation'"); + 
InstructionDesc[OpGroupFMin].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupUMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupUMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupUMax].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupSMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupSMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupSMax].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupFMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFMax].operands.push(OperandId, "X"); + + InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Index'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Index'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Num Packets'"); + InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Num Packets'"); + InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Size'"); + 
InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpIsValidReserveId].operands.push(OperandId, "'Reserve Id'"); + + InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Num Packets'"); + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Num Packets'"); + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkSize'"); + InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'LocalWorkSize'"); + InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkOffset'"); + + InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Event'"); + InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Profiling Info'"); + InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Event'"); + InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Status'"); + + InstructionDesc[OpIsValidEvent].operands.push(OperandId, "'Event'"); + + InstructionDesc[OpRetainEvent].operands.push(OperandId, "'Event'"); + + InstructionDesc[OpReleaseEvent].operands.push(OperandId, "'Event'"); + + InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param'"); + InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Size'"); +
InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Align'"); + + InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param'"); + InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Align'"); + + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'ND Range'"); + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param'"); + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Align'"); + + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'ND Range'"); + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param'"); + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Align'"); + + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Queue'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Flags'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'ND Range'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Num Events'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Wait Events'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Ret Event'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Align'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandVariableIds, "'Local Size'"); + + InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Queue'"); + InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Num Events'"); + InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Wait Events'"); + InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Ret Event'"); + + InstructionDesc[OpGroupNonUniformElect].operands.push(OperandScope, "'Execution'"); + + InstructionDesc[OpGroupNonUniformAll].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformAll].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformAny].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformAny].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformAllEqual].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformAllEqual].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandId, "ID"); + + InstructionDesc[OpGroupNonUniformBroadcastFirst].operands.push(OperandScope, "'Execution'"); +
InstructionDesc[OpGroupNonUniformBroadcastFirst].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBallot].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallot].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformInverseBallot].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformInverseBallot].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandId, "Bit"); + + InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBallotFindLSB].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallotFindLSB].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBallotFindMSB].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallotFindMSB].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandId, "'Id'"); + + InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandId, "Mask"); + + InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandId, "Offset"); + + InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandId, "Offset"); + + InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandGroupOperation, "'Operation'"); + 
InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandGroupOperation, "'Operation'"); + 
InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandId, "'Id'"); + + InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandLiteralNumber, "'Direction'"); + + InstructionDesc[OpSubgroupBallotKHR].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpSubgroupFirstInvocationKHR].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpSubgroupAllKHR].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpSubgroupAllKHR].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Value'"); + InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Index'"); + + InstructionDesc[OpModuleProcessed].operands.push(OperandLiteralString, "'process'"); + + InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandScope, "'Execution'"); + 
InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Image'"); + InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Coordinate'"); + + InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'"); + InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'"); + + InstructionDesc[OpGroupNonUniformPartitionNV].operands.push(OperandId, "X"); + + InstructionDesc[OpTypeAccelerationStructureKHR].setResultAndType(true, false); + + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'NV Acceleration Structure'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Ray Flags'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Cull Mask'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'SBT Record Offset'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'SBT Record Stride'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Miss Index'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Ray Origin'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'TMin'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Ray Direction'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'TMax'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Payload'"); + InstructionDesc[OpTraceRayKHR].setResultAndType(false, false); + + InstructionDesc[OpReportIntersectionKHR].operands.push(OperandId, "'Hit Parameter'"); + InstructionDesc[OpReportIntersectionKHR].operands.push(OperandId, "'Hit Kind'"); + + InstructionDesc[OpIgnoreIntersectionKHR].setResultAndType(false, false); + + InstructionDesc[OpTerminateRayKHR].setResultAndType(false, false); + + InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "SBT Record Index"); + InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "CallableData ID"); + InstructionDesc[OpExecuteCallableKHR].setResultAndType(false, false); + + // Ray Query + InstructionDesc[OpTypeAccelerationStructureKHR].setResultAndType(true, false); + InstructionDesc[OpTypeRayQueryProvisionalKHR].setResultAndType(true, false); + + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'AccelerationS'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'RayFlags'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'CullMask'"); + 
InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'Origin'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'Tmin'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'Direction'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'Tmax'"); + InstructionDesc[OpRayQueryInitializeKHR].setResultAndType(false, false); + + InstructionDesc[OpRayQueryTerminateKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryTerminateKHR].setResultAndType(false, false); + + InstructionDesc[OpRayQueryGenerateIntersectionKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGenerateIntersectionKHR].operands.push(OperandId, "'THit'"); + InstructionDesc[OpRayQueryGenerateIntersectionKHR].setResultAndType(false, false); + + InstructionDesc[OpRayQueryConfirmIntersectionKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryConfirmIntersectionKHR].setResultAndType(false, false); + + InstructionDesc[OpRayQueryProceedKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryProceedKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionTypeKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionTypeKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionTypeKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetRayTMinKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetRayTMinKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetRayFlagsKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetRayFlagsKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionTKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionTKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionTKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionInstanceCustomIndexKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceCustomIndexKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceCustomIndexKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionInstanceIdKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceIdKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceIdKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionGeometryIndexKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionGeometryIndexKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionGeometryIndexKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionPrimitiveIndexKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionPrimitiveIndexKHR].operands.push(OperandId, "'Committed'"); + 
InstructionDesc[OpRayQueryGetIntersectionPrimitiveIndexKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionBarycentricsKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionBarycentricsKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionBarycentricsKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionFrontFaceKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionFrontFaceKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionFrontFaceKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionCandidateAABBOpaqueKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionCandidateAABBOpaqueKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionObjectRayDirectionKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionObjectRayDirectionKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionObjectRayDirectionKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionObjectRayOriginKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionObjectRayOriginKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionObjectRayOriginKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetWorldRayDirectionKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetWorldRayDirectionKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetWorldRayOriginKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetWorldRayOriginKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionObjectToWorldKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionObjectToWorldKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionObjectToWorldKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionWorldToObjectKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionWorldToObjectKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionWorldToObjectKHR].setResultAndType(true, true); + + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Granularity'"); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coarse'"); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Index Offset'"); + InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Packed Indices'"); + + InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Component Type'"); + InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Scope'"); + InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Rows'"); + InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Columns'"); + + 
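// Illustrative sketch, editorial and not upstream glslang code: these
// InstructionDesc entries are what the SPIR-V disassembler walks when it
// prints an instruction's operands. A minimal consumer, assuming
// spv::Parameterize() has already filled the tables:
//
//     const spv::OperandParameters& ops =
//         spv::InstructionDesc[spv::OpTraceRayKHR].operands;
//     for (int i = 0; i < ops.getNum(); ++i)
//         printf("%s%s\n", ops.getDesc(i), ops.isOptional(i) ? " (optional)" : "");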
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Stride'"); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Column Major'"); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandMemoryAccess, "'Memory Access'"); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandLiteralNumber, "", true); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "", true); + + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Object'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Stride'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Column Major'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandMemoryAccess, "'Memory Access'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandLiteralNumber, "", true); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "", true); + + InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'A'"); + InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'B'"); + InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'C'"); + + InstructionDesc[OpCooperativeMatrixLengthNV].operands.push(OperandId, "'Type'"); + + InstructionDesc[OpDemoteToHelperInvocationEXT].setResultAndType(false, false); + + InstructionDesc[OpReadClockKHR].operands.push(OperandScope, "'Scope'"); +} + +}; // end spv namespace diff --git a/dep/glslang/SPIRV/doc.h b/dep/glslang/SPIRV/doc.h new file mode 100644 index 000000000..293256a2c --- /dev/null +++ b/dep/glslang/SPIRV/doc.h @@ -0,0 +1,258 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Parameterize the SPIR-V enumerants. 
+// + +#pragma once + +#include "spirv.hpp" + +#include <vector> + +namespace spv { + +// Fill in all the parameters +void Parameterize(); + +// Return the English names of all the enums. +const char* SourceString(int); +const char* AddressingString(int); +const char* MemoryString(int); +const char* ExecutionModelString(int); +const char* ExecutionModeString(int); +const char* StorageClassString(int); +const char* DecorationString(int); +const char* BuiltInString(int); +const char* DimensionString(int); +const char* SelectControlString(int); +const char* LoopControlString(int); +const char* FunctionControlString(int); +const char* SamplerAddressingModeString(int); +const char* SamplerFilterModeString(int); +const char* ImageFormatString(int); +const char* ImageChannelOrderString(int); +const char* ImageChannelTypeString(int); +const char* ImageChannelDataTypeString(int type); +const char* ImageOperandsString(int format); +const char* ImageOperands(int); +const char* FPFastMathString(int); +const char* FPRoundingModeString(int); +const char* LinkageTypeString(int); +const char* FuncParamAttrString(int); +const char* AccessQualifierString(int); +const char* MemorySemanticsString(int); +const char* MemoryAccessString(int); +const char* ExecutionScopeString(int); +const char* GroupOperationString(int); +const char* KernelEnqueueFlagsString(int); +const char* KernelProfilingInfoString(int); +const char* CapabilityString(int); +const char* OpcodeString(int); +const char* ScopeString(int mem); + +// For grouping opcodes into subsections +enum OpcodeClass { + OpClassMisc, + OpClassDebug, + OpClassAnnotate, + OpClassExtension, + OpClassMode, + OpClassType, + OpClassConstant, + OpClassMemory, + OpClassFunction, + OpClassImage, + OpClassConvert, + OpClassComposite, + OpClassArithmetic, + OpClassBit, + OpClassRelationalLogical, + OpClassDerivative, + OpClassFlowControl, + OpClassAtomic, + OpClassPrimitive, + OpClassBarrier, + OpClassGroup, + OpClassDeviceSideEnqueue, + OpClassPipe, + + OpClassCount, + OpClassMissing // all instructions start out as missing +}; + +// For parameterizing operands. +enum OperandClass { + OperandNone, + OperandId, + OperandVariableIds, + OperandOptionalLiteral, + OperandOptionalLiteralString, + OperandVariableLiterals, + OperandVariableIdLiteral, + OperandVariableLiteralId, + OperandLiteralNumber, + OperandLiteralString, + OperandSource, + OperandExecutionModel, + OperandAddressing, + OperandMemory, + OperandExecutionMode, + OperandStorage, + OperandDimensionality, + OperandSamplerAddressingMode, + OperandSamplerFilterMode, + OperandSamplerImageFormat, + OperandImageChannelOrder, + OperandImageChannelDataType, + OperandImageOperands, + OperandFPFastMath, + OperandFPRoundingMode, + OperandLinkageType, + OperandAccessQualifier, + OperandFuncParamAttr, + OperandDecoration, + OperandBuiltIn, + OperandSelect, + OperandLoop, + OperandFunction, + OperandMemorySemantics, + OperandMemoryAccess, + OperandScope, + OperandGroupOperation, + OperandKernelEnqueueFlags, + OperandKernelProfilingInfo, + OperandCapability, + + OperandOpcode, + + OperandCount +}; + +// Any specific enum can have a set of capabilities that allow it: +typedef std::vector<Capability> EnumCaps;
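// Illustrative usage, editorial and not part of the upstream header: the
// *String helpers map raw enum values to the spec's English names, e.g.
//
//     spv::Parameterize();  // fill the instruction/operand tables once
//     const char* op = spv::OpcodeString(spv::OpNop);                      // "OpNop"
//     const char* sc = spv::StorageClassString(spv::StorageClassUniform);  // "Uniform"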
+// Parameterize a set of operands with their OperandClass(es) and descriptions. +class OperandParameters { +public: + OperandParameters() { } + void push(OperandClass oc, const char* d, bool opt = false) + { + opClass.push_back(oc); + desc.push_back(d); + optional.push_back(opt); + } + void setOptional(); + OperandClass getClass(int op) const { return opClass[op]; } + const char* getDesc(int op) const { return desc[op]; } + bool isOptional(int op) const { return optional[op]; } + int getNum() const { return (int)opClass.size(); } + +protected: + std::vector<OperandClass> opClass; + std::vector<const char*> desc; + std::vector<bool> optional; +}; + +// Parameterize an enumerant +class EnumParameters { +public: + EnumParameters() : desc(0) { } + const char* desc; +}; + +// Parameterize a set of enumerants that form an enum +class EnumDefinition : public EnumParameters { +public: + EnumDefinition() : + ceiling(0), bitmask(false), getName(0), enumParams(0), operandParams(0) { } + void set(int ceil, const char* (*name)(int), EnumParameters* ep, bool mask = false) + { + ceiling = ceil; + getName = name; + bitmask = mask; + enumParams = ep; + } + void setOperands(OperandParameters* op) { operandParams = op; } + int ceiling; // ceiling of enumerants + bool bitmask; // true if these enumerants combine into a bitmask + const char* (*getName)(int); // a function that returns the name for each enumerant value (or shift) + EnumParameters* enumParams; // parameters for each individual enumerant + OperandParameters* operandParams; // sets of operands +}; + +// Parameterize an instruction's logical format, including its known set of operands, +// per OperandParameters above. +class InstructionParameters { +public: + InstructionParameters() : + opDesc("TBD"), + opClass(OpClassMissing), + typePresent(true), // most normal, only exceptions have to be spelled out + resultPresent(true) // most normal, only exceptions have to be spelled out + { } + + void setResultAndType(bool r, bool t) + { + resultPresent = r; + typePresent = t; + } + + bool hasResult() const { return resultPresent != 0; } + bool hasType() const { return typePresent != 0; } + + const char* opDesc; + OpcodeClass opClass; + OperandParameters operands; + +protected: + int typePresent : 1; + int resultPresent : 1; +}; + +// The set of objects that hold all the instruction/operand +// parameterization information. +extern InstructionParameters InstructionDesc[]; + +// These hold definitions of the enumerants used for operands +extern EnumDefinition OperandClassParams[]; + +const char* GetOperandDesc(OperandClass operand); +void PrintImmediateRow(int imm, const char* name, const EnumParameters* enumParams, bool caps, bool hex = false); +const char* AccessQualifierString(int attr); + +void PrintOperands(const OperandParameters& operands, int reservedOperands); + +} // end namespace spv diff --git a/dep/glslang/SPIRV/hex_float.h b/dep/glslang/SPIRV/hex_float.h new file mode 100644 index 000000000..8be8e9f7e --- /dev/null +++ b/dep/glslang/SPIRV/hex_float.h @@ -0,0 +1,1078 @@ +// Copyright (c) 2015-2016 The Khronos Group Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIBSPIRV_UTIL_HEX_FLOAT_H_ +#define LIBSPIRV_UTIL_HEX_FLOAT_H_ + +#include <cassert> +#include <cctype> +#include <cmath> +#include <cstdint> +#include <iomanip> +#include <limits> +#include <sstream> + +#if defined(_MSC_VER) && _MSC_VER < 1800 +namespace std { +bool isnan(double f) +{ + return ::_isnan(f) != 0; +} +bool isinf(double f) +{ + return ::_finite(f) == 0; +} +} +#endif + +#include "bitutils.h" + +namespace spvutils { + +class Float16 { + public: + Float16(uint16_t v) : val(v) {} + Float16() {} + static bool isNan(const Float16& val) { + return ((val.val & 0x7C00) == 0x7C00) && ((val.val & 0x3FF) != 0); + } + // Returns true if the given value is any kind of infinity. + static bool isInfinity(const Float16& val) { + return ((val.val & 0x7C00) == 0x7C00) && ((val.val & 0x3FF) == 0); + } + Float16(const Float16& other) { val = other.val; } + uint16_t get_value() const { return val; } + + // Returns the maximum normal value. + static Float16 max() { return Float16(0x7bff); } + // Returns the lowest normal value. + static Float16 lowest() { return Float16(0xfbff); } + + private: + uint16_t val; +}; + +// To specialize this type, you must override uint_type to define +// an unsigned integer that can fit your floating point type. +// You must also add a isNan function that returns true if +// a value is Nan. +template <typename T> +struct FloatProxyTraits { + typedef void uint_type; +}; + +template <> +struct FloatProxyTraits<float> { + typedef uint32_t uint_type; + static bool isNan(float f) { return std::isnan(f); } + // Returns true if the given value is any kind of infinity. + static bool isInfinity(float f) { return std::isinf(f); } + // Returns the maximum normal value. + static float max() { return std::numeric_limits<float>::max(); } + // Returns the lowest normal value. + static float lowest() { return std::numeric_limits<float>::lowest(); } +}; + +template <> +struct FloatProxyTraits<double> { + typedef uint64_t uint_type; + static bool isNan(double f) { return std::isnan(f); } + // Returns true if the given value is any kind of infinity. + static bool isInfinity(double f) { return std::isinf(f); } + // Returns the maximum normal value. + static double max() { return std::numeric_limits<double>::max(); } + // Returns the lowest normal value. + static double lowest() { return std::numeric_limits<double>::lowest(); } +}; + +template <> +struct FloatProxyTraits<Float16> { + typedef uint16_t uint_type; + static bool isNan(Float16 f) { return Float16::isNan(f); } + // Returns true if the given value is any kind of infinity. + static bool isInfinity(Float16 f) { return Float16::isInfinity(f); } + // Returns the maximum normal value. + static Float16 max() { return Float16::max(); } + // Returns the lowest normal value. + static Float16 lowest() { return Float16::lowest(); } +}; + +// Since copying a floating point number (especially if it is NaN) +// does not guarantee that bits are preserved, this class lets us +// store the type and use it as a float when necessary. +template <typename T> +class FloatProxy { + public: + typedef typename FloatProxyTraits<T>::uint_type uint_type; + + // Since this is to act similar to the normal floats, + // do not initialize the data by default. + FloatProxy() {} + + // Intentionally non-explicit. This is a proxy type so + // implicit conversions allow us to use it more transparently. + FloatProxy(T val) { data_ = BitwiseCast<uint_type>(val); } + + // Intentionally non-explicit. This is a proxy type so + // implicit conversions allow us to use it more transparently.
+ FloatProxy(uint_type val) { data_ = val; } + + // This is helpful to have and is guaranteed not to stomp bits. + FloatProxy operator-() const { + return static_cast<uint_type>(data_ ^ + (uint_type(0x1) << (sizeof(T) * 8 - 1))); + } + + // Returns the data as a floating point value. + T getAsFloat() const { return BitwiseCast<T>(data_); } + + // Returns the raw data. + uint_type data() const { return data_; } + + // Returns true if the value represents any type of NaN. + bool isNan() { return FloatProxyTraits<T>::isNan(getAsFloat()); } + // Returns true if the value represents any type of infinity. + bool isInfinity() { return FloatProxyTraits<T>::isInfinity(getAsFloat()); } + + // Returns the maximum normal value. + static FloatProxy max() { + return FloatProxy(FloatProxyTraits<T>::max()); + } + // Returns the lowest normal value. + static FloatProxy lowest() { + return FloatProxy(FloatProxyTraits<T>::lowest()); + } + + private: + uint_type data_; +}; + +template <typename T> +bool operator==(const FloatProxy<T>& first, const FloatProxy<T>& second) { + return first.data() == second.data(); +} + +// Reads a FloatProxy value as a normal float from a stream. +template <typename T> +std::istream& operator>>(std::istream& is, FloatProxy<T>& value) { + T float_val; + is >> float_val; + value = FloatProxy<T>(float_val); + return is; +} + +// This is an example traits. It is not meant to be used in practice, but will +// be the default for any non-specialized type. +template <typename T> +struct HexFloatTraits { + // Integer type that can store this hex-float. + typedef void uint_type; + // Signed integer type that can store this hex-float. + typedef void int_type; + // The numerical type that this HexFloat represents. + typedef void underlying_type; + // The type needed to construct the underlying type. + typedef void native_type; + // The number of bits that are actually relevant in the uint_type. + // This allows us to deal with, for example, 24-bit values in a 32-bit + // integer. + static const uint32_t num_used_bits = 0; + // Number of bits that represent the exponent. + static const uint32_t num_exponent_bits = 0; + // Number of bits that represent the fractional part. + static const uint32_t num_fraction_bits = 0; + // The bias of the exponent. (How much we need to subtract from the stored + // value to get the correct value.) + static const uint32_t exponent_bias = 0; +}; + +// Traits for IEEE float. +// 1 sign bit, 8 exponent bits, 23 fractional bits. +template <> +struct HexFloatTraits<FloatProxy<float>> { + typedef uint32_t uint_type; + typedef int32_t int_type; + typedef FloatProxy<float> underlying_type; + typedef float native_type; + static const uint_type num_used_bits = 32; + static const uint_type num_exponent_bits = 8; + static const uint_type num_fraction_bits = 23; + static const uint_type exponent_bias = 127; +}; + +// Traits for IEEE double. +// 1 sign bit, 11 exponent bits, 52 fractional bits. +template <> +struct HexFloatTraits<FloatProxy<double>> { + typedef uint64_t uint_type; + typedef int64_t int_type; + typedef FloatProxy<double> underlying_type; + typedef double native_type; + static const uint_type num_used_bits = 64; + static const uint_type num_exponent_bits = 11; + static const uint_type num_fraction_bits = 52; + static const uint_type exponent_bias = 1023; +};
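// Illustrative sketch, editorial only and not upstream code: FloatProxy
// exists so that exact bit patterns (including NaN payloads) survive copies.
// For example:
//
//     spvutils::FloatProxy<float> p(uint32_t(0x7fc00001));  // quiet-NaN pattern
//     assert(p.isNan() && p.data() == 0x7fc00001u);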
+// Traits for IEEE half. +// 1 sign bit, 5 exponent bits, 10 fractional bits. +template <> +struct HexFloatTraits<FloatProxy<Float16>> { + typedef uint16_t uint_type; + typedef int16_t int_type; + typedef uint16_t underlying_type; + typedef uint16_t native_type; + static const uint_type num_used_bits = 16; + static const uint_type num_exponent_bits = 5; + static const uint_type num_fraction_bits = 10; + static const uint_type exponent_bias = 15; +}; + +enum round_direction { + kRoundToZero, + kRoundToNearestEven, + kRoundToPositiveInfinity, + kRoundToNegativeInfinity +}; + +// Template class that houses a floating pointer number. +// It exposes a number of constants based on the provided traits to +// assist in interpreting the bits of the value. +template <typename T, typename Traits = HexFloatTraits<T>> +class HexFloat { + public: + typedef typename Traits::uint_type uint_type; + typedef typename Traits::int_type int_type; + typedef typename Traits::underlying_type underlying_type; + typedef typename Traits::native_type native_type; + + explicit HexFloat(T f) : value_(f) {} + + T value() const { return value_; } + void set_value(T f) { value_ = f; } + + // These are all written like this because it is convenient to have + // compile-time constants for all of these values. + + // Pass-through values to save typing. + static const uint32_t num_used_bits = Traits::num_used_bits; + static const uint32_t exponent_bias = Traits::exponent_bias; + static const uint32_t num_exponent_bits = Traits::num_exponent_bits; + static const uint32_t num_fraction_bits = Traits::num_fraction_bits; + + // Number of bits to shift left to set the highest relevant bit. + static const uint32_t top_bit_left_shift = num_used_bits - 1; + // How many nibbles (hex characters) the fractional part takes up. + static const uint32_t fraction_nibbles = (num_fraction_bits + 3) / 4; + // If the fractional part does not fit evenly into a hex character (4-bits) + // then we have to left-shift to get rid of leading 0s. This is the amount + // we have to shift (might be 0). + static const uint32_t num_overflow_bits = + fraction_nibbles * 4 - num_fraction_bits; + + // The representation of the fraction, not the actual bits. This + // includes the leading bit that is usually implicit. + static const uint_type fraction_represent_mask = + spvutils::SetBits<uint_type, 0, num_fraction_bits + num_overflow_bits>::get; + + // The topmost bit in the nibble-aligned fraction. + static const uint_type fraction_top_bit = + uint_type(1) << (num_fraction_bits + num_overflow_bits - 1); + + // The least significant bit in the exponent, which is also the bit + // immediately to the left of the significand. + static const uint_type first_exponent_bit = uint_type(1) + << (num_fraction_bits); + + // The mask for the encoded fraction. It does not include the + // implicit bit. + static const uint_type fraction_encode_mask = + spvutils::SetBits<uint_type, 0, num_fraction_bits>::get; + + // The bit that is used as a sign. + static const uint_type sign_mask = uint_type(1) << top_bit_left_shift; + + // The bits that represent the exponent. + static const uint_type exponent_mask = + spvutils::SetBits<uint_type, num_fraction_bits, num_exponent_bits>::get; + + // How far left the exponent is shifted. + static const uint32_t exponent_left_shift = num_fraction_bits; + + // How far from the right edge the fraction is shifted. + static const uint32_t fraction_right_shift = + static_cast<uint32_t>(sizeof(uint_type) * 8) - num_fraction_bits; + + // The maximum representable unbiased exponent. + static const int_type max_exponent = + (exponent_mask >> num_fraction_bits) - exponent_bias;
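  // Worked example, editorial comment only: for HexFloat<FloatProxy<float>>
  // the constants above evaluate to sign_mask = 0x80000000, exponent_mask =
  // 0x7f800000, fraction_encode_mask = 0x007fffff, first_exponent_bit =
  // 0x00800000, fraction_nibbles = 6, num_overflow_bits = 1 and
  // max_exponent = 128.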
+ // The minimum representable exponent for normalized numbers. + static const int_type min_exponent = -static_cast<int_type>(exponent_bias); + + // Returns the bits associated with the value. + uint_type getBits() const { return spvutils::BitwiseCast<uint_type>(value_); } + + // Returns the bits associated with the value, without the leading sign bit. + uint_type getUnsignedBits() const { + return static_cast<uint_type>(spvutils::BitwiseCast<uint_type>(value_) & + ~sign_mask); + } + + // Returns the bits associated with the exponent, shifted to start at the + // lsb of the type. + const uint_type getExponentBits() const { + return static_cast<uint_type>((getBits() & exponent_mask) >> + num_fraction_bits); + } + + // Returns the exponent in unbiased form. This is the exponent in the + // human-friendly form. + const int_type getUnbiasedExponent() const { + return static_cast<int_type>(getExponentBits() - exponent_bias); + } + + // Returns just the significand bits from the value. + const uint_type getSignificandBits() const { + return getBits() & fraction_encode_mask; + } + + // If the number was normalized, returns the unbiased exponent. + // If the number was denormal, normalize the exponent first. + const int_type getUnbiasedNormalizedExponent() const { + if ((getBits() & ~sign_mask) == 0) { // special case if everything is 0 + return 0; + } + int_type exp = getUnbiasedExponent(); + if (exp == min_exponent) { // We are in denorm land. + uint_type significand_bits = getSignificandBits(); + while ((significand_bits & (first_exponent_bit >> 1)) == 0) { + significand_bits = static_cast<uint_type>(significand_bits << 1); + exp = static_cast<int_type>(exp - 1); + } + significand_bits &= fraction_encode_mask; + } + return exp; + } + + // Returns the signficand after it has been normalized. + const uint_type getNormalizedSignificand() const { + int_type unbiased_exponent = getUnbiasedNormalizedExponent(); + uint_type significand = getSignificandBits(); + for (int_type i = unbiased_exponent; i <= min_exponent; ++i) { + significand = static_cast<uint_type>(significand << 1); + } + significand &= fraction_encode_mask; + return significand; + } + + // Returns true if this number represents a negative value. + bool isNegative() const { return (getBits() & sign_mask) != 0; } + + // Sets this HexFloat from the individual components. + // Note this assumes EVERY significand is normalized, and has an implicit + // leading one. This means that the only way that this method will set 0, + // is if you set a number so denormalized that it underflows. + // Do not use this method with raw bits extracted from a subnormal number, + // since subnormals do not have an implicit leading 1 in the significand. + // The significand is also expected to be in the + // lowest-most num_fraction_bits of the uint_type. + // The exponent is expected to be unbiased, meaning an exponent of + // 0 actually means 0. + // If underflow_round_up is set, then on underflow, if a number is non-0 + // and would underflow, we round up to the smallest denorm. + void setFromSignUnbiasedExponentAndNormalizedSignificand( + bool negative, int_type exponent, uint_type significand, + bool round_denorm_up) { + bool significand_is_zero = significand == 0; + + if (exponent <= min_exponent) { + // If this was denormalized, then we have to shift the bit on, meaning + // the significand is not zero.
+ significand_is_zero = false; + significand |= first_exponent_bit; + significand = static_cast<uint_type>(significand >> 1); + } + + while (exponent < min_exponent) { + significand = static_cast<uint_type>(significand >> 1); + ++exponent; + } + + if (exponent == min_exponent) { + if (significand == 0 && !significand_is_zero && round_denorm_up) { + significand = static_cast<uint_type>(0x1); + } + } + + uint_type new_value = 0; + if (negative) { + new_value = static_cast<uint_type>(new_value | sign_mask); + } + exponent = static_cast<int_type>(exponent + exponent_bias); + assert(exponent >= 0); + + // put it all together + exponent = static_cast<uint_type>((exponent << exponent_left_shift) & + exponent_mask); + significand = static_cast<uint_type>(significand & fraction_encode_mask); + new_value = static_cast<uint_type>(new_value | (exponent | significand)); + value_ = BitwiseCast<T>(new_value); + } + + // Increments the significand of this number by the given amount. + // If this would spill the significand into the implicit bit, + // carry is set to true and the significand is shifted to fit into + // the correct location, otherwise carry is set to false. + // All significands and to_increment are assumed to be within the bounds + // for a valid significand. + static uint_type incrementSignificand(uint_type significand, + uint_type to_increment, bool* carry) { + significand = static_cast<uint_type>(significand + to_increment); + *carry = false; + if (significand & first_exponent_bit) { + *carry = true; + // The implicit 1-bit will have carried, so we should zero-out the + // top bit and shift back. + significand = static_cast<uint_type>(significand & ~first_exponent_bit); + significand = static_cast<uint_type>(significand >> 1); + } + return significand; + } + + // These exist because MSVC throws warnings on negative right-shifts + // even if they are not going to be executed. Eg: + // constant_number < 0? 0: constant_number + // These convert the negative left-shifts into right shifts. + + template <typename int_type> + uint_type negatable_left_shift(int_type N, uint_type val) + { + if(N >= 0) + return val << N; + + return val >> -N; + } + + template <typename int_type> + uint_type negatable_right_shift(int_type N, uint_type val) + { + if(N >= 0) + return val >> N; + + return val << -N; + } + + // Returns the significand, rounded to fit in a significand in + // other_T. This is shifted so that the most significant + // bit of the rounded number lines up with the most significant bit + // of the returned significand. + template <typename other_T> + typename other_T::uint_type getRoundedNormalizedSignificand( + round_direction dir, bool* carry_bit) { + typedef typename other_T::uint_type other_uint_type; + static const int_type num_throwaway_bits = + static_cast<int_type>(num_fraction_bits) - + static_cast<int_type>(other_T::num_fraction_bits); + + static const uint_type last_significant_bit = + (num_throwaway_bits < 0) + ? 0 + : negatable_left_shift(num_throwaway_bits, 1u); + static const uint_type first_rounded_bit = + (num_throwaway_bits < 1) + ? 0 + : negatable_left_shift(num_throwaway_bits - 1, 1u); + + static const uint_type throwaway_mask_bits = + num_throwaway_bits > 0 ? num_throwaway_bits : 0; + static const uint_type throwaway_mask = + spvutils::SetBits<uint_type, 0, throwaway_mask_bits>::get; + + *carry_bit = false; + other_uint_type out_val = 0; + uint_type significand = getNormalizedSignificand();
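    // Worked example, editorial comment only: narrowing float to Float16
    // gives num_throwaway_bits = 23 - 10 = 13, so last_significant_bit is
    // bit 13 of the float significand, first_rounded_bit is bit 12, and
    // throwaway_mask covers the 13 discarded bits that the rounding below
    // inspects.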
+ // If we are up-casting, then we just have to shift to the right location. + if (num_throwaway_bits <= 0) { + out_val = static_cast<other_uint_type>(significand); + uint_type shift_amount = static_cast<uint_type>(-num_throwaway_bits); + out_val = static_cast<other_uint_type>(out_val << shift_amount); + return out_val; + } + + // If every non-representable bit is 0, then we don't have any casting to + // do. + if ((significand & throwaway_mask) == 0) { + return static_cast<other_uint_type>( + negatable_right_shift(num_throwaway_bits, significand)); + } + + bool round_away_from_zero = false; + // We actually have to narrow the significand here, so we have to follow the + // rounding rules. + switch (dir) { + case kRoundToZero: + break; + case kRoundToPositiveInfinity: + round_away_from_zero = !isNegative(); + break; + case kRoundToNegativeInfinity: + round_away_from_zero = isNegative(); + break; + case kRoundToNearestEven: + // Have to round down, round bit is 0 + if ((first_rounded_bit & significand) == 0) { + break; + } + if (((significand & throwaway_mask) & ~first_rounded_bit) != 0) { + // If any subsequent bit of the rounded portion is non-0 then we round + // up. + round_away_from_zero = true; + break; + } + // We are exactly half-way between 2 numbers, pick even. + if ((significand & last_significant_bit) != 0) { + // 1 for our last bit, round up. + round_away_from_zero = true; + break; + } + break; + } + + if (round_away_from_zero) { + return static_cast<other_uint_type>( + negatable_right_shift(num_throwaway_bits, incrementSignificand( + significand, last_significant_bit, carry_bit))); + } else { + return static_cast<other_uint_type>( + negatable_right_shift(num_throwaway_bits, significand)); + } + } + + // Casts this value to another HexFloat. If the cast is widening, + // then round_dir is ignored. If the cast is narrowing, then + // the result is rounded in the direction specified. + // This number will retain Nan and Inf values. + // It will also saturate to Inf if the number overflows, and + // underflow to (0 or min depending on rounding) if the number underflows. + template <typename other_T> + void castTo(other_T& other, round_direction round_dir) { + other = other_T(static_cast<typename other_T::native_type>(0)); + bool negate = isNegative(); + if (getUnsignedBits() == 0) { + if (negate) { + other.set_value(-other.value()); + } + return; + } + uint_type significand = getSignificandBits(); + bool carried = false; + typename other_T::uint_type rounded_significand = + getRoundedNormalizedSignificand<other_T>(round_dir, &carried); + + int_type exponent = getUnbiasedExponent(); + if (exponent == min_exponent) { + // If we are denormal, normalize the exponent, so that we can encode + // easily. + exponent = static_cast<int_type>(exponent + 1); + for (uint_type check_bit = first_exponent_bit >> 1; check_bit != 0; + check_bit = static_cast<uint_type>(check_bit >> 1)) { + exponent = static_cast<int_type>(exponent - 1); + if (check_bit & significand) break; + } + } + + bool is_nan = + (getBits() & exponent_mask) == exponent_mask && significand != 0; + bool is_inf = + !is_nan && + ((exponent + carried) > static_cast<int_type>(other_T::exponent_bias) || + (significand == 0 && (getBits() & exponent_mask) == exponent_mask)); + + // If we are Nan or Inf we should pass that through. + if (is_inf) { + other.set_value(BitwiseCast<typename other_T::underlying_type>( + static_cast<typename other_T::uint_type>( + (negate ? other_T::sign_mask : 0) | other_T::exponent_mask))); + return; + } + if (is_nan) { + typename other_T::uint_type shifted_significand; + shifted_significand = static_cast<typename other_T::uint_type>( + negatable_left_shift( + static_cast<int_type>(other_T::num_fraction_bits) - + static_cast<int_type>(num_fraction_bits), significand));
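      // Worked example, editorial comment only: casting the float NaN pattern
      // 0x7fc00001 to Float16 shifts its significand 0x400001 right by 13
      // bits, giving 0x200; the result 0x7c00 | 0x200 = 0x7e00 is still a NaN
      // in half precision.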
+ // We are some sort of Nan. We try to keep the bit-pattern of the Nan + // as close as possible. If we had to shift off bits so we are 0, then we + // just set the last bit. + other.set_value(BitwiseCast<typename other_T::underlying_type>( + static_cast<typename other_T::uint_type>( + (negate ? other_T::sign_mask : 0) | other_T::exponent_mask | + (shifted_significand == 0 ? 0x1 : shifted_significand)))); + return; + } + + bool round_underflow_up = + isNegative() ? round_dir == kRoundToNegativeInfinity + : round_dir == kRoundToPositiveInfinity; + typedef typename other_T::int_type other_int_type; + // setFromSignUnbiasedExponentAndNormalizedSignificand will + // zero out any underflowing value (but retain the sign). + other.setFromSignUnbiasedExponentAndNormalizedSignificand( + negate, static_cast<other_int_type>(exponent), rounded_significand, + round_underflow_up); + return; + } + + private: + T value_; + + static_assert(num_used_bits == + Traits::num_exponent_bits + Traits::num_fraction_bits + 1, + "The number of bits do not fit"); + static_assert(sizeof(T) == sizeof(uint_type), "The type sizes do not match"); +}; + +// Returns 4 bits represented by the hex character. +inline uint8_t get_nibble_from_character(int character) { + const char* dec = "0123456789"; + const char* lower = "abcdef"; + const char* upper = "ABCDEF"; + const char* p = nullptr; + if ((p = strchr(dec, character))) { + return static_cast<uint8_t>(p - dec); + } else if ((p = strchr(lower, character))) { + return static_cast<uint8_t>(p - lower + 0xa); + } else if ((p = strchr(upper, character))) { + return static_cast<uint8_t>(p - upper + 0xa); + } + + assert(false && "This was called with a non-hex character"); + return 0; +} + +// Outputs the given HexFloat to the stream. +template <typename T, typename Traits> +std::ostream& operator<<(std::ostream& os, const HexFloat<T, Traits>& value) { + typedef HexFloat<T, Traits> HF; + typedef typename HF::uint_type uint_type; + typedef typename HF::int_type int_type; + + static_assert(HF::num_used_bits != 0, + "num_used_bits must be non-zero for a valid float"); + static_assert(HF::num_exponent_bits != 0, + "num_exponent_bits must be non-zero for a valid float"); + static_assert(HF::num_fraction_bits != 0, + "num_fractin_bits must be non-zero for a valid float"); + + const uint_type bits = spvutils::BitwiseCast<uint_type>(value.value()); + const char* const sign = (bits & HF::sign_mask) ? "-" : ""; + const uint_type exponent = static_cast<uint_type>( + (bits & HF::exponent_mask) >> HF::num_fraction_bits); + + uint_type fraction = static_cast<uint_type>((bits & HF::fraction_encode_mask) + << HF::num_overflow_bits); + + const bool is_zero = exponent == 0 && fraction == 0; + const bool is_denorm = exponent == 0 && !is_zero; + + // exponent contains the biased exponent we have to convert it back into + // the normal range. + int_type int_exponent = static_cast<int_type>(exponent - HF::exponent_bias); + // If the number is all zeros, then we actually have to NOT shift the + // exponent. + int_exponent = is_zero ? 0 : int_exponent; + + // If we are denorm, then start shifting, and decreasing the exponent until + // our leading bit is 1. + + if (is_denorm) { + while ((fraction & HF::fraction_top_bit) == 0) { + fraction = static_cast<uint_type>(fraction << 1); + int_exponent = static_cast<int_type>(int_exponent - 1); + } + // Since this is denormalized, we have to consume the leading 1 since it + // will end up being implicit. + fraction = static_cast<uint_type>(fraction << 1); // eat the leading 1 + fraction &= HF::fraction_represent_mask; + } + + uint_type fraction_nibbles = HF::fraction_nibbles;
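  // Illustrative example, editorial comment only: under this encoding 0.5f
  // prints as "0x1p-1" and -96.0f prints as "-0x1.8p+6" -- sign, a leading
  // "0x1" (or "0x0" for zero), fraction nibbles with trailing zeros removed,
  // then "p" and a signed decimal exponent.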
+ // We do not have to display any trailing 0s, since this represents the + // fractional part. + while (fraction_nibbles > 0 && (fraction & 0xF) == 0) { + // Shift off any trailing values; + fraction = static_cast<uint_type>(fraction >> 4); + --fraction_nibbles; + } + + const auto saved_flags = os.flags(); + const auto saved_fill = os.fill(); + + os << sign << "0x" << (is_zero ? '0' : '1'); + if (fraction_nibbles) { + // Make sure to keep the leading 0s in place, since this is the fractional + // part. + os << "." << std::setw(static_cast<int>(fraction_nibbles)) + << std::setfill('0') << std::hex << fraction; + } + os << "p" << std::dec << (int_exponent >= 0 ? "+" : "") << int_exponent; + + os.flags(saved_flags); + os.fill(saved_fill); + + return os; +} + +// Returns true if negate_value is true and the next character on the +// input stream is a plus or minus sign. In that case we also set the fail bit +// on the stream and set the value to the zero value for its type. +template <typename T, typename Traits> +inline bool RejectParseDueToLeadingSign(std::istream& is, bool negate_value, + HexFloat<T, Traits>& value) { + if (negate_value) { + auto next_char = is.peek(); + if (next_char == '-' || next_char == '+') { + // Fail the parse. Emulate standard behaviour by setting the value to + // the zero value, and set the fail bit on the stream. + value = HexFloat<T, Traits>(typename HexFloat<T, Traits>::uint_type(0)); + is.setstate(std::ios_base::failbit); + return true; + } + } + return false; +} + +// Parses a floating point number from the given stream and stores it into the +// value parameter. +// If negate_value is true then the number may not have a leading minus or +// plus, and if it successfully parses, then the number is negated before +// being stored into the value parameter. +// If the value cannot be correctly parsed or overflows the target floating +// point type, then set the fail bit on the stream. +// TODO(dneto): Promise C++11 standard behavior in how the value is set in +// the error case, but only after all target platforms implement it correctly. +// In particular, the Microsoft C++ runtime appears to be out of spec. +template <typename T, typename Traits> +inline std::istream& ParseNormalFloat(std::istream& is, bool negate_value, + HexFloat<T, Traits>& value) { + if (RejectParseDueToLeadingSign(is, negate_value, value)) { + return is; + } + T val; + is >> val; + if (negate_value) { + val = -val; + } + value.set_value(val); + // In the failure case, map -0.0 to 0.0. + if (is.fail() && value.getUnsignedBits() == 0u) { + value = HexFloat<T, Traits>(typename HexFloat<T, Traits>::uint_type(0)); + } + if (val.isInfinity()) { + // Fail the parse. Emulate standard behaviour by setting the value to + // the closest normal value, and set the fail bit on the stream. + value.set_value((value.isNegative() || negate_value) ? T::lowest() + : T::max()); + is.setstate(std::ios_base::failbit); + } + return is; +}
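// Illustrative usage, editorial and not upstream code: parsing a hex float
// with the stream extractor defined further below.
//
//     std::istringstream input("0x1.8p+6");
//     spvutils::HexFloat<spvutils::FloatProxy<float>> hf(0.0f);
//     input >> hf;  // hf.value().getAsFloat() == 96.0f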
+// Specialization of ParseNormalFloat for FloatProxy<Float16> values. +// This will parse the float as it were a 32-bit floating point number, +// and then round it down to fit into a Float16 value. +// The number is rounded towards zero. +// If negate_value is true then the number may not have a leading minus or +// plus, and if it successfully parses, then the number is negated before +// being stored into the value parameter. +// If the value cannot be correctly parsed or overflows the target floating +// point type, then set the fail bit on the stream. +// TODO(dneto): Promise C++11 standard behavior in how the value is set in +// the error case, but only after all target platforms implement it correctly. +// In particular, the Microsoft C++ runtime appears to be out of spec. +template <> +inline std::istream& +ParseNormalFloat<FloatProxy<Float16>, HexFloatTraits<FloatProxy<Float16>>>( + std::istream& is, bool negate_value, + HexFloat<FloatProxy<Float16>, HexFloatTraits<FloatProxy<Float16>>>& value) { + // First parse as a 32-bit float. + HexFloat<FloatProxy<float>> float_val(0.0f); + ParseNormalFloat(is, negate_value, float_val); + + // Then convert to 16-bit float, saturating at infinities, and + // rounding toward zero. + float_val.castTo(value, kRoundToZero); + + // Overflow on 16-bit behaves the same as for 32- and 64-bit: set the + // fail bit and set the lowest or highest value. + if (Float16::isInfinity(value.value().getAsFloat())) { + value.set_value(value.isNegative() ? Float16::lowest() : Float16::max()); + is.setstate(std::ios_base::failbit); + } + return is; +} + +// Reads a HexFloat from the given stream. +// If the float is not encoded as a hex-float then it will be parsed +// as a regular float. +// This may fail if your stream does not support at least one unget. +// Nan values can be encoded with "0x1.<not zero>p+exponent_bias". +// This would normally overflow a float and round to +// infinity but this special pattern is the exact representation for a NaN, +// and therefore is actually encoded as the correct NaN. To encode inf, +// either 0x0p+exponent_bias can be specified or any exponent greater than +// exponent_bias. +// Examples using IEEE 32-bit float encoding. +// 0x1.0p+128 (+inf) +// -0x1.0p-128 (-inf) +// +// 0x1.1p+128 (+Nan) +// -0x1.1p+128 (-Nan) +// +// 0x1p+129 (+inf) +// -0x1p+129 (-inf) +template <typename T, typename Traits> +std::istream& operator>>(std::istream& is, HexFloat<T, Traits>& value) { + using HF = HexFloat<T, Traits>; + using uint_type = typename HF::uint_type; + using int_type = typename HF::int_type; + + value.set_value(static_cast<T>(0.f)); + + if (is.flags() & std::ios::skipws) { + // If the user wants to skip whitespace, then we should obey that. + while (std::isspace(is.peek())) { + is.get(); + } + } + + auto next_char = is.peek(); + bool negate_value = false; + + if (next_char != '-' && next_char != '0') { + return ParseNormalFloat(is, negate_value, value); + } + + if (next_char == '-') { + negate_value = true; + is.get(); + next_char = is.peek(); + } + + if (next_char == '0') { + is.get(); // We may have to unget this. + auto maybe_hex_start = is.peek(); + if (maybe_hex_start != 'x' && maybe_hex_start != 'X') { + is.unget(); + return ParseNormalFloat(is, negate_value, value); + } else { + is.get(); // Throw away the 'x'; + } + } else { + return ParseNormalFloat(is, negate_value, value); + } + + // This "looks" like a hex-float so treat it as one. + bool seen_p = false; + bool seen_dot = false; + uint_type fraction_index = 0; + + uint_type fraction = 0; + int_type exponent = HF::exponent_bias; + + // Strip off leading zeros so we don't have to special-case them later. + while ((next_char = is.peek()) == '0') { + is.get(); + } + + bool is_denorm = + true; // Assume denorm "representation" until we hear otherwise. + // NB: This does not mean the value is actually denorm, + // it just means that it was written 0. + bool bits_written = false; // Stays false until we write a bit. + while (!seen_p && !seen_dot) { + // Handle characters that are left of the fractional part. + if (next_char == '.') { + seen_dot = true; + } else if (next_char == 'p') { + seen_p = true; + } else if (::isxdigit(next_char)) { + // We know this is not denormalized since we have stripped all leading + // zeroes and we are not a ".". + is_denorm = false; + int number = get_nibble_from_character(next_char); + for (int i = 0; i < 4; ++i, number <<= 1) { + uint_type write_bit = (number & 0x8) ?
0x1 : 0x0; + if (bits_written) { + // If we are here the bits represented belong in the fractional + // part of the float, and we have to adjust the exponent accordingly. + fraction = static_cast<uint_type>( + fraction | + static_cast<uint_type>( + write_bit << (HF::top_bit_left_shift - fraction_index++))); + exponent = static_cast<int_type>(exponent + 1); + } + bits_written |= write_bit != 0; + } + } else { + // We have not found our exponent yet, so we have to fail. + is.setstate(std::ios::failbit); + return is; + } + is.get(); + next_char = is.peek(); + } + bits_written = false; + while (seen_dot && !seen_p) { + // Handle only fractional parts now. + if (next_char == 'p') { + seen_p = true; + } else if (::isxdigit(next_char)) { + int number = get_nibble_from_character(next_char); + for (int i = 0; i < 4; ++i, number <<= 1) { + uint_type write_bit = (number & 0x8) ? 0x01 : 0x00; + bits_written |= write_bit != 0; + if (is_denorm && !bits_written) { + // Handle modifying the exponent here this way we can handle + // an arbitrary number of hex values without overflowing our + // integer. + exponent = static_cast<int_type>(exponent - 1); + } else { + fraction = static_cast<uint_type>( + fraction | + static_cast<uint_type>( + write_bit << (HF::top_bit_left_shift - fraction_index++))); + } + } + } else { + // We still have not found our 'p' exponent yet, so this is not a valid + // hex-float. + is.setstate(std::ios::failbit); + return is; + } + is.get(); + next_char = is.peek(); + } + + bool seen_sign = false; + int8_t exponent_sign = 1; + int_type written_exponent = 0; + while (true) { + if ((next_char == '-' || next_char == '+')) { + if (seen_sign) { + is.setstate(std::ios::failbit); + return is; + } + seen_sign = true; + exponent_sign = (next_char == '-') ? -1 : 1; + } else if (::isdigit(next_char)) { + // Hex-floats express their exponent as decimal. + written_exponent = static_cast<int_type>(written_exponent * 10); + written_exponent = + static_cast<int_type>(written_exponent + (next_char - '0')); + } else { + break; + } + is.get(); + next_char = is.peek(); + } + + written_exponent = static_cast<int_type>(written_exponent * exponent_sign); + exponent = static_cast<int_type>(exponent + written_exponent); + + bool is_zero = is_denorm && (fraction == 0); + if (is_denorm && !is_zero) { + fraction = static_cast<uint_type>(fraction << 1); + exponent = static_cast<int_type>(exponent - 1); + } else if (is_zero) { + exponent = 0; + } + + if (exponent <= 0 && !is_zero) { + fraction = static_cast<uint_type>(fraction >> 1); + fraction |= static_cast<uint_type>(1) << HF::top_bit_left_shift; + } + + fraction = (fraction >> HF::fraction_right_shift) & HF::fraction_encode_mask; + + const int_type max_exponent = + SetBits<int_type, 0, HF::num_exponent_bits>::get; + + // Handle actual denorm numbers + while (exponent < 0 && !is_zero) { + fraction = static_cast<uint_type>(fraction >> 1); + exponent = static_cast<int_type>(exponent + 1); + + fraction &= HF::fraction_encode_mask; + if (fraction == 0) { + // We have underflowed our fraction. We should clamp to zero. + is_zero = true; + exponent = 0; + } + } + + // We have overflowed so we should be inf/-inf. + if (exponent > max_exponent) { + exponent = max_exponent; + fraction = 0; + } + + uint_type output_bits = static_cast<uint_type>( + static_cast<uint_type>(negate_value ? 1 : 0) << HF::top_bit_left_shift); + output_bits |= fraction; + + uint_type shifted_exponent = static_cast<uint_type>( + static_cast<uint_type>(exponent << HF::exponent_left_shift) & + HF::exponent_mask); + output_bits |= shifted_exponent; + + T output_float = spvutils::BitwiseCast<T>(output_bits); + value.set_value(output_float); + + return is; +}
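// Illustrative note, editorial only: because exponents above the bias
// saturate, "0x1p+129" parses to +infinity for IEEE single precision, while
// "0x1.1p+128" keeps a non-zero fraction and therefore parses to a NaN, as
// the comment above operator>> describes.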
+// Writes a FloatProxy value to a stream. +// Zero and normal numbers are printed in the usual notation, but with +// enough digits to fully reproduce the value. Other values (subnormal, +// NaN, and infinity) are printed as a hex float. +template <typename T> +std::ostream& operator<<(std::ostream& os, const FloatProxy<T>& value) { + auto float_val = value.getAsFloat(); + switch (std::fpclassify(float_val)) { + case FP_ZERO: + case FP_NORMAL: { + auto saved_precision = os.precision(); + os.precision(std::numeric_limits<T>::digits10); + os << float_val; + os.precision(saved_precision); + } break; + default: + os << HexFloat<FloatProxy<T>>(value); + break; + } + return os; +} + +template <> +inline std::ostream& operator<<<Float16>(std::ostream& os, + const FloatProxy<Float16>& value) { + os << HexFloat<FloatProxy<Float16>>(value); + return os; +} +} + +#endif // LIBSPIRV_UTIL_HEX_FLOAT_H_ diff --git a/dep/glslang/SPIRV/spirv.hpp b/dep/glslang/SPIRV/spirv.hpp new file mode 100644 index 000000000..dae36cf20 --- /dev/null +++ b/dep/glslang/SPIRV/spirv.hpp @@ -0,0 +1,2114 @@ +// Copyright (c) 2014-2020 The Khronos Group Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and/or associated documentation files (the "Materials"), +// to deal in the Materials without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Materials, and to permit persons to whom the +// Materials are furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Materials. +// +// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +// +// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +// IN THE MATERIALS. + +// This header is automatically generated by the same tool that creates +// the Binary Section of the SPIR-V specification. + +// Enumeration tokens for SPIR-V, in various styles: +// C, C++, C++11, JSON, Lua, Python, C#, D +// +// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +// - C# will use enum classes in the Specification class located in the "Spv" namespace, +// e.g.: Spv.Specification.SourceLanguage.GLSL +// - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL +// +// Some tokens act like mask values, which can be OR'd together, +// while others are mutually exclusive. The mask-like ones have +// "Mask" in their name, and a parallel enum that has the shift +// amount (1 << x) for each corresponding enumerant.
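// Illustrative note, editorial and not part of the generated header: the
// *Mask enumerants below are bit flags and combine with OR, e.g.
//
//     unsigned ops = spv::ImageOperandsBiasMask | spv::ImageOperandsLodMask;
//     bool hasLod = (ops & spv::ImageOperandsLodMask) != 0;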
+ +#ifndef spirv_HPP +#define spirv_HPP + +namespace spv { + +typedef unsigned int Id; + +#define SPV_VERSION 0x10500 +#define SPV_REVISION 3 + +static const unsigned int MagicNumber = 0x07230203; +static const unsigned int Version = 0x00010500; +static const unsigned int Revision = 3; +static const unsigned int OpCodeMask = 0xffff; +static const unsigned int WordCountShift = 16; + +enum SourceLanguage { + SourceLanguageUnknown = 0, + SourceLanguageESSL = 1, + SourceLanguageGLSL = 2, + SourceLanguageOpenCL_C = 3, + SourceLanguageOpenCL_CPP = 4, + SourceLanguageHLSL = 5, + SourceLanguageMax = 0x7fffffff, +}; + +enum ExecutionModel { + ExecutionModelVertex = 0, + ExecutionModelTessellationControl = 1, + ExecutionModelTessellationEvaluation = 2, + ExecutionModelGeometry = 3, + ExecutionModelFragment = 4, + ExecutionModelGLCompute = 5, + ExecutionModelKernel = 6, + ExecutionModelTaskNV = 5267, + ExecutionModelMeshNV = 5268, + ExecutionModelRayGenerationKHR = 5313, + ExecutionModelRayGenerationNV = 5313, + ExecutionModelIntersectionKHR = 5314, + ExecutionModelIntersectionNV = 5314, + ExecutionModelAnyHitKHR = 5315, + ExecutionModelAnyHitNV = 5315, + ExecutionModelClosestHitKHR = 5316, + ExecutionModelClosestHitNV = 5316, + ExecutionModelMissKHR = 5317, + ExecutionModelMissNV = 5317, + ExecutionModelCallableKHR = 5318, + ExecutionModelCallableNV = 5318, + ExecutionModelMax = 0x7fffffff, +}; + +enum AddressingModel { + AddressingModelLogical = 0, + AddressingModelPhysical32 = 1, + AddressingModelPhysical64 = 2, + AddressingModelPhysicalStorageBuffer64 = 5348, + AddressingModelPhysicalStorageBuffer64EXT = 5348, + AddressingModelMax = 0x7fffffff, +}; + +enum MemoryModel { + MemoryModelSimple = 0, + MemoryModelGLSL450 = 1, + MemoryModelOpenCL = 2, + MemoryModelVulkan = 3, + MemoryModelVulkanKHR = 3, + MemoryModelMax = 0x7fffffff, +}; + +enum ExecutionMode { + ExecutionModeInvocations = 0, + ExecutionModeSpacingEqual = 1, + ExecutionModeSpacingFractionalEven = 2, + ExecutionModeSpacingFractionalOdd = 3, + ExecutionModeVertexOrderCw = 4, + ExecutionModeVertexOrderCcw = 5, + ExecutionModePixelCenterInteger = 6, + ExecutionModeOriginUpperLeft = 7, + ExecutionModeOriginLowerLeft = 8, + ExecutionModeEarlyFragmentTests = 9, + ExecutionModePointMode = 10, + ExecutionModeXfb = 11, + ExecutionModeDepthReplacing = 12, + ExecutionModeDepthGreater = 14, + ExecutionModeDepthLess = 15, + ExecutionModeDepthUnchanged = 16, + ExecutionModeLocalSize = 17, + ExecutionModeLocalSizeHint = 18, + ExecutionModeInputPoints = 19, + ExecutionModeInputLines = 20, + ExecutionModeInputLinesAdjacency = 21, + ExecutionModeTriangles = 22, + ExecutionModeInputTrianglesAdjacency = 23, + ExecutionModeQuads = 24, + ExecutionModeIsolines = 25, + ExecutionModeOutputVertices = 26, + ExecutionModeOutputPoints = 27, + ExecutionModeOutputLineStrip = 28, + ExecutionModeOutputTriangleStrip = 29, + ExecutionModeVecTypeHint = 30, + ExecutionModeContractionOff = 31, + ExecutionModeInitializer = 33, + ExecutionModeFinalizer = 34, + ExecutionModeSubgroupSize = 35, + ExecutionModeSubgroupsPerWorkgroup = 36, + ExecutionModeSubgroupsPerWorkgroupId = 37, + ExecutionModeLocalSizeId = 38, + ExecutionModeLocalSizeHintId = 39, + ExecutionModePostDepthCoverage = 4446, + ExecutionModeDenormPreserve = 4459, + ExecutionModeDenormFlushToZero = 4460, + ExecutionModeSignedZeroInfNanPreserve = 4461, + ExecutionModeRoundingModeRTE = 4462, + ExecutionModeRoundingModeRTZ = 4463, + ExecutionModeStencilRefReplacingEXT = 5027, + ExecutionModeOutputLinesNV = 5269, + 
ExecutionModeOutputPrimitivesNV = 5270, + ExecutionModeDerivativeGroupQuadsNV = 5289, + ExecutionModeDerivativeGroupLinearNV = 5290, + ExecutionModeOutputTrianglesNV = 5298, + ExecutionModePixelInterlockOrderedEXT = 5366, + ExecutionModePixelInterlockUnorderedEXT = 5367, + ExecutionModeSampleInterlockOrderedEXT = 5368, + ExecutionModeSampleInterlockUnorderedEXT = 5369, + ExecutionModeShadingRateInterlockOrderedEXT = 5370, + ExecutionModeShadingRateInterlockUnorderedEXT = 5371, + ExecutionModeMax = 0x7fffffff, +}; + +enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + StorageClassWorkgroup = 4, + StorageClassCrossWorkgroup = 5, + StorageClassPrivate = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPushConstant = 9, + StorageClassAtomicCounter = 10, + StorageClassImage = 11, + StorageClassStorageBuffer = 12, + StorageClassCallableDataKHR = 5328, + StorageClassCallableDataNV = 5328, + StorageClassIncomingCallableDataKHR = 5329, + StorageClassIncomingCallableDataNV = 5329, + StorageClassRayPayloadKHR = 5338, + StorageClassRayPayloadNV = 5338, + StorageClassHitAttributeKHR = 5339, + StorageClassHitAttributeNV = 5339, + StorageClassIncomingRayPayloadKHR = 5342, + StorageClassIncomingRayPayloadNV = 5342, + StorageClassShaderRecordBufferKHR = 5343, + StorageClassShaderRecordBufferNV = 5343, + StorageClassPhysicalStorageBuffer = 5349, + StorageClassPhysicalStorageBufferEXT = 5349, + StorageClassMax = 0x7fffffff, +}; + +enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, + DimSubpassData = 6, + DimMax = 0x7fffffff, +}; + +enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, + SamplerAddressingModeMax = 0x7fffffff, +}; + +enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, + SamplerFilterModeMax = 0x7fffffff, +}; + +enum ImageFormat { + ImageFormatUnknown = 0, + ImageFormatRgba32f = 1, + ImageFormatRgba16f = 2, + ImageFormatR32f = 3, + ImageFormatRgba8 = 4, + ImageFormatRgba8Snorm = 5, + ImageFormatRg32f = 6, + ImageFormatRg16f = 7, + ImageFormatR11fG11fB10f = 8, + ImageFormatR16f = 9, + ImageFormatRgba16 = 10, + ImageFormatRgb10A2 = 11, + ImageFormatRg16 = 12, + ImageFormatRg8 = 13, + ImageFormatR16 = 14, + ImageFormatR8 = 15, + ImageFormatRgba16Snorm = 16, + ImageFormatRg16Snorm = 17, + ImageFormatRg8Snorm = 18, + ImageFormatR16Snorm = 19, + ImageFormatR8Snorm = 20, + ImageFormatRgba32i = 21, + ImageFormatRgba16i = 22, + ImageFormatRgba8i = 23, + ImageFormatR32i = 24, + ImageFormatRg32i = 25, + ImageFormatRg16i = 26, + ImageFormatRg8i = 27, + ImageFormatR16i = 28, + ImageFormatR8i = 29, + ImageFormatRgba32ui = 30, + ImageFormatRgba16ui = 31, + ImageFormatRgba8ui = 32, + ImageFormatR32ui = 33, + ImageFormatRgb10a2ui = 34, + ImageFormatRg32ui = 35, + ImageFormatRg16ui = 36, + ImageFormatRg8ui = 37, + ImageFormatR16ui = 38, + ImageFormatR8ui = 39, + ImageFormatMax = 0x7fffffff, +}; + +enum ImageChannelOrder { + ImageChannelOrderR = 0, + ImageChannelOrderA = 1, + ImageChannelOrderRG = 2, + ImageChannelOrderRA = 3, + ImageChannelOrderRGB = 4, + ImageChannelOrderRGBA = 5, + ImageChannelOrderBGRA = 6, + ImageChannelOrderARGB = 7, + ImageChannelOrderIntensity = 8, + ImageChannelOrderLuminance = 9, + ImageChannelOrderRx = 10, + ImageChannelOrderRGx = 11, + 
ImageChannelOrderRGBx = 12, + ImageChannelOrderDepth = 13, + ImageChannelOrderDepthStencil = 14, + ImageChannelOrdersRGB = 15, + ImageChannelOrdersRGBx = 16, + ImageChannelOrdersRGBA = 17, + ImageChannelOrdersBGRA = 18, + ImageChannelOrderABGR = 19, + ImageChannelOrderMax = 0x7fffffff, +}; + +enum ImageChannelDataType { + ImageChannelDataTypeSnormInt8 = 0, + ImageChannelDataTypeSnormInt16 = 1, + ImageChannelDataTypeUnormInt8 = 2, + ImageChannelDataTypeUnormInt16 = 3, + ImageChannelDataTypeUnormShort565 = 4, + ImageChannelDataTypeUnormShort555 = 5, + ImageChannelDataTypeUnormInt101010 = 6, + ImageChannelDataTypeSignedInt8 = 7, + ImageChannelDataTypeSignedInt16 = 8, + ImageChannelDataTypeSignedInt32 = 9, + ImageChannelDataTypeUnsignedInt8 = 10, + ImageChannelDataTypeUnsignedInt16 = 11, + ImageChannelDataTypeUnsignedInt32 = 12, + ImageChannelDataTypeHalfFloat = 13, + ImageChannelDataTypeFloat = 14, + ImageChannelDataTypeUnormInt24 = 15, + ImageChannelDataTypeUnormInt101010_2 = 16, + ImageChannelDataTypeMax = 0x7fffffff, +}; + +enum ImageOperandsShift { + ImageOperandsBiasShift = 0, + ImageOperandsLodShift = 1, + ImageOperandsGradShift = 2, + ImageOperandsConstOffsetShift = 3, + ImageOperandsOffsetShift = 4, + ImageOperandsConstOffsetsShift = 5, + ImageOperandsSampleShift = 6, + ImageOperandsMinLodShift = 7, + ImageOperandsMakeTexelAvailableShift = 8, + ImageOperandsMakeTexelAvailableKHRShift = 8, + ImageOperandsMakeTexelVisibleShift = 9, + ImageOperandsMakeTexelVisibleKHRShift = 9, + ImageOperandsNonPrivateTexelShift = 10, + ImageOperandsNonPrivateTexelKHRShift = 10, + ImageOperandsVolatileTexelShift = 11, + ImageOperandsVolatileTexelKHRShift = 11, + ImageOperandsSignExtendShift = 12, + ImageOperandsZeroExtendShift = 13, + ImageOperandsMax = 0x7fffffff, +}; + +enum ImageOperandsMask { + ImageOperandsMaskNone = 0, + ImageOperandsBiasMask = 0x00000001, + ImageOperandsLodMask = 0x00000002, + ImageOperandsGradMask = 0x00000004, + ImageOperandsConstOffsetMask = 0x00000008, + ImageOperandsOffsetMask = 0x00000010, + ImageOperandsConstOffsetsMask = 0x00000020, + ImageOperandsSampleMask = 0x00000040, + ImageOperandsMinLodMask = 0x00000080, + ImageOperandsMakeTexelAvailableMask = 0x00000100, + ImageOperandsMakeTexelAvailableKHRMask = 0x00000100, + ImageOperandsMakeTexelVisibleMask = 0x00000200, + ImageOperandsMakeTexelVisibleKHRMask = 0x00000200, + ImageOperandsNonPrivateTexelMask = 0x00000400, + ImageOperandsNonPrivateTexelKHRMask = 0x00000400, + ImageOperandsVolatileTexelMask = 0x00000800, + ImageOperandsVolatileTexelKHRMask = 0x00000800, + ImageOperandsSignExtendMask = 0x00001000, + ImageOperandsZeroExtendMask = 0x00002000, +}; + +enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, + FPFastMathModeMax = 0x7fffffff, +}; + +enum FPFastMathModeMask { + FPFastMathModeMaskNone = 0, + FPFastMathModeNotNaNMask = 0x00000001, + FPFastMathModeNotInfMask = 0x00000002, + FPFastMathModeNSZMask = 0x00000004, + FPFastMathModeAllowRecipMask = 0x00000008, + FPFastMathModeFastMask = 0x00000010, +}; + +enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, + FPRoundingModeMax = 0x7fffffff, +}; + +enum LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, + LinkageTypeMax = 0x7fffffff, +}; + +enum AccessQualifier { + AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 
2, + AccessQualifierMax = 0x7fffffff, +}; + +enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeNoWrite = 6, + FunctionParameterAttributeNoReadWrite = 7, + FunctionParameterAttributeMax = 0x7fffffff, +}; + +enum Decoration { + DecorationRelaxedPrecision = 0, + DecorationSpecId = 1, + DecorationBlock = 2, + DecorationBufferBlock = 3, + DecorationRowMajor = 4, + DecorationColMajor = 5, + DecorationArrayStride = 6, + DecorationMatrixStride = 7, + DecorationGLSLShared = 8, + DecorationGLSLPacked = 9, + DecorationCPacked = 10, + DecorationBuiltIn = 11, + DecorationNoPerspective = 13, + DecorationFlat = 14, + DecorationPatch = 15, + DecorationCentroid = 16, + DecorationSample = 17, + DecorationInvariant = 18, + DecorationRestrict = 19, + DecorationAliased = 20, + DecorationVolatile = 21, + DecorationConstant = 22, + DecorationCoherent = 23, + DecorationNonWritable = 24, + DecorationNonReadable = 25, + DecorationUniform = 26, + DecorationUniformId = 27, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationXfbBuffer = 36, + DecorationXfbStride = 37, + DecorationFuncParamAttr = 38, + DecorationFPRoundingMode = 39, + DecorationFPFastMathMode = 40, + DecorationLinkageAttributes = 41, + DecorationNoContraction = 42, + DecorationInputAttachmentIndex = 43, + DecorationAlignment = 44, + DecorationMaxByteOffset = 45, + DecorationAlignmentId = 46, + DecorationMaxByteOffsetId = 47, + DecorationNoSignedWrap = 4469, + DecorationNoUnsignedWrap = 4470, + DecorationExplicitInterpAMD = 4999, + DecorationOverrideCoverageNV = 5248, + DecorationPassthroughNV = 5250, + DecorationViewportRelativeNV = 5252, + DecorationSecondaryViewportRelativeNV = 5256, + DecorationPerPrimitiveNV = 5271, + DecorationPerViewNV = 5272, + DecorationPerTaskNV = 5273, + DecorationPerVertexNV = 5285, + DecorationNonUniform = 5300, + DecorationNonUniformEXT = 5300, + DecorationRestrictPointer = 5355, + DecorationRestrictPointerEXT = 5355, + DecorationAliasedPointer = 5356, + DecorationAliasedPointerEXT = 5356, + DecorationCounterBuffer = 5634, + DecorationHlslCounterBufferGOOGLE = 5634, + DecorationHlslSemanticGOOGLE = 5635, + DecorationUserSemantic = 5635, + DecorationUserTypeGOOGLE = 5636, + DecorationMax = 0x7fffffff, +}; + +enum BuiltIn { + BuiltInPosition = 0, + BuiltInPointSize = 1, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId = 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + 
BuiltInGlobalLinearId = 34, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, + BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, + BuiltInVertexIndex = 42, + BuiltInInstanceIndex = 43, + BuiltInSubgroupEqMask = 4416, + BuiltInSubgroupEqMaskKHR = 4416, + BuiltInSubgroupGeMask = 4417, + BuiltInSubgroupGeMaskKHR = 4417, + BuiltInSubgroupGtMask = 4418, + BuiltInSubgroupGtMaskKHR = 4418, + BuiltInSubgroupLeMask = 4419, + BuiltInSubgroupLeMaskKHR = 4419, + BuiltInSubgroupLtMask = 4420, + BuiltInSubgroupLtMaskKHR = 4420, + BuiltInBaseVertex = 4424, + BuiltInBaseInstance = 4425, + BuiltInDrawIndex = 4426, + BuiltInDeviceIndex = 4438, + BuiltInViewIndex = 4440, + BuiltInBaryCoordNoPerspAMD = 4992, + BuiltInBaryCoordNoPerspCentroidAMD = 4993, + BuiltInBaryCoordNoPerspSampleAMD = 4994, + BuiltInBaryCoordSmoothAMD = 4995, + BuiltInBaryCoordSmoothCentroidAMD = 4996, + BuiltInBaryCoordSmoothSampleAMD = 4997, + BuiltInBaryCoordPullModelAMD = 4998, + BuiltInFragStencilRefEXT = 5014, + BuiltInViewportMaskNV = 5253, + BuiltInSecondaryPositionNV = 5257, + BuiltInSecondaryViewportMaskNV = 5258, + BuiltInPositionPerViewNV = 5261, + BuiltInViewportMaskPerViewNV = 5262, + BuiltInFullyCoveredEXT = 5264, + BuiltInTaskCountNV = 5274, + BuiltInPrimitiveCountNV = 5275, + BuiltInPrimitiveIndicesNV = 5276, + BuiltInClipDistancePerViewNV = 5277, + BuiltInCullDistancePerViewNV = 5278, + BuiltInLayerPerViewNV = 5279, + BuiltInMeshViewCountNV = 5280, + BuiltInMeshViewIndicesNV = 5281, + BuiltInBaryCoordNV = 5286, + BuiltInBaryCoordNoPerspNV = 5287, + BuiltInFragSizeEXT = 5292, + BuiltInFragmentSizeNV = 5292, + BuiltInFragInvocationCountEXT = 5293, + BuiltInInvocationsPerPixelNV = 5293, + BuiltInLaunchIdKHR = 5319, + BuiltInLaunchIdNV = 5319, + BuiltInLaunchSizeKHR = 5320, + BuiltInLaunchSizeNV = 5320, + BuiltInWorldRayOriginKHR = 5321, + BuiltInWorldRayOriginNV = 5321, + BuiltInWorldRayDirectionKHR = 5322, + BuiltInWorldRayDirectionNV = 5322, + BuiltInObjectRayOriginKHR = 5323, + BuiltInObjectRayOriginNV = 5323, + BuiltInObjectRayDirectionKHR = 5324, + BuiltInObjectRayDirectionNV = 5324, + BuiltInRayTminKHR = 5325, + BuiltInRayTminNV = 5325, + BuiltInRayTmaxKHR = 5326, + BuiltInRayTmaxNV = 5326, + BuiltInInstanceCustomIndexKHR = 5327, + BuiltInInstanceCustomIndexNV = 5327, + BuiltInObjectToWorldKHR = 5330, + BuiltInObjectToWorldNV = 5330, + BuiltInWorldToObjectKHR = 5331, + BuiltInWorldToObjectNV = 5331, + BuiltInHitTKHR = 5332, + BuiltInHitTNV = 5332, + BuiltInHitKindKHR = 5333, + BuiltInHitKindNV = 5333, + BuiltInIncomingRayFlagsKHR = 5351, + BuiltInIncomingRayFlagsNV = 5351, + BuiltInRayGeometryIndexKHR = 5352, + BuiltInWarpsPerSMNV = 5374, + BuiltInSMCountNV = 5375, + BuiltInWarpIDNV = 5376, + BuiltInSMIDNV = 5377, + BuiltInMax = 0x7fffffff, +}; + +enum SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, + SelectionControlMax = 0x7fffffff, +}; + +enum SelectionControlMask { + SelectionControlMaskNone = 0, + SelectionControlFlattenMask = 0x00000001, + SelectionControlDontFlattenMask = 0x00000002, +}; + +enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, + LoopControlDependencyInfiniteShift = 2, + LoopControlDependencyLengthShift = 3, + LoopControlMinIterationsShift = 4, + LoopControlMaxIterationsShift = 5, + LoopControlIterationMultipleShift = 6, + LoopControlPeelCountShift = 7, + LoopControlPartialCountShift = 8, + LoopControlMax = 0x7fffffff, +}; 
+ +enum LoopControlMask { + LoopControlMaskNone = 0, + LoopControlUnrollMask = 0x00000001, + LoopControlDontUnrollMask = 0x00000002, + LoopControlDependencyInfiniteMask = 0x00000004, + LoopControlDependencyLengthMask = 0x00000008, + LoopControlMinIterationsMask = 0x00000010, + LoopControlMaxIterationsMask = 0x00000020, + LoopControlIterationMultipleMask = 0x00000040, + LoopControlPeelCountMask = 0x00000080, + LoopControlPartialCountMask = 0x00000100, +}; + +enum FunctionControlShift { + FunctionControlInlineShift = 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, + FunctionControlMax = 0x7fffffff, +}; + +enum FunctionControlMask { + FunctionControlMaskNone = 0, + FunctionControlInlineMask = 0x00000001, + FunctionControlDontInlineMask = 0x00000002, + FunctionControlPureMask = 0x00000004, + FunctionControlConstMask = 0x00000008, +}; + +enum MemorySemanticsShift { + MemorySemanticsAcquireShift = 1, + MemorySemanticsReleaseShift = 2, + MemorySemanticsAcquireReleaseShift = 3, + MemorySemanticsSequentiallyConsistentShift = 4, + MemorySemanticsUniformMemoryShift = 6, + MemorySemanticsSubgroupMemoryShift = 7, + MemorySemanticsWorkgroupMemoryShift = 8, + MemorySemanticsCrossWorkgroupMemoryShift = 9, + MemorySemanticsAtomicCounterMemoryShift = 10, + MemorySemanticsImageMemoryShift = 11, + MemorySemanticsOutputMemoryShift = 12, + MemorySemanticsOutputMemoryKHRShift = 12, + MemorySemanticsMakeAvailableShift = 13, + MemorySemanticsMakeAvailableKHRShift = 13, + MemorySemanticsMakeVisibleShift = 14, + MemorySemanticsMakeVisibleKHRShift = 14, + MemorySemanticsVolatileShift = 15, + MemorySemanticsMax = 0x7fffffff, +}; + +enum MemorySemanticsMask { + MemorySemanticsMaskNone = 0, + MemorySemanticsAcquireMask = 0x00000002, + MemorySemanticsReleaseMask = 0x00000004, + MemorySemanticsAcquireReleaseMask = 0x00000008, + MemorySemanticsSequentiallyConsistentMask = 0x00000010, + MemorySemanticsUniformMemoryMask = 0x00000040, + MemorySemanticsSubgroupMemoryMask = 0x00000080, + MemorySemanticsWorkgroupMemoryMask = 0x00000100, + MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + MemorySemanticsAtomicCounterMemoryMask = 0x00000400, + MemorySemanticsImageMemoryMask = 0x00000800, + MemorySemanticsOutputMemoryMask = 0x00001000, + MemorySemanticsOutputMemoryKHRMask = 0x00001000, + MemorySemanticsMakeAvailableMask = 0x00002000, + MemorySemanticsMakeAvailableKHRMask = 0x00002000, + MemorySemanticsMakeVisibleMask = 0x00004000, + MemorySemanticsMakeVisibleKHRMask = 0x00004000, + MemorySemanticsVolatileMask = 0x00008000, +}; + +enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, + MemoryAccessNontemporalShift = 2, + MemoryAccessMakePointerAvailableShift = 3, + MemoryAccessMakePointerAvailableKHRShift = 3, + MemoryAccessMakePointerVisibleShift = 4, + MemoryAccessMakePointerVisibleKHRShift = 4, + MemoryAccessNonPrivatePointerShift = 5, + MemoryAccessNonPrivatePointerKHRShift = 5, + MemoryAccessMax = 0x7fffffff, +}; + +enum MemoryAccessMask { + MemoryAccessMaskNone = 0, + MemoryAccessVolatileMask = 0x00000001, + MemoryAccessAlignedMask = 0x00000002, + MemoryAccessNontemporalMask = 0x00000004, + MemoryAccessMakePointerAvailableMask = 0x00000008, + MemoryAccessMakePointerAvailableKHRMask = 0x00000008, + MemoryAccessMakePointerVisibleMask = 0x00000010, + MemoryAccessMakePointerVisibleKHRMask = 0x00000010, + MemoryAccessNonPrivatePointerMask = 0x00000020, + MemoryAccessNonPrivatePointerKHRMask = 0x00000020, +}; + +enum Scope { + 
ScopeCrossDevice = 0, + ScopeDevice = 1, + ScopeWorkgroup = 2, + ScopeSubgroup = 3, + ScopeInvocation = 4, + ScopeQueueFamily = 5, + ScopeQueueFamilyKHR = 5, + ScopeShaderCallKHR = 6, + ScopeMax = 0x7fffffff, +}; + +enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, + GroupOperationClusteredReduce = 3, + GroupOperationPartitionedReduceNV = 6, + GroupOperationPartitionedInclusiveScanNV = 7, + GroupOperationPartitionedExclusiveScanNV = 8, + GroupOperationMax = 0x7fffffff, +}; + +enum KernelEnqueueFlags { + KernelEnqueueFlagsNoWait = 0, + KernelEnqueueFlagsWaitKernel = 1, + KernelEnqueueFlagsWaitWorkGroup = 2, + KernelEnqueueFlagsMax = 0x7fffffff, +}; + +enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, + KernelProfilingInfoMax = 0x7fffffff, +}; + +enum KernelProfilingInfoMask { + KernelProfilingInfoMaskNone = 0, + KernelProfilingInfoCmdExecTimeMask = 0x00000001, +}; + +enum Capability { + CapabilityMatrix = 0, + CapabilityShader = 1, + CapabilityGeometry = 2, + CapabilityTessellation = 3, + CapabilityAddresses = 4, + CapabilityLinkage = 5, + CapabilityKernel = 6, + CapabilityVector16 = 7, + CapabilityFloat16Buffer = 8, + CapabilityFloat16 = 9, + CapabilityFloat64 = 10, + CapabilityInt64 = 11, + CapabilityInt64Atomics = 12, + CapabilityImageBasic = 13, + CapabilityImageReadWrite = 14, + CapabilityImageMipmap = 15, + CapabilityPipes = 17, + CapabilityGroups = 18, + CapabilityDeviceEnqueue = 19, + CapabilityLiteralSampler = 20, + CapabilityAtomicStorage = 21, + CapabilityInt16 = 22, + CapabilityTessellationPointSize = 23, + CapabilityGeometryPointSize = 24, + CapabilityImageGatherExtended = 25, + CapabilityStorageImageMultisample = 27, + CapabilityUniformBufferArrayDynamicIndexing = 28, + CapabilitySampledImageArrayDynamicIndexing = 29, + CapabilityStorageBufferArrayDynamicIndexing = 30, + CapabilityStorageImageArrayDynamicIndexing = 31, + CapabilityClipDistance = 32, + CapabilityCullDistance = 33, + CapabilityImageCubeArray = 34, + CapabilitySampleRateShading = 35, + CapabilityImageRect = 36, + CapabilitySampledRect = 37, + CapabilityGenericPointer = 38, + CapabilityInt8 = 39, + CapabilityInputAttachment = 40, + CapabilitySparseResidency = 41, + CapabilityMinLod = 42, + CapabilitySampled1D = 43, + CapabilityImage1D = 44, + CapabilitySampledCubeArray = 45, + CapabilitySampledBuffer = 46, + CapabilityImageBuffer = 47, + CapabilityImageMSArray = 48, + CapabilityStorageImageExtendedFormats = 49, + CapabilityImageQuery = 50, + CapabilityDerivativeControl = 51, + CapabilityInterpolationFunction = 52, + CapabilityTransformFeedback = 53, + CapabilityGeometryStreams = 54, + CapabilityStorageImageReadWithoutFormat = 55, + CapabilityStorageImageWriteWithoutFormat = 56, + CapabilityMultiViewport = 57, + CapabilitySubgroupDispatch = 58, + CapabilityNamedBarrier = 59, + CapabilityPipeStorage = 60, + CapabilityGroupNonUniform = 61, + CapabilityGroupNonUniformVote = 62, + CapabilityGroupNonUniformArithmetic = 63, + CapabilityGroupNonUniformBallot = 64, + CapabilityGroupNonUniformShuffle = 65, + CapabilityGroupNonUniformShuffleRelative = 66, + CapabilityGroupNonUniformClustered = 67, + CapabilityGroupNonUniformQuad = 68, + CapabilityShaderLayer = 69, + CapabilityShaderViewportIndex = 70, + CapabilitySubgroupBallotKHR = 4423, + CapabilityDrawParameters = 4427, + CapabilitySubgroupVoteKHR = 4431, + CapabilityStorageBuffer16BitAccess = 4433, + CapabilityStorageUniformBufferBlock16 = 4433, + CapabilityStorageUniform16 = 
4434, + CapabilityUniformAndStorageBuffer16BitAccess = 4434, + CapabilityStoragePushConstant16 = 4435, + CapabilityStorageInputOutput16 = 4436, + CapabilityDeviceGroup = 4437, + CapabilityMultiView = 4439, + CapabilityVariablePointersStorageBuffer = 4441, + CapabilityVariablePointers = 4442, + CapabilityAtomicStorageOps = 4445, + CapabilitySampleMaskPostDepthCoverage = 4447, + CapabilityStorageBuffer8BitAccess = 4448, + CapabilityUniformAndStorageBuffer8BitAccess = 4449, + CapabilityStoragePushConstant8 = 4450, + CapabilityDenormPreserve = 4464, + CapabilityDenormFlushToZero = 4465, + CapabilitySignedZeroInfNanPreserve = 4466, + CapabilityRoundingModeRTE = 4467, + CapabilityRoundingModeRTZ = 4468, + CapabilityRayQueryProvisionalKHR = 4471, + CapabilityRayTraversalPrimitiveCullingProvisionalKHR = 4478, + CapabilityFloat16ImageAMD = 5008, + CapabilityImageGatherBiasLodAMD = 5009, + CapabilityFragmentMaskAMD = 5010, + CapabilityStencilExportEXT = 5013, + CapabilityImageReadWriteLodAMD = 5015, + CapabilityShaderClockKHR = 5055, + CapabilitySampleMaskOverrideCoverageNV = 5249, + CapabilityGeometryShaderPassthroughNV = 5251, + CapabilityShaderViewportIndexLayerEXT = 5254, + CapabilityShaderViewportIndexLayerNV = 5254, + CapabilityShaderViewportMaskNV = 5255, + CapabilityShaderStereoViewNV = 5259, + CapabilityPerViewAttributesNV = 5260, + CapabilityFragmentFullyCoveredEXT = 5265, + CapabilityMeshShadingNV = 5266, + CapabilityImageFootprintNV = 5282, + CapabilityFragmentBarycentricNV = 5284, + CapabilityComputeDerivativeGroupQuadsNV = 5288, + CapabilityFragmentDensityEXT = 5291, + CapabilityShadingRateNV = 5291, + CapabilityGroupNonUniformPartitionedNV = 5297, + CapabilityShaderNonUniform = 5301, + CapabilityShaderNonUniformEXT = 5301, + CapabilityRuntimeDescriptorArray = 5302, + CapabilityRuntimeDescriptorArrayEXT = 5302, + CapabilityInputAttachmentArrayDynamicIndexing = 5303, + CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + CapabilityUniformTexelBufferArrayDynamicIndexing = 5304, + CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + CapabilityStorageTexelBufferArrayDynamicIndexing = 5305, + CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + CapabilityUniformBufferArrayNonUniformIndexing = 5306, + CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + CapabilitySampledImageArrayNonUniformIndexing = 5307, + CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + CapabilityStorageBufferArrayNonUniformIndexing = 5308, + CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + CapabilityStorageImageArrayNonUniformIndexing = 5309, + CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + CapabilityInputAttachmentArrayNonUniformIndexing = 5310, + CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + CapabilityUniformTexelBufferArrayNonUniformIndexing = 5311, + CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + CapabilityStorageTexelBufferArrayNonUniformIndexing = 5312, + CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + CapabilityRayTracingNV = 5340, + CapabilityVulkanMemoryModel = 5345, + CapabilityVulkanMemoryModelKHR = 5345, + CapabilityVulkanMemoryModelDeviceScope = 5346, + CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + CapabilityPhysicalStorageBufferAddresses = 5347, + CapabilityPhysicalStorageBufferAddressesEXT = 5347, + CapabilityComputeDerivativeGroupLinearNV = 5350, + CapabilityRayTracingProvisionalKHR = 5353, + CapabilityCooperativeMatrixNV = 5357, + CapabilityFragmentShaderSampleInterlockEXT = 5363, + 
CapabilityFragmentShaderShadingRateInterlockEXT = 5372, + CapabilityShaderSMBuiltinsNV = 5373, + CapabilityFragmentShaderPixelInterlockEXT = 5378, + CapabilityDemoteToHelperInvocationEXT = 5379, + CapabilitySubgroupShuffleINTEL = 5568, + CapabilitySubgroupBufferBlockIOINTEL = 5569, + CapabilitySubgroupImageBlockIOINTEL = 5570, + CapabilitySubgroupImageMediaBlockIOINTEL = 5579, + CapabilityIntegerFunctions2INTEL = 5584, + CapabilitySubgroupAvcMotionEstimationINTEL = 5696, + CapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697, + CapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698, + CapabilityMax = 0x7fffffff, +}; + +enum RayFlagsShift { + RayFlagsOpaqueKHRShift = 0, + RayFlagsNoOpaqueKHRShift = 1, + RayFlagsTerminateOnFirstHitKHRShift = 2, + RayFlagsSkipClosestHitShaderKHRShift = 3, + RayFlagsCullBackFacingTrianglesKHRShift = 4, + RayFlagsCullFrontFacingTrianglesKHRShift = 5, + RayFlagsCullOpaqueKHRShift = 6, + RayFlagsCullNoOpaqueKHRShift = 7, + RayFlagsSkipTrianglesKHRShift = 8, + RayFlagsSkipAABBsKHRShift = 9, + RayFlagsMax = 0x7fffffff, +}; + +enum RayFlagsMask { + RayFlagsMaskNone = 0, + RayFlagsOpaqueKHRMask = 0x00000001, + RayFlagsNoOpaqueKHRMask = 0x00000002, + RayFlagsTerminateOnFirstHitKHRMask = 0x00000004, + RayFlagsSkipClosestHitShaderKHRMask = 0x00000008, + RayFlagsCullBackFacingTrianglesKHRMask = 0x00000010, + RayFlagsCullFrontFacingTrianglesKHRMask = 0x00000020, + RayFlagsCullOpaqueKHRMask = 0x00000040, + RayFlagsCullNoOpaqueKHRMask = 0x00000080, + RayFlagsSkipTrianglesKHRMask = 0x00000100, + RayFlagsSkipAABBsKHRMask = 0x00000200, +}; + +enum RayQueryIntersection { + RayQueryIntersectionRayQueryCandidateIntersectionKHR = 0, + RayQueryIntersectionRayQueryCommittedIntersectionKHR = 1, + RayQueryIntersectionMax = 0x7fffffff, +}; + +enum RayQueryCommittedIntersectionType { + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionNoneKHR = 0, + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionTriangleKHR = 1, + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionGeneratedKHR = 2, + RayQueryCommittedIntersectionTypeMax = 0x7fffffff, +}; + +enum RayQueryCandidateIntersectionType { + RayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionTriangleKHR = 0, + RayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionAABBKHR = 1, + RayQueryCandidateIntersectionTypeMax = 0x7fffffff, +}; + +enum Op { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + 
OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + 
OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 
350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpCopyLogical = 400, + OpPtrEqual = 401, + OpPtrNotEqual = 402, + OpPtrDiff = 403, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpSubgroupReadInvocationKHR = 4432, + OpTypeRayQueryProvisionalKHR = 4472, + OpRayQueryInitializeKHR = 4473, + OpRayQueryTerminateKHR = 4474, + OpRayQueryGenerateIntersectionKHR = 4475, + OpRayQueryConfirmIntersectionKHR = 4476, + OpRayQueryProceedKHR = 4477, + OpRayQueryGetIntersectionTypeKHR = 4479, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpReadClockKHR = 5056, + OpImageSampleFootprintNV = 5283, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionKHR = 5334, + OpReportIntersectionNV = 5334, + OpIgnoreIntersectionKHR = 5335, + OpIgnoreIntersectionNV = 5335, + OpTerminateRayKHR = 5336, + OpTerminateRayNV = 5336, + OpTraceNV = 5337, + OpTraceRayKHR = 5337, + OpTypeAccelerationStructureKHR = 5341, + OpTypeAccelerationStructureNV = 5341, + OpExecuteCallableKHR = 5344, + OpExecuteCallableNV = 5344, + OpTypeCooperativeMatrixNV = 5358, + OpCooperativeMatrixLoadNV = 5359, + OpCooperativeMatrixStoreNV = 5360, + OpCooperativeMatrixMulAddNV = 5361, + OpCooperativeMatrixLengthNV = 5362, + OpBeginInvocationInterlockEXT = 5364, + OpEndInvocationInterlockEXT = 5365, + OpDemoteToHelperInvocationEXT = 5380, + OpIsHelperInvocationEXT = 5381, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpSubgroupImageMediaBlockReadINTEL = 5580, + OpSubgroupImageMediaBlockWriteINTEL = 5581, + OpUCountLeadingZerosINTEL = 5585, + OpUCountTrailingZerosINTEL = 5586, + OpAbsISubINTEL = 5587, + OpAbsUSubINTEL = 5588, + OpIAddSatINTEL = 5589, + OpUAddSatINTEL = 5590, + OpIAverageINTEL = 5591, + OpUAverageINTEL = 5592, + OpIAverageRoundedINTEL = 5593, + OpUAverageRoundedINTEL = 5594, + OpISubSatINTEL = 5595, + OpUSubSatINTEL = 5596, + OpIMul32x16INTEL = 5597, + OpUMul32x16INTEL = 5598, + OpDecorateString = 5632, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateString = 5633, + OpMemberDecorateStringGOOGLE = 5633, + OpVmeImageINTEL = 5699, + OpTypeVmeImageINTEL = 5700, + OpTypeAvcImePayloadINTEL = 5701, + OpTypeAvcRefPayloadINTEL = 5702, + OpTypeAvcSicPayloadINTEL = 5703, + OpTypeAvcMcePayloadINTEL = 5704, + OpTypeAvcMceResultINTEL = 5705, + OpTypeAvcImeResultINTEL = 5706, + OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707, + 
OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708, + OpTypeAvcImeSingleReferenceStreaminINTEL = 5709, + OpTypeAvcImeDualReferenceStreaminINTEL = 5710, + OpTypeAvcRefResultINTEL = 5711, + OpTypeAvcSicResultINTEL = 5712, + OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713, + OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714, + OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715, + OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716, + OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717, + OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718, + OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719, + OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720, + OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721, + OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722, + OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723, + OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724, + OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725, + OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726, + OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727, + OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728, + OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729, + OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730, + OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731, + OpSubgroupAvcMceConvertToImePayloadINTEL = 5732, + OpSubgroupAvcMceConvertToImeResultINTEL = 5733, + OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734, + OpSubgroupAvcMceConvertToRefResultINTEL = 5735, + OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736, + OpSubgroupAvcMceConvertToSicResultINTEL = 5737, + OpSubgroupAvcMceGetMotionVectorsINTEL = 5738, + OpSubgroupAvcMceGetInterDistortionsINTEL = 5739, + OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740, + OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741, + OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742, + OpSubgroupAvcMceGetInterDirectionsINTEL = 5743, + OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744, + OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745, + OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746, + OpSubgroupAvcImeInitializeINTEL = 5747, + OpSubgroupAvcImeSetSingleReferenceINTEL = 5748, + OpSubgroupAvcImeSetDualReferenceINTEL = 5749, + OpSubgroupAvcImeRefWindowSizeINTEL = 5750, + OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751, + OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752, + OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753, + OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754, + OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755, + OpSubgroupAvcImeSetWeightedSadINTEL = 5756, + OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757, + OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759, + OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761, + OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763, + OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764, + OpSubgroupAvcImeConvertToMceResultINTEL = 5765, + OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766, + OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767, + OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768, + OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769, + 
OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775, + OpSubgroupAvcImeGetBorderReachedINTEL = 5776, + OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777, + OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778, + OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779, + OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780, + OpSubgroupAvcFmeInitializeINTEL = 5781, + OpSubgroupAvcBmeInitializeINTEL = 5782, + OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783, + OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784, + OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785, + OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786, + OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787, + OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788, + OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789, + OpSubgroupAvcRefConvertToMceResultINTEL = 5790, + OpSubgroupAvcSicInitializeINTEL = 5791, + OpSubgroupAvcSicConfigureSkcINTEL = 5792, + OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793, + OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794, + OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795, + OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796, + OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797, + OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798, + OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799, + OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800, + OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801, + OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802, + OpSubgroupAvcSicEvaluateIpeINTEL = 5803, + OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804, + OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805, + OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806, + OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807, + OpSubgroupAvcSicConvertToMceResultINTEL = 5808, + OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809, + OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810, + OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811, + OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812, + OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813, + OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814, + OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815, + OpSubgroupAvcSicGetInterRawSadsINTEL = 5816, + OpRayQueryGetRayTMinKHR = 6016, + OpRayQueryGetRayFlagsKHR = 6017, + OpRayQueryGetIntersectionTKHR = 6018, + OpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019, + OpRayQueryGetIntersectionInstanceIdKHR = 6020, + OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021, + OpRayQueryGetIntersectionGeometryIndexKHR = 6022, + OpRayQueryGetIntersectionPrimitiveIndexKHR = 6023, + OpRayQueryGetIntersectionBarycentricsKHR = 6024, + OpRayQueryGetIntersectionFrontFaceKHR = 6025, + OpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026, + OpRayQueryGetIntersectionObjectRayDirectionKHR = 6027, + OpRayQueryGetIntersectionObjectRayOriginKHR = 6028, + OpRayQueryGetWorldRayDirectionKHR = 6029, + OpRayQueryGetWorldRayOriginKHR = 6030, + OpRayQueryGetIntersectionObjectToWorldKHR = 6031, + 
OpRayQueryGetIntersectionWorldToObjectKHR = 6032, + OpMax = 0x7fffffff, +}; + +#ifdef SPV_ENABLE_UTILITY_CODE +inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) { + *hasResult = *hasResultType = false; + switch (opcode) { + default: /* unknown opcode */ break; + case OpNop: *hasResult = false; *hasResultType = false; break; + case OpUndef: *hasResult = true; *hasResultType = true; break; + case OpSourceContinued: *hasResult = false; *hasResultType = false; break; + case OpSource: *hasResult = false; *hasResultType = false; break; + case OpSourceExtension: *hasResult = false; *hasResultType = false; break; + case OpName: *hasResult = false; *hasResultType = false; break; + case OpMemberName: *hasResult = false; *hasResultType = false; break; + case OpString: *hasResult = true; *hasResultType = false; break; + case OpLine: *hasResult = false; *hasResultType = false; break; + case OpExtension: *hasResult = false; *hasResultType = false; break; + case OpExtInstImport: *hasResult = true; *hasResultType = false; break; + case OpExtInst: *hasResult = true; *hasResultType = true; break; + case OpMemoryModel: *hasResult = false; *hasResultType = false; break; + case OpEntryPoint: *hasResult = false; *hasResultType = false; break; + case OpExecutionMode: *hasResult = false; *hasResultType = false; break; + case OpCapability: *hasResult = false; *hasResultType = false; break; + case OpTypeVoid: *hasResult = true; *hasResultType = false; break; + case OpTypeBool: *hasResult = true; *hasResultType = false; break; + case OpTypeInt: *hasResult = true; *hasResultType = false; break; + case OpTypeFloat: *hasResult = true; *hasResultType = false; break; + case OpTypeVector: *hasResult = true; *hasResultType = false; break; + case OpTypeMatrix: *hasResult = true; *hasResultType = false; break; + case OpTypeImage: *hasResult = true; *hasResultType = false; break; + case OpTypeSampler: *hasResult = true; *hasResultType = false; break; + case OpTypeSampledImage: *hasResult = true; *hasResultType = false; break; + case OpTypeArray: *hasResult = true; *hasResultType = false; break; + case OpTypeRuntimeArray: *hasResult = true; *hasResultType = false; break; + case OpTypeStruct: *hasResult = true; *hasResultType = false; break; + case OpTypeOpaque: *hasResult = true; *hasResultType = false; break; + case OpTypePointer: *hasResult = true; *hasResultType = false; break; + case OpTypeFunction: *hasResult = true; *hasResultType = false; break; + case OpTypeEvent: *hasResult = true; *hasResultType = false; break; + case OpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break; + case OpTypeReserveId: *hasResult = true; *hasResultType = false; break; + case OpTypeQueue: *hasResult = true; *hasResultType = false; break; + case OpTypePipe: *hasResult = true; *hasResultType = false; break; + case OpTypeForwardPointer: *hasResult = false; *hasResultType = false; break; + case OpConstantTrue: *hasResult = true; *hasResultType = true; break; + case OpConstantFalse: *hasResult = true; *hasResultType = true; break; + case OpConstant: *hasResult = true; *hasResultType = true; break; + case OpConstantComposite: *hasResult = true; *hasResultType = true; break; + case OpConstantSampler: *hasResult = true; *hasResultType = true; break; + case OpConstantNull: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantTrue: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantFalse: *hasResult = true; *hasResultType = true; break; + case OpSpecConstant: *hasResult = 
true; *hasResultType = true; break; + case OpSpecConstantComposite: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantOp: *hasResult = true; *hasResultType = true; break; + case OpFunction: *hasResult = true; *hasResultType = true; break; + case OpFunctionParameter: *hasResult = true; *hasResultType = true; break; + case OpFunctionEnd: *hasResult = false; *hasResultType = false; break; + case OpFunctionCall: *hasResult = true; *hasResultType = true; break; + case OpVariable: *hasResult = true; *hasResultType = true; break; + case OpImageTexelPointer: *hasResult = true; *hasResultType = true; break; + case OpLoad: *hasResult = true; *hasResultType = true; break; + case OpStore: *hasResult = false; *hasResultType = false; break; + case OpCopyMemory: *hasResult = false; *hasResultType = false; break; + case OpCopyMemorySized: *hasResult = false; *hasResultType = false; break; + case OpAccessChain: *hasResult = true; *hasResultType = true; break; + case OpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break; + case OpPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case OpArrayLength: *hasResult = true; *hasResultType = true; break; + case OpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break; + case OpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case OpDecorate: *hasResult = false; *hasResultType = false; break; + case OpMemberDecorate: *hasResult = false; *hasResultType = false; break; + case OpDecorationGroup: *hasResult = true; *hasResultType = false; break; + case OpGroupDecorate: *hasResult = false; *hasResultType = false; break; + case OpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break; + case OpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break; + case OpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break; + case OpVectorShuffle: *hasResult = true; *hasResultType = true; break; + case OpCompositeConstruct: *hasResult = true; *hasResultType = true; break; + case OpCompositeExtract: *hasResult = true; *hasResultType = true; break; + case OpCompositeInsert: *hasResult = true; *hasResultType = true; break; + case OpCopyObject: *hasResult = true; *hasResultType = true; break; + case OpTranspose: *hasResult = true; *hasResultType = true; break; + case OpSampledImage: *hasResult = true; *hasResultType = true; break; + case OpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageFetch: *hasResult = true; *hasResultType = true; break; + case OpImageGather: *hasResult = true; *hasResultType = true; break; + case OpImageDrefGather: *hasResult = true; *hasResultType = true; break; + case OpImageRead: *hasResult = true; *hasResultType = true; break; + case OpImageWrite: *hasResult = false; *hasResultType = false; break; + case OpImage: *hasResult = true; *hasResultType = true; break; + case OpImageQueryFormat: 
*hasResult = true; *hasResultType = true; break; + case OpImageQueryOrder: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySize: *hasResult = true; *hasResultType = true; break; + case OpImageQueryLod: *hasResult = true; *hasResultType = true; break; + case OpImageQueryLevels: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySamples: *hasResult = true; *hasResultType = true; break; + case OpConvertFToU: *hasResult = true; *hasResultType = true; break; + case OpConvertFToS: *hasResult = true; *hasResultType = true; break; + case OpConvertSToF: *hasResult = true; *hasResultType = true; break; + case OpConvertUToF: *hasResult = true; *hasResultType = true; break; + case OpUConvert: *hasResult = true; *hasResultType = true; break; + case OpSConvert: *hasResult = true; *hasResultType = true; break; + case OpFConvert: *hasResult = true; *hasResultType = true; break; + case OpQuantizeToF16: *hasResult = true; *hasResultType = true; break; + case OpConvertPtrToU: *hasResult = true; *hasResultType = true; break; + case OpSatConvertSToU: *hasResult = true; *hasResultType = true; break; + case OpSatConvertUToS: *hasResult = true; *hasResultType = true; break; + case OpConvertUToPtr: *hasResult = true; *hasResultType = true; break; + case OpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break; + case OpGenericCastToPtr: *hasResult = true; *hasResultType = true; break; + case OpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break; + case OpBitcast: *hasResult = true; *hasResultType = true; break; + case OpSNegate: *hasResult = true; *hasResultType = true; break; + case OpFNegate: *hasResult = true; *hasResultType = true; break; + case OpIAdd: *hasResult = true; *hasResultType = true; break; + case OpFAdd: *hasResult = true; *hasResultType = true; break; + case OpISub: *hasResult = true; *hasResultType = true; break; + case OpFSub: *hasResult = true; *hasResultType = true; break; + case OpIMul: *hasResult = true; *hasResultType = true; break; + case OpFMul: *hasResult = true; *hasResultType = true; break; + case OpUDiv: *hasResult = true; *hasResultType = true; break; + case OpSDiv: *hasResult = true; *hasResultType = true; break; + case OpFDiv: *hasResult = true; *hasResultType = true; break; + case OpUMod: *hasResult = true; *hasResultType = true; break; + case OpSRem: *hasResult = true; *hasResultType = true; break; + case OpSMod: *hasResult = true; *hasResultType = true; break; + case OpFRem: *hasResult = true; *hasResultType = true; break; + case OpFMod: *hasResult = true; *hasResultType = true; break; + case OpVectorTimesScalar: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesScalar: *hasResult = true; *hasResultType = true; break; + case OpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesVector: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break; + case OpOuterProduct: *hasResult = true; *hasResultType = true; break; + case OpDot: *hasResult = true; *hasResultType = true; break; + case OpIAddCarry: *hasResult = true; *hasResultType = true; break; + case OpISubBorrow: *hasResult = true; *hasResultType = true; break; + case OpUMulExtended: *hasResult = true; *hasResultType = true; break; + case OpSMulExtended: *hasResult = true; *hasResultType = true; break; + case OpAny: *hasResult = true; *hasResultType = true; break; 
+    case OpAll: *hasResult = true; *hasResultType = true; break;
+    case OpIsNan: *hasResult = true; *hasResultType = true; break;
+    case OpIsInf: *hasResult = true; *hasResultType = true; break;
+    case OpIsFinite: *hasResult = true; *hasResultType = true; break;
+    case OpIsNormal: *hasResult = true; *hasResultType = true; break;
+    case OpSignBitSet: *hasResult = true; *hasResultType = true; break;
+    case OpLessOrGreater: *hasResult = true; *hasResultType = true; break;
+    case OpOrdered: *hasResult = true; *hasResultType = true; break;
+    case OpUnordered: *hasResult = true; *hasResultType = true; break;
+    case OpLogicalEqual: *hasResult = true; *hasResultType = true; break;
+    case OpLogicalNotEqual: *hasResult = true; *hasResultType = true; break;
+    case OpLogicalOr: *hasResult = true; *hasResultType = true; break;
+    case OpLogicalAnd: *hasResult = true; *hasResultType = true; break;
+    case OpLogicalNot: *hasResult = true; *hasResultType = true; break;
+    case OpSelect: *hasResult = true; *hasResultType = true; break;
+    case OpIEqual: *hasResult = true; *hasResultType = true; break;
+    case OpINotEqual: *hasResult = true; *hasResultType = true; break;
+    case OpUGreaterThan: *hasResult = true; *hasResultType = true; break;
+    case OpSGreaterThan: *hasResult = true; *hasResultType = true; break;
+    case OpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpSGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpULessThan: *hasResult = true; *hasResultType = true; break;
+    case OpSLessThan: *hasResult = true; *hasResultType = true; break;
+    case OpULessThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpSLessThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFOrdEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFOrdNotEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordNotEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFOrdLessThan: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordLessThan: *hasResult = true; *hasResultType = true; break;
+    case OpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break;
+    case OpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+    case OpShiftRightLogical: *hasResult = true; *hasResultType = true; break;
+    case OpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break;
+    case OpShiftLeftLogical: *hasResult = true; *hasResultType = true; break;
+    case OpBitwiseOr: *hasResult = true; *hasResultType = true; break;
+    case OpBitwiseXor: *hasResult = true; *hasResultType = true; break;
+    case OpBitwiseAnd: *hasResult = true; *hasResultType = true; break;
+    case OpNot: *hasResult = true; *hasResultType = true; break;
+    case OpBitFieldInsert: *hasResult = true; *hasResultType = true; break;
+    case OpBitFieldSExtract: *hasResult = true; *hasResultType = true; break;
+    case OpBitFieldUExtract: *hasResult = true; *hasResultType = true; break;
+    case OpBitReverse: *hasResult = true; *hasResultType = true; break;
+    case OpBitCount: *hasResult = true; *hasResultType = true; break;
+    case OpDPdx: *hasResult = true; *hasResultType = true; break;
+    case OpDPdy: *hasResult = true; *hasResultType = true; break;
+    case OpFwidth: *hasResult = true; *hasResultType = true; break;
+    case OpDPdxFine: *hasResult = true; *hasResultType = true; break;
+    case OpDPdyFine: *hasResult = true; *hasResultType = true; break;
+    case OpFwidthFine: *hasResult = true; *hasResultType = true; break;
+    case OpDPdxCoarse: *hasResult = true; *hasResultType = true; break;
+    case OpDPdyCoarse: *hasResult = true; *hasResultType = true; break;
+    case OpFwidthCoarse: *hasResult = true; *hasResultType = true; break;
+    case OpEmitVertex: *hasResult = false; *hasResultType = false; break;
+    case OpEndPrimitive: *hasResult = false; *hasResultType = false; break;
+    case OpEmitStreamVertex: *hasResult = false; *hasResultType = false; break;
+    case OpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break;
+    case OpControlBarrier: *hasResult = false; *hasResultType = false; break;
+    case OpMemoryBarrier: *hasResult = false; *hasResultType = false; break;
+    case OpAtomicLoad: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicStore: *hasResult = false; *hasResultType = false; break;
+    case OpAtomicExchange: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicIIncrement: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicIDecrement: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicIAdd: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicISub: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicSMin: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicUMin: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicSMax: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicUMax: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicAnd: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicOr: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicXor: *hasResult = true; *hasResultType = true; break;
+    case OpPhi: *hasResult = true; *hasResultType = true; break;
+    case OpLoopMerge: *hasResult = false; *hasResultType = false; break;
+    case OpSelectionMerge: *hasResult = false; *hasResultType = false; break;
+    case OpLabel: *hasResult = true; *hasResultType = false; break;
+    case OpBranch: *hasResult = false; *hasResultType = false; break;
+    case OpBranchConditional: *hasResult = false; *hasResultType = false; break;
+    case OpSwitch: *hasResult = false; *hasResultType = false; break;
+    case OpKill: *hasResult = false; *hasResultType = false; break;
+    case OpReturn: *hasResult = false; *hasResultType = false; break;
+    case OpReturnValue: *hasResult = false; *hasResultType = false; break;
+    case OpUnreachable: *hasResult = false; *hasResultType = false; break;
+    case OpLifetimeStart: *hasResult = false; *hasResultType = false; break;
+    case OpLifetimeStop: *hasResult = false; *hasResultType = false; break;
+    case OpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break;
+    case OpGroupWaitEvents: *hasResult = false; *hasResultType = false; break;
+    case OpGroupAll: *hasResult = true; *hasResultType = true; break;
+    case OpGroupAny: *hasResult = true; *hasResultType = true; break;
+    case OpGroupBroadcast: *hasResult = true; *hasResultType = true; break;
+    case OpGroupIAdd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFAdd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupUMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupSMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupUMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupSMax: *hasResult = true; *hasResultType = true; break;
+    case OpReadPipe: *hasResult = true; *hasResultType = true; break;
+    case OpWritePipe: *hasResult = true; *hasResultType = true; break;
+    case OpReservedReadPipe: *hasResult = true; *hasResultType = true; break;
+    case OpReservedWritePipe: *hasResult = true; *hasResultType = true; break;
+    case OpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpCommitReadPipe: *hasResult = false; *hasResultType = false; break;
+    case OpCommitWritePipe: *hasResult = false; *hasResultType = false; break;
+    case OpIsValidReserveId: *hasResult = true; *hasResultType = true; break;
+    case OpGetNumPipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break;
+    case OpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break;
+    case OpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break;
+    case OpEnqueueMarker: *hasResult = true; *hasResultType = true; break;
+    case OpEnqueueKernel: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break;
+    case OpRetainEvent: *hasResult = false; *hasResultType = false; break;
+    case OpReleaseEvent: *hasResult = false; *hasResultType = false; break;
+    case OpCreateUserEvent: *hasResult = true; *hasResultType = true; break;
+    case OpIsValidEvent: *hasResult = true; *hasResultType = true; break;
+    case OpSetUserEventStatus: *hasResult = false; *hasResultType = false; break;
+    case OpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break;
+    case OpGetDefaultQueue: *hasResult = true; *hasResultType = true; break;
+    case OpBuildNDRange: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseFetch: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseGather: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break;
+    case OpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break;
+    case OpNoLine: *hasResult = false; *hasResultType = false; break;
+    case OpAtomicFlagTestAndSet: *hasResult = true; *hasResultType = true; break;
+    case OpAtomicFlagClear: *hasResult = false; *hasResultType = false; break;
+    case OpImageSparseRead: *hasResult = true; *hasResultType = true; break;
+    case OpSizeOf: *hasResult = true; *hasResultType = true; break;
+    case OpTypePipeStorage: *hasResult = true; *hasResultType = false; break;
+    case OpConstantPipeStorage: *hasResult = true; *hasResultType = true; break;
+    case OpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break;
+    case OpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break;
+    case OpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break;
+    case OpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break;
+    case OpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break;
+    case OpModuleProcessed: *hasResult = false; *hasResultType = false; break;
+    case OpExecutionModeId: *hasResult = false; *hasResultType = false; break;
+    case OpDecorateId: *hasResult = false; *hasResultType = false; break;
+    case OpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformIAdd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; break;
+    case OpCopyLogical: *hasResult = true; *hasResultType = true; break;
+    case OpPtrEqual: *hasResult = true; *hasResultType = true; break;
+    case OpPtrNotEqual: *hasResult = true; *hasResultType = true; break;
+    case OpPtrDiff: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break;
+    case OpTypeRayQueryProvisionalKHR: *hasResult = true; *hasResultType = false; break;
+    case OpRayQueryInitializeKHR: *hasResult = false; *hasResultType = false; break;
+    case OpRayQueryTerminateKHR: *hasResult = false; *hasResultType = false; break;
+    case OpRayQueryGenerateIntersectionKHR: *hasResult = false; *hasResultType = false; break;
+    case OpRayQueryConfirmIntersectionKHR: *hasResult = false; *hasResultType = false; break;
+    case OpRayQueryProceedKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionTypeKHR: *hasResult = true; *hasResultType = true; break;
+    case OpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+    case OpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break;
+    case OpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break;
+    case OpReadClockKHR: *hasResult = true; *hasResultType = true; break;
+    case OpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break;
+    case OpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break;
+    case OpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break;
+    case OpReportIntersectionNV: *hasResult = true; *hasResultType = true; break;
+    case OpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break;
+    case OpTerminateRayNV: *hasResult = false; *hasResultType = false; break;
+    case OpTraceNV: *hasResult = false; *hasResultType = false; break;
+    case OpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break;
+    case OpExecuteCallableNV: *hasResult = false; *hasResultType = false; break;
+    case OpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break;
+    case OpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break;
+    case OpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break;
+    case OpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break;
+    case OpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break;
+    case OpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break;
+    case OpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break;
+    case OpDemoteToHelperInvocationEXT: *hasResult = false; *hasResultType = false; break;
+    case OpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupShuffleXorINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+    case OpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+    case OpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+    case OpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpAbsISubINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpIAddSatINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUAddSatINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpIAverageINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUAverageINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpISubSatINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUSubSatINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break;
+    case OpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break;
+    case OpDecorateString: *hasResult = false; *hasResultType = false; break;
+    case OpMemberDecorateString: *hasResult = false; *hasResultType = false; break;
+    case OpVmeImageINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break;
+    case OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToRefResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetRayTMinKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetRayFlagsKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionTKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionInstanceCustomIndexKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionInstanceIdKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionGeometryIndexKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionPrimitiveIndexKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionBarycentricsKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionFrontFaceKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionCandidateAABBOpaqueKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionObjectRayDirectionKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionObjectRayOriginKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetWorldRayDirectionKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetWorldRayOriginKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionObjectToWorldKHR: *hasResult = true; *hasResultType = true; break;
+    case OpRayQueryGetIntersectionWorldToObjectKHR: *hasResult = true; *hasResultType = true; break;
+    }
+}
+#endif /* SPV_ENABLE_UTILITY_CODE */
+
+// Overload operator| for mask bit combining
+
+inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); }
+inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); }
+inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); }
+inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); }
+inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); }
+inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); }
+inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); }
+inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); }
+inline RayFlagsMask operator|(RayFlagsMask a, RayFlagsMask b) { return RayFlagsMask(unsigned(a) | unsigned(b)); }
+
+} // end namespace spv
+
+#endif // #ifndef spirv_HPP
+
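That is the end of the vendored spirv.hpp: the `SPV_ENABLE_UTILITY_CODE` switch answers, per opcode, whether an instruction produces a result id and a result-type id, and the `operator|` overloads make the scoped mask enums combinable without casts. A minimal sketch of how a consumer might use both (the include path and `main()` scaffolding are assumptions for illustration):

```cpp
// Must be defined before the header so HasResultAndType is compiled in.
#define SPV_ENABLE_UTILITY_CODE
#include "SPIRV/spirv.hpp"

#include <cstdio>

int main()
{
    // operator| keeps the scoped mask type, so no unsigned casts are needed.
    spv::ImageOperandsMask ops =
        spv::ImageOperandsBiasMask | spv::ImageOperandsConstOffsetMask;
    std::printf("image operands mask: 0x%x\n", unsigned(ops));

    // Query the two properties the big switch above encodes.
    bool hasResult = false, hasResultType = false;
    spv::HasResultAndType(spv::OpFAdd, &hasResult, &hasResultType);
    std::printf("OpFAdd: result=%d, result type=%d\n",
                int(hasResult), int(hasResultType));
    return 0;
}
```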
diff --git a/dep/glslang/SPIRV/spvIR.h b/dep/glslang/SPIRV/spvIR.h new file mode 100644 index 000000000..6523035e7 --- /dev/null +++ b/dep/glslang/SPIRV/spvIR.h @@ -0,0 +1,485 @@
+//
+// Copyright (C) 2014 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+// SPIRV-IR
+//
+// Simple in-memory representation (IR) of SPIRV. Just for holding
+// each function's CFG of blocks. Has this hierarchy:
+//  - Module, which is a list of
+//    - Function, which is a list of
+//      - Block, which is a list of
+//        - Instruction
+//
+
+#pragma once
+#ifndef spvIR_H
+#define spvIR_H
+
+#include "spirv.hpp"
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <vector>
+
+namespace spv {
+
+class Block;
+class Function;
+class Module;
+
+const Id NoResult = 0;
+const Id NoType = 0;
+
+const Decoration NoPrecision = DecorationMax;
+
+#ifdef __GNUC__
+#   define POTENTIALLY_UNUSED __attribute__((unused))
+#else
+#   define POTENTIALLY_UNUSED
+#endif
+
+POTENTIALLY_UNUSED
+const MemorySemanticsMask MemorySemanticsAllMemory =
+    (MemorySemanticsMask)(MemorySemanticsUniformMemoryMask |
+                          MemorySemanticsWorkgroupMemoryMask |
+                          MemorySemanticsAtomicCounterMemoryMask |
+                          MemorySemanticsImageMemoryMask);
+
+struct IdImmediate {
+    bool isId;      // true if word is an Id, false if word is an immediate
+    unsigned word;
+    IdImmediate(bool i, unsigned w) : isId(i), word(w) {}
+};
+
+//
+// SPIR-V IR instruction.
+//
+
+class Instruction {
+public:
+    Instruction(Id resultId, Id typeId, Op opCode) : resultId(resultId), typeId(typeId), opCode(opCode), block(nullptr) { }
+    explicit Instruction(Op opCode) : resultId(NoResult), typeId(NoType), opCode(opCode), block(nullptr) { }
+    virtual ~Instruction() {}
+    void addIdOperand(Id id) {
+        operands.push_back(id);
+        idOperand.push_back(true);
+    }
+    void addImmediateOperand(unsigned int immediate) {
+        operands.push_back(immediate);
+        idOperand.push_back(false);
+    }
+    void setImmediateOperand(unsigned idx, unsigned int immediate) {
+        assert(!idOperand[idx]);
+        operands[idx] = immediate;
+    }
+
+    void addStringOperand(const char* str)
+    {
+        unsigned int word;
+        char* wordString = (char*)&word;
+        char* wordPtr = wordString;
+        int charCount = 0;
+        char c;
+        do {
+            c = *(str++);
+            *(wordPtr++) = c;
+            ++charCount;
+            if (charCount == 4) {
+                addImmediateOperand(word);
+                wordPtr = wordString;
+                charCount = 0;
+            }
+        } while (c != 0);
+
+        // deal with partial last word
+        if (charCount > 0) {
+            // pad with 0s
+            for (; charCount < 4; ++charCount)
+                *(wordPtr++) = 0;
+            addImmediateOperand(word);
+        }
+    }
+    bool isIdOperand(int op) const { return idOperand[op]; }
+    void setBlock(Block* b) { block = b; }
+    Block* getBlock() const { return block; }
+    Op getOpCode() const { return opCode; }
+    int getNumOperands() const
+    {
+        assert(operands.size() == idOperand.size());
+        return (int)operands.size();
+    }
+    Id getResultId() const { return resultId; }
+    Id getTypeId() const { return typeId; }
+    Id getIdOperand(int op) const {
+        assert(idOperand[op]);
+        return operands[op];
+    }
+    unsigned int getImmediateOperand(int op) const {
+        assert(!idOperand[op]);
+        return operands[op];
+    }
+
+    // Write out the binary form.
+    void dump(std::vector<unsigned int>& out) const
+    {
+        // Compute the wordCount
+        unsigned int wordCount = 1;
+        if (typeId)
+            ++wordCount;
+        if (resultId)
+            ++wordCount;
+        wordCount += (unsigned int)operands.size();
+
+        // Write out the beginning of the instruction
+        out.push_back(((wordCount) << WordCountShift) | opCode);
+        if (typeId)
+            out.push_back(typeId);
+        if (resultId)
+            out.push_back(resultId);
+
+        // Write out the operands
+        for (int op = 0; op < (int)operands.size(); ++op)
+            out.push_back(operands[op]);
+    }
+
+protected:
+    Instruction(const Instruction&);
+    Id resultId;
+    Id typeId;
+    Op opCode;
+    std::vector<unsigned int> operands; // operands, both <id> and immediates (both are unsigned int)
+    std::vector<bool> idOperand;        // true for operands that are <id>, false for immediates
+    Block* block;
+};
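`Instruction::addStringOperand` packs a NUL-terminated string into little-endian 32-bit words, and `dump()` emits the standard SPIR-V header word of `(wordCount << WordCountShift) | opcode`. A small sketch of the resulting encoding (the include path, the target id, and `main()` are illustrative assumptions):

```cpp
#include "SPIRV/spvIR.h"

#include <cstdio>
#include <vector>

int main()
{
    // OpName has no result and no result type; its operands are a target <id>
    // followed by the string, four bytes per word.
    spv::Instruction name(spv::NoResult, spv::NoType, spv::OpName);
    name.addIdOperand(3);          // hypothetical target <id> %3
    name.addStringOperand("main"); // "main" plus NUL padding -> two words

    std::vector<unsigned int> words;
    name.dump(words);

    // words[0] encodes a word count of 4 here: header + id + 2 string words.
    // (Byte order within the string words follows host endianness, as the
    // packing writes through a char* into an unsigned int.)
    for (unsigned int w : words)
        std::printf("0x%08x\n", w);
    return 0;
}
```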
+
+//
+// SPIR-V IR block.
+//
+
+class Block {
+public:
+    Block(Id id, Function& parent);
+    virtual ~Block()
+    {
+    }
+
+    Id getId() { return instructions.front()->getResultId(); }
+
+    Function& getParent() const { return parent; }
+    void addInstruction(std::unique_ptr<Instruction> inst);
+    void addPredecessor(Block* pred) { predecessors.push_back(pred); pred->successors.push_back(this);}
+    void addLocalVariable(std::unique_ptr<Instruction> inst) { localVariables.push_back(std::move(inst)); }
+    const std::vector<Block*>& getPredecessors() const { return predecessors; }
+    const std::vector<Block*>& getSuccessors() const { return successors; }
+    const std::vector<std::unique_ptr<Instruction> >& getInstructions() const {
+        return instructions;
+    }
+    const std::vector<std::unique_ptr<Instruction> >& getLocalVariables() const { return localVariables; }
+    void setUnreachable() { unreachable = true; }
+    bool isUnreachable() const { return unreachable; }
+    // Returns the block's merge instruction, if one exists (otherwise null).
+    const Instruction* getMergeInstruction() const {
+        if (instructions.size() < 2) return nullptr;
+        const Instruction* nextToLast = (instructions.cend() - 2)->get();
+        switch (nextToLast->getOpCode()) {
+            case OpSelectionMerge:
+            case OpLoopMerge:
+                return nextToLast;
+            default:
+                return nullptr;
+        }
+        return nullptr;
+    }
+
+    // Change this block into a canonical dead merge block. Delete instructions
+    // as necessary. A canonical dead merge block has only an OpLabel and an
+    // OpUnreachable.
+    void rewriteAsCanonicalUnreachableMerge() {
+        assert(localVariables.empty());
+        // Delete all instructions except for the label.
+        assert(instructions.size() > 0);
+        instructions.resize(1);
+        successors.clear();
+        addInstruction(std::unique_ptr<Instruction>(new Instruction(OpUnreachable)));
+    }
+    // Change this block into a canonical dead continue target branching to the
+    // given header ID. Delete instructions as necessary. A canonical dead continue
+    // target has only an OpLabel and an unconditional branch back to the corresponding
+    // header.
+    void rewriteAsCanonicalUnreachableContinue(Block* header) {
+        assert(localVariables.empty());
+        // Delete all instructions except for the label.
+        assert(instructions.size() > 0);
+        instructions.resize(1);
+        successors.clear();
+        // Add OpBranch back to the header.
+        assert(header != nullptr);
+        Instruction* branch = new Instruction(OpBranch);
+        branch->addIdOperand(header->getId());
+        addInstruction(std::unique_ptr<Instruction>(branch));
+        successors.push_back(header);
+    }
+
+    bool isTerminated() const
+    {
+        switch (instructions.back()->getOpCode()) {
+        case OpBranch:
+        case OpBranchConditional:
+        case OpSwitch:
+        case OpKill:
+        case OpReturn:
+        case OpReturnValue:
+        case OpUnreachable:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    void dump(std::vector<unsigned int>& out) const
+    {
+        instructions[0]->dump(out);
+        for (int i = 0; i < (int)localVariables.size(); ++i)
+            localVariables[i]->dump(out);
+        for (int i = 1; i < (int)instructions.size(); ++i)
+            instructions[i]->dump(out);
+    }
+
+protected:
+    Block(const Block&);
+    Block& operator=(Block&);
+
+    // To enforce keeping parent and ownership in sync:
+    friend Function;
+
+    std::vector<std::unique_ptr<Instruction> > instructions;
+    std::vector<Block*> predecessors, successors;
+    std::vector<std::unique_ptr<Instruction> > localVariables;
+    Function& parent;
+
+    // track whether this block is known to be unreachable (not necessarily
+    // true for all unreachable blocks, but should be set at least
+    // for the extraneous ones introduced by the builder).
+    bool unreachable;
+};
+
+// The different reasons for reaching a block in the inReadableOrder traversal.
+enum ReachReason {
+    // Reachable from the entry block via transfers of control, i.e. branches.
+    ReachViaControlFlow = 0,
+    // A continue target that is not reachable via control flow.
+    ReachDeadContinue,
+    // A merge block that is not reachable via control flow.
+    ReachDeadMerge
+};
+
+// Traverses the control-flow graph rooted at root in an order suited for
+// readable code generation. Invokes callback at every node in the traversal
+// order. The callback arguments are:
+// - the block,
+// - the reason we reached the block,
+// - if the reason was that block is an unreachable continue or unreachable merge block
+//   then the last parameter is the corresponding header block.
+void inReadableOrder(Block* root, std::function<void(Block*, ReachReason, Block* header)> callback);
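`inReadableOrder` (implemented in SPIRV/InReadableOrder.cpp, added elsewhere in this patch) is what `Function::dump` below uses to emit blocks, but it can also be called directly. A hypothetical sketch, assuming `func` points at a fully built `spv::Function`:

```cpp
#include "SPIRV/spvIR.h"

#include <cstdio>

// Print every block of a function in emission order, noting why the
// traversal reached it (names here are illustrative only).
void listBlocks(spv::Function* func)
{
    spv::inReadableOrder(func->getEntryBlock(),
        [](spv::Block* b, spv::ReachReason why, spv::Block* header) {
            const char* how =
                why == spv::ReachViaControlFlow ? "control flow" :
                why == spv::ReachDeadContinue   ? "dead continue" :
                                                  "dead merge";
            std::printf("block %%%u reached via %s", b->getId(), how);
            if (header != nullptr)
                std::printf(" (header %%%u)", header->getId());
            std::printf("\n");
        });
}
```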
+
+//
+// SPIR-V IR Function.
+//
+
+class Function {
+public:
+    Function(Id id, Id resultType, Id functionType, Id firstParam, Module& parent);
+    virtual ~Function()
+    {
+        for (int i = 0; i < (int)parameterInstructions.size(); ++i)
+            delete parameterInstructions[i];
+
+        for (int i = 0; i < (int)blocks.size(); ++i)
+            delete blocks[i];
+    }
+    Id getId() const { return functionInstruction.getResultId(); }
+    Id getParamId(int p) const { return parameterInstructions[p]->getResultId(); }
+    Id getParamType(int p) const { return parameterInstructions[p]->getTypeId(); }
+
+    void addBlock(Block* block) { blocks.push_back(block); }
+    void removeBlock(Block* block)
+    {
+        auto found = find(blocks.begin(), blocks.end(), block);
+        assert(found != blocks.end());
+        blocks.erase(found);
+        delete block;
+    }
+
+    Module& getParent() const { return parent; }
+    Block* getEntryBlock() const { return blocks.front(); }
+    Block* getLastBlock() const { return blocks.back(); }
+    const std::vector<Block*>& getBlocks() const { return blocks; }
+    void addLocalVariable(std::unique_ptr<Instruction> inst);
+    Id getReturnType() const { return functionInstruction.getTypeId(); }
+
+    void setImplicitThis() { implicitThis = true; }
+    bool hasImplicitThis() const { return implicitThis; }
+
+    void dump(std::vector<unsigned int>& out) const
+    {
+        // OpFunction
+        functionInstruction.dump(out);
+
+        // OpFunctionParameter
+        for (int p = 0; p < (int)parameterInstructions.size(); ++p)
+            parameterInstructions[p]->dump(out);
+
+        // Blocks
+        inReadableOrder(blocks[0], [&out](const Block* b, ReachReason, Block*) { b->dump(out); });
+        Instruction end(0, 0, OpFunctionEnd);
+        end.dump(out);
+    }
+
+protected:
+    Function(const Function&);
+    Function& operator=(Function&);
+
+    Module& parent;
+    Instruction functionInstruction;
+    std::vector<Instruction*> parameterInstructions;
+    std::vector<Block*> blocks;
+    bool implicitThis;  // true if this is a member function expecting to be passed a 'this' as the first argument
+};
+
+//
+// SPIR-V IR Module.
+//
+
+class Module {
+public:
+    Module() {}
+    virtual ~Module()
+    {
+        // TODO delete things
+    }
+
+    void addFunction(Function *fun) { functions.push_back(fun); }
+
+    void mapInstruction(Instruction *instruction)
+    {
+        spv::Id resultId = instruction->getResultId();
+        // map the instruction's result id
+        if (resultId >= idToInstruction.size())
+            idToInstruction.resize(resultId + 16);
+        idToInstruction[resultId] = instruction;
+    }
+
+    Instruction* getInstruction(Id id) const { return idToInstruction[id]; }
+    const std::vector<Function*>& getFunctions() const { return functions; }
+    spv::Id getTypeId(Id resultId) const {
+        return idToInstruction[resultId] == nullptr ? NoType : idToInstruction[resultId]->getTypeId();
+    }
+    StorageClass getStorageClass(Id typeId) const
+    {
+        assert(idToInstruction[typeId]->getOpCode() == spv::OpTypePointer);
+        return (StorageClass)idToInstruction[typeId]->getImmediateOperand(0);
+    }
+
+    void dump(std::vector<unsigned int>& out) const
+    {
+        for (int f = 0; f < (int)functions.size(); ++f)
+            functions[f]->dump(out);
+    }
+
+protected:
+    Module(const Module&);
+    std::vector<Function*> functions;
+
+    // map from result id to instruction having that result id
+    std::vector<Instruction*> idToInstruction;
+
+    // map from a result id to its type id
+};
+
+//
+// Implementation (it's here due to circular type definitions).
+//
+
+// Add both
+// - the OpFunction instruction
+// - all the OpFunctionParameter instructions
+__inline Function::Function(Id id, Id resultType, Id functionType, Id firstParamId, Module& parent)
+    : parent(parent), functionInstruction(id, resultType, OpFunction), implicitThis(false)
+{
+    // OpFunction
+    functionInstruction.addImmediateOperand(FunctionControlMaskNone);
+    functionInstruction.addIdOperand(functionType);
+    parent.mapInstruction(&functionInstruction);
+    parent.addFunction(this);
+
+    // OpFunctionParameter
+    Instruction* typeInst = parent.getInstruction(functionType);
+    int numParams = typeInst->getNumOperands() - 1;
+    for (int p = 0; p < numParams; ++p) {
+        Instruction* param = new Instruction(firstParamId + p, typeInst->getIdOperand(p + 1), OpFunctionParameter);
+        parent.mapInstruction(param);
+        parameterInstructions.push_back(param);
+    }
+}
+
+__inline void Function::addLocalVariable(std::unique_ptr<Instruction> inst)
+{
+    Instruction* raw_instruction = inst.get();
+    blocks[0]->addLocalVariable(std::move(inst));
+    parent.mapInstruction(raw_instruction);
+}
+
+__inline Block::Block(Id id, Function& parent) : parent(parent), unreachable(false)
+{
+    instructions.push_back(std::unique_ptr<Instruction>(new Instruction(id, NoType, OpLabel)));
+    instructions.back()->setBlock(this);
+    parent.getParent().mapInstruction(instructions.back().get());
+}
+
+__inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
+{
+    Instruction* raw_instruction = inst.get();
+    instructions.push_back(std::move(inst));
+    raw_instruction->setBlock(this);
+    if (raw_instruction->getResultId())
+        parent.getParent().mapInstruction(raw_instruction);
+}
+
+} // end spv namespace
+
+#endif // spvIR_H
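These inline constructors are enough machinery to assemble a tiny module by hand: the Function constructor reads the OpTypeFunction operands to synthesize its OpFunctionParameter list and registers itself with the module, and the Block constructor creates and maps its own OpLabel. A minimal sketch under stated assumptions (ids are allocated manually here, real callers go through SpvBuilder, and `dump` needs SPIRV/InReadableOrder.cpp linked in):

```cpp
#include "SPIRV/spvIR.h"

#include <memory>
#include <vector>

int main()
{
    spv::Module module;
    spv::Id nextId = 1;

    // %1 = OpTypeVoid, %2 = OpTypeFunction %1 (no parameters).
    spv::Instruction* voidType = new spv::Instruction(nextId++, spv::NoType, spv::OpTypeVoid);
    module.mapInstruction(voidType);
    spv::Instruction* fnType = new spv::Instruction(nextId++, spv::NoType, spv::OpTypeFunction);
    fnType->addIdOperand(voidType->getResultId());
    module.mapInstruction(fnType);

    // %3 = OpFunction %1 None %2; the ctor adds itself to the module.
    spv::Id fnId = nextId++;
    spv::Function* fn = new spv::Function(fnId, voidType->getResultId(),
                                          fnType->getResultId(), nextId, module);

    // One entry block holding just OpLabel + OpReturn.
    spv::Block* entry = new spv::Block(nextId++, *fn);
    fn->addBlock(entry);
    entry->addInstruction(std::unique_ptr<spv::Instruction>(new spv::Instruction(spv::OpReturn)));

    // Serializes the function bodies only; a real SPIR-V file also needs
    // the module header and type/constant sections, which SpvBuilder emits.
    std::vector<unsigned int> words;
    module.dump(words);

    // getTypeId chains through the id-to-instruction map: %3 -> %1.
    bool ok = module.getTypeId(fnId) == voidType->getResultId();

    delete fn;        // Function's destructor deletes its blocks/parameters
    delete fnType;    // the type instructions are owned here, not by Module
    delete voidType;
    return ok ? 0 : 1;
}
```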
diff --git a/dep/glslang/StandAlone/DirStackFileIncluder.h b/dep/glslang/StandAlone/DirStackFileIncluder.h new file mode 100644 index 000000000..18734130e --- /dev/null +++ b/dep/glslang/StandAlone/DirStackFileIncluder.h @@ -0,0 +1,141 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2017 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#pragma once
+
+#include <vector>
+#include <string>
+#include <fstream>
+#include <algorithm>
+
+#include "./../glslang/Public/ShaderLang.h"
+
+// Default include class for normal include convention of search backward
+// through the stack of active include paths (for nested includes).
+// Can be overridden to customize.
+class DirStackFileIncluder : public glslang::TShader::Includer {
+public:
+    DirStackFileIncluder() : externalLocalDirectoryCount(0) { }
+
+    virtual IncludeResult* includeLocal(const char* headerName,
+                                        const char* includerName,
+                                        size_t inclusionDepth) override
+    {
+        return readLocalPath(headerName, includerName, (int)inclusionDepth);
+    }
+
+    virtual IncludeResult* includeSystem(const char* headerName,
+                                         const char* /*includerName*/,
+                                         size_t /*inclusionDepth*/) override
+    {
+        return readSystemPath(headerName);
+    }
+
+    // Externally set directories. E.g., from a command-line -I<dir>.
+    //  - Most-recently pushed are checked first.
+    //  - All these are checked after the parse-time stack of local directories
+    //    is checked.
+    //  - This only applies to the "local" form of #include.
+    //  - Makes its own copy of the path.
+    virtual void pushExternalLocalDirectory(const std::string& dir)
+    {
+        directoryStack.push_back(dir);
+        externalLocalDirectoryCount = (int)directoryStack.size();
+    }
+
+    virtual void releaseInclude(IncludeResult* result) override
+    {
+        if (result != nullptr) {
+            delete [] static_cast<tUserDataElement*>(result->userData);
+            delete result;
+        }
+    }
+
+    virtual ~DirStackFileIncluder() override { }
+
+protected:
+    typedef char tUserDataElement;
+    std::vector<std::string> directoryStack;
+    int externalLocalDirectoryCount;
+
+    // Search for a valid "local" path based on combining the stack of include
+    // directories and the nominal name of the header.
+    virtual IncludeResult* readLocalPath(const char* headerName, const char* includerName, int depth)
+    {
+        // Discard popped include directories, and
+        // initialize when at parse-time first level.
+        directoryStack.resize(depth + externalLocalDirectoryCount);
+        if (depth == 1)
+            directoryStack.back() = getDirectory(includerName);
+
+        // Find a directory that works, using a reverse search of the include stack.
+        for (auto it = directoryStack.rbegin(); it != directoryStack.rend(); ++it) {
+            std::string path = *it + '/' + headerName;
+            std::replace(path.begin(), path.end(), '\\', '/');
+            std::ifstream file(path, std::ios_base::binary | std::ios_base::ate);
+            if (file) {
+                directoryStack.push_back(getDirectory(path));
+                return newIncludeResult(path, file, (int)file.tellg());
+            }
+        }
+
+        return nullptr;
+    }
+
+    // Search for a valid <system> path.
+    // Not implemented yet; returning nullptr signals failure to find.
+    virtual IncludeResult* readSystemPath(const char* /*headerName*/) const
+    {
+        return nullptr;
+    }
+
+    // Do actual reading of the file, filling in a new include result.
+    virtual IncludeResult* newIncludeResult(const std::string& path, std::ifstream& file, int length) const
+    {
+        char* content = new tUserDataElement [length];
+        file.seekg(0, file.beg);
+        file.read(content, length);
+        return new IncludeResult(path, content, length, content);
+    }
+
+    // If no path markers, return current working directory.
+    // Otherwise, strip file name and return path leading up to it.
+    virtual std::string getDirectory(const std::string path) const
+    {
+        size_t last = path.find_last_of("/\\");
+        return last == std::string::npos ? "." : path.substr(0, last);
+    }
+};
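A hedged usage sketch for the includer: the directory name and entry point below are hypothetical, and the shader source itself must enable GL_GOOGLE_include_directive for #include to be accepted by glslang.

```cpp
#include "StandAlone/DirStackFileIncluder.h"
#include "StandAlone/ResourceLimits.h"
#include "glslang/Public/ShaderLang.h"

#include <cstdio>

// Parse a fragment shader whose #include "..." lines resolve against
// the includer's directory stack plus one externally pushed directory.
bool parseWithIncludes(const char* source)
{
    glslang::InitializeProcess();

    glslang::TShader shader(EShLangFragment);
    shader.setStrings(&source, 1);

    DirStackFileIncluder includer;
    includer.pushExternalLocalDirectory("shaders/include"); // like a -I option

    bool ok = shader.parse(&glslang::DefaultTBuiltInResource, 450, false,
                           EShMsgDefault, includer);
    if (!ok)
        std::fprintf(stderr, "%s\n", shader.getInfoLog());

    glslang::FinalizeProcess();
    return ok;
}
```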
diff --git a/dep/glslang/StandAlone/ResourceLimits.cpp b/dep/glslang/StandAlone/ResourceLimits.cpp new file mode 100644 index 000000000..7c7f4c4e4 --- /dev/null +++ b/dep/glslang/StandAlone/ResourceLimits.cpp @@ -0,0 +1,496 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of Google Inc. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#include <cstdlib>
+#include <cstring>
+#include <sstream>
+#include <ctype.h>
+
+#include "ResourceLimits.h"
+
+namespace glslang {
+
+const TBuiltInResource DefaultTBuiltInResource = {
+    /* .MaxLights = */ 32,
+    /* .MaxClipPlanes = */ 6,
+    /* .MaxTextureUnits = */ 32,
+    /* .MaxTextureCoords = */ 32,
+    /* .MaxVertexAttribs = */ 64,
+    /* .MaxVertexUniformComponents = */ 4096,
+    /* .MaxVaryingFloats = */ 64,
+    /* .MaxVertexTextureImageUnits = */ 32,
+    /* .MaxCombinedTextureImageUnits = */ 80,
+    /* .MaxTextureImageUnits = */ 32,
+    /* .MaxFragmentUniformComponents = */ 4096,
+    /* .MaxDrawBuffers = */ 32,
+    /* .MaxVertexUniformVectors = */ 128,
+    /* .MaxVaryingVectors = */ 8,
+    /* .MaxFragmentUniformVectors = */ 16,
+    /* .MaxVertexOutputVectors = */ 16,
+    /* .MaxFragmentInputVectors = */ 15,
+    /* .MinProgramTexelOffset = */ -8,
+    /* .MaxProgramTexelOffset = */ 7,
+    /* .MaxClipDistances = */ 8,
+    /* .MaxComputeWorkGroupCountX = */ 65535,
+    /* .MaxComputeWorkGroupCountY = */ 65535,
+    /* .MaxComputeWorkGroupCountZ = */ 65535,
+    /* .MaxComputeWorkGroupSizeX = */ 1024,
+    /* .MaxComputeWorkGroupSizeY = */ 1024,
+    /* .MaxComputeWorkGroupSizeZ = */ 64,
+    /* .MaxComputeUniformComponents = */ 1024,
+    /* .MaxComputeTextureImageUnits = */ 16,
+    /* .MaxComputeImageUniforms = */ 8,
+    /* .MaxComputeAtomicCounters = */ 8,
+    /* .MaxComputeAtomicCounterBuffers = */ 1,
+    /* .MaxVaryingComponents = */ 60,
+    /* .MaxVertexOutputComponents = */ 64,
+    /* .MaxGeometryInputComponents = */ 64,
+    /* .MaxGeometryOutputComponents = */ 128,
+    /* .MaxFragmentInputComponents = */ 128,
+    /* .MaxImageUnits = */ 8,
+    /* .MaxCombinedImageUnitsAndFragmentOutputs = */ 8,
+    /* .MaxCombinedShaderOutputResources = */ 8,
+    /* .MaxImageSamples = */ 0,
+    /* .MaxVertexImageUniforms = */ 0,
+    /* .MaxTessControlImageUniforms = */ 0,
+    /* .MaxTessEvaluationImageUniforms = */ 0,
+    /* .MaxGeometryImageUniforms = */ 0,
+    /* .MaxFragmentImageUniforms = */ 8,
+    /* .MaxCombinedImageUniforms = */ 8,
+    /* .MaxGeometryTextureImageUnits = */ 16,
+    /* .MaxGeometryOutputVertices = */ 256,
+    /* .MaxGeometryTotalOutputComponents = */ 1024,
+    /* .MaxGeometryUniformComponents = */ 1024,
+    /* .MaxGeometryVaryingComponents = */ 64,
+    /* .MaxTessControlInputComponents = */ 128,
+    /* .MaxTessControlOutputComponents = */ 128,
+    /* .MaxTessControlTextureImageUnits = */ 16,
+    /* .MaxTessControlUniformComponents = */ 1024,
+    /* .MaxTessControlTotalOutputComponents = */ 4096,
+    /* .MaxTessEvaluationInputComponents = */ 128,
+    /* .MaxTessEvaluationOutputComponents = */ 128,
+    /* .MaxTessEvaluationTextureImageUnits = */ 16,
+    /* .MaxTessEvaluationUniformComponents = */ 1024,
+    /* .MaxTessPatchComponents = */ 120,
+    /* .MaxPatchVertices = */ 32,
+    /* .MaxTessGenLevel = */ 64,
+    /* .MaxViewports = */ 16,
+    /* .MaxVertexAtomicCounters = */ 0,
+    /* .MaxTessControlAtomicCounters = */ 0,
+    /* .MaxTessEvaluationAtomicCounters = */ 0,
+    /* .MaxGeometryAtomicCounters = */ 0,
+    /* .MaxFragmentAtomicCounters = */ 8,
+    /* .MaxCombinedAtomicCounters = */ 8,
+    /* .MaxAtomicCounterBindings = */ 1,
+    /* .MaxVertexAtomicCounterBuffers = */ 0,
+    /* .MaxTessControlAtomicCounterBuffers = */ 0,
+    /* .MaxTessEvaluationAtomicCounterBuffers = */ 0,
+    /* .MaxGeometryAtomicCounterBuffers = */ 0,
+    /* .MaxFragmentAtomicCounterBuffers = */ 1,
+    /* .MaxCombinedAtomicCounterBuffers = */ 1,
+    /* .MaxAtomicCounterBufferSize = */ 16384,
+    /* .MaxTransformFeedbackBuffers = */ 4,
+    /* .MaxTransformFeedbackInterleavedComponents = */ 64,
+    /* .MaxCullDistances = */ 8,
+    /* .MaxCombinedClipAndCullDistances = */ 8,
+    /* .MaxSamples = */ 4,
+    /* .maxMeshOutputVerticesNV = */ 256,
+    /* .maxMeshOutputPrimitivesNV = */ 512,
+    /* .maxMeshWorkGroupSizeX_NV = */ 32,
+    /* .maxMeshWorkGroupSizeY_NV = */ 1,
+    /* .maxMeshWorkGroupSizeZ_NV = */ 1,
+    /* .maxTaskWorkGroupSizeX_NV = */ 32,
+    /* .maxTaskWorkGroupSizeY_NV = */ 1,
+    /* .maxTaskWorkGroupSizeZ_NV = */ 1,
+    /* .maxMeshViewCountNV = */ 4,
+    /* .maxDualSourceDrawBuffersEXT = */ 1,
+
+    /* .limits = */ {
+        /* .nonInductiveForLoops = */ 1,
+        /* .whileLoops = */ 1,
+        /* .doWhileLoops = */ 1,
+        /* .generalUniformIndexing = */ 1,
+        /* .generalAttributeMatrixVectorIndexing = */ 1,
+        /* .generalVaryingIndexing = */ 1,
+        /* .generalSamplerIndexing = */ 1,
+        /* .generalVariableIndexing = */ 1,
+        /* .generalConstantMatrixVectorIndexing = */ 1,
+    }};
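DefaultTBuiltInResource is what callers pass to TShader::parse when they have no live GL/Vulkan context to query real limits from. A sketch of the typical GLSL-to-SPIR-V flow using it (include paths are assumptions for this vendored layout; error handling is trimmed):

```cpp
#include "StandAlone/ResourceLimits.h"
#include "glslang/Public/ShaderLang.h"
#include "SPIRV/GlslangToSpv.h"

#include <cstdio>
#include <vector>

// Compile a Vulkan-flavored GLSL vertex shader to SPIR-V words.
std::vector<unsigned int> compileVertexShader(const char* source)
{
    glslang::InitializeProcess();

    glslang::TShader shader(EShLangVertex);
    shader.setStrings(&source, 1);
    shader.setEnvInput(glslang::EShSourceGlsl, EShLangVertex, glslang::EShClientVulkan, 100);
    shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_0);
    shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_0);

    std::vector<unsigned int> spirv;
    if (shader.parse(&glslang::DefaultTBuiltInResource, 100, false, EShMsgDefault)) {
        glslang::TProgram program;
        program.addShader(&shader);
        if (program.link(EShMsgDefault))
            glslang::GlslangToSpv(*program.getIntermediate(EShLangVertex), spirv);
    } else {
        std::fprintf(stderr, "%s\n", shader.getInfoLog());
    }

    glslang::FinalizeProcess();
    return spirv;
}
```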
/* .maxMeshOutputVerticesNV = */ 256, + /* .maxMeshOutputPrimitivesNV = */ 512, + /* .maxMeshWorkGroupSizeX_NV = */ 32, + /* .maxMeshWorkGroupSizeY_NV = */ 1, + /* .maxMeshWorkGroupSizeZ_NV = */ 1, + /* .maxTaskWorkGroupSizeX_NV = */ 32, + /* .maxTaskWorkGroupSizeY_NV = */ 1, + /* .maxTaskWorkGroupSizeZ_NV = */ 1, + /* .maxMeshViewCountNV = */ 4, + /* .maxDualSourceDrawBuffersEXT = */ 1, + + /* .limits = */ { + /* .nonInductiveForLoops = */ 1, + /* .whileLoops = */ 1, + /* .doWhileLoops = */ 1, + /* .generalUniformIndexing = */ 1, + /* .generalAttributeMatrixVectorIndexing = */ 1, + /* .generalVaryingIndexing = */ 1, + /* .generalSamplerIndexing = */ 1, + /* .generalVariableIndexing = */ 1, + /* .generalConstantMatrixVectorIndexing = */ 1, + }}; + +std::string GetDefaultTBuiltInResourceString() +{ + std::ostringstream ostream; + + ostream << "MaxLights " << DefaultTBuiltInResource.maxLights << "\n" + << "MaxClipPlanes " << DefaultTBuiltInResource.maxClipPlanes << "\n" + << "MaxTextureUnits " << DefaultTBuiltInResource.maxTextureUnits << "\n" + << "MaxTextureCoords " << DefaultTBuiltInResource.maxTextureCoords << "\n" + << "MaxVertexAttribs " << DefaultTBuiltInResource.maxVertexAttribs << "\n" + << "MaxVertexUniformComponents " << DefaultTBuiltInResource.maxVertexUniformComponents << "\n" + << "MaxVaryingFloats " << DefaultTBuiltInResource.maxVaryingFloats << "\n" + << "MaxVertexTextureImageUnits " << DefaultTBuiltInResource.maxVertexTextureImageUnits << "\n" + << "MaxCombinedTextureImageUnits " << DefaultTBuiltInResource.maxCombinedTextureImageUnits << "\n" + << "MaxTextureImageUnits " << DefaultTBuiltInResource.maxTextureImageUnits << "\n" + << "MaxFragmentUniformComponents " << DefaultTBuiltInResource.maxFragmentUniformComponents << "\n" + << "MaxDrawBuffers " << DefaultTBuiltInResource.maxDrawBuffers << "\n" + << "MaxVertexUniformVectors " << DefaultTBuiltInResource.maxVertexUniformVectors << "\n" + << "MaxVaryingVectors " << DefaultTBuiltInResource.maxVaryingVectors << "\n" + << "MaxFragmentUniformVectors " << DefaultTBuiltInResource.maxFragmentUniformVectors << "\n" + << "MaxVertexOutputVectors " << DefaultTBuiltInResource.maxVertexOutputVectors << "\n" + << "MaxFragmentInputVectors " << DefaultTBuiltInResource.maxFragmentInputVectors << "\n" + << "MinProgramTexelOffset " << DefaultTBuiltInResource.minProgramTexelOffset << "\n" + << "MaxProgramTexelOffset " << DefaultTBuiltInResource.maxProgramTexelOffset << "\n" + << "MaxClipDistances " << DefaultTBuiltInResource.maxClipDistances << "\n" + << "MaxComputeWorkGroupCountX " << DefaultTBuiltInResource.maxComputeWorkGroupCountX << "\n" + << "MaxComputeWorkGroupCountY " << DefaultTBuiltInResource.maxComputeWorkGroupCountY << "\n" + << "MaxComputeWorkGroupCountZ " << DefaultTBuiltInResource.maxComputeWorkGroupCountZ << "\n" + << "MaxComputeWorkGroupSizeX " << DefaultTBuiltInResource.maxComputeWorkGroupSizeX << "\n" + << "MaxComputeWorkGroupSizeY " << DefaultTBuiltInResource.maxComputeWorkGroupSizeY << "\n" + << "MaxComputeWorkGroupSizeZ " << DefaultTBuiltInResource.maxComputeWorkGroupSizeZ << "\n" + << "MaxComputeUniformComponents " << DefaultTBuiltInResource.maxComputeUniformComponents << "\n" + << "MaxComputeTextureImageUnits " << DefaultTBuiltInResource.maxComputeTextureImageUnits << "\n" + << "MaxComputeImageUniforms " << DefaultTBuiltInResource.maxComputeImageUniforms << "\n" + << "MaxComputeAtomicCounters " << DefaultTBuiltInResource.maxComputeAtomicCounters << "\n" + << "MaxComputeAtomicCounterBuffers " << 
DefaultTBuiltInResource.maxComputeAtomicCounterBuffers << "\n" + << "MaxVaryingComponents " << DefaultTBuiltInResource.maxVaryingComponents << "\n" + << "MaxVertexOutputComponents " << DefaultTBuiltInResource.maxVertexOutputComponents << "\n" + << "MaxGeometryInputComponents " << DefaultTBuiltInResource.maxGeometryInputComponents << "\n" + << "MaxGeometryOutputComponents " << DefaultTBuiltInResource.maxGeometryOutputComponents << "\n" + << "MaxFragmentInputComponents " << DefaultTBuiltInResource.maxFragmentInputComponents << "\n" + << "MaxImageUnits " << DefaultTBuiltInResource.maxImageUnits << "\n" + << "MaxCombinedImageUnitsAndFragmentOutputs " << DefaultTBuiltInResource.maxCombinedImageUnitsAndFragmentOutputs << "\n" + << "MaxCombinedShaderOutputResources " << DefaultTBuiltInResource.maxCombinedShaderOutputResources << "\n" + << "MaxImageSamples " << DefaultTBuiltInResource.maxImageSamples << "\n" + << "MaxVertexImageUniforms " << DefaultTBuiltInResource.maxVertexImageUniforms << "\n" + << "MaxTessControlImageUniforms " << DefaultTBuiltInResource.maxTessControlImageUniforms << "\n" + << "MaxTessEvaluationImageUniforms " << DefaultTBuiltInResource.maxTessEvaluationImageUniforms << "\n" + << "MaxGeometryImageUniforms " << DefaultTBuiltInResource.maxGeometryImageUniforms << "\n" + << "MaxFragmentImageUniforms " << DefaultTBuiltInResource.maxFragmentImageUniforms << "\n" + << "MaxCombinedImageUniforms " << DefaultTBuiltInResource.maxCombinedImageUniforms << "\n" + << "MaxGeometryTextureImageUnits " << DefaultTBuiltInResource.maxGeometryTextureImageUnits << "\n" + << "MaxGeometryOutputVertices " << DefaultTBuiltInResource.maxGeometryOutputVertices << "\n" + << "MaxGeometryTotalOutputComponents " << DefaultTBuiltInResource.maxGeometryTotalOutputComponents << "\n" + << "MaxGeometryUniformComponents " << DefaultTBuiltInResource.maxGeometryUniformComponents << "\n" + << "MaxGeometryVaryingComponents " << DefaultTBuiltInResource.maxGeometryVaryingComponents << "\n" + << "MaxTessControlInputComponents " << DefaultTBuiltInResource.maxTessControlInputComponents << "\n" + << "MaxTessControlOutputComponents " << DefaultTBuiltInResource.maxTessControlOutputComponents << "\n" + << "MaxTessControlTextureImageUnits " << DefaultTBuiltInResource.maxTessControlTextureImageUnits << "\n" + << "MaxTessControlUniformComponents " << DefaultTBuiltInResource.maxTessControlUniformComponents << "\n" + << "MaxTessControlTotalOutputComponents " << DefaultTBuiltInResource.maxTessControlTotalOutputComponents << "\n" + << "MaxTessEvaluationInputComponents " << DefaultTBuiltInResource.maxTessEvaluationInputComponents << "\n" + << "MaxTessEvaluationOutputComponents " << DefaultTBuiltInResource.maxTessEvaluationOutputComponents << "\n" + << "MaxTessEvaluationTextureImageUnits " << DefaultTBuiltInResource.maxTessEvaluationTextureImageUnits << "\n" + << "MaxTessEvaluationUniformComponents " << DefaultTBuiltInResource.maxTessEvaluationUniformComponents << "\n" + << "MaxTessPatchComponents " << DefaultTBuiltInResource.maxTessPatchComponents << "\n" + << "MaxPatchVertices " << DefaultTBuiltInResource.maxPatchVertices << "\n" + << "MaxTessGenLevel " << DefaultTBuiltInResource.maxTessGenLevel << "\n" + << "MaxViewports " << DefaultTBuiltInResource.maxViewports << "\n" + << "MaxVertexAtomicCounters " << DefaultTBuiltInResource.maxVertexAtomicCounters << "\n" + << "MaxTessControlAtomicCounters " << DefaultTBuiltInResource.maxTessControlAtomicCounters << "\n" + << "MaxTessEvaluationAtomicCounters " << 
DefaultTBuiltInResource.maxTessEvaluationAtomicCounters << "\n" + << "MaxGeometryAtomicCounters " << DefaultTBuiltInResource.maxGeometryAtomicCounters << "\n" + << "MaxFragmentAtomicCounters " << DefaultTBuiltInResource.maxFragmentAtomicCounters << "\n" + << "MaxCombinedAtomicCounters " << DefaultTBuiltInResource.maxCombinedAtomicCounters << "\n" + << "MaxAtomicCounterBindings " << DefaultTBuiltInResource.maxAtomicCounterBindings << "\n" + << "MaxVertexAtomicCounterBuffers " << DefaultTBuiltInResource.maxVertexAtomicCounterBuffers << "\n" + << "MaxTessControlAtomicCounterBuffers " << DefaultTBuiltInResource.maxTessControlAtomicCounterBuffers << "\n" + << "MaxTessEvaluationAtomicCounterBuffers " << DefaultTBuiltInResource.maxTessEvaluationAtomicCounterBuffers << "\n" + << "MaxGeometryAtomicCounterBuffers " << DefaultTBuiltInResource.maxGeometryAtomicCounterBuffers << "\n" + << "MaxFragmentAtomicCounterBuffers " << DefaultTBuiltInResource.maxFragmentAtomicCounterBuffers << "\n" + << "MaxCombinedAtomicCounterBuffers " << DefaultTBuiltInResource.maxCombinedAtomicCounterBuffers << "\n" + << "MaxAtomicCounterBufferSize " << DefaultTBuiltInResource.maxAtomicCounterBufferSize << "\n" + << "MaxTransformFeedbackBuffers " << DefaultTBuiltInResource.maxTransformFeedbackBuffers << "\n" + << "MaxTransformFeedbackInterleavedComponents " << DefaultTBuiltInResource.maxTransformFeedbackInterleavedComponents << "\n" + << "MaxCullDistances " << DefaultTBuiltInResource.maxCullDistances << "\n" + << "MaxCombinedClipAndCullDistances " << DefaultTBuiltInResource.maxCombinedClipAndCullDistances << "\n" + << "MaxSamples " << DefaultTBuiltInResource.maxSamples << "\n" + << "MaxMeshOutputVerticesNV " << DefaultTBuiltInResource.maxMeshOutputVerticesNV << "\n" + << "MaxMeshOutputPrimitivesNV " << DefaultTBuiltInResource.maxMeshOutputPrimitivesNV << "\n" + << "MaxMeshWorkGroupSizeX_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeX_NV << "\n" + << "MaxMeshWorkGroupSizeY_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeY_NV << "\n" + << "MaxMeshWorkGroupSizeZ_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeZ_NV << "\n" + << "MaxTaskWorkGroupSizeX_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeX_NV << "\n" + << "MaxTaskWorkGroupSizeY_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeY_NV << "\n" + << "MaxTaskWorkGroupSizeZ_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeZ_NV << "\n" + << "MaxMeshViewCountNV " << DefaultTBuiltInResource.maxMeshViewCountNV << "\n" + << "MaxDualSourceDrawBuffersEXT " << DefaultTBuiltInResource.maxDualSourceDrawBuffersEXT << "\n" + << "nonInductiveForLoops " << DefaultTBuiltInResource.limits.nonInductiveForLoops << "\n" + << "whileLoops " << DefaultTBuiltInResource.limits.whileLoops << "\n" + << "doWhileLoops " << DefaultTBuiltInResource.limits.doWhileLoops << "\n" + << "generalUniformIndexing " << DefaultTBuiltInResource.limits.generalUniformIndexing << "\n" + << "generalAttributeMatrixVectorIndexing " << DefaultTBuiltInResource.limits.generalAttributeMatrixVectorIndexing << "\n" + << "generalVaryingIndexing " << DefaultTBuiltInResource.limits.generalVaryingIndexing << "\n" + << "generalSamplerIndexing " << DefaultTBuiltInResource.limits.generalSamplerIndexing << "\n" + << "generalVariableIndexing " << DefaultTBuiltInResource.limits.generalVariableIndexing << "\n" + << "generalConstantMatrixVectorIndexing " << DefaultTBuiltInResource.limits.generalConstantMatrixVectorIndexing << "\n" + ; + + return ostream.str(); +} + +void DecodeResourceLimits(TBuiltInResource* 
resources, char* config) +{ + static const char* delims = " \t\n\r"; + + size_t pos = 0; + std::string configStr(config); + + while ((pos = configStr.find_first_not_of(delims, pos)) != std::string::npos) { + const size_t token_s = pos; + const size_t token_e = configStr.find_first_of(delims, token_s); + const size_t value_s = configStr.find_first_not_of(delims, token_e); + const size_t value_e = configStr.find_first_of(delims, value_s); + pos = value_e; + + // Faster to use compare(), but preferring readability. + const std::string tokenStr = configStr.substr(token_s, token_e-token_s); + const std::string valueStr = configStr.substr(value_s, value_e-value_s); + + if (value_s == std::string::npos || ! (valueStr[0] == '-' || isdigit(valueStr[0]))) { + printf("Error: '%s' bad .conf file.  Each name must be followed by one number.\n", + valueStr.c_str()); + return; + } + + const int value = std::atoi(valueStr.c_str()); + + if (tokenStr == "MaxLights") + resources->maxLights = value; + else if (tokenStr == "MaxClipPlanes") + resources->maxClipPlanes = value; + else if (tokenStr == "MaxTextureUnits") + resources->maxTextureUnits = value; + else if (tokenStr == "MaxTextureCoords") + resources->maxTextureCoords = value; + else if (tokenStr == "MaxVertexAttribs") + resources->maxVertexAttribs = value; + else if (tokenStr == "MaxVertexUniformComponents") + resources->maxVertexUniformComponents = value; + else if (tokenStr == "MaxVaryingFloats") + resources->maxVaryingFloats = value; + else if (tokenStr == "MaxVertexTextureImageUnits") + resources->maxVertexTextureImageUnits = value; + else if (tokenStr == "MaxCombinedTextureImageUnits") + resources->maxCombinedTextureImageUnits = value; + else if (tokenStr == "MaxTextureImageUnits") + resources->maxTextureImageUnits = value; + else if (tokenStr == "MaxFragmentUniformComponents") + resources->maxFragmentUniformComponents = value; + else if (tokenStr == "MaxDrawBuffers") + resources->maxDrawBuffers = value; + else if (tokenStr == "MaxVertexUniformVectors") + resources->maxVertexUniformVectors = value; + else if (tokenStr == "MaxVaryingVectors") + resources->maxVaryingVectors = value; + else if (tokenStr == "MaxFragmentUniformVectors") + resources->maxFragmentUniformVectors = value; + else if (tokenStr == "MaxVertexOutputVectors") + resources->maxVertexOutputVectors = value; + else if (tokenStr == "MaxFragmentInputVectors") + resources->maxFragmentInputVectors = value; + else if (tokenStr == "MinProgramTexelOffset") + resources->minProgramTexelOffset = value; + else if (tokenStr == "MaxProgramTexelOffset") + resources->maxProgramTexelOffset = value; + else if (tokenStr == "MaxClipDistances") + resources->maxClipDistances = value; + else if (tokenStr == "MaxComputeWorkGroupCountX") + resources->maxComputeWorkGroupCountX = value; + else if (tokenStr == "MaxComputeWorkGroupCountY") + resources->maxComputeWorkGroupCountY = value; + else if (tokenStr == "MaxComputeWorkGroupCountZ") + resources->maxComputeWorkGroupCountZ = value; + else if (tokenStr == "MaxComputeWorkGroupSizeX") + resources->maxComputeWorkGroupSizeX = value; + else if (tokenStr == "MaxComputeWorkGroupSizeY") + resources->maxComputeWorkGroupSizeY = value; + else if (tokenStr == "MaxComputeWorkGroupSizeZ") + resources->maxComputeWorkGroupSizeZ = value; + else if (tokenStr == "MaxComputeUniformComponents") + resources->maxComputeUniformComponents = value; + else if (tokenStr == "MaxComputeTextureImageUnits") + resources->maxComputeTextureImageUnits = value; + else if (tokenStr == 
"MaxComputeImageUniforms") + resources->maxComputeImageUniforms = value; + else if (tokenStr == "MaxComputeAtomicCounters") + resources->maxComputeAtomicCounters = value; + else if (tokenStr == "MaxComputeAtomicCounterBuffers") + resources->maxComputeAtomicCounterBuffers = value; + else if (tokenStr == "MaxVaryingComponents") + resources->maxVaryingComponents = value; + else if (tokenStr == "MaxVertexOutputComponents") + resources->maxVertexOutputComponents = value; + else if (tokenStr == "MaxGeometryInputComponents") + resources->maxGeometryInputComponents = value; + else if (tokenStr == "MaxGeometryOutputComponents") + resources->maxGeometryOutputComponents = value; + else if (tokenStr == "MaxFragmentInputComponents") + resources->maxFragmentInputComponents = value; + else if (tokenStr == "MaxImageUnits") + resources->maxImageUnits = value; + else if (tokenStr == "MaxCombinedImageUnitsAndFragmentOutputs") + resources->maxCombinedImageUnitsAndFragmentOutputs = value; + else if (tokenStr == "MaxCombinedShaderOutputResources") + resources->maxCombinedShaderOutputResources = value; + else if (tokenStr == "MaxImageSamples") + resources->maxImageSamples = value; + else if (tokenStr == "MaxVertexImageUniforms") + resources->maxVertexImageUniforms = value; + else if (tokenStr == "MaxTessControlImageUniforms") + resources->maxTessControlImageUniforms = value; + else if (tokenStr == "MaxTessEvaluationImageUniforms") + resources->maxTessEvaluationImageUniforms = value; + else if (tokenStr == "MaxGeometryImageUniforms") + resources->maxGeometryImageUniforms = value; + else if (tokenStr == "MaxFragmentImageUniforms") + resources->maxFragmentImageUniforms = value; + else if (tokenStr == "MaxCombinedImageUniforms") + resources->maxCombinedImageUniforms = value; + else if (tokenStr == "MaxGeometryTextureImageUnits") + resources->maxGeometryTextureImageUnits = value; + else if (tokenStr == "MaxGeometryOutputVertices") + resources->maxGeometryOutputVertices = value; + else if (tokenStr == "MaxGeometryTotalOutputComponents") + resources->maxGeometryTotalOutputComponents = value; + else if (tokenStr == "MaxGeometryUniformComponents") + resources->maxGeometryUniformComponents = value; + else if (tokenStr == "MaxGeometryVaryingComponents") + resources->maxGeometryVaryingComponents = value; + else if (tokenStr == "MaxTessControlInputComponents") + resources->maxTessControlInputComponents = value; + else if (tokenStr == "MaxTessControlOutputComponents") + resources->maxTessControlOutputComponents = value; + else if (tokenStr == "MaxTessControlTextureImageUnits") + resources->maxTessControlTextureImageUnits = value; + else if (tokenStr == "MaxTessControlUniformComponents") + resources->maxTessControlUniformComponents = value; + else if (tokenStr == "MaxTessControlTotalOutputComponents") + resources->maxTessControlTotalOutputComponents = value; + else if (tokenStr == "MaxTessEvaluationInputComponents") + resources->maxTessEvaluationInputComponents = value; + else if (tokenStr == "MaxTessEvaluationOutputComponents") + resources->maxTessEvaluationOutputComponents = value; + else if (tokenStr == "MaxTessEvaluationTextureImageUnits") + resources->maxTessEvaluationTextureImageUnits = value; + else if (tokenStr == "MaxTessEvaluationUniformComponents") + resources->maxTessEvaluationUniformComponents = value; + else if (tokenStr == "MaxTessPatchComponents") + resources->maxTessPatchComponents = value; + else if (tokenStr == "MaxPatchVertices") + resources->maxPatchVertices = value; + else if (tokenStr == 
"MaxTessGenLevel") + resources->maxTessGenLevel = value; + else if (tokenStr == "MaxViewports") + resources->maxViewports = value; + else if (tokenStr == "MaxVertexAtomicCounters") + resources->maxVertexAtomicCounters = value; + else if (tokenStr == "MaxTessControlAtomicCounters") + resources->maxTessControlAtomicCounters = value; + else if (tokenStr == "MaxTessEvaluationAtomicCounters") + resources->maxTessEvaluationAtomicCounters = value; + else if (tokenStr == "MaxGeometryAtomicCounters") + resources->maxGeometryAtomicCounters = value; + else if (tokenStr == "MaxFragmentAtomicCounters") + resources->maxFragmentAtomicCounters = value; + else if (tokenStr == "MaxCombinedAtomicCounters") + resources->maxCombinedAtomicCounters = value; + else if (tokenStr == "MaxAtomicCounterBindings") + resources->maxAtomicCounterBindings = value; + else if (tokenStr == "MaxVertexAtomicCounterBuffers") + resources->maxVertexAtomicCounterBuffers = value; + else if (tokenStr == "MaxTessControlAtomicCounterBuffers") + resources->maxTessControlAtomicCounterBuffers = value; + else if (tokenStr == "MaxTessEvaluationAtomicCounterBuffers") + resources->maxTessEvaluationAtomicCounterBuffers = value; + else if (tokenStr == "MaxGeometryAtomicCounterBuffers") + resources->maxGeometryAtomicCounterBuffers = value; + else if (tokenStr == "MaxFragmentAtomicCounterBuffers") + resources->maxFragmentAtomicCounterBuffers = value; + else if (tokenStr == "MaxCombinedAtomicCounterBuffers") + resources->maxCombinedAtomicCounterBuffers = value; + else if (tokenStr == "MaxAtomicCounterBufferSize") + resources->maxAtomicCounterBufferSize = value; + else if (tokenStr == "MaxTransformFeedbackBuffers") + resources->maxTransformFeedbackBuffers = value; + else if (tokenStr == "MaxTransformFeedbackInterleavedComponents") + resources->maxTransformFeedbackInterleavedComponents = value; + else if (tokenStr == "MaxCullDistances") + resources->maxCullDistances = value; + else if (tokenStr == "MaxCombinedClipAndCullDistances") + resources->maxCombinedClipAndCullDistances = value; + else if (tokenStr == "MaxSamples") + resources->maxSamples = value; + else if (tokenStr == "MaxMeshOutputVerticesNV") + resources->maxMeshOutputVerticesNV = value; + else if (tokenStr == "MaxMeshOutputPrimitivesNV") + resources->maxMeshOutputPrimitivesNV = value; + else if (tokenStr == "MaxMeshWorkGroupSizeX_NV") + resources->maxMeshWorkGroupSizeX_NV = value; + else if (tokenStr == "MaxMeshWorkGroupSizeY_NV") + resources->maxMeshWorkGroupSizeY_NV = value; + else if (tokenStr == "MaxMeshWorkGroupSizeZ_NV") + resources->maxMeshWorkGroupSizeZ_NV = value; + else if (tokenStr == "MaxTaskWorkGroupSizeX_NV") + resources->maxTaskWorkGroupSizeX_NV = value; + else if (tokenStr == "MaxTaskWorkGroupSizeY_NV") + resources->maxTaskWorkGroupSizeY_NV = value; + else if (tokenStr == "MaxTaskWorkGroupSizeZ_NV") + resources->maxTaskWorkGroupSizeZ_NV = value; + else if (tokenStr == "MaxMeshViewCountNV") + resources->maxMeshViewCountNV = value; + else if (tokenStr == "nonInductiveForLoops") + resources->limits.nonInductiveForLoops = (value != 0); + else if (tokenStr == "whileLoops") + resources->limits.whileLoops = (value != 0); + else if (tokenStr == "doWhileLoops") + resources->limits.doWhileLoops = (value != 0); + else if (tokenStr == "generalUniformIndexing") + resources->limits.generalUniformIndexing = (value != 0); + else if (tokenStr == "generalAttributeMatrixVectorIndexing") + resources->limits.generalAttributeMatrixVectorIndexing = (value != 0); + else if (tokenStr == 
"generalVaryingIndexing") + resources->limits.generalVaryingIndexing = (value != 0); + else if (tokenStr == "generalSamplerIndexing") + resources->limits.generalSamplerIndexing = (value != 0); + else if (tokenStr == "generalVariableIndexing") + resources->limits.generalVariableIndexing = (value != 0); + else if (tokenStr == "generalConstantMatrixVectorIndexing") + resources->limits.generalConstantMatrixVectorIndexing = (value != 0); + else + printf("Warning: unrecognized limit (%s) in configuration file.\n", tokenStr.c_str()); + + } +} + +} // end namespace glslang diff --git a/dep/glslang/StandAlone/ResourceLimits.h b/dep/glslang/StandAlone/ResourceLimits.h new file mode 100644 index 000000000..736248eb3 --- /dev/null +++ b/dep/glslang/StandAlone/ResourceLimits.h @@ -0,0 +1,57 @@ +// +// Copyright (C) 2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_ +#define _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_ + +#include + +#include "../glslang/Include/ResourceLimits.h" + +namespace glslang { + +// These are the default resources for TBuiltInResources, used for both +// - parsing this string for the case where the user didn't supply one, +// - dumping out a template for user construction of a config file. +extern const TBuiltInResource DefaultTBuiltInResource; + +// Returns the DefaultTBuiltInResource as a human-readable string. +std::string GetDefaultTBuiltInResourceString(); + +// Decodes the resource limits from |config| to |resources|. +void DecodeResourceLimits(TBuiltInResource* resources, char* config); + +} // end namespace glslang + +#endif // _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_ diff --git a/dep/glslang/StandAlone/Worklist.h b/dep/glslang/StandAlone/Worklist.h new file mode 100644 index 000000000..91b6f516b --- /dev/null +++ b/dep/glslang/StandAlone/Worklist.h @@ -0,0 +1,95 @@ +// +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +#ifndef WORKLIST_H_INCLUDED +#define WORKLIST_H_INCLUDED + +#include "../glslang/OSDependent/osinclude.h" +#include <list> +#include <mutex> +#include <string> + +namespace glslang { + + class TWorkItem { + public: + TWorkItem() { } + explicit TWorkItem(const std::string& s) : + name(s) { } + std::string name; + std::string results; + std::string resultsIndex; + }; + + class TWorklist { + public: + TWorklist() { } + virtual ~TWorklist() { } + + void add(TWorkItem* item) + { + std::lock_guard<std::mutex> guard(mutex); + worklist.push_back(item); + } + + bool remove(TWorkItem*& item) + { + std::lock_guard<std::mutex> guard(mutex); + + if (worklist.empty()) + return false; + item = worklist.front(); + worklist.pop_front(); + + return true; + } + + int size() + { + return (int)worklist.size(); + } + + bool empty() + { + return worklist.empty(); + } + + protected: + std::mutex mutex; + std::list<TWorkItem*> worklist; + }; + +} // end namespace glslang + +#endif // WORKLIST_H_INCLUDED diff --git a/dep/glslang/StandAlone/resource_limits_c.cpp b/dep/glslang/StandAlone/resource_limits_c.cpp new file mode 100644 index 000000000..a1f681c7b --- /dev/null +++ b/dep/glslang/StandAlone/resource_limits_c.cpp @@ -0,0 +1,65 @@ +/** +BSD 2-Clause License + +Copyright (c) 2020, Travis Fort +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**/ + +#include "resource_limits_c.h" +#include "ResourceLimits.h" +#include <stdlib.h> +#include <string.h> +#include <string> + +const glslang_resource_t* glslang_default_resource(void) +{ + return reinterpret_cast<const glslang_resource_t*>(&glslang::DefaultTBuiltInResource); +} + +#if defined(__clang__) || defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#elif defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + +const char* glslang_default_resource_string() +{ + std::string cpp_str = glslang::GetDefaultTBuiltInResourceString(); + char* c_str = (char*)malloc(cpp_str.length() + 1); + strcpy(c_str, cpp_str.c_str()); + return c_str; +} + +#if defined(__clang__) || defined(__GNUC__) +#pragma GCC diagnostic pop +#elif defined(_MSC_VER) +#pragma warning(pop) +#endif + +void glslang_decode_resource_limits(glslang_resource_t* resources, char* config) +{ + glslang::DecodeResourceLimits(reinterpret_cast<TBuiltInResource*>(resources), config); +} diff --git a/dep/glslang/StandAlone/resource_limits_c.h b/dep/glslang/StandAlone/resource_limits_c.h new file mode 100644 index 000000000..108fd5e21 --- /dev/null +++ b/dep/glslang/StandAlone/resource_limits_c.h @@ -0,0 +1,54 @@ +/** +BSD 2-Clause License + +Copyright (c) 2020, Travis Fort +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**/ + +#ifndef _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_ +#define _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_ + +#include "../glslang/Include/glslang_c_interface.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// These are the default resources for TBuiltInResources, used for both +// - parsing this string for the case where the user didn't supply one, +// - dumping out a template for user construction of a config file. 
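A hedged usage sketch of the three helpers declared just below; the wrapper functions and the overridden limit names here are illustrative, not part of the patch (it assumes glslang_resource_t is copyable by value, which the sizeof static_assert against TBuiltInResource suggests):

    #include "StandAlone/resource_limits_c.h"
    #include <stdio.h>
    #include <stdlib.h>

    // Start from the shared, read-only defaults and override a couple of
    // limits using the same "Name value" text format that
    // glslang_default_resource_string() emits.
    static glslang_resource_t make_tweaked_limits(void)
    {
        glslang_resource_t limits = *glslang_default_resource();
        char config[] = "MaxLights 16\nMaxDrawBuffers 8\n";
        glslang_decode_resource_limits(&limits, config);
        return limits;
    }

    // Dump the defaults; the returned buffer is malloc()'d, so the caller
    // must free() it.
    static void print_default_limits(void)
    {
        const char* text = glslang_default_resource_string();
        printf("%s", text);
        free((void*)text);
    }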
+const glslang_resource_t* glslang_default_resource(void); + +// Returns the DefaultTBuiltInResource as a human-readable string. +// NOTE: User is responsible for freeing this string. +const char* glslang_default_resource_string(); + +// Decodes the resource limits from |config| to |resources|. +void glslang_decode_resource_limits(glslang_resource_t* resources, char* config); + +#ifdef __cplusplus +} +#endif + +#endif // _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_ diff --git a/dep/glslang/glslang.vcxproj b/dep/glslang/glslang.vcxproj new file mode 100644 index 000000000..2d7ca8447 --- /dev/null +++ b/dep/glslang/glslang.vcxproj @@ -0,0 +1,470 @@ [470 lines of MSBuild project XML whose element tags were lost during extraction; the surviving text indicates a v142 StaticLibrary project {7F909E29-4808-4BD9-A60C-56C51A3AAEC2} (Windows SDK 10.0) with Debug, DebugFast, Release, and ReleaseLTCG configurations for Win32 and x64, C++14, warnings disabled, and SDL2 link dependencies.] diff --git a/dep/glslang/glslang.vcxproj.filters b/dep/glslang/glslang.vcxproj.filters new file mode 100644 index 000000000..37c2deaa2 --- /dev/null +++ b/dep/glslang/glslang.vcxproj.filters @@ -0,0 +1,106 @@ [106 lines of MSBuild filter XML; no element content survived extraction.] diff --git a/dep/glslang/glslang/CInterface/glslang_c_interface.cpp b/dep/glslang/glslang/CInterface/glslang_c_interface.cpp new file mode 100644 index 000000000..c4c24a9e4 --- /dev/null +++ b/dep/glslang/glslang/CInterface/glslang_c_interface.cpp @@ -0,0 +1,461 @@ +/** + This code is 
based on the glslang_c_interface implementation by Viktor Latypov +**/ + +/** +BSD 2-Clause License + +Copyright (c) 2019, Viktor Latypov +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**/ + +#include "glslang/Include/glslang_c_interface.h" + +#include "SPIRV/GlslangToSpv.h" +#include "SPIRV/Logger.h" +#include "SPIRV/SpvTools.h" +#include "StandAlone/DirStackFileIncluder.h" +#include "StandAlone/ResourceLimits.h" +#include "glslang/Include/ShHandle.h" + +#include "glslang/Include/ResourceLimits.h" +#include "glslang/MachineIndependent/Versions.h" + +static_assert(int(GLSLANG_STAGE_COUNT) == EShLangCount, ""); +static_assert(int(GLSLANG_STAGE_MASK_COUNT) == EShLanguageMaskCount, ""); +static_assert(int(GLSLANG_SOURCE_COUNT) == glslang::EShSourceCount, ""); +static_assert(int(GLSLANG_CLIENT_COUNT) == glslang::EShClientCount, ""); +static_assert(int(GLSLANG_TARGET_COUNT) == glslang::EShTargetCount, ""); +static_assert(int(GLSLANG_TARGET_CLIENT_VERSION_COUNT) == glslang::EShTargetClientVersionCount, ""); +static_assert(int(GLSLANG_TARGET_LANGUAGE_VERSION_COUNT) == glslang::EShTargetLanguageVersionCount, ""); +static_assert(int(GLSLANG_OPT_LEVEL_COUNT) == EshOptLevelCount, ""); +static_assert(int(GLSLANG_TEX_SAMP_TRANS_COUNT) == EShTexSampTransCount, ""); +static_assert(int(GLSLANG_MSG_COUNT) == EShMsgCount, ""); +static_assert(int(GLSLANG_REFLECTION_COUNT) == EShReflectionCount, ""); +static_assert(int(GLSLANG_PROFILE_COUNT) == EProfileCount, ""); +static_assert(sizeof(glslang_limits_t) == sizeof(TLimits), ""); +static_assert(sizeof(glslang_resource_t) == sizeof(TBuiltInResource), ""); + +typedef struct glslang_shader_s { + glslang::TShader* shader; + std::string preprocessedGLSL; +} glslang_shader_t; + +typedef struct glslang_program_s { + glslang::TProgram* program; + std::vector<unsigned int> spirv; + std::string loggerMessages; +} glslang_program_t; + +/* Wrapper/Adapter for C glsl_include_callbacks_t functions + + This class contains a 'glsl_include_callbacks_t' structure + with C include_local/include_system callback pointers. + + This class implements the TShader::Includer interface + by redirecting C++ virtual methods to C callbacks. 
+ + The 'IncludeResult' instances produced by this Includer + contain a reference to glsl_include_result_t C structure + to allow its lifetime management by another C callback + (CallbackIncluder::callbacks::free_include_result) +*/ +class CallbackIncluder : public glslang::TShader::Includer { +public: + /* Wrapper of IncludeResult which stores a glsl_include_result object internally */ + class CallbackIncludeResult : public glslang::TShader::Includer::IncludeResult { + public: + CallbackIncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, + void* userData, glsl_include_result_t* includeResult) + : glslang::TShader::Includer::IncludeResult(headerName, headerData, headerLength, userData), + includeResult(includeResult) + { + } + + virtual ~CallbackIncludeResult() {} + + protected: + friend class CallbackIncluder; + + glsl_include_result_t* includeResult; + }; + +public: + CallbackIncluder(glsl_include_callbacks_t _callbacks, void* _context) : callbacks(_callbacks), context(_context) {} + + virtual ~CallbackIncluder() {} + + virtual IncludeResult* includeSystem(const char* headerName, const char* includerName, + size_t inclusionDepth) override + { + if (this->callbacks.include_system) { + glsl_include_result_t* result = + this->callbacks.include_system(this->context, headerName, includerName, inclusionDepth); + + return new CallbackIncludeResult(std::string(headerName), result->header_data, result->header_length, + nullptr, result); + } + + return glslang::TShader::Includer::includeSystem(headerName, includerName, inclusionDepth); + } + + virtual IncludeResult* includeLocal(const char* headerName, const char* includerName, + size_t inclusionDepth) override + { + if (this->callbacks.include_local) { + glsl_include_result_t* result = + this->callbacks.include_local(this->context, headerName, includerName, inclusionDepth); + + return new CallbackIncludeResult(std::string(headerName), result->header_data, result->header_length, + nullptr, result); + } + + return glslang::TShader::Includer::includeLocal(headerName, includerName, inclusionDepth); + } + + /* This function only calls free_include_result callback + when the IncludeResult instance is allocated by a C function */ + virtual void releaseInclude(IncludeResult* result) override + { + if (result == nullptr) + return; + + if (this->callbacks.free_include_result && (result->userData == nullptr)) { + CallbackIncludeResult* innerResult = static_cast<CallbackIncludeResult*>(result); + /* use internal free() function */ + this->callbacks.free_include_result(this->context, innerResult->includeResult); + /* ignore internal fields of TShader::Includer::IncludeResult */ + delete result; + return; + } + + delete[] static_cast<char*>(result->userData); + delete result; + } + +private: + CallbackIncluder() {} + + /* C callback pointers */ + glsl_include_callbacks_t callbacks; + /* User-defined context */ + void* context; +}; + +int glslang_initialize_process() { return static_cast<int>(glslang::InitializeProcess()); } + +void glslang_finalize_process() { glslang::FinalizeProcess(); } + +static EShLanguage c_shader_stage(glslang_stage_t stage) +{ + switch (stage) { + case GLSLANG_STAGE_VERTEX: + return EShLangVertex; + case GLSLANG_STAGE_TESSCONTROL: + return EShLangTessControl; + case GLSLANG_STAGE_TESSEVALUATION: + return EShLangTessEvaluation; + case GLSLANG_STAGE_GEOMETRY: + return EShLangGeometry; + case GLSLANG_STAGE_FRAGMENT: + return EShLangFragment; + case GLSLANG_STAGE_COMPUTE: + return EShLangCompute; + case 
GLSLANG_STAGE_RAYGEN_NV: + return EShLangRayGen; + case GLSLANG_STAGE_INTERSECT_NV: + return EShLangIntersect; + case GLSLANG_STAGE_ANYHIT_NV: + return EShLangAnyHit; + case GLSLANG_STAGE_CLOSESTHIT_NV: + return EShLangClosestHit; + case GLSLANG_STAGE_MISS_NV: + return EShLangMiss; + case GLSLANG_STAGE_CALLABLE_NV: + return EShLangCallable; + case GLSLANG_STAGE_TASK_NV: + return EShLangTaskNV; + case GLSLANG_STAGE_MESH_NV: + return EShLangMeshNV; + default: + break; + } + return EShLangCount; +} + +static int c_shader_messages(glslang_messages_t messages) +{ +#define CONVERT_MSG(in, out) \ + if ((messages & in) == in) \ + res |= out; + + int res = 0; + + CONVERT_MSG(GLSLANG_MSG_RELAXED_ERRORS_BIT, EShMsgRelaxedErrors); + CONVERT_MSG(GLSLANG_MSG_SUPPRESS_WARNINGS_BIT, EShMsgSuppressWarnings); + CONVERT_MSG(GLSLANG_MSG_AST_BIT, EShMsgAST); + CONVERT_MSG(GLSLANG_MSG_SPV_RULES_BIT, EShMsgSpvRules); + CONVERT_MSG(GLSLANG_MSG_VULKAN_RULES_BIT, EShMsgVulkanRules); + CONVERT_MSG(GLSLANG_MSG_ONLY_PREPROCESSOR_BIT, EShMsgOnlyPreprocessor); + CONVERT_MSG(GLSLANG_MSG_READ_HLSL_BIT, EShMsgReadHlsl); + CONVERT_MSG(GLSLANG_MSG_CASCADING_ERRORS_BIT, EShMsgCascadingErrors); + CONVERT_MSG(GLSLANG_MSG_KEEP_UNCALLED_BIT, EShMsgKeepUncalled); + CONVERT_MSG(GLSLANG_MSG_HLSL_OFFSETS_BIT, EShMsgHlslOffsets); + CONVERT_MSG(GLSLANG_MSG_DEBUG_INFO_BIT, EShMsgDebugInfo); + CONVERT_MSG(GLSLANG_MSG_HLSL_ENABLE_16BIT_TYPES_BIT, EShMsgHlslEnable16BitTypes); + CONVERT_MSG(GLSLANG_MSG_HLSL_LEGALIZATION_BIT, EShMsgHlslLegalization); + CONVERT_MSG(GLSLANG_MSG_HLSL_DX9_COMPATIBLE_BIT, EShMsgHlslDX9Compatible); + CONVERT_MSG(GLSLANG_MSG_BUILTIN_SYMBOL_TABLE_BIT, EShMsgBuiltinSymbolTable); + return res; +#undef CONVERT_MSG +} + +static glslang::EShTargetLanguageVersion +c_shader_target_language_version(glslang_target_language_version_t target_language_version) +{ + switch (target_language_version) { + case GLSLANG_TARGET_SPV_1_0: + return glslang::EShTargetSpv_1_0; + case GLSLANG_TARGET_SPV_1_1: + return glslang::EShTargetSpv_1_1; + case GLSLANG_TARGET_SPV_1_2: + return glslang::EShTargetSpv_1_2; + case GLSLANG_TARGET_SPV_1_3: + return glslang::EShTargetSpv_1_3; + case GLSLANG_TARGET_SPV_1_4: + return glslang::EShTargetSpv_1_4; + case GLSLANG_TARGET_SPV_1_5: + return glslang::EShTargetSpv_1_5; + default: + break; + } + return glslang::EShTargetSpv_1_0; +} + +static glslang::EShClient c_shader_client(glslang_client_t client) +{ + switch (client) { + case GLSLANG_CLIENT_VULKAN: + return glslang::EShClientVulkan; + case GLSLANG_CLIENT_OPENGL: + return glslang::EShClientOpenGL; + default: + break; + } + + return glslang::EShClientNone; +} + +static glslang::EShTargetClientVersion c_shader_client_version(glslang_target_client_version_t client_version) +{ + switch (client_version) { + case GLSLANG_TARGET_VULKAN_1_1: + return glslang::EShTargetVulkan_1_1; + case GLSLANG_TARGET_OPENGL_450: + return glslang::EShTargetOpenGL_450; + default: + break; + } + + return glslang::EShTargetVulkan_1_0; +} + +static glslang::EShTargetLanguage c_shader_target_language(glslang_target_language_t target_language) +{ + if (target_language == GLSLANG_TARGET_NONE) + return glslang::EShTargetNone; + + return glslang::EShTargetSpv; +} + +static glslang::EShSource c_shader_source(glslang_source_t source) +{ + switch (source) { + case GLSLANG_SOURCE_GLSL: + return glslang::EShSourceGlsl; + case GLSLANG_SOURCE_HLSL: + return glslang::EShSourceHlsl; + default: + break; + } + + return glslang::EShSourceNone; +} + +static EProfile 
c_shader_profile(glslang_profile_t profile) +{ + switch (profile) { + case GLSLANG_BAD_PROFILE: + return EBadProfile; + case GLSLANG_NO_PROFILE: + return ENoProfile; + case GLSLANG_CORE_PROFILE: + return ECoreProfile; + case GLSLANG_COMPATIBILITY_PROFILE: + return ECompatibilityProfile; + case GLSLANG_ES_PROFILE: + return EEsProfile; + case GLSLANG_PROFILE_COUNT: // Should not use this + break; + } + + return EProfile(); +} + +glslang_shader_t* glslang_shader_create(const glslang_input_t* input) +{ + if (!input || !input->code) { + printf("Error creating shader: null input(%p)/input->code\n", input); + + if (input) + printf("input->code = %p\n", input->code); + + return nullptr; + } + + glslang_shader_t* shader = new glslang_shader_t(); + + shader->shader = new glslang::TShader(c_shader_stage(input->stage)); + shader->shader->setStrings(&input->code, 1); + shader->shader->setEnvInput(c_shader_source(input->language), c_shader_stage(input->stage), + c_shader_client(input->client), input->default_version); + shader->shader->setEnvClient(c_shader_client(input->client), c_shader_client_version(input->client_version)); + shader->shader->setEnvTarget(c_shader_target_language(input->target_language), + c_shader_target_language_version(input->target_language_version)); + + return shader; +} + +const char* glslang_shader_get_preprocessed_code(glslang_shader_t* shader) +{ + return shader->preprocessedGLSL.c_str(); +} + +int glslang_shader_preprocess(glslang_shader_t* shader, const glslang_input_t* input) +{ + DirStackFileIncluder Includer; + /* TODO: use custom callbacks if they are available in 'i->callbacks' */ + return shader->shader->preprocess( + reinterpret_cast<const TBuiltInResource*>(input->resource), + input->default_version, + c_shader_profile(input->default_profile), + input->force_default_version_and_profile != 0, + input->forward_compatible != 0, + (EShMessages)c_shader_messages(input->messages), + &shader->preprocessedGLSL, + Includer + ); +} + +int glslang_shader_parse(glslang_shader_t* shader, const glslang_input_t* input) +{ + const char* preprocessedCStr = shader->preprocessedGLSL.c_str(); + shader->shader->setStrings(&preprocessedCStr, 1); + + return shader->shader->parse( + reinterpret_cast<const TBuiltInResource*>(input->resource), + input->default_version, + input->forward_compatible != 0, + (EShMessages)c_shader_messages(input->messages) + ); +} + +const char* glslang_shader_get_info_log(glslang_shader_t* shader) { return shader->shader->getInfoLog(); } + +const char* glslang_shader_get_info_debug_log(glslang_shader_t* shader) { return shader->shader->getInfoDebugLog(); } + +void glslang_shader_delete(glslang_shader_t* shader) +{ + if (!shader) + return; + + delete (shader->shader); + delete (shader); +} + +glslang_program_t* glslang_program_create() +{ + glslang_program_t* p = new glslang_program_t(); + p->program = new glslang::TProgram(); + return p; +} + +void glslang_program_SPIRV_generate(glslang_program_t* program, glslang_stage_t stage) +{ + spv::SpvBuildLogger logger; + glslang::SpvOptions spvOptions; + spvOptions.validate = true; + + const glslang::TIntermediate* intermediate = program->program->getIntermediate(c_shader_stage(stage)); + + glslang::GlslangToSpv(*intermediate, program->spirv, &logger, &spvOptions); + + program->loggerMessages = logger.getAllMessages(); +} + +size_t glslang_program_SPIRV_get_size(glslang_program_t* program) { return program->spirv.size(); } + +void glslang_program_SPIRV_get(glslang_program_t* program, unsigned int* out) +{ + memcpy(out, program->spirv.data(), program->spirv.size() * 
sizeof(unsigned int)); +} + +unsigned int* glslang_program_SPIRV_get_ptr(glslang_program_t* program) +{ + return program->spirv.data(); +} + +const char* glslang_program_SPIRV_get_messages(glslang_program_t* program) +{ + return program->loggerMessages.empty() ? nullptr : program->loggerMessages.c_str(); +} + +void glslang_program_delete(glslang_program_t* program) +{ + if (!program) + return; + + delete (program->program); + delete (program); +} + +void glslang_program_add_shader(glslang_program_t* program, glslang_shader_t* shader) +{ + program->program->addShader(shader->shader); +} + +int glslang_program_link(glslang_program_t* program, int messages) +{ + return (int)program->program->link((EShMessages)messages); +} + +const char* glslang_program_get_info_log(glslang_program_t* program) +{ + return program->program->getInfoLog(); +} + +const char* glslang_program_get_info_debug_log(glslang_program_t* program) +{ + return program->program->getInfoDebugLog(); +} diff --git a/dep/glslang/glslang/GenericCodeGen/CodeGen.cpp b/dep/glslang/glslang/GenericCodeGen/CodeGen.cpp new file mode 100644 index 000000000..b3c7226df --- /dev/null +++ b/dep/glslang/glslang/GenericCodeGen/CodeGen.cpp @@ -0,0 +1,76 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "../Include/Common.h" +#include "../Include/ShHandle.h" +#include "../MachineIndependent/Versions.h" + +// +// Here is where real machine specific high-level data would be defined. +// +class TGenericCompiler : public TCompiler { +public: + TGenericCompiler(EShLanguage l, int dOptions) : TCompiler(l, infoSink), debugOptions(dOptions) { } + virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile); + TInfoSink infoSink; + int debugOptions; +}; + +// +// This function must be provided to create the actual +// compile object used by higher level code. It returns +// a subclass of TCompiler. 
+// +TCompiler* ConstructCompiler(EShLanguage language, int debugOptions) +{ + return new TGenericCompiler(language, debugOptions); +} + +// +// Delete the compiler made by ConstructCompiler +// +void DeleteCompiler(TCompiler* compiler) +{ + delete compiler; +} + +// +// Generate code from the given parse tree +// +bool TGenericCompiler::compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/) +{ + haveValidObjectCode = true; + + return haveValidObjectCode; +} diff --git a/dep/glslang/glslang/GenericCodeGen/Link.cpp b/dep/glslang/glslang/GenericCodeGen/Link.cpp new file mode 100644 index 000000000..c38db0f69 --- /dev/null +++ b/dep/glslang/glslang/GenericCodeGen/Link.cpp @@ -0,0 +1,91 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// The top level algorithms for linking multiple +// shaders together. +// +#include "../Include/Common.h" +#include "../Include/ShHandle.h" + +// +// Actual link object, derived from the shader handle base classes. +// +class TGenericLinker : public TLinker { +public: + TGenericLinker(EShExecutable e, int dOptions) : TLinker(e, infoSink), debugOptions(dOptions) { } + bool link(TCompilerList&, TUniformMap*) { return true; } + void getAttributeBindings(ShBindingTable const **) const { } + TInfoSink infoSink; + int debugOptions; +}; + +// +// The internal view of a uniform/float object exchanged with the driver. 
+// +class TUniformLinkedMap : public TUniformMap { +public: + TUniformLinkedMap() { } + virtual int getLocation(const char*) { return 0; } +}; + +TShHandleBase* ConstructLinker(EShExecutable executable, int debugOptions) +{ + return new TGenericLinker(executable, debugOptions); +} + +void DeleteLinker(TShHandleBase* linker) +{ + delete linker; +} + +TUniformMap* ConstructUniformMap() +{ + return new TUniformLinkedMap(); +} + +void DeleteUniformMap(TUniformMap* map) +{ + delete map; +} + +TShHandleBase* ConstructBindings() +{ + return 0; +} + +void DeleteBindingList(TShHandleBase* bindingList) +{ + delete bindingList; +} diff --git a/dep/glslang/glslang/Include/BaseTypes.h b/dep/glslang/glslang/Include/BaseTypes.h new file mode 100644 index 000000000..b69eaebf2 --- /dev/null +++ b/dep/glslang/glslang/Include/BaseTypes.h @@ -0,0 +1,571 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _BASICTYPES_INCLUDED_ +#define _BASICTYPES_INCLUDED_ + +namespace glslang { + +// +// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this. +// +enum TBasicType { + EbtVoid, + EbtFloat, + EbtDouble, + EbtFloat16, + EbtInt8, + EbtUint8, + EbtInt16, + EbtUint16, + EbtInt, + EbtUint, + EbtInt64, + EbtUint64, + EbtBool, + EbtAtomicUint, + EbtSampler, + EbtStruct, + EbtBlock, + EbtAccStruct, + EbtReference, + EbtRayQuery, + + // HLSL types that live only temporarily. + EbtString, + + EbtNumTypes +}; + +// +// Storage qualifiers. Should align with different kinds of storage or +// resource or GLSL storage qualifier. Expansion is deprecated. +// +// N.B.: You probably DON'T want to add anything here, but rather just add it +// to the built-in variables. See the comment above TBuiltInVariable. +// +// A new built-in variable will normally be an existing qualifier, like 'in', 'out', etc. 
+// DO NOT follow the design pattern of, say, EvqInstanceId, etc.
+//
+enum TStorageQualifier {
+    EvqTemporary,     // For temporaries (within a function), read/write
+    EvqGlobal,        // For globals read/write
+    EvqConst,         // User-defined constant values, will be semantically constant and constant folded
+    EvqVaryingIn,     // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+    EvqVaryingOut,    // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+    EvqUniform,       // read only, shared with app
+    EvqBuffer,        // read/write, shared with app
+    EvqShared,        // compute shader's read/write 'shared' qualifier
+
+    EvqPayload,
+    EvqPayloadIn,
+    EvqHitAttr,
+    EvqCallableData,
+    EvqCallableDataIn,
+
+    // parameters
+    EvqIn,            // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
+    EvqOut,           // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
+    EvqInOut,
+    EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics
+
+    // built-ins read by vertex shader
+    EvqVertexId,
+    EvqInstanceId,
+
+    // built-ins written by vertex shader
+    EvqPosition,
+    EvqPointSize,
+    EvqClipVertex,
+
+    // built-ins read by fragment shader
+    EvqFace,
+    EvqFragCoord,
+    EvqPointCoord,
+
+    // built-ins written by fragment shader
+    EvqFragColor,
+    EvqFragDepth,
+
+    // end of list
+    EvqLast
+};
+
+//
+// Subcategories of the TStorageQualifier, simply to give a direct mapping
+// between built-in variable names and a numerical value (the enum).
+//
+// For backward compatibility, there is some redundancy between the
+// TStorageQualifier and these. Existing members of both should be
+// maintained accurately.
+// However, any new built-in variable (and any existing non-redundant one)
+// must follow the pattern that the specific built-in is here, and only its
+// general qualifier is in TStorageQualifier.
+//
+// Something like gl_Position, which is sometimes 'in' and sometimes 'out',
+// shows up as two different built-in variables in a single stage, but
+// only has a single enum in TBuiltInVariable, so both the
+// TStorageQualifier and the TBuiltInVariable are needed to distinguish
+// between them.
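// Illustrative note (not part of the vendored sources): a geometry shader,
// for example, both reads gl_Position (through gl_in[]) and writes
// gl_Position. Both would carry EbvPosition from the enum below, and are told
// apart only by their storage qualifier from TStorageQualifier above (an
// input qualifier vs. EvqPosition on the output side).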
+// +enum TBuiltInVariable { + EbvNone, + EbvNumWorkGroups, + EbvWorkGroupSize, + EbvWorkGroupId, + EbvLocalInvocationId, + EbvGlobalInvocationId, + EbvLocalInvocationIndex, + EbvNumSubgroups, + EbvSubgroupID, + EbvSubGroupSize, + EbvSubGroupInvocation, + EbvSubGroupEqMask, + EbvSubGroupGeMask, + EbvSubGroupGtMask, + EbvSubGroupLeMask, + EbvSubGroupLtMask, + EbvSubgroupSize2, + EbvSubgroupInvocation2, + EbvSubgroupEqMask2, + EbvSubgroupGeMask2, + EbvSubgroupGtMask2, + EbvSubgroupLeMask2, + EbvSubgroupLtMask2, + EbvVertexId, + EbvInstanceId, + EbvVertexIndex, + EbvInstanceIndex, + EbvBaseVertex, + EbvBaseInstance, + EbvDrawId, + EbvPosition, + EbvPointSize, + EbvClipVertex, + EbvClipDistance, + EbvCullDistance, + EbvNormal, + EbvVertex, + EbvMultiTexCoord0, + EbvMultiTexCoord1, + EbvMultiTexCoord2, + EbvMultiTexCoord3, + EbvMultiTexCoord4, + EbvMultiTexCoord5, + EbvMultiTexCoord6, + EbvMultiTexCoord7, + EbvFrontColor, + EbvBackColor, + EbvFrontSecondaryColor, + EbvBackSecondaryColor, + EbvTexCoord, + EbvFogFragCoord, + EbvInvocationId, + EbvPrimitiveId, + EbvLayer, + EbvViewportIndex, + EbvPatchVertices, + EbvTessLevelOuter, + EbvTessLevelInner, + EbvBoundingBox, + EbvTessCoord, + EbvColor, + EbvSecondaryColor, + EbvFace, + EbvFragCoord, + EbvPointCoord, + EbvFragColor, + EbvFragData, + EbvFragDepth, + EbvFragStencilRef, + EbvSampleId, + EbvSamplePosition, + EbvSampleMask, + EbvHelperInvocation, + + EbvBaryCoordNoPersp, + EbvBaryCoordNoPerspCentroid, + EbvBaryCoordNoPerspSample, + EbvBaryCoordSmooth, + EbvBaryCoordSmoothCentroid, + EbvBaryCoordSmoothSample, + EbvBaryCoordPullModel, + + EbvViewIndex, + EbvDeviceIndex, + + EbvFragSizeEXT, + EbvFragInvocationCountEXT, + + EbvSecondaryFragDataEXT, + EbvSecondaryFragColorEXT, + + EbvViewportMaskNV, + EbvSecondaryPositionNV, + EbvSecondaryViewportMaskNV, + EbvPositionPerViewNV, + EbvViewportMaskPerViewNV, + EbvFragFullyCoveredNV, + EbvFragmentSizeNV, + EbvInvocationsPerPixelNV, + // ray tracing + EbvLaunchId, + EbvLaunchSize, + EbvInstanceCustomIndex, + EbvGeometryIndex, + EbvWorldRayOrigin, + EbvWorldRayDirection, + EbvObjectRayOrigin, + EbvObjectRayDirection, + EbvRayTmin, + EbvRayTmax, + EbvHitT, + EbvHitKind, + EbvObjectToWorld, + EbvObjectToWorld3x4, + EbvWorldToObject, + EbvWorldToObject3x4, + EbvIncomingRayFlags, + // barycentrics + EbvBaryCoordNV, + EbvBaryCoordNoPerspNV, + // mesh shaders + EbvTaskCountNV, + EbvPrimitiveCountNV, + EbvPrimitiveIndicesNV, + EbvClipDistancePerViewNV, + EbvCullDistancePerViewNV, + EbvLayerPerViewNV, + EbvMeshViewCountNV, + EbvMeshViewIndicesNV, + + // sm builtins + EbvWarpsPerSM, + EbvSMCount, + EbvWarpID, + EbvSMID, + + // HLSL built-ins that live only temporarily, until they get remapped + // to one of the above. + EbvFragDepthGreater, + EbvFragDepthLesser, + EbvGsOutputStream, + EbvOutputPatch, + EbvInputPatch, + + // structbuffer types + EbvAppendConsume, // no need to differentiate append and consume + EbvRWStructuredBuffer, + EbvStructuredBuffer, + EbvByteAddressBuffer, + EbvRWByteAddressBuffer, + + EbvLast +}; + +// In this enum, order matters; users can assume higher precision is a bigger value +// and EpqNone is 0. 
+enum TPrecisionQualifier { + EpqNone = 0, + EpqLow, + EpqMedium, + EpqHigh +}; + +#ifdef GLSLANG_WEB +__inline const char* GetStorageQualifierString(TStorageQualifier q) { return ""; } +__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) { return ""; } +#else +// These will show up in error messages +__inline const char* GetStorageQualifierString(TStorageQualifier q) +{ + switch (q) { + case EvqTemporary: return "temp"; break; + case EvqGlobal: return "global"; break; + case EvqConst: return "const"; break; + case EvqConstReadOnly: return "const (read only)"; break; + case EvqVaryingIn: return "in"; break; + case EvqVaryingOut: return "out"; break; + case EvqUniform: return "uniform"; break; + case EvqBuffer: return "buffer"; break; + case EvqShared: return "shared"; break; + case EvqIn: return "in"; break; + case EvqOut: return "out"; break; + case EvqInOut: return "inout"; break; + case EvqVertexId: return "gl_VertexId"; break; + case EvqInstanceId: return "gl_InstanceId"; break; + case EvqPosition: return "gl_Position"; break; + case EvqPointSize: return "gl_PointSize"; break; + case EvqClipVertex: return "gl_ClipVertex"; break; + case EvqFace: return "gl_FrontFacing"; break; + case EvqFragCoord: return "gl_FragCoord"; break; + case EvqPointCoord: return "gl_PointCoord"; break; + case EvqFragColor: return "fragColor"; break; + case EvqFragDepth: return "gl_FragDepth"; break; + case EvqPayload: return "rayPayloadNV"; break; + case EvqPayloadIn: return "rayPayloadInNV"; break; + case EvqHitAttr: return "hitAttributeNV"; break; + case EvqCallableData: return "callableDataNV"; break; + case EvqCallableDataIn: return "callableDataInNV"; break; + default: return "unknown qualifier"; + } +} + +__inline const char* GetBuiltInVariableString(TBuiltInVariable v) +{ + switch (v) { + case EbvNone: return ""; + case EbvNumWorkGroups: return "NumWorkGroups"; + case EbvWorkGroupSize: return "WorkGroupSize"; + case EbvWorkGroupId: return "WorkGroupID"; + case EbvLocalInvocationId: return "LocalInvocationID"; + case EbvGlobalInvocationId: return "GlobalInvocationID"; + case EbvLocalInvocationIndex: return "LocalInvocationIndex"; + case EbvNumSubgroups: return "NumSubgroups"; + case EbvSubgroupID: return "SubgroupID"; + case EbvSubGroupSize: return "SubGroupSize"; + case EbvSubGroupInvocation: return "SubGroupInvocation"; + case EbvSubGroupEqMask: return "SubGroupEqMask"; + case EbvSubGroupGeMask: return "SubGroupGeMask"; + case EbvSubGroupGtMask: return "SubGroupGtMask"; + case EbvSubGroupLeMask: return "SubGroupLeMask"; + case EbvSubGroupLtMask: return "SubGroupLtMask"; + case EbvSubgroupSize2: return "SubgroupSize"; + case EbvSubgroupInvocation2: return "SubgroupInvocationID"; + case EbvSubgroupEqMask2: return "SubgroupEqMask"; + case EbvSubgroupGeMask2: return "SubgroupGeMask"; + case EbvSubgroupGtMask2: return "SubgroupGtMask"; + case EbvSubgroupLeMask2: return "SubgroupLeMask"; + case EbvSubgroupLtMask2: return "SubgroupLtMask"; + case EbvVertexId: return "VertexId"; + case EbvInstanceId: return "InstanceId"; + case EbvVertexIndex: return "VertexIndex"; + case EbvInstanceIndex: return "InstanceIndex"; + case EbvBaseVertex: return "BaseVertex"; + case EbvBaseInstance: return "BaseInstance"; + case EbvDrawId: return "DrawId"; + case EbvPosition: return "Position"; + case EbvPointSize: return "PointSize"; + case EbvClipVertex: return "ClipVertex"; + case EbvClipDistance: return "ClipDistance"; + case EbvCullDistance: return "CullDistance"; + case EbvNormal: return "Normal"; + case 
EbvVertex: return "Vertex"; + case EbvMultiTexCoord0: return "MultiTexCoord0"; + case EbvMultiTexCoord1: return "MultiTexCoord1"; + case EbvMultiTexCoord2: return "MultiTexCoord2"; + case EbvMultiTexCoord3: return "MultiTexCoord3"; + case EbvMultiTexCoord4: return "MultiTexCoord4"; + case EbvMultiTexCoord5: return "MultiTexCoord5"; + case EbvMultiTexCoord6: return "MultiTexCoord6"; + case EbvMultiTexCoord7: return "MultiTexCoord7"; + case EbvFrontColor: return "FrontColor"; + case EbvBackColor: return "BackColor"; + case EbvFrontSecondaryColor: return "FrontSecondaryColor"; + case EbvBackSecondaryColor: return "BackSecondaryColor"; + case EbvTexCoord: return "TexCoord"; + case EbvFogFragCoord: return "FogFragCoord"; + case EbvInvocationId: return "InvocationID"; + case EbvPrimitiveId: return "PrimitiveID"; + case EbvLayer: return "Layer"; + case EbvViewportIndex: return "ViewportIndex"; + case EbvPatchVertices: return "PatchVertices"; + case EbvTessLevelOuter: return "TessLevelOuter"; + case EbvTessLevelInner: return "TessLevelInner"; + case EbvBoundingBox: return "BoundingBox"; + case EbvTessCoord: return "TessCoord"; + case EbvColor: return "Color"; + case EbvSecondaryColor: return "SecondaryColor"; + case EbvFace: return "Face"; + case EbvFragCoord: return "FragCoord"; + case EbvPointCoord: return "PointCoord"; + case EbvFragColor: return "FragColor"; + case EbvFragData: return "FragData"; + case EbvFragDepth: return "FragDepth"; + case EbvFragStencilRef: return "FragStencilRef"; + case EbvSampleId: return "SampleId"; + case EbvSamplePosition: return "SamplePosition"; + case EbvSampleMask: return "SampleMaskIn"; + case EbvHelperInvocation: return "HelperInvocation"; + + case EbvBaryCoordNoPersp: return "BaryCoordNoPersp"; + case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid"; + case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample"; + case EbvBaryCoordSmooth: return "BaryCoordSmooth"; + case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid"; + case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample"; + case EbvBaryCoordPullModel: return "BaryCoordPullModel"; + + case EbvViewIndex: return "ViewIndex"; + case EbvDeviceIndex: return "DeviceIndex"; + + case EbvFragSizeEXT: return "FragSizeEXT"; + case EbvFragInvocationCountEXT: return "FragInvocationCountEXT"; + + case EbvSecondaryFragDataEXT: return "SecondaryFragDataEXT"; + case EbvSecondaryFragColorEXT: return "SecondaryFragColorEXT"; + + case EbvViewportMaskNV: return "ViewportMaskNV"; + case EbvSecondaryPositionNV: return "SecondaryPositionNV"; + case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV"; + case EbvPositionPerViewNV: return "PositionPerViewNV"; + case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV"; + case EbvFragFullyCoveredNV: return "FragFullyCoveredNV"; + case EbvFragmentSizeNV: return "FragmentSizeNV"; + case EbvInvocationsPerPixelNV: return "InvocationsPerPixelNV"; + case EbvLaunchId: return "LaunchIdNV"; + case EbvLaunchSize: return "LaunchSizeNV"; + case EbvInstanceCustomIndex: return "InstanceCustomIndexNV"; + case EbvGeometryIndex: return "GeometryIndexEXT"; + case EbvWorldRayOrigin: return "WorldRayOriginNV"; + case EbvWorldRayDirection: return "WorldRayDirectionNV"; + case EbvObjectRayOrigin: return "ObjectRayOriginNV"; + case EbvObjectRayDirection: return "ObjectRayDirectionNV"; + case EbvRayTmin: return "ObjectRayTminNV"; + case EbvRayTmax: return "ObjectRayTmaxNV"; + case EbvHitT: return "HitTNV"; + case EbvHitKind: return "HitKindNV"; + case 
EbvIncomingRayFlags: return "IncomingRayFlagsNV"; + case EbvObjectToWorld: return "ObjectToWorldNV"; + case EbvWorldToObject: return "WorldToObjectNV"; + + case EbvBaryCoordNV: return "BaryCoordNV"; + case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV"; + + case EbvTaskCountNV: return "TaskCountNV"; + case EbvPrimitiveCountNV: return "PrimitiveCountNV"; + case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV"; + case EbvClipDistancePerViewNV: return "ClipDistancePerViewNV"; + case EbvCullDistancePerViewNV: return "CullDistancePerViewNV"; + case EbvLayerPerViewNV: return "LayerPerViewNV"; + case EbvMeshViewCountNV: return "MeshViewCountNV"; + case EbvMeshViewIndicesNV: return "MeshViewIndicesNV"; + + case EbvWarpsPerSM: return "WarpsPerSMNV"; + case EbvSMCount: return "SMCountNV"; + case EbvWarpID: return "WarpIDNV"; + case EbvSMID: return "SMIDNV"; + + default: return "unknown built-in variable"; + } +} + +__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) +{ + switch (p) { + case EpqNone: return ""; break; + case EpqLow: return "lowp"; break; + case EpqMedium: return "mediump"; break; + case EpqHigh: return "highp"; break; + default: return "unknown precision qualifier"; + } +} +#endif + +__inline bool isTypeSignedInt(TBasicType type) +{ + switch (type) { + case EbtInt8: + case EbtInt16: + case EbtInt: + case EbtInt64: + return true; + default: + return false; + } +} + +__inline bool isTypeUnsignedInt(TBasicType type) +{ + switch (type) { + case EbtUint8: + case EbtUint16: + case EbtUint: + case EbtUint64: + return true; + default: + return false; + } +} + +__inline bool isTypeInt(TBasicType type) +{ + return isTypeSignedInt(type) || isTypeUnsignedInt(type); +} + +__inline bool isTypeFloat(TBasicType type) +{ + switch (type) { + case EbtFloat: + case EbtDouble: + case EbtFloat16: + return true; + default: + return false; + } +} + +__inline int getTypeRank(TBasicType type) +{ + int res = -1; + switch(type) { + case EbtInt8: + case EbtUint8: + res = 0; + break; + case EbtInt16: + case EbtUint16: + res = 1; + break; + case EbtInt: + case EbtUint: + res = 2; + break; + case EbtInt64: + case EbtUint64: + res = 3; + break; + default: + assert(false); + break; + } + return res; +} + +} // end namespace glslang + +#endif // _BASICTYPES_INCLUDED_ diff --git a/dep/glslang/glslang/Include/Common.h b/dep/glslang/glslang/Include/Common.h new file mode 100644 index 000000000..733a790cf --- /dev/null +++ b/dep/glslang/glslang/Include/Common.h @@ -0,0 +1,292 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _COMMON_INCLUDED_
+#define _COMMON_INCLUDED_
+
+
+#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
+#include <sstream>
+namespace std {
+template<typename T>
+std::string to_string(const T& val) {
+    std::ostringstream os;
+    os << val;
+    return os.str();
+}
+}
+#endif
+
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
+    #include <basetsd.h>
+    #ifndef snprintf
+    #define snprintf sprintf_s
+    #endif
+    #define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
+#elif defined (solaris)
+    #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+    #include <sys/int_types.h>
+    #define UINT_PTR uintptr_t
+#else
+    #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+    #include <stdint.h>
+    #define UINT_PTR uintptr_t
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+    #include <stdlib.h>
+    inline long long int strtoll (const char* str, char** endptr, int base)
+    {
+        return _strtoi64(str, endptr, base);
+    }
+    inline unsigned long long int strtoull (const char* str, char** endptr, int base)
+    {
+        return _strtoui64(str, endptr, base);
+    }
+    inline long long int atoll (const char* str)
+    {
+        return strtoll(str, NULL, 10);
+    }
+#endif
+
+#if defined(_MSC_VER)
+#define strdup _strdup
+#endif
+
+/* windows only pragma */
+#ifdef _MSC_VER
+    #pragma warning(disable : 4786) // Don't warn about too long identifiers
+    #pragma warning(disable : 4514) // unused inline method
+    #pragma warning(disable : 4201) // nameless union
+#endif
+
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#include <cstdlib>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "PoolAlloc.h"
+
+//
+// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
+//
+#define POOL_ALLOCATOR_NEW_DELETE(A)                                  \
+    void* operator new(size_t s) { return (A).allocate(s); }          \
+    void* operator new(size_t, void *_Where) { return (_Where); }     \
+    void operator delete(void*) { }                                   \
+    void operator delete(void *, void *) { }                          \
+    void* operator new[](size_t s) { return (A).allocate(s); }        \
+    void* operator new[](size_t, void *_Where) { return (_Where); }   \
+    void operator delete[](void*) { }                                 \
+    void operator delete[](void *, void *) { }
+
+namespace glslang {
+
+    //
+    // Pool version of string.
+    //
+    typedef pool_allocator<char> TStringAllocator;
+    typedef std::basic_string<char, std::char_traits<char>, TStringAllocator> TString;
+
+} // end namespace glslang
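// Illustrative sketch (not part of the vendored sources): a type opts into the
// pool scheme by placing the POOL_ALLOCATOR_NEW_DELETE macro above inside its
// definition, exactly as TConstUnion and TPragmaTable do later in this patch.
// The class name TExampleNode is hypothetical.
class TExampleNode {
public:
    // Route operator new/delete to the thread's pool allocator.
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
    int payload = 0;
};
// 'new TExampleNode' now draws from the pool; the empty operator delete is
// intentional, since pool memory is reclaimed all at once when the pool pops.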
+// Repackage the std::hash for use by unordered map/set with a TString key.
+namespace std {
+
+    template<> struct hash<glslang::TString> {
+        std::size_t operator()(const glslang::TString& s) const
+        {
+            const unsigned _FNV_offset_basis = 2166136261U;
+            const unsigned _FNV_prime = 16777619U;
+            unsigned _Val = _FNV_offset_basis;
+            size_t _Count = s.size();
+            const char* _First = s.c_str();
+            for (size_t _Next = 0; _Next < _Count; ++_Next)
+            {
+                _Val ^= (unsigned)_First[_Next];
+                _Val *= _FNV_prime;
+            }
+
+            return _Val;
+        }
+    };
+}
+
+namespace glslang {
+
+inline TString* NewPoolTString(const char* s)
+{
+    void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
+    return new(memory) TString(s);
+}
+
+template<class T> inline T* NewPoolObject(T*)
+{
+    return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
+}
+
+template<class T> inline T* NewPoolObject(T, int instances)
+{
+    return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
+}
+
+//
+// Pool allocator versions of vectors, lists, and maps
+//
+template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
+public:
+    POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+    typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
+    TVector() : std::vector<T, pool_allocator<T> >() {}
+    TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
+    TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
+    TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
+};
+
+template <class T> class TList : public std::list<T, pool_allocator<T> > {
+};
+
+template <class K, class D, class CMP = std::less<K> >
+class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
+};
+
+template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
+class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
+};
+
+//
+// Persistent string memory. Should only be used for strings that survive
+// across compiles/links.
+//
+typedef std::basic_string<char> TPersistString;
+
+//
+// templatized min and max functions.
+//
+template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
+template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
+
+//
+// Create a TString object from an integer.
+//
+#if defined _MSC_VER || defined MINGW_HAS_SECURE_API
+inline const TString String(const int i, const int base = 10)
+{
+    char text[16]; // 32 bit ints are at most 10 digits in base 10
+    _itoa_s(i, text, sizeof(text), base);
+    return text;
+}
+#else
+inline const TString String(const int i, const int /*base*/ = 10)
+{
+    char text[16]; // 32 bit ints are at most 10 digits in base 10
+
+    // we assume base 10 for all cases
+    snprintf(text, sizeof(text), "%d", i);
+
+    return text;
+}
+#endif
+
+struct TSourceLoc {
+    void init()
+    {
+        name = nullptr; string = 0; line = 0; column = 0;
+    }
+    void init(int stringNum) { init(); string = stringNum; }
+    // Returns the name if it exists. Otherwise, returns the string number.
+    std::string getStringNameOrNum(bool quoteStringName = true) const
+    {
+        if (name != nullptr) {
+            TString qstr = quoteStringName ? ("\"" + *name + "\"") : *name;
+            std::string ret_str(qstr.c_str());
+            return ret_str;
+        }
+        return std::to_string((long long)string);
+    }
+    const char* getFilename() const
+    {
+        if (name == nullptr)
+            return nullptr;
+        return name->c_str();
+    }
+    const char* getFilenameStr() const { return name == nullptr ? "" : name->c_str(); }
+    TString* name; // descriptive name for this string, when a textual name is available, otherwise nullptr
+    int string;
+    int line;
+    int column;
+};
+
+class TPragmaTable : public TMap<TString, TString> {
+public:
+    POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+};
+
+const int MaxTokenLength = 1024;
+
+template <class T> bool IsPow2(T powerOf2)
+{
+    if (powerOf2 <= 0)
+        return false;
+
+    return (powerOf2 & (powerOf2 - 1)) == 0;
+}
+
+// Round number up to a multiple of the given powerOf2, which is not an
+// exponent, but an actual multiple that must itself be a power of 2
+// (e.g., rounding 13 up to a multiple of 8: (13 + 7) & ~7 == 16).
+template <class T> void RoundToPow2(T& number, int powerOf2)
+{
+    assert(IsPow2(powerOf2));
+    number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
+}
+
+template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
+{
+    assert(IsPow2(powerOf2));
+    return ! (number & (powerOf2 - 1));
+}
+
+} // end namespace glslang
+
+#endif // _COMMON_INCLUDED_
diff --git a/dep/glslang/glslang/Include/ConstantUnion.h b/dep/glslang/glslang/Include/ConstantUnion.h
new file mode 100644
index 000000000..c4ffb8577
--- /dev/null
+++ b/dep/glslang/glslang/Include/ConstantUnion.h
@@ -0,0 +1,974 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+// + +#ifndef _CONSTANT_UNION_INCLUDED_ +#define _CONSTANT_UNION_INCLUDED_ + +#include "../Include/Common.h" +#include "../Include/BaseTypes.h" + +namespace glslang { + +class TConstUnion { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + + TConstUnion() : iConst(0), type(EbtInt) { } + + void setI8Const(signed char i) + { + i8Const = i; + type = EbtInt8; + } + + void setU8Const(unsigned char u) + { + u8Const = u; + type = EbtUint8; + } + + void setI16Const(signed short i) + { + i16Const = i; + type = EbtInt16; + } + + void setU16Const(unsigned short u) + { + u16Const = u; + type = EbtUint16; + } + + void setIConst(int i) + { + iConst = i; + type = EbtInt; + } + + void setUConst(unsigned int u) + { + uConst = u; + type = EbtUint; + } + + void setI64Const(long long i64) + { + i64Const = i64; + type = EbtInt64; + } + + void setU64Const(unsigned long long u64) + { + u64Const = u64; + type = EbtUint64; + } + + void setDConst(double d) + { + dConst = d; + type = EbtDouble; + } + + void setBConst(bool b) + { + bConst = b; + type = EbtBool; + } + + void setSConst(const TString* s) + { + sConst = s; + type = EbtString; + } + + signed char getI8Const() const { return i8Const; } + unsigned char getU8Const() const { return u8Const; } + signed short getI16Const() const { return i16Const; } + unsigned short getU16Const() const { return u16Const; } + int getIConst() const { return iConst; } + unsigned int getUConst() const { return uConst; } + long long getI64Const() const { return i64Const; } + unsigned long long getU64Const() const { return u64Const; } + double getDConst() const { return dConst; } + bool getBConst() const { return bConst; } + const TString* getSConst() const { return sConst; } + + bool operator==(const signed char i) const + { + if (i == i8Const) + return true; + + return false; + } + + bool operator==(const unsigned char u) const + { + if (u == u8Const) + return true; + + return false; + } + + bool operator==(const signed short i) const + { + if (i == i16Const) + return true; + + return false; + } + + bool operator==(const unsigned short u) const + { + if (u == u16Const) + return true; + + return false; + } + + bool operator==(const int i) const + { + if (i == iConst) + return true; + + return false; + } + + bool operator==(const unsigned int u) const + { + if (u == uConst) + return true; + + return false; + } + + bool operator==(const long long i64) const + { + if (i64 == i64Const) + return true; + + return false; + } + + bool operator==(const unsigned long long u64) const + { + if (u64 == u64Const) + return true; + + return false; + } + + bool operator==(const double d) const + { + if (d == dConst) + return true; + + return false; + } + + bool operator==(const bool b) const + { + if (b == bConst) + return true; + + return false; + } + + bool operator==(const TConstUnion& constant) const + { + if (constant.type != type) + return false; + + switch (type) { + case EbtInt: + if (constant.iConst == iConst) + return true; + + break; + case EbtUint: + if (constant.uConst == uConst) + return true; + + break; + case EbtBool: + if (constant.bConst == bConst) + return true; + + break; + case EbtDouble: + if (constant.dConst == dConst) + return true; + + break; + +#ifndef GLSLANG_WEB + case EbtInt16: + if (constant.i16Const == i16Const) + return true; + + break; + case EbtUint16: + if (constant.u16Const == u16Const) + return true; + + break; + case EbtInt8: + if (constant.i8Const == i8Const) + return true; + + break; + case EbtUint8: + if (constant.u8Const == u8Const) + return 
true; + + break; + case EbtInt64: + if (constant.i64Const == i64Const) + return true; + + break; + case EbtUint64: + if (constant.u64Const == u64Const) + return true; + + break; +#endif + default: + assert(false && "Default missing"); + } + + return false; + } + + bool operator!=(const signed char i) const + { + return !operator==(i); + } + + bool operator!=(const unsigned char u) const + { + return !operator==(u); + } + + bool operator!=(const signed short i) const + { + return !operator==(i); + } + + bool operator!=(const unsigned short u) const + { + return !operator==(u); + } + + bool operator!=(const int i) const + { + return !operator==(i); + } + + bool operator!=(const unsigned int u) const + { + return !operator==(u); + } + + bool operator!=(const long long i) const + { + return !operator==(i); + } + + bool operator!=(const unsigned long long u) const + { + return !operator==(u); + } + + bool operator!=(const float f) const + { + return !operator==(f); + } + + bool operator!=(const bool b) const + { + return !operator==(b); + } + + bool operator!=(const TConstUnion& constant) const + { + return !operator==(constant); + } + + bool operator>(const TConstUnion& constant) const + { + assert(type == constant.type); + switch (type) { + case EbtInt: + if (iConst > constant.iConst) + return true; + + return false; + case EbtUint: + if (uConst > constant.uConst) + return true; + + return false; + case EbtDouble: + if (dConst > constant.dConst) + return true; + + return false; +#ifndef GLSLANG_WEB + case EbtInt8: + if (i8Const > constant.i8Const) + return true; + + return false; + case EbtUint8: + if (u8Const > constant.u8Const) + return true; + + return false; + case EbtInt16: + if (i16Const > constant.i16Const) + return true; + + return false; + case EbtUint16: + if (u16Const > constant.u16Const) + return true; + + return false; + case EbtInt64: + if (i64Const > constant.i64Const) + return true; + + return false; + case EbtUint64: + if (u64Const > constant.u64Const) + return true; + + return false; +#endif + default: + assert(false && "Default missing"); + return false; + } + } + + bool operator<(const TConstUnion& constant) const + { + assert(type == constant.type); + switch (type) { +#ifndef GLSLANG_WEB + case EbtInt8: + if (i8Const < constant.i8Const) + return true; + + return false; + case EbtUint8: + if (u8Const < constant.u8Const) + return true; + + return false; + case EbtInt16: + if (i16Const < constant.i16Const) + return true; + + return false; + case EbtUint16: + if (u16Const < constant.u16Const) + return true; + return false; + case EbtInt64: + if (i64Const < constant.i64Const) + return true; + + return false; + case EbtUint64: + if (u64Const < constant.u64Const) + return true; + + return false; +#endif + case EbtDouble: + if (dConst < constant.dConst) + return true; + + return false; + case EbtInt: + if (iConst < constant.iConst) + return true; + + return false; + case EbtUint: + if (uConst < constant.uConst) + return true; + + return false; + default: + assert(false && "Default missing"); + return false; + } + } + + TConstUnion operator+(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst + constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst + constant.uConst); break; + case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break; + case EbtInt16: 
returnValue.setI16Const(i16Const + constant.i16Const); break;
+        case EbtInt64:  returnValue.setI64Const(i64Const + constant.i64Const); break;
+        case EbtUint8:  returnValue.setU8Const(u8Const + constant.u8Const); break;
+        case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break;
+        case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
+#endif
+        default: assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    TConstUnion operator-(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        assert(type == constant.type);
+        switch (type) {
+        case EbtInt:    returnValue.setIConst(iConst - constant.iConst); break;
+        case EbtUint:   returnValue.setUConst(uConst - constant.uConst); break;
+        case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
+#ifndef GLSLANG_WEB
+        case EbtInt8:   returnValue.setI8Const(i8Const - constant.i8Const); break;
+        case EbtInt16:  returnValue.setI16Const(i16Const - constant.i16Const); break;
+        case EbtInt64:  returnValue.setI64Const(i64Const - constant.i64Const); break;
+        case EbtUint8:  returnValue.setU8Const(u8Const - constant.u8Const); break;
+        case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break;
+        case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
+#endif
+        default: assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    TConstUnion operator*(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        assert(type == constant.type);
+        switch (type) {
+        case EbtInt:    returnValue.setIConst(iConst * constant.iConst); break;
+        case EbtUint:   returnValue.setUConst(uConst * constant.uConst); break;
+        case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
+#ifndef GLSLANG_WEB
+        case EbtInt8:   returnValue.setI8Const(i8Const * constant.i8Const); break;
+        case EbtInt16:  returnValue.setI16Const(i16Const * constant.i16Const); break;
+        case EbtInt64:  returnValue.setI64Const(i64Const * constant.i64Const); break;
+        case EbtUint8:  returnValue.setU8Const(u8Const * constant.u8Const); break;
+        case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break;
+        case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
+#endif
+        default: assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    TConstUnion operator%(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        assert(type == constant.type);
+        switch (type) {
+        case EbtInt:    returnValue.setIConst(iConst % constant.iConst); break;
+        case EbtUint:   returnValue.setUConst(uConst % constant.uConst); break;
+#ifndef GLSLANG_WEB
+        case EbtInt8:   returnValue.setI8Const(i8Const % constant.i8Const); break;
+        case EbtInt16:  returnValue.setI16Const(i16Const % constant.i16Const); break;
+        case EbtInt64:  returnValue.setI64Const(i64Const % constant.i64Const); break;
+        case EbtUint8:  returnValue.setU8Const(u8Const % constant.u8Const); break;
+        case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break;
+        case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
+#endif
+        default: assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    TConstUnion operator>>(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        switch (type) {
+#ifndef GLSLANG_WEB
+        case EbtInt8:
+            switch (constant.type) {
+            case EbtInt8:   returnValue.setI8Const(i8Const >> constant.i8Const); break;
+            case EbtUint8:  returnValue.setI8Const(i8Const >> constant.u8Const); break;
+            case EbtInt16:
returnValue.setI8Const(i8Const >> constant.i16Const); break; + case EbtUint16: returnValue.setI8Const(i8Const >> constant.u16Const); break; + case EbtInt: returnValue.setI8Const(i8Const >> constant.iConst); break; + case EbtUint: returnValue.setI8Const(i8Const >> constant.uConst); break; + case EbtInt64: returnValue.setI8Const(i8Const >> constant.i64Const); break; + case EbtUint64: returnValue.setI8Const(i8Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint8: + switch (constant.type) { + case EbtInt8: returnValue.setU8Const(u8Const >> constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const >> constant.u8Const); break; + case EbtInt16: returnValue.setU8Const(u8Const >> constant.i16Const); break; + case EbtUint16: returnValue.setU8Const(u8Const >> constant.u16Const); break; + case EbtInt: returnValue.setU8Const(u8Const >> constant.iConst); break; + case EbtUint: returnValue.setU8Const(u8Const >> constant.uConst); break; + case EbtInt64: returnValue.setU8Const(u8Const >> constant.i64Const); break; + case EbtUint64: returnValue.setU8Const(u8Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtInt16: + switch (constant.type) { + case EbtInt8: returnValue.setI16Const(i16Const >> constant.i8Const); break; + case EbtUint8: returnValue.setI16Const(i16Const >> constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const >> constant.i16Const); break; + case EbtUint16: returnValue.setI16Const(i16Const >> constant.u16Const); break; + case EbtInt: returnValue.setI16Const(i16Const >> constant.iConst); break; + case EbtUint: returnValue.setI16Const(i16Const >> constant.uConst); break; + case EbtInt64: returnValue.setI16Const(i16Const >> constant.i64Const); break; + case EbtUint64: returnValue.setI16Const(i16Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint16: + switch (constant.type) { + case EbtInt8: returnValue.setU16Const(u16Const >> constant.i8Const); break; + case EbtUint8: returnValue.setU16Const(u16Const >> constant.u8Const); break; + case EbtInt16: returnValue.setU16Const(u16Const >> constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const >> constant.u16Const); break; + case EbtInt: returnValue.setU16Const(u16Const >> constant.iConst); break; + case EbtUint: returnValue.setU16Const(u16Const >> constant.uConst); break; + case EbtInt64: returnValue.setU16Const(u16Const >> constant.i64Const); break; + case EbtUint64: returnValue.setU16Const(u16Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; +#endif + case EbtInt: + switch (constant.type) { + case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break; + case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break; + case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break; + case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break; + case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break; + case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break; + case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + break; + case EbtUint: + switch (constant.type) { + case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break; + 
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break; + case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break; + case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break; + case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break; + case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break; + case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + break; +#ifndef GLSLANG_WEB + case EbtInt64: + switch (constant.type) { + case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break; + case EbtUint8: returnValue.setI64Const(i64Const >> constant.u8Const); break; + case EbtInt16: returnValue.setI64Const(i64Const >> constant.i16Const); break; + case EbtUint16: returnValue.setI64Const(i64Const >> constant.u16Const); break; + case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break; + case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break; + case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break; + case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint64: + switch (constant.type) { + case EbtInt8: returnValue.setU64Const(u64Const >> constant.i8Const); break; + case EbtUint8: returnValue.setU64Const(u64Const >> constant.u8Const); break; + case EbtInt16: returnValue.setU64Const(u64Const >> constant.i16Const); break; + case EbtUint16: returnValue.setU64Const(u64Const >> constant.u16Const); break; + case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break; + case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break; + case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator<<(const TConstUnion& constant) const + { + TConstUnion returnValue; + switch (type) { +#ifndef GLSLANG_WEB + case EbtInt8: + switch (constant.type) { + case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break; + case EbtUint8: returnValue.setI8Const(i8Const << constant.u8Const); break; + case EbtInt16: returnValue.setI8Const(i8Const << constant.i16Const); break; + case EbtUint16: returnValue.setI8Const(i8Const << constant.u16Const); break; + case EbtInt: returnValue.setI8Const(i8Const << constant.iConst); break; + case EbtUint: returnValue.setI8Const(i8Const << constant.uConst); break; + case EbtInt64: returnValue.setI8Const(i8Const << constant.i64Const); break; + case EbtUint64: returnValue.setI8Const(i8Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint8: + switch (constant.type) { + case EbtInt8: returnValue.setU8Const(u8Const << constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const << constant.u8Const); break; + case EbtInt16: returnValue.setU8Const(u8Const << constant.i16Const); break; + case EbtUint16: returnValue.setU8Const(u8Const << constant.u16Const); break; + case EbtInt: returnValue.setU8Const(u8Const << constant.iConst); break; + case EbtUint: returnValue.setU8Const(u8Const << constant.uConst); break; + 
case EbtInt64: returnValue.setU8Const(u8Const << constant.i64Const); break; + case EbtUint64: returnValue.setU8Const(u8Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtInt16: + switch (constant.type) { + case EbtInt8: returnValue.setI16Const(i16Const << constant.i8Const); break; + case EbtUint8: returnValue.setI16Const(i16Const << constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const << constant.i16Const); break; + case EbtUint16: returnValue.setI16Const(i16Const << constant.u16Const); break; + case EbtInt: returnValue.setI16Const(i16Const << constant.iConst); break; + case EbtUint: returnValue.setI16Const(i16Const << constant.uConst); break; + case EbtInt64: returnValue.setI16Const(i16Const << constant.i64Const); break; + case EbtUint64: returnValue.setI16Const(i16Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint16: + switch (constant.type) { + case EbtInt8: returnValue.setU16Const(u16Const << constant.i8Const); break; + case EbtUint8: returnValue.setU16Const(u16Const << constant.u8Const); break; + case EbtInt16: returnValue.setU16Const(u16Const << constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const << constant.u16Const); break; + case EbtInt: returnValue.setU16Const(u16Const << constant.iConst); break; + case EbtUint: returnValue.setU16Const(u16Const << constant.uConst); break; + case EbtInt64: returnValue.setU16Const(u16Const << constant.i64Const); break; + case EbtUint64: returnValue.setU16Const(u16Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtInt64: + switch (constant.type) { + case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break; + case EbtUint8: returnValue.setI64Const(i64Const << constant.u8Const); break; + case EbtInt16: returnValue.setI64Const(i64Const << constant.i16Const); break; + case EbtUint16: returnValue.setI64Const(i64Const << constant.u16Const); break; + case EbtInt: returnValue.setI64Const(i64Const << constant.iConst); break; + case EbtUint: returnValue.setI64Const(i64Const << constant.uConst); break; + case EbtInt64: returnValue.setI64Const(i64Const << constant.i64Const); break; + case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint64: + switch (constant.type) { + case EbtInt8: returnValue.setU64Const(u64Const << constant.i8Const); break; + case EbtUint8: returnValue.setU64Const(u64Const << constant.u8Const); break; + case EbtInt16: returnValue.setU64Const(u64Const << constant.i16Const); break; + case EbtUint16: returnValue.setU64Const(u64Const << constant.u16Const); break; + case EbtInt: returnValue.setU64Const(u64Const << constant.iConst); break; + case EbtUint: returnValue.setU64Const(u64Const << constant.uConst); break; + case EbtInt64: returnValue.setU64Const(u64Const << constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; +#endif + case EbtInt: + switch (constant.type) { + case EbtInt: returnValue.setIConst(iConst << constant.iConst); break; + case EbtUint: returnValue.setIConst(iConst << constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break; + case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break; + case EbtInt16: 
returnValue.setIConst(iConst << constant.i16Const); break; + case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break; + case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break; + case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + break; + case EbtUint: + switch (constant.type) { + case EbtInt: returnValue.setUConst(uConst << constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst << constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break; + case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break; + case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break; + case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break; + case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break; + case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + break; + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator&(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst & constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst & constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break; + case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator|(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst | constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst | constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break; + case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator^(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break; + case EbtInt64: 
returnValue.setI64Const(i64Const ^ constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator~() const + { + TConstUnion returnValue; + switch (type) { + case EbtInt: returnValue.setIConst(~iConst); break; + case EbtUint: returnValue.setUConst(~uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(~i8Const); break; + case EbtUint8: returnValue.setU8Const(~u8Const); break; + case EbtInt16: returnValue.setI16Const(~i16Const); break; + case EbtUint16: returnValue.setU16Const(~u16Const); break; + case EbtInt64: returnValue.setI64Const(~i64Const); break; + case EbtUint64: returnValue.setU64Const(~u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator&&(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtBool: returnValue.setBConst(bConst && constant.bConst); break; + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator||(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtBool: returnValue.setBConst(bConst || constant.bConst); break; + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TBasicType getType() const { return type; } + +private: + union { + signed char i8Const; // used for i8vec, scalar int8s + unsigned char u8Const; // used for u8vec, scalar uint8s + signed short i16Const; // used for i16vec, scalar int16s + unsigned short u16Const; // used for u16vec, scalar uint16s + int iConst; // used for ivec, scalar ints + unsigned int uConst; // used for uvec, scalar uints + long long i64Const; // used for i64vec, scalar int64s + unsigned long long u64Const; // used for u64vec, scalar uint64s + bool bConst; // used for bvec, scalar bools + double dConst; // used for vec, dvec, mat, dmat, scalar floats and doubles + const TString* sConst; // string constant + }; + + TBasicType type; +}; + +// Encapsulate having a pointer to an array of TConstUnion, +// which only needs to be allocated if its size is going to be +// bigger than 0. +// +// One convenience is being able to use [] to go inside the array, instead +// of C++ assuming it as an array of pointers to vectors. +// +// General usage is that the size is known up front, and it is +// created once with the proper size. +// +class TConstUnionArray { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + + TConstUnionArray() : unionArray(nullptr) { } + virtual ~TConstUnionArray() { } + + explicit TConstUnionArray(int size) + { + if (size == 0) + unionArray = nullptr; + else + unionArray = new TConstUnionVector(size); + } + TConstUnionArray(const TConstUnionArray& a) = default; + TConstUnionArray(const TConstUnionArray& a, int start, int size) + { + unionArray = new TConstUnionVector(size); + for (int i = 0; i < size; ++i) + (*unionArray)[i] = a[start + i]; + } + + // Use this constructor for a smear operation + TConstUnionArray(int size, const TConstUnion& val) + { + unionArray = new TConstUnionVector(size, val); + } + + int size() const { return unionArray ? 
(int)unionArray->size() : 0; }
+    TConstUnion& operator[](size_t index) { return (*unionArray)[index]; }
+    const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; }
+    bool operator==(const TConstUnionArray& rhs) const
+    {
+        // this includes the case that both are unallocated
+        if (unionArray == rhs.unionArray)
+            return true;
+
+        if (! unionArray || ! rhs.unionArray)
+            return false;
+
+        return *unionArray == *rhs.unionArray;
+    }
+    bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); }
+
+    double dot(const TConstUnionArray& rhs)
+    {
+        assert(rhs.unionArray->size() == unionArray->size());
+        double sum = 0.0;
+
+        for (size_t comp = 0; comp < unionArray->size(); ++comp)
+            sum += (*this)[comp].getDConst() * rhs[comp].getDConst();
+
+        return sum;
+    }
+
+    bool empty() const { return unionArray == nullptr; }
+
+protected:
+    typedef TVector<TConstUnion> TConstUnionVector;
+    TConstUnionVector* unionArray;
+};
+
+} // end namespace glslang
+
+#endif // _CONSTANT_UNION_INCLUDED_
diff --git a/dep/glslang/glslang/Include/InfoSink.h b/dep/glslang/glslang/Include/InfoSink.h
new file mode 100644
index 000000000..dceb603cf
--- /dev/null
+++ b/dep/glslang/glslang/Include/InfoSink.h
@@ -0,0 +1,144 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _INFOSINK_INCLUDED_
+#define _INFOSINK_INCLUDED_
+
+#include "../Include/Common.h"
+#include <cmath>
+
+namespace glslang {
+
+//
+// TPrefixType is used to centralize how info log messages start.
+// See below.
+//
+enum TPrefixType {
+    EPrefixNone,
+    EPrefixWarning,
+    EPrefixError,
+    EPrefixInternalError,
+    EPrefixUnimplemented,
+    EPrefixNote
+};
+
+enum TOutputStream {
+    ENull = 0,
+    EDebugger = 0x01,
+    EStdOut = 0x02,
+    EString = 0x04,
+};
+//
+// Encapsulate info logs for all objects that have them.
+//
+// The methods are a general set of tools for getting a variety of
+// messages and types inserted into the log.
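// Illustrative sketch (not part of the vendored sources): exercising the sink
// declared below. Real call sites usually reach one through TInfoSink's
// 'info' and 'debug' members rather than constructing their own instance.
static void example_log_to_sink()
{
    glslang::TInfoSinkBase sink;
    sink.message(glslang::EPrefixWarning, "deprecated construct"); // appends "WARNING: deprecated construct\n"
    sink << "raw text, line " << 42 << "\n";                       // streaming also appends to the same log
    const char* text = sink.c_str();                               // accumulated log for display
    (void)text;
}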
+// +class TInfoSinkBase { +public: + TInfoSinkBase() : outputStream(4) {} + void erase() { sink.erase(); } + TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; } + TInfoSinkBase& operator<<(char c) { append(1, c); return *this; } + TInfoSinkBase& operator<<(const char* s) { append(s); return *this; } + TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; } + TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; } + TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size]; + snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n); + append(buf); + return *this; } + TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; } + TInfoSinkBase& operator+(const TString& t) { append(t); return *this; } + TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; } + TInfoSinkBase& operator+(const char* s) { append(s); return *this; } + const char* c_str() const { return sink.c_str(); } + void prefix(TPrefixType message) { + switch(message) { + case EPrefixNone: break; + case EPrefixWarning: append("WARNING: "); break; + case EPrefixError: append("ERROR: "); break; + case EPrefixInternalError: append("INTERNAL ERROR: "); break; + case EPrefixUnimplemented: append("UNIMPLEMENTED: "); break; + case EPrefixNote: append("NOTE: "); break; + default: append("UNKNOWN ERROR: "); break; + } + } + void location(const TSourceLoc& loc) { + const int maxSize = 24; + char locText[maxSize]; + snprintf(locText, maxSize, ":%d", loc.line); + append(loc.getStringNameOrNum(false).c_str()); + append(locText); + append(": "); + } + void message(TPrefixType message, const char* s) { + prefix(message); + append(s); + append("\n"); + } + void message(TPrefixType message, const char* s, const TSourceLoc& loc) { + prefix(message); + location(loc); + append(s); + append("\n"); + } + + void setOutputStream(int output = 4) + { + outputStream = output; + } + +protected: + void append(const char* s); + + void append(int count, char c); + void append(const TPersistString& t); + void append(const TString& t); + + void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2) + sink.reserve(sink.capacity() + sink.capacity() / 2); } + void appendToStream(const char* s); + TPersistString sink; + int outputStream; +}; + +} // end namespace glslang + +class TInfoSink { +public: + glslang::TInfoSinkBase info; + glslang::TInfoSinkBase debug; +}; + +#endif // _INFOSINK_INCLUDED_ diff --git a/dep/glslang/glslang/Include/InitializeGlobals.h b/dep/glslang/glslang/Include/InitializeGlobals.h new file mode 100644 index 000000000..95d0a40e9 --- /dev/null +++ b/dep/glslang/glslang/Include/InitializeGlobals.h @@ -0,0 +1,44 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef __INITIALIZE_GLOBALS_INCLUDED_ +#define __INITIALIZE_GLOBALS_INCLUDED_ + +namespace glslang { + +bool InitializePoolIndex(); + +} // end namespace glslang + +#endif // __INITIALIZE_GLOBALS_INCLUDED_ diff --git a/dep/glslang/glslang/Include/PoolAlloc.h b/dep/glslang/glslang/Include/PoolAlloc.h new file mode 100644 index 000000000..b8eccb883 --- /dev/null +++ b/dep/glslang/glslang/Include/PoolAlloc.h @@ -0,0 +1,316 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _POOLALLOC_INCLUDED_ +#define _POOLALLOC_INCLUDED_ + +#ifdef _DEBUG +# define GUARD_BLOCKS // define to enable guard block sanity checking +#endif + +// +// This header defines an allocator that can be used to efficiently +// allocate a large number of small requests for heap memory, with the +// intention that they are not individually deallocated, but rather +// collectively deallocated at one time. 
+// +// This simultaneously +// +// * Makes each individual allocation much more efficient; the +// typical allocation is trivial. +// * Completely avoids the cost of doing individual deallocation. +// * Saves the trouble of tracking down and plugging a large class of leaks. +// +// Individual classes can use this allocator by supplying their own +// new and delete methods. +// +// STL containers can use this allocator by using the pool_allocator +// class as the allocator (second) template argument. +// + +#include <cstddef> +#include <string.h> +#include <vector> + +namespace glslang { + +// If we are using guard blocks, we must track each individual +// allocation. If we aren't using guard blocks, these +// never get instantiated, so won't have any impact. +// + +class TAllocation { +public: + TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) : + size(size), mem(mem), prevAlloc(prev) { + // Allocations are bracketed: + // [allocationHeader][initialGuardBlock][userData][finalGuardBlock] + // This would be cleaner with if (guardBlockSize)..., but that + // makes the compiler print warnings about 0 length memsets, + // even with the if() protecting them. +# ifdef GUARD_BLOCKS + memset(preGuard(), guardBlockBeginVal, guardBlockSize); + memset(data(), userDataFill, size); + memset(postGuard(), guardBlockEndVal, guardBlockSize); +# endif + } + + void check() const { + checkGuardBlock(preGuard(), guardBlockBeginVal, "before"); + checkGuardBlock(postGuard(), guardBlockEndVal, "after"); + } + + void checkAllocList() const; + + // Return total size needed to accommodate user buffer of 'size', + // plus our tracking data. + inline static size_t allocationSize(size_t size) { + return size + 2 * guardBlockSize + headerSize(); + } + + // Offset from surrounding buffer to get to user data buffer. + inline static unsigned char* offsetAllocation(unsigned char* m) { + return m + guardBlockSize + headerSize(); + } + +private: + void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const; + + // Find offsets to pre and post guard blocks, and user data buffer + unsigned char* preGuard() const { return mem + headerSize(); } + unsigned char* data() const { return preGuard() + guardBlockSize; } + unsigned char* postGuard() const { return data() + size; } + + size_t size; // size of the user data area + unsigned char* mem; // beginning of our allocation (pts to header) + TAllocation* prevAlloc; // prior allocation in the chain + + const static unsigned char guardBlockBeginVal; + const static unsigned char guardBlockEndVal; + const static unsigned char userDataFill; + + const static size_t guardBlockSize; +# ifdef GUARD_BLOCKS + inline static size_t headerSize() { return sizeof(TAllocation); } +# else + inline static size_t headerSize() { return 0; } +# endif +}; + +// +// There are several stacks. One is to track the pushing and popping +// of the user, and is not yet implemented. The others are simply +// repositories of free pages or used pages. +// +// Page stacks are linked together with a simple header at the beginning +// of each allocation obtained from the underlying OS. Multi-page allocations +// are returned to the OS. Individual page allocations are kept for future +// re-use. +// +// The "page size" used is not, nor must it match, the underlying OS +// page size. But, having it be about that size or equal to a set of +// pages is likely most optimal.
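+//
+// A sketch of the intended push/pop model (illustrative only; not part of
+// this header):
+//
+//     glslang::TPoolAllocator pool;
+//     glslang::SetThreadPoolAllocator(&pool);    // declared near the end of this file
+//     pool.push();                               // open an allocation scope
+//     void* mem = pool.allocate(64);             // cheap bump allocation from the pool
+//     pool.pop();                                // releases everything since push()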
+// +class TPoolAllocator { +public: + TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16); + + // + // Don't call the destructor just to free up the memory, call pop() + // + ~TPoolAllocator(); + + // + // Call push() to establish a new place to pop memory to. Does not + // have to be called to get things started. + // + void push(); + + // + // Call pop() to free all memory allocated since the last call to push(), + // or if no last call to push, frees all memory since first allocation. + // + void pop(); + + // + // Call popAll() to free all memory allocated. + // + void popAll(); + + // + // Call allocate() to actually acquire memory. Returns 0 if no memory + // available, otherwise a properly aligned pointer to 'numBytes' of memory. + // + void* allocate(size_t numBytes); + + // + // There is no deallocate. The point of this class is that + // deallocation can be skipped by the user of it, as the model + // of use is to simultaneously deallocate everything at once + // by calling pop(), and to not have to solve memory leak problems. + // + +protected: + friend struct tHeader; + + struct tHeader { + tHeader(tHeader* nextPage, size_t pageCount) : +#ifdef GUARD_BLOCKS + lastAllocation(0), +#endif + nextPage(nextPage), pageCount(pageCount) { } + + ~tHeader() { +#ifdef GUARD_BLOCKS + if (lastAllocation) + lastAllocation->checkAllocList(); +#endif + } + +#ifdef GUARD_BLOCKS + TAllocation* lastAllocation; +#endif + tHeader* nextPage; + size_t pageCount; + }; + + struct tAllocState { + size_t offset; + tHeader* page; + }; + typedef std::vector<tAllocState> tAllocStack; + + // Track allocations if and only if we're using guard blocks +#ifndef GUARD_BLOCKS + void* initializeAllocation(tHeader*, unsigned char* memory, size_t) { +#else + void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) { + new(memory) TAllocation(numBytes, memory, block->lastAllocation); + block->lastAllocation = reinterpret_cast<TAllocation*>(memory); +#endif + + // This is optimized entirely away if GUARD_BLOCKS is not defined. + return TAllocation::offsetAllocation(memory); + } + + size_t pageSize; // granularity of allocation from the OS + size_t alignment; // all returned allocations will be aligned at + // this granularity, which will be a power of 2 + size_t alignmentMask; + size_t headerSkip; // amount of memory to skip to make room for the + // header (basically, size of header, rounded + // up to make it aligned) + size_t currentPageOffset; // next offset in top of inUseList to allocate from + tHeader* freeList; // list of popped memory + tHeader* inUseList; // list of all memory currently being used + tAllocStack stack; // stack of where to allocate from, to partition pool + + int numCalls; // just an interesting statistic + size_t totalBytes; // just an interesting statistic +private: + TPoolAllocator& operator=(const TPoolAllocator&); // don't allow assignment operator + TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor +}; + +// +// There could potentially be many pools with pops happening at +// different times. But a simple use is to have a global pop +// with everyone using the same global allocator. +// +extern TPoolAllocator& GetThreadPoolAllocator(); +void SetThreadPoolAllocator(TPoolAllocator* poolAllocator); + +// +// This STL compatible allocator is intended to be used as the allocator +// parameter to templatized STL containers, like vector and map.
+// +// It will use the pools for allocation, and not +// do any deallocation, but will still do destruction. +// +template<class T> +class pool_allocator { +public: + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef T *pointer; + typedef const T *const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + template<class Other> + struct rebind { + typedef pool_allocator<Other> other; + }; + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } + + pool_allocator() : allocator(GetThreadPoolAllocator()) { } + pool_allocator(TPoolAllocator& a) : allocator(a) { } + pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { } + + template<class Other> + pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { } + + pointer allocate(size_type n) { + return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); } + pointer allocate(size_type n, const void*) { + return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); } + + void deallocate(void*, size_type) { } + void deallocate(pointer, size_type) { } + + pointer _Charalloc(size_t n) { + return reinterpret_cast<pointer>(getAllocator().allocate(n)); } + + void construct(pointer p, const T& val) { new ((void *)p) T(val); } + void destroy(pointer p) { p->T::~T(); } + + bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); } + bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); } + + size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); } + size_type max_size(int size) const { return static_cast<size_type>(-1) / size; } + + TPoolAllocator& getAllocator() const { return allocator; } + +protected: + pool_allocator& operator=(const pool_allocator&) { return *this; } + TPoolAllocator& allocator; +}; + +} // end namespace glslang + +#endif // _POOLALLOC_INCLUDED_ diff --git a/dep/glslang/glslang/Include/ResourceLimits.h b/dep/glslang/glslang/Include/ResourceLimits.h new file mode 100644 index 000000000..b670cf163 --- /dev/null +++ b/dep/glslang/glslang/Include/ResourceLimits.h @@ -0,0 +1,150 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _RESOURCE_LIMITS_INCLUDED_ +#define _RESOURCE_LIMITS_INCLUDED_ + +struct TLimits { + bool nonInductiveForLoops; + bool whileLoops; + bool doWhileLoops; + bool generalUniformIndexing; + bool generalAttributeMatrixVectorIndexing; + bool generalVaryingIndexing; + bool generalSamplerIndexing; + bool generalVariableIndexing; + bool generalConstantMatrixVectorIndexing; +}; + +struct TBuiltInResource { + int maxLights; + int maxClipPlanes; + int maxTextureUnits; + int maxTextureCoords; + int maxVertexAttribs; + int maxVertexUniformComponents; + int maxVaryingFloats; + int maxVertexTextureImageUnits; + int maxCombinedTextureImageUnits; + int maxTextureImageUnits; + int maxFragmentUniformComponents; + int maxDrawBuffers; + int maxVertexUniformVectors; + int maxVaryingVectors; + int maxFragmentUniformVectors; + int maxVertexOutputVectors; + int maxFragmentInputVectors; + int minProgramTexelOffset; + int maxProgramTexelOffset; + int maxClipDistances; + int maxComputeWorkGroupCountX; + int maxComputeWorkGroupCountY; + int maxComputeWorkGroupCountZ; + int maxComputeWorkGroupSizeX; + int maxComputeWorkGroupSizeY; + int maxComputeWorkGroupSizeZ; + int maxComputeUniformComponents; + int maxComputeTextureImageUnits; + int maxComputeImageUniforms; + int maxComputeAtomicCounters; + int maxComputeAtomicCounterBuffers; + int maxVaryingComponents; + int maxVertexOutputComponents; + int maxGeometryInputComponents; + int maxGeometryOutputComponents; + int maxFragmentInputComponents; + int maxImageUnits; + int maxCombinedImageUnitsAndFragmentOutputs; + int maxCombinedShaderOutputResources; + int maxImageSamples; + int maxVertexImageUniforms; + int maxTessControlImageUniforms; + int maxTessEvaluationImageUniforms; + int maxGeometryImageUniforms; + int maxFragmentImageUniforms; + int maxCombinedImageUniforms; + int maxGeometryTextureImageUnits; + int maxGeometryOutputVertices; + int maxGeometryTotalOutputComponents; + int maxGeometryUniformComponents; + int maxGeometryVaryingComponents; + int maxTessControlInputComponents; + int maxTessControlOutputComponents; + int maxTessControlTextureImageUnits; + int maxTessControlUniformComponents; + int maxTessControlTotalOutputComponents; + int maxTessEvaluationInputComponents; + int maxTessEvaluationOutputComponents; + int maxTessEvaluationTextureImageUnits; + int maxTessEvaluationUniformComponents; + int maxTessPatchComponents; + int maxPatchVertices; + int maxTessGenLevel; + int maxViewports; + int maxVertexAtomicCounters; + int maxTessControlAtomicCounters; + int maxTessEvaluationAtomicCounters; + int maxGeometryAtomicCounters; + int maxFragmentAtomicCounters; + int maxCombinedAtomicCounters; + int maxAtomicCounterBindings; + int maxVertexAtomicCounterBuffers; + int maxTessControlAtomicCounterBuffers; + int maxTessEvaluationAtomicCounterBuffers; + int maxGeometryAtomicCounterBuffers; + int maxFragmentAtomicCounterBuffers; + int maxCombinedAtomicCounterBuffers; + int maxAtomicCounterBufferSize; + int 
maxTransformFeedbackBuffers; + int maxTransformFeedbackInterleavedComponents; + int maxCullDistances; + int maxCombinedClipAndCullDistances; + int maxSamples; + int maxMeshOutputVerticesNV; + int maxMeshOutputPrimitivesNV; + int maxMeshWorkGroupSizeX_NV; + int maxMeshWorkGroupSizeY_NV; + int maxMeshWorkGroupSizeZ_NV; + int maxTaskWorkGroupSizeX_NV; + int maxTaskWorkGroupSizeY_NV; + int maxTaskWorkGroupSizeZ_NV; + int maxMeshViewCountNV; + int maxDualSourceDrawBuffersEXT; + + TLimits limits; +}; + +#endif // _RESOURCE_LIMITS_INCLUDED_ diff --git a/dep/glslang/glslang/Include/ShHandle.h b/dep/glslang/glslang/Include/ShHandle.h new file mode 100644 index 000000000..df07bd8ed --- /dev/null +++ b/dep/glslang/glslang/Include/ShHandle.h @@ -0,0 +1,176 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _SHHANDLE_INCLUDED_ +#define _SHHANDLE_INCLUDED_ + +// +// Machine independent part of the compiler private objects +// sent as ShHandle to the driver. +// +// This should not be included by driver code. +// + +#define SH_EXPORTING +#include "../Public/ShaderLang.h" +#include "../MachineIndependent/Versions.h" +#include "InfoSink.h" + +class TCompiler; +class TLinker; +class TUniformMap; + +// +// The base class used to back handles returned to the driver. +// +class TShHandleBase { +public: + TShHandleBase() { pool = new glslang::TPoolAllocator; } + virtual ~TShHandleBase() { delete pool; } + virtual TCompiler* getAsCompiler() { return 0; } + virtual TLinker* getAsLinker() { return 0; } + virtual TUniformMap* getAsUniformMap() { return 0; } + virtual glslang::TPoolAllocator* getPool() const { return pool; } +private: + glslang::TPoolAllocator* pool; +}; + +// +// The base class for the machine dependent linker to derive from +// for managing where uniforms live. 
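+//
+// A back end would subclass it along these lines (a sketch; 'MyUniformMap'
+// and 'lookup' are hypothetical, not part of glslang):
+//
+//     class MyUniformMap : public TUniformMap {
+//     public:
+//         int getLocation(const char* name) override { return lookup(name); }
+//     };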
+// +class TUniformMap : public TShHandleBase { +public: + TUniformMap() { } + virtual ~TUniformMap() { } + virtual TUniformMap* getAsUniformMap() { return this; } + virtual int getLocation(const char* name) = 0; + virtual TInfoSink& getInfoSink() { return infoSink; } + TInfoSink infoSink; +}; + +class TIntermNode; + +// +// The base class for the machine dependent compiler to derive from +// for managing object code from the compile. +// +class TCompiler : public TShHandleBase { +public: + TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink), language(l), haveValidObjectCode(false) { } + virtual ~TCompiler() { } + EShLanguage getLanguage() { return language; } + virtual TInfoSink& getInfoSink() { return infoSink; } + + virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0; + + virtual TCompiler* getAsCompiler() { return this; } + virtual bool linkable() { return haveValidObjectCode; } + + TInfoSink& infoSink; +protected: + TCompiler& operator=(TCompiler&); + + EShLanguage language; + bool haveValidObjectCode; +}; + +// +// Link operations are based on a list of compile results... +// +typedef glslang::TVector<TCompiler*> TCompilerList; +typedef glslang::TVector<TShHandleBase*> THandleList; + +// +// The base class for the machine dependent linker to derive from +// to manage the resulting executable. +// + +class TLinker : public TShHandleBase { +public: + TLinker(EShExecutable e, TInfoSink& iSink) : + infoSink(iSink), + executable(e), + haveReturnableObjectCode(false), + appAttributeBindings(0), + fixedAttributeBindings(0), + excludedAttributes(0), + excludedCount(0), + uniformBindings(0) { } + virtual TLinker* getAsLinker() { return this; } + virtual ~TLinker() { } + virtual bool link(TCompilerList&, TUniformMap*) = 0; + virtual bool link(THandleList&) { return false; } + virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; } + virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; } + virtual void getAttributeBindings(ShBindingTable const **t) const = 0; + virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; } + virtual ShBindingTable* getUniformBindings() const { return uniformBindings; } + virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here + virtual TInfoSink& getInfoSink() { return infoSink; } + TInfoSink& infoSink; +protected: + TLinker& operator=(TLinker&); + EShExecutable executable; + bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver + + const ShBindingTable* appAttributeBindings; + const ShBindingTable* fixedAttributeBindings; + const int* excludedAttributes; + int excludedCount; + ShBindingTable* uniformBindings; // created by the linker +}; + +// +// This is the interface between the machine independent code +// and the machine dependent code. +// +// The machine dependent code should derive from the classes +// above. Then Construct*() and Delete*() will create and +// destroy the machine dependent objects, which contain the +// above machine independent information.
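+//
+// For example, a driver pairs each Construct*() with its matching Delete*()
+// (a sketch; error handling omitted):
+//
+//     TCompiler* compiler = ConstructCompiler(EShLangFragment, 0);
+//     // ... use compiler->compile(...) ...
+//     DeleteCompiler(compiler);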
+// +TCompiler* ConstructCompiler(EShLanguage, int); + +TShHandleBase* ConstructLinker(EShExecutable, int); +TShHandleBase* ConstructBindings(); +void DeleteLinker(TShHandleBase*); +void DeleteBindingList(TShHandleBase* bindingList); + +TUniformMap* ConstructUniformMap(); +void DeleteCompiler(TCompiler*); + +void DeleteUniformMap(TUniformMap*); + +#endif // _SHHANDLE_INCLUDED_ diff --git a/dep/glslang/glslang/Include/Types.h b/dep/glslang/glslang/Include/Types.h new file mode 100644 index 000000000..235ea3f1d --- /dev/null +++ b/dep/glslang/glslang/Include/Types.h @@ -0,0 +1,2487 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2016 LunarG, Inc. +// Copyright (C) 2015-2016 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#ifndef _TYPES_INCLUDED +#define _TYPES_INCLUDED + +#include "../Include/Common.h" +#include "../Include/BaseTypes.h" +#include "../Public/ShaderLang.h" +#include "arrays.h" + +#include <algorithm> + +namespace glslang { + +const int GlslangMaxTypeLength = 200; // TODO: need to print block/struct one member per line, so this can stay bounded + +const char* const AnonymousPrefix = "anon@"; // for something like a block whose members can be directly accessed +inline bool IsAnonymous(const TString& name) +{ + return name.compare(0, 5, AnonymousPrefix) == 0; +} + +// +// Details within a sampler type +// +enum TSamplerDim { + EsdNone, + Esd1D, + Esd2D, + Esd3D, + EsdCube, + EsdRect, + EsdBuffer, + EsdSubpass, // goes only with non-sampled image (image is true) + EsdNumDims +}; + +struct TSampler { // misnomer now; includes images, textures without sampler, and textures with sampler + TBasicType type : 8; // type returned by sampler + TSamplerDim dim : 8; + bool arrayed : 1; + bool shadow : 1; + bool ms : 1; + bool image : 1; // image, combined should be false + bool combined : 1; // true means texture is combined with a sampler, false means texture with no sampler + bool sampler : 1; // true means a pure sampler, other fields should be clear() + +#ifdef GLSLANG_WEB + bool is1D() const { return false; } + bool isBuffer() const { return false; } + bool isRect() const { return false; } + bool isSubpass() const { return false; } + bool isCombined() const { return true; } + bool isImage() const { return false; } + bool isImageClass() const { return false; } + bool isMultiSample() const { return false; } + bool isExternal() const { return false; } + void setExternal(bool e) { } + bool isYuv() const { return false; } +#else + unsigned int vectorSize : 3; // vector return type size. + // Some languages support structures as sample results. Storing the whole structure in the + // TSampler is too large, so there is an index to a separate table. + static const unsigned structReturnIndexBits = 4; // number of index bits to use. + static const unsigned structReturnSlots = (1<<structReturnIndexBits)-1; // number of valid values + static const unsigned noReturnStruct = structReturnSlots; // value if no return struct type. + // Index into a language specific table of all possible return structures. + unsigned structReturnIndex : structReturnIndexBits; + + bool external : 1; // GL_OES_EGL_image_external + bool yuv : 1; // GL_EXT_YUV_target + + bool is1D() const { return dim == Esd1D; } + bool isBuffer() const { return dim == EsdBuffer; } + bool isRect() const { return dim == EsdRect; } + bool isSubpass() const { return dim == EsdSubpass; } + bool isCombined() const { return combined; } + bool isImage() const { return image && !isSubpass(); } + bool isImageClass() const { return image; } + bool isMultiSample() const { return ms; } + bool isExternal() const { return external; } + void setExternal(bool e) { external = e; } + bool isYuv() const { return yuv; } +#endif +}; + +// +// Need to have association of line numbers to types in a list for building structs. +// +class TType; +struct TTypeLoc { + TType* type; + TSourceLoc loc; +}; +typedef TVector<TTypeLoc> TTypeList; + +typedef TVector<TString*> TIdentifierList; + +// +// Following are a series of helper enums for managing layouts and qualifiers, +// used for TPublicType, TType, others. +// + +enum TLayoutPacking { + ElpNone, + ElpShared, // default, but different than saying nothing + ElpStd140, + ElpStd430, + ElpPacked, + ElpScalar, + ElpCount // If expanding, see bitfield width below +}; + +enum TLayoutMatrix { + ElmNone, + ElmRowMajor, + ElmColumnMajor, // default, but different than saying nothing + ElmCount // If expanding, see bitfield width below +}; + +// Union of geometry shader and tessellation shader geometry types. +// They don't go into TType, but rather have current state per shader or +// active parser type (TPublicType). +enum TLayoutGeometry { + ElgNone, + ElgPoints, + ElgLines, + ElgLinesAdjacency, + ElgLineStrip, + ElgTriangles, + ElgTrianglesAdjacency, + ElgTriangleStrip, + ElgQuads, + ElgIsolines, +}; + +enum TVertexSpacing { + EvsNone, + EvsEqual, + EvsFractionalEven, + EvsFractionalOdd +}; + +enum TVertexOrder { + EvoNone, + EvoCw, + EvoCcw +}; + +// Note: order matters, as type of format is done by comparison.
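+// For example, a format's class can be tested with range checks against the
+// guard values (a sketch; the real checks live elsewhere in the front end):
+//
+//     bool isFloatFormat(TLayoutFormat f) { return f > ElfNone && f < ElfFloatGuard; }
+//     bool isIntFormat(TLayoutFormat f)   { return f > ElfFloatGuard && f < ElfIntGuard; }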
+enum TLayoutFormat { + ElfNone, + + // Float image + ElfRgba32f, + ElfRgba16f, + ElfR32f, + ElfRgba8, + ElfRgba8Snorm, + + ElfEsFloatGuard, // to help with comparisons + + ElfRg32f, + ElfRg16f, + ElfR11fG11fB10f, + ElfR16f, + ElfRgba16, + ElfRgb10A2, + ElfRg16, + ElfRg8, + ElfR16, + ElfR8, + ElfRgba16Snorm, + ElfRg16Snorm, + ElfRg8Snorm, + ElfR16Snorm, + ElfR8Snorm, + + ElfFloatGuard, // to help with comparisons + + // Int image + ElfRgba32i, + ElfRgba16i, + ElfRgba8i, + ElfR32i, + + ElfEsIntGuard, // to help with comparisons + + ElfRg32i, + ElfRg16i, + ElfRg8i, + ElfR16i, + ElfR8i, + + ElfIntGuard, // to help with comparisons + + // Uint image + ElfRgba32ui, + ElfRgba16ui, + ElfRgba8ui, + ElfR32ui, + + ElfEsUintGuard, // to help with comparisons + + ElfRg32ui, + ElfRg16ui, + ElfRgb10a2ui, + ElfRg8ui, + ElfR16ui, + ElfR8ui, + + ElfCount +}; + +enum TLayoutDepth { + EldNone, + EldAny, + EldGreater, + EldLess, + EldUnchanged, + + EldCount +}; + +enum TBlendEquationShift { + // No 'EBlendNone': + // These are used as bit-shift amounts. A mask of such shifts will have type 'int', + // and in that space, 0 means no bits set, or none. In this enum, 0 means (1 << 0), a bit is set. + EBlendMultiply, + EBlendScreen, + EBlendOverlay, + EBlendDarken, + EBlendLighten, + EBlendColordodge, + EBlendColorburn, + EBlendHardlight, + EBlendSoftlight, + EBlendDifference, + EBlendExclusion, + EBlendHslHue, + EBlendHslSaturation, + EBlendHslColor, + EBlendHslLuminosity, + EBlendAllEquations, + + EBlendCount +}; + +enum TInterlockOrdering { + EioNone, + EioPixelInterlockOrdered, + EioPixelInterlockUnordered, + EioSampleInterlockOrdered, + EioSampleInterlockUnordered, + EioShadingRateInterlockOrdered, + EioShadingRateInterlockUnordered, + + EioCount, +}; + +enum TShaderInterface +{ + // Includes both uniform blocks and buffer blocks + EsiUniform = 0, + EsiInput, + EsiOutput, + EsiNone, + + EsiCount +}; + + +class TQualifier { +public: + static const int layoutNotSet = -1; + + void clear() + { + precision = EpqNone; + invariant = false; + makeTemporary(); + declaredBuiltIn = EbvNone; +#ifndef GLSLANG_WEB + noContraction = false; +#endif + } + + // drop qualifiers that don't belong in a temporary variable + void makeTemporary() + { + semanticName = nullptr; + storage = EvqTemporary; + builtIn = EbvNone; + clearInterstage(); + clearMemory(); + specConstant = false; + nonUniform = false; + clearLayout(); + } + + void clearInterstage() + { + clearInterpolation(); +#ifndef GLSLANG_WEB + patch = false; + sample = false; +#endif + } + + void clearInterpolation() + { + centroid = false; + smooth = false; + flat = false; +#ifndef GLSLANG_WEB + nopersp = false; + explicitInterp = false; + pervertexNV = false; + perPrimitiveNV = false; + perViewNV = false; + perTaskNV = false; +#endif + } + + void clearMemory() + { +#ifndef GLSLANG_WEB + coherent = false; + devicecoherent = false; + queuefamilycoherent = false; + workgroupcoherent = false; + subgroupcoherent = false; + shadercallcoherent = false; + nonprivate = false; + volatil = false; + restrict = false; + readonly = false; + writeonly = false; +#endif + } + + const char* semanticName; + TStorageQualifier storage : 6; + TBuiltInVariable builtIn : 9; + TBuiltInVariable declaredBuiltIn : 9; + static_assert(EbvLast < 256, "need to increase size of TBuiltInVariable bitfields!"); + TPrecisionQualifier precision : 3; + bool invariant : 1; // require canonical treatment for cross-shader invariance + bool centroid : 1; + bool smooth : 1; + bool flat : 1; + // having a constant_id 
is not sufficient: expressions have no id, but are still specConstant + bool specConstant : 1; + bool nonUniform : 1; + bool explicitOffset : 1; + +#ifdef GLSLANG_WEB + bool isWriteOnly() const { return false; } + bool isReadOnly() const { return false; } + bool isRestrict() const { return false; } + bool isCoherent() const { return false; } + bool isVolatile() const { return false; } + bool isSample() const { return false; } + bool isMemory() const { return false; } + bool isMemoryQualifierImageAndSSBOOnly() const { return false; } + bool bufferReferenceNeedsVulkanMemoryModel() const { return false; } + bool isInterpolation() const { return flat || smooth; } + bool isExplicitInterpolation() const { return false; } + bool isAuxiliary() const { return centroid; } + bool isPatch() const { return false; } + bool isNoContraction() const { return false; } + void setNoContraction() { } + bool isPervertexNV() const { return false; } +#else + bool noContraction: 1; // prevent contraction and reassociation, e.g., for 'precise' keyword, and expressions it affects + bool nopersp : 1; + bool explicitInterp : 1; + bool pervertexNV : 1; + bool perPrimitiveNV : 1; + bool perViewNV : 1; + bool perTaskNV : 1; + bool patch : 1; + bool sample : 1; + bool restrict : 1; + bool readonly : 1; + bool writeonly : 1; + bool coherent : 1; + bool volatil : 1; + bool devicecoherent : 1; + bool queuefamilycoherent : 1; + bool workgroupcoherent : 1; + bool subgroupcoherent : 1; + bool shadercallcoherent : 1; + bool nonprivate : 1; + bool isWriteOnly() const { return writeonly; } + bool isReadOnly() const { return readonly; } + bool isRestrict() const { return restrict; } + bool isCoherent() const { return coherent; } + bool isVolatile() const { return volatil; } + bool isSample() const { return sample; } + bool isMemory() const + { + return shadercallcoherent || subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly || nonprivate; + } + bool isMemoryQualifierImageAndSSBOOnly() const + { + return shadercallcoherent || subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly; + } + bool bufferReferenceNeedsVulkanMemoryModel() const + { + // include qualifiers that map to load/store availability/visibility/nonprivate memory access operands + return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || nonprivate; + } + bool isInterpolation() const + { + return flat || smooth || nopersp || explicitInterp; + } + bool isExplicitInterpolation() const + { + return explicitInterp; + } + bool isAuxiliary() const + { + return centroid || patch || sample || pervertexNV; + } + bool isPatch() const { return patch; } + bool isNoContraction() const { return noContraction; } + void setNoContraction() { noContraction = true; } + bool isPervertexNV() const { return pervertexNV; } +#endif + + bool isPipeInput() const + { + switch (storage) { + case EvqVaryingIn: + case EvqFragCoord: + case EvqPointCoord: + case EvqFace: + case EvqVertexId: + case EvqInstanceId: + return true; + default: + return false; + } + } + + bool isPipeOutput() const + { + switch (storage) { + case EvqPosition: + case EvqPointSize: + case EvqClipVertex: + case EvqVaryingOut: + case EvqFragColor: + case EvqFragDepth: + return true; + default: + return false; + } + } + + bool isParamInput() const + { + switch (storage) { + case EvqIn: + case EvqInOut: + case 
EvqConstReadOnly: + return true; + default: + return false; + } + } + + bool isParamOutput() const + { + switch (storage) { + case EvqOut: + case EvqInOut: + return true; + default: + return false; + } + } + + bool isUniformOrBuffer() const + { + switch (storage) { + case EvqUniform: + case EvqBuffer: + return true; + default: + return false; + } + } + + bool isIo() const + { + switch (storage) { + case EvqUniform: + case EvqBuffer: + case EvqVaryingIn: + case EvqFragCoord: + case EvqPointCoord: + case EvqFace: + case EvqVertexId: + case EvqInstanceId: + case EvqPosition: + case EvqPointSize: + case EvqClipVertex: + case EvqVaryingOut: + case EvqFragColor: + case EvqFragDepth: + return true; + default: + return false; + } + } + + // non-built-in symbols that might link between compilation units + bool isLinkable() const + { + switch (storage) { + case EvqGlobal: + case EvqVaryingIn: + case EvqVaryingOut: + case EvqUniform: + case EvqBuffer: + case EvqShared: + return true; + default: + return false; + } + } + +#ifdef GLSLANG_WEB + bool isPerView() const { return false; } + bool isTaskMemory() const { return false; } + bool isArrayedIo(EShLanguage language) const { return false; } +#else + bool isPerPrimitive() const { return perPrimitiveNV; } + bool isPerView() const { return perViewNV; } + bool isTaskMemory() const { return perTaskNV; } + + // True if this type of IO is supposed to be arrayed with extra level for per-vertex data + bool isArrayedIo(EShLanguage language) const + { + switch (language) { + case EShLangGeometry: + return isPipeInput(); + case EShLangTessControl: + return ! patch && (isPipeInput() || isPipeOutput()); + case EShLangTessEvaluation: + return ! patch && isPipeInput(); + case EShLangFragment: + return pervertexNV && isPipeInput(); + case EShLangMeshNV: + return ! 
perTaskNV && isPipeOutput(); + + default: + return false; + } + } +#endif + + // Implementing an embedded layout-qualifier class here, since C++ can't have a real class bitfield + void clearLayout() // all layout + { + clearUniformLayout(); + +#ifndef GLSLANG_WEB + layoutPushConstant = false; + layoutBufferReference = false; + layoutPassthrough = false; + layoutViewportRelative = false; + // -2048 as the default value indicating layoutSecondaryViewportRelative is not set + layoutSecondaryViewportRelativeOffset = -2048; + layoutShaderRecord = false; + layoutBufferReferenceAlign = layoutBufferReferenceAlignEnd; + layoutFormat = ElfNone; +#endif + + clearInterstageLayout(); + + layoutSpecConstantId = layoutSpecConstantIdEnd; + } + void clearInterstageLayout() + { + layoutLocation = layoutLocationEnd; + layoutComponent = layoutComponentEnd; +#ifndef GLSLANG_WEB + layoutIndex = layoutIndexEnd; + clearStreamLayout(); + clearXfbLayout(); +#endif + } + +#ifndef GLSLANG_WEB + void clearStreamLayout() + { + layoutStream = layoutStreamEnd; + } + void clearXfbLayout() + { + layoutXfbBuffer = layoutXfbBufferEnd; + layoutXfbStride = layoutXfbStrideEnd; + layoutXfbOffset = layoutXfbOffsetEnd; + } +#endif + + bool hasNonXfbLayout() const + { + return hasUniformLayout() || + hasAnyLocation() || + hasStream() || + hasFormat() || + isShaderRecord() || + isPushConstant() || + hasBufferReference(); + } + bool hasLayout() const + { + return hasNonXfbLayout() || + hasXfb(); + } + TLayoutMatrix layoutMatrix : 3; + TLayoutPacking layoutPacking : 4; + int layoutOffset; + int layoutAlign; + + unsigned int layoutLocation : 12; + static const unsigned int layoutLocationEnd = 0xFFF; + + unsigned int layoutComponent : 3; + static const unsigned int layoutComponentEnd = 4; + + unsigned int layoutSet : 7; + static const unsigned int layoutSetEnd = 0x3F; + + unsigned int layoutBinding : 16; + static const unsigned int layoutBindingEnd = 0xFFFF; + + unsigned int layoutIndex : 8; + static const unsigned int layoutIndexEnd = 0xFF; + + unsigned int layoutStream : 8; + static const unsigned int layoutStreamEnd = 0xFF; + + unsigned int layoutXfbBuffer : 4; + static const unsigned int layoutXfbBufferEnd = 0xF; + + unsigned int layoutXfbStride : 14; + static const unsigned int layoutXfbStrideEnd = 0x3FFF; + + unsigned int layoutXfbOffset : 13; + static const unsigned int layoutXfbOffsetEnd = 0x1FFF; + + unsigned int layoutAttachment : 8; // for input_attachment_index + static const unsigned int layoutAttachmentEnd = 0XFF; + + unsigned int layoutSpecConstantId : 11; + static const unsigned int layoutSpecConstantIdEnd = 0x7FF; + +#ifndef GLSLANG_WEB + // stored as log2 of the actual alignment value + unsigned int layoutBufferReferenceAlign : 6; + static const unsigned int layoutBufferReferenceAlignEnd = 0x3F; + + TLayoutFormat layoutFormat : 8; + + bool layoutPushConstant; + bool layoutBufferReference; + bool layoutPassthrough; + bool layoutViewportRelative; + int layoutSecondaryViewportRelativeOffset; + bool layoutShaderRecord; +#endif + + bool hasUniformLayout() const + { + return hasMatrix() || + hasPacking() || + hasOffset() || + hasBinding() || + hasSet() || + hasAlign(); + } + void clearUniformLayout() // only uniform specific + { + layoutMatrix = ElmNone; + layoutPacking = ElpNone; + layoutOffset = layoutNotSet; + layoutAlign = layoutNotSet; + + layoutSet = layoutSetEnd; + layoutBinding = layoutBindingEnd; +#ifndef GLSLANG_WEB + layoutAttachment = layoutAttachmentEnd; +#endif + } + + bool hasMatrix() const + { + return 
layoutMatrix != ElmNone; + } + bool hasPacking() const + { + return layoutPacking != ElpNone; + } + bool hasAlign() const + { + return layoutAlign != layoutNotSet; + } + bool hasAnyLocation() const + { + return hasLocation() || + hasComponent() || + hasIndex(); + } + bool hasLocation() const + { + return layoutLocation != layoutLocationEnd; + } + bool hasSet() const + { + return layoutSet != layoutSetEnd; + } + bool hasBinding() const + { + return layoutBinding != layoutBindingEnd; + } +#ifdef GLSLANG_WEB + bool hasOffset() const { return false; } + bool isNonPerspective() const { return false; } + bool hasIndex() const { return false; } + unsigned getIndex() const { return 0; } + bool hasComponent() const { return false; } + bool hasStream() const { return false; } + bool hasFormat() const { return false; } + bool hasXfb() const { return false; } + bool hasXfbBuffer() const { return false; } + bool hasXfbStride() const { return false; } + bool hasXfbOffset() const { return false; } + bool hasAttachment() const { return false; } + TLayoutFormat getFormat() const { return ElfNone; } + bool isPushConstant() const { return false; } + bool isShaderRecord() const { return false; } + bool hasBufferReference() const { return false; } + bool hasBufferReferenceAlign() const { return false; } + bool isNonUniform() const { return false; } +#else + bool hasOffset() const + { + return layoutOffset != layoutNotSet; + } + bool isNonPerspective() const { return nopersp; } + bool hasIndex() const + { + return layoutIndex != layoutIndexEnd; + } + unsigned getIndex() const { return layoutIndex; } + bool hasComponent() const + { + return layoutComponent != layoutComponentEnd; + } + bool hasStream() const + { + return layoutStream != layoutStreamEnd; + } + bool hasFormat() const + { + return layoutFormat != ElfNone; + } + bool hasXfb() const + { + return hasXfbBuffer() || + hasXfbStride() || + hasXfbOffset(); + } + bool hasXfbBuffer() const + { + return layoutXfbBuffer != layoutXfbBufferEnd; + } + bool hasXfbStride() const + { + return layoutXfbStride != layoutXfbStrideEnd; + } + bool hasXfbOffset() const + { + return layoutXfbOffset != layoutXfbOffsetEnd; + } + bool hasAttachment() const + { + return layoutAttachment != layoutAttachmentEnd; + } + TLayoutFormat getFormat() const { return layoutFormat; } + bool isPushConstant() const { return layoutPushConstant; } + bool isShaderRecord() const { return layoutShaderRecord; } + bool hasBufferReference() const { return layoutBufferReference; } + bool hasBufferReferenceAlign() const + { + return layoutBufferReferenceAlign != layoutBufferReferenceAlignEnd; + } + bool isNonUniform() const + { + return nonUniform; + } +#endif + bool hasSpecConstantId() const + { + // Not the same thing as being a specialization constant, this + // is just whether or not it was declared with an ID. + return layoutSpecConstantId != layoutSpecConstantIdEnd; + } + bool isSpecConstant() const + { + // True if type is a specialization constant, whether or not it + // had a specialization-constant ID, and false if it is not a + // true front-end constant. + return specConstant; + } + bool isFrontEndConstant() const + { + // True if the front-end knows the final constant value. + // This allows front-end constant folding. + return storage == EvqConst && ! specConstant; + } + bool isConstant() const + { + // True if is either kind of constant; specialization or regular. 
+ return isFrontEndConstant() || isSpecConstant(); + } + void makeSpecConstant() + { + storage = EvqConst; + specConstant = true; + } + static const char* getLayoutPackingString(TLayoutPacking packing) + { + switch (packing) { + case ElpStd140: return "std140"; +#ifndef GLSLANG_WEB + case ElpPacked: return "packed"; + case ElpShared: return "shared"; + case ElpStd430: return "std430"; + case ElpScalar: return "scalar"; +#endif + default: return "none"; + } + } + static const char* getLayoutMatrixString(TLayoutMatrix m) + { + switch (m) { + case ElmColumnMajor: return "column_major"; + case ElmRowMajor: return "row_major"; + default: return "none"; + } + } +#ifdef GLSLANG_WEB + static const char* getLayoutFormatString(TLayoutFormat f) { return "none"; } +#else + static const char* getLayoutFormatString(TLayoutFormat f) + { + switch (f) { + case ElfRgba32f: return "rgba32f"; + case ElfRgba16f: return "rgba16f"; + case ElfRg32f: return "rg32f"; + case ElfRg16f: return "rg16f"; + case ElfR11fG11fB10f: return "r11f_g11f_b10f"; + case ElfR32f: return "r32f"; + case ElfR16f: return "r16f"; + case ElfRgba16: return "rgba16"; + case ElfRgb10A2: return "rgb10_a2"; + case ElfRgba8: return "rgba8"; + case ElfRg16: return "rg16"; + case ElfRg8: return "rg8"; + case ElfR16: return "r16"; + case ElfR8: return "r8"; + case ElfRgba16Snorm: return "rgba16_snorm"; + case ElfRgba8Snorm: return "rgba8_snorm"; + case ElfRg16Snorm: return "rg16_snorm"; + case ElfRg8Snorm: return "rg8_snorm"; + case ElfR16Snorm: return "r16_snorm"; + case ElfR8Snorm: return "r8_snorm"; + + case ElfRgba32i: return "rgba32i"; + case ElfRgba16i: return "rgba16i"; + case ElfRgba8i: return "rgba8i"; + case ElfRg32i: return "rg32i"; + case ElfRg16i: return "rg16i"; + case ElfRg8i: return "rg8i"; + case ElfR32i: return "r32i"; + case ElfR16i: return "r16i"; + case ElfR8i: return "r8i"; + + case ElfRgba32ui: return "rgba32ui"; + case ElfRgba16ui: return "rgba16ui"; + case ElfRgba8ui: return "rgba8ui"; + case ElfRg32ui: return "rg32ui"; + case ElfRg16ui: return "rg16ui"; + case ElfRgb10a2ui: return "rgb10_a2ui"; + case ElfRg8ui: return "rg8ui"; + case ElfR32ui: return "r32ui"; + case ElfR16ui: return "r16ui"; + case ElfR8ui: return "r8ui"; + default: return "none"; + } + } + static const char* getLayoutDepthString(TLayoutDepth d) + { + switch (d) { + case EldAny: return "depth_any"; + case EldGreater: return "depth_greater"; + case EldLess: return "depth_less"; + case EldUnchanged: return "depth_unchanged"; + default: return "none"; + } + } + static const char* getBlendEquationString(TBlendEquationShift e) + { + switch (e) { + case EBlendMultiply: return "blend_support_multiply"; + case EBlendScreen: return "blend_support_screen"; + case EBlendOverlay: return "blend_support_overlay"; + case EBlendDarken: return "blend_support_darken"; + case EBlendLighten: return "blend_support_lighten"; + case EBlendColordodge: return "blend_support_colordodge"; + case EBlendColorburn: return "blend_support_colorburn"; + case EBlendHardlight: return "blend_support_hardlight"; + case EBlendSoftlight: return "blend_support_softlight"; + case EBlendDifference: return "blend_support_difference"; + case EBlendExclusion: return "blend_support_exclusion"; + case EBlendHslHue: return "blend_support_hsl_hue"; + case EBlendHslSaturation: return "blend_support_hsl_saturation"; + case EBlendHslColor: return "blend_support_hsl_color"; + case EBlendHslLuminosity: return "blend_support_hsl_luminosity"; + case EBlendAllEquations: return "blend_support_all_equations"; + 
default: return "unknown"; + } + } + static const char* getGeometryString(TLayoutGeometry geometry) + { + switch (geometry) { + case ElgPoints: return "points"; + case ElgLines: return "lines"; + case ElgLinesAdjacency: return "lines_adjacency"; + case ElgLineStrip: return "line_strip"; + case ElgTriangles: return "triangles"; + case ElgTrianglesAdjacency: return "triangles_adjacency"; + case ElgTriangleStrip: return "triangle_strip"; + case ElgQuads: return "quads"; + case ElgIsolines: return "isolines"; + default: return "none"; + } + } + static const char* getVertexSpacingString(TVertexSpacing spacing) + { + switch (spacing) { + case EvsEqual: return "equal_spacing"; + case EvsFractionalEven: return "fractional_even_spacing"; + case EvsFractionalOdd: return "fractional_odd_spacing"; + default: return "none"; + } + } + static const char* getVertexOrderString(TVertexOrder order) + { + switch (order) { + case EvoCw: return "cw"; + case EvoCcw: return "ccw"; + default: return "none"; + } + } + static int mapGeometryToSize(TLayoutGeometry geometry) + { + switch (geometry) { + case ElgPoints: return 1; + case ElgLines: return 2; + case ElgLinesAdjacency: return 4; + case ElgTriangles: return 3; + case ElgTrianglesAdjacency: return 6; + default: return 0; + } + } + static const char* getInterlockOrderingString(TInterlockOrdering order) + { + switch (order) { + case EioPixelInterlockOrdered: return "pixel_interlock_ordered"; + case EioPixelInterlockUnordered: return "pixel_interlock_unordered"; + case EioSampleInterlockOrdered: return "sample_interlock_ordered"; + case EioSampleInterlockUnordered: return "sample_interlock_unordered"; + case EioShadingRateInterlockOrdered: return "shading_rate_interlock_ordered"; + case EioShadingRateInterlockUnordered: return "shading_rate_interlock_unordered"; + default: return "none"; + } + } +#endif +}; + +// Qualifiers that don't need to be keep per object. They have shader scope, not object scope. +// So, they will not be part of TType, TQualifier, etc. 
struct TShaderQualifiers { + TLayoutGeometry geometry; // geometry/tessellation shader in/out primitives + bool pixelCenterInteger; // fragment shader + bool originUpperLeft; // fragment shader + int invocations; + int vertices; // for tessellation "vertices", geometry & mesh "max_vertices" + TVertexSpacing spacing; + TVertexOrder order; + bool pointMode; + int localSize[3]; // compute shader + bool localSizeNotDefault[3]; // compute shader + int localSizeSpecId[3]; // compute shader specialization id for gl_WorkGroupSize +#ifndef GLSLANG_WEB + bool earlyFragmentTests; // fragment input + bool postDepthCoverage; // fragment input + TLayoutDepth layoutDepth; + bool blendEquation; // true if any blend equation was specified + int numViews; // multiview extensions + TInterlockOrdering interlockOrdering; + bool layoutOverrideCoverage; // true if layout override_coverage set + bool layoutDerivativeGroupQuads; // true if layout derivative_group_quadsNV set + bool layoutDerivativeGroupLinear; // true if layout derivative_group_linearNV set + int primitives; // mesh shader "max_primitives" + bool layoutPrimitiveCulling; // true if layout primitive_culling set + TLayoutDepth getDepth() const { return layoutDepth; } +#else + TLayoutDepth getDepth() const { return EldNone; } +#endif + + void init() + { + geometry = ElgNone; + originUpperLeft = false; + pixelCenterInteger = false; + invocations = TQualifier::layoutNotSet; + vertices = TQualifier::layoutNotSet; + spacing = EvsNone; + order = EvoNone; + pointMode = false; + localSize[0] = 1; + localSize[1] = 1; + localSize[2] = 1; + localSizeNotDefault[0] = false; + localSizeNotDefault[1] = false; + localSizeNotDefault[2] = false; + localSizeSpecId[0] = TQualifier::layoutNotSet; + localSizeSpecId[1] = TQualifier::layoutNotSet; + localSizeSpecId[2] = TQualifier::layoutNotSet; +#ifndef GLSLANG_WEB + earlyFragmentTests = false; + postDepthCoverage = false; + layoutDepth = EldNone; + blendEquation = false; + numViews = TQualifier::layoutNotSet; + layoutOverrideCoverage = false; + layoutDerivativeGroupQuads = false; + layoutDerivativeGroupLinear = false; + layoutPrimitiveCulling = false; + primitives = TQualifier::layoutNotSet; + interlockOrdering = EioNone; +#endif + } + +#ifdef GLSLANG_WEB + bool hasBlendEquation() const { return false; } +#else + bool hasBlendEquation() const { return blendEquation; } +#endif + + // Merge in characteristics from the 'src' qualifier. They can override when + // set, but never erase when not set.
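+    // For example (illustrative):
+    //     TShaderQualifiers a, b; a.init(); b.init();
+    //     b.invocations = 2;
+    //     a.merge(b);     // a.invocations is now 2
+    //     b.invocations = TQualifier::layoutNotSet;
+    //     a.merge(b);     // a.invocations stays 2; unset fields never erase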
+ void merge(const TShaderQualifiers& src) + { + if (src.geometry != ElgNone) + geometry = src.geometry; + if (src.pixelCenterInteger) + pixelCenterInteger = src.pixelCenterInteger; + if (src.originUpperLeft) + originUpperLeft = src.originUpperLeft; + if (src.invocations != TQualifier::layoutNotSet) + invocations = src.invocations; + if (src.vertices != TQualifier::layoutNotSet) + vertices = src.vertices; + if (src.spacing != EvsNone) + spacing = src.spacing; + if (src.order != EvoNone) + order = src.order; + if (src.pointMode) + pointMode = true; + for (int i = 0; i < 3; ++i) { + if (src.localSize[i] > 1) + localSize[i] = src.localSize[i]; + } + for (int i = 0; i < 3; ++i) { + localSizeNotDefault[i] = src.localSizeNotDefault[i] || localSizeNotDefault[i]; + } + for (int i = 0; i < 3; ++i) { + if (src.localSizeSpecId[i] != TQualifier::layoutNotSet) + localSizeSpecId[i] = src.localSizeSpecId[i]; + } +#ifndef GLSLANG_WEB + if (src.earlyFragmentTests) + earlyFragmentTests = true; + if (src.postDepthCoverage) + postDepthCoverage = true; + if (src.layoutDepth) + layoutDepth = src.layoutDepth; + if (src.blendEquation) + blendEquation = src.blendEquation; + if (src.numViews != TQualifier::layoutNotSet) + numViews = src.numViews; + if (src.layoutOverrideCoverage) + layoutOverrideCoverage = src.layoutOverrideCoverage; + if (src.layoutDerivativeGroupQuads) + layoutDerivativeGroupQuads = src.layoutDerivativeGroupQuads; + if (src.layoutDerivativeGroupLinear) + layoutDerivativeGroupLinear = src.layoutDerivativeGroupLinear; + if (src.primitives != TQualifier::layoutNotSet) + primitives = src.primitives; + if (src.interlockOrdering != EioNone) + interlockOrdering = src.interlockOrdering; + if (src.layoutPrimitiveCulling) + layoutPrimitiveCulling = src.layoutPrimitiveCulling; +#endif + } +}; + +// +// TPublicType is just temporarily used while parsing and not quite the same +// information kept per node in TType. Due to the bison stack, it can't have +// types that it thinks have non-trivial constructors. It should +// just be used while recognizing the grammar, not anything else. +// Once enough is known about the situation, the proper information +// is moved into a TType, or the parse context, etc.
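+//
+// A sketch of typical grammar-time use (illustrative only; 'loc' stands for
+// the current TSourceLoc):
+//
+//     TPublicType pt;
+//     pt.init(loc, /*global*/ true);
+//     pt.basicType = EbtFloat;
+//     pt.setVector(4);          // pt now describes a global vec4
+//     TType type(pt);           // lifted into a real TType once enough is known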
+// +class TPublicType { +public: + TBasicType basicType; + TSampler sampler; + TQualifier qualifier; + TShaderQualifiers shaderQualifiers; + int vectorSize : 4; + int matrixCols : 4; + int matrixRows : 4; + bool coopmat : 1; + TArraySizes* arraySizes; + const TType* userDef; + TSourceLoc loc; + TArraySizes* typeParameters; + +#ifdef GLSLANG_WEB + bool isCoopmat() const { return false; } +#else + bool isCoopmat() const { return coopmat; } +#endif + + void initType(const TSourceLoc& l) + { + basicType = EbtVoid; + vectorSize = 1; + matrixRows = 0; + matrixCols = 0; + arraySizes = nullptr; + userDef = nullptr; + loc = l; + typeParameters = nullptr; + coopmat = false; + } + + void initQualifiers(bool global = false) + { + qualifier.clear(); + if (global) + qualifier.storage = EvqGlobal; + } + + void init(const TSourceLoc& l, bool global = false) + { + initType(l); + sampler.clear(); + initQualifiers(global); + shaderQualifiers.init(); + } + + void setVector(int s) + { + matrixRows = 0; + matrixCols = 0; + vectorSize = s; + } + + void setMatrix(int c, int r) + { + matrixRows = r; + matrixCols = c; + vectorSize = 0; + } + + bool isScalar() const + { + return matrixCols == 0 && vectorSize == 1 && arraySizes == nullptr && userDef == nullptr; + } + + // "Image" is a superset of "Subpass" + bool isImage() const { return basicType == EbtSampler && sampler.isImage(); } + bool isSubpass() const { return basicType == EbtSampler && sampler.isSubpass(); } +}; + +// +// Base class for things that have a type. +// +class TType { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + + // for "empty" type (no args) or simple scalar/vector/matrix + explicit TType(TBasicType t = EbtVoid, TStorageQualifier q = EvqTemporary, int vs = 1, int mc = 0, int mr = 0, + bool isVector = false) : + basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false), + arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr) + { + sampler.clear(); + qualifier.clear(); + qualifier.storage = q; + assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices + } + // for explicit precision qualifier + TType(TBasicType t, TStorageQualifier q, TPrecisionQualifier p, int vs = 1, int mc = 0, int mr = 0, + bool isVector = false) : + basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false), + arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr) + { + sampler.clear(); + qualifier.clear(); + qualifier.storage = q; + qualifier.precision = p; + assert(p >= EpqNone && p <= EpqHigh); + assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices + } + // for turning a TPublicType into a TType, using a shallow copy + explicit TType(const TPublicType& p) : + basicType(p.basicType), + vectorSize(p.vectorSize), matrixCols(p.matrixCols), matrixRows(p.matrixRows), vector1(false), coopmat(p.coopmat), + arraySizes(p.arraySizes), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(p.typeParameters) + { + if (basicType == EbtSampler) + sampler = p.sampler; + else + sampler.clear(); + qualifier = p.qualifier; + if (p.userDef) { + if (p.userDef->basicType == EbtReference) { + basicType = EbtReference; + referentType = p.userDef->referentType; + } else { + structure = p.userDef->getWritableStruct(); // public type is short-lived; there are no sharing issues + } + typeName = 
NewPoolTString(p.userDef->getTypeName().c_str()); + } + if (p.isCoopmat() && p.typeParameters && p.typeParameters->getNumDims() > 0) { + int numBits = p.typeParameters->getDimSize(0); + if (p.basicType == EbtFloat && numBits == 16) { + basicType = EbtFloat16; + qualifier.precision = EpqNone; + } else if (p.basicType == EbtUint && numBits == 8) { + basicType = EbtUint8; + qualifier.precision = EpqNone; + } else if (p.basicType == EbtInt && numBits == 8) { + basicType = EbtInt8; + qualifier.precision = EpqNone; + } + } + } + // for construction of sampler types + TType(const TSampler& sampler, TStorageQualifier q = EvqUniform, TArraySizes* as = nullptr) : + basicType(EbtSampler), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false), + arraySizes(as), structure(nullptr), fieldName(nullptr), typeName(nullptr), + sampler(sampler), typeParameters(nullptr) + { + qualifier.clear(); + qualifier.storage = q; + } + // to efficiently make a dereferenced type + // without ever duplicating the outer structure that will be thrown away + // and using only shallow copy + TType(const TType& type, int derefIndex, bool rowMajor = false) + { + if (type.isArray()) { + shallowCopy(type); + if (type.getArraySizes()->getNumDims() == 1) { + arraySizes = nullptr; + } else { + // want our own copy of the array, so we can edit it + arraySizes = new TArraySizes; + arraySizes->copyDereferenced(*type.arraySizes); + } + } else if (type.basicType == EbtStruct || type.basicType == EbtBlock) { + // do a structure dereference + const TTypeList& memberList = *type.getStruct(); + shallowCopy(*memberList[derefIndex].type); + return; + } else { + // do a vector/matrix dereference + shallowCopy(type); + if (matrixCols > 0) { + // dereference from matrix to vector + if (rowMajor) + vectorSize = matrixCols; + else + vectorSize = matrixRows; + matrixCols = 0; + matrixRows = 0; + if (vectorSize == 1) + vector1 = true; + } else if (isVector()) { + // dereference from vector to scalar + vectorSize = 1; + vector1 = false; + } else if (isCoopMat()) { + coopmat = false; + typeParameters = nullptr; + } + } + } + // for making structures, ... + TType(TTypeList* userDef, const TString& n) : + basicType(EbtStruct), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false), + arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr) + { + sampler.clear(); + qualifier.clear(); + typeName = NewPoolTString(n.c_str()); + } + // For interface blocks + TType(TTypeList* userDef, const TString& n, const TQualifier& q) : + basicType(EbtBlock), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false), + qualifier(q), arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr) + { + sampler.clear(); + typeName = NewPoolTString(n.c_str()); + } + // for block reference (first parameter must be EbtReference) + explicit TType(TBasicType t, const TType &p, const TString& n) : + basicType(t), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), + arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr) + { + assert(t == EbtReference); + typeName = NewPoolTString(n.c_str()); + qualifier.clear(); + qualifier.storage = p.qualifier.storage; + referentType = p.clone(); + } + virtual ~TType() {} + + // Not for use across pool pops; it will cause multiple instances of TType to point to the same information. 
+    // This only works if that information (like a structure's list of types) does not change and
+    // the instances are sharing the same pool.
+    void shallowCopy(const TType& copyOf)
+    {
+        basicType = copyOf.basicType;
+        sampler = copyOf.sampler;
+        qualifier = copyOf.qualifier;
+        vectorSize = copyOf.vectorSize;
+        matrixCols = copyOf.matrixCols;
+        matrixRows = copyOf.matrixRows;
+        vector1 = copyOf.vector1;
+        arraySizes = copyOf.arraySizes; // copying the pointer only, not the contents
+        fieldName = copyOf.fieldName;
+        typeName = copyOf.typeName;
+        if (isStruct()) {
+            structure = copyOf.structure;
+        } else {
+            referentType = copyOf.referentType;
+        }
+        typeParameters = copyOf.typeParameters;
+        coopmat = copyOf.isCoopMat();
+    }
+
+    // Make complete copy of the whole type graph rooted at 'copyOf'.
+    void deepCopy(const TType& copyOf)
+    {
+        TMap<TTypeList*,TTypeList*> copied; // to enable copying a type graph as a graph, not a tree
+        deepCopy(copyOf, copied);
+    }
+
+    // Recursively make temporary
+    void makeTemporary()
+    {
+        getQualifier().makeTemporary();
+
+        if (isStruct())
+            for (unsigned int i = 0; i < structure->size(); ++i)
+                (*structure)[i].type->makeTemporary();
+    }
+
+    TType* clone() const
+    {
+        TType *newType = new TType();
+        newType->deepCopy(*this);
+
+        return newType;
+    }
+
+    void makeVector() { vector1 = true; }
+
+    virtual void hideMember() { basicType = EbtVoid; vectorSize = 1; }
+    virtual bool hiddenMember() const { return basicType == EbtVoid; }
+
+    virtual void setFieldName(const TString& n) { fieldName = NewPoolTString(n.c_str()); }
+    virtual const TString& getTypeName() const
+    {
+        assert(typeName);
+        return *typeName;
+    }
+
+    virtual const TString& getFieldName() const
+    {
+        assert(fieldName);
+        return *fieldName;
+    }
+    TShaderInterface getShaderInterface() const
+    {
+        if (basicType != EbtBlock)
+            return EsiNone;
+
+        switch (qualifier.storage) {
+        default:
+            return EsiNone;
+        case EvqVaryingIn:
+            return EsiInput;
+        case EvqVaryingOut:
+            return EsiOutput;
+        case EvqUniform:
+        case EvqBuffer:
+            return EsiUniform;
+        }
+    }
+
+    virtual TBasicType getBasicType() const { return basicType; }
+    virtual const TSampler& getSampler() const { return sampler; }
+    virtual TSampler& getSampler() { return sampler; }
+
+    virtual TQualifier& getQualifier() { return qualifier; }
+    virtual const TQualifier& getQualifier() const { return qualifier; }
+
+    virtual int getVectorSize() const { return vectorSize; } // returns 1 for either scalar or vector of size 1, valid for both
+    virtual int getMatrixCols() const { return matrixCols; }
+    virtual int getMatrixRows() const { return matrixRows; }
+    virtual int getOuterArraySize() const { return arraySizes->getOuterSize(); }
+    virtual TIntermTyped* getOuterArrayNode() const { return arraySizes->getOuterNode(); }
+    virtual int getCumulativeArraySize() const { return arraySizes->getCumulativeSize(); }
+#ifdef GLSLANG_WEB
+    bool isArrayOfArrays() const { return false; }
+#else
+    bool isArrayOfArrays() const { return arraySizes != nullptr && arraySizes->getNumDims() > 1; }
+#endif
+    virtual int getImplicitArraySize() const { return arraySizes->getImplicitSize(); }
+    virtual const TArraySizes* getArraySizes() const { return arraySizes; }
+    virtual TArraySizes* getArraySizes() { return arraySizes; }
+    virtual TType* getReferentType() const { return referentType; }
+    virtual const TArraySizes* getTypeParameters() const { return typeParameters; }
+    virtual TArraySizes* getTypeParameters() { return typeParameters; }
+
+    virtual bool isScalar() const { return ! isVector() && ! isMatrix() && ! isStruct() && ! isArray(); }
+    virtual bool isScalarOrVec1() const { return isScalar() || vector1; }
+    virtual bool isVector() const { return vectorSize > 1 || vector1; }
+    virtual bool isMatrix() const { return matrixCols ? true : false; }
+    virtual bool isArray() const { return arraySizes != nullptr; }
+    virtual bool isSizedArray() const { return isArray() && arraySizes->isSized(); }
+    virtual bool isUnsizedArray() const { return isArray() && !arraySizes->isSized(); }
+    virtual bool isArrayVariablyIndexed() const { assert(isArray()); return arraySizes->isVariablyIndexed(); }
+    virtual void setArrayVariablyIndexed() { assert(isArray()); arraySizes->setVariablyIndexed(); }
+    virtual void updateImplicitArraySize(int size) { assert(isArray()); arraySizes->updateImplicitSize(size); }
+    virtual bool isStruct() const { return basicType == EbtStruct || basicType == EbtBlock; }
+    virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble || basicType == EbtFloat16; }
+    virtual bool isIntegerDomain() const
+    {
+        switch (basicType) {
+        case EbtInt8:
+        case EbtUint8:
+        case EbtInt16:
+        case EbtUint16:
+        case EbtInt:
+        case EbtUint:
+        case EbtInt64:
+        case EbtUint64:
+        case EbtAtomicUint:
+            return true;
+        default:
+            break;
+        }
+        return false;
+    }
+    virtual bool isOpaque() const { return basicType == EbtSampler
+#ifndef GLSLANG_WEB
+        || basicType == EbtAtomicUint || basicType == EbtAccStruct || basicType == EbtRayQuery
+#endif
+        ; }
+    virtual bool isBuiltIn() const { return getQualifier().builtIn != EbvNone; }
+
+    // "Image" is a superset of "Subpass"
+    virtual bool isImage() const { return basicType == EbtSampler && getSampler().isImage(); }
+    virtual bool isSubpass() const { return basicType == EbtSampler && getSampler().isSubpass(); }
+    virtual bool isTexture() const { return basicType == EbtSampler && getSampler().isTexture(); }
+    // Check the block-name convention of creating a block without populating its members:
+    virtual bool isUnusableName() const { return isStruct() && structure == nullptr; }
+    virtual bool isParameterized() const { return typeParameters != nullptr; }
+#ifdef GLSLANG_WEB
+    bool isAtomic() const { return false; }
+    bool isCoopMat() const { return false; }
+    bool isReference() const { return false; }
+#else
+    bool isAtomic() const { return basicType == EbtAtomicUint; }
+    bool isCoopMat() const { return coopmat; }
+    bool isReference() const { return getBasicType() == EbtReference; }
+#endif
+
+    // return true if this type contains any subtype which satisfies the given predicate.
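+    // For example (editor's illustration): callers can pass any predicate,
+    // just as the contains*() helpers below do internally:
+    //     bool hasBool = type.contains(
+    //         [](const TType* t) { return t->getBasicType() == EbtBool; });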
+    template <typename P>
+    bool contains(P predicate) const
+    {
+        if (predicate(this))
+            return true;
+
+        const auto hasa = [predicate](const TTypeLoc& tl) { return tl.type->contains(predicate); };
+
+        return isStruct() && std::any_of(structure->begin(), structure->end(), hasa);
+    }
+
+    // Recursively checks if the type contains the given basic type
+    virtual bool containsBasicType(TBasicType checkType) const
+    {
+        return contains([checkType](const TType* t) { return t->basicType == checkType; } );
+    }
+
+    // Recursively check the structure for any arrays, needed for some error checks
+    virtual bool containsArray() const
+    {
+        return contains([](const TType* t) { return t->isArray(); } );
+    }
+
+    // Check the structure for any structures, needed for some error checks
+    virtual bool containsStructure() const
+    {
+        return contains([this](const TType* t) { return t != this && t->isStruct(); } );
+    }
+
+    // Recursively check the structure for any unsized arrays, needed for triggering a copyUp().
+    virtual bool containsUnsizedArray() const
+    {
+        return contains([](const TType* t) { return t->isUnsizedArray(); } );
+    }
+
+    virtual bool containsOpaque() const
+    {
+        return contains([](const TType* t) { return t->isOpaque(); } );
+    }
+
+    // Recursively checks if the type contains a built-in variable
+    virtual bool containsBuiltIn() const
+    {
+        return contains([](const TType* t) { return t->isBuiltIn(); } );
+    }
+
+    virtual bool containsNonOpaque() const
+    {
+        const auto nonOpaque = [](const TType* t) {
+            switch (t->basicType) {
+            case EbtVoid:
+            case EbtFloat:
+            case EbtDouble:
+            case EbtFloat16:
+            case EbtInt8:
+            case EbtUint8:
+            case EbtInt16:
+            case EbtUint16:
+            case EbtInt:
+            case EbtUint:
+            case EbtInt64:
+            case EbtUint64:
+            case EbtBool:
+            case EbtReference:
+                return true;
+            default:
+                return false;
+            }
+        };
+
+        return contains(nonOpaque);
+    }
+
+    virtual bool containsSpecializationSize() const
+    {
+        return contains([](const TType* t) { return t->isArray() && t->arraySizes->isOuterSpecialization(); } );
+    }
+
+#ifdef GLSLANG_WEB
+    bool containsDouble() const { return false; }
+    bool contains16BitFloat() const { return false; }
+    bool contains64BitInt() const { return false; }
+    bool contains16BitInt() const { return false; }
+    bool contains8BitInt() const { return false; }
+    bool containsCoopMat() const { return false; }
+    bool containsReference() const { return false; }
+#else
+    bool containsDouble() const
+    {
+        return containsBasicType(EbtDouble);
+    }
+    bool contains16BitFloat() const
+    {
+        return containsBasicType(EbtFloat16);
+    }
+    bool contains64BitInt() const
+    {
+        return containsBasicType(EbtInt64) || containsBasicType(EbtUint64);
+    }
+    bool contains16BitInt() const
+    {
+        return containsBasicType(EbtInt16) || containsBasicType(EbtUint16);
+    }
+    bool contains8BitInt() const
+    {
+        return containsBasicType(EbtInt8) || containsBasicType(EbtUint8);
+    }
+    bool containsCoopMat() const
+    {
+        return contains([](const TType* t) { return t->coopmat; } );
+    }
+    bool containsReference() const
+    {
+        return containsBasicType(EbtReference);
+    }
+#endif
+
+    // Array editing methods. Array descriptors can be shared across
+    // type instances. This allows all uses of the same array
+    // to be updated at once. E.g., all nodes can be explicitly sized
+    // by tracking and correcting one implicit size. Or, all nodes
+    // can get the explicit size on a redeclaration that gives size.
+    //
+    // N.B.: Don't share with the shared symbol tables (symbols are
+    // marked as isReadOnly(). Such symbols with arrays that will be
+    // edited need to copyUp() on first use, so that
+    // A) the edits don't affect the shared symbol table, and
+    // B) the edits are shared across all users.
+    void updateArraySizes(const TType& type)
+    {
+        // For when we may already be sharing existing array descriptors,
+        // keeping the pointers the same, just updating the contents.
+        assert(arraySizes != nullptr);
+        assert(type.arraySizes != nullptr);
+        *arraySizes = *type.arraySizes;
+    }
+    void copyArraySizes(const TArraySizes& s)
+    {
+        // For setting a fresh new set of array sizes, not yet worrying about sharing.
+        arraySizes = new TArraySizes;
+        *arraySizes = s;
+    }
+    void transferArraySizes(TArraySizes* s)
+    {
+        // For setting an already allocated set of sizes that this type can use
+        // (no copy made).
+        arraySizes = s;
+    }
+    void clearArraySizes()
+    {
+        arraySizes = nullptr;
+    }
+
+    // Add inner array sizes, to any existing sizes, via copy; the
+    // sizes passed in can still be reused for other purposes.
+    void copyArrayInnerSizes(const TArraySizes* s)
+    {
+        if (s != nullptr) {
+            if (arraySizes == nullptr)
+                copyArraySizes(*s);
+            else
+                arraySizes->addInnerSizes(*s);
+        }
+    }
+    void changeOuterArraySize(int s) { arraySizes->changeOuterSize(s); }
+
+    // Recursively make the implicit array size the explicit array size.
+    // Explicit arrays are compile-time or link-time sized, never run-time sized.
+    // Sometimes, policy calls for an array to be run-time sized even if it was
+    // never variably indexed: Don't turn a 'skipNonvariablyIndexed' array into
+    // an explicit array.
+    void adoptImplicitArraySizes(bool skipNonvariablyIndexed)
+    {
+        if (isUnsizedArray() && !(skipNonvariablyIndexed || isArrayVariablyIndexed()))
+            changeOuterArraySize(getImplicitArraySize());
+        // For multi-dim per-view arrays, set unsized inner dimension size to 1
+        if (qualifier.isPerView() && arraySizes && arraySizes->isInnerUnsized())
+            arraySizes->clearInnerUnsized();
+        if (isStruct() && structure->size() > 0) {
+            int lastMember = (int)structure->size() - 1;
+            for (int i = 0; i < lastMember; ++i)
+                (*structure)[i].type->adoptImplicitArraySizes(false);
+            // implement the "last member of an SSBO" policy
+            (*structure)[lastMember].type->adoptImplicitArraySizes(getQualifier().storage == EvqBuffer);
+        }
+    }
+
+    void updateTypeParameters(const TType& type)
+    {
+        // For when we may already be sharing existing array descriptors,
+        // keeping the pointers the same, just updating the contents.
+        assert(typeParameters != nullptr);
+        assert(type.typeParameters != nullptr);
+        *typeParameters = *type.typeParameters;
+    }
+    void copyTypeParameters(const TArraySizes& s)
+    {
+        // For setting a fresh new set of type parameters, not yet worrying about sharing.
+        typeParameters = new TArraySizes;
+        *typeParameters = s;
+    }
+    void transferTypeParameters(TArraySizes* s)
+    {
+        // For setting an already allocated set of sizes that this type can use
+        // (no copy made).
+        typeParameters = s;
+    }
+    void clearTypeParameters()
+    {
+        typeParameters = nullptr;
+    }
+
+    // Add inner array sizes, to any existing sizes, via copy; the
+    // sizes passed in can still be reused for other purposes.
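+    // (Editor's illustration of the sharing policy above: given a
+    //  'TArraySizes* s', copyTypeParameters(*s) gives this type a fresh copy
+    //  and leaves 's' reusable, while transferTypeParameters(s) adopts 's'
+    //  itself with no copy made.)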
+ void copyTypeParametersInnerSizes(const TArraySizes* s) + { + if (s != nullptr) { + if (typeParameters == nullptr) + copyTypeParameters(*s); + else + typeParameters->addInnerSizes(*s); + } + } + + + + const char* getBasicString() const + { + return TType::getBasicString(basicType); + } + + static const char* getBasicString(TBasicType t) + { + switch (t) { + case EbtFloat: return "float"; + case EbtInt: return "int"; + case EbtUint: return "uint"; + case EbtSampler: return "sampler/image"; +#ifndef GLSLANG_WEB + case EbtVoid: return "void"; + case EbtDouble: return "double"; + case EbtFloat16: return "float16_t"; + case EbtInt8: return "int8_t"; + case EbtUint8: return "uint8_t"; + case EbtInt16: return "int16_t"; + case EbtUint16: return "uint16_t"; + case EbtInt64: return "int64_t"; + case EbtUint64: return "uint64_t"; + case EbtBool: return "bool"; + case EbtAtomicUint: return "atomic_uint"; + case EbtStruct: return "structure"; + case EbtBlock: return "block"; + case EbtAccStruct: return "accelerationStructureNV"; + case EbtRayQuery: return "rayQueryEXT"; + case EbtReference: return "reference"; +#endif + default: return "unknown type"; + } + } + +#ifdef GLSLANG_WEB + TString getCompleteString() const { return ""; } + const char* getStorageQualifierString() const { return ""; } + const char* getBuiltInVariableString() const { return ""; } + const char* getPrecisionQualifierString() const { return ""; } + TString getBasicTypeString() const { return ""; } +#else + TString getCompleteString() const + { + TString typeString; + + const auto appendStr = [&](const char* s) { typeString.append(s); }; + const auto appendUint = [&](unsigned int u) { typeString.append(std::to_string(u).c_str()); }; + const auto appendInt = [&](int i) { typeString.append(std::to_string(i).c_str()); }; + + if (qualifier.hasLayout()) { + // To reduce noise, skip this if the only layout is an xfb_buffer + // with no triggering xfb_offset. 
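+            // (Editor's note, illustrative only: for a declaration such as
+            //  'layout(location = 0) in highp vec4 v;' the appends below build
+            //  up roughly "layout( location=0) in highp 4-component vector of
+            //  float"; exact spacing follows the appendStr/appendUint calls.)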
+ TQualifier noXfbBuffer = qualifier; + noXfbBuffer.layoutXfbBuffer = TQualifier::layoutXfbBufferEnd; + if (noXfbBuffer.hasLayout()) { + appendStr("layout("); + if (qualifier.hasAnyLocation()) { + appendStr(" location="); + appendUint(qualifier.layoutLocation); + if (qualifier.hasComponent()) { + appendStr(" component="); + appendUint(qualifier.layoutComponent); + } + if (qualifier.hasIndex()) { + appendStr(" index="); + appendUint(qualifier.layoutIndex); + } + } + if (qualifier.hasSet()) { + appendStr(" set="); + appendUint(qualifier.layoutSet); + } + if (qualifier.hasBinding()) { + appendStr(" binding="); + appendUint(qualifier.layoutBinding); + } + if (qualifier.hasStream()) { + appendStr(" stream="); + appendUint(qualifier.layoutStream); + } + if (qualifier.hasMatrix()) { + appendStr(" "); + appendStr(TQualifier::getLayoutMatrixString(qualifier.layoutMatrix)); + } + if (qualifier.hasPacking()) { + appendStr(" "); + appendStr(TQualifier::getLayoutPackingString(qualifier.layoutPacking)); + } + if (qualifier.hasOffset()) { + appendStr(" offset="); + appendInt(qualifier.layoutOffset); + } + if (qualifier.hasAlign()) { + appendStr(" align="); + appendInt(qualifier.layoutAlign); + } + if (qualifier.hasFormat()) { + appendStr(" "); + appendStr(TQualifier::getLayoutFormatString(qualifier.layoutFormat)); + } + if (qualifier.hasXfbBuffer() && qualifier.hasXfbOffset()) { + appendStr(" xfb_buffer="); + appendUint(qualifier.layoutXfbBuffer); + } + if (qualifier.hasXfbOffset()) { + appendStr(" xfb_offset="); + appendUint(qualifier.layoutXfbOffset); + } + if (qualifier.hasXfbStride()) { + appendStr(" xfb_stride="); + appendUint(qualifier.layoutXfbStride); + } + if (qualifier.hasAttachment()) { + appendStr(" input_attachment_index="); + appendUint(qualifier.layoutAttachment); + } + if (qualifier.hasSpecConstantId()) { + appendStr(" constant_id="); + appendUint(qualifier.layoutSpecConstantId); + } + if (qualifier.layoutPushConstant) + appendStr(" push_constant"); + if (qualifier.layoutBufferReference) + appendStr(" buffer_reference"); + if (qualifier.hasBufferReferenceAlign()) { + appendStr(" buffer_reference_align="); + appendUint(1u << qualifier.layoutBufferReferenceAlign); + } + + if (qualifier.layoutPassthrough) + appendStr(" passthrough"); + if (qualifier.layoutViewportRelative) + appendStr(" layoutViewportRelative"); + if (qualifier.layoutSecondaryViewportRelativeOffset != -2048) { + appendStr(" layoutSecondaryViewportRelativeOffset="); + appendInt(qualifier.layoutSecondaryViewportRelativeOffset); + } + if (qualifier.layoutShaderRecord) + appendStr(" shaderRecordNV"); + + appendStr(")"); + } + } + + if (qualifier.invariant) + appendStr(" invariant"); + if (qualifier.noContraction) + appendStr(" noContraction"); + if (qualifier.centroid) + appendStr(" centroid"); + if (qualifier.smooth) + appendStr(" smooth"); + if (qualifier.flat) + appendStr(" flat"); + if (qualifier.nopersp) + appendStr(" noperspective"); + if (qualifier.explicitInterp) + appendStr(" __explicitInterpAMD"); + if (qualifier.pervertexNV) + appendStr(" pervertexNV"); + if (qualifier.perPrimitiveNV) + appendStr(" perprimitiveNV"); + if (qualifier.perViewNV) + appendStr(" perviewNV"); + if (qualifier.perTaskNV) + appendStr(" taskNV"); + if (qualifier.patch) + appendStr(" patch"); + if (qualifier.sample) + appendStr(" sample"); + if (qualifier.coherent) + appendStr(" coherent"); + if (qualifier.devicecoherent) + appendStr(" devicecoherent"); + if (qualifier.queuefamilycoherent) + appendStr(" queuefamilycoherent"); + if 
(qualifier.workgroupcoherent) + appendStr(" workgroupcoherent"); + if (qualifier.subgroupcoherent) + appendStr(" subgroupcoherent"); + if (qualifier.shadercallcoherent) + appendStr(" shadercallcoherent"); + if (qualifier.nonprivate) + appendStr(" nonprivate"); + if (qualifier.volatil) + appendStr(" volatile"); + if (qualifier.restrict) + appendStr(" restrict"); + if (qualifier.readonly) + appendStr(" readonly"); + if (qualifier.writeonly) + appendStr(" writeonly"); + if (qualifier.specConstant) + appendStr(" specialization-constant"); + if (qualifier.nonUniform) + appendStr(" nonuniform"); + appendStr(" "); + appendStr(getStorageQualifierString()); + if (isArray()) { + for(int i = 0; i < (int)arraySizes->getNumDims(); ++i) { + int size = arraySizes->getDimSize(i); + if (size == UnsizedArraySize && i == 0 && arraySizes->isVariablyIndexed()) + appendStr(" runtime-sized array of"); + else { + if (size == UnsizedArraySize) { + appendStr(" unsized"); + if (i == 0) { + appendStr(" "); + appendInt(arraySizes->getImplicitSize()); + } + } else { + appendStr(" "); + appendInt(arraySizes->getDimSize(i)); + } + appendStr("-element array of"); + } + } + } + if (isParameterized()) { + appendStr("<"); + for(int i = 0; i < (int)typeParameters->getNumDims(); ++i) { + appendInt(typeParameters->getDimSize(i)); + if (i != (int)typeParameters->getNumDims() - 1) + appendStr(", "); + } + appendStr(">"); + } + if (qualifier.precision != EpqNone) { + appendStr(" "); + appendStr(getPrecisionQualifierString()); + } + if (isMatrix()) { + appendStr(" "); + appendInt(matrixCols); + appendStr("X"); + appendInt(matrixRows); + appendStr(" matrix of"); + } else if (isVector()) { + appendStr(" "); + appendInt(vectorSize); + appendStr("-component vector of"); + } + + appendStr(" "); + typeString.append(getBasicTypeString()); + + if (qualifier.builtIn != EbvNone) { + appendStr(" "); + appendStr(getBuiltInVariableString()); + } + + // Add struct/block members + if (isStruct() && structure) { + appendStr("{"); + bool hasHiddenMember = true; + for (size_t i = 0; i < structure->size(); ++i) { + if (! 
(*structure)[i].type->hiddenMember()) { + if (!hasHiddenMember) + appendStr(", "); + typeString.append((*structure)[i].type->getCompleteString()); + typeString.append(" "); + typeString.append((*structure)[i].type->getFieldName()); + hasHiddenMember = false; + } + } + appendStr("}"); + } + + return typeString; + } + + TString getBasicTypeString() const + { + if (basicType == EbtSampler) + return sampler.getString(); + else + return getBasicString(); + } + + const char* getStorageQualifierString() const { return GetStorageQualifierString(qualifier.storage); } + const char* getBuiltInVariableString() const { return GetBuiltInVariableString(qualifier.builtIn); } + const char* getPrecisionQualifierString() const { return GetPrecisionQualifierString(qualifier.precision); } +#endif + + const TTypeList* getStruct() const { assert(isStruct()); return structure; } + void setStruct(TTypeList* s) { assert(isStruct()); structure = s; } + TTypeList* getWritableStruct() const { assert(isStruct()); return structure; } // This should only be used when known to not be sharing with other threads + void setBasicType(const TBasicType& t) { basicType = t; } + + int computeNumComponents() const + { + int components = 0; + + if (getBasicType() == EbtStruct || getBasicType() == EbtBlock) { + for (TTypeList::const_iterator tl = getStruct()->begin(); tl != getStruct()->end(); tl++) + components += ((*tl).type)->computeNumComponents(); + } else if (matrixCols) + components = matrixCols * matrixRows; + else + components = vectorSize; + + if (arraySizes != nullptr) { + components *= arraySizes->getCumulativeSize(); + } + + return components; + } + + // append this type's mangled name to the passed in 'name' + void appendMangledName(TString& name) const + { + buildMangledName(name); + name += ';' ; + } + + // Do two structure types match? They could be declared independently, + // in different places, but still might satisfy the definition of matching. + // From the spec: + // + // "Structures must have the same name, sequence of type names, and + // type definitions, and member names to be considered the same type. + // This rule applies recursively for nested or embedded types." 
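+    // For example (editor's illustration): two independent declarations of
+    //     struct S { vec3 pos; float w; };
+    // in different places match; renaming the struct or a member, or changing
+    // a member's type or order, makes the types differ.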
+ // + bool sameStructType(const TType& right) const + { + // Most commonly, they are both nullptr, or the same pointer to the same actual structure + if ((!isStruct() && !right.isStruct()) || + (isStruct() && right.isStruct() && structure == right.structure)) + return true; + + // Both being nullptr was caught above, now they both have to be structures of the same number of elements + if (!isStruct() || !right.isStruct() || + structure->size() != right.structure->size()) + return false; + + // Structure names have to match + if (*typeName != *right.typeName) + return false; + + // Compare the names and types of all the members, which have to match + for (unsigned int i = 0; i < structure->size(); ++i) { + if ((*structure)[i].type->getFieldName() != (*right.structure)[i].type->getFieldName()) + return false; + + if (*(*structure)[i].type != *(*right.structure)[i].type) + return false; + } + + return true; + } + + bool sameReferenceType(const TType& right) const + { + if (isReference() != right.isReference()) + return false; + + if (!isReference() && !right.isReference()) + return true; + + assert(referentType != nullptr); + assert(right.referentType != nullptr); + + if (referentType == right.referentType) + return true; + + return *referentType == *right.referentType; + } + + // See if two types match, in all aspects except arrayness + bool sameElementType(const TType& right) const + { + return basicType == right.basicType && sameElementShape(right); + } + + // See if two type's arrayness match + bool sameArrayness(const TType& right) const + { + return ((arraySizes == nullptr && right.arraySizes == nullptr) || + (arraySizes != nullptr && right.arraySizes != nullptr && *arraySizes == *right.arraySizes)); + } + + // See if two type's arrayness match in everything except their outer dimension + bool sameInnerArrayness(const TType& right) const + { + assert(arraySizes != nullptr && right.arraySizes != nullptr); + return arraySizes->sameInnerArrayness(*right.arraySizes); + } + + // See if two type's parameters match + bool sameTypeParameters(const TType& right) const + { + return ((typeParameters == nullptr && right.typeParameters == nullptr) || + (typeParameters != nullptr && right.typeParameters != nullptr && *typeParameters == *right.typeParameters)); + } + + // See if two type's elements match in all ways except basic type + bool sameElementShape(const TType& right) const + { + return sampler == right.sampler && + vectorSize == right.vectorSize && + matrixCols == right.matrixCols && + matrixRows == right.matrixRows && + vector1 == right.vector1 && + isCoopMat() == right.isCoopMat() && + sameStructType(right) && + sameReferenceType(right); + } + + // See if a cooperative matrix type parameter with unspecified parameters is + // an OK function parameter + bool coopMatParameterOK(const TType& right) const + { + return isCoopMat() && right.isCoopMat() && (getBasicType() == right.getBasicType()) && + typeParameters == nullptr && right.typeParameters != nullptr; + } + + bool sameCoopMatBaseType(const TType &right) const { + bool rv = coopmat && right.coopmat; + if (getBasicType() == EbtFloat || getBasicType() == EbtFloat16) + rv = right.getBasicType() == EbtFloat || right.getBasicType() == EbtFloat16; + else if (getBasicType() == EbtUint || getBasicType() == EbtUint8) + rv = right.getBasicType() == EbtUint || right.getBasicType() == EbtUint8; + else if (getBasicType() == EbtInt || getBasicType() == EbtInt8) + rv = right.getBasicType() == EbtInt || right.getBasicType() == EbtInt8; + else + rv = 
false; + return rv; + } + + + // See if two types match in all ways (just the actual type, not qualification) + bool operator==(const TType& right) const + { + return sameElementType(right) && sameArrayness(right) && sameTypeParameters(right); + } + + bool operator!=(const TType& right) const + { + return ! operator==(right); + } + + unsigned int getBufferReferenceAlignment() const + { +#ifndef GLSLANG_WEB + if (getBasicType() == glslang::EbtReference) { + return getReferentType()->getQualifier().hasBufferReferenceAlign() ? + (1u << getReferentType()->getQualifier().layoutBufferReferenceAlign) : 16u; + } +#endif + return 0; + } + +protected: + // Require consumer to pick between deep copy and shallow copy. + TType(const TType& type); + TType& operator=(const TType& type); + + // Recursively copy a type graph, while preserving the graph-like + // quality. That is, don't make more than one copy of a structure that + // gets reused multiple times in the type graph. + void deepCopy(const TType& copyOf, TMap& copiedMap) + { + shallowCopy(copyOf); + + if (copyOf.arraySizes) { + arraySizes = new TArraySizes; + *arraySizes = *copyOf.arraySizes; + } + + if (copyOf.typeParameters) { + typeParameters = new TArraySizes; + *typeParameters = *copyOf.typeParameters; + } + + if (copyOf.isStruct() && copyOf.structure) { + auto prevCopy = copiedMap.find(copyOf.structure); + if (prevCopy != copiedMap.end()) + structure = prevCopy->second; + else { + structure = new TTypeList; + copiedMap[copyOf.structure] = structure; + for (unsigned int i = 0; i < copyOf.structure->size(); ++i) { + TTypeLoc typeLoc; + typeLoc.loc = (*copyOf.structure)[i].loc; + typeLoc.type = new TType(); + typeLoc.type->deepCopy(*(*copyOf.structure)[i].type, copiedMap); + structure->push_back(typeLoc); + } + } + } + + if (copyOf.fieldName) + fieldName = NewPoolTString(copyOf.fieldName->c_str()); + if (copyOf.typeName) + typeName = NewPoolTString(copyOf.typeName->c_str()); + } + + + void buildMangledName(TString&) const; + + TBasicType basicType : 8; + int vectorSize : 4; // 1 means either scalar or 1-component vector; see vector1 to disambiguate. + int matrixCols : 4; + int matrixRows : 4; + bool vector1 : 1; // Backward-compatible tracking of a 1-component vector distinguished from a scalar. + // GLSL 4.5 never has a 1-component vector; so this will always be false until such + // functionality is added. + // HLSL does have a 1-component vectors, so this will be true to disambiguate + // from a scalar. + bool coopmat : 1; + TQualifier qualifier; + + TArraySizes* arraySizes; // nullptr unless an array; can be shared across types + // A type can't be both a structure (EbtStruct/EbtBlock) and a reference (EbtReference), so + // conserve space by making these a union + union { + TTypeList* structure; // invalid unless this is a struct; can be shared across types + TType *referentType; // invalid unless this is an EbtReference + }; + TString *fieldName; // for structure field names + TString *typeName; // for structure type name + TSampler sampler; + TArraySizes* typeParameters;// nullptr unless a parameterized type; can be shared across types +}; + +} // end namespace glslang + +#endif // _TYPES_INCLUDED_ diff --git a/dep/glslang/glslang/Include/arrays.h b/dep/glslang/glslang/Include/arrays.h new file mode 100644 index 000000000..7f047d9fb --- /dev/null +++ b/dep/glslang/glslang/Include/arrays.h @@ -0,0 +1,341 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Implement types for tracking GLSL arrays, arrays of arrays, etc.
+//
+
+#ifndef _ARRAYS_INCLUDED
+#define _ARRAYS_INCLUDED
+
+#include <algorithm>
+
+namespace glslang {
+
+// This is used to mean there is no size yet (unsized), it is waiting to get a size from somewhere else.
+const int UnsizedArraySize = 0;
+
+class TIntermTyped;
+extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*);
+
+// Specialization constants need both a nominal size and a node that defines
+// the specialization constant being used. Array types are the same when their
+// size and specialization constant nodes are the same.
+struct TArraySize {
+    unsigned int size;
+    TIntermTyped* node; // nullptr means no specialization constant node
+    bool operator==(const TArraySize& rhs) const
+    {
+        if (size != rhs.size)
+            return false;
+        if (node == nullptr || rhs.node == nullptr)
+            return node == rhs.node;
+
+        return SameSpecializationConstants(node, rhs.node);
+    }
+};
+
+//
+// TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
+// It has generic-container semantics, while TArraySizes has array-of-array semantics.
+// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy.
+//
+struct TSmallArrayVector {
+    //
+    // TODO: memory: TSmallArrayVector is intended to be smaller.
+    // Almost all arrays could be handled by two sizes each fitting
+    // in 16 bits, needing a real vector only in the cases where there
+    // are more than 3 sizes or a size needing more than 16 bits.
+    //
+    POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+    TSmallArrayVector() : sizes(nullptr) { }
+    virtual ~TSmallArrayVector() { dealloc(); }
+
+    // For breaking into two non-shared copies, independently modifiable.
+    TSmallArrayVector& operator=(const TSmallArrayVector& from)
+    {
+        if (from.sizes == nullptr)
+            sizes = nullptr;
+        else {
+            alloc();
+            *sizes = *from.sizes;
+        }
+
+        return *this;
+    }
+
+    int size() const
+    {
+        if (sizes == nullptr)
+            return 0;
+        return (int)sizes->size();
+    }
+
+    unsigned int frontSize() const
+    {
+        assert(sizes != nullptr && sizes->size() > 0);
+        return sizes->front().size;
+    }
+
+    TIntermTyped* frontNode() const
+    {
+        assert(sizes != nullptr && sizes->size() > 0);
+        return sizes->front().node;
+    }
+
+    void changeFront(unsigned int s)
+    {
+        assert(sizes != nullptr);
+        // this should only happen for implicitly sized arrays, not specialization constants
+        assert(sizes->front().node == nullptr);
+        sizes->front().size = s;
+    }
+
+    void push_back(unsigned int e, TIntermTyped* n)
+    {
+        alloc();
+        TArraySize pair = { e, n };
+        sizes->push_back(pair);
+    }
+
+    void push_back(const TSmallArrayVector& newDims)
+    {
+        alloc();
+        sizes->insert(sizes->end(), newDims.sizes->begin(), newDims.sizes->end());
+    }
+
+    void pop_front()
+    {
+        assert(sizes != nullptr && sizes->size() > 0);
+        if (sizes->size() == 1)
+            dealloc();
+        else
+            sizes->erase(sizes->begin());
+    }
+
+    // 'this' should currently not be holding anything, and copyNonFront
+    // will make it hold a copy of all but the first element of rhs.
+    // (This would be useful for making a type that is dereferenced by
+    // one dimension.)
+    void copyNonFront(const TSmallArrayVector& rhs)
+    {
+        assert(sizes == nullptr);
+        if (rhs.size() > 1) {
+            alloc();
+            sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end());
+        }
+    }
+
+    unsigned int getDimSize(int i) const
+    {
+        assert(sizes != nullptr && (int)sizes->size() > i);
+        return (*sizes)[i].size;
+    }
+
+    void setDimSize(int i, unsigned int size) const
+    {
+        assert(sizes != nullptr && (int)sizes->size() > i);
+        assert((*sizes)[i].node == nullptr);
+        (*sizes)[i].size = size;
+    }
+
+    TIntermTyped* getDimNode(int i) const
+    {
+        assert(sizes != nullptr && (int)sizes->size() > i);
+        return (*sizes)[i].node;
+    }
+
+    bool operator==(const TSmallArrayVector& rhs) const
+    {
+        if (sizes == nullptr && rhs.sizes == nullptr)
+            return true;
+        if (sizes == nullptr || rhs.sizes == nullptr)
+            return false;
+        return *sizes == *rhs.sizes;
+    }
+    bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); }
+
+protected:
+    TSmallArrayVector(const TSmallArrayVector&);
+
+    void alloc()
+    {
+        if (sizes == nullptr)
+            sizes = new TVector<TArraySize>;
+    }
+    void dealloc()
+    {
+        delete sizes;
+        sizes = nullptr;
+    }
+
+    TVector<TArraySize>* sizes; // will either hold such a pointer, or in the future, hold the two array sizes
+};
+
+//
+// Represent an array, or array of arrays, to arbitrary depth. This is not
+// done through a hierarchy of types in a type tree, rather all contiguous arrayness
+// in the type hierarchy is localized into this single cumulative object.
+//
+// The arrayness in TType is a pointer, so that it can be non-allocated and zero
+// for the vast majority of types that are non-array types.
+//
+// Order Policy: these are all identical:
+//  - left to right order within a contiguous set of ...[..][..][..]... in the source language
+//  - index order 0, 1, 2, ... within the 'sizes' member below
+//  - outer-most to inner-most
+//
+struct TArraySizes {
+    POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+    TArraySizes() : implicitArraySize(1), variablyIndexed(false) { }
+
+    // For breaking into two non-shared copies, independently modifiable.
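+    // (Editor's illustration of the order policy above: for 'float a[4][7]',
+    //  getNumDims() == 2 with getDimSize(0) == 4, the outer dimension, and
+    //  getDimSize(1) == 7; dereference() drops the outer 4, leaving [7].)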
+ TArraySizes& operator=(const TArraySizes& from) + { + implicitArraySize = from.implicitArraySize; + variablyIndexed = from.variablyIndexed; + sizes = from.sizes; + + return *this; + } + + // translate from array-of-array semantics to container semantics + int getNumDims() const { return sizes.size(); } + int getDimSize(int dim) const { return sizes.getDimSize(dim); } + TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); } + void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); } + int getOuterSize() const { return sizes.frontSize(); } + TIntermTyped* getOuterNode() const { return sizes.frontNode(); } + int getCumulativeSize() const + { + int size = 1; + for (int d = 0; d < sizes.size(); ++d) { + // this only makes sense in paths that have a known array size + assert(sizes.getDimSize(d) != UnsizedArraySize); + size *= sizes.getDimSize(d); + } + return size; + } + void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); } + void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); } + void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); } + void addInnerSize(TArraySize pair) { + sizes.push_back(pair.size, pair.node); + } + void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); } + void changeOuterSize(int s) { sizes.changeFront((unsigned)s); } + int getImplicitSize() const { return implicitArraySize; } + void updateImplicitSize(int s) { implicitArraySize = std::max(implicitArraySize, s); } + bool isInnerUnsized() const + { + for (int d = 1; d < sizes.size(); ++d) { + if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize) + return true; + } + + return false; + } + bool clearInnerUnsized() + { + for (int d = 1; d < sizes.size(); ++d) { + if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize) + setDimSize(d, 1); + } + + return false; + } + bool isInnerSpecialization() const + { + for (int d = 1; d < sizes.size(); ++d) { + if (sizes.getDimNode(d) != nullptr) + return true; + } + + return false; + } + bool isOuterSpecialization() + { + return sizes.getDimNode(0) != nullptr; + } + + bool hasUnsized() const { return getOuterSize() == UnsizedArraySize || isInnerUnsized(); } + bool isSized() const { return getOuterSize() != UnsizedArraySize; } + void dereference() { sizes.pop_front(); } + void copyDereferenced(const TArraySizes& rhs) + { + assert(sizes.size() == 0); + if (rhs.sizes.size() > 1) + sizes.copyNonFront(rhs.sizes); + } + + bool sameInnerArrayness(const TArraySizes& rhs) const + { + if (sizes.size() != rhs.sizes.size()) + return false; + + for (int d = 1; d < sizes.size(); ++d) { + if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) || + sizes.getDimNode(d) != rhs.sizes.getDimNode(d)) + return false; + } + + return true; + } + + void setVariablyIndexed() { variablyIndexed = true; } + bool isVariablyIndexed() const { return variablyIndexed; } + + bool operator==(const TArraySizes& rhs) const { return sizes == rhs.sizes; } + bool operator!=(const TArraySizes& rhs) const { return sizes != rhs.sizes; } + +protected: + TSmallArrayVector sizes; + + TArraySizes(const TArraySizes&); + + // For tracking maximum referenced compile-time constant index. + // Applies only to the outer-most dimension. Potentially becomes + // the implicit size of the array, if not variably indexed and + // otherwise legal. 
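+    // (Editor's illustration: an unsized 'buf[]' whose largest constant index
+    //  observed is 5 can have updateImplicitSize(6) recorded here, letting
+    //  adoptImplicitArraySizes() later size the array explicitly as buf[6].)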
+    int implicitArraySize;
+    bool variablyIndexed; // true if array is indexed with a non-compile-time constant
+};
+
+} // end namespace glslang
+
+#endif // _ARRAYS_INCLUDED
diff --git a/dep/glslang/glslang/Include/glslang_c_interface.h b/dep/glslang/glslang/Include/glslang_c_interface.h
new file mode 100644
index 000000000..50e95b7e8
--- /dev/null
+++ b/dep/glslang/glslang/Include/glslang_c_interface.h
@@ -0,0 +1,233 @@
+/**
+    This code is based on the glslang_c_interface implementation by Viktor Latypov
+**/
+
+/**
+BSD 2-Clause License
+
+Copyright (c) 2019, Viktor Latypov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**/
+
+#ifndef GLSLANG_C_IFACE_H_INCLUDED
+#define GLSLANG_C_IFACE_H_INCLUDED
+
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include "glslang_c_shader_types.h"
+
+typedef struct glslang_shader_s glslang_shader_t;
+typedef struct glslang_program_s glslang_program_t;
+
+/* TLimits counterpart */
+typedef struct glslang_limits_s {
+    bool non_inductive_for_loops;
+    bool while_loops;
+    bool do_while_loops;
+    bool general_uniform_indexing;
+    bool general_attribute_matrix_vector_indexing;
+    bool general_varying_indexing;
+    bool general_sampler_indexing;
+    bool general_variable_indexing;
+    bool general_constant_matrix_vector_indexing;
+} glslang_limits_t;
+
+/* TBuiltInResource counterpart */
+typedef struct glslang_resource_s {
+    int max_lights;
+    int max_clip_planes;
+    int max_texture_units;
+    int max_texture_coords;
+    int max_vertex_attribs;
+    int max_vertex_uniform_components;
+    int max_varying_floats;
+    int max_vertex_texture_image_units;
+    int max_combined_texture_image_units;
+    int max_texture_image_units;
+    int max_fragment_uniform_components;
+    int max_draw_buffers;
+    int max_vertex_uniform_vectors;
+    int max_varying_vectors;
+    int max_fragment_uniform_vectors;
+    int max_vertex_output_vectors;
+    int max_fragment_input_vectors;
+    int min_program_texel_offset;
+    int max_program_texel_offset;
+    int max_clip_distances;
+    int max_compute_work_group_count_x;
+    int max_compute_work_group_count_y;
+    int max_compute_work_group_count_z;
+    int max_compute_work_group_size_x;
+    int max_compute_work_group_size_y;
+    int max_compute_work_group_size_z;
+    int max_compute_uniform_components;
+    int max_compute_texture_image_units;
+    int max_compute_image_uniforms;
+    int max_compute_atomic_counters;
+    int max_compute_atomic_counter_buffers;
+    int max_varying_components;
+    int max_vertex_output_components;
+    int max_geometry_input_components;
+    int max_geometry_output_components;
+    int max_fragment_input_components;
+    int max_image_units;
+    int max_combined_image_units_and_fragment_outputs;
+    int max_combined_shader_output_resources;
+    int max_image_samples;
+    int max_vertex_image_uniforms;
+    int max_tess_control_image_uniforms;
+    int max_tess_evaluation_image_uniforms;
+    int max_geometry_image_uniforms;
+    int max_fragment_image_uniforms;
+    int max_combined_image_uniforms;
+    int max_geometry_texture_image_units;
+    int max_geometry_output_vertices;
+    int max_geometry_total_output_components;
+    int max_geometry_uniform_components;
+    int max_geometry_varying_components;
+    int max_tess_control_input_components;
+    int max_tess_control_output_components;
+    int max_tess_control_texture_image_units;
+    int max_tess_control_uniform_components;
+    int max_tess_control_total_output_components;
+    int max_tess_evaluation_input_components;
+    int max_tess_evaluation_output_components;
+    int max_tess_evaluation_texture_image_units;
+    int max_tess_evaluation_uniform_components;
+    int max_tess_patch_components;
+    int max_patch_vertices;
+    int max_tess_gen_level;
+    int max_viewports;
+    int max_vertex_atomic_counters;
+    int max_tess_control_atomic_counters;
+    int max_tess_evaluation_atomic_counters;
+    int max_geometry_atomic_counters;
+    int max_fragment_atomic_counters;
+    int max_combined_atomic_counters;
+    int max_atomic_counter_bindings;
+    int max_vertex_atomic_counter_buffers;
+    int max_tess_control_atomic_counter_buffers;
+    int max_tess_evaluation_atomic_counter_buffers;
+    int max_geometry_atomic_counter_buffers;
+    int max_fragment_atomic_counter_buffers;
+    int max_combined_atomic_counter_buffers;
+    int max_atomic_counter_buffer_size;
+    int max_transform_feedback_buffers;
+    int max_transform_feedback_interleaved_components;
+    int max_cull_distances;
+    int max_combined_clip_and_cull_distances;
+    int max_samples;
+    int max_mesh_output_vertices_nv;
+    int max_mesh_output_primitives_nv;
+    int max_mesh_work_group_size_x_nv;
+    int max_mesh_work_group_size_y_nv;
+    int max_mesh_work_group_size_z_nv;
+    int max_task_work_group_size_x_nv;
+    int max_task_work_group_size_y_nv;
+    int max_task_work_group_size_z_nv;
+    int max_mesh_view_count_nv;
+    int maxDualSourceDrawBuffersEXT;
+
+    glslang_limits_t limits;
+} glslang_resource_t;
+
+typedef struct glslang_input_s {
+    glslang_source_t language;
+    glslang_stage_t stage;
+    glslang_client_t client;
+    glslang_target_client_version_t client_version;
+    glslang_target_language_t target_language;
+    glslang_target_language_version_t target_language_version;
+    /** Shader source code */
+    const char* code;
+    int default_version;
+    glslang_profile_t default_profile;
+    int force_default_version_and_profile;
+    int forward_compatible;
+    glslang_messages_t messages;
+    const glslang_resource_t* resource;
+} glslang_input_t;
+
+/* Inclusion result structure allocated by C include_local/include_system callbacks */
+typedef struct glsl_include_result_s {
+    /* Header file name or NULL if inclusion failed */
+    const char* header_name;
+
+    /* Header contents or NULL */
+    const char* header_data;
+    size_t header_length;
+
+} glsl_include_result_t;
+
+/* Callback for local file inclusion */
+typedef glsl_include_result_t* (*glsl_include_local_func)(void* ctx, const char* header_name, const char* includer_name,
+                                                          size_t include_depth);
+
+/* Callback for system file inclusion */
+typedef glsl_include_result_t* (*glsl_include_system_func)(void* ctx, const char* header_name,
+                                                           const char* includer_name, size_t include_depth);
+
+/* Callback for include result destruction */
+typedef int (*glsl_free_include_result_func)(void* ctx, glsl_include_result_t* result);
+
+/* Collection of callbacks for GLSL preprocessor */
+typedef struct glsl_include_callbacks_s {
+    glsl_include_system_func include_system;
+    glsl_include_local_func include_local;
+    glsl_free_include_result_func free_include_result;
+} glsl_include_callbacks_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int glslang_initialize_process();
+void glslang_finalize_process();
+
+glslang_shader_t* glslang_shader_create(const glslang_input_t* input);
+void glslang_shader_delete(glslang_shader_t* shader);
+int glslang_shader_preprocess(glslang_shader_t* shader, const glslang_input_t* input);
+int glslang_shader_parse(glslang_shader_t* shader, const glslang_input_t* input);
+const char* glslang_shader_get_preprocessed_code(glslang_shader_t* shader);
+const char* glslang_shader_get_info_log(glslang_shader_t* shader);
+const char* glslang_shader_get_info_debug_log(glslang_shader_t* shader);
+
+glslang_program_t* glslang_program_create();
+void glslang_program_delete(glslang_program_t* program);
+void glslang_program_add_shader(glslang_program_t* program, glslang_shader_t* shader);
+int glslang_program_link(glslang_program_t* program, int messages); // glslang_messages_t
+void glslang_program_SPIRV_generate(glslang_program_t* program, glslang_stage_t stage);
+size_t glslang_program_SPIRV_get_size(glslang_program_t* program);
+void glslang_program_SPIRV_get(glslang_program_t* program, unsigned int*);
+unsigned int* glslang_program_SPIRV_get_ptr(glslang_program_t* program);
+const char* glslang_program_SPIRV_get_messages(glslang_program_t* program);
+const char* glslang_program_get_info_log(glslang_program_t* program);
+const char* glslang_program_get_info_debug_log(glslang_program_t* program);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GLSLANG_C_IFACE_H_INCLUDED */
diff --git a/dep/glslang/glslang/Include/glslang_c_shader_types.h b/dep/glslang/glslang/Include/glslang_c_shader_types.h
new file mode 100644
index 000000000..15bf1e14f
--- /dev/null
+++ b/dep/glslang/glslang/Include/glslang_c_shader_types.h
@@ -0,0 +1,183 @@
+/**
+    This code is based on the glslang_c_interface implementation by Viktor Latypov
+**/
+
+/**
+BSD 2-Clause License
+
+Copyright (c) 2019, Viktor Latypov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**/ + +#ifndef C_SHADER_TYPES_H_INCLUDED +#define C_SHADER_TYPES_H_INCLUDED + +#define LAST_ELEMENT_MARKER(x) x + +/* EShLanguage counterpart */ +typedef enum { + GLSLANG_STAGE_VERTEX, + GLSLANG_STAGE_TESSCONTROL, + GLSLANG_STAGE_TESSEVALUATION, + GLSLANG_STAGE_GEOMETRY, + GLSLANG_STAGE_FRAGMENT, + GLSLANG_STAGE_COMPUTE, + GLSLANG_STAGE_RAYGEN_NV, + GLSLANG_STAGE_INTERSECT_NV, + GLSLANG_STAGE_ANYHIT_NV, + GLSLANG_STAGE_CLOSESTHIT_NV, + GLSLANG_STAGE_MISS_NV, + GLSLANG_STAGE_CALLABLE_NV, + GLSLANG_STAGE_TASK_NV, + GLSLANG_STAGE_MESH_NV, + LAST_ELEMENT_MARKER(GLSLANG_STAGE_COUNT), +} glslang_stage_t; // would be better as stage, but this is ancient now + +/* EShLanguageMask counterpart */ +typedef enum { + GLSLANG_STAGE_VERTEX_MASK = (1 << GLSLANG_STAGE_VERTEX), + GLSLANG_STAGE_TESSCONTROL_MASK = (1 << GLSLANG_STAGE_TESSCONTROL), + GLSLANG_STAGE_TESSEVALUATION_MASK = (1 << GLSLANG_STAGE_TESSEVALUATION), + GLSLANG_STAGE_GEOMETRY_MASK = (1 << GLSLANG_STAGE_GEOMETRY), + GLSLANG_STAGE_FRAGMENT_MASK = (1 << GLSLANG_STAGE_FRAGMENT), + GLSLANG_STAGE_COMPUTE_MASK = (1 << GLSLANG_STAGE_COMPUTE), + GLSLANG_STAGE_RAYGEN_NV_MASK = (1 << GLSLANG_STAGE_RAYGEN_NV), + GLSLANG_STAGE_INTERSECT_NV_MASK = (1 << GLSLANG_STAGE_INTERSECT_NV), + GLSLANG_STAGE_ANYHIT_NV_MASK = (1 << GLSLANG_STAGE_ANYHIT_NV), + GLSLANG_STAGE_CLOSESTHIT_NV_MASK = (1 << GLSLANG_STAGE_CLOSESTHIT_NV), + GLSLANG_STAGE_MISS_NV_MASK = (1 << GLSLANG_STAGE_MISS_NV), + GLSLANG_STAGE_CALLABLE_NV_MASK = (1 << GLSLANG_STAGE_CALLABLE_NV), + GLSLANG_STAGE_TASK_NV_MASK = (1 << GLSLANG_STAGE_TASK_NV), + GLSLANG_STAGE_MESH_NV_MASK = (1 << GLSLANG_STAGE_MESH_NV), + LAST_ELEMENT_MARKER(GLSLANG_STAGE_MASK_COUNT), +} glslang_stage_mask_t; + +/* EShSource counterpart */ +typedef enum { + GLSLANG_SOURCE_NONE, + GLSLANG_SOURCE_GLSL, + GLSLANG_SOURCE_HLSL, + LAST_ELEMENT_MARKER(GLSLANG_SOURCE_COUNT), +} glslang_source_t; + +/* EShClient counterpart */ +typedef enum { + GLSLANG_CLIENT_NONE, + GLSLANG_CLIENT_VULKAN, + GLSLANG_CLIENT_OPENGL, + LAST_ELEMENT_MARKER(GLSLANG_CLIENT_COUNT), +} glslang_client_t; + +/* EShTargetLanguage counterpart */ +typedef enum { + GLSLANG_TARGET_NONE, + GLSLANG_TARGET_SPV, + LAST_ELEMENT_MARKER(GLSLANG_TARGET_COUNT), +} glslang_target_language_t; + +/* SH_TARGET_ClientVersion counterpart */ +typedef enum { + GLSLANG_TARGET_VULKAN_1_0 = (1 << 22), + GLSLANG_TARGET_VULKAN_1_1 = (1 << 22) | (1 << 12), + GLSLANG_TARGET_OPENGL_450 = 450, + LAST_ELEMENT_MARKER(GLSLANG_TARGET_CLIENT_VERSION_COUNT), +} glslang_target_client_version_t; + +/* SH_TARGET_LanguageVersion counterpart */ +typedef enum { + GLSLANG_TARGET_SPV_1_0 = (1 << 16), + GLSLANG_TARGET_SPV_1_1 = (1 << 16) | (1 << 8), + GLSLANG_TARGET_SPV_1_2 = (1 << 16) | (2 << 8), + GLSLANG_TARGET_SPV_1_3 = (1 << 16) | (3 << 8), + GLSLANG_TARGET_SPV_1_4 = (1 << 16) | (4 << 8), + GLSLANG_TARGET_SPV_1_5 = (1 << 16) | (5 << 8), + LAST_ELEMENT_MARKER(GLSLANG_TARGET_LANGUAGE_VERSION_COUNT), +} glslang_target_language_version_t; + +/* EShExecutable counterpart */ +typedef enum { GLSLANG_EX_VERTEX_FRAGMENT, GLSLANG_EX_FRAGMENT } glslang_executable_t; + +/* EShOptimizationLevel counterpart */ +typedef enum { + GLSLANG_OPT_NO_GENERATION, + GLSLANG_OPT_NONE, + GLSLANG_OPT_SIMPLE, + GLSLANG_OPT_FULL, + LAST_ELEMENT_MARKER(GLSLANG_OPT_LEVEL_COUNT), +} glslang_optimization_level_t; + +/* EShTextureSamplerTransformMode counterpart */ +typedef enum { + GLSLANG_TEX_SAMP_TRANS_KEEP, + GLSLANG_TEX_SAMP_TRANS_UPGRADE_TEXTURE_REMOVE_SAMPLER, + LAST_ELEMENT_MARKER(GLSLANG_TEX_SAMP_TRANS_COUNT), +} 
glslang_texture_sampler_transform_mode_t; + +/* EShMessages counterpart */ +typedef enum { + GLSLANG_MSG_DEFAULT_BIT = 0, + GLSLANG_MSG_RELAXED_ERRORS_BIT = (1 << 0), + GLSLANG_MSG_SUPPRESS_WARNINGS_BIT = (1 << 1), + GLSLANG_MSG_AST_BIT = (1 << 2), + GLSLANG_MSG_SPV_RULES_BIT = (1 << 3), + GLSLANG_MSG_VULKAN_RULES_BIT = (1 << 4), + GLSLANG_MSG_ONLY_PREPROCESSOR_BIT = (1 << 5), + GLSLANG_MSG_READ_HLSL_BIT = (1 << 6), + GLSLANG_MSG_CASCADING_ERRORS_BIT = (1 << 7), + GLSLANG_MSG_KEEP_UNCALLED_BIT = (1 << 8), + GLSLANG_MSG_HLSL_OFFSETS_BIT = (1 << 9), + GLSLANG_MSG_DEBUG_INFO_BIT = (1 << 10), + GLSLANG_MSG_HLSL_ENABLE_16BIT_TYPES_BIT = (1 << 11), + GLSLANG_MSG_HLSL_LEGALIZATION_BIT = (1 << 12), + GLSLANG_MSG_HLSL_DX9_COMPATIBLE_BIT = (1 << 13), + GLSLANG_MSG_BUILTIN_SYMBOL_TABLE_BIT = (1 << 14), + LAST_ELEMENT_MARKER(GLSLANG_MSG_COUNT), +} glslang_messages_t; + +/* EShReflectionOptions counterpart */ +typedef enum { + GLSLANG_REFLECTION_DEFAULT_BIT = 0, + GLSLANG_REFLECTION_STRICT_ARRAY_SUFFIX_BIT = (1 << 0), + GLSLANG_REFLECTION_BASIC_ARRAY_SUFFIX_BIT = (1 << 1), + GLSLANG_REFLECTION_INTERMEDIATE_IOO_BIT = (1 << 2), + GLSLANG_REFLECTION_SEPARATE_BUFFERS_BIT = (1 << 3), + GLSLANG_REFLECTION_ALL_BLOCK_VARIABLES_BIT = (1 << 4), + GLSLANG_REFLECTION_UNWRAP_IO_BLOCKS_BIT = (1 << 5), + GLSLANG_REFLECTION_SHARED_STD140_BLOCKS_BIT = (1 << 6), + LAST_ELEMENT_MARKER(GLSLANG_REFLECTION_COUNT), +} glslang_reflection_options_t; + +/* EProfile counterpart (from Versions.h) */ +typedef enum { + GLSLANG_BAD_PROFILE = 0, + GLSLANG_NO_PROFILE = (1 << 0), + GLSLANG_CORE_PROFILE = (1 << 1), + GLSLANG_COMPATIBILITY_PROFILE = (1 << 2), + GLSLANG_ES_PROFILE = (1 << 3), + LAST_ELEMENT_MARKER(GLSLANG_PROFILE_COUNT), +} glslang_profile_t; + +#undef LAST_ELEMENT_MARKER + +#endif diff --git a/dep/glslang/glslang/Include/intermediate.h b/dep/glslang/glslang/Include/intermediate.h new file mode 100644 index 000000000..bf12fcf8f --- /dev/null +++ b/dep/glslang/glslang/Include/intermediate.h @@ -0,0 +1,1805 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2016 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
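One detail worth noting about the version enums above: they are not opaque tokens but the same packed version words Vulkan and SPIR-V themselves use, i.e. (major << 22) | (minor << 12) for the Vulkan client version and (major << 16) | (minor << 8) for the SPIR-V language version. Two illustrative compile-time checks (not part of the header) make the encoding visible:

    // Hypothetical checks, shown only to document the packing above.
    static_assert(GLSLANG_TARGET_VULKAN_1_1 == ((1 << 22) | (1 << 12)),
                  "same packing as VK_MAKE_VERSION(1, 1, 0)");
    static_assert(GLSLANG_TARGET_SPV_1_4 == ((1 << 16) | (4 << 8)),
                  "same packing as the SPIR-V header's version word");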
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Definition of the in-memory high-level intermediate representation +// of shaders. This is a tree that the parser creates. +// +// Nodes in the tree are defined as a hierarchy of classes derived from +// TIntermNode. Each is a node in a tree. There is no preset branching factor; +// each node can have its own type of list of children. +// + +#ifndef __INTERMEDIATE_H +#define __INTERMEDIATE_H + +#if defined(_MSC_VER) && _MSC_VER >= 1900 + #pragma warning(disable : 4464) // relative include path contains '..' + #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted +#endif + +#include "../Include/Common.h" +#include "../Include/Types.h" +#include "../Include/ConstantUnion.h" + +namespace glslang { + +class TIntermediate; + +// +// Operators used by the high-level (parse tree) representation. +// +enum TOperator { + EOpNull, // if in a node, should only mean a node is still being built + EOpSequence, // denotes a list of statements, or parameters, etc. + EOpLinkerObjects, // for aggregate node of objects the linker may need, if not referenced by the rest of the AST + EOpFunctionCall, + EOpFunction, // For function definition + EOpParameters, // an aggregate listing the parameters to a function + + // + // Unary operators + // + + EOpNegative, + EOpLogicalNot, + EOpVectorLogicalNot, + EOpBitwiseNot, + + EOpPostIncrement, + EOpPostDecrement, + EOpPreIncrement, + EOpPreDecrement, + + EOpCopyObject, + + // (u)int* -> bool + EOpConvInt8ToBool, + EOpConvUint8ToBool, + EOpConvInt16ToBool, + EOpConvUint16ToBool, + EOpConvIntToBool, + EOpConvUintToBool, + EOpConvInt64ToBool, + EOpConvUint64ToBool, + + // float* -> bool + EOpConvFloat16ToBool, + EOpConvFloatToBool, + EOpConvDoubleToBool, + + // bool -> (u)int* + EOpConvBoolToInt8, + EOpConvBoolToUint8, + EOpConvBoolToInt16, + EOpConvBoolToUint16, + EOpConvBoolToInt, + EOpConvBoolToUint, + EOpConvBoolToInt64, + EOpConvBoolToUint64, + + // bool -> float* + EOpConvBoolToFloat16, + EOpConvBoolToFloat, + EOpConvBoolToDouble, + + // int8_t -> (u)int* + EOpConvInt8ToInt16, + EOpConvInt8ToInt, + EOpConvInt8ToInt64, + EOpConvInt8ToUint8, + EOpConvInt8ToUint16, + EOpConvInt8ToUint, + EOpConvInt8ToUint64, + + // uint8_t -> (u)int* + EOpConvUint8ToInt8, + EOpConvUint8ToInt16, + EOpConvUint8ToInt, + EOpConvUint8ToInt64, + EOpConvUint8ToUint16, + EOpConvUint8ToUint, + EOpConvUint8ToUint64, + + // int8_t -> float* + EOpConvInt8ToFloat16, + EOpConvInt8ToFloat, + EOpConvInt8ToDouble, + + // uint8_t -> float* + EOpConvUint8ToFloat16, + EOpConvUint8ToFloat, + EOpConvUint8ToDouble, + + // int16_t -> (u)int* + EOpConvInt16ToInt8, + EOpConvInt16ToInt, + EOpConvInt16ToInt64, + EOpConvInt16ToUint8, + EOpConvInt16ToUint16, + EOpConvInt16ToUint, + EOpConvInt16ToUint64, + + // uint16_t -> (u)int* + EOpConvUint16ToInt8, + EOpConvUint16ToInt16, + EOpConvUint16ToInt, + EOpConvUint16ToInt64, + EOpConvUint16ToUint8, + EOpConvUint16ToUint, + EOpConvUint16ToUint64, + + // int16_t ->
float* + EOpConvInt16ToFloat16, + EOpConvInt16ToFloat, + EOpConvInt16ToDouble, + + // uint16_t -> float* + EOpConvUint16ToFloat16, + EOpConvUint16ToFloat, + EOpConvUint16ToDouble, + + // int32_t -> (u)int* + EOpConvIntToInt8, + EOpConvIntToInt16, + EOpConvIntToInt64, + EOpConvIntToUint8, + EOpConvIntToUint16, + EOpConvIntToUint, + EOpConvIntToUint64, + + // uint32_t -> (u)int* + EOpConvUintToInt8, + EOpConvUintToInt16, + EOpConvUintToInt, + EOpConvUintToInt64, + EOpConvUintToUint8, + EOpConvUintToUint16, + EOpConvUintToUint64, + + // int32_t -> float* + EOpConvIntToFloat16, + EOpConvIntToFloat, + EOpConvIntToDouble, + + // uint32_t -> float* + EOpConvUintToFloat16, + EOpConvUintToFloat, + EOpConvUintToDouble, + + // int64_t -> (u)int* + EOpConvInt64ToInt8, + EOpConvInt64ToInt16, + EOpConvInt64ToInt, + EOpConvInt64ToUint8, + EOpConvInt64ToUint16, + EOpConvInt64ToUint, + EOpConvInt64ToUint64, + + // uint64_t -> (u)int* + EOpConvUint64ToInt8, + EOpConvUint64ToInt16, + EOpConvUint64ToInt, + EOpConvUint64ToInt64, + EOpConvUint64ToUint8, + EOpConvUint64ToUint16, + EOpConvUint64ToUint, + + // int64_t -> float* + EOpConvInt64ToFloat16, + EOpConvInt64ToFloat, + EOpConvInt64ToDouble, + + // uint64_t -> float* + EOpConvUint64ToFloat16, + EOpConvUint64ToFloat, + EOpConvUint64ToDouble, + + // float16_t -> (u)int* + EOpConvFloat16ToInt8, + EOpConvFloat16ToInt16, + EOpConvFloat16ToInt, + EOpConvFloat16ToInt64, + EOpConvFloat16ToUint8, + EOpConvFloat16ToUint16, + EOpConvFloat16ToUint, + EOpConvFloat16ToUint64, + + // float16_t -> float* + EOpConvFloat16ToFloat, + EOpConvFloat16ToDouble, + + // float -> (u)int* + EOpConvFloatToInt8, + EOpConvFloatToInt16, + EOpConvFloatToInt, + EOpConvFloatToInt64, + EOpConvFloatToUint8, + EOpConvFloatToUint16, + EOpConvFloatToUint, + EOpConvFloatToUint64, + + // float -> float* + EOpConvFloatToFloat16, + EOpConvFloatToDouble, + + // float64_t -> (u)int* + EOpConvDoubleToInt8, + EOpConvDoubleToInt16, + EOpConvDoubleToInt, + EOpConvDoubleToInt64, + EOpConvDoubleToUint8, + EOpConvDoubleToUint16, + EOpConvDoubleToUint, + EOpConvDoubleToUint64, + + // float64_t -> float* + EOpConvDoubleToFloat16, + EOpConvDoubleToFloat, + + // uint64_t <-> pointer + EOpConvUint64ToPtr, + EOpConvPtrToUint64, + + // uvec2 <-> pointer + EOpConvUvec2ToPtr, + EOpConvPtrToUvec2, + + // + // binary operations + // + + EOpAdd, + EOpSub, + EOpMul, + EOpDiv, + EOpMod, + EOpRightShift, + EOpLeftShift, + EOpAnd, + EOpInclusiveOr, + EOpExclusiveOr, + EOpEqual, + EOpNotEqual, + EOpVectorEqual, + EOpVectorNotEqual, + EOpLessThan, + EOpGreaterThan, + EOpLessThanEqual, + EOpGreaterThanEqual, + EOpComma, + + EOpVectorTimesScalar, + EOpVectorTimesMatrix, + EOpMatrixTimesVector, + EOpMatrixTimesScalar, + + EOpLogicalOr, + EOpLogicalXor, + EOpLogicalAnd, + + EOpIndexDirect, + EOpIndexIndirect, + EOpIndexDirectStruct, + + EOpVectorSwizzle, + + EOpMethod, + EOpScoping, + + // + // Built-in functions mapped to operators + // + + EOpRadians, + EOpDegrees, + EOpSin, + EOpCos, + EOpTan, + EOpAsin, + EOpAcos, + EOpAtan, + EOpSinh, + EOpCosh, + EOpTanh, + EOpAsinh, + EOpAcosh, + EOpAtanh, + + EOpPow, + EOpExp, + EOpLog, + EOpExp2, + EOpLog2, + EOpSqrt, + EOpInverseSqrt, + + EOpAbs, + EOpSign, + EOpFloor, + EOpTrunc, + EOpRound, + EOpRoundEven, + EOpCeil, + EOpFract, + EOpModf, + EOpMin, + EOpMax, + EOpClamp, + EOpMix, + EOpStep, + EOpSmoothStep, + + EOpIsNan, + EOpIsInf, + + EOpFma, + + EOpFrexp, + EOpLdexp, + + EOpFloatBitsToInt, + EOpFloatBitsToUint, + EOpIntBitsToFloat, + EOpUintBitsToFloat, + EOpDoubleBitsToInt64, +
EOpDoubleBitsToUint64, + EOpInt64BitsToDouble, + EOpUint64BitsToDouble, + EOpFloat16BitsToInt16, + EOpFloat16BitsToUint16, + EOpInt16BitsToFloat16, + EOpUint16BitsToFloat16, + EOpPackSnorm2x16, + EOpUnpackSnorm2x16, + EOpPackUnorm2x16, + EOpUnpackUnorm2x16, + EOpPackSnorm4x8, + EOpUnpackSnorm4x8, + EOpPackUnorm4x8, + EOpUnpackUnorm4x8, + EOpPackHalf2x16, + EOpUnpackHalf2x16, + EOpPackDouble2x32, + EOpUnpackDouble2x32, + EOpPackInt2x32, + EOpUnpackInt2x32, + EOpPackUint2x32, + EOpUnpackUint2x32, + EOpPackFloat2x16, + EOpUnpackFloat2x16, + EOpPackInt2x16, + EOpUnpackInt2x16, + EOpPackUint2x16, + EOpUnpackUint2x16, + EOpPackInt4x16, + EOpUnpackInt4x16, + EOpPackUint4x16, + EOpUnpackUint4x16, + EOpPack16, + EOpPack32, + EOpPack64, + EOpUnpack32, + EOpUnpack16, + EOpUnpack8, + + EOpLength, + EOpDistance, + EOpDot, + EOpCross, + EOpNormalize, + EOpFaceForward, + EOpReflect, + EOpRefract, + + EOpMin3, + EOpMax3, + EOpMid3, + + EOpDPdx, // Fragment only + EOpDPdy, // Fragment only + EOpFwidth, // Fragment only + EOpDPdxFine, // Fragment only + EOpDPdyFine, // Fragment only + EOpFwidthFine, // Fragment only + EOpDPdxCoarse, // Fragment only + EOpDPdyCoarse, // Fragment only + EOpFwidthCoarse, // Fragment only + + EOpInterpolateAtCentroid, // Fragment only + EOpInterpolateAtSample, // Fragment only + EOpInterpolateAtOffset, // Fragment only + EOpInterpolateAtVertex, + + EOpMatrixTimesMatrix, + EOpOuterProduct, + EOpDeterminant, + EOpMatrixInverse, + EOpTranspose, + + EOpFtransform, + + EOpNoise, + + EOpEmitVertex, // geometry only + EOpEndPrimitive, // geometry only + EOpEmitStreamVertex, // geometry only + EOpEndStreamPrimitive, // geometry only + + EOpBarrier, + EOpMemoryBarrier, + EOpMemoryBarrierAtomicCounter, + EOpMemoryBarrierBuffer, + EOpMemoryBarrierImage, + EOpMemoryBarrierShared, // compute only + EOpGroupMemoryBarrier, // compute only + + EOpBallot, + EOpReadInvocation, + EOpReadFirstInvocation, + + EOpAnyInvocation, + EOpAllInvocations, + EOpAllInvocationsEqual, + + EOpSubgroupGuardStart, + EOpSubgroupBarrier, + EOpSubgroupMemoryBarrier, + EOpSubgroupMemoryBarrierBuffer, + EOpSubgroupMemoryBarrierImage, + EOpSubgroupMemoryBarrierShared, // compute only + EOpSubgroupElect, + EOpSubgroupAll, + EOpSubgroupAny, + EOpSubgroupAllEqual, + EOpSubgroupBroadcast, + EOpSubgroupBroadcastFirst, + EOpSubgroupBallot, + EOpSubgroupInverseBallot, + EOpSubgroupBallotBitExtract, + EOpSubgroupBallotBitCount, + EOpSubgroupBallotInclusiveBitCount, + EOpSubgroupBallotExclusiveBitCount, + EOpSubgroupBallotFindLSB, + EOpSubgroupBallotFindMSB, + EOpSubgroupShuffle, + EOpSubgroupShuffleXor, + EOpSubgroupShuffleUp, + EOpSubgroupShuffleDown, + EOpSubgroupAdd, + EOpSubgroupMul, + EOpSubgroupMin, + EOpSubgroupMax, + EOpSubgroupAnd, + EOpSubgroupOr, + EOpSubgroupXor, + EOpSubgroupInclusiveAdd, + EOpSubgroupInclusiveMul, + EOpSubgroupInclusiveMin, + EOpSubgroupInclusiveMax, + EOpSubgroupInclusiveAnd, + EOpSubgroupInclusiveOr, + EOpSubgroupInclusiveXor, + EOpSubgroupExclusiveAdd, + EOpSubgroupExclusiveMul, + EOpSubgroupExclusiveMin, + EOpSubgroupExclusiveMax, + EOpSubgroupExclusiveAnd, + EOpSubgroupExclusiveOr, + EOpSubgroupExclusiveXor, + EOpSubgroupClusteredAdd, + EOpSubgroupClusteredMul, + EOpSubgroupClusteredMin, + EOpSubgroupClusteredMax, + EOpSubgroupClusteredAnd, + EOpSubgroupClusteredOr, + EOpSubgroupClusteredXor, + EOpSubgroupQuadBroadcast, + EOpSubgroupQuadSwapHorizontal, + EOpSubgroupQuadSwapVertical, + EOpSubgroupQuadSwapDiagonal, + + EOpSubgroupPartition, + EOpSubgroupPartitionedAdd, + 
EOpSubgroupPartitionedMul, + EOpSubgroupPartitionedMin, + EOpSubgroupPartitionedMax, + EOpSubgroupPartitionedAnd, + EOpSubgroupPartitionedOr, + EOpSubgroupPartitionedXor, + EOpSubgroupPartitionedInclusiveAdd, + EOpSubgroupPartitionedInclusiveMul, + EOpSubgroupPartitionedInclusiveMin, + EOpSubgroupPartitionedInclusiveMax, + EOpSubgroupPartitionedInclusiveAnd, + EOpSubgroupPartitionedInclusiveOr, + EOpSubgroupPartitionedInclusiveXor, + EOpSubgroupPartitionedExclusiveAdd, + EOpSubgroupPartitionedExclusiveMul, + EOpSubgroupPartitionedExclusiveMin, + EOpSubgroupPartitionedExclusiveMax, + EOpSubgroupPartitionedExclusiveAnd, + EOpSubgroupPartitionedExclusiveOr, + EOpSubgroupPartitionedExclusiveXor, + + EOpSubgroupGuardStop, + + EOpMinInvocations, + EOpMaxInvocations, + EOpAddInvocations, + EOpMinInvocationsNonUniform, + EOpMaxInvocationsNonUniform, + EOpAddInvocationsNonUniform, + EOpMinInvocationsInclusiveScan, + EOpMaxInvocationsInclusiveScan, + EOpAddInvocationsInclusiveScan, + EOpMinInvocationsInclusiveScanNonUniform, + EOpMaxInvocationsInclusiveScanNonUniform, + EOpAddInvocationsInclusiveScanNonUniform, + EOpMinInvocationsExclusiveScan, + EOpMaxInvocationsExclusiveScan, + EOpAddInvocationsExclusiveScan, + EOpMinInvocationsExclusiveScanNonUniform, + EOpMaxInvocationsExclusiveScanNonUniform, + EOpAddInvocationsExclusiveScanNonUniform, + EOpSwizzleInvocations, + EOpSwizzleInvocationsMasked, + EOpWriteInvocation, + EOpMbcnt, + + EOpCubeFaceIndex, + EOpCubeFaceCoord, + EOpTime, + + EOpAtomicAdd, + EOpAtomicMin, + EOpAtomicMax, + EOpAtomicAnd, + EOpAtomicOr, + EOpAtomicXor, + EOpAtomicExchange, + EOpAtomicCompSwap, + EOpAtomicLoad, + EOpAtomicStore, + + EOpAtomicCounterIncrement, // results in pre-increment value + EOpAtomicCounterDecrement, // results in post-decrement value + EOpAtomicCounter, + EOpAtomicCounterAdd, + EOpAtomicCounterSubtract, + EOpAtomicCounterMin, + EOpAtomicCounterMax, + EOpAtomicCounterAnd, + EOpAtomicCounterOr, + EOpAtomicCounterXor, + EOpAtomicCounterExchange, + EOpAtomicCounterCompSwap, + + EOpAny, + EOpAll, + + EOpCooperativeMatrixLoad, + EOpCooperativeMatrixStore, + EOpCooperativeMatrixMulAdd, + + EOpBeginInvocationInterlock, // Fragment only + EOpEndInvocationInterlock, // Fragment only + + EOpIsHelperInvocation, + + EOpDebugPrintf, + + // + // Branch + // + + EOpKill, // Fragment only + EOpReturn, + EOpBreak, + EOpContinue, + EOpCase, + EOpDefault, + EOpDemote, // Fragment only + + // + // Constructors + // + + EOpConstructGuardStart, + EOpConstructInt, // these first scalar forms also identify what implicit conversion is needed + EOpConstructUint, + EOpConstructInt8, + EOpConstructUint8, + EOpConstructInt16, + EOpConstructUint16, + EOpConstructInt64, + EOpConstructUint64, + EOpConstructBool, + EOpConstructFloat, + EOpConstructDouble, + // Keep vector and matrix constructors in a consistent relative order for + // TParseContext::constructBuiltIn, which converts between 8/16/32 bit + // vector constructors + EOpConstructVec2, + EOpConstructVec3, + EOpConstructVec4, + EOpConstructMat2x2, + EOpConstructMat2x3, + EOpConstructMat2x4, + EOpConstructMat3x2, + EOpConstructMat3x3, + EOpConstructMat3x4, + EOpConstructMat4x2, + EOpConstructMat4x3, + EOpConstructMat4x4, + EOpConstructDVec2, + EOpConstructDVec3, + EOpConstructDVec4, + EOpConstructBVec2, + EOpConstructBVec3, + EOpConstructBVec4, + EOpConstructI8Vec2, + EOpConstructI8Vec3, + EOpConstructI8Vec4, + EOpConstructU8Vec2, + EOpConstructU8Vec3, + EOpConstructU8Vec4, + EOpConstructI16Vec2, + EOpConstructI16Vec3, + 
EOpConstructI16Vec4, + EOpConstructU16Vec2, + EOpConstructU16Vec3, + EOpConstructU16Vec4, + EOpConstructIVec2, + EOpConstructIVec3, + EOpConstructIVec4, + EOpConstructUVec2, + EOpConstructUVec3, + EOpConstructUVec4, + EOpConstructI64Vec2, + EOpConstructI64Vec3, + EOpConstructI64Vec4, + EOpConstructU64Vec2, + EOpConstructU64Vec3, + EOpConstructU64Vec4, + EOpConstructDMat2x2, + EOpConstructDMat2x3, + EOpConstructDMat2x4, + EOpConstructDMat3x2, + EOpConstructDMat3x3, + EOpConstructDMat3x4, + EOpConstructDMat4x2, + EOpConstructDMat4x3, + EOpConstructDMat4x4, + EOpConstructIMat2x2, + EOpConstructIMat2x3, + EOpConstructIMat2x4, + EOpConstructIMat3x2, + EOpConstructIMat3x3, + EOpConstructIMat3x4, + EOpConstructIMat4x2, + EOpConstructIMat4x3, + EOpConstructIMat4x4, + EOpConstructUMat2x2, + EOpConstructUMat2x3, + EOpConstructUMat2x4, + EOpConstructUMat3x2, + EOpConstructUMat3x3, + EOpConstructUMat3x4, + EOpConstructUMat4x2, + EOpConstructUMat4x3, + EOpConstructUMat4x4, + EOpConstructBMat2x2, + EOpConstructBMat2x3, + EOpConstructBMat2x4, + EOpConstructBMat3x2, + EOpConstructBMat3x3, + EOpConstructBMat3x4, + EOpConstructBMat4x2, + EOpConstructBMat4x3, + EOpConstructBMat4x4, + EOpConstructFloat16, + EOpConstructF16Vec2, + EOpConstructF16Vec3, + EOpConstructF16Vec4, + EOpConstructF16Mat2x2, + EOpConstructF16Mat2x3, + EOpConstructF16Mat2x4, + EOpConstructF16Mat3x2, + EOpConstructF16Mat3x3, + EOpConstructF16Mat3x4, + EOpConstructF16Mat4x2, + EOpConstructF16Mat4x3, + EOpConstructF16Mat4x4, + EOpConstructStruct, + EOpConstructTextureSampler, + EOpConstructNonuniform, // expected to be transformed away, not present in final AST + EOpConstructReference, + EOpConstructCooperativeMatrix, + EOpConstructGuardEnd, + + // + // moves + // + + EOpAssign, + EOpAddAssign, + EOpSubAssign, + EOpMulAssign, + EOpVectorTimesMatrixAssign, + EOpVectorTimesScalarAssign, + EOpMatrixTimesScalarAssign, + EOpMatrixTimesMatrixAssign, + EOpDivAssign, + EOpModAssign, + EOpAndAssign, + EOpInclusiveOrAssign, + EOpExclusiveOrAssign, + EOpLeftShiftAssign, + EOpRightShiftAssign, + + // + // Array operators + // + + // Can apply to arrays, vectors, or matrices. + // Can be decomposed to a constant at compile time, but this does not always happen, + // due to link-time effects. So, consumer can expect either a link-time sized or + // run-time sized array. 
+ EOpArrayLength, + + // + // Image operations + // + + EOpImageGuardBegin, + + EOpImageQuerySize, + EOpImageQuerySamples, + EOpImageLoad, + EOpImageStore, + EOpImageLoadLod, + EOpImageStoreLod, + EOpImageAtomicAdd, + EOpImageAtomicMin, + EOpImageAtomicMax, + EOpImageAtomicAnd, + EOpImageAtomicOr, + EOpImageAtomicXor, + EOpImageAtomicExchange, + EOpImageAtomicCompSwap, + EOpImageAtomicLoad, + EOpImageAtomicStore, + + EOpSubpassLoad, + EOpSubpassLoadMS, + EOpSparseImageLoad, + EOpSparseImageLoadLod, + + EOpImageGuardEnd, + + // + // Texture operations + // + + EOpTextureGuardBegin, + + EOpTextureQuerySize, + EOpTextureQueryLod, + EOpTextureQueryLevels, + EOpTextureQuerySamples, + + EOpSamplingGuardBegin, + + EOpTexture, + EOpTextureProj, + EOpTextureLod, + EOpTextureOffset, + EOpTextureFetch, + EOpTextureFetchOffset, + EOpTextureProjOffset, + EOpTextureLodOffset, + EOpTextureProjLod, + EOpTextureProjLodOffset, + EOpTextureGrad, + EOpTextureGradOffset, + EOpTextureProjGrad, + EOpTextureProjGradOffset, + EOpTextureGather, + EOpTextureGatherOffset, + EOpTextureGatherOffsets, + EOpTextureClamp, + EOpTextureOffsetClamp, + EOpTextureGradClamp, + EOpTextureGradOffsetClamp, + EOpTextureGatherLod, + EOpTextureGatherLodOffset, + EOpTextureGatherLodOffsets, + EOpFragmentMaskFetch, + EOpFragmentFetch, + + EOpSparseTextureGuardBegin, + + EOpSparseTexture, + EOpSparseTextureLod, + EOpSparseTextureOffset, + EOpSparseTextureFetch, + EOpSparseTextureFetchOffset, + EOpSparseTextureLodOffset, + EOpSparseTextureGrad, + EOpSparseTextureGradOffset, + EOpSparseTextureGather, + EOpSparseTextureGatherOffset, + EOpSparseTextureGatherOffsets, + EOpSparseTexelsResident, + EOpSparseTextureClamp, + EOpSparseTextureOffsetClamp, + EOpSparseTextureGradClamp, + EOpSparseTextureGradOffsetClamp, + EOpSparseTextureGatherLod, + EOpSparseTextureGatherLodOffset, + EOpSparseTextureGatherLodOffsets, + + EOpSparseTextureGuardEnd, + + EOpImageFootprintGuardBegin, + EOpImageSampleFootprintNV, + EOpImageSampleFootprintClampNV, + EOpImageSampleFootprintLodNV, + EOpImageSampleFootprintGradNV, + EOpImageSampleFootprintGradClampNV, + EOpImageFootprintGuardEnd, + EOpSamplingGuardEnd, + EOpTextureGuardEnd, + + // + // Integer operations + // + + EOpAddCarry, + EOpSubBorrow, + EOpUMulExtended, + EOpIMulExtended, + EOpBitfieldExtract, + EOpBitfieldInsert, + EOpBitFieldReverse, + EOpBitCount, + EOpFindLSB, + EOpFindMSB, + + EOpCountLeadingZeros, + EOpCountTrailingZeros, + EOpAbsDifference, + EOpAddSaturate, + EOpSubSaturate, + EOpAverage, + EOpAverageRounded, + EOpMul32x16, + + EOpTrace, + EOpReportIntersection, + EOpIgnoreIntersection, + EOpTerminateRay, + EOpExecuteCallable, + EOpWritePackedPrimitiveIndices4x8NV, + + // + // GL_EXT_ray_query operations + // + + EOpRayQueryInitialize, + EOpRayQueryTerminate, + EOpRayQueryGenerateIntersection, + EOpRayQueryConfirmIntersection, + EOpRayQueryProceed, + EOpRayQueryGetIntersectionType, + EOpRayQueryGetRayTMin, + EOpRayQueryGetRayFlags, + EOpRayQueryGetIntersectionT, + EOpRayQueryGetIntersectionInstanceCustomIndex, + EOpRayQueryGetIntersectionInstanceId, + EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset, + EOpRayQueryGetIntersectionGeometryIndex, + EOpRayQueryGetIntersectionPrimitiveIndex, + EOpRayQueryGetIntersectionBarycentrics, + EOpRayQueryGetIntersectionFrontFace, + EOpRayQueryGetIntersectionCandidateAABBOpaque, + EOpRayQueryGetIntersectionObjectRayDirection, + EOpRayQueryGetIntersectionObjectRayOrigin, + EOpRayQueryGetWorldRayDirection, + EOpRayQueryGetWorldRayOrigin, + 
EOpRayQueryGetIntersectionObjectToWorld, + EOpRayQueryGetIntersectionWorldToObject, + + // + // HLSL operations + // + + EOpClip, // discard if input value < 0 + EOpIsFinite, + EOpLog10, // base 10 log + EOpRcp, // 1/x + EOpSaturate, // clamp from 0 to 1 + EOpSinCos, // sin and cos in out parameters + EOpGenMul, // mul(x,y) on any of mat/vec/scalars + EOpDst, // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w + EOpInterlockedAdd, // atomic ops, but uses [optional] out arg instead of return + EOpInterlockedAnd, // ... + EOpInterlockedCompareExchange, // ... + EOpInterlockedCompareStore, // ... + EOpInterlockedExchange, // ... + EOpInterlockedMax, // ... + EOpInterlockedMin, // ... + EOpInterlockedOr, // ... + EOpInterlockedXor, // ... + EOpAllMemoryBarrierWithGroupSync, // memory barriers without non-hlsl AST equivalents + EOpDeviceMemoryBarrier, // ... + EOpDeviceMemoryBarrierWithGroupSync, // ... + EOpWorkgroupMemoryBarrier, // ... + EOpWorkgroupMemoryBarrierWithGroupSync, // ... + EOpEvaluateAttributeSnapped, // InterpolateAtOffset with int position on 16x16 grid + EOpF32tof16, // HLSL conversion: half of a PackHalf2x16 + EOpF16tof32, // HLSL conversion: half of an UnpackHalf2x16 + EOpLit, // HLSL lighting coefficient vector + EOpTextureBias, // HLSL texture bias: will be lowered to EOpTexture + EOpAsDouble, // slightly different from EOpUint64BitsToDouble + EOpD3DCOLORtoUBYTE4, // convert and swizzle 4-component color to UBYTE4 range + + EOpMethodSample, // Texture object methods. These are translated to existing + EOpMethodSampleBias, // AST methods, and exist to represent HLSL semantics until that + EOpMethodSampleCmp, // translation is performed. See HlslParseContext::decomposeSampleMethods(). + EOpMethodSampleCmpLevelZero, // ... + EOpMethodSampleGrad, // ... + EOpMethodSampleLevel, // ... + EOpMethodLoad, // ... + EOpMethodGetDimensions, // ... + EOpMethodGetSamplePosition, // ... + EOpMethodGather, // ... + EOpMethodCalculateLevelOfDetail, // ... + EOpMethodCalculateLevelOfDetailUnclamped, // ... + + // Load already defined above for textures + EOpMethodLoad2, // Structure buffer object methods. These are translated to existing + EOpMethodLoad3, // AST methods, and exist to represent HLSL semantics until that + EOpMethodLoad4, // translation is performed. See HlslParseContext::decomposeSampleMethods(). + EOpMethodStore, // ... + EOpMethodStore2, // ... + EOpMethodStore3, // ... + EOpMethodStore4, // ... + EOpMethodIncrementCounter, // ... + EOpMethodDecrementCounter, // ... + // EOpMethodAppend is defined for geo shaders below + EOpMethodConsume, + + // SM5 texture methods + EOpMethodGatherRed, // These are covered under the above EOpMethodSample comment about + EOpMethodGatherGreen, // translation to existing AST opcodes. They exist temporarily + EOpMethodGatherBlue, // because HLSL arguments are slightly different. + EOpMethodGatherAlpha, // ... + EOpMethodGatherCmp, // ... + EOpMethodGatherCmpRed, // ... + EOpMethodGatherCmpGreen, // ... + EOpMethodGatherCmpBlue, // ... + EOpMethodGatherCmpAlpha, // ... + + // geometry methods + EOpMethodAppend, // Geometry shader methods + EOpMethodRestartStrip, // ... + + // matrix + EOpMatrixSwizzle, // select multiple matrix components (non-column) + + // SM6 wave ops + EOpWaveGetLaneCount, // Will decompose to gl_SubgroupSize. + EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID. + EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()). 
+ EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()). + + // Shader Clock Ops + EOpReadClockSubgroupKHR, + EOpReadClockDeviceKHR, +}; + +class TIntermTraverser; +class TIntermOperator; +class TIntermAggregate; +class TIntermUnary; +class TIntermBinary; +class TIntermConstantUnion; +class TIntermSelection; +class TIntermSwitch; +class TIntermBranch; +class TIntermTyped; +class TIntermMethod; +class TIntermSymbol; +class TIntermLoop; + +} // end namespace glslang + +// +// Base class for the tree nodes +// +// (Put outside the glslang namespace, as it's used as part of the external interface.) +// +class TIntermNode { +public: + POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator()) + + TIntermNode() { loc.init(); } + virtual const glslang::TSourceLoc& getLoc() const { return loc; } + virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; } + virtual void traverse(glslang::TIntermTraverser*) = 0; + virtual glslang::TIntermTyped* getAsTyped() { return 0; } + virtual glslang::TIntermOperator* getAsOperator() { return 0; } + virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; } + virtual glslang::TIntermAggregate* getAsAggregate() { return 0; } + virtual glslang::TIntermUnary* getAsUnaryNode() { return 0; } + virtual glslang::TIntermBinary* getAsBinaryNode() { return 0; } + virtual glslang::TIntermSelection* getAsSelectionNode() { return 0; } + virtual glslang::TIntermSwitch* getAsSwitchNode() { return 0; } + virtual glslang::TIntermMethod* getAsMethodNode() { return 0; } + virtual glslang::TIntermSymbol* getAsSymbolNode() { return 0; } + virtual glslang::TIntermBranch* getAsBranchNode() { return 0; } + virtual glslang::TIntermLoop* getAsLoopNode() { return 0; } + + virtual const glslang::TIntermTyped* getAsTyped() const { return 0; } + virtual const glslang::TIntermOperator* getAsOperator() const { return 0; } + virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; } + virtual const glslang::TIntermAggregate* getAsAggregate() const { return 0; } + virtual const glslang::TIntermUnary* getAsUnaryNode() const { return 0; } + virtual const glslang::TIntermBinary* getAsBinaryNode() const { return 0; } + virtual const glslang::TIntermSelection* getAsSelectionNode() const { return 0; } + virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return 0; } + virtual const glslang::TIntermMethod* getAsMethodNode() const { return 0; } + virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return 0; } + virtual const glslang::TIntermBranch* getAsBranchNode() const { return 0; } + virtual const glslang::TIntermLoop* getAsLoopNode() const { return 0; } + virtual ~TIntermNode() { } + +protected: + TIntermNode(const TIntermNode&); + TIntermNode& operator=(const TIntermNode&); + glslang::TSourceLoc loc; +}; + +namespace glslang { + +// +// This is just to help yacc. +// +struct TIntermNodePair { + TIntermNode* node1; + TIntermNode* node2; +}; + +// +// Intermediate class for nodes that have a type. 
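A note on the getAs*() virtuals defined on TIntermNode above: they are glslang's hand-rolled substitute for dynamic_cast (nodes are pool-allocated via POOL_ALLOCATOR_NEW_DELETE), each returning 0 unless the node really is of the requested kind. A hedged sketch of how a consumer dispatches on them (the helper is illustrative, not part of the header):

    // Illustrative only: count binary-operator nodes in an expression tree
    // using the checked downcasts above. Assumes a well-formed tree whose
    // TIntermBinary nodes have non-null children.
    static int CountBinaryOps(TIntermNode* node)
    {
        if (glslang::TIntermBinary* bin = node->getAsBinaryNode())
            return 1 + CountBinaryOps(bin->getLeft()) + CountBinaryOps(bin->getRight());
        return 0;
    }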
+// +class TIntermTyped : public TIntermNode { +public: + TIntermTyped(const TType& t) { type.shallowCopy(t); } + TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); } + virtual TIntermTyped* getAsTyped() { return this; } + virtual const TIntermTyped* getAsTyped() const { return this; } + virtual void setType(const TType& t) { type.shallowCopy(t); } + virtual const TType& getType() const { return type; } + virtual TType& getWritableType() { return type; } + + virtual TBasicType getBasicType() const { return type.getBasicType(); } + virtual TQualifier& getQualifier() { return type.getQualifier(); } + virtual const TQualifier& getQualifier() const { return type.getQualifier(); } + virtual void propagatePrecision(TPrecisionQualifier); + virtual int getVectorSize() const { return type.getVectorSize(); } + virtual int getMatrixCols() const { return type.getMatrixCols(); } + virtual int getMatrixRows() const { return type.getMatrixRows(); } + virtual bool isMatrix() const { return type.isMatrix(); } + virtual bool isArray() const { return type.isArray(); } + virtual bool isVector() const { return type.isVector(); } + virtual bool isScalar() const { return type.isScalar(); } + virtual bool isStruct() const { return type.isStruct(); } + virtual bool isFloatingDomain() const { return type.isFloatingDomain(); } + virtual bool isIntegerDomain() const { return type.isIntegerDomain(); } + bool isAtomic() const { return type.isAtomic(); } + bool isReference() const { return type.isReference(); } + TString getCompleteString() const { return type.getCompleteString(); } + +protected: + TIntermTyped& operator=(const TIntermTyped&); + TType type; +}; + +// +// Handle for, do-while, and while loops. +// +class TIntermLoop : public TIntermNode { +public: + TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) : + body(aBody), + test(aTest), + terminal(aTerminal), + first(testFirst), + unroll(false), + dontUnroll(false), + dependency(0), + minIterations(0), + maxIterations(iterationsInfinite), + iterationMultiple(1), + peelCount(0), + partialCount(0) + { } + + virtual TIntermLoop* getAsLoopNode() { return this; } + virtual const TIntermLoop* getAsLoopNode() const { return this; } + virtual void traverse(TIntermTraverser*); + TIntermNode* getBody() const { return body; } + TIntermTyped* getTest() const { return test; } + TIntermTyped* getTerminal() const { return terminal; } + bool testFirst() const { return first; } + + void setUnroll() { unroll = true; } + void setDontUnroll() { + dontUnroll = true; + peelCount = 0; + partialCount = 0; + } + bool getUnroll() const { return unroll; } + bool getDontUnroll() const { return dontUnroll; } + + static const unsigned int dependencyInfinite = 0xFFFFFFFF; + static const unsigned int iterationsInfinite = 0xFFFFFFFF; + void setLoopDependency(int d) { dependency = d; } + int getLoopDependency() const { return dependency; } + + void setMinIterations(unsigned int v) { minIterations = v; } + unsigned int getMinIterations() const { return minIterations; } + void setMaxIterations(unsigned int v) { maxIterations = v; } + unsigned int getMaxIterations() const { return maxIterations; } + void setIterationMultiple(unsigned int v) { iterationMultiple = v; } + unsigned int getIterationMultiple() const { return iterationMultiple; } + void setPeelCount(unsigned int v) { + peelCount = v; + dontUnroll = false; + } + unsigned int getPeelCount() const { return peelCount; } + void setPartialCount(unsigned int v) { + 
partialCount = v; + dontUnroll = false; + } + unsigned int getPartialCount() const { return partialCount; } + +protected: + TIntermNode* body; // code to loop over + TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops + TIntermTyped* terminal; // exists for for-loops + bool first; // true for while and for, not for do-while + bool unroll; // true if unroll requested + bool dontUnroll; // true if request to not unroll + unsigned int dependency; // loop dependency hint; 0 means not set or unknown + unsigned int minIterations; // as per the SPIR-V specification + unsigned int maxIterations; // as per the SPIR-V specification + unsigned int iterationMultiple; // as per the SPIR-V specification + unsigned int peelCount; // as per the SPIR-V specification + unsigned int partialCount; // as per the SPIR-V specification +}; + +// +// Handle case, break, continue, return, and kill. +// +class TIntermBranch : public TIntermNode { +public: + TIntermBranch(TOperator op, TIntermTyped* e) : + flowOp(op), + expression(e) { } + virtual TIntermBranch* getAsBranchNode() { return this; } + virtual const TIntermBranch* getAsBranchNode() const { return this; } + virtual void traverse(TIntermTraverser*); + TOperator getFlowOp() const { return flowOp; } + TIntermTyped* getExpression() const { return expression; } + void setExpression(TIntermTyped* pExpression) { expression = pExpression; } +protected: + TOperator flowOp; + TIntermTyped* expression; +}; + +// +// Represent method names before seeing their calling signature +// or resolving them to operations. Just an expression as the base object +// and a textual name. +// +class TIntermMethod : public TIntermTyped { +public: + TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { } + virtual TIntermMethod* getAsMethodNode() { return this; } + virtual const TIntermMethod* getAsMethodNode() const { return this; } + virtual const TString& getMethodName() const { return method; } + virtual TIntermTyped* getObject() const { return object; } + virtual void traverse(TIntermTraverser*); +protected: + TIntermTyped* object; + TString method; +}; + +// +// Nodes that correspond to symbols or constants in the source code. +// +class TIntermSymbol : public TIntermTyped { +public: + // if symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym.
If sym comes from + // per process threadPoolAllocator, then it causes increased memory usage per compile; + // it is essential to use "symbol = sym" to assign to symbol + TIntermSymbol(int i, const TString& n, const TType& t) + : TIntermTyped(t), id(i), +#ifndef GLSLANG_WEB + flattenSubset(-1), +#endif + constSubtree(nullptr) + { name = n; } + virtual int getId() const { return id; } + virtual void changeId(int i) { id = i; } + virtual const TString& getName() const { return name; } + virtual void traverse(TIntermTraverser*); + virtual TIntermSymbol* getAsSymbolNode() { return this; } + virtual const TIntermSymbol* getAsSymbolNode() const { return this; } + void setConstArray(const TConstUnionArray& c) { constArray = c; } + const TConstUnionArray& getConstArray() const { return constArray; } + void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; } + TIntermTyped* getConstSubtree() const { return constSubtree; } +#ifndef GLSLANG_WEB + void setFlattenSubset(int subset) { flattenSubset = subset; } + int getFlattenSubset() const { return flattenSubset; } // -1 means full object +#endif + + // This is meant for cases where a node has already been constructed, and + // later on, it becomes necessary to switch to a different symbol. + virtual void switchId(int newId) { id = newId; } + +protected: + int id; // the unique id of the symbol this node represents +#ifndef GLSLANG_WEB + int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced +#endif + TString name; // the name of the symbol this node represents + TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value + TIntermTyped* constSubtree; +}; + +class TIntermConstantUnion : public TIntermTyped { +public: + TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { } + const TConstUnionArray& getConstArray() const { return constArray; } + virtual TIntermConstantUnion* getAsConstantUnion() { return this; } + virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; } + virtual void traverse(TIntermTraverser*); + virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const; + virtual TIntermTyped* fold(TOperator, const TType&) const; + void setLiteral() { literal = true; } + void setExpression() { literal = false; } + bool isLiteral() const { return literal; } + +protected: + TIntermConstantUnion& operator=(const TIntermConstantUnion&); + + const TConstUnionArray constArray; + bool literal; // true if node represents a literal in the source code +}; + +// Represent the independent aspects of a texturing TOperator +struct TCrackedTextureOp { + bool query; + bool proj; + bool lod; + bool fetch; + bool offset; + bool offsets; + bool gather; + bool grad; + bool subpass; + bool lodClamp; + bool fragMask; +}; + +// +// Intermediate class for node types that hold operators.
+// +class TIntermOperator : public TIntermTyped { +public: + virtual TIntermOperator* getAsOperator() { return this; } + virtual const TIntermOperator* getAsOperator() const { return this; } + TOperator getOp() const { return op; } + void setOp(TOperator newOp) { op = newOp; } + bool modifiesState() const; + bool isConstructor() const; + bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; } + bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; } +#ifdef GLSLANG_WEB + bool isImage() const { return false; } + bool isSparseTexture() const { return false; } + bool isImageFootprint() const { return false; } + bool isSparseImage() const { return false; } + bool isSubgroup() const { return false; } +#else + bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; } + bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; } + bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; } + bool isSparseImage() const { return op == EOpSparseImageLoad; } + bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; } +#endif + + void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; } + TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ? + operationPrecision : + type.getQualifier().precision; } + TString getCompleteString() const + { + TString cs = type.getCompleteString(); + if (getOperationPrecision() != type.getQualifier().precision) { + cs += ", operation at "; + cs += GetPrecisionQualifierString(getOperationPrecision()); + } + + return cs; + } + + // Crack the op into the individual dimensions of texturing operation. + void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const + { + cracked.query = false; + cracked.proj = false; + cracked.lod = false; + cracked.fetch = false; + cracked.offset = false; + cracked.offsets = false; + cracked.gather = false; + cracked.grad = false; + cracked.subpass = false; + cracked.lodClamp = false; + cracked.fragMask = false; + + switch (op) { + case EOpImageQuerySize: + case EOpImageQuerySamples: + case EOpTextureQuerySize: + case EOpTextureQueryLod: + case EOpTextureQueryLevels: + case EOpTextureQuerySamples: + case EOpSparseTexelsResident: + cracked.query = true; + break; + case EOpTexture: + case EOpSparseTexture: + break; + case EOpTextureProj: + cracked.proj = true; + break; + case EOpTextureLod: + case EOpSparseTextureLod: + cracked.lod = true; + break; + case EOpTextureOffset: + case EOpSparseTextureOffset: + cracked.offset = true; + break; + case EOpTextureFetch: + case EOpSparseTextureFetch: + cracked.fetch = true; + if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D) + cracked.lod = true; + break; + case EOpTextureFetchOffset: + case EOpSparseTextureFetchOffset: + cracked.fetch = true; + cracked.offset = true; + if (sampler.is1D() || (sampler.dim == Esd2D && ! 
sampler.isMultiSample()) || sampler.dim == Esd3D) + cracked.lod = true; + break; + case EOpTextureProjOffset: + cracked.offset = true; + cracked.proj = true; + break; + case EOpTextureLodOffset: + case EOpSparseTextureLodOffset: + cracked.offset = true; + cracked.lod = true; + break; + case EOpTextureProjLod: + cracked.lod = true; + cracked.proj = true; + break; + case EOpTextureProjLodOffset: + cracked.offset = true; + cracked.lod = true; + cracked.proj = true; + break; + case EOpTextureGrad: + case EOpSparseTextureGrad: + cracked.grad = true; + break; + case EOpTextureGradOffset: + case EOpSparseTextureGradOffset: + cracked.grad = true; + cracked.offset = true; + break; + case EOpTextureProjGrad: + cracked.grad = true; + cracked.proj = true; + break; + case EOpTextureProjGradOffset: + cracked.grad = true; + cracked.offset = true; + cracked.proj = true; + break; +#ifndef GLSLANG_WEB + case EOpTextureClamp: + case EOpSparseTextureClamp: + cracked.lodClamp = true; + break; + case EOpTextureOffsetClamp: + case EOpSparseTextureOffsetClamp: + cracked.offset = true; + cracked.lodClamp = true; + break; + case EOpTextureGradClamp: + case EOpSparseTextureGradClamp: + cracked.grad = true; + cracked.lodClamp = true; + break; + case EOpTextureGradOffsetClamp: + case EOpSparseTextureGradOffsetClamp: + cracked.grad = true; + cracked.offset = true; + cracked.lodClamp = true; + break; + case EOpTextureGather: + case EOpSparseTextureGather: + cracked.gather = true; + break; + case EOpTextureGatherOffset: + case EOpSparseTextureGatherOffset: + cracked.gather = true; + cracked.offset = true; + break; + case EOpTextureGatherOffsets: + case EOpSparseTextureGatherOffsets: + cracked.gather = true; + cracked.offsets = true; + break; + case EOpTextureGatherLod: + case EOpSparseTextureGatherLod: + cracked.gather = true; + cracked.lod = true; + break; + case EOpTextureGatherLodOffset: + case EOpSparseTextureGatherLodOffset: + cracked.gather = true; + cracked.offset = true; + cracked.lod = true; + break; + case EOpTextureGatherLodOffsets: + case EOpSparseTextureGatherLodOffsets: + cracked.gather = true; + cracked.offsets = true; + cracked.lod = true; + break; + case EOpImageLoadLod: + case EOpImageStoreLod: + case EOpSparseImageLoadLod: + cracked.lod = true; + break; + case EOpFragmentMaskFetch: + cracked.subpass = sampler.dim == EsdSubpass; + cracked.fragMask = true; + break; + case EOpFragmentFetch: + cracked.subpass = sampler.dim == EsdSubpass; + cracked.fragMask = true; + break; + case EOpImageSampleFootprintNV: + break; + case EOpImageSampleFootprintClampNV: + cracked.lodClamp = true; + break; + case EOpImageSampleFootprintLodNV: + cracked.lod = true; + break; + case EOpImageSampleFootprintGradNV: + cracked.grad = true; + break; + case EOpImageSampleFootprintGradClampNV: + cracked.lodClamp = true; + cracked.grad = true; + break; + case EOpSubpassLoad: + case EOpSubpassLoadMS: + cracked.subpass = true; + break; +#endif + default: + break; + } + } + +protected: + TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {} + TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {} + TOperator op; + // The result precision is in the inherited TType, and is usually meant to be both + // the operation precision and the result precision. 
However, some more complex things, + // like built-in function calls, distinguish between the two, in which case non-EpqNone + // 'operationPrecision' overrides the result precision as far as operation precision + // is concerned. + TPrecisionQualifier operationPrecision; +}; + +// +// Nodes for all the basic binary math operators. +// +class TIntermBinary : public TIntermOperator { +public: + TIntermBinary(TOperator o) : TIntermOperator(o) {} + virtual void traverse(TIntermTraverser*); + virtual void setLeft(TIntermTyped* n) { left = n; } + virtual void setRight(TIntermTyped* n) { right = n; } + virtual TIntermTyped* getLeft() const { return left; } + virtual TIntermTyped* getRight() const { return right; } + virtual TIntermBinary* getAsBinaryNode() { return this; } + virtual const TIntermBinary* getAsBinaryNode() const { return this; } + virtual void updatePrecision(); +protected: + TIntermTyped* left; + TIntermTyped* right; +}; + +// +// Nodes for unary math operators. +// +class TIntermUnary : public TIntermOperator { +public: + TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {} + TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {} + virtual void traverse(TIntermTraverser*); + virtual void setOperand(TIntermTyped* o) { operand = o; } + virtual TIntermTyped* getOperand() { return operand; } + virtual const TIntermTyped* getOperand() const { return operand; } + virtual TIntermUnary* getAsUnaryNode() { return this; } + virtual const TIntermUnary* getAsUnaryNode() const { return this; } + virtual void updatePrecision(); +protected: + TIntermTyped* operand; +}; + +typedef TVector<TIntermNode*> TIntermSequence; +typedef TVector<TStorageQualifier> TQualifierList; +// +// Nodes that operate on an arbitrary sized set of children. +// +class TIntermAggregate : public TIntermOperator { +public: + TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { } + TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { } + ~TIntermAggregate() { delete pragmaTable; } + virtual TIntermAggregate* getAsAggregate() { return this; } + virtual const TIntermAggregate* getAsAggregate() const { return this; } + virtual void setOperator(TOperator o) { op = o; } + virtual TIntermSequence& getSequence() { return sequence; } + virtual const TIntermSequence& getSequence() const { return sequence; } + virtual void setName(const TString& n) { name = n; } + virtual const TString& getName() const { return name; } + virtual void traverse(TIntermTraverser*); + virtual void setUserDefined() { userDefined = true; } + virtual bool isUserDefined() { return userDefined; } + virtual TQualifierList& getQualifierList() { return qualifier; } + virtual const TQualifierList& getQualifierList() const { return qualifier; } + void setOptimize(bool o) { optimize = o; } + void setDebug(bool d) { debug = d; } + bool getOptimize() const { return optimize; } + bool getDebug() const { return debug; } + void setPragmaTable(const TPragmaTable& pTable); + const TPragmaTable& getPragmaTable() const { return *pragmaTable; } +protected: + TIntermAggregate(const TIntermAggregate&); // disallow copy constructor + TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator + TIntermSequence sequence; + TQualifierList qualifier; + TString name; + bool userDefined; // used for user defined function names + bool optimize; + bool debug; + TPragmaTable* pragmaTable; +}; + +// +// For if tests.
+// +class TIntermSelection : public TIntermTyped { +public: + TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) : + TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB), + shortCircuit(true), + flatten(false), dontFlatten(false) {} + TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) : + TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB), + shortCircuit(true), + flatten(false), dontFlatten(false) {} + virtual void traverse(TIntermTraverser*); + virtual TIntermTyped* getCondition() const { return condition; } + virtual TIntermNode* getTrueBlock() const { return trueBlock; } + virtual TIntermNode* getFalseBlock() const { return falseBlock; } + virtual TIntermSelection* getAsSelectionNode() { return this; } + virtual const TIntermSelection* getAsSelectionNode() const { return this; } + + void setNoShortCircuit() { shortCircuit = false; } + bool getShortCircuit() const { return shortCircuit; } + + void setFlatten() { flatten = true; } + void setDontFlatten() { dontFlatten = true; } + bool getFlatten() const { return flatten; } + bool getDontFlatten() const { return dontFlatten; } + +protected: + TIntermTyped* condition; + TIntermNode* trueBlock; + TIntermNode* falseBlock; + bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not + bool flatten; // true if flatten requested + bool dontFlatten; // true if requested to not flatten +}; + +// +// For switch statements. Designed use is that a switch will have sequence of nodes +// that are either case/default nodes or a *single* node that represents all the code +// in between (if any) consecutive case/defaults. So, a traversal need only deal with +// 0 or 1 nodes per case/default statement. +// +class TIntermSwitch : public TIntermNode { +public: + TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b), + flatten(false), dontFlatten(false) {} + virtual void traverse(TIntermTraverser*); + virtual TIntermNode* getCondition() const { return condition; } + virtual TIntermAggregate* getBody() const { return body; } + virtual TIntermSwitch* getAsSwitchNode() { return this; } + virtual const TIntermSwitch* getAsSwitchNode() const { return this; } + + void setFlatten() { flatten = true; } + void setDontFlatten() { dontFlatten = true; } + bool getFlatten() const { return flatten; } + bool getDontFlatten() const { return dontFlatten; } + +protected: + TIntermTyped* condition; + TIntermAggregate* body; + bool flatten; // true if flatten requested + bool dontFlatten; // true if requested to not flatten +}; + +enum TVisit +{ + EvPreVisit, + EvInVisit, + EvPostVisit +}; + +// +// For traversing the tree. User should derive from this, +// put their traversal specific data in it, and then pass +// it to a Traverse method. +// +// When using this, just fill in the methods for nodes you want visited. +// Return false from a pre-visit to skip visiting that node's subtree. +// +// Explicitly set postVisit to true if you want post visiting, otherwise, +// filled in methods will only be called at pre-visit time (before processing +// the subtree). Similarly for inVisit for in-order visiting of nodes with +// multiple children. +// +// If you only want post-visits, explicitly turn off preVisit (and inVisit) +// and turn on postVisit. +// +// In general, for the visit*() methods, return true from interior nodes +// to have the traversal continue on to children. 
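Before the class definition just below, a hedged illustration of that traversal contract; the derived class and its names are invented for this sketch and are not part of glslang:

    // Illustrative only: a pre-visit traverser that counts symbol leaves and
    // prunes one kind of subtree. visitSymbol() fires for every symbol node;
    // returning false from a pre-visit skips that aggregate's children.
    class CountSymbolsTraverser : public glslang::TIntermTraverser {
    public:
        CountSymbolsTraverser() : TIntermTraverser(true /* preVisit */) {}

        void visitSymbol(glslang::TIntermSymbol*) override { ++symbolCount; }

        bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node) override
        {
            // Skip the linker-objects aggregate, as an example of pruning.
            return node->getOp() != glslang::EOpLinkerObjects;
        }

        int symbolCount = 0;
    };

    // Usage: CountSymbolsTraverser t; root->traverse(&t);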
+// +// If you process children yourself, or don't want them processed, return false. +// +class TIntermTraverser { +public: + POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator()) + TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) : + preVisit(preVisit), + inVisit(inVisit), + postVisit(postVisit), + rightToLeft(rightToLeft), + depth(0), + maxDepth(0) { } + virtual ~TIntermTraverser() { } + + virtual void visitSymbol(TIntermSymbol*) { } + virtual void visitConstantUnion(TIntermConstantUnion*) { } + virtual bool visitBinary(TVisit, TIntermBinary*) { return true; } + virtual bool visitUnary(TVisit, TIntermUnary*) { return true; } + virtual bool visitSelection(TVisit, TIntermSelection*) { return true; } + virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; } + virtual bool visitLoop(TVisit, TIntermLoop*) { return true; } + virtual bool visitBranch(TVisit, TIntermBranch*) { return true; } + virtual bool visitSwitch(TVisit, TIntermSwitch*) { return true; } + + int getMaxDepth() const { return maxDepth; } + + void incrementDepth(TIntermNode *current) + { + depth++; + maxDepth = (std::max)(maxDepth, depth); + path.push_back(current); + } + + void decrementDepth() + { + depth--; + path.pop_back(); + } + + TIntermNode *getParentNode() + { + return path.size() == 0 ? NULL : path.back(); + } + + const bool preVisit; + const bool inVisit; + const bool postVisit; + const bool rightToLeft; + +protected: + TIntermTraverser& operator=(TIntermTraverser&); + + int depth; + int maxDepth; + + // All the nodes from root to the current node's parent during traversing. + TVector<TIntermNode*> path; +}; + +// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if +// sized with the same symbol, involving no operations" +inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2) +{ + return node1->getAsSymbolNode() && node2->getAsSymbolNode() && + node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId(); +} + +} // end namespace glslang + +#endif // __INTERMEDIATE_H diff --git a/dep/glslang/glslang/Include/revision.h b/dep/glslang/glslang/Include/revision.h new file mode 100644 index 000000000..693557bec --- /dev/null +++ b/dep/glslang/glslang/Include/revision.h @@ -0,0 +1,3 @@ +// This header is generated by the make-revision script. + +#define GLSLANG_PATCH_LEVEL 3766 diff --git a/dep/glslang/glslang/MachineIndependent/Constant.cpp b/dep/glslang/glslang/MachineIndependent/Constant.cpp new file mode 100644 index 000000000..e21cf427f --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/Constant.cpp @@ -0,0 +1,1428 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2018-2020 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd.
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "localintermediate.h" +#include +#include +#include +#include + +namespace { + +using namespace glslang; + +typedef union { + double d; + int i[2]; +} DoubleIntUnion; + +// Some helper functions + +bool isNan(double x) +{ + DoubleIntUnion u; + // tough to find a platform independent library function, do it directly + u.d = x; + int bitPatternL = u.i[0]; + int bitPatternH = u.i[1]; + return (bitPatternH & 0x7ff80000) == 0x7ff80000 && + ((bitPatternH & 0xFFFFF) != 0 || bitPatternL != 0); +} + +bool isInf(double x) +{ + DoubleIntUnion u; + // tough to find a platform independent library function, do it directly + u.d = x; + int bitPatternL = u.i[0]; + int bitPatternH = u.i[1]; + return (bitPatternH & 0x7ff00000) == 0x7ff00000 && + (bitPatternH & 0xFFFFF) == 0 && bitPatternL == 0; +} + +const double pi = 3.1415926535897932384626433832795; + +} // end anonymous namespace + + +namespace glslang { + +// +// The fold functions see if an operation on a constant can be done in place, +// without generating run-time code. +// +// Returns the node to keep using, which may or may not be the node passed in. +// +// Note: As of version 1.2, all constant operations must be folded. It is +// not opportunistic, but rather a semantic requirement. +// + +// +// Do folding between a pair of nodes. +// 'this' is the left-hand operand and 'rightConstantNode' is the right-hand operand. +// +// Returns a new node representing the result. +// +TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* rightConstantNode) const +{ + // For most cases, the return type matches the argument type, so set that + // up and just code to exceptions below. 
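+    // Worked example (editor's comment, not upstream glslang): for
+    //     const vec2 a = vec2(1.0, 2.0) + 3.0;
+    // 'this' holds the constant {1.0, 2.0}, 'rightConstantNode' holds the scalar 3.0,
+    // and op is EOpAdd; the scalar is smeared to {3.0, 3.0} below and the returned
+    // constant node holds {4.0, 5.0}.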
+ TType returnType; + returnType.shallowCopy(getType()); + + // + // A pair of nodes is to be folded together + // + + const TIntermConstantUnion *rightNode = rightConstantNode->getAsConstantUnion(); + TConstUnionArray leftUnionArray = getConstArray(); + TConstUnionArray rightUnionArray = rightNode->getConstArray(); + + // Figure out the size of the result + int newComps; + int constComps; + switch(op) { + case EOpMatrixTimesMatrix: + newComps = rightNode->getMatrixCols() * getMatrixRows(); + break; + case EOpMatrixTimesVector: + newComps = getMatrixRows(); + break; + case EOpVectorTimesMatrix: + newComps = rightNode->getMatrixCols(); + break; + default: + newComps = getType().computeNumComponents(); + constComps = rightConstantNode->getType().computeNumComponents(); + if (constComps == 1 && newComps > 1) { + // for a case like vec4 f = vec4(2,3,4,5) + 1.2; + TConstUnionArray smearedArray(newComps, rightNode->getConstArray()[0]); + rightUnionArray = smearedArray; + } else if (constComps > 1 && newComps == 1) { + // for a case like vec4 f = 1.2 + vec4(2,3,4,5); + newComps = constComps; + rightUnionArray = rightNode->getConstArray(); + TConstUnionArray smearedArray(newComps, getConstArray()[0]); + leftUnionArray = smearedArray; + returnType.shallowCopy(rightNode->getType()); + } + break; + } + + TConstUnionArray newConstArray(newComps); + TType constBool(EbtBool, EvqConst); + + switch(op) { + case EOpAdd: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] + rightUnionArray[i]; + break; + case EOpSub: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] - rightUnionArray[i]; + break; + + case EOpMul: + case EOpVectorTimesScalar: + case EOpMatrixTimesScalar: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] * rightUnionArray[i]; + break; + case EOpMatrixTimesMatrix: + for (int row = 0; row < getMatrixRows(); row++) { + for (int column = 0; column < rightNode->getMatrixCols(); column++) { + double sum = 0.0f; + for (int i = 0; i < rightNode->getMatrixRows(); i++) + sum += leftUnionArray[i * getMatrixRows() + row].getDConst() * rightUnionArray[column * rightNode->getMatrixRows() + i].getDConst(); + newConstArray[column * getMatrixRows() + row].setDConst(sum); + } + } + returnType.shallowCopy(TType(getType().getBasicType(), EvqConst, 0, rightNode->getMatrixCols(), getMatrixRows())); + break; + case EOpDiv: + for (int i = 0; i < newComps; i++) { + switch (getType().getBasicType()) { + case EbtDouble: + case EbtFloat: + case EbtFloat16: + if (rightUnionArray[i].getDConst() != 0.0) + newConstArray[i].setDConst(leftUnionArray[i].getDConst() / rightUnionArray[i].getDConst()); + else if (leftUnionArray[i].getDConst() > 0.0) + newConstArray[i].setDConst((double)INFINITY); + else if (leftUnionArray[i].getDConst() < 0.0) + newConstArray[i].setDConst(-(double)INFINITY); + else + newConstArray[i].setDConst((double)NAN); + break; + + case EbtInt: + if (rightUnionArray[i] == 0) + newConstArray[i].setIConst(0x7FFFFFFF); + else if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == (int)-0x80000000ll) + newConstArray[i].setIConst((int)-0x80000000ll); + else + newConstArray[i].setIConst(leftUnionArray[i].getIConst() / rightUnionArray[i].getIConst()); + break; + + case EbtUint: + if (rightUnionArray[i] == 0u) + newConstArray[i].setUConst(0xFFFFFFFFu); + else + newConstArray[i].setUConst(leftUnionArray[i].getUConst() / rightUnionArray[i].getUConst()); + break; + +#ifndef GLSLANG_WEB + case EbtInt8: + if 
(rightUnionArray[i] == (signed char)0) + newConstArray[i].setI8Const((signed char)0x7F); + else if (rightUnionArray[i].getI8Const() == (signed char)-1 && leftUnionArray[i].getI8Const() == (signed char)-0x80) + newConstArray[i].setI8Const((signed char)-0x80); + else + newConstArray[i].setI8Const(leftUnionArray[i].getI8Const() / rightUnionArray[i].getI8Const()); + break; + + case EbtUint8: + if (rightUnionArray[i] == (unsigned char)0u) + newConstArray[i].setU8Const((unsigned char)0xFFu); + else + newConstArray[i].setU8Const(leftUnionArray[i].getU8Const() / rightUnionArray[i].getU8Const()); + break; + + case EbtInt16: + if (rightUnionArray[i] == (signed short)0) + newConstArray[i].setI16Const((signed short)0x7FFF); + else if (rightUnionArray[i].getI16Const() == (signed short)-1 && leftUnionArray[i].getI16Const() == (signed short)-0x8000) + newConstArray[i].setI16Const((signed short)-0x8000); + else + newConstArray[i].setI16Const(leftUnionArray[i].getI16Const() / rightUnionArray[i].getI16Const()); + break; + + case EbtUint16: + if (rightUnionArray[i] == (unsigned short)0u) + newConstArray[i].setU16Const((unsigned short)0xFFFFu); + else + newConstArray[i].setU16Const(leftUnionArray[i].getU16Const() / rightUnionArray[i].getU16Const()); + break; + + case EbtInt64: + if (rightUnionArray[i] == 0ll) + newConstArray[i].setI64Const(0x7FFFFFFFFFFFFFFFll); + else if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == (long long)-0x8000000000000000ll) + newConstArray[i].setI64Const((long long)-0x8000000000000000ll); + else + newConstArray[i].setI64Const(leftUnionArray[i].getI64Const() / rightUnionArray[i].getI64Const()); + break; + + case EbtUint64: + if (rightUnionArray[i] == 0ull) + newConstArray[i].setU64Const(0xFFFFFFFFFFFFFFFFull); + else + newConstArray[i].setU64Const(leftUnionArray[i].getU64Const() / rightUnionArray[i].getU64Const()); + break; + default: + return 0; +#endif + } + } + break; + + case EOpMatrixTimesVector: + for (int i = 0; i < getMatrixRows(); i++) { + double sum = 0.0f; + for (int j = 0; j < rightNode->getVectorSize(); j++) { + sum += leftUnionArray[j*getMatrixRows() + i].getDConst() * rightUnionArray[j].getDConst(); + } + newConstArray[i].setDConst(sum); + } + + returnType.shallowCopy(TType(getBasicType(), EvqConst, getMatrixRows())); + break; + + case EOpVectorTimesMatrix: + for (int i = 0; i < rightNode->getMatrixCols(); i++) { + double sum = 0.0f; + for (int j = 0; j < getVectorSize(); j++) + sum += leftUnionArray[j].getDConst() * rightUnionArray[i*rightNode->getMatrixRows() + j].getDConst(); + newConstArray[i].setDConst(sum); + } + + returnType.shallowCopy(TType(getBasicType(), EvqConst, rightNode->getMatrixCols())); + break; + + case EOpMod: + for (int i = 0; i < newComps; i++) { + if (rightUnionArray[i] == 0) + newConstArray[i] = leftUnionArray[i]; + else { + switch (getType().getBasicType()) { + case EbtInt: + if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == INT_MIN) { + newConstArray[i].setIConst(0); + break; + } else goto modulo_default; +#ifndef GLSLANG_WEB + case EbtInt64: + if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == LLONG_MIN) { + newConstArray[i].setI64Const(0); + break; + } else goto modulo_default; + case EbtInt16: + if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == SHRT_MIN) { + newConstArray[i].setIConst(0); + break; + } else goto modulo_default; +#endif + default: + modulo_default: + newConstArray[i] = leftUnionArray[i] % rightUnionArray[i]; + } 
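+                // Editor's note (added comment, not upstream glslang): the special cases
+                // above exist because INT_MIN % -1 (and its narrower and wider equivalents)
+                // overflows in C++, so the folder defines the result as 0 rather than
+                // evaluating the expression natively and invoking undefined behavior.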
+ } + } + break; + + case EOpRightShift: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] >> rightUnionArray[i]; + break; + + case EOpLeftShift: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] << rightUnionArray[i]; + break; + + case EOpAnd: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] & rightUnionArray[i]; + break; + case EOpInclusiveOr: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] | rightUnionArray[i]; + break; + case EOpExclusiveOr: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] ^ rightUnionArray[i]; + break; + + case EOpLogicalAnd: // this code is written for possible future use, will not get executed currently + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] && rightUnionArray[i]; + break; + + case EOpLogicalOr: // this code is written for possible future use, will not get executed currently + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] || rightUnionArray[i]; + break; + + case EOpLogicalXor: + for (int i = 0; i < newComps; i++) { + switch (getType().getBasicType()) { + case EbtBool: newConstArray[i].setBConst((leftUnionArray[i] == rightUnionArray[i]) ? false : true); break; + default: assert(false && "Default missing"); + } + } + break; + + case EOpLessThan: + newConstArray[0].setBConst(leftUnionArray[0] < rightUnionArray[0]); + returnType.shallowCopy(constBool); + break; + case EOpGreaterThan: + newConstArray[0].setBConst(leftUnionArray[0] > rightUnionArray[0]); + returnType.shallowCopy(constBool); + break; + case EOpLessThanEqual: + newConstArray[0].setBConst(! (leftUnionArray[0] > rightUnionArray[0])); + returnType.shallowCopy(constBool); + break; + case EOpGreaterThanEqual: + newConstArray[0].setBConst(! (leftUnionArray[0] < rightUnionArray[0])); + returnType.shallowCopy(constBool); + break; + case EOpEqual: + newConstArray[0].setBConst(rightNode->getConstArray() == leftUnionArray); + returnType.shallowCopy(constBool); + break; + case EOpNotEqual: + newConstArray[0].setBConst(rightNode->getConstArray() != leftUnionArray); + returnType.shallowCopy(constBool); + break; + + default: + return 0; + } + + TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, returnType); + newNode->setLoc(getLoc()); + + return newNode; +} + +// +// Do single unary node folding +// +// Returns a new node representing the result. +// +TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType) const +{ + // First, size the result, which is mostly the same as the argument's size, + // but not always, and classify what is componentwise. + // Also, eliminate cases that can't be compile-time constant. 
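+    // Illustration (editor's comment, not upstream glslang): length(vec3) folds to a
+    // single scalar (resultSize 1, not componentwise); unpackHalf2x16(uint) folds to two
+    // components; sqrt(vec4) stays componentwise, so resultSize matches the argument (4).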
+ int resultSize; + bool componentWise = true; + + int objectSize = getType().computeNumComponents(); + switch (op) { + case EOpDeterminant: + case EOpAny: + case EOpAll: + case EOpLength: + componentWise = false; + resultSize = 1; + break; + + case EOpEmitStreamVertex: + case EOpEndStreamPrimitive: + // These don't fold + return nullptr; + + case EOpPackSnorm2x16: + case EOpPackUnorm2x16: + case EOpPackHalf2x16: + componentWise = false; + resultSize = 1; + break; + + case EOpUnpackSnorm2x16: + case EOpUnpackUnorm2x16: + case EOpUnpackHalf2x16: + componentWise = false; + resultSize = 2; + break; + + case EOpPack16: + case EOpPack32: + case EOpPack64: + case EOpUnpack32: + case EOpUnpack16: + case EOpUnpack8: + case EOpNormalize: + componentWise = false; + resultSize = objectSize; + break; + + default: + resultSize = objectSize; + break; + } + + // Set up for processing + TConstUnionArray newConstArray(resultSize); + const TConstUnionArray& unionArray = getConstArray(); + + // Process non-component-wise operations + switch (op) { + case EOpLength: + case EOpNormalize: + { + double sum = 0; + for (int i = 0; i < objectSize; i++) + sum += unionArray[i].getDConst() * unionArray[i].getDConst(); + double length = sqrt(sum); + if (op == EOpLength) + newConstArray[0].setDConst(length); + else { + for (int i = 0; i < objectSize; i++) + newConstArray[i].setDConst(unionArray[i].getDConst() / length); + } + break; + } + + case EOpAny: + { + bool result = false; + for (int i = 0; i < objectSize; i++) { + if (unionArray[i].getBConst()) + result = true; + } + newConstArray[0].setBConst(result); + break; + } + case EOpAll: + { + bool result = true; + for (int i = 0; i < objectSize; i++) { + if (! unionArray[i].getBConst()) + result = false; + } + newConstArray[0].setBConst(result); + break; + } + + case EOpPackSnorm2x16: + case EOpPackUnorm2x16: + case EOpPackHalf2x16: + case EOpPack16: + case EOpPack32: + case EOpPack64: + case EOpUnpack32: + case EOpUnpack16: + case EOpUnpack8: + + case EOpUnpackSnorm2x16: + case EOpUnpackUnorm2x16: + case EOpUnpackHalf2x16: + + case EOpDeterminant: + case EOpMatrixInverse: + case EOpTranspose: + return nullptr; + + default: + assert(componentWise); + break; + } + + // Turn off the componentwise loop + if (! 
componentWise)
+        objectSize = 0;
+
+    // Process component-wise operations
+    for (int i = 0; i < objectSize; i++) {
+        switch (op) {
+        case EOpNegative:
+            switch (getType().getBasicType()) {
+            case EbtDouble:
+            case EbtFloat16:
+            case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break;
+            case EbtInt:   newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
+            case EbtUint:  newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
+#ifndef GLSLANG_WEB
+            case EbtInt8:  newConstArray[i].setI8Const(-unionArray[i].getI8Const()); break;
+            case EbtUint8: newConstArray[i].setU8Const(static_cast<unsigned char>(-static_cast<signed char>(unionArray[i].getU8Const()))); break;
+            case EbtInt16: newConstArray[i].setI16Const(-unionArray[i].getI16Const()); break;
+            case EbtUint16: newConstArray[i].setU16Const(static_cast<unsigned short>(-static_cast<signed short>(unionArray[i].getU16Const()))); break;
+            case EbtInt64: newConstArray[i].setI64Const(-unionArray[i].getI64Const()); break;
+            case EbtUint64: newConstArray[i].setU64Const(static_cast<unsigned long long>(-static_cast<long long>(unionArray[i].getU64Const()))); break;
+#endif
+            default:
+                return nullptr;
+            }
+            break;
+        case EOpLogicalNot:
+        case EOpVectorLogicalNot:
+            switch (getType().getBasicType()) {
+            case EbtBool: newConstArray[i].setBConst(!unionArray[i].getBConst()); break;
+            default:
+                return nullptr;
+            }
+            break;
+        case EOpBitwiseNot:
+            newConstArray[i] = ~unionArray[i];
+            break;
+        case EOpRadians:
+            newConstArray[i].setDConst(unionArray[i].getDConst() * pi / 180.0);
+            break;
+        case EOpDegrees:
+            newConstArray[i].setDConst(unionArray[i].getDConst() * 180.0 / pi);
+            break;
+        case EOpSin:
+            newConstArray[i].setDConst(sin(unionArray[i].getDConst()));
+            break;
+        case EOpCos:
+            newConstArray[i].setDConst(cos(unionArray[i].getDConst()));
+            break;
+        case EOpTan:
+            newConstArray[i].setDConst(tan(unionArray[i].getDConst()));
+            break;
+        case EOpAsin:
+            newConstArray[i].setDConst(asin(unionArray[i].getDConst()));
+            break;
+        case EOpAcos:
+            newConstArray[i].setDConst(acos(unionArray[i].getDConst()));
+            break;
+        case EOpAtan:
+            newConstArray[i].setDConst(atan(unionArray[i].getDConst()));
+            break;
+
+        case EOpDPdx:
+        case EOpDPdy:
+        case EOpFwidth:
+        case EOpDPdxFine:
+        case EOpDPdyFine:
+        case EOpFwidthFine:
+        case EOpDPdxCoarse:
+        case EOpDPdyCoarse:
+        case EOpFwidthCoarse:
+            // The derivatives are all mandated to create a constant 0.
+            newConstArray[i].setDConst(0.0);
+            break;
+
+        case EOpExp:
+            newConstArray[i].setDConst(exp(unionArray[i].getDConst()));
+            break;
+        case EOpLog:
+            newConstArray[i].setDConst(log(unionArray[i].getDConst()));
+            break;
+        case EOpExp2:
+        {
+            const double inv_log2_e = 0.69314718055994530941723212145818;
+            newConstArray[i].setDConst(exp(unionArray[i].getDConst() * inv_log2_e));
+            break;
+        }
+        case EOpLog2:
+        {
+            const double log2_e = 1.4426950408889634073599246810019;
+            newConstArray[i].setDConst(log2_e * log(unionArray[i].getDConst()));
+            break;
+        }
+        case EOpSqrt:
+            newConstArray[i].setDConst(sqrt(unionArray[i].getDConst()));
+            break;
+        case EOpInverseSqrt:
+            newConstArray[i].setDConst(1.0 / sqrt(unionArray[i].getDConst()));
+            break;
+
+        case EOpAbs:
+            if (unionArray[i].getType() == EbtDouble)
+                newConstArray[i].setDConst(fabs(unionArray[i].getDConst()));
+            else if (unionArray[i].getType() == EbtInt)
+                newConstArray[i].setIConst(abs(unionArray[i].getIConst()));
+            else
+                newConstArray[i] = unionArray[i];
+            break;
+        case EOpSign:
+            #define SIGN(X) (X == 0 ? 0 : (X < 0 ? -1 : 1))
+            if (unionArray[i].getType() == EbtDouble)
+                newConstArray[i].setDConst(SIGN(unionArray[i].getDConst()));
+            else
+                newConstArray[i].setIConst(SIGN(unionArray[i].getIConst()));
+            break;
+        case EOpFloor:
+            newConstArray[i].setDConst(floor(unionArray[i].getDConst()));
+            break;
+        case EOpTrunc:
+            if (unionArray[i].getDConst() > 0)
+                newConstArray[i].setDConst(floor(unionArray[i].getDConst()));
+            else
+                newConstArray[i].setDConst(ceil(unionArray[i].getDConst()));
+            break;
+        case EOpRound:
+            newConstArray[i].setDConst(floor(0.5 + unionArray[i].getDConst()));
+            break;
+        case EOpRoundEven:
+        {
+            double flr = floor(unionArray[i].getDConst());
+            bool even = flr / 2.0 == floor(flr / 2.0);
+            double rounded = even ? ceil(unionArray[i].getDConst() - 0.5) : floor(unionArray[i].getDConst() + 0.5);
+            newConstArray[i].setDConst(rounded);
+            break;
+        }
+        case EOpCeil:
+            newConstArray[i].setDConst(ceil(unionArray[i].getDConst()));
+            break;
+        case EOpFract:
+        {
+            double x = unionArray[i].getDConst();
+            newConstArray[i].setDConst(x - floor(x));
+            break;
+        }
+
+        case EOpIsNan:
+        {
+            newConstArray[i].setBConst(isNan(unionArray[i].getDConst()));
+            break;
+        }
+        case EOpIsInf:
+        {
+            newConstArray[i].setBConst(isInf(unionArray[i].getDConst()));
+            break;
+        }
+
+        case EOpConvIntToBool:
+            newConstArray[i].setBConst(unionArray[i].getIConst() != 0); break;
+        case EOpConvUintToBool:
+            newConstArray[i].setBConst(unionArray[i].getUConst() != 0); break;
+        case EOpConvBoolToInt:
+            newConstArray[i].setIConst(unionArray[i].getBConst()); break;
+        case EOpConvBoolToUint:
+            newConstArray[i].setUConst(unionArray[i].getBConst()); break;
+        case EOpConvIntToUint:
+            newConstArray[i].setUConst(unionArray[i].getIConst()); break;
+        case EOpConvUintToInt:
+            newConstArray[i].setIConst(unionArray[i].getUConst()); break;
+
+        case EOpConvFloatToBool:
+        case EOpConvDoubleToBool:
+            newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
+
+        case EOpConvBoolToFloat:
+        case EOpConvBoolToDouble:
+            newConstArray[i].setDConst(unionArray[i].getBConst()); break;
+
+        case EOpConvIntToFloat:
+        case EOpConvIntToDouble:
+            newConstArray[i].setDConst(unionArray[i].getIConst()); break;
+
+        case EOpConvUintToFloat:
+        case EOpConvUintToDouble:
+            newConstArray[i].setDConst(unionArray[i].getUConst()); break;
+
+        case EOpConvDoubleToFloat:
+        case EOpConvFloatToDouble:
+            newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+
+        case EOpConvFloatToUint:
+        case EOpConvDoubleToUint:
+            newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
+
+        case EOpConvFloatToInt:
+        case EOpConvDoubleToInt:
+            newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
+
+#ifndef GLSLANG_WEB
+        case EOpConvInt8ToBool:
+            newConstArray[i].setBConst(unionArray[i].getI8Const() != 0); break;
+        case EOpConvUint8ToBool:
+            newConstArray[i].setBConst(unionArray[i].getU8Const() != 0); break;
+        case EOpConvInt16ToBool:
+            newConstArray[i].setBConst(unionArray[i].getI16Const() != 0); break;
+        case EOpConvUint16ToBool:
+            newConstArray[i].setBConst(unionArray[i].getU16Const() != 0); break;
+        case EOpConvInt64ToBool:
+            newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break;
+        case EOpConvUint64ToBool:
+            newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break;
+        case EOpConvFloat16ToBool:
+            newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
+
+        case EOpConvBoolToInt8:
+            newConstArray[i].setI8Const(unionArray[i].getBConst()); break;
+        case EOpConvBoolToUint8:
+            newConstArray[i].setU8Const(unionArray[i].getBConst()); break;
+        case EOpConvBoolToInt16:
+            newConstArray[i].setI16Const(unionArray[i].getBConst()); break;
+        case EOpConvBoolToUint16:
+            newConstArray[i].setU16Const(unionArray[i].getBConst()); break;
+        case EOpConvBoolToInt64:
+            newConstArray[i].setI64Const(unionArray[i].getBConst()); break;
+        case EOpConvBoolToUint64:
+            newConstArray[i].setU64Const(unionArray[i].getBConst()); break;
+        case EOpConvBoolToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getBConst()); break;
+
+        case EOpConvInt8ToInt16:
+            newConstArray[i].setI16Const(unionArray[i].getI8Const()); break;
+        case EOpConvInt8ToInt:
+            newConstArray[i].setIConst(unionArray[i].getI8Const()); break;
+        case EOpConvInt8ToInt64:
+            newConstArray[i].setI64Const(unionArray[i].getI8Const()); break;
+        case EOpConvInt8ToUint8:
+            newConstArray[i].setU8Const(unionArray[i].getI8Const()); break;
+        case EOpConvInt8ToUint16:
+            newConstArray[i].setU16Const(unionArray[i].getI8Const()); break;
+        case EOpConvInt8ToUint:
+            newConstArray[i].setUConst(unionArray[i].getI8Const()); break;
+        case EOpConvInt8ToUint64:
+            newConstArray[i].setU64Const(unionArray[i].getI8Const()); break;
+        case EOpConvUint8ToInt8:
+            newConstArray[i].setI8Const(unionArray[i].getU8Const()); break;
+        case EOpConvUint8ToInt16:
+            newConstArray[i].setI16Const(unionArray[i].getU8Const()); break;
+        case EOpConvUint8ToInt:
+            newConstArray[i].setIConst(unionArray[i].getU8Const()); break;
+        case EOpConvUint8ToInt64:
+            newConstArray[i].setI64Const(unionArray[i].getU8Const()); break;
+        case EOpConvUint8ToUint16:
+            newConstArray[i].setU16Const(unionArray[i].getU8Const()); break;
+        case EOpConvUint8ToUint:
+            newConstArray[i].setUConst(unionArray[i].getU8Const()); break;
+        case EOpConvUint8ToUint64:
+            newConstArray[i].setU64Const(unionArray[i].getU8Const()); break;
+        case EOpConvInt8ToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+        case EOpConvInt8ToFloat:
+            newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+        case EOpConvInt8ToDouble:
+            newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+        case EOpConvUint8ToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+        case EOpConvUint8ToFloat:
+            newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+        case EOpConvUint8ToDouble:
+            newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+
+        case EOpConvInt16ToInt8:
+            newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI16Const())); break;
+        case EOpConvInt16ToInt:
+            newConstArray[i].setIConst(unionArray[i].getI16Const()); break;
+        case EOpConvInt16ToInt64:
+            newConstArray[i].setI64Const(unionArray[i].getI16Const()); break;
+        case EOpConvInt16ToUint8:
+            newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getI16Const())); break;
+        case EOpConvInt16ToUint16:
+            newConstArray[i].setU16Const(unionArray[i].getI16Const()); break;
+        case EOpConvInt16ToUint:
+            newConstArray[i].setUConst(unionArray[i].getI16Const()); break;
+        case EOpConvInt16ToUint64:
+            newConstArray[i].setU64Const(unionArray[i].getI16Const()); break;
+        case EOpConvUint16ToInt8:
+            newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getU16Const())); break;
+        case EOpConvUint16ToInt16:
+            newConstArray[i].setI16Const(unionArray[i].getU16Const()); break;
+        case EOpConvUint16ToInt:
+            newConstArray[i].setIConst(unionArray[i].getU16Const()); break;
+        case EOpConvUint16ToInt64:
+            newConstArray[i].setI64Const(unionArray[i].getU16Const()); break;
+        case EOpConvUint16ToUint8:
+            newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getU16Const())); break;
+
+        case EOpConvUint16ToUint:
+            newConstArray[i].setUConst(unionArray[i].getU16Const()); break;
+        case EOpConvUint16ToUint64:
+            newConstArray[i].setU64Const(unionArray[i].getU16Const()); break;
+        case EOpConvInt16ToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+        case EOpConvInt16ToFloat:
+            newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+        case EOpConvInt16ToDouble:
+            newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+        case EOpConvUint16ToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+        case EOpConvUint16ToFloat:
+            newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+        case EOpConvUint16ToDouble:
+            newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+
+        case EOpConvIntToInt8:
+            newConstArray[i].setI8Const((signed char)unionArray[i].getIConst()); break;
+        case EOpConvIntToInt16:
+            newConstArray[i].setI16Const((signed short)unionArray[i].getIConst()); break;
+        case EOpConvIntToInt64:
+            newConstArray[i].setI64Const(unionArray[i].getIConst()); break;
+        case EOpConvIntToUint8:
+            newConstArray[i].setU8Const((unsigned char)unionArray[i].getIConst()); break;
+        case EOpConvIntToUint16:
+            newConstArray[i].setU16Const((unsigned char)unionArray[i].getIConst()); break;
+        case EOpConvIntToUint64:
+            newConstArray[i].setU64Const(unionArray[i].getIConst()); break;
+
+        case EOpConvUintToInt8:
+            newConstArray[i].setI8Const((signed char)unionArray[i].getUConst()); break;
+        case EOpConvUintToInt16:
+            newConstArray[i].setI16Const((signed short)unionArray[i].getUConst()); break;
+        case EOpConvUintToInt64:
+            newConstArray[i].setI64Const(unionArray[i].getUConst()); break;
+        case EOpConvUintToUint8:
+            newConstArray[i].setU8Const((unsigned char)unionArray[i].getUConst()); break;
+        case EOpConvUintToUint16:
+            newConstArray[i].setU16Const((unsigned short)unionArray[i].getUConst()); break;
+        case EOpConvUintToUint64:
+            newConstArray[i].setU64Const(unionArray[i].getUConst()); break;
+        case EOpConvIntToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getIConst()); break;
+        case EOpConvUintToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getUConst()); break;
+        case EOpConvInt64ToInt8:
+            newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI64Const())); break;
+        case EOpConvInt64ToInt16:
+            newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getI64Const())); break;
+        case EOpConvInt64ToInt:
+            newConstArray[i].setIConst(static_cast<int>(unionArray[i].getI64Const())); break;
+        case EOpConvInt64ToUint8:
+            newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getI64Const())); break;
+        case EOpConvInt64ToUint16:
+            newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getI64Const())); break;
+        case EOpConvInt64ToUint:
+            newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getI64Const())); break;
+        case EOpConvInt64ToUint64:
+            newConstArray[i].setU64Const(unionArray[i].getI64Const()); break;
+        case EOpConvUint64ToInt8:
+            newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getU64Const())); break;
+        case EOpConvUint64ToInt16:
+            newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getU64Const())); break;
+        case EOpConvUint64ToInt:
+            newConstArray[i].setIConst(static_cast<int>(unionArray[i].getU64Const())); break;
+        case EOpConvUint64ToInt64:
+            newConstArray[i].setI64Const(unionArray[i].getU64Const()); break;
+        case EOpConvUint64ToUint8:
+            newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getU64Const())); break;
+        case EOpConvUint64ToUint16:
+            newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getU64Const())); break;
+        case EOpConvUint64ToUint:
+            newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getU64Const())); break;
+        case EOpConvInt64ToFloat16:
+            newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+        case EOpConvInt64ToFloat:
+            newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+        case EOpConvInt64ToDouble:
+            newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+        case EOpConvUint64ToFloat16:
+            newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+        case EOpConvUint64ToFloat:
+            newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+        case EOpConvUint64ToDouble:
+            newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+        case EOpConvFloat16ToInt8:
+            newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+        case EOpConvFloat16ToInt16:
+            newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+        case EOpConvFloat16ToInt:
+            newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
+        case EOpConvFloat16ToInt64:
+            newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+        case EOpConvFloat16ToUint8:
+            newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+        case EOpConvFloat16ToUint16:
+            newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+        case EOpConvFloat16ToUint:
+            newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
+        case EOpConvFloat16ToUint64:
+            newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+        case EOpConvFloat16ToFloat:
+            newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+        case EOpConvFloat16ToDouble:
+            newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+        case EOpConvFloatToInt8:
+            newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+        case EOpConvFloatToInt16:
+            newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+        case EOpConvFloatToInt64:
+            newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+        case EOpConvFloatToUint8:
+            newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+        case EOpConvFloatToUint16:
+            newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+        case EOpConvFloatToUint64:
+            newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+        case EOpConvFloatToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+        case EOpConvDoubleToInt8:
+            newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+        case EOpConvDoubleToInt16:
+            newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+        case EOpConvDoubleToInt64:
+            newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+        case EOpConvDoubleToUint8:
+            newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+        case EOpConvDoubleToUint16:
+            newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+        case EOpConvDoubleToUint64:
+            newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+        case EOpConvDoubleToFloat16:
+            newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+        case EOpConvPtrToUint64:
+        case EOpConvUint64ToPtr:
+        case EOpConstructReference:
+            newConstArray[i].setU64Const(unionArray[i].getU64Const()); break;
+#endif
+
+        // TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
+
+        case EOpSinh:
+        case EOpCosh:
+        case EOpTanh:
+        case EOpAsinh:
+        case EOpAcosh:
+        case EOpAtanh:
+
+        case EOpFloatBitsToInt:
+        case EOpFloatBitsToUint:
+        case EOpIntBitsToFloat:
+        case EOpUintBitsToFloat:
+        case EOpDoubleBitsToInt64:
+        case EOpDoubleBitsToUint64:
+        case EOpInt64BitsToDouble:
+        case EOpUint64BitsToDouble:
+        case EOpFloat16BitsToInt16:
+        case EOpFloat16BitsToUint16:
+        case EOpInt16BitsToFloat16:
+        case EOpUint16BitsToFloat16:
+        default:
+            return nullptr;
+        }
+    }
+
+    TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, returnType);
+    newNode->getWritableType().getQualifier().storage = EvqConst;
+    newNode->setLoc(getLoc());
+
+    return newNode;
+}
+
+//
+// Do constant folding for an aggregate node that has all its children
+// as constants and an operator that requires constant folding.
+//
+TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
+{
+    if (aggrNode == nullptr)
+        return aggrNode;
+
+    if (! areAllChildConst(aggrNode))
+        return aggrNode;
+
+    if (aggrNode->isConstructor())
+        return foldConstructor(aggrNode);
+
+    TIntermSequence& children = aggrNode->getSequence();
+
+    // First, see if this is an operation to constant fold, kick out if not,
+    // see what size the result is if so.
+
+    bool componentwise = false;  // will also say componentwise if a scalar argument gets repeated to make per-component results
+    int objectSize;
+    switch (aggrNode->getOp()) {
+    case EOpAtan:
+    case EOpPow:
+    case EOpMin:
+    case EOpMax:
+    case EOpMix:
+    case EOpMod:
+    case EOpClamp:
+    case EOpLessThan:
+    case EOpGreaterThan:
+    case EOpLessThanEqual:
+    case EOpGreaterThanEqual:
+    case EOpVectorEqual:
+    case EOpVectorNotEqual:
+        componentwise = true;
+        objectSize = children[0]->getAsConstantUnion()->getType().computeNumComponents();
+        break;
+    case EOpCross:
+    case EOpReflect:
+    case EOpRefract:
+    case EOpFaceForward:
+        objectSize = children[0]->getAsConstantUnion()->getType().computeNumComponents();
+        break;
+    case EOpDistance:
+    case EOpDot:
+        objectSize = 1;
+        break;
+    case EOpOuterProduct:
+        objectSize = children[0]->getAsTyped()->getType().getVectorSize() *
+                     children[1]->getAsTyped()->getType().getVectorSize();
+        break;
+    case EOpStep:
+        componentwise = true;
+        objectSize = std::max(children[0]->getAsTyped()->getType().getVectorSize(),
+                              children[1]->getAsTyped()->getType().getVectorSize());
+        break;
+    case EOpSmoothStep:
+        componentwise = true;
+        objectSize = std::max(children[0]->getAsTyped()->getType().getVectorSize(),
+                              children[2]->getAsTyped()->getType().getVectorSize());
+        break;
+    default:
+        return aggrNode;
+    }
+    TConstUnionArray newConstArray(objectSize);
+
+    TVector<TConstUnionArray> childConstUnions;
+    for (unsigned int arg = 0; arg < children.size(); ++arg)
+        childConstUnions.push_back(children[arg]->getAsConstantUnion()->getConstArray());
+
+    if (componentwise) {
+        for (int comp = 0; comp < objectSize; comp++) {
+
+            // some arguments are scalars instead of matching vectors; simulate a smear
+            int arg0comp = std::min(comp, children[0]->getAsTyped()->getType().getVectorSize() - 1);
+            int arg1comp = 0;
+            if (children.size() > 1)
+                arg1comp = std::min(comp, children[1]->getAsTyped()->getType().getVectorSize() - 1);
+            int arg2comp = 0;
+            if (children.size() > 2)
+                arg2comp = std::min(comp, children[2]->getAsTyped()->getType().getVectorSize() - 1);
+
+            switch (aggrNode->getOp()) {
+            case EOpAtan:
+
newConstArray[comp].setDConst(atan2(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst())); + break; + case EOpPow: + newConstArray[comp].setDConst(pow(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst())); + break; + case EOpMod: + { + double arg0 = childConstUnions[0][arg0comp].getDConst(); + double arg1 = childConstUnions[1][arg1comp].getDConst(); + double result = arg0 - arg1 * floor(arg0 / arg1); + newConstArray[comp].setDConst(result); + break; + } + case EOpMin: + switch(children[0]->getAsTyped()->getBasicType()) { + case EbtFloat16: + case EbtFloat: + case EbtDouble: + newConstArray[comp].setDConst(std::min(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst())); + break; + case EbtInt: + newConstArray[comp].setIConst(std::min(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst())); + break; + case EbtUint: + newConstArray[comp].setUConst(std::min(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst())); + break; +#ifndef GLSLANG_WEB + case EbtInt8: + newConstArray[comp].setI8Const(std::min(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const())); + break; + case EbtUint8: + newConstArray[comp].setU8Const(std::min(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const())); + break; + case EbtInt16: + newConstArray[comp].setI16Const(std::min(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const())); + break; + case EbtUint16: + newConstArray[comp].setU16Const(std::min(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const())); + break; + case EbtInt64: + newConstArray[comp].setI64Const(std::min(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const())); + break; + case EbtUint64: + newConstArray[comp].setU64Const(std::min(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const())); + break; +#endif + default: assert(false && "Default missing"); + } + break; + case EOpMax: + switch(children[0]->getAsTyped()->getBasicType()) { + case EbtFloat16: + case EbtFloat: + case EbtDouble: + newConstArray[comp].setDConst(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst())); + break; + case EbtInt: + newConstArray[comp].setIConst(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst())); + break; + case EbtUint: + newConstArray[comp].setUConst(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst())); + break; +#ifndef GLSLANG_WEB + case EbtInt8: + newConstArray[comp].setI8Const(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const())); + break; + case EbtUint8: + newConstArray[comp].setU8Const(std::max(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const())); + break; + case EbtInt16: + newConstArray[comp].setI16Const(std::max(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const())); + break; + case EbtUint16: + newConstArray[comp].setU16Const(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const())); + break; + case EbtInt64: + newConstArray[comp].setI64Const(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const())); + break; + case EbtUint64: + 
newConstArray[comp].setU64Const(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const())); + break; +#endif + default: assert(false && "Default missing"); + } + break; + case EOpClamp: + switch(children[0]->getAsTyped()->getBasicType()) { + case EbtFloat16: + case EbtFloat: + case EbtDouble: + newConstArray[comp].setDConst(std::min(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()), + childConstUnions[2][arg2comp].getDConst())); + break; + case EbtUint: + newConstArray[comp].setUConst(std::min(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()), + childConstUnions[2][arg2comp].getUConst())); + break; +#ifndef GLSLANG_WEB + case EbtInt8: + newConstArray[comp].setI8Const(std::min(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()), + childConstUnions[2][arg2comp].getI8Const())); + break; + case EbtUint8: + newConstArray[comp].setU8Const(std::min(std::max(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const()), + childConstUnions[2][arg2comp].getU8Const())); + break; + case EbtInt16: + newConstArray[comp].setI16Const(std::min(std::max(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const()), + childConstUnions[2][arg2comp].getI16Const())); + break; + case EbtUint16: + newConstArray[comp].setU16Const(std::min(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()), + childConstUnions[2][arg2comp].getU16Const())); + break; + case EbtInt: + newConstArray[comp].setIConst(std::min(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()), + childConstUnions[2][arg2comp].getIConst())); + break; + case EbtInt64: + newConstArray[comp].setI64Const(std::min(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()), + childConstUnions[2][arg2comp].getI64Const())); + break; + case EbtUint64: + newConstArray[comp].setU64Const(std::min(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()), + childConstUnions[2][arg2comp].getU64Const())); + break; +#endif + default: assert(false && "Default missing"); + } + break; + case EOpLessThan: + newConstArray[comp].setBConst(childConstUnions[0][arg0comp] < childConstUnions[1][arg1comp]); + break; + case EOpGreaterThan: + newConstArray[comp].setBConst(childConstUnions[0][arg0comp] > childConstUnions[1][arg1comp]); + break; + case EOpLessThanEqual: + newConstArray[comp].setBConst(! (childConstUnions[0][arg0comp] > childConstUnions[1][arg1comp])); + break; + case EOpGreaterThanEqual: + newConstArray[comp].setBConst(! (childConstUnions[0][arg0comp] < childConstUnions[1][arg1comp])); + break; + case EOpVectorEqual: + newConstArray[comp].setBConst(childConstUnions[0][arg0comp] == childConstUnions[1][arg1comp]); + break; + case EOpVectorNotEqual: + newConstArray[comp].setBConst(childConstUnions[0][arg0comp] != childConstUnions[1][arg1comp]); + break; + case EOpMix: + if (!children[0]->getAsTyped()->isFloatingDomain()) + return aggrNode; + if (children[2]->getAsTyped()->getBasicType() == EbtBool) { + newConstArray[comp].setDConst(childConstUnions[2][arg2comp].getBConst() + ? 
childConstUnions[1][arg1comp].getDConst() + : childConstUnions[0][arg0comp].getDConst()); + } else { + newConstArray[comp].setDConst( + childConstUnions[0][arg0comp].getDConst() * (1.0 - childConstUnions[2][arg2comp].getDConst()) + + childConstUnions[1][arg1comp].getDConst() * childConstUnions[2][arg2comp].getDConst()); + } + break; + case EOpStep: + newConstArray[comp].setDConst(childConstUnions[1][arg1comp].getDConst() < childConstUnions[0][arg0comp].getDConst() ? 0.0 : 1.0); + break; + case EOpSmoothStep: + { + double t = (childConstUnions[2][arg2comp].getDConst() - childConstUnions[0][arg0comp].getDConst()) / + (childConstUnions[1][arg1comp].getDConst() - childConstUnions[0][arg0comp].getDConst()); + if (t < 0.0) + t = 0.0; + if (t > 1.0) + t = 1.0; + newConstArray[comp].setDConst(t * t * (3.0 - 2.0 * t)); + break; + } + default: + return aggrNode; + } + } + } else { + // Non-componentwise... + + int numComps = children[0]->getAsConstantUnion()->getType().computeNumComponents(); + double dot; + + switch (aggrNode->getOp()) { + case EOpDistance: + { + double sum = 0.0; + for (int comp = 0; comp < numComps; ++comp) { + double diff = childConstUnions[1][comp].getDConst() - childConstUnions[0][comp].getDConst(); + sum += diff * diff; + } + newConstArray[0].setDConst(sqrt(sum)); + break; + } + case EOpDot: + newConstArray[0].setDConst(childConstUnions[0].dot(childConstUnions[1])); + break; + case EOpCross: + newConstArray[0] = childConstUnions[0][1] * childConstUnions[1][2] - childConstUnions[0][2] * childConstUnions[1][1]; + newConstArray[1] = childConstUnions[0][2] * childConstUnions[1][0] - childConstUnions[0][0] * childConstUnions[1][2]; + newConstArray[2] = childConstUnions[0][0] * childConstUnions[1][1] - childConstUnions[0][1] * childConstUnions[1][0]; + break; + case EOpFaceForward: + // If dot(Nref, I) < 0 return N, otherwise return -N: Arguments are (N, I, Nref). + dot = childConstUnions[1].dot(childConstUnions[2]); + for (int comp = 0; comp < numComps; ++comp) { + if (dot < 0.0) + newConstArray[comp] = childConstUnions[0][comp]; + else + newConstArray[comp].setDConst(-childConstUnions[0][comp].getDConst()); + } + break; + case EOpReflect: + // I - 2 * dot(N, I) * N: Arguments are (I, N). + dot = childConstUnions[0].dot(childConstUnions[1]); + dot *= 2.0; + for (int comp = 0; comp < numComps; ++comp) + newConstArray[comp].setDConst(childConstUnions[0][comp].getDConst() - dot * childConstUnions[1][comp].getDConst()); + break; + case EOpRefract: + { + // Arguments are (I, N, eta). 
+ // k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) + // if (k < 0.0) + // return dvec(0.0) + // else + // return eta * I - (eta * dot(N, I) + sqrt(k)) * N + dot = childConstUnions[0].dot(childConstUnions[1]); + double eta = childConstUnions[2][0].getDConst(); + double k = 1.0 - eta * eta * (1.0 - dot * dot); + if (k < 0.0) { + for (int comp = 0; comp < numComps; ++comp) + newConstArray[comp].setDConst(0.0); + } else { + for (int comp = 0; comp < numComps; ++comp) + newConstArray[comp].setDConst(eta * childConstUnions[0][comp].getDConst() - (eta * dot + sqrt(k)) * childConstUnions[1][comp].getDConst()); + } + break; + } + case EOpOuterProduct: + { + int numRows = numComps; + int numCols = children[1]->getAsConstantUnion()->getType().computeNumComponents(); + for (int row = 0; row < numRows; ++row) + for (int col = 0; col < numCols; ++col) + newConstArray[col * numRows + row] = childConstUnions[0][row] * childConstUnions[1][col]; + break; + } + default: + return aggrNode; + } + } + + TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, aggrNode->getType()); + newNode->getWritableType().getQualifier().storage = EvqConst; + newNode->setLoc(aggrNode->getLoc()); + + return newNode; +} + +bool TIntermediate::areAllChildConst(TIntermAggregate* aggrNode) +{ + bool allConstant = true; + + // check if all the child nodes are constants so that they can be inserted into + // the parent node + if (aggrNode) { + TIntermSequence& childSequenceVector = aggrNode->getSequence(); + for (TIntermSequence::iterator p = childSequenceVector.begin(); + p != childSequenceVector.end(); p++) { + if (!(*p)->getAsTyped()->getAsConstantUnion()) + return false; + } + } + + return allConstant; +} + +TIntermTyped* TIntermediate::foldConstructor(TIntermAggregate* aggrNode) +{ + bool error = false; + + TConstUnionArray unionArray(aggrNode->getType().computeNumComponents()); + if (aggrNode->getSequence().size() == 1) + error = parseConstTree(aggrNode, unionArray, aggrNode->getOp(), aggrNode->getType(), true); + else + error = parseConstTree(aggrNode, unionArray, aggrNode->getOp(), aggrNode->getType()); + + if (error) + return aggrNode; + + return addConstantUnion(unionArray, aggrNode->getType(), aggrNode->getLoc()); +} + +// +// Constant folding of a bracket (array-style) dereference or struct-like dot +// dereference. Can handle anything except a multi-character swizzle, though +// all swizzles may go to foldSwizzle(). +// +TIntermTyped* TIntermediate::foldDereference(TIntermTyped* node, int index, const TSourceLoc& loc) +{ + TType dereferencedType(node->getType(), index); + dereferencedType.getQualifier().storage = EvqConst; + TIntermTyped* result = 0; + int size = dereferencedType.computeNumComponents(); + + // arrays, vectors, matrices, all use simple multiplicative math + // while structures need to add up heterogeneous members + int start; + if (node->getType().isCoopMat()) + start = 0; + else if (node->isArray() || ! 
node->isStruct())
+        start = size * index;
+    else {
+        // it is a structure
+        assert(node->isStruct());
+        start = 0;
+        for (int i = 0; i < index; ++i)
+            start += (*node->getType().getStruct())[i].type->computeNumComponents();
+    }
+
+    result = addConstantUnion(TConstUnionArray(node->getAsConstantUnion()->getConstArray(), start, size), node->getType(), loc);
+
+    if (result == 0)
+        result = node;
+    else
+        result->setType(dereferencedType);
+
+    return result;
+}
+
+//
+// Make a constant vector node or constant scalar node, representing a given
+// constant vector and constant swizzle into it.
+//
+TIntermTyped* TIntermediate::foldSwizzle(TIntermTyped* node, TSwizzleSelectors<TVectorSelector>& selectors, const TSourceLoc& loc)
+{
+    const TConstUnionArray& unionArray = node->getAsConstantUnion()->getConstArray();
+    TConstUnionArray constArray(selectors.size());
+
+    for (int i = 0; i < selectors.size(); i++)
+        constArray[i] = unionArray[selectors[i]];
+
+    TIntermTyped* result = addConstantUnion(constArray, node->getType(), loc);
+
+    if (result == 0)
+        result = node;
+    else
+        result->setType(TType(node->getBasicType(), EvqConst, selectors.size()));
+
+    return result;
+}
+
+} // end namespace glslang
diff --git a/dep/glslang/glslang/MachineIndependent/InfoSink.cpp b/dep/glslang/glslang/MachineIndependent/InfoSink.cpp
new file mode 100644
index 000000000..d00c42256
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/InfoSink.cpp
@@ -0,0 +1,113 @@
+//
+// Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/InfoSink.h"
+
+#include <cstring>
+
+namespace glslang {
+
+void TInfoSinkBase::append(const char* s)
+{
+    if (outputStream & EString) {
+        if (s == nullptr)
+            sink.append("(null)");
+        else {
+            checkMem(strlen(s));
+            sink.append(s);
+        }
+    }
+
+//#ifdef _WIN32
+//    if (outputStream & EDebugger)
+//        OutputDebugString(s);
+//#endif
+
+    if (outputStream & EStdOut)
+        fprintf(stdout, "%s", s);
+}
+
+void TInfoSinkBase::append(int count, char c)
+{
+    if (outputStream & EString) {
+        checkMem(count);
+        sink.append(count, c);
+    }
+
+//#ifdef _WIN32
+//    if (outputStream & EDebugger) {
+//        char str[2];
+//        str[0] = c;
+//        str[1] = '\0';
+//        OutputDebugString(str);
+//    }
+//#endif
+
+    if (outputStream & EStdOut)
+        fprintf(stdout, "%c", c);
+}
+
+void TInfoSinkBase::append(const TPersistString& t)
+{
+    if (outputStream & EString) {
+        checkMem(t.size());
+        sink.append(t);
+    }
+
+//#ifdef _WIN32
+//    if (outputStream & EDebugger)
+//        OutputDebugString(t.c_str());
+//#endif
+
+    if (outputStream & EStdOut)
+        fprintf(stdout, "%s", t.c_str());
+}
+
+void TInfoSinkBase::append(const TString& t)
+{
+    if (outputStream & EString) {
+        checkMem(t.size());
+        sink.append(t.c_str());
+    }
+
+//#ifdef _WIN32
+//    if (outputStream & EDebugger)
+//        OutputDebugString(t.c_str());
+//#endif
+
+    if (outputStream & EStdOut)
+        fprintf(stdout, "%s", t.c_str());
+}
+
+} // end namespace glslang
diff --git a/dep/glslang/glslang/MachineIndependent/Initialize.cpp b/dep/glslang/glslang/MachineIndependent/Initialize.cpp
new file mode 100644
index 000000000..78eacac78
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/Initialize.cpp
@@ -0,0 +1,9186 @@
+//
+// Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2015-2020 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Create strings that declare built-in definitions, add built-ins programmatically
+// that cannot be expressed in the strings, and establish mappings between
+// built-in functions and operators.
+//
+// Where to put a built-in:
+//   TBuiltIns::initialize(version,profile)       context-independent textual built-ins; add them to the right string
+//   TBuiltIns::initialize(resources,...)         context-dependent textual built-ins; add them to the right string
+//   TBuiltIns::identifyBuiltIns(...,symbolTable) context-independent programmatic additions/mappings to the symbol table,
+//                                                including identifying what extensions are needed if a version does not allow a symbol
+//   TBuiltIns::identifyBuiltIns(...,symbolTable, resources) context-dependent programmatic additions/mappings to the symbol table,
+//                                                including identifying what extensions are needed if a version does not allow a symbol
+//
+
+#include "../Include/intermediate.h"
+#include "Initialize.h"
+
+namespace glslang {
+
+// TODO: ARB_Compatability: do full extension support
+const bool ARBCompatibility = true;
+
+const bool ForwardCompatibility = false;
+
+// change this back to false if depending on textual spellings of texturing calls when consuming the AST
+// Using PureOperatorBuiltins=false is deprecated.
+bool PureOperatorBuiltins = true;
+
+namespace {
+
+//
+// A set of definitions for tabling of the built-in functions.
+//
+
+// Order matters here, as does correlation with the subsequent
+// "const int ..." declarations and the ArgType enumerants.
+const char* TypeString[] = {
+    "bool",  "bvec2", "bvec3", "bvec4",
+    "float", "vec2",  "vec3",  "vec4",
+    "int",   "ivec2", "ivec3", "ivec4",
+    "uint",  "uvec2", "uvec3", "uvec4",
+};
+const int TypeStringCount = sizeof(TypeString) / sizeof(char*); // number of entries in 'TypeString'
+const int TypeStringRowShift = 2;                               // shift amount to go down one row in 'TypeString'
+const int TypeStringColumnMask = (1 << TypeStringRowShift) - 1; // reduce type to its column number in 'TypeString'
+const int TypeStringScalarMask = ~TypeStringColumnMask;         // take type to its scalar column in 'TypeString'
+
+enum ArgType {
+    // numbers hardcoded to correspond to 'TypeString'; order and value matter
+    TypeB    = 1 << 0,  // Boolean
+    TypeF    = 1 << 1,  // float 32
+    TypeI    = 1 << 2,  // int 32
+    TypeU    = 1 << 3,  // uint 32
+    TypeF16  = 1 << 4,  // float 16
+    TypeF64  = 1 << 5,  // float 64
+    TypeI8   = 1 << 6,  // int 8
+    TypeI16  = 1 << 7,  // int 16
+    TypeI64  = 1 << 8,  // int 64
+    TypeU8   = 1 << 9,  // uint 8
+    TypeU16  = 1 << 10, // uint 16
+    TypeU64  = 1 << 11, // uint 64
+};
+// Mixtures of the above, to help the function tables
+const ArgType TypeFI  = static_cast<ArgType>(TypeF | TypeI);
+const ArgType TypeFIB = static_cast<ArgType>(TypeF | TypeI | TypeB);
+const ArgType TypeIU  = static_cast<ArgType>(TypeI | TypeU);
+
+// The relationships between arguments and return type, whether anything is
+// output, or other unusual situations.
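+// For example (editor's comment, not upstream glslang): under ClassLS below, an entry
+// such as { EOpMod, "mod", 2, TypeF, ClassLS, nullptr } yields both mod(vec4, vec4) and
+// mod(vec4, float), because the last argument is also held fixed as a matching scalar.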
+enum ArgClass {
+    ClassRegular = 0,       // nothing special, just all vector widths with matching return type; traditional arithmetic
+    ClassLS      = 1 << 0,  // the last argument is also held fixed as a (type-matched) scalar while the others cycle
+    ClassXLS     = 1 << 1,  // the last argument is exclusively a (type-matched) scalar while the others cycle
+    ClassLS2     = 1 << 2,  // the last two arguments are held fixed as a (type-matched) scalar while the others cycle
+    ClassFS      = 1 << 3,  // the first argument is held fixed as a (type-matched) scalar while the others cycle
+    ClassFS2     = 1 << 4,  // the first two arguments are held fixed as a (type-matched) scalar while the others cycle
+    ClassLO      = 1 << 5,  // the last argument is an output
+    ClassB       = 1 << 6,  // return type cycles through only bool/bvec, matching vector width of args
+    ClassLB      = 1 << 7,  // last argument cycles through only bool/bvec, matching vector width of args
+    ClassV1      = 1 << 8,  // scalar only
+    ClassFIO     = 1 << 9,  // first argument is inout
+    ClassRS      = 1 << 10, // the return is held scalar as the arguments cycle
+    ClassNS      = 1 << 11, // no scalar prototype
+    ClassCV      = 1 << 12, // first argument is 'coherent volatile'
+    ClassFO      = 1 << 13, // first argument is output
+    ClassV3      = 1 << 14, // vec3 only
+};
+// Mixtures of the above, to help the function tables
+const ArgClass ClassV1FIOCV = (ArgClass)(ClassV1 | ClassFIO | ClassCV);
+const ArgClass ClassBNS     = (ArgClass)(ClassB  | ClassNS);
+const ArgClass ClassRSNS    = (ArgClass)(ClassRS | ClassNS);
+
+// A descriptor, for a single profile, of when something is available.
+// If the current profile does not match 'profile' mask below, the other fields
+// do not apply (nor validate).
+// profiles == EBadProfile is the end of an array of these
+struct Versioning {
+    EProfile profiles;       // the profile(s) (mask) that the following fields are valid for
+    int minExtendedVersion;  // earliest version when extensions are enabled; ignored if numExtensions is 0
+    int minCoreVersion;      // earliest version function is in core; 0 means never
+    int numExtensions;       // how many extensions are in the 'extensions' list
+    const char** extensions; // list of extension names enabling the function
+};
+
+EProfile EDesktopProfile = static_cast<EProfile>(ENoProfile | ECoreProfile | ECompatibilityProfile);
+
+// Declare pointers to put into the table for versioning.
+#ifdef GLSLANG_WEB
+    const Versioning* Es300Desktop130 = nullptr;
+    const Versioning* Es310Desktop420 = nullptr;
+#else
+    const Versioning Es300Desktop130Version[] = { { EEsProfile,      0, 300, 0, nullptr },
+                                                  { EDesktopProfile, 0, 130, 0, nullptr },
+                                                  { EBadProfile } };
+    const Versioning* Es300Desktop130 = &Es300Desktop130Version[0];
+
+    const Versioning Es310Desktop420Version[] = { { EEsProfile,      0, 310, 0, nullptr },
+                                                  { EDesktopProfile, 0, 420, 0, nullptr },
+                                                  { EBadProfile } };
+    const Versioning* Es310Desktop420 = &Es310Desktop420Version[0];
+
+    const Versioning Es310Desktop450Version[] = { { EEsProfile,      0, 310, 0, nullptr },
+                                                  { EDesktopProfile, 0, 450, 0, nullptr },
+                                                  { EBadProfile } };
+    const Versioning* Es310Desktop450 = &Es310Desktop450Version[0];
+#endif
+
+// The main descriptor of what a set of function prototypes can look like, and
+// a pointer to extra versioning information, when needed.
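+// Reading an entry (illustrative note, not an upstream comment):
+//     { EOpMod, "mod", 2, TypeF, ClassLS, nullptr }
+// maps the name "mod" to EOpMod and requests two-argument prototypes over the
+// float row of 'TypeString'; ClassLS adds the scalar-last overloads, and the
+// null versioning means the prototypes are valid in every version/profile.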
+struct BuiltInFunction {
+    TOperator op;                 // operator to map the name to
+    const char* name;             // function name
+    int numArguments;             // number of arguments (overloads with varying arguments need different entries)
+    ArgType types;                // ArgType mask
+    ArgClass classes;             // the ways this particular function entry manifests
+    const Versioning* versioning; // nullptr means always a valid version
+};
+
+// The tables can have the same built-in function name more than once,
+// but the exact same prototype must be indicated at most once.
+// The prototypes that get declared are the union of all those indicated.
+// This is important when different releases add new prototypes for the same name.
+// It also allows cognitively simpler tiling of the prototype space.
+// In practice, most names can be fully represented with one entry.
+//
+// Table is terminated by an EOpNull TOperator.
+
+const BuiltInFunction BaseFunctions[] = {
+//    TOperator,           name,               arg-count, ArgType, ArgClass,     versioning
+//    ---------            ----                ---------  -------  --------      ----------
+    { EOpRadians,          "radians",          1,         TypeF,   ClassRegular, nullptr },
+    { EOpDegrees,          "degrees",          1,         TypeF,   ClassRegular, nullptr },
+    { EOpSin,              "sin",              1,         TypeF,   ClassRegular, nullptr },
+    { EOpCos,              "cos",              1,         TypeF,   ClassRegular, nullptr },
+    { EOpTan,              "tan",              1,         TypeF,   ClassRegular, nullptr },
+    { EOpAsin,             "asin",             1,         TypeF,   ClassRegular, nullptr },
+    { EOpAcos,             "acos",             1,         TypeF,   ClassRegular, nullptr },
+    { EOpAtan,             "atan",             2,         TypeF,   ClassRegular, nullptr },
+    { EOpAtan,             "atan",             1,         TypeF,   ClassRegular, nullptr },
+    { EOpPow,              "pow",              2,         TypeF,   ClassRegular, nullptr },
+    { EOpExp,              "exp",              1,         TypeF,   ClassRegular, nullptr },
+    { EOpLog,              "log",              1,         TypeF,   ClassRegular, nullptr },
+    { EOpExp2,             "exp2",             1,         TypeF,   ClassRegular, nullptr },
+    { EOpLog2,             "log2",             1,         TypeF,   ClassRegular, nullptr },
+    { EOpSqrt,             "sqrt",             1,         TypeF,   ClassRegular, nullptr },
+    { EOpInverseSqrt,      "inversesqrt",      1,         TypeF,   ClassRegular, nullptr },
+    { EOpAbs,              "abs",              1,         TypeF,   ClassRegular, nullptr },
+    { EOpSign,             "sign",             1,         TypeF,   ClassRegular, nullptr },
+    { EOpFloor,            "floor",            1,         TypeF,   ClassRegular, nullptr },
+    { EOpCeil,             "ceil",             1,         TypeF,   ClassRegular, nullptr },
+    { EOpFract,            "fract",            1,         TypeF,   ClassRegular, nullptr },
+    { EOpMod,              "mod",              2,         TypeF,   ClassLS,      nullptr },
+    { EOpMin,              "min",              2,         TypeF,   ClassLS,      nullptr },
+    { EOpMax,              "max",              2,         TypeF,   ClassLS,      nullptr },
+    { EOpClamp,            "clamp",            3,         TypeF,   ClassLS2,     nullptr },
+    { EOpMix,              "mix",              3,         TypeF,   ClassLS,      nullptr },
+    { EOpStep,             "step",             2,         TypeF,   ClassFS,      nullptr },
+    { EOpSmoothStep,       "smoothstep",       3,         TypeF,   ClassFS2,     nullptr },
+    { EOpNormalize,        "normalize",        1,         TypeF,   ClassRegular, nullptr },
+    { EOpFaceForward,      "faceforward",      3,         TypeF,   ClassRegular, nullptr },
+    { EOpReflect,          "reflect",          2,         TypeF,   ClassRegular, nullptr },
+    { EOpRefract,          "refract",          3,         TypeF,   ClassXLS,     nullptr },
+    { EOpLength,           "length",           1,         TypeF,   ClassRS,      nullptr },
+    { EOpDistance,         "distance",         2,         TypeF,   ClassRS,      nullptr },
+    { EOpDot,              "dot",              2,         TypeF,   ClassRS,      nullptr },
+    { EOpCross,            "cross",            2,         TypeF,   ClassV3,      nullptr },
+    { EOpLessThan,         "lessThan",         2,         TypeFI,  ClassBNS,     nullptr },
+    { EOpLessThanEqual,    "lessThanEqual",    2,         TypeFI,  ClassBNS,     nullptr },
+    { EOpGreaterThan,      "greaterThan",      2,         TypeFI,  ClassBNS,     nullptr },
+    { EOpGreaterThanEqual, "greaterThanEqual", 2,         TypeFI,  ClassBNS,     nullptr },
+    { EOpVectorEqual,      "equal",            2,         TypeFIB, ClassBNS,     nullptr },
+    { EOpVectorNotEqual,   "notEqual",         2,         TypeFIB, ClassBNS,     nullptr },
+    { EOpAny,              "any",              1,         TypeB,   ClassRSNS,    nullptr },
+    { EOpAll,              "all",              1,         TypeB,   ClassRSNS,    nullptr },
+    { EOpVectorLogicalNot, "not",              1,         TypeB,   ClassNS,      nullptr },
+    { EOpSinh,             "sinh",             1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpCosh,             "cosh",             1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpTanh,             "tanh",             1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpAsinh,            "asinh",            1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpAcosh,            "acosh",            1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpAtanh,            "atanh",            1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpAbs,              "abs",              1,         TypeI,   ClassRegular, Es300Desktop130 },
+    { EOpSign,             "sign",             1,         TypeI,   ClassRegular, Es300Desktop130 },
+    { EOpTrunc,            "trunc",            1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpRound,            "round",            1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpRoundEven,        "roundEven",        1,         TypeF,   ClassRegular, Es300Desktop130 },
+    { EOpModf,             "modf",             2,         TypeF,   ClassLO,      Es300Desktop130 },
+    { EOpMin,              "min",              2,         TypeIU,  ClassLS,      Es300Desktop130 },
+    { EOpMax,              "max",              2,         TypeIU,  ClassLS,      Es300Desktop130 },
+    { EOpClamp,            "clamp",            3,         TypeIU,  ClassLS2,     Es300Desktop130 },
+    { EOpMix,              "mix",              3,         TypeF,   ClassLB,      Es300Desktop130 },
+    { EOpIsInf,            "isinf",            1,         TypeF,   ClassB,       Es300Desktop130 },
+    { EOpIsNan,            "isnan",            1,         TypeF,   ClassB,       Es300Desktop130 },
+    { EOpLessThan,         "lessThan",         2,         TypeU,   ClassBNS,     Es300Desktop130 },
+    { EOpLessThanEqual,    "lessThanEqual",    2,         TypeU,   ClassBNS,     Es300Desktop130 },
+    { EOpGreaterThan,      "greaterThan",      2,         TypeU,   ClassBNS,     Es300Desktop130 },
+    { EOpGreaterThanEqual, "greaterThanEqual", 2,         TypeU,   ClassBNS,     Es300Desktop130 },
+    { EOpVectorEqual,      "equal",            2,         TypeU,   ClassBNS,     Es300Desktop130 },
+    { EOpVectorNotEqual,   "notEqual",         2,         TypeU,   ClassBNS,     Es300Desktop130 },
+    { EOpAtomicAdd,        "atomicAdd",        2,         TypeIU,  ClassV1FIOCV, Es310Desktop420 },
+    { EOpAtomicMin,        "atomicMin",        2,         TypeIU,  ClassV1FIOCV, Es310Desktop420 },
+    { EOpAtomicMax,        "atomicMax",        2,         TypeIU,  ClassV1FIOCV, Es310Desktop420 },
+    { EOpAtomicAnd,        "atomicAnd",        2,         TypeIU,  ClassV1FIOCV, Es310Desktop420 },
+    { EOpAtomicOr,         "atomicOr",         2,         TypeIU,  ClassV1FIOCV, Es310Desktop420 },
+    { EOpAtomicXor,        "atomicXor",        2,         TypeIU,  ClassV1FIOCV, Es310Desktop420 },
+    { EOpAtomicExchange,   "atomicExchange",   2,         TypeIU,  ClassV1FIOCV, Es310Desktop420 },
+    { EOpAtomicCompSwap,   "atomicCompSwap",   3,         TypeIU,  ClassV1FIOCV, Es310Desktop420 },
+#ifndef GLSLANG_WEB
+    { EOpMix,              "mix",              3,         TypeB,   ClassRegular, Es310Desktop450 },
+    { EOpMix,              "mix",              3,         TypeIU,  ClassLB,      Es310Desktop450 },
+#endif
+
+    { EOpNull }
+};
+
+const BuiltInFunction DerivativeFunctions[] = {
+    { EOpDPdx,   "dFdx",   1, TypeF, ClassRegular, nullptr },
+    { EOpDPdy,   "dFdy",   1, TypeF, ClassRegular, nullptr },
+    { EOpFwidth, "fwidth", 1, TypeF, ClassRegular, nullptr },
+    { EOpNull }
+};
+
+// For functions declared some other way, but which still use the table to relate a name to an operator.
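+// (Illustrative note, not an upstream comment: functions such as the texturing
+// built-ins get their prototypes from hand-written strings or from the sampler
+// permutation code elsewhere in this file; the table below supplies only the
+// name-to-operator mapping, e.g. "texelFetch" -> EOpTextureFetch.)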
+struct CustomFunction { + TOperator op; // operator to map the name to + const char* name; // function name + const Versioning* versioning; // nullptr means always a valid version +}; + +const CustomFunction CustomFunctions[] = { + { EOpBarrier, "barrier", nullptr }, + { EOpMemoryBarrierShared, "memoryBarrierShared", nullptr }, + { EOpGroupMemoryBarrier, "groupMemoryBarrier", nullptr }, + { EOpMemoryBarrier, "memoryBarrier", nullptr }, + { EOpMemoryBarrierBuffer, "memoryBarrierBuffer", nullptr }, + + { EOpPackSnorm2x16, "packSnorm2x16", nullptr }, + { EOpUnpackSnorm2x16, "unpackSnorm2x16", nullptr }, + { EOpPackUnorm2x16, "packUnorm2x16", nullptr }, + { EOpUnpackUnorm2x16, "unpackUnorm2x16", nullptr }, + { EOpPackHalf2x16, "packHalf2x16", nullptr }, + { EOpUnpackHalf2x16, "unpackHalf2x16", nullptr }, + + { EOpMul, "matrixCompMult", nullptr }, + { EOpOuterProduct, "outerProduct", nullptr }, + { EOpTranspose, "transpose", nullptr }, + { EOpDeterminant, "determinant", nullptr }, + { EOpMatrixInverse, "inverse", nullptr }, + { EOpFloatBitsToInt, "floatBitsToInt", nullptr }, + { EOpFloatBitsToUint, "floatBitsToUint", nullptr }, + { EOpIntBitsToFloat, "intBitsToFloat", nullptr }, + { EOpUintBitsToFloat, "uintBitsToFloat", nullptr }, + + { EOpTextureQuerySize, "textureSize", nullptr }, + { EOpTextureQueryLod, "textureQueryLod", nullptr }, + { EOpTextureQueryLevels, "textureQueryLevels", nullptr }, + { EOpTextureQuerySamples, "textureSamples", nullptr }, + { EOpTexture, "texture", nullptr }, + { EOpTextureProj, "textureProj", nullptr }, + { EOpTextureLod, "textureLod", nullptr }, + { EOpTextureOffset, "textureOffset", nullptr }, + { EOpTextureFetch, "texelFetch", nullptr }, + { EOpTextureFetchOffset, "texelFetchOffset", nullptr }, + { EOpTextureProjOffset, "textureProjOffset", nullptr }, + { EOpTextureLodOffset, "textureLodOffset", nullptr }, + { EOpTextureProjLod, "textureProjLod", nullptr }, + { EOpTextureProjLodOffset, "textureProjLodOffset", nullptr }, + { EOpTextureGrad, "textureGrad", nullptr }, + { EOpTextureGradOffset, "textureGradOffset", nullptr }, + { EOpTextureProjGrad, "textureProjGrad", nullptr }, + { EOpTextureProjGradOffset, "textureProjGradOffset", nullptr }, + + { EOpNull } +}; + +// For the given table of functions, add all the indicated prototypes for each +// one, to be returned in the passed in decls. +void AddTabledBuiltin(TString& decls, const BuiltInFunction& function) +{ + const auto isScalarType = [](int type) { return (type & TypeStringColumnMask) == 0; }; + + // loop across these two: + // 0: the varying arg set, and + // 1: the fixed scalar args + const ArgClass ClassFixed = (ArgClass)(ClassLS | ClassXLS | ClassLS2 | ClassFS | ClassFS2); + for (int fixed = 0; fixed < ((function.classes & ClassFixed) > 0 ? 
2 : 1); ++fixed) { + + if (fixed == 0 && (function.classes & ClassXLS)) + continue; + + // walk the type strings in TypeString[] + for (int type = 0; type < TypeStringCount; ++type) { + // skip types not selected: go from type to row number to type bit + if ((function.types & (1 << (type >> TypeStringRowShift))) == 0) + continue; + + // if we aren't on a scalar, and should be, skip + if ((function.classes & ClassV1) && !isScalarType(type)) + continue; + + // if we aren't on a 3-vector, and should be, skip + if ((function.classes & ClassV3) && (type & TypeStringColumnMask) != 2) + continue; + + // skip replication of all arg scalars between the varying arg set and the fixed args + if (fixed == 1 && type == (type & TypeStringScalarMask) && (function.classes & ClassXLS) == 0) + continue; + + // skip scalars when we are told to + if ((function.classes & ClassNS) && isScalarType(type)) + continue; + + // return type + if (function.classes & ClassB) + decls.append(TypeString[type & TypeStringColumnMask]); + else if (function.classes & ClassRS) + decls.append(TypeString[type & TypeStringScalarMask]); + else + decls.append(TypeString[type]); + decls.append(" "); + decls.append(function.name); + decls.append("("); + + // arguments + for (int arg = 0; arg < function.numArguments; ++arg) { + if (arg == function.numArguments - 1 && (function.classes & ClassLO)) + decls.append("out "); + if (arg == 0) { +#ifndef GLSLANG_WEB + if (function.classes & ClassCV) + decls.append("coherent volatile "); +#endif + if (function.classes & ClassFIO) + decls.append("inout "); + if (function.classes & ClassFO) + decls.append("out "); + } + if ((function.classes & ClassLB) && arg == function.numArguments - 1) + decls.append(TypeString[type & TypeStringColumnMask]); + else if (fixed && ((arg == function.numArguments - 1 && (function.classes & (ClassLS | ClassXLS | + ClassLS2))) || + (arg == function.numArguments - 2 && (function.classes & ClassLS2)) || + (arg == 0 && (function.classes & (ClassFS | ClassFS2))) || + (arg == 1 && (function.classes & ClassFS2)))) + decls.append(TypeString[type & TypeStringScalarMask]); + else + decls.append(TypeString[type]); + if (arg < function.numArguments - 1) + decls.append(","); + } + decls.append(");\n"); + } + } +} + +// See if the tabled versioning information allows the current version. +bool ValidVersion(const BuiltInFunction& function, int version, EProfile profile, const SpvVersion& /* spVersion */) +{ +#ifdef GLSLANG_WEB + // all entries in table are valid + return true; +#endif + + // nullptr means always valid + if (function.versioning == nullptr) + return true; + + // check for what is said about our current profile + for (const Versioning* v = function.versioning; v->profiles != EBadProfile; ++v) { + if ((v->profiles & profile) != 0) { + if (v->minCoreVersion <= version || (v->numExtensions > 0 && v->minExtendedVersion <= version)) + return true; + } + } + + return false; +} + +// Relate a single table of built-ins to their AST operator. +// This can get called redundantly (especially for the common built-ins, when +// called once per stage). This is a performance issue only, not a correctness +// concern. It is done for quality arising from simplicity, as there are subtleties +// to get correct if instead trying to do it surgically. 
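+// A minimal sketch of the effect (illustrative only, not upstream code):
+//
+//     TSymbolTable table;
+//     RelateTabledBuiltins(BaseFunctions, table); // "radians" -> EOpRadians, ...
+//
+// after which the front end can turn a call to radians() into an EOpRadians
+// node instead of a plain function call.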
+template<class FunctionT>
+void RelateTabledBuiltins(const FunctionT* functions, TSymbolTable& symbolTable)
+{
+    while (functions->op != EOpNull) {
+        symbolTable.relateToOperator(functions->name, functions->op);
+        ++functions;
+    }
+}
+
+} // end anonymous namespace
+
+// Add declarations for all tables of built-in functions.
+void TBuiltIns::addTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+    const auto forEachFunction = [&](TString& decls, const BuiltInFunction* function) {
+        while (function->op != EOpNull) {
+            if (ValidVersion(*function, version, profile, spvVersion))
+                AddTabledBuiltin(decls, *function);
+            ++function;
+        }
+    };
+
+    forEachFunction(commonBuiltins, BaseFunctions);
+    forEachFunction(stageBuiltins[EShLangFragment], DerivativeFunctions);
+
+    if ((profile == EEsProfile && version >= 320) || (profile != EEsProfile && version >= 450))
+        forEachFunction(stageBuiltins[EShLangCompute], DerivativeFunctions);
+}
+
+// Relate all tables of built-ins to the AST operators.
+void TBuiltIns::relateTabledBuiltins(int /* version */, EProfile /* profile */, const SpvVersion& /* spvVersion */, EShLanguage /* stage */, TSymbolTable& symbolTable)
+{
+    RelateTabledBuiltins(BaseFunctions, symbolTable);
+    RelateTabledBuiltins(DerivativeFunctions, symbolTable);
+    RelateTabledBuiltins(CustomFunctions, symbolTable);
+}
+
+inline bool IncludeLegacy(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+    return profile != EEsProfile && (version <= 130 || (spvVersion.spv == 0 && ARBCompatibility) || profile == ECompatibilityProfile);
+}
+
+// Construct TBuiltInParseables base class. This can be used for language-common constructs.
+TBuiltInParseables::TBuiltInParseables()
+{
+}
+
+// Destroy TBuiltInParseables.
+TBuiltInParseables::~TBuiltInParseables()
+{
+}
+
+TBuiltIns::TBuiltIns()
+{
+    // Set up textual representations for making all the permutations
+    // of texturing/imaging functions.
+    prefixes[EbtFloat] = "";
+    prefixes[EbtInt]   = "i";
+    prefixes[EbtUint]  = "u";
+#ifndef GLSLANG_WEB
+    prefixes[EbtFloat16] = "f16";
+    prefixes[EbtInt8]    = "i8";
+    prefixes[EbtUint8]   = "u8";
+    prefixes[EbtInt16]   = "i16";
+    prefixes[EbtUint16]  = "u16";
+#endif
+
+    postfixes[2] = "2";
+    postfixes[3] = "3";
+    postfixes[4] = "4";
+
+    // Map from symbolic class of texturing dimension to numeric dimensions.
+    dimMap[Esd2D]   = 2;
+    dimMap[Esd3D]   = 3;
+    dimMap[EsdCube] = 3;
+#ifndef GLSLANG_WEB
+    dimMap[Esd1D]      = 1;
+    dimMap[EsdRect]    = 2;
+    dimMap[EsdBuffer]  = 1;
+    dimMap[EsdSubpass] = 2; // potentially unused for now
+#endif
+}
+
+TBuiltIns::~TBuiltIns()
+{
+}
+
+
+//
+// Add all context-independent built-in functions and variables that are present
+// for the given version and profile. Share common ones across stages, otherwise
+// make stage-specific entries.
+//
+// Most built-in variables can be added as simple text strings. Some need to
+// be added programmatically, which is done later in IdentifyBuiltIns() below.
+//
+void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+#ifdef GLSLANG_WEB
+    version = 310;
+    profile = EEsProfile;
+#endif
+    addTabledBuiltins(version, profile, spvVersion);
+
+    //============================================================================
+    //
+    // Prototypes for built-in functions used repeatedly by different shaders
+    //
+    //============================================================================
+
+#ifndef GLSLANG_WEB
+    //
+    // Derivatives Functions.
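+    // (Illustrative note, not an upstream comment: the Fine/Coarse variants
+    // below are the GL_ARB_derivative_control forms; like the other strings in
+    // this function they are declared as plain text here and related to their
+    // operators later by identifyBuiltIns().)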
+ // + TString derivativeControls ( + "float dFdxFine(float p);" + "vec2 dFdxFine(vec2 p);" + "vec3 dFdxFine(vec3 p);" + "vec4 dFdxFine(vec4 p);" + + "float dFdyFine(float p);" + "vec2 dFdyFine(vec2 p);" + "vec3 dFdyFine(vec3 p);" + "vec4 dFdyFine(vec4 p);" + + "float fwidthFine(float p);" + "vec2 fwidthFine(vec2 p);" + "vec3 fwidthFine(vec3 p);" + "vec4 fwidthFine(vec4 p);" + + "float dFdxCoarse(float p);" + "vec2 dFdxCoarse(vec2 p);" + "vec3 dFdxCoarse(vec3 p);" + "vec4 dFdxCoarse(vec4 p);" + + "float dFdyCoarse(float p);" + "vec2 dFdyCoarse(vec2 p);" + "vec3 dFdyCoarse(vec3 p);" + "vec4 dFdyCoarse(vec4 p);" + + "float fwidthCoarse(float p);" + "vec2 fwidthCoarse(vec2 p);" + "vec3 fwidthCoarse(vec3 p);" + "vec4 fwidthCoarse(vec4 p);" + ); + + TString derivativesAndControl16bits ( + "float16_t dFdx(float16_t);" + "f16vec2 dFdx(f16vec2);" + "f16vec3 dFdx(f16vec3);" + "f16vec4 dFdx(f16vec4);" + + "float16_t dFdy(float16_t);" + "f16vec2 dFdy(f16vec2);" + "f16vec3 dFdy(f16vec3);" + "f16vec4 dFdy(f16vec4);" + + "float16_t dFdxFine(float16_t);" + "f16vec2 dFdxFine(f16vec2);" + "f16vec3 dFdxFine(f16vec3);" + "f16vec4 dFdxFine(f16vec4);" + + "float16_t dFdyFine(float16_t);" + "f16vec2 dFdyFine(f16vec2);" + "f16vec3 dFdyFine(f16vec3);" + "f16vec4 dFdyFine(f16vec4);" + + "float16_t dFdxCoarse(float16_t);" + "f16vec2 dFdxCoarse(f16vec2);" + "f16vec3 dFdxCoarse(f16vec3);" + "f16vec4 dFdxCoarse(f16vec4);" + + "float16_t dFdyCoarse(float16_t);" + "f16vec2 dFdyCoarse(f16vec2);" + "f16vec3 dFdyCoarse(f16vec3);" + "f16vec4 dFdyCoarse(f16vec4);" + + "float16_t fwidth(float16_t);" + "f16vec2 fwidth(f16vec2);" + "f16vec3 fwidth(f16vec3);" + "f16vec4 fwidth(f16vec4);" + + "float16_t fwidthFine(float16_t);" + "f16vec2 fwidthFine(f16vec2);" + "f16vec3 fwidthFine(f16vec3);" + "f16vec4 fwidthFine(f16vec4);" + + "float16_t fwidthCoarse(float16_t);" + "f16vec2 fwidthCoarse(f16vec2);" + "f16vec3 fwidthCoarse(f16vec3);" + "f16vec4 fwidthCoarse(f16vec4);" + ); + + TString derivativesAndControl64bits ( + "float64_t dFdx(float64_t);" + "f64vec2 dFdx(f64vec2);" + "f64vec3 dFdx(f64vec3);" + "f64vec4 dFdx(f64vec4);" + + "float64_t dFdy(float64_t);" + "f64vec2 dFdy(f64vec2);" + "f64vec3 dFdy(f64vec3);" + "f64vec4 dFdy(f64vec4);" + + "float64_t dFdxFine(float64_t);" + "f64vec2 dFdxFine(f64vec2);" + "f64vec3 dFdxFine(f64vec3);" + "f64vec4 dFdxFine(f64vec4);" + + "float64_t dFdyFine(float64_t);" + "f64vec2 dFdyFine(f64vec2);" + "f64vec3 dFdyFine(f64vec3);" + "f64vec4 dFdyFine(f64vec4);" + + "float64_t dFdxCoarse(float64_t);" + "f64vec2 dFdxCoarse(f64vec2);" + "f64vec3 dFdxCoarse(f64vec3);" + "f64vec4 dFdxCoarse(f64vec4);" + + "float64_t dFdyCoarse(float64_t);" + "f64vec2 dFdyCoarse(f64vec2);" + "f64vec3 dFdyCoarse(f64vec3);" + "f64vec4 dFdyCoarse(f64vec4);" + + "float64_t fwidth(float64_t);" + "f64vec2 fwidth(f64vec2);" + "f64vec3 fwidth(f64vec3);" + "f64vec4 fwidth(f64vec4);" + + "float64_t fwidthFine(float64_t);" + "f64vec2 fwidthFine(f64vec2);" + "f64vec3 fwidthFine(f64vec3);" + "f64vec4 fwidthFine(f64vec4);" + + "float64_t fwidthCoarse(float64_t);" + "f64vec2 fwidthCoarse(f64vec2);" + "f64vec3 fwidthCoarse(f64vec3);" + "f64vec4 fwidthCoarse(f64vec4);" + ); + + //============================================================================ + // + // Prototypes for built-in functions seen by both vertex and fragment shaders. 
+ // + //============================================================================ + + // + // double functions added to desktop 4.00, but not fma, frexp, ldexp, or pack/unpack + // + if (profile != EEsProfile && version >= 150) { // ARB_gpu_shader_fp64 + commonBuiltins.append( + + "double sqrt(double);" + "dvec2 sqrt(dvec2);" + "dvec3 sqrt(dvec3);" + "dvec4 sqrt(dvec4);" + + "double inversesqrt(double);" + "dvec2 inversesqrt(dvec2);" + "dvec3 inversesqrt(dvec3);" + "dvec4 inversesqrt(dvec4);" + + "double abs(double);" + "dvec2 abs(dvec2);" + "dvec3 abs(dvec3);" + "dvec4 abs(dvec4);" + + "double sign(double);" + "dvec2 sign(dvec2);" + "dvec3 sign(dvec3);" + "dvec4 sign(dvec4);" + + "double floor(double);" + "dvec2 floor(dvec2);" + "dvec3 floor(dvec3);" + "dvec4 floor(dvec4);" + + "double trunc(double);" + "dvec2 trunc(dvec2);" + "dvec3 trunc(dvec3);" + "dvec4 trunc(dvec4);" + + "double round(double);" + "dvec2 round(dvec2);" + "dvec3 round(dvec3);" + "dvec4 round(dvec4);" + + "double roundEven(double);" + "dvec2 roundEven(dvec2);" + "dvec3 roundEven(dvec3);" + "dvec4 roundEven(dvec4);" + + "double ceil(double);" + "dvec2 ceil(dvec2);" + "dvec3 ceil(dvec3);" + "dvec4 ceil(dvec4);" + + "double fract(double);" + "dvec2 fract(dvec2);" + "dvec3 fract(dvec3);" + "dvec4 fract(dvec4);" + + "double mod(double, double);" + "dvec2 mod(dvec2 , double);" + "dvec3 mod(dvec3 , double);" + "dvec4 mod(dvec4 , double);" + "dvec2 mod(dvec2 , dvec2);" + "dvec3 mod(dvec3 , dvec3);" + "dvec4 mod(dvec4 , dvec4);" + + "double modf(double, out double);" + "dvec2 modf(dvec2, out dvec2);" + "dvec3 modf(dvec3, out dvec3);" + "dvec4 modf(dvec4, out dvec4);" + + "double min(double, double);" + "dvec2 min(dvec2, double);" + "dvec3 min(dvec3, double);" + "dvec4 min(dvec4, double);" + "dvec2 min(dvec2, dvec2);" + "dvec3 min(dvec3, dvec3);" + "dvec4 min(dvec4, dvec4);" + + "double max(double, double);" + "dvec2 max(dvec2 , double);" + "dvec3 max(dvec3 , double);" + "dvec4 max(dvec4 , double);" + "dvec2 max(dvec2 , dvec2);" + "dvec3 max(dvec3 , dvec3);" + "dvec4 max(dvec4 , dvec4);" + + "double clamp(double, double, double);" + "dvec2 clamp(dvec2 , double, double);" + "dvec3 clamp(dvec3 , double, double);" + "dvec4 clamp(dvec4 , double, double);" + "dvec2 clamp(dvec2 , dvec2 , dvec2);" + "dvec3 clamp(dvec3 , dvec3 , dvec3);" + "dvec4 clamp(dvec4 , dvec4 , dvec4);" + + "double mix(double, double, double);" + "dvec2 mix(dvec2, dvec2, double);" + "dvec3 mix(dvec3, dvec3, double);" + "dvec4 mix(dvec4, dvec4, double);" + "dvec2 mix(dvec2, dvec2, dvec2);" + "dvec3 mix(dvec3, dvec3, dvec3);" + "dvec4 mix(dvec4, dvec4, dvec4);" + "double mix(double, double, bool);" + "dvec2 mix(dvec2, dvec2, bvec2);" + "dvec3 mix(dvec3, dvec3, bvec3);" + "dvec4 mix(dvec4, dvec4, bvec4);" + + "double step(double, double);" + "dvec2 step(dvec2 , dvec2);" + "dvec3 step(dvec3 , dvec3);" + "dvec4 step(dvec4 , dvec4);" + "dvec2 step(double, dvec2);" + "dvec3 step(double, dvec3);" + "dvec4 step(double, dvec4);" + + "double smoothstep(double, double, double);" + "dvec2 smoothstep(dvec2 , dvec2 , dvec2);" + "dvec3 smoothstep(dvec3 , dvec3 , dvec3);" + "dvec4 smoothstep(dvec4 , dvec4 , dvec4);" + "dvec2 smoothstep(double, double, dvec2);" + "dvec3 smoothstep(double, double, dvec3);" + "dvec4 smoothstep(double, double, dvec4);" + + "bool isnan(double);" + "bvec2 isnan(dvec2);" + "bvec3 isnan(dvec3);" + "bvec4 isnan(dvec4);" + + "bool isinf(double);" + "bvec2 isinf(dvec2);" + "bvec3 isinf(dvec3);" + "bvec4 isinf(dvec4);" + + "double length(double);" + 
"double length(dvec2);" + "double length(dvec3);" + "double length(dvec4);" + + "double distance(double, double);" + "double distance(dvec2 , dvec2);" + "double distance(dvec3 , dvec3);" + "double distance(dvec4 , dvec4);" + + "double dot(double, double);" + "double dot(dvec2 , dvec2);" + "double dot(dvec3 , dvec3);" + "double dot(dvec4 , dvec4);" + + "dvec3 cross(dvec3, dvec3);" + + "double normalize(double);" + "dvec2 normalize(dvec2);" + "dvec3 normalize(dvec3);" + "dvec4 normalize(dvec4);" + + "double faceforward(double, double, double);" + "dvec2 faceforward(dvec2, dvec2, dvec2);" + "dvec3 faceforward(dvec3, dvec3, dvec3);" + "dvec4 faceforward(dvec4, dvec4, dvec4);" + + "double reflect(double, double);" + "dvec2 reflect(dvec2 , dvec2 );" + "dvec3 reflect(dvec3 , dvec3 );" + "dvec4 reflect(dvec4 , dvec4 );" + + "double refract(double, double, double);" + "dvec2 refract(dvec2 , dvec2 , double);" + "dvec3 refract(dvec3 , dvec3 , double);" + "dvec4 refract(dvec4 , dvec4 , double);" + + "dmat2 matrixCompMult(dmat2, dmat2);" + "dmat3 matrixCompMult(dmat3, dmat3);" + "dmat4 matrixCompMult(dmat4, dmat4);" + "dmat2x3 matrixCompMult(dmat2x3, dmat2x3);" + "dmat2x4 matrixCompMult(dmat2x4, dmat2x4);" + "dmat3x2 matrixCompMult(dmat3x2, dmat3x2);" + "dmat3x4 matrixCompMult(dmat3x4, dmat3x4);" + "dmat4x2 matrixCompMult(dmat4x2, dmat4x2);" + "dmat4x3 matrixCompMult(dmat4x3, dmat4x3);" + + "dmat2 outerProduct(dvec2, dvec2);" + "dmat3 outerProduct(dvec3, dvec3);" + "dmat4 outerProduct(dvec4, dvec4);" + "dmat2x3 outerProduct(dvec3, dvec2);" + "dmat3x2 outerProduct(dvec2, dvec3);" + "dmat2x4 outerProduct(dvec4, dvec2);" + "dmat4x2 outerProduct(dvec2, dvec4);" + "dmat3x4 outerProduct(dvec4, dvec3);" + "dmat4x3 outerProduct(dvec3, dvec4);" + + "dmat2 transpose(dmat2);" + "dmat3 transpose(dmat3);" + "dmat4 transpose(dmat4);" + "dmat2x3 transpose(dmat3x2);" + "dmat3x2 transpose(dmat2x3);" + "dmat2x4 transpose(dmat4x2);" + "dmat4x2 transpose(dmat2x4);" + "dmat3x4 transpose(dmat4x3);" + "dmat4x3 transpose(dmat3x4);" + + "double determinant(dmat2);" + "double determinant(dmat3);" + "double determinant(dmat4);" + + "dmat2 inverse(dmat2);" + "dmat3 inverse(dmat3);" + "dmat4 inverse(dmat4);" + + "bvec2 lessThan(dvec2, dvec2);" + "bvec3 lessThan(dvec3, dvec3);" + "bvec4 lessThan(dvec4, dvec4);" + + "bvec2 lessThanEqual(dvec2, dvec2);" + "bvec3 lessThanEqual(dvec3, dvec3);" + "bvec4 lessThanEqual(dvec4, dvec4);" + + "bvec2 greaterThan(dvec2, dvec2);" + "bvec3 greaterThan(dvec3, dvec3);" + "bvec4 greaterThan(dvec4, dvec4);" + + "bvec2 greaterThanEqual(dvec2, dvec2);" + "bvec3 greaterThanEqual(dvec3, dvec3);" + "bvec4 greaterThanEqual(dvec4, dvec4);" + + "bvec2 equal(dvec2, dvec2);" + "bvec3 equal(dvec3, dvec3);" + "bvec4 equal(dvec4, dvec4);" + + "bvec2 notEqual(dvec2, dvec2);" + "bvec3 notEqual(dvec3, dvec3);" + "bvec4 notEqual(dvec4, dvec4);" + + "\n"); + } + + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + + "int64_t abs(int64_t);" + "i64vec2 abs(i64vec2);" + "i64vec3 abs(i64vec3);" + "i64vec4 abs(i64vec4);" + + "int64_t sign(int64_t);" + "i64vec2 sign(i64vec2);" + "i64vec3 sign(i64vec3);" + "i64vec4 sign(i64vec4);" + + "int64_t min(int64_t, int64_t);" + "i64vec2 min(i64vec2, int64_t);" + "i64vec3 min(i64vec3, int64_t);" + "i64vec4 min(i64vec4, int64_t);" + "i64vec2 min(i64vec2, i64vec2);" + "i64vec3 min(i64vec3, i64vec3);" + "i64vec4 min(i64vec4, i64vec4);" + "uint64_t min(uint64_t, uint64_t);" + "u64vec2 min(u64vec2, uint64_t);" + "u64vec3 min(u64vec3, uint64_t);" + "u64vec4 
min(u64vec4, uint64_t);" + "u64vec2 min(u64vec2, u64vec2);" + "u64vec3 min(u64vec3, u64vec3);" + "u64vec4 min(u64vec4, u64vec4);" + + "int64_t max(int64_t, int64_t);" + "i64vec2 max(i64vec2, int64_t);" + "i64vec3 max(i64vec3, int64_t);" + "i64vec4 max(i64vec4, int64_t);" + "i64vec2 max(i64vec2, i64vec2);" + "i64vec3 max(i64vec3, i64vec3);" + "i64vec4 max(i64vec4, i64vec4);" + "uint64_t max(uint64_t, uint64_t);" + "u64vec2 max(u64vec2, uint64_t);" + "u64vec3 max(u64vec3, uint64_t);" + "u64vec4 max(u64vec4, uint64_t);" + "u64vec2 max(u64vec2, u64vec2);" + "u64vec3 max(u64vec3, u64vec3);" + "u64vec4 max(u64vec4, u64vec4);" + + "int64_t clamp(int64_t, int64_t, int64_t);" + "i64vec2 clamp(i64vec2, int64_t, int64_t);" + "i64vec3 clamp(i64vec3, int64_t, int64_t);" + "i64vec4 clamp(i64vec4, int64_t, int64_t);" + "i64vec2 clamp(i64vec2, i64vec2, i64vec2);" + "i64vec3 clamp(i64vec3, i64vec3, i64vec3);" + "i64vec4 clamp(i64vec4, i64vec4, i64vec4);" + "uint64_t clamp(uint64_t, uint64_t, uint64_t);" + "u64vec2 clamp(u64vec2, uint64_t, uint64_t);" + "u64vec3 clamp(u64vec3, uint64_t, uint64_t);" + "u64vec4 clamp(u64vec4, uint64_t, uint64_t);" + "u64vec2 clamp(u64vec2, u64vec2, u64vec2);" + "u64vec3 clamp(u64vec3, u64vec3, u64vec3);" + "u64vec4 clamp(u64vec4, u64vec4, u64vec4);" + + "int64_t mix(int64_t, int64_t, bool);" + "i64vec2 mix(i64vec2, i64vec2, bvec2);" + "i64vec3 mix(i64vec3, i64vec3, bvec3);" + "i64vec4 mix(i64vec4, i64vec4, bvec4);" + "uint64_t mix(uint64_t, uint64_t, bool);" + "u64vec2 mix(u64vec2, u64vec2, bvec2);" + "u64vec3 mix(u64vec3, u64vec3, bvec3);" + "u64vec4 mix(u64vec4, u64vec4, bvec4);" + + "int64_t doubleBitsToInt64(double);" + "i64vec2 doubleBitsToInt64(dvec2);" + "i64vec3 doubleBitsToInt64(dvec3);" + "i64vec4 doubleBitsToInt64(dvec4);" + + "uint64_t doubleBitsToUint64(double);" + "u64vec2 doubleBitsToUint64(dvec2);" + "u64vec3 doubleBitsToUint64(dvec3);" + "u64vec4 doubleBitsToUint64(dvec4);" + + "double int64BitsToDouble(int64_t);" + "dvec2 int64BitsToDouble(i64vec2);" + "dvec3 int64BitsToDouble(i64vec3);" + "dvec4 int64BitsToDouble(i64vec4);" + + "double uint64BitsToDouble(uint64_t);" + "dvec2 uint64BitsToDouble(u64vec2);" + "dvec3 uint64BitsToDouble(u64vec3);" + "dvec4 uint64BitsToDouble(u64vec4);" + + "int64_t packInt2x32(ivec2);" + "uint64_t packUint2x32(uvec2);" + "ivec2 unpackInt2x32(int64_t);" + "uvec2 unpackUint2x32(uint64_t);" + + "bvec2 lessThan(i64vec2, i64vec2);" + "bvec3 lessThan(i64vec3, i64vec3);" + "bvec4 lessThan(i64vec4, i64vec4);" + "bvec2 lessThan(u64vec2, u64vec2);" + "bvec3 lessThan(u64vec3, u64vec3);" + "bvec4 lessThan(u64vec4, u64vec4);" + + "bvec2 lessThanEqual(i64vec2, i64vec2);" + "bvec3 lessThanEqual(i64vec3, i64vec3);" + "bvec4 lessThanEqual(i64vec4, i64vec4);" + "bvec2 lessThanEqual(u64vec2, u64vec2);" + "bvec3 lessThanEqual(u64vec3, u64vec3);" + "bvec4 lessThanEqual(u64vec4, u64vec4);" + + "bvec2 greaterThan(i64vec2, i64vec2);" + "bvec3 greaterThan(i64vec3, i64vec3);" + "bvec4 greaterThan(i64vec4, i64vec4);" + "bvec2 greaterThan(u64vec2, u64vec2);" + "bvec3 greaterThan(u64vec3, u64vec3);" + "bvec4 greaterThan(u64vec4, u64vec4);" + + "bvec2 greaterThanEqual(i64vec2, i64vec2);" + "bvec3 greaterThanEqual(i64vec3, i64vec3);" + "bvec4 greaterThanEqual(i64vec4, i64vec4);" + "bvec2 greaterThanEqual(u64vec2, u64vec2);" + "bvec3 greaterThanEqual(u64vec3, u64vec3);" + "bvec4 greaterThanEqual(u64vec4, u64vec4);" + + "bvec2 equal(i64vec2, i64vec2);" + "bvec3 equal(i64vec3, i64vec3);" + "bvec4 equal(i64vec4, i64vec4);" + "bvec2 equal(u64vec2, u64vec2);" + 
"bvec3 equal(u64vec3, u64vec3);" + "bvec4 equal(u64vec4, u64vec4);" + + "bvec2 notEqual(i64vec2, i64vec2);" + "bvec3 notEqual(i64vec3, i64vec3);" + "bvec4 notEqual(i64vec4, i64vec4);" + "bvec2 notEqual(u64vec2, u64vec2);" + "bvec3 notEqual(u64vec3, u64vec3);" + "bvec4 notEqual(u64vec4, u64vec4);" + + "int64_t findLSB(int64_t);" + "i64vec2 findLSB(i64vec2);" + "i64vec3 findLSB(i64vec3);" + "i64vec4 findLSB(i64vec4);" + + "int64_t findLSB(uint64_t);" + "i64vec2 findLSB(u64vec2);" + "i64vec3 findLSB(u64vec3);" + "i64vec4 findLSB(u64vec4);" + + "int64_t findMSB(int64_t);" + "i64vec2 findMSB(i64vec2);" + "i64vec3 findMSB(i64vec3);" + "i64vec4 findMSB(i64vec4);" + + "int64_t findMSB(uint64_t);" + "i64vec2 findMSB(u64vec2);" + "i64vec3 findMSB(u64vec3);" + "i64vec4 findMSB(u64vec4);" + + "\n" + ); + } + + // GL_AMD_shader_trinary_minmax + if (profile != EEsProfile && version >= 430) { + commonBuiltins.append( + "float min3(float, float, float);" + "vec2 min3(vec2, vec2, vec2);" + "vec3 min3(vec3, vec3, vec3);" + "vec4 min3(vec4, vec4, vec4);" + + "int min3(int, int, int);" + "ivec2 min3(ivec2, ivec2, ivec2);" + "ivec3 min3(ivec3, ivec3, ivec3);" + "ivec4 min3(ivec4, ivec4, ivec4);" + + "uint min3(uint, uint, uint);" + "uvec2 min3(uvec2, uvec2, uvec2);" + "uvec3 min3(uvec3, uvec3, uvec3);" + "uvec4 min3(uvec4, uvec4, uvec4);" + + "float max3(float, float, float);" + "vec2 max3(vec2, vec2, vec2);" + "vec3 max3(vec3, vec3, vec3);" + "vec4 max3(vec4, vec4, vec4);" + + "int max3(int, int, int);" + "ivec2 max3(ivec2, ivec2, ivec2);" + "ivec3 max3(ivec3, ivec3, ivec3);" + "ivec4 max3(ivec4, ivec4, ivec4);" + + "uint max3(uint, uint, uint);" + "uvec2 max3(uvec2, uvec2, uvec2);" + "uvec3 max3(uvec3, uvec3, uvec3);" + "uvec4 max3(uvec4, uvec4, uvec4);" + + "float mid3(float, float, float);" + "vec2 mid3(vec2, vec2, vec2);" + "vec3 mid3(vec3, vec3, vec3);" + "vec4 mid3(vec4, vec4, vec4);" + + "int mid3(int, int, int);" + "ivec2 mid3(ivec2, ivec2, ivec2);" + "ivec3 mid3(ivec3, ivec3, ivec3);" + "ivec4 mid3(ivec4, ivec4, ivec4);" + + "uint mid3(uint, uint, uint);" + "uvec2 mid3(uvec2, uvec2, uvec2);" + "uvec3 mid3(uvec3, uvec3, uvec3);" + "uvec4 mid3(uvec4, uvec4, uvec4);" + + "float16_t min3(float16_t, float16_t, float16_t);" + "f16vec2 min3(f16vec2, f16vec2, f16vec2);" + "f16vec3 min3(f16vec3, f16vec3, f16vec3);" + "f16vec4 min3(f16vec4, f16vec4, f16vec4);" + + "float16_t max3(float16_t, float16_t, float16_t);" + "f16vec2 max3(f16vec2, f16vec2, f16vec2);" + "f16vec3 max3(f16vec3, f16vec3, f16vec3);" + "f16vec4 max3(f16vec4, f16vec4, f16vec4);" + + "float16_t mid3(float16_t, float16_t, float16_t);" + "f16vec2 mid3(f16vec2, f16vec2, f16vec2);" + "f16vec3 mid3(f16vec3, f16vec3, f16vec3);" + "f16vec4 mid3(f16vec4, f16vec4, f16vec4);" + + "int16_t min3(int16_t, int16_t, int16_t);" + "i16vec2 min3(i16vec2, i16vec2, i16vec2);" + "i16vec3 min3(i16vec3, i16vec3, i16vec3);" + "i16vec4 min3(i16vec4, i16vec4, i16vec4);" + + "int16_t max3(int16_t, int16_t, int16_t);" + "i16vec2 max3(i16vec2, i16vec2, i16vec2);" + "i16vec3 max3(i16vec3, i16vec3, i16vec3);" + "i16vec4 max3(i16vec4, i16vec4, i16vec4);" + + "int16_t mid3(int16_t, int16_t, int16_t);" + "i16vec2 mid3(i16vec2, i16vec2, i16vec2);" + "i16vec3 mid3(i16vec3, i16vec3, i16vec3);" + "i16vec4 mid3(i16vec4, i16vec4, i16vec4);" + + "uint16_t min3(uint16_t, uint16_t, uint16_t);" + "u16vec2 min3(u16vec2, u16vec2, u16vec2);" + "u16vec3 min3(u16vec3, u16vec3, u16vec3);" + "u16vec4 min3(u16vec4, u16vec4, u16vec4);" + + "uint16_t max3(uint16_t, uint16_t, uint16_t);" + 
"u16vec2 max3(u16vec2, u16vec2, u16vec2);" + "u16vec3 max3(u16vec3, u16vec3, u16vec3);" + "u16vec4 max3(u16vec4, u16vec4, u16vec4);" + + "uint16_t mid3(uint16_t, uint16_t, uint16_t);" + "u16vec2 mid3(u16vec2, u16vec2, u16vec2);" + "u16vec3 mid3(u16vec3, u16vec3, u16vec3);" + "u16vec4 mid3(u16vec4, u16vec4, u16vec4);" + + "\n" + ); + } + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 430)) { + commonBuiltins.append( + "uint atomicAdd(coherent volatile inout uint, uint, int, int, int);" + " int atomicAdd(coherent volatile inout int, int, int, int, int);" + + "uint atomicMin(coherent volatile inout uint, uint, int, int, int);" + " int atomicMin(coherent volatile inout int, int, int, int, int);" + + "uint atomicMax(coherent volatile inout uint, uint, int, int, int);" + " int atomicMax(coherent volatile inout int, int, int, int, int);" + + "uint atomicAnd(coherent volatile inout uint, uint, int, int, int);" + " int atomicAnd(coherent volatile inout int, int, int, int, int);" + + "uint atomicOr (coherent volatile inout uint, uint, int, int, int);" + " int atomicOr (coherent volatile inout int, int, int, int, int);" + + "uint atomicXor(coherent volatile inout uint, uint, int, int, int);" + " int atomicXor(coherent volatile inout int, int, int, int, int);" + + "uint atomicExchange(coherent volatile inout uint, uint, int, int, int);" + " int atomicExchange(coherent volatile inout int, int, int, int, int);" + + "uint atomicCompSwap(coherent volatile inout uint, uint, uint, int, int, int, int, int);" + " int atomicCompSwap(coherent volatile inout int, int, int, int, int, int, int, int);" + + "uint atomicLoad(coherent volatile in uint, int, int, int);" + " int atomicLoad(coherent volatile in int, int, int, int);" + + "void atomicStore(coherent volatile out uint, uint, int, int, int);" + "void atomicStore(coherent volatile out int, int, int, int, int);" + + "\n"); + } + + if (profile != EEsProfile && version >= 440) { + commonBuiltins.append( + "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicMin(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicMin(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicMax(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicMax(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicAnd(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicAnd(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicOr (coherent volatile inout int64_t, int64_t);" + "uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicOr (coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicXor(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicXor(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicAdd(coherent volatile inout 
uint64_t, uint64_t);" + " int64_t atomicAdd(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicAdd(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicExchange(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicExchange(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t);" + " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t);" + "uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t, int, int, int, int, int);" + " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t, int, int, int, int, int);" + + "uint64_t atomicLoad(coherent volatile in uint64_t, int, int, int);" + " int64_t atomicLoad(coherent volatile in int64_t, int, int, int);" + + "void atomicStore(coherent volatile out uint64_t, uint64_t, int, int, int);" + "void atomicStore(coherent volatile out int64_t, int64_t, int, int, int);" + "\n"); + } +#endif + + if ((profile == EEsProfile && version >= 300) || + (profile != EEsProfile && version >= 150)) { // GL_ARB_shader_bit_encoding + commonBuiltins.append( + "int floatBitsToInt(highp float value);" + "ivec2 floatBitsToInt(highp vec2 value);" + "ivec3 floatBitsToInt(highp vec3 value);" + "ivec4 floatBitsToInt(highp vec4 value);" + + "uint floatBitsToUint(highp float value);" + "uvec2 floatBitsToUint(highp vec2 value);" + "uvec3 floatBitsToUint(highp vec3 value);" + "uvec4 floatBitsToUint(highp vec4 value);" + + "float intBitsToFloat(highp int value);" + "vec2 intBitsToFloat(highp ivec2 value);" + "vec3 intBitsToFloat(highp ivec3 value);" + "vec4 intBitsToFloat(highp ivec4 value);" + + "float uintBitsToFloat(highp uint value);" + "vec2 uintBitsToFloat(highp uvec2 value);" + "vec3 uintBitsToFloat(highp uvec3 value);" + "vec4 uintBitsToFloat(highp uvec4 value);" + + "\n"); + } + +#ifndef GLSLANG_WEB + if ((profile != EEsProfile && version >= 400) || + (profile == EEsProfile && version >= 310)) { // GL_OES_gpu_shader5 + + commonBuiltins.append( + "float fma(float, float, float );" + "vec2 fma(vec2, vec2, vec2 );" + "vec3 fma(vec3, vec3, vec3 );" + "vec4 fma(vec4, vec4, vec4 );" + "\n"); + } + + if (profile != EEsProfile && version >= 150) { // ARB_gpu_shader_fp64 + commonBuiltins.append( + "double fma(double, double, double);" + "dvec2 fma(dvec2, dvec2, dvec2 );" + "dvec3 fma(dvec3, dvec3, dvec3 );" + "dvec4 fma(dvec4, dvec4, dvec4 );" + "\n"); + } + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 400)) { + commonBuiltins.append( + "float frexp(highp float, out highp int);" + "vec2 frexp(highp vec2, out highp ivec2);" + "vec3 frexp(highp vec3, out highp ivec3);" + "vec4 frexp(highp vec4, out highp ivec4);" + + "float ldexp(highp float, highp int);" + "vec2 ldexp(highp vec2, highp ivec2);" + "vec3 ldexp(highp vec3, highp ivec3);" + "vec4 ldexp(highp vec4, highp ivec4);" + + "\n"); + } + + if (profile != EEsProfile && version >= 150) { // ARB_gpu_shader_fp64 + commonBuiltins.append( + "double frexp(double, out int);" + "dvec2 frexp( dvec2, out ivec2);" + "dvec3 frexp( dvec3, out ivec3);" + "dvec4 frexp( dvec4, out ivec4);" + + "double ldexp(double, int);" + "dvec2 ldexp( dvec2, ivec2);" + "dvec3 ldexp( 
dvec3, ivec3);" + "dvec4 ldexp( dvec4, ivec4);" + + "double packDouble2x32(uvec2);" + "uvec2 unpackDouble2x32(double);" + + "\n"); + } +#endif + + if ((profile == EEsProfile && version >= 300) || + (profile != EEsProfile && version >= 150)) { + commonBuiltins.append( + "highp uint packUnorm2x16(vec2);" + "vec2 unpackUnorm2x16(highp uint);" + "\n"); + } + + if ((profile == EEsProfile && version >= 300) || + (profile != EEsProfile && version >= 150)) { + commonBuiltins.append( + "highp uint packSnorm2x16(vec2);" + " vec2 unpackSnorm2x16(highp uint);" + "highp uint packHalf2x16(vec2);" + "\n"); + } + + if (profile == EEsProfile && version >= 300) { + commonBuiltins.append( + "mediump vec2 unpackHalf2x16(highp uint);" + "\n"); + } else if (profile != EEsProfile && version >= 150) { + commonBuiltins.append( + " vec2 unpackHalf2x16(highp uint);" + "\n"); + } + +#ifndef GLSLANG_WEB + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 150)) { + commonBuiltins.append( + "highp uint packSnorm4x8(vec4);" + "highp uint packUnorm4x8(vec4);" + "\n"); + } + + if (profile == EEsProfile && version >= 310) { + commonBuiltins.append( + "mediump vec4 unpackSnorm4x8(highp uint);" + "mediump vec4 unpackUnorm4x8(highp uint);" + "\n"); + } else if (profile != EEsProfile && version >= 150) { + commonBuiltins.append( + "vec4 unpackSnorm4x8(highp uint);" + "vec4 unpackUnorm4x8(highp uint);" + "\n"); + } +#endif + + // + // Matrix Functions. + // + commonBuiltins.append( + "mat2 matrixCompMult(mat2 x, mat2 y);" + "mat3 matrixCompMult(mat3 x, mat3 y);" + "mat4 matrixCompMult(mat4 x, mat4 y);" + + "\n"); + + // 120 is correct for both ES and desktop + if (version >= 120) { + commonBuiltins.append( + "mat2 outerProduct(vec2 c, vec2 r);" + "mat3 outerProduct(vec3 c, vec3 r);" + "mat4 outerProduct(vec4 c, vec4 r);" + "mat2x3 outerProduct(vec3 c, vec2 r);" + "mat3x2 outerProduct(vec2 c, vec3 r);" + "mat2x4 outerProduct(vec4 c, vec2 r);" + "mat4x2 outerProduct(vec2 c, vec4 r);" + "mat3x4 outerProduct(vec4 c, vec3 r);" + "mat4x3 outerProduct(vec3 c, vec4 r);" + + "mat2 transpose(mat2 m);" + "mat3 transpose(mat3 m);" + "mat4 transpose(mat4 m);" + "mat2x3 transpose(mat3x2 m);" + "mat3x2 transpose(mat2x3 m);" + "mat2x4 transpose(mat4x2 m);" + "mat4x2 transpose(mat2x4 m);" + "mat3x4 transpose(mat4x3 m);" + "mat4x3 transpose(mat3x4 m);" + + "mat2x3 matrixCompMult(mat2x3, mat2x3);" + "mat2x4 matrixCompMult(mat2x4, mat2x4);" + "mat3x2 matrixCompMult(mat3x2, mat3x2);" + "mat3x4 matrixCompMult(mat3x4, mat3x4);" + "mat4x2 matrixCompMult(mat4x2, mat4x2);" + "mat4x3 matrixCompMult(mat4x3, mat4x3);" + + "\n"); + + // 150 is correct for both ES and desktop + if (version >= 150) { + commonBuiltins.append( + "float determinant(mat2 m);" + "float determinant(mat3 m);" + "float determinant(mat4 m);" + + "mat2 inverse(mat2 m);" + "mat3 inverse(mat3 m);" + "mat4 inverse(mat4 m);" + + "\n"); + } + } + +#ifndef GLSLANG_WEB + // + // Original-style texture functions existing in all stages. + // (Per-stage functions below.) 
+ // + if ((profile == EEsProfile && version == 100) || + profile == ECompatibilityProfile || + (profile == ECoreProfile && version < 420) || + profile == ENoProfile) { + if (spvVersion.spv == 0) { + commonBuiltins.append( + "vec4 texture2D(sampler2D, vec2);" + + "vec4 texture2DProj(sampler2D, vec3);" + "vec4 texture2DProj(sampler2D, vec4);" + + "vec4 texture3D(sampler3D, vec3);" // OES_texture_3D, but caught by keyword check + "vec4 texture3DProj(sampler3D, vec4);" // OES_texture_3D, but caught by keyword check + + "vec4 textureCube(samplerCube, vec3);" + + "\n"); + } + } + + if ( profile == ECompatibilityProfile || + (profile == ECoreProfile && version < 420) || + profile == ENoProfile) { + if (spvVersion.spv == 0) { + commonBuiltins.append( + "vec4 texture1D(sampler1D, float);" + + "vec4 texture1DProj(sampler1D, vec2);" + "vec4 texture1DProj(sampler1D, vec4);" + + "vec4 shadow1D(sampler1DShadow, vec3);" + "vec4 shadow2D(sampler2DShadow, vec3);" + "vec4 shadow1DProj(sampler1DShadow, vec4);" + "vec4 shadow2DProj(sampler2DShadow, vec4);" + + "vec4 texture2DRect(sampler2DRect, vec2);" // GL_ARB_texture_rectangle, caught by keyword check + "vec4 texture2DRectProj(sampler2DRect, vec3);" // GL_ARB_texture_rectangle, caught by keyword check + "vec4 texture2DRectProj(sampler2DRect, vec4);" // GL_ARB_texture_rectangle, caught by keyword check + "vec4 shadow2DRect(sampler2DRectShadow, vec3);" // GL_ARB_texture_rectangle, caught by keyword check + "vec4 shadow2DRectProj(sampler2DRectShadow, vec4);" // GL_ARB_texture_rectangle, caught by keyword check + + "\n"); + } + } + + if (profile == EEsProfile) { + if (spvVersion.spv == 0) { + if (version < 300) { + commonBuiltins.append( + "vec4 texture2D(samplerExternalOES, vec2 coord);" // GL_OES_EGL_image_external + "vec4 texture2DProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external + "vec4 texture2DProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external + "\n"); + } else { + commonBuiltins.append( + "highp ivec2 textureSize(samplerExternalOES, int lod);" // GL_OES_EGL_image_external_essl3 + "vec4 texture(samplerExternalOES, vec2);" // GL_OES_EGL_image_external_essl3 + "vec4 texture(samplerExternalOES, vec2, float bias);" // GL_OES_EGL_image_external_essl3 + "vec4 textureProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external_essl3 + "vec4 textureProj(samplerExternalOES, vec3, float bias);" // GL_OES_EGL_image_external_essl3 + "vec4 textureProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external_essl3 + "vec4 textureProj(samplerExternalOES, vec4, float bias);" // GL_OES_EGL_image_external_essl3 + "vec4 texelFetch(samplerExternalOES, ivec2, int lod);" // GL_OES_EGL_image_external_essl3 + "\n"); + } + commonBuiltins.append( + "highp ivec2 textureSize(__samplerExternal2DY2YEXT, int lod);" // GL_EXT_YUV_target + "vec4 texture(__samplerExternal2DY2YEXT, vec2);" // GL_EXT_YUV_target + "vec4 texture(__samplerExternal2DY2YEXT, vec2, float bias);" // GL_EXT_YUV_target + "vec4 textureProj(__samplerExternal2DY2YEXT, vec3);" // GL_EXT_YUV_target + "vec4 textureProj(__samplerExternal2DY2YEXT, vec3, float bias);" // GL_EXT_YUV_target + "vec4 textureProj(__samplerExternal2DY2YEXT, vec4);" // GL_EXT_YUV_target + "vec4 textureProj(__samplerExternal2DY2YEXT, vec4, float bias);" // GL_EXT_YUV_target + "vec4 texelFetch(__samplerExternal2DY2YEXT sampler, ivec2, int lod);" // GL_EXT_YUV_target + "\n"); + commonBuiltins.append( + "vec4 texture2DGradEXT(sampler2D, vec2, vec2, vec2);" // GL_EXT_shader_texture_lod + "vec4 texture2DProjGradEXT(sampler2D, 
vec3, vec2, vec2);" // GL_EXT_shader_texture_lod + "vec4 texture2DProjGradEXT(sampler2D, vec4, vec2, vec2);" // GL_EXT_shader_texture_lod + "vec4 textureCubeGradEXT(samplerCube, vec3, vec3, vec3);" // GL_EXT_shader_texture_lod + + "float shadow2DEXT(sampler2DShadow, vec3);" // GL_EXT_shadow_samplers + "float shadow2DProjEXT(sampler2DShadow, vec4);" // GL_EXT_shadow_samplers + + "\n"); + } + } + + // + // Noise functions. + // + if (spvVersion.spv == 0 && profile != EEsProfile) { + commonBuiltins.append( + "float noise1(float x);" + "float noise1(vec2 x);" + "float noise1(vec3 x);" + "float noise1(vec4 x);" + + "vec2 noise2(float x);" + "vec2 noise2(vec2 x);" + "vec2 noise2(vec3 x);" + "vec2 noise2(vec4 x);" + + "vec3 noise3(float x);" + "vec3 noise3(vec2 x);" + "vec3 noise3(vec3 x);" + "vec3 noise3(vec4 x);" + + "vec4 noise4(float x);" + "vec4 noise4(vec2 x);" + "vec4 noise4(vec3 x);" + "vec4 noise4(vec4 x);" + + "\n"); + } + + if (spvVersion.vulkan == 0) { + // + // Atomic counter functions. + // + if ((profile != EEsProfile && version >= 300) || + (profile == EEsProfile && version >= 310)) { + commonBuiltins.append( + "uint atomicCounterIncrement(atomic_uint);" + "uint atomicCounterDecrement(atomic_uint);" + "uint atomicCounter(atomic_uint);" + + "\n"); + } + if (profile != EEsProfile && version >= 460) { + commonBuiltins.append( + "uint atomicCounterAdd(atomic_uint, uint);" + "uint atomicCounterSubtract(atomic_uint, uint);" + "uint atomicCounterMin(atomic_uint, uint);" + "uint atomicCounterMax(atomic_uint, uint);" + "uint atomicCounterAnd(atomic_uint, uint);" + "uint atomicCounterOr(atomic_uint, uint);" + "uint atomicCounterXor(atomic_uint, uint);" + "uint atomicCounterExchange(atomic_uint, uint);" + "uint atomicCounterCompSwap(atomic_uint, uint, uint);" + + "\n"); + } + } + + // Bitfield + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 400)) { + commonBuiltins.append( + " int bitfieldExtract( int, int, int);" + "ivec2 bitfieldExtract(ivec2, int, int);" + "ivec3 bitfieldExtract(ivec3, int, int);" + "ivec4 bitfieldExtract(ivec4, int, int);" + + " uint bitfieldExtract( uint, int, int);" + "uvec2 bitfieldExtract(uvec2, int, int);" + "uvec3 bitfieldExtract(uvec3, int, int);" + "uvec4 bitfieldExtract(uvec4, int, int);" + + " int bitfieldInsert( int base, int, int, int);" + "ivec2 bitfieldInsert(ivec2 base, ivec2, int, int);" + "ivec3 bitfieldInsert(ivec3 base, ivec3, int, int);" + "ivec4 bitfieldInsert(ivec4 base, ivec4, int, int);" + + " uint bitfieldInsert( uint base, uint, int, int);" + "uvec2 bitfieldInsert(uvec2 base, uvec2, int, int);" + "uvec3 bitfieldInsert(uvec3 base, uvec3, int, int);" + "uvec4 bitfieldInsert(uvec4 base, uvec4, int, int);" + + "\n"); + } + + if (profile != EEsProfile && version >= 400) { + commonBuiltins.append( + " int findLSB( int);" + "ivec2 findLSB(ivec2);" + "ivec3 findLSB(ivec3);" + "ivec4 findLSB(ivec4);" + + " int findLSB( uint);" + "ivec2 findLSB(uvec2);" + "ivec3 findLSB(uvec3);" + "ivec4 findLSB(uvec4);" + + "\n"); + } else if (profile == EEsProfile && version >= 310) { + commonBuiltins.append( + "lowp int findLSB( int);" + "lowp ivec2 findLSB(ivec2);" + "lowp ivec3 findLSB(ivec3);" + "lowp ivec4 findLSB(ivec4);" + + "lowp int findLSB( uint);" + "lowp ivec2 findLSB(uvec2);" + "lowp ivec3 findLSB(uvec3);" + "lowp ivec4 findLSB(uvec4);" + + "\n"); + } + + if (profile != EEsProfile && version >= 400) { + commonBuiltins.append( + " int bitCount( int);" + "ivec2 bitCount(ivec2);" + "ivec3 bitCount(ivec3);" + "ivec4 
bitCount(ivec4);" + + " int bitCount( uint);" + "ivec2 bitCount(uvec2);" + "ivec3 bitCount(uvec3);" + "ivec4 bitCount(uvec4);" + + " int findMSB(highp int);" + "ivec2 findMSB(highp ivec2);" + "ivec3 findMSB(highp ivec3);" + "ivec4 findMSB(highp ivec4);" + + " int findMSB(highp uint);" + "ivec2 findMSB(highp uvec2);" + "ivec3 findMSB(highp uvec3);" + "ivec4 findMSB(highp uvec4);" + + "\n"); + } + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 400)) { + commonBuiltins.append( + " uint uaddCarry(highp uint, highp uint, out lowp uint carry);" + "uvec2 uaddCarry(highp uvec2, highp uvec2, out lowp uvec2 carry);" + "uvec3 uaddCarry(highp uvec3, highp uvec3, out lowp uvec3 carry);" + "uvec4 uaddCarry(highp uvec4, highp uvec4, out lowp uvec4 carry);" + + " uint usubBorrow(highp uint, highp uint, out lowp uint borrow);" + "uvec2 usubBorrow(highp uvec2, highp uvec2, out lowp uvec2 borrow);" + "uvec3 usubBorrow(highp uvec3, highp uvec3, out lowp uvec3 borrow);" + "uvec4 usubBorrow(highp uvec4, highp uvec4, out lowp uvec4 borrow);" + + "void umulExtended(highp uint, highp uint, out highp uint, out highp uint lsb);" + "void umulExtended(highp uvec2, highp uvec2, out highp uvec2, out highp uvec2 lsb);" + "void umulExtended(highp uvec3, highp uvec3, out highp uvec3, out highp uvec3 lsb);" + "void umulExtended(highp uvec4, highp uvec4, out highp uvec4, out highp uvec4 lsb);" + + "void imulExtended(highp int, highp int, out highp int, out highp int lsb);" + "void imulExtended(highp ivec2, highp ivec2, out highp ivec2, out highp ivec2 lsb);" + "void imulExtended(highp ivec3, highp ivec3, out highp ivec3, out highp ivec3 lsb);" + "void imulExtended(highp ivec4, highp ivec4, out highp ivec4, out highp ivec4 lsb);" + + " int bitfieldReverse(highp int);" + "ivec2 bitfieldReverse(highp ivec2);" + "ivec3 bitfieldReverse(highp ivec3);" + "ivec4 bitfieldReverse(highp ivec4);" + + " uint bitfieldReverse(highp uint);" + "uvec2 bitfieldReverse(highp uvec2);" + "uvec3 bitfieldReverse(highp uvec3);" + "uvec4 bitfieldReverse(highp uvec4);" + + "\n"); + } + + if (profile == EEsProfile && version >= 310) { + commonBuiltins.append( + "lowp int bitCount( int);" + "lowp ivec2 bitCount(ivec2);" + "lowp ivec3 bitCount(ivec3);" + "lowp ivec4 bitCount(ivec4);" + + "lowp int bitCount( uint);" + "lowp ivec2 bitCount(uvec2);" + "lowp ivec3 bitCount(uvec3);" + "lowp ivec4 bitCount(uvec4);" + + "lowp int findMSB(highp int);" + "lowp ivec2 findMSB(highp ivec2);" + "lowp ivec3 findMSB(highp ivec3);" + "lowp ivec4 findMSB(highp ivec4);" + + "lowp int findMSB(highp uint);" + "lowp ivec2 findMSB(highp uvec2);" + "lowp ivec3 findMSB(highp uvec3);" + "lowp ivec4 findMSB(highp uvec4);" + + "\n"); + } + + // GL_ARB_shader_ballot + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "uint64_t ballotARB(bool);" + + "float readInvocationARB(float, uint);" + "vec2 readInvocationARB(vec2, uint);" + "vec3 readInvocationARB(vec3, uint);" + "vec4 readInvocationARB(vec4, uint);" + + "int readInvocationARB(int, uint);" + "ivec2 readInvocationARB(ivec2, uint);" + "ivec3 readInvocationARB(ivec3, uint);" + "ivec4 readInvocationARB(ivec4, uint);" + + "uint readInvocationARB(uint, uint);" + "uvec2 readInvocationARB(uvec2, uint);" + "uvec3 readInvocationARB(uvec3, uint);" + "uvec4 readInvocationARB(uvec4, uint);" + + "float readFirstInvocationARB(float);" + "vec2 readFirstInvocationARB(vec2);" + "vec3 readFirstInvocationARB(vec3);" + "vec4 readFirstInvocationARB(vec4);" + + "int 
readFirstInvocationARB(int);" + "ivec2 readFirstInvocationARB(ivec2);" + "ivec3 readFirstInvocationARB(ivec3);" + "ivec4 readFirstInvocationARB(ivec4);" + + "uint readFirstInvocationARB(uint);" + "uvec2 readFirstInvocationARB(uvec2);" + "uvec3 readFirstInvocationARB(uvec3);" + "uvec4 readFirstInvocationARB(uvec4);" + + "\n"); + } + + // GL_ARB_shader_group_vote + if (profile != EEsProfile && version >= 430) { + commonBuiltins.append( + "bool anyInvocationARB(bool);" + "bool allInvocationsARB(bool);" + "bool allInvocationsEqualARB(bool);" + + "\n"); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + commonBuiltins.append( + "void subgroupBarrier();" + "void subgroupMemoryBarrier();" + "void subgroupMemoryBarrierBuffer();" + "void subgroupMemoryBarrierImage();" + "bool subgroupElect();" + + "bool subgroupAll(bool);\n" + "bool subgroupAny(bool);\n" + "uvec4 subgroupBallot(bool);\n" + "bool subgroupInverseBallot(uvec4);\n" + "bool subgroupBallotBitExtract(uvec4, uint);\n" + "uint subgroupBallotBitCount(uvec4);\n" + "uint subgroupBallotInclusiveBitCount(uvec4);\n" + "uint subgroupBallotExclusiveBitCount(uvec4);\n" + "uint subgroupBallotFindLSB(uvec4);\n" + "uint subgroupBallotFindMSB(uvec4);\n" + ); + + // Generate all flavors of subgroup ops. + static const char *subgroupOps[] = + { + "bool subgroupAllEqual(%s);\n", + "%s subgroupBroadcast(%s, uint);\n", + "%s subgroupBroadcastFirst(%s);\n", + "%s subgroupShuffle(%s, uint);\n", + "%s subgroupShuffleXor(%s, uint);\n", + "%s subgroupShuffleUp(%s, uint delta);\n", + "%s subgroupShuffleDown(%s, uint delta);\n", + "%s subgroupAdd(%s);\n", + "%s subgroupMul(%s);\n", + "%s subgroupMin(%s);\n", + "%s subgroupMax(%s);\n", + "%s subgroupAnd(%s);\n", + "%s subgroupOr(%s);\n", + "%s subgroupXor(%s);\n", + "%s subgroupInclusiveAdd(%s);\n", + "%s subgroupInclusiveMul(%s);\n", + "%s subgroupInclusiveMin(%s);\n", + "%s subgroupInclusiveMax(%s);\n", + "%s subgroupInclusiveAnd(%s);\n", + "%s subgroupInclusiveOr(%s);\n", + "%s subgroupInclusiveXor(%s);\n", + "%s subgroupExclusiveAdd(%s);\n", + "%s subgroupExclusiveMul(%s);\n", + "%s subgroupExclusiveMin(%s);\n", + "%s subgroupExclusiveMax(%s);\n", + "%s subgroupExclusiveAnd(%s);\n", + "%s subgroupExclusiveOr(%s);\n", + "%s subgroupExclusiveXor(%s);\n", + "%s subgroupClusteredAdd(%s, uint);\n", + "%s subgroupClusteredMul(%s, uint);\n", + "%s subgroupClusteredMin(%s, uint);\n", + "%s subgroupClusteredMax(%s, uint);\n", + "%s subgroupClusteredAnd(%s, uint);\n", + "%s subgroupClusteredOr(%s, uint);\n", + "%s subgroupClusteredXor(%s, uint);\n", + "%s subgroupQuadBroadcast(%s, uint);\n", + "%s subgroupQuadSwapHorizontal(%s);\n", + "%s subgroupQuadSwapVertical(%s);\n", + "%s subgroupQuadSwapDiagonal(%s);\n", + "uvec4 subgroupPartitionNV(%s);\n", + "%s subgroupPartitionedAddNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedMulNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedMinNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedMaxNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedAndNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedOrNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedXorNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveAddNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveMulNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveMinNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveMaxNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveAndNV(%s, uvec4 ballot);\n", + "%s 
subgroupPartitionedInclusiveOrNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveXorNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveAddNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveMulNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveMinNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveMaxNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveAndNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveOrNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveXorNV(%s, uvec4 ballot);\n", + }; + + static const char *floatTypes[] = { + "float", "vec2", "vec3", "vec4", + "float16_t", "f16vec2", "f16vec3", "f16vec4", + }; + static const char *doubleTypes[] = { + "double", "dvec2", "dvec3", "dvec4", + }; + static const char *intTypes[] = { + "int8_t", "i8vec2", "i8vec3", "i8vec4", + "int16_t", "i16vec2", "i16vec3", "i16vec4", + "int", "ivec2", "ivec3", "ivec4", + "int64_t", "i64vec2", "i64vec3", "i64vec4", + "uint8_t", "u8vec2", "u8vec3", "u8vec4", + "uint16_t", "u16vec2", "u16vec3", "u16vec4", + "uint", "uvec2", "uvec3", "uvec4", + "uint64_t", "u64vec2", "u64vec3", "u64vec4", + }; + static const char *boolTypes[] = { + "bool", "bvec2", "bvec3", "bvec4", + }; + + for (size_t i = 0; i < sizeof(subgroupOps)/sizeof(subgroupOps[0]); ++i) { + const char *op = subgroupOps[i]; + + // Logical operations don't support float + bool logicalOp = strstr(op, "Or") || strstr(op, "And") || + (strstr(op, "Xor") && !strstr(op, "ShuffleXor")); + // Math operations don't support bool + bool mathOp = strstr(op, "Add") || strstr(op, "Mul") || strstr(op, "Min") || strstr(op, "Max"); + + const int bufSize = 256; + char buf[bufSize]; + + if (!logicalOp) { + for (size_t j = 0; j < sizeof(floatTypes)/sizeof(floatTypes[0]); ++j) { + snprintf(buf, bufSize, op, floatTypes[j], floatTypes[j]); + commonBuiltins.append(buf); + } + if (profile != EEsProfile && version >= 400) { + for (size_t j = 0; j < sizeof(doubleTypes)/sizeof(doubleTypes[0]); ++j) { + snprintf(buf, bufSize, op, doubleTypes[j], doubleTypes[j]); + commonBuiltins.append(buf); + } + } + } + if (!mathOp) { + for (size_t j = 0; j < sizeof(boolTypes)/sizeof(boolTypes[0]); ++j) { + snprintf(buf, bufSize, op, boolTypes[j], boolTypes[j]); + commonBuiltins.append(buf); + } + } + for (size_t j = 0; j < sizeof(intTypes)/sizeof(intTypes[0]); ++j) { + snprintf(buf, bufSize, op, intTypes[j], intTypes[j]); + commonBuiltins.append(buf); + } + } + + stageBuiltins[EShLangCompute].append( + "void subgroupMemoryBarrierShared();" + + "\n" + ); + stageBuiltins[EShLangMeshNV].append( + "void subgroupMemoryBarrierShared();" + "\n" + ); + stageBuiltins[EShLangTaskNV].append( + "void subgroupMemoryBarrierShared();" + "\n" + ); + } + + if (profile != EEsProfile && version >= 460) { + commonBuiltins.append( + "bool anyInvocation(bool);" + "bool allInvocations(bool);" + "bool allInvocationsEqual(bool);" + + "\n"); + } + + // GL_AMD_shader_ballot + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "float minInvocationsAMD(float);" + "vec2 minInvocationsAMD(vec2);" + "vec3 minInvocationsAMD(vec3);" + "vec4 minInvocationsAMD(vec4);" + + "int minInvocationsAMD(int);" + "ivec2 minInvocationsAMD(ivec2);" + "ivec3 minInvocationsAMD(ivec3);" + "ivec4 minInvocationsAMD(ivec4);" + + "uint minInvocationsAMD(uint);" + "uvec2 minInvocationsAMD(uvec2);" + "uvec3 minInvocationsAMD(uvec3);" + "uvec4 minInvocationsAMD(uvec4);" + + "double minInvocationsAMD(double);" + "dvec2 minInvocationsAMD(dvec2);" + "dvec3 
minInvocationsAMD(dvec3);" + "dvec4 minInvocationsAMD(dvec4);" + + "int64_t minInvocationsAMD(int64_t);" + "i64vec2 minInvocationsAMD(i64vec2);" + "i64vec3 minInvocationsAMD(i64vec3);" + "i64vec4 minInvocationsAMD(i64vec4);" + + "uint64_t minInvocationsAMD(uint64_t);" + "u64vec2 minInvocationsAMD(u64vec2);" + "u64vec3 minInvocationsAMD(u64vec3);" + "u64vec4 minInvocationsAMD(u64vec4);" + + "float16_t minInvocationsAMD(float16_t);" + "f16vec2 minInvocationsAMD(f16vec2);" + "f16vec3 minInvocationsAMD(f16vec3);" + "f16vec4 minInvocationsAMD(f16vec4);" + + "int16_t minInvocationsAMD(int16_t);" + "i16vec2 minInvocationsAMD(i16vec2);" + "i16vec3 minInvocationsAMD(i16vec3);" + "i16vec4 minInvocationsAMD(i16vec4);" + + "uint16_t minInvocationsAMD(uint16_t);" + "u16vec2 minInvocationsAMD(u16vec2);" + "u16vec3 minInvocationsAMD(u16vec3);" + "u16vec4 minInvocationsAMD(u16vec4);" + + "float minInvocationsInclusiveScanAMD(float);" + "vec2 minInvocationsInclusiveScanAMD(vec2);" + "vec3 minInvocationsInclusiveScanAMD(vec3);" + "vec4 minInvocationsInclusiveScanAMD(vec4);" + + "int minInvocationsInclusiveScanAMD(int);" + "ivec2 minInvocationsInclusiveScanAMD(ivec2);" + "ivec3 minInvocationsInclusiveScanAMD(ivec3);" + "ivec4 minInvocationsInclusiveScanAMD(ivec4);" + + "uint minInvocationsInclusiveScanAMD(uint);" + "uvec2 minInvocationsInclusiveScanAMD(uvec2);" + "uvec3 minInvocationsInclusiveScanAMD(uvec3);" + "uvec4 minInvocationsInclusiveScanAMD(uvec4);" + + "double minInvocationsInclusiveScanAMD(double);" + "dvec2 minInvocationsInclusiveScanAMD(dvec2);" + "dvec3 minInvocationsInclusiveScanAMD(dvec3);" + "dvec4 minInvocationsInclusiveScanAMD(dvec4);" + + "int64_t minInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 minInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 minInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 minInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t minInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 minInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 minInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 minInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t minInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 minInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 minInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 minInvocationsInclusiveScanAMD(f16vec4);" + + "int16_t minInvocationsInclusiveScanAMD(int16_t);" + "i16vec2 minInvocationsInclusiveScanAMD(i16vec2);" + "i16vec3 minInvocationsInclusiveScanAMD(i16vec3);" + "i16vec4 minInvocationsInclusiveScanAMD(i16vec4);" + + "uint16_t minInvocationsInclusiveScanAMD(uint16_t);" + "u16vec2 minInvocationsInclusiveScanAMD(u16vec2);" + "u16vec3 minInvocationsInclusiveScanAMD(u16vec3);" + "u16vec4 minInvocationsInclusiveScanAMD(u16vec4);" + + "float minInvocationsExclusiveScanAMD(float);" + "vec2 minInvocationsExclusiveScanAMD(vec2);" + "vec3 minInvocationsExclusiveScanAMD(vec3);" + "vec4 minInvocationsExclusiveScanAMD(vec4);" + + "int minInvocationsExclusiveScanAMD(int);" + "ivec2 minInvocationsExclusiveScanAMD(ivec2);" + "ivec3 minInvocationsExclusiveScanAMD(ivec3);" + "ivec4 minInvocationsExclusiveScanAMD(ivec4);" + + "uint minInvocationsExclusiveScanAMD(uint);" + "uvec2 minInvocationsExclusiveScanAMD(uvec2);" + "uvec3 minInvocationsExclusiveScanAMD(uvec3);" + "uvec4 minInvocationsExclusiveScanAMD(uvec4);" + + "double minInvocationsExclusiveScanAMD(double);" + "dvec2 minInvocationsExclusiveScanAMD(dvec2);" + "dvec3 minInvocationsExclusiveScanAMD(dvec3);" + "dvec4 minInvocationsExclusiveScanAMD(dvec4);" + + "int64_t 
minInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 minInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 minInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 minInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t minInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 minInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 minInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 minInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t minInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 minInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 minInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 minInvocationsExclusiveScanAMD(f16vec4);" + + "int16_t minInvocationsExclusiveScanAMD(int16_t);" + "i16vec2 minInvocationsExclusiveScanAMD(i16vec2);" + "i16vec3 minInvocationsExclusiveScanAMD(i16vec3);" + "i16vec4 minInvocationsExclusiveScanAMD(i16vec4);" + + "uint16_t minInvocationsExclusiveScanAMD(uint16_t);" + "u16vec2 minInvocationsExclusiveScanAMD(u16vec2);" + "u16vec3 minInvocationsExclusiveScanAMD(u16vec3);" + "u16vec4 minInvocationsExclusiveScanAMD(u16vec4);" + + "float maxInvocationsAMD(float);" + "vec2 maxInvocationsAMD(vec2);" + "vec3 maxInvocationsAMD(vec3);" + "vec4 maxInvocationsAMD(vec4);" + + "int maxInvocationsAMD(int);" + "ivec2 maxInvocationsAMD(ivec2);" + "ivec3 maxInvocationsAMD(ivec3);" + "ivec4 maxInvocationsAMD(ivec4);" + + "uint maxInvocationsAMD(uint);" + "uvec2 maxInvocationsAMD(uvec2);" + "uvec3 maxInvocationsAMD(uvec3);" + "uvec4 maxInvocationsAMD(uvec4);" + + "double maxInvocationsAMD(double);" + "dvec2 maxInvocationsAMD(dvec2);" + "dvec3 maxInvocationsAMD(dvec3);" + "dvec4 maxInvocationsAMD(dvec4);" + + "int64_t maxInvocationsAMD(int64_t);" + "i64vec2 maxInvocationsAMD(i64vec2);" + "i64vec3 maxInvocationsAMD(i64vec3);" + "i64vec4 maxInvocationsAMD(i64vec4);" + + "uint64_t maxInvocationsAMD(uint64_t);" + "u64vec2 maxInvocationsAMD(u64vec2);" + "u64vec3 maxInvocationsAMD(u64vec3);" + "u64vec4 maxInvocationsAMD(u64vec4);" + + "float16_t maxInvocationsAMD(float16_t);" + "f16vec2 maxInvocationsAMD(f16vec2);" + "f16vec3 maxInvocationsAMD(f16vec3);" + "f16vec4 maxInvocationsAMD(f16vec4);" + + "int16_t maxInvocationsAMD(int16_t);" + "i16vec2 maxInvocationsAMD(i16vec2);" + "i16vec3 maxInvocationsAMD(i16vec3);" + "i16vec4 maxInvocationsAMD(i16vec4);" + + "uint16_t maxInvocationsAMD(uint16_t);" + "u16vec2 maxInvocationsAMD(u16vec2);" + "u16vec3 maxInvocationsAMD(u16vec3);" + "u16vec4 maxInvocationsAMD(u16vec4);" + + "float maxInvocationsInclusiveScanAMD(float);" + "vec2 maxInvocationsInclusiveScanAMD(vec2);" + "vec3 maxInvocationsInclusiveScanAMD(vec3);" + "vec4 maxInvocationsInclusiveScanAMD(vec4);" + + "int maxInvocationsInclusiveScanAMD(int);" + "ivec2 maxInvocationsInclusiveScanAMD(ivec2);" + "ivec3 maxInvocationsInclusiveScanAMD(ivec3);" + "ivec4 maxInvocationsInclusiveScanAMD(ivec4);" + + "uint maxInvocationsInclusiveScanAMD(uint);" + "uvec2 maxInvocationsInclusiveScanAMD(uvec2);" + "uvec3 maxInvocationsInclusiveScanAMD(uvec3);" + "uvec4 maxInvocationsInclusiveScanAMD(uvec4);" + + "double maxInvocationsInclusiveScanAMD(double);" + "dvec2 maxInvocationsInclusiveScanAMD(dvec2);" + "dvec3 maxInvocationsInclusiveScanAMD(dvec3);" + "dvec4 maxInvocationsInclusiveScanAMD(dvec4);" + + "int64_t maxInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 maxInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 maxInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 maxInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t maxInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 
maxInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 maxInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 maxInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t maxInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 maxInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 maxInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 maxInvocationsInclusiveScanAMD(f16vec4);" + + "int16_t maxInvocationsInclusiveScanAMD(int16_t);" + "i16vec2 maxInvocationsInclusiveScanAMD(i16vec2);" + "i16vec3 maxInvocationsInclusiveScanAMD(i16vec3);" + "i16vec4 maxInvocationsInclusiveScanAMD(i16vec4);" + + "uint16_t maxInvocationsInclusiveScanAMD(uint16_t);" + "u16vec2 maxInvocationsInclusiveScanAMD(u16vec2);" + "u16vec3 maxInvocationsInclusiveScanAMD(u16vec3);" + "u16vec4 maxInvocationsInclusiveScanAMD(u16vec4);" + + "float maxInvocationsExclusiveScanAMD(float);" + "vec2 maxInvocationsExclusiveScanAMD(vec2);" + "vec3 maxInvocationsExclusiveScanAMD(vec3);" + "vec4 maxInvocationsExclusiveScanAMD(vec4);" + + "int maxInvocationsExclusiveScanAMD(int);" + "ivec2 maxInvocationsExclusiveScanAMD(ivec2);" + "ivec3 maxInvocationsExclusiveScanAMD(ivec3);" + "ivec4 maxInvocationsExclusiveScanAMD(ivec4);" + + "uint maxInvocationsExclusiveScanAMD(uint);" + "uvec2 maxInvocationsExclusiveScanAMD(uvec2);" + "uvec3 maxInvocationsExclusiveScanAMD(uvec3);" + "uvec4 maxInvocationsExclusiveScanAMD(uvec4);" + + "double maxInvocationsExclusiveScanAMD(double);" + "dvec2 maxInvocationsExclusiveScanAMD(dvec2);" + "dvec3 maxInvocationsExclusiveScanAMD(dvec3);" + "dvec4 maxInvocationsExclusiveScanAMD(dvec4);" + + "int64_t maxInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 maxInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 maxInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 maxInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t maxInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 maxInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 maxInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 maxInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t maxInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 maxInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 maxInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 maxInvocationsExclusiveScanAMD(f16vec4);" + + "int16_t maxInvocationsExclusiveScanAMD(int16_t);" + "i16vec2 maxInvocationsExclusiveScanAMD(i16vec2);" + "i16vec3 maxInvocationsExclusiveScanAMD(i16vec3);" + "i16vec4 maxInvocationsExclusiveScanAMD(i16vec4);" + + "uint16_t maxInvocationsExclusiveScanAMD(uint16_t);" + "u16vec2 maxInvocationsExclusiveScanAMD(u16vec2);" + "u16vec3 maxInvocationsExclusiveScanAMD(u16vec3);" + "u16vec4 maxInvocationsExclusiveScanAMD(u16vec4);" + + "float addInvocationsAMD(float);" + "vec2 addInvocationsAMD(vec2);" + "vec3 addInvocationsAMD(vec3);" + "vec4 addInvocationsAMD(vec4);" + + "int addInvocationsAMD(int);" + "ivec2 addInvocationsAMD(ivec2);" + "ivec3 addInvocationsAMD(ivec3);" + "ivec4 addInvocationsAMD(ivec4);" + + "uint addInvocationsAMD(uint);" + "uvec2 addInvocationsAMD(uvec2);" + "uvec3 addInvocationsAMD(uvec3);" + "uvec4 addInvocationsAMD(uvec4);" + + "double addInvocationsAMD(double);" + "dvec2 addInvocationsAMD(dvec2);" + "dvec3 addInvocationsAMD(dvec3);" + "dvec4 addInvocationsAMD(dvec4);" + + "int64_t addInvocationsAMD(int64_t);" + "i64vec2 addInvocationsAMD(i64vec2);" + "i64vec3 addInvocationsAMD(i64vec3);" + "i64vec4 addInvocationsAMD(i64vec4);" + + "uint64_t addInvocationsAMD(uint64_t);" + "u64vec2 addInvocationsAMD(u64vec2);" + "u64vec3 addInvocationsAMD(u64vec3);" + "u64vec4 
addInvocationsAMD(u64vec4);" + + "float16_t addInvocationsAMD(float16_t);" + "f16vec2 addInvocationsAMD(f16vec2);" + "f16vec3 addInvocationsAMD(f16vec3);" + "f16vec4 addInvocationsAMD(f16vec4);" + + "int16_t addInvocationsAMD(int16_t);" + "i16vec2 addInvocationsAMD(i16vec2);" + "i16vec3 addInvocationsAMD(i16vec3);" + "i16vec4 addInvocationsAMD(i16vec4);" + + "uint16_t addInvocationsAMD(uint16_t);" + "u16vec2 addInvocationsAMD(u16vec2);" + "u16vec3 addInvocationsAMD(u16vec3);" + "u16vec4 addInvocationsAMD(u16vec4);" + + "float addInvocationsInclusiveScanAMD(float);" + "vec2 addInvocationsInclusiveScanAMD(vec2);" + "vec3 addInvocationsInclusiveScanAMD(vec3);" + "vec4 addInvocationsInclusiveScanAMD(vec4);" + + "int addInvocationsInclusiveScanAMD(int);" + "ivec2 addInvocationsInclusiveScanAMD(ivec2);" + "ivec3 addInvocationsInclusiveScanAMD(ivec3);" + "ivec4 addInvocationsInclusiveScanAMD(ivec4);" + + "uint addInvocationsInclusiveScanAMD(uint);" + "uvec2 addInvocationsInclusiveScanAMD(uvec2);" + "uvec3 addInvocationsInclusiveScanAMD(uvec3);" + "uvec4 addInvocationsInclusiveScanAMD(uvec4);" + + "double addInvocationsInclusiveScanAMD(double);" + "dvec2 addInvocationsInclusiveScanAMD(dvec2);" + "dvec3 addInvocationsInclusiveScanAMD(dvec3);" + "dvec4 addInvocationsInclusiveScanAMD(dvec4);" + + "int64_t addInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 addInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 addInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 addInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t addInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 addInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 addInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 addInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t addInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 addInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 addInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 addInvocationsInclusiveScanAMD(f16vec4);" + + "int16_t addInvocationsInclusiveScanAMD(int16_t);" + "i16vec2 addInvocationsInclusiveScanAMD(i16vec2);" + "i16vec3 addInvocationsInclusiveScanAMD(i16vec3);" + "i16vec4 addInvocationsInclusiveScanAMD(i16vec4);" + + "uint16_t addInvocationsInclusiveScanAMD(uint16_t);" + "u16vec2 addInvocationsInclusiveScanAMD(u16vec2);" + "u16vec3 addInvocationsInclusiveScanAMD(u16vec3);" + "u16vec4 addInvocationsInclusiveScanAMD(u16vec4);" + + "float addInvocationsExclusiveScanAMD(float);" + "vec2 addInvocationsExclusiveScanAMD(vec2);" + "vec3 addInvocationsExclusiveScanAMD(vec3);" + "vec4 addInvocationsExclusiveScanAMD(vec4);" + + "int addInvocationsExclusiveScanAMD(int);" + "ivec2 addInvocationsExclusiveScanAMD(ivec2);" + "ivec3 addInvocationsExclusiveScanAMD(ivec3);" + "ivec4 addInvocationsExclusiveScanAMD(ivec4);" + + "uint addInvocationsExclusiveScanAMD(uint);" + "uvec2 addInvocationsExclusiveScanAMD(uvec2);" + "uvec3 addInvocationsExclusiveScanAMD(uvec3);" + "uvec4 addInvocationsExclusiveScanAMD(uvec4);" + + "double addInvocationsExclusiveScanAMD(double);" + "dvec2 addInvocationsExclusiveScanAMD(dvec2);" + "dvec3 addInvocationsExclusiveScanAMD(dvec3);" + "dvec4 addInvocationsExclusiveScanAMD(dvec4);" + + "int64_t addInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 addInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 addInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 addInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t addInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 addInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 addInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 
addInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t addInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 addInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 addInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 addInvocationsExclusiveScanAMD(f16vec4);" + + "int16_t addInvocationsExclusiveScanAMD(int16_t);" + "i16vec2 addInvocationsExclusiveScanAMD(i16vec2);" + "i16vec3 addInvocationsExclusiveScanAMD(i16vec3);" + "i16vec4 addInvocationsExclusiveScanAMD(i16vec4);" + + "uint16_t addInvocationsExclusiveScanAMD(uint16_t);" + "u16vec2 addInvocationsExclusiveScanAMD(u16vec2);" + "u16vec3 addInvocationsExclusiveScanAMD(u16vec3);" + "u16vec4 addInvocationsExclusiveScanAMD(u16vec4);" + + "float minInvocationsNonUniformAMD(float);" + "vec2 minInvocationsNonUniformAMD(vec2);" + "vec3 minInvocationsNonUniformAMD(vec3);" + "vec4 minInvocationsNonUniformAMD(vec4);" + + "int minInvocationsNonUniformAMD(int);" + "ivec2 minInvocationsNonUniformAMD(ivec2);" + "ivec3 minInvocationsNonUniformAMD(ivec3);" + "ivec4 minInvocationsNonUniformAMD(ivec4);" + + "uint minInvocationsNonUniformAMD(uint);" + "uvec2 minInvocationsNonUniformAMD(uvec2);" + "uvec3 minInvocationsNonUniformAMD(uvec3);" + "uvec4 minInvocationsNonUniformAMD(uvec4);" + + "double minInvocationsNonUniformAMD(double);" + "dvec2 minInvocationsNonUniformAMD(dvec2);" + "dvec3 minInvocationsNonUniformAMD(dvec3);" + "dvec4 minInvocationsNonUniformAMD(dvec4);" + + "int64_t minInvocationsNonUniformAMD(int64_t);" + "i64vec2 minInvocationsNonUniformAMD(i64vec2);" + "i64vec3 minInvocationsNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsNonUniformAMD(i64vec4);" + + "uint64_t minInvocationsNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsNonUniformAMD(u64vec4);" + + "float16_t minInvocationsNonUniformAMD(float16_t);" + "f16vec2 minInvocationsNonUniformAMD(f16vec2);" + "f16vec3 minInvocationsNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsNonUniformAMD(f16vec4);" + + "int16_t minInvocationsNonUniformAMD(int16_t);" + "i16vec2 minInvocationsNonUniformAMD(i16vec2);" + "i16vec3 minInvocationsNonUniformAMD(i16vec3);" + "i16vec4 minInvocationsNonUniformAMD(i16vec4);" + + "uint16_t minInvocationsNonUniformAMD(uint16_t);" + "u16vec2 minInvocationsNonUniformAMD(u16vec2);" + "u16vec3 minInvocationsNonUniformAMD(u16vec3);" + "u16vec4 minInvocationsNonUniformAMD(u16vec4);" + + "float minInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 minInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 minInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 minInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int minInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 minInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 minInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 minInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint minInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 minInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 minInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 minInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double minInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 minInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 minInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 minInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t minInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 minInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 
minInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t minInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t minInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 minInvocationsInclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 minInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "int16_t minInvocationsInclusiveScanNonUniformAMD(int16_t);" + "i16vec2 minInvocationsInclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 minInvocationsInclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 minInvocationsInclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t minInvocationsInclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 minInvocationsInclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 minInvocationsInclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 minInvocationsInclusiveScanNonUniformAMD(u16vec4);" + + "float minInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 minInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 minInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 minInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int minInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 minInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 minInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 minInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint minInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 minInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 minInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 minInvocationsExclusiveScanNonUniformAMD(uvec4);" + + "double minInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 minInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 minInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 minInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t minInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 minInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 minInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t minInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t minInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 minInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 minInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsExclusiveScanNonUniformAMD(f16vec4);" + + "int16_t minInvocationsExclusiveScanNonUniformAMD(int16_t);" + "i16vec2 minInvocationsExclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 minInvocationsExclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 minInvocationsExclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t minInvocationsExclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 minInvocationsExclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 minInvocationsExclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 minInvocationsExclusiveScanNonUniformAMD(u16vec4);" + + "float maxInvocationsNonUniformAMD(float);" + "vec2 maxInvocationsNonUniformAMD(vec2);" + "vec3 maxInvocationsNonUniformAMD(vec3);" + "vec4 maxInvocationsNonUniformAMD(vec4);" + + "int 
maxInvocationsNonUniformAMD(int);" + "ivec2 maxInvocationsNonUniformAMD(ivec2);" + "ivec3 maxInvocationsNonUniformAMD(ivec3);" + "ivec4 maxInvocationsNonUniformAMD(ivec4);" + + "uint maxInvocationsNonUniformAMD(uint);" + "uvec2 maxInvocationsNonUniformAMD(uvec2);" + "uvec3 maxInvocationsNonUniformAMD(uvec3);" + "uvec4 maxInvocationsNonUniformAMD(uvec4);" + + "double maxInvocationsNonUniformAMD(double);" + "dvec2 maxInvocationsNonUniformAMD(dvec2);" + "dvec3 maxInvocationsNonUniformAMD(dvec3);" + "dvec4 maxInvocationsNonUniformAMD(dvec4);" + + "int64_t maxInvocationsNonUniformAMD(int64_t);" + "i64vec2 maxInvocationsNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsNonUniformAMD(uint64_t);" + "u64vec2 maxInvocationsNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsNonUniformAMD(float16_t);" + "f16vec2 maxInvocationsNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsNonUniformAMD(f16vec4);" + + "int16_t maxInvocationsNonUniformAMD(int16_t);" + "i16vec2 maxInvocationsNonUniformAMD(i16vec2);" + "i16vec3 maxInvocationsNonUniformAMD(i16vec3);" + "i16vec4 maxInvocationsNonUniformAMD(i16vec4);" + + "uint16_t maxInvocationsNonUniformAMD(uint16_t);" + "u16vec2 maxInvocationsNonUniformAMD(u16vec2);" + "u16vec3 maxInvocationsNonUniformAMD(u16vec3);" + "u16vec4 maxInvocationsNonUniformAMD(u16vec4);" + + "float maxInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 maxInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 maxInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 maxInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int maxInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 maxInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 maxInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 maxInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint maxInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 maxInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 maxInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 maxInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double maxInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 maxInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 maxInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 maxInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t maxInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 maxInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 maxInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 maxInvocationsInclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "int16_t maxInvocationsInclusiveScanNonUniformAMD(int16_t);" + "i16vec2 maxInvocationsInclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 maxInvocationsInclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 maxInvocationsInclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t 
maxInvocationsInclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 maxInvocationsInclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 maxInvocationsInclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 maxInvocationsInclusiveScanNonUniformAMD(u16vec4);" + + "float maxInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 maxInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 maxInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 maxInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int maxInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 maxInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 maxInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 maxInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint maxInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 maxInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 maxInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 maxInvocationsExclusiveScanNonUniformAMD(uvec4);" + + "double maxInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 maxInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 maxInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 maxInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t maxInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 maxInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 maxInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 maxInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsExclusiveScanNonUniformAMD(f16vec4);" + + "int16_t maxInvocationsExclusiveScanNonUniformAMD(int16_t);" + "i16vec2 maxInvocationsExclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 maxInvocationsExclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 maxInvocationsExclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t maxInvocationsExclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 maxInvocationsExclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 maxInvocationsExclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 maxInvocationsExclusiveScanNonUniformAMD(u16vec4);" + + "float addInvocationsNonUniformAMD(float);" + "vec2 addInvocationsNonUniformAMD(vec2);" + "vec3 addInvocationsNonUniformAMD(vec3);" + "vec4 addInvocationsNonUniformAMD(vec4);" + + "int addInvocationsNonUniformAMD(int);" + "ivec2 addInvocationsNonUniformAMD(ivec2);" + "ivec3 addInvocationsNonUniformAMD(ivec3);" + "ivec4 addInvocationsNonUniformAMD(ivec4);" + + "uint addInvocationsNonUniformAMD(uint);" + "uvec2 addInvocationsNonUniformAMD(uvec2);" + "uvec3 addInvocationsNonUniformAMD(uvec3);" + "uvec4 addInvocationsNonUniformAMD(uvec4);" + + "double addInvocationsNonUniformAMD(double);" + "dvec2 addInvocationsNonUniformAMD(dvec2);" + "dvec3 addInvocationsNonUniformAMD(dvec3);" + "dvec4 addInvocationsNonUniformAMD(dvec4);" + + "int64_t addInvocationsNonUniformAMD(int64_t);" + "i64vec2 addInvocationsNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsNonUniformAMD(u64vec3);" + 
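+            // Every element type needs its own prototype here: these strings are later parsed
+            // as ordinary GLSL declarations to seed the built-in symbol table, and GLSL has no
+            // generics, so each scalar/vector width of each AMD ballot op is spelled out by hand.
+            // Hypothetical usage sketch (GLSL, with GL_AMD_shader_ballot enabled; names assumed):
+            //     float total = addInvocationsAMD(localValue);  // reduce across the subgroup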
"u64vec4 addInvocationsNonUniformAMD(u64vec4);" + + "float16_t addInvocationsNonUniformAMD(float16_t);" + "f16vec2 addInvocationsNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsNonUniformAMD(f16vec3);" + "f16vec4 addInvocationsNonUniformAMD(f16vec4);" + + "int16_t addInvocationsNonUniformAMD(int16_t);" + "i16vec2 addInvocationsNonUniformAMD(i16vec2);" + "i16vec3 addInvocationsNonUniformAMD(i16vec3);" + "i16vec4 addInvocationsNonUniformAMD(i16vec4);" + + "uint16_t addInvocationsNonUniformAMD(uint16_t);" + "u16vec2 addInvocationsNonUniformAMD(u16vec2);" + "u16vec3 addInvocationsNonUniformAMD(u16vec3);" + "u16vec4 addInvocationsNonUniformAMD(u16vec4);" + + "float addInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 addInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 addInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 addInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int addInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 addInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 addInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 addInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint addInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 addInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 addInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 addInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double addInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 addInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 addInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 addInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t addInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 addInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 addInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t addInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 addInvocationsInclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 addInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "int16_t addInvocationsInclusiveScanNonUniformAMD(int16_t);" + "i16vec2 addInvocationsInclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 addInvocationsInclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 addInvocationsInclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t addInvocationsInclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 addInvocationsInclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 addInvocationsInclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 addInvocationsInclusiveScanNonUniformAMD(u16vec4);" + + "float addInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 addInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 addInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 addInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int addInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 addInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 addInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 addInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint addInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 addInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 addInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 
addInvocationsExclusiveScanNonUniformAMD(uvec4);" + + "double addInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 addInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 addInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 addInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t addInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 addInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 addInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t addInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 addInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 addInvocationsExclusiveScanNonUniformAMD(f16vec4);" + + "int16_t addInvocationsExclusiveScanNonUniformAMD(int16_t);" + "i16vec2 addInvocationsExclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 addInvocationsExclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 addInvocationsExclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t addInvocationsExclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 addInvocationsExclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 addInvocationsExclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 addInvocationsExclusiveScanNonUniformAMD(u16vec4);" + + "float swizzleInvocationsAMD(float, uvec4);" + "vec2 swizzleInvocationsAMD(vec2, uvec4);" + "vec3 swizzleInvocationsAMD(vec3, uvec4);" + "vec4 swizzleInvocationsAMD(vec4, uvec4);" + + "int swizzleInvocationsAMD(int, uvec4);" + "ivec2 swizzleInvocationsAMD(ivec2, uvec4);" + "ivec3 swizzleInvocationsAMD(ivec3, uvec4);" + "ivec4 swizzleInvocationsAMD(ivec4, uvec4);" + + "uint swizzleInvocationsAMD(uint, uvec4);" + "uvec2 swizzleInvocationsAMD(uvec2, uvec4);" + "uvec3 swizzleInvocationsAMD(uvec3, uvec4);" + "uvec4 swizzleInvocationsAMD(uvec4, uvec4);" + + "float swizzleInvocationsMaskedAMD(float, uvec3);" + "vec2 swizzleInvocationsMaskedAMD(vec2, uvec3);" + "vec3 swizzleInvocationsMaskedAMD(vec3, uvec3);" + "vec4 swizzleInvocationsMaskedAMD(vec4, uvec3);" + + "int swizzleInvocationsMaskedAMD(int, uvec3);" + "ivec2 swizzleInvocationsMaskedAMD(ivec2, uvec3);" + "ivec3 swizzleInvocationsMaskedAMD(ivec3, uvec3);" + "ivec4 swizzleInvocationsMaskedAMD(ivec4, uvec3);" + + "uint swizzleInvocationsMaskedAMD(uint, uvec3);" + "uvec2 swizzleInvocationsMaskedAMD(uvec2, uvec3);" + "uvec3 swizzleInvocationsMaskedAMD(uvec3, uvec3);" + "uvec4 swizzleInvocationsMaskedAMD(uvec4, uvec3);" + + "float writeInvocationAMD(float, float, uint);" + "vec2 writeInvocationAMD(vec2, vec2, uint);" + "vec3 writeInvocationAMD(vec3, vec3, uint);" + "vec4 writeInvocationAMD(vec4, vec4, uint);" + + "int writeInvocationAMD(int, int, uint);" + "ivec2 writeInvocationAMD(ivec2, ivec2, uint);" + "ivec3 writeInvocationAMD(ivec3, ivec3, uint);" + "ivec4 writeInvocationAMD(ivec4, ivec4, uint);" + + "uint writeInvocationAMD(uint, uint, uint);" + "uvec2 writeInvocationAMD(uvec2, uvec2, uint);" + "uvec3 writeInvocationAMD(uvec3, uvec3, uint);" + "uvec4 writeInvocationAMD(uvec4, uvec4, uint);" + + "uint mbcntAMD(uint64_t);" + + "\n"); + } + + // GL_AMD_gcn_shader + if (profile != EEsProfile && version >= 440) { + commonBuiltins.append( + "float cubeFaceIndexAMD(vec3);" + "vec2 
cubeFaceCoordAMD(vec3);" + "uint64_t timeAMD();" + + "in int gl_SIMDGroupSizeAMD;" + "\n"); + } + + // GL_AMD_shader_fragment_mask + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "uint fragmentMaskFetchAMD(sampler2DMS, ivec2);" + "uint fragmentMaskFetchAMD(isampler2DMS, ivec2);" + "uint fragmentMaskFetchAMD(usampler2DMS, ivec2);" + + "uint fragmentMaskFetchAMD(sampler2DMSArray, ivec3);" + "uint fragmentMaskFetchAMD(isampler2DMSArray, ivec3);" + "uint fragmentMaskFetchAMD(usampler2DMSArray, ivec3);" + + "vec4 fragmentFetchAMD(sampler2DMS, ivec2, uint);" + "ivec4 fragmentFetchAMD(isampler2DMS, ivec2, uint);" + "uvec4 fragmentFetchAMD(usampler2DMS, ivec2, uint);" + + "vec4 fragmentFetchAMD(sampler2DMSArray, ivec3, uint);" + "ivec4 fragmentFetchAMD(isampler2DMSArray, ivec3, uint);" + "uvec4 fragmentFetchAMD(usampler2DMSArray, ivec3, uint);" + + "\n"); + } + + if ((profile != EEsProfile && version >= 130) || + (profile == EEsProfile && version >= 300)) { + commonBuiltins.append( + "uint countLeadingZeros(uint);" + "uvec2 countLeadingZeros(uvec2);" + "uvec3 countLeadingZeros(uvec3);" + "uvec4 countLeadingZeros(uvec4);" + + "uint countTrailingZeros(uint);" + "uvec2 countTrailingZeros(uvec2);" + "uvec3 countTrailingZeros(uvec3);" + "uvec4 countTrailingZeros(uvec4);" + + "uint absoluteDifference(int, int);" + "uvec2 absoluteDifference(ivec2, ivec2);" + "uvec3 absoluteDifference(ivec3, ivec3);" + "uvec4 absoluteDifference(ivec4, ivec4);" + + "uint16_t absoluteDifference(int16_t, int16_t);" + "u16vec2 absoluteDifference(i16vec2, i16vec2);" + "u16vec3 absoluteDifference(i16vec3, i16vec3);" + "u16vec4 absoluteDifference(i16vec4, i16vec4);" + + "uint64_t absoluteDifference(int64_t, int64_t);" + "u64vec2 absoluteDifference(i64vec2, i64vec2);" + "u64vec3 absoluteDifference(i64vec3, i64vec3);" + "u64vec4 absoluteDifference(i64vec4, i64vec4);" + + "uint absoluteDifference(uint, uint);" + "uvec2 absoluteDifference(uvec2, uvec2);" + "uvec3 absoluteDifference(uvec3, uvec3);" + "uvec4 absoluteDifference(uvec4, uvec4);" + + "uint16_t absoluteDifference(uint16_t, uint16_t);" + "u16vec2 absoluteDifference(u16vec2, u16vec2);" + "u16vec3 absoluteDifference(u16vec3, u16vec3);" + "u16vec4 absoluteDifference(u16vec4, u16vec4);" + + "uint64_t absoluteDifference(uint64_t, uint64_t);" + "u64vec2 absoluteDifference(u64vec2, u64vec2);" + "u64vec3 absoluteDifference(u64vec3, u64vec3);" + "u64vec4 absoluteDifference(u64vec4, u64vec4);" + + "int addSaturate(int, int);" + "ivec2 addSaturate(ivec2, ivec2);" + "ivec3 addSaturate(ivec3, ivec3);" + "ivec4 addSaturate(ivec4, ivec4);" + + "int16_t addSaturate(int16_t, int16_t);" + "i16vec2 addSaturate(i16vec2, i16vec2);" + "i16vec3 addSaturate(i16vec3, i16vec3);" + "i16vec4 addSaturate(i16vec4, i16vec4);" + + "int64_t addSaturate(int64_t, int64_t);" + "i64vec2 addSaturate(i64vec2, i64vec2);" + "i64vec3 addSaturate(i64vec3, i64vec3);" + "i64vec4 addSaturate(i64vec4, i64vec4);" + + "uint addSaturate(uint, uint);" + "uvec2 addSaturate(uvec2, uvec2);" + "uvec3 addSaturate(uvec3, uvec3);" + "uvec4 addSaturate(uvec4, uvec4);" + + "uint16_t addSaturate(uint16_t, uint16_t);" + "u16vec2 addSaturate(u16vec2, u16vec2);" + "u16vec3 addSaturate(u16vec3, u16vec3);" + "u16vec4 addSaturate(u16vec4, u16vec4);" + + "uint64_t addSaturate(uint64_t, uint64_t);" + "u64vec2 addSaturate(u64vec2, u64vec2);" + "u64vec3 addSaturate(u64vec3, u64vec3);" + "u64vec4 addSaturate(u64vec4, u64vec4);" + + "int subtractSaturate(int, int);" + "ivec2 subtractSaturate(ivec2, ivec2);" + "ivec3 
subtractSaturate(ivec3, ivec3);" + "ivec4 subtractSaturate(ivec4, ivec4);" + + "int16_t subtractSaturate(int16_t, int16_t);" + "i16vec2 subtractSaturate(i16vec2, i16vec2);" + "i16vec3 subtractSaturate(i16vec3, i16vec3);" + "i16vec4 subtractSaturate(i16vec4, i16vec4);" + + "int64_t subtractSaturate(int64_t, int64_t);" + "i64vec2 subtractSaturate(i64vec2, i64vec2);" + "i64vec3 subtractSaturate(i64vec3, i64vec3);" + "i64vec4 subtractSaturate(i64vec4, i64vec4);" + + "uint subtractSaturate(uint, uint);" + "uvec2 subtractSaturate(uvec2, uvec2);" + "uvec3 subtractSaturate(uvec3, uvec3);" + "uvec4 subtractSaturate(uvec4, uvec4);" + + "uint16_t subtractSaturate(uint16_t, uint16_t);" + "u16vec2 subtractSaturate(u16vec2, u16vec2);" + "u16vec3 subtractSaturate(u16vec3, u16vec3);" + "u16vec4 subtractSaturate(u16vec4, u16vec4);" + + "uint64_t subtractSaturate(uint64_t, uint64_t);" + "u64vec2 subtractSaturate(u64vec2, u64vec2);" + "u64vec3 subtractSaturate(u64vec3, u64vec3);" + "u64vec4 subtractSaturate(u64vec4, u64vec4);" + + "int average(int, int);" + "ivec2 average(ivec2, ivec2);" + "ivec3 average(ivec3, ivec3);" + "ivec4 average(ivec4, ivec4);" + + "int16_t average(int16_t, int16_t);" + "i16vec2 average(i16vec2, i16vec2);" + "i16vec3 average(i16vec3, i16vec3);" + "i16vec4 average(i16vec4, i16vec4);" + + "int64_t average(int64_t, int64_t);" + "i64vec2 average(i64vec2, i64vec2);" + "i64vec3 average(i64vec3, i64vec3);" + "i64vec4 average(i64vec4, i64vec4);" + + "uint average(uint, uint);" + "uvec2 average(uvec2, uvec2);" + "uvec3 average(uvec3, uvec3);" + "uvec4 average(uvec4, uvec4);" + + "uint16_t average(uint16_t, uint16_t);" + "u16vec2 average(u16vec2, u16vec2);" + "u16vec3 average(u16vec3, u16vec3);" + "u16vec4 average(u16vec4, u16vec4);" + + "uint64_t average(uint64_t, uint64_t);" + "u64vec2 average(u64vec2, u64vec2);" + "u64vec3 average(u64vec3, u64vec3);" + "u64vec4 average(u64vec4, u64vec4);" + + "int averageRounded(int, int);" + "ivec2 averageRounded(ivec2, ivec2);" + "ivec3 averageRounded(ivec3, ivec3);" + "ivec4 averageRounded(ivec4, ivec4);" + + "int16_t averageRounded(int16_t, int16_t);" + "i16vec2 averageRounded(i16vec2, i16vec2);" + "i16vec3 averageRounded(i16vec3, i16vec3);" + "i16vec4 averageRounded(i16vec4, i16vec4);" + + "int64_t averageRounded(int64_t, int64_t);" + "i64vec2 averageRounded(i64vec2, i64vec2);" + "i64vec3 averageRounded(i64vec3, i64vec3);" + "i64vec4 averageRounded(i64vec4, i64vec4);" + + "uint averageRounded(uint, uint);" + "uvec2 averageRounded(uvec2, uvec2);" + "uvec3 averageRounded(uvec3, uvec3);" + "uvec4 averageRounded(uvec4, uvec4);" + + "uint16_t averageRounded(uint16_t, uint16_t);" + "u16vec2 averageRounded(u16vec2, u16vec2);" + "u16vec3 averageRounded(u16vec3, u16vec3);" + "u16vec4 averageRounded(u16vec4, u16vec4);" + + "uint64_t averageRounded(uint64_t, uint64_t);" + "u64vec2 averageRounded(u64vec2, u64vec2);" + "u64vec3 averageRounded(u64vec3, u64vec3);" + "u64vec4 averageRounded(u64vec4, u64vec4);" + + "int multiply32x16(int, int);" + "ivec2 multiply32x16(ivec2, ivec2);" + "ivec3 multiply32x16(ivec3, ivec3);" + "ivec4 multiply32x16(ivec4, ivec4);" + + "uint multiply32x16(uint, uint);" + "uvec2 multiply32x16(uvec2, uvec2);" + "uvec3 multiply32x16(uvec3, uvec3);" + "uvec4 multiply32x16(uvec4, uvec4);" + "\n"); + } + + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) { + commonBuiltins.append( + "struct gl_TextureFootprint2DNV {" + "uvec2 anchor;" + "uvec2 offset;" + "uvec2 mask;" + "uint lod;" + "uint 
granularity;" + "};" + + "struct gl_TextureFootprint3DNV {" + "uvec3 anchor;" + "uvec3 offset;" + "uvec2 mask;" + "uint lod;" + "uint granularity;" + "};" + "bool textureFootprintNV(sampler2D, vec2, int, bool, out gl_TextureFootprint2DNV);" + "bool textureFootprintNV(sampler3D, vec3, int, bool, out gl_TextureFootprint3DNV);" + "bool textureFootprintNV(sampler2D, vec2, int, bool, out gl_TextureFootprint2DNV, float);" + "bool textureFootprintNV(sampler3D, vec3, int, bool, out gl_TextureFootprint3DNV, float);" + "bool textureFootprintClampNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV);" + "bool textureFootprintClampNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV);" + "bool textureFootprintClampNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV, float);" + "bool textureFootprintClampNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV, float);" + "bool textureFootprintLodNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV);" + "bool textureFootprintLodNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV);" + "bool textureFootprintGradNV(sampler2D, vec2, vec2, vec2, int, bool, out gl_TextureFootprint2DNV);" + "bool textureFootprintGradClampNV(sampler2D, vec2, vec2, vec2, float, int, bool, out gl_TextureFootprint2DNV);" + "\n"); + } + + if ((profile == EEsProfile && version >= 300 && version < 310) || + (profile != EEsProfile && version >= 150 && version < 450)) { // GL_EXT_shader_integer_mix + commonBuiltins.append("int mix(int, int, bool);" + "ivec2 mix(ivec2, ivec2, bvec2);" + "ivec3 mix(ivec3, ivec3, bvec3);" + "ivec4 mix(ivec4, ivec4, bvec4);" + "uint mix(uint, uint, bool );" + "uvec2 mix(uvec2, uvec2, bvec2);" + "uvec3 mix(uvec3, uvec3, bvec3);" + "uvec4 mix(uvec4, uvec4, bvec4);" + "bool mix(bool, bool, bool );" + "bvec2 mix(bvec2, bvec2, bvec2);" + "bvec3 mix(bvec3, bvec3, bvec3);" + "bvec4 mix(bvec4, bvec4, bvec4);" + + "\n"); + } + + // GL_AMD_gpu_shader_half_float/Explicit types + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "float16_t radians(float16_t);" + "f16vec2 radians(f16vec2);" + "f16vec3 radians(f16vec3);" + "f16vec4 radians(f16vec4);" + + "float16_t degrees(float16_t);" + "f16vec2 degrees(f16vec2);" + "f16vec3 degrees(f16vec3);" + "f16vec4 degrees(f16vec4);" + + "float16_t sin(float16_t);" + "f16vec2 sin(f16vec2);" + "f16vec3 sin(f16vec3);" + "f16vec4 sin(f16vec4);" + + "float16_t cos(float16_t);" + "f16vec2 cos(f16vec2);" + "f16vec3 cos(f16vec3);" + "f16vec4 cos(f16vec4);" + + "float16_t tan(float16_t);" + "f16vec2 tan(f16vec2);" + "f16vec3 tan(f16vec3);" + "f16vec4 tan(f16vec4);" + + "float16_t asin(float16_t);" + "f16vec2 asin(f16vec2);" + "f16vec3 asin(f16vec3);" + "f16vec4 asin(f16vec4);" + + "float16_t acos(float16_t);" + "f16vec2 acos(f16vec2);" + "f16vec3 acos(f16vec3);" + "f16vec4 acos(f16vec4);" + + "float16_t atan(float16_t, float16_t);" + "f16vec2 atan(f16vec2, f16vec2);" + "f16vec3 atan(f16vec3, f16vec3);" + "f16vec4 atan(f16vec4, f16vec4);" + + "float16_t atan(float16_t);" + "f16vec2 atan(f16vec2);" + "f16vec3 atan(f16vec3);" + "f16vec4 atan(f16vec4);" + + "float16_t sinh(float16_t);" + "f16vec2 sinh(f16vec2);" + "f16vec3 sinh(f16vec3);" + "f16vec4 sinh(f16vec4);" + + "float16_t cosh(float16_t);" + "f16vec2 cosh(f16vec2);" + "f16vec3 cosh(f16vec3);" + "f16vec4 cosh(f16vec4);" + + "float16_t tanh(float16_t);" + "f16vec2 tanh(f16vec2);" + "f16vec3 tanh(f16vec3);" + "f16vec4 tanh(f16vec4);" + + "float16_t asinh(float16_t);" + "f16vec2 
asinh(f16vec2);" + "f16vec3 asinh(f16vec3);" + "f16vec4 asinh(f16vec4);" + + "float16_t acosh(float16_t);" + "f16vec2 acosh(f16vec2);" + "f16vec3 acosh(f16vec3);" + "f16vec4 acosh(f16vec4);" + + "float16_t atanh(float16_t);" + "f16vec2 atanh(f16vec2);" + "f16vec3 atanh(f16vec3);" + "f16vec4 atanh(f16vec4);" + + "float16_t pow(float16_t, float16_t);" + "f16vec2 pow(f16vec2, f16vec2);" + "f16vec3 pow(f16vec3, f16vec3);" + "f16vec4 pow(f16vec4, f16vec4);" + + "float16_t exp(float16_t);" + "f16vec2 exp(f16vec2);" + "f16vec3 exp(f16vec3);" + "f16vec4 exp(f16vec4);" + + "float16_t log(float16_t);" + "f16vec2 log(f16vec2);" + "f16vec3 log(f16vec3);" + "f16vec4 log(f16vec4);" + + "float16_t exp2(float16_t);" + "f16vec2 exp2(f16vec2);" + "f16vec3 exp2(f16vec3);" + "f16vec4 exp2(f16vec4);" + + "float16_t log2(float16_t);" + "f16vec2 log2(f16vec2);" + "f16vec3 log2(f16vec3);" + "f16vec4 log2(f16vec4);" + + "float16_t sqrt(float16_t);" + "f16vec2 sqrt(f16vec2);" + "f16vec3 sqrt(f16vec3);" + "f16vec4 sqrt(f16vec4);" + + "float16_t inversesqrt(float16_t);" + "f16vec2 inversesqrt(f16vec2);" + "f16vec3 inversesqrt(f16vec3);" + "f16vec4 inversesqrt(f16vec4);" + + "float16_t abs(float16_t);" + "f16vec2 abs(f16vec2);" + "f16vec3 abs(f16vec3);" + "f16vec4 abs(f16vec4);" + + "float16_t sign(float16_t);" + "f16vec2 sign(f16vec2);" + "f16vec3 sign(f16vec3);" + "f16vec4 sign(f16vec4);" + + "float16_t floor(float16_t);" + "f16vec2 floor(f16vec2);" + "f16vec3 floor(f16vec3);" + "f16vec4 floor(f16vec4);" + + "float16_t trunc(float16_t);" + "f16vec2 trunc(f16vec2);" + "f16vec3 trunc(f16vec3);" + "f16vec4 trunc(f16vec4);" + + "float16_t round(float16_t);" + "f16vec2 round(f16vec2);" + "f16vec3 round(f16vec3);" + "f16vec4 round(f16vec4);" + + "float16_t roundEven(float16_t);" + "f16vec2 roundEven(f16vec2);" + "f16vec3 roundEven(f16vec3);" + "f16vec4 roundEven(f16vec4);" + + "float16_t ceil(float16_t);" + "f16vec2 ceil(f16vec2);" + "f16vec3 ceil(f16vec3);" + "f16vec4 ceil(f16vec4);" + + "float16_t fract(float16_t);" + "f16vec2 fract(f16vec2);" + "f16vec3 fract(f16vec3);" + "f16vec4 fract(f16vec4);" + + "float16_t mod(float16_t, float16_t);" + "f16vec2 mod(f16vec2, float16_t);" + "f16vec3 mod(f16vec3, float16_t);" + "f16vec4 mod(f16vec4, float16_t);" + "f16vec2 mod(f16vec2, f16vec2);" + "f16vec3 mod(f16vec3, f16vec3);" + "f16vec4 mod(f16vec4, f16vec4);" + + "float16_t modf(float16_t, out float16_t);" + "f16vec2 modf(f16vec2, out f16vec2);" + "f16vec3 modf(f16vec3, out f16vec3);" + "f16vec4 modf(f16vec4, out f16vec4);" + + "float16_t min(float16_t, float16_t);" + "f16vec2 min(f16vec2, float16_t);" + "f16vec3 min(f16vec3, float16_t);" + "f16vec4 min(f16vec4, float16_t);" + "f16vec2 min(f16vec2, f16vec2);" + "f16vec3 min(f16vec3, f16vec3);" + "f16vec4 min(f16vec4, f16vec4);" + + "float16_t max(float16_t, float16_t);" + "f16vec2 max(f16vec2, float16_t);" + "f16vec3 max(f16vec3, float16_t);" + "f16vec4 max(f16vec4, float16_t);" + "f16vec2 max(f16vec2, f16vec2);" + "f16vec3 max(f16vec3, f16vec3);" + "f16vec4 max(f16vec4, f16vec4);" + + "float16_t clamp(float16_t, float16_t, float16_t);" + "f16vec2 clamp(f16vec2, float16_t, float16_t);" + "f16vec3 clamp(f16vec3, float16_t, float16_t);" + "f16vec4 clamp(f16vec4, float16_t, float16_t);" + "f16vec2 clamp(f16vec2, f16vec2, f16vec2);" + "f16vec3 clamp(f16vec3, f16vec3, f16vec3);" + "f16vec4 clamp(f16vec4, f16vec4, f16vec4);" + + "float16_t mix(float16_t, float16_t, float16_t);" + "f16vec2 mix(f16vec2, f16vec2, float16_t);" + "f16vec3 mix(f16vec3, f16vec3, float16_t);" + "f16vec4 
mix(f16vec4, f16vec4, float16_t);" + "f16vec2 mix(f16vec2, f16vec2, f16vec2);" + "f16vec3 mix(f16vec3, f16vec3, f16vec3);" + "f16vec4 mix(f16vec4, f16vec4, f16vec4);" + "float16_t mix(float16_t, float16_t, bool);" + "f16vec2 mix(f16vec2, f16vec2, bvec2);" + "f16vec3 mix(f16vec3, f16vec3, bvec3);" + "f16vec4 mix(f16vec4, f16vec4, bvec4);" + + "float16_t step(float16_t, float16_t);" + "f16vec2 step(f16vec2, f16vec2);" + "f16vec3 step(f16vec3, f16vec3);" + "f16vec4 step(f16vec4, f16vec4);" + "f16vec2 step(float16_t, f16vec2);" + "f16vec3 step(float16_t, f16vec3);" + "f16vec4 step(float16_t, f16vec4);" + + "float16_t smoothstep(float16_t, float16_t, float16_t);" + "f16vec2 smoothstep(f16vec2, f16vec2, f16vec2);" + "f16vec3 smoothstep(f16vec3, f16vec3, f16vec3);" + "f16vec4 smoothstep(f16vec4, f16vec4, f16vec4);" + "f16vec2 smoothstep(float16_t, float16_t, f16vec2);" + "f16vec3 smoothstep(float16_t, float16_t, f16vec3);" + "f16vec4 smoothstep(float16_t, float16_t, f16vec4);" + + "bool isnan(float16_t);" + "bvec2 isnan(f16vec2);" + "bvec3 isnan(f16vec3);" + "bvec4 isnan(f16vec4);" + + "bool isinf(float16_t);" + "bvec2 isinf(f16vec2);" + "bvec3 isinf(f16vec3);" + "bvec4 isinf(f16vec4);" + + "float16_t fma(float16_t, float16_t, float16_t);" + "f16vec2 fma(f16vec2, f16vec2, f16vec2);" + "f16vec3 fma(f16vec3, f16vec3, f16vec3);" + "f16vec4 fma(f16vec4, f16vec4, f16vec4);" + + "float16_t frexp(float16_t, out int);" + "f16vec2 frexp(f16vec2, out ivec2);" + "f16vec3 frexp(f16vec3, out ivec3);" + "f16vec4 frexp(f16vec4, out ivec4);" + + "float16_t ldexp(float16_t, in int);" + "f16vec2 ldexp(f16vec2, in ivec2);" + "f16vec3 ldexp(f16vec3, in ivec3);" + "f16vec4 ldexp(f16vec4, in ivec4);" + + "uint packFloat2x16(f16vec2);" + "f16vec2 unpackFloat2x16(uint);" + + "float16_t length(float16_t);" + "float16_t length(f16vec2);" + "float16_t length(f16vec3);" + "float16_t length(f16vec4);" + + "float16_t distance(float16_t, float16_t);" + "float16_t distance(f16vec2, f16vec2);" + "float16_t distance(f16vec3, f16vec3);" + "float16_t distance(f16vec4, f16vec4);" + + "float16_t dot(float16_t, float16_t);" + "float16_t dot(f16vec2, f16vec2);" + "float16_t dot(f16vec3, f16vec3);" + "float16_t dot(f16vec4, f16vec4);" + + "f16vec3 cross(f16vec3, f16vec3);" + + "float16_t normalize(float16_t);" + "f16vec2 normalize(f16vec2);" + "f16vec3 normalize(f16vec3);" + "f16vec4 normalize(f16vec4);" + + "float16_t faceforward(float16_t, float16_t, float16_t);" + "f16vec2 faceforward(f16vec2, f16vec2, f16vec2);" + "f16vec3 faceforward(f16vec3, f16vec3, f16vec3);" + "f16vec4 faceforward(f16vec4, f16vec4, f16vec4);" + + "float16_t reflect(float16_t, float16_t);" + "f16vec2 reflect(f16vec2, f16vec2);" + "f16vec3 reflect(f16vec3, f16vec3);" + "f16vec4 reflect(f16vec4, f16vec4);" + + "float16_t refract(float16_t, float16_t, float16_t);" + "f16vec2 refract(f16vec2, f16vec2, float16_t);" + "f16vec3 refract(f16vec3, f16vec3, float16_t);" + "f16vec4 refract(f16vec4, f16vec4, float16_t);" + + "f16mat2 matrixCompMult(f16mat2, f16mat2);" + "f16mat3 matrixCompMult(f16mat3, f16mat3);" + "f16mat4 matrixCompMult(f16mat4, f16mat4);" + "f16mat2x3 matrixCompMult(f16mat2x3, f16mat2x3);" + "f16mat2x4 matrixCompMult(f16mat2x4, f16mat2x4);" + "f16mat3x2 matrixCompMult(f16mat3x2, f16mat3x2);" + "f16mat3x4 matrixCompMult(f16mat3x4, f16mat3x4);" + "f16mat4x2 matrixCompMult(f16mat4x2, f16mat4x2);" + "f16mat4x3 matrixCompMult(f16mat4x3, f16mat4x3);" + + "f16mat2 outerProduct(f16vec2, f16vec2);" + "f16mat3 outerProduct(f16vec3, f16vec3);" + "f16mat4 
outerProduct(f16vec4, f16vec4);" + "f16mat2x3 outerProduct(f16vec3, f16vec2);" + "f16mat3x2 outerProduct(f16vec2, f16vec3);" + "f16mat2x4 outerProduct(f16vec4, f16vec2);" + "f16mat4x2 outerProduct(f16vec2, f16vec4);" + "f16mat3x4 outerProduct(f16vec4, f16vec3);" + "f16mat4x3 outerProduct(f16vec3, f16vec4);" + + "f16mat2 transpose(f16mat2);" + "f16mat3 transpose(f16mat3);" + "f16mat4 transpose(f16mat4);" + "f16mat2x3 transpose(f16mat3x2);" + "f16mat3x2 transpose(f16mat2x3);" + "f16mat2x4 transpose(f16mat4x2);" + "f16mat4x2 transpose(f16mat2x4);" + "f16mat3x4 transpose(f16mat4x3);" + "f16mat4x3 transpose(f16mat3x4);" + + "float16_t determinant(f16mat2);" + "float16_t determinant(f16mat3);" + "float16_t determinant(f16mat4);" + + "f16mat2 inverse(f16mat2);" + "f16mat3 inverse(f16mat3);" + "f16mat4 inverse(f16mat4);" + + "bvec2 lessThan(f16vec2, f16vec2);" + "bvec3 lessThan(f16vec3, f16vec3);" + "bvec4 lessThan(f16vec4, f16vec4);" + + "bvec2 lessThanEqual(f16vec2, f16vec2);" + "bvec3 lessThanEqual(f16vec3, f16vec3);" + "bvec4 lessThanEqual(f16vec4, f16vec4);" + + "bvec2 greaterThan(f16vec2, f16vec2);" + "bvec3 greaterThan(f16vec3, f16vec3);" + "bvec4 greaterThan(f16vec4, f16vec4);" + + "bvec2 greaterThanEqual(f16vec2, f16vec2);" + "bvec3 greaterThanEqual(f16vec3, f16vec3);" + "bvec4 greaterThanEqual(f16vec4, f16vec4);" + + "bvec2 equal(f16vec2, f16vec2);" + "bvec3 equal(f16vec3, f16vec3);" + "bvec4 equal(f16vec4, f16vec4);" + + "bvec2 notEqual(f16vec2, f16vec2);" + "bvec3 notEqual(f16vec3, f16vec3);" + "bvec4 notEqual(f16vec4, f16vec4);" + + "\n"); + } + + // Explicit types + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "int8_t abs(int8_t);" + "i8vec2 abs(i8vec2);" + "i8vec3 abs(i8vec3);" + "i8vec4 abs(i8vec4);" + + "int8_t sign(int8_t);" + "i8vec2 sign(i8vec2);" + "i8vec3 sign(i8vec3);" + "i8vec4 sign(i8vec4);" + + "int8_t min(int8_t x, int8_t y);" + "i8vec2 min(i8vec2 x, int8_t y);" + "i8vec3 min(i8vec3 x, int8_t y);" + "i8vec4 min(i8vec4 x, int8_t y);" + "i8vec2 min(i8vec2 x, i8vec2 y);" + "i8vec3 min(i8vec3 x, i8vec3 y);" + "i8vec4 min(i8vec4 x, i8vec4 y);" + + "uint8_t min(uint8_t x, uint8_t y);" + "u8vec2 min(u8vec2 x, uint8_t y);" + "u8vec3 min(u8vec3 x, uint8_t y);" + "u8vec4 min(u8vec4 x, uint8_t y);" + "u8vec2 min(u8vec2 x, u8vec2 y);" + "u8vec3 min(u8vec3 x, u8vec3 y);" + "u8vec4 min(u8vec4 x, u8vec4 y);" + + "int8_t max(int8_t x, int8_t y);" + "i8vec2 max(i8vec2 x, int8_t y);" + "i8vec3 max(i8vec3 x, int8_t y);" + "i8vec4 max(i8vec4 x, int8_t y);" + "i8vec2 max(i8vec2 x, i8vec2 y);" + "i8vec3 max(i8vec3 x, i8vec3 y);" + "i8vec4 max(i8vec4 x, i8vec4 y);" + + "uint8_t max(uint8_t x, uint8_t y);" + "u8vec2 max(u8vec2 x, uint8_t y);" + "u8vec3 max(u8vec3 x, uint8_t y);" + "u8vec4 max(u8vec4 x, uint8_t y);" + "u8vec2 max(u8vec2 x, u8vec2 y);" + "u8vec3 max(u8vec3 x, u8vec3 y);" + "u8vec4 max(u8vec4 x, u8vec4 y);" + + "int8_t clamp(int8_t x, int8_t minVal, int8_t maxVal);" + "i8vec2 clamp(i8vec2 x, int8_t minVal, int8_t maxVal);" + "i8vec3 clamp(i8vec3 x, int8_t minVal, int8_t maxVal);" + "i8vec4 clamp(i8vec4 x, int8_t minVal, int8_t maxVal);" + "i8vec2 clamp(i8vec2 x, i8vec2 minVal, i8vec2 maxVal);" + "i8vec3 clamp(i8vec3 x, i8vec3 minVal, i8vec3 maxVal);" + "i8vec4 clamp(i8vec4 x, i8vec4 minVal, i8vec4 maxVal);" + + "uint8_t clamp(uint8_t x, uint8_t minVal, uint8_t maxVal);" + "u8vec2 clamp(u8vec2 x, uint8_t minVal, uint8_t maxVal);" + "u8vec3 clamp(u8vec3 x, uint8_t minVal, uint8_t maxVal);" + "u8vec4 clamp(u8vec4 x, uint8_t minVal, uint8_t maxVal);" + 
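A shader only sees the float16_t overloads above once a suitable extension is enabled; a minimal sketch (assuming a driver exposing GL_EXT_shader_explicit_arithmetic_types_float16):

    #version 450
    #extension GL_EXT_shader_explicit_arithmetic_types_float16 : require

    layout(location = 0) in vec4 inColor;
    layout(location = 0) out vec4 outColor;

    void main()
    {
        // clamp/mix/sqrt resolve to the float16_t overloads declared above
        f16vec4 c = clamp(f16vec4(inColor), float16_t(0.0), float16_t(1.0));
        c = mix(c, sqrt(c), float16_t(0.5));
        outColor = vec4(c);
    }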
"u8vec2 clamp(u8vec2 x, u8vec2 minVal, u8vec2 maxVal);" + "u8vec3 clamp(u8vec3 x, u8vec3 minVal, u8vec3 maxVal);" + "u8vec4 clamp(u8vec4 x, u8vec4 minVal, u8vec4 maxVal);" + + "int8_t mix(int8_t, int8_t, bool);" + "i8vec2 mix(i8vec2, i8vec2, bvec2);" + "i8vec3 mix(i8vec3, i8vec3, bvec3);" + "i8vec4 mix(i8vec4, i8vec4, bvec4);" + "uint8_t mix(uint8_t, uint8_t, bool);" + "u8vec2 mix(u8vec2, u8vec2, bvec2);" + "u8vec3 mix(u8vec3, u8vec3, bvec3);" + "u8vec4 mix(u8vec4, u8vec4, bvec4);" + + "bvec2 lessThan(i8vec2, i8vec2);" + "bvec3 lessThan(i8vec3, i8vec3);" + "bvec4 lessThan(i8vec4, i8vec4);" + "bvec2 lessThan(u8vec2, u8vec2);" + "bvec3 lessThan(u8vec3, u8vec3);" + "bvec4 lessThan(u8vec4, u8vec4);" + + "bvec2 lessThanEqual(i8vec2, i8vec2);" + "bvec3 lessThanEqual(i8vec3, i8vec3);" + "bvec4 lessThanEqual(i8vec4, i8vec4);" + "bvec2 lessThanEqual(u8vec2, u8vec2);" + "bvec3 lessThanEqual(u8vec3, u8vec3);" + "bvec4 lessThanEqual(u8vec4, u8vec4);" + + "bvec2 greaterThan(i8vec2, i8vec2);" + "bvec3 greaterThan(i8vec3, i8vec3);" + "bvec4 greaterThan(i8vec4, i8vec4);" + "bvec2 greaterThan(u8vec2, u8vec2);" + "bvec3 greaterThan(u8vec3, u8vec3);" + "bvec4 greaterThan(u8vec4, u8vec4);" + + "bvec2 greaterThanEqual(i8vec2, i8vec2);" + "bvec3 greaterThanEqual(i8vec3, i8vec3);" + "bvec4 greaterThanEqual(i8vec4, i8vec4);" + "bvec2 greaterThanEqual(u8vec2, u8vec2);" + "bvec3 greaterThanEqual(u8vec3, u8vec3);" + "bvec4 greaterThanEqual(u8vec4, u8vec4);" + + "bvec2 equal(i8vec2, i8vec2);" + "bvec3 equal(i8vec3, i8vec3);" + "bvec4 equal(i8vec4, i8vec4);" + "bvec2 equal(u8vec2, u8vec2);" + "bvec3 equal(u8vec3, u8vec3);" + "bvec4 equal(u8vec4, u8vec4);" + + "bvec2 notEqual(i8vec2, i8vec2);" + "bvec3 notEqual(i8vec3, i8vec3);" + "bvec4 notEqual(i8vec4, i8vec4);" + "bvec2 notEqual(u8vec2, u8vec2);" + "bvec3 notEqual(u8vec3, u8vec3);" + "bvec4 notEqual(u8vec4, u8vec4);" + + " int8_t bitfieldExtract( int8_t, int8_t, int8_t);" + "i8vec2 bitfieldExtract(i8vec2, int8_t, int8_t);" + "i8vec3 bitfieldExtract(i8vec3, int8_t, int8_t);" + "i8vec4 bitfieldExtract(i8vec4, int8_t, int8_t);" + + " uint8_t bitfieldExtract( uint8_t, int8_t, int8_t);" + "u8vec2 bitfieldExtract(u8vec2, int8_t, int8_t);" + "u8vec3 bitfieldExtract(u8vec3, int8_t, int8_t);" + "u8vec4 bitfieldExtract(u8vec4, int8_t, int8_t);" + + " int8_t bitfieldInsert( int8_t base, int8_t, int8_t, int8_t);" + "i8vec2 bitfieldInsert(i8vec2 base, i8vec2, int8_t, int8_t);" + "i8vec3 bitfieldInsert(i8vec3 base, i8vec3, int8_t, int8_t);" + "i8vec4 bitfieldInsert(i8vec4 base, i8vec4, int8_t, int8_t);" + + " uint8_t bitfieldInsert( uint8_t base, uint8_t, int8_t, int8_t);" + "u8vec2 bitfieldInsert(u8vec2 base, u8vec2, int8_t, int8_t);" + "u8vec3 bitfieldInsert(u8vec3 base, u8vec3, int8_t, int8_t);" + "u8vec4 bitfieldInsert(u8vec4 base, u8vec4, int8_t, int8_t);" + + " int8_t bitCount( int8_t);" + "i8vec2 bitCount(i8vec2);" + "i8vec3 bitCount(i8vec3);" + "i8vec4 bitCount(i8vec4);" + + " int8_t bitCount( uint8_t);" + "i8vec2 bitCount(u8vec2);" + "i8vec3 bitCount(u8vec3);" + "i8vec4 bitCount(u8vec4);" + + " int8_t findLSB( int8_t);" + "i8vec2 findLSB(i8vec2);" + "i8vec3 findLSB(i8vec3);" + "i8vec4 findLSB(i8vec4);" + + " int8_t findLSB( uint8_t);" + "i8vec2 findLSB(u8vec2);" + "i8vec3 findLSB(u8vec3);" + "i8vec4 findLSB(u8vec4);" + + " int8_t findMSB( int8_t);" + "i8vec2 findMSB(i8vec2);" + "i8vec3 findMSB(i8vec3);" + "i8vec4 findMSB(i8vec4);" + + " int8_t findMSB( uint8_t);" + "i8vec2 findMSB(u8vec2);" + "i8vec3 findMSB(u8vec3);" + "i8vec4 findMSB(u8vec4);" + + "int16_t 
abs(int16_t);" + "i16vec2 abs(i16vec2);" + "i16vec3 abs(i16vec3);" + "i16vec4 abs(i16vec4);" + + "int16_t sign(int16_t);" + "i16vec2 sign(i16vec2);" + "i16vec3 sign(i16vec3);" + "i16vec4 sign(i16vec4);" + + "int16_t min(int16_t x, int16_t y);" + "i16vec2 min(i16vec2 x, int16_t y);" + "i16vec3 min(i16vec3 x, int16_t y);" + "i16vec4 min(i16vec4 x, int16_t y);" + "i16vec2 min(i16vec2 x, i16vec2 y);" + "i16vec3 min(i16vec3 x, i16vec3 y);" + "i16vec4 min(i16vec4 x, i16vec4 y);" + + "uint16_t min(uint16_t x, uint16_t y);" + "u16vec2 min(u16vec2 x, uint16_t y);" + "u16vec3 min(u16vec3 x, uint16_t y);" + "u16vec4 min(u16vec4 x, uint16_t y);" + "u16vec2 min(u16vec2 x, u16vec2 y);" + "u16vec3 min(u16vec3 x, u16vec3 y);" + "u16vec4 min(u16vec4 x, u16vec4 y);" + + "int16_t max(int16_t x, int16_t y);" + "i16vec2 max(i16vec2 x, int16_t y);" + "i16vec3 max(i16vec3 x, int16_t y);" + "i16vec4 max(i16vec4 x, int16_t y);" + "i16vec2 max(i16vec2 x, i16vec2 y);" + "i16vec3 max(i16vec3 x, i16vec3 y);" + "i16vec4 max(i16vec4 x, i16vec4 y);" + + "uint16_t max(uint16_t x, uint16_t y);" + "u16vec2 max(u16vec2 x, uint16_t y);" + "u16vec3 max(u16vec3 x, uint16_t y);" + "u16vec4 max(u16vec4 x, uint16_t y);" + "u16vec2 max(u16vec2 x, u16vec2 y);" + "u16vec3 max(u16vec3 x, u16vec3 y);" + "u16vec4 max(u16vec4 x, u16vec4 y);" + + "int16_t clamp(int16_t x, int16_t minVal, int16_t maxVal);" + "i16vec2 clamp(i16vec2 x, int16_t minVal, int16_t maxVal);" + "i16vec3 clamp(i16vec3 x, int16_t minVal, int16_t maxVal);" + "i16vec4 clamp(i16vec4 x, int16_t minVal, int16_t maxVal);" + "i16vec2 clamp(i16vec2 x, i16vec2 minVal, i16vec2 maxVal);" + "i16vec3 clamp(i16vec3 x, i16vec3 minVal, i16vec3 maxVal);" + "i16vec4 clamp(i16vec4 x, i16vec4 minVal, i16vec4 maxVal);" + + "uint16_t clamp(uint16_t x, uint16_t minVal, uint16_t maxVal);" + "u16vec2 clamp(u16vec2 x, uint16_t minVal, uint16_t maxVal);" + "u16vec3 clamp(u16vec3 x, uint16_t minVal, uint16_t maxVal);" + "u16vec4 clamp(u16vec4 x, uint16_t minVal, uint16_t maxVal);" + "u16vec2 clamp(u16vec2 x, u16vec2 minVal, u16vec2 maxVal);" + "u16vec3 clamp(u16vec3 x, u16vec3 minVal, u16vec3 maxVal);" + "u16vec4 clamp(u16vec4 x, u16vec4 minVal, u16vec4 maxVal);" + + "int16_t mix(int16_t, int16_t, bool);" + "i16vec2 mix(i16vec2, i16vec2, bvec2);" + "i16vec3 mix(i16vec3, i16vec3, bvec3);" + "i16vec4 mix(i16vec4, i16vec4, bvec4);" + "uint16_t mix(uint16_t, uint16_t, bool);" + "u16vec2 mix(u16vec2, u16vec2, bvec2);" + "u16vec3 mix(u16vec3, u16vec3, bvec3);" + "u16vec4 mix(u16vec4, u16vec4, bvec4);" + + "float16_t frexp(float16_t, out int16_t);" + "f16vec2 frexp(f16vec2, out i16vec2);" + "f16vec3 frexp(f16vec3, out i16vec3);" + "f16vec4 frexp(f16vec4, out i16vec4);" + + "float16_t ldexp(float16_t, int16_t);" + "f16vec2 ldexp(f16vec2, i16vec2);" + "f16vec3 ldexp(f16vec3, i16vec3);" + "f16vec4 ldexp(f16vec4, i16vec4);" + + "int16_t halfBitsToInt16(float16_t);" + "i16vec2 halfBitsToInt16(f16vec2);" + "i16vec3 halhBitsToInt16(f16vec3);" + "i16vec4 halfBitsToInt16(f16vec4);" + + "uint16_t halfBitsToUint16(float16_t);" + "u16vec2 halfBitsToUint16(f16vec2);" + "u16vec3 halfBitsToUint16(f16vec3);" + "u16vec4 halfBitsToUint16(f16vec4);" + + "int16_t float16BitsToInt16(float16_t);" + "i16vec2 float16BitsToInt16(f16vec2);" + "i16vec3 float16BitsToInt16(f16vec3);" + "i16vec4 float16BitsToInt16(f16vec4);" + + "uint16_t float16BitsToUint16(float16_t);" + "u16vec2 float16BitsToUint16(f16vec2);" + "u16vec3 float16BitsToUint16(f16vec3);" + "u16vec4 float16BitsToUint16(f16vec4);" + + "float16_t 
int16BitsToFloat16(int16_t);" + "f16vec2 int16BitsToFloat16(i16vec2);" + "f16vec3 int16BitsToFloat16(i16vec3);" + "f16vec4 int16BitsToFloat16(i16vec4);" + + "float16_t uint16BitsToFloat16(uint16_t);" + "f16vec2 uint16BitsToFloat16(u16vec2);" + "f16vec3 uint16BitsToFloat16(u16vec3);" + "f16vec4 uint16BitsToFloat16(u16vec4);" + + "float16_t int16BitsToHalf(int16_t);" + "f16vec2 int16BitsToHalf(i16vec2);" + "f16vec3 int16BitsToHalf(i16vec3);" + "f16vec4 int16BitsToHalf(i16vec4);" + + "float16_t uint16BitsToHalf(uint16_t);" + "f16vec2 uint16BitsToHalf(u16vec2);" + "f16vec3 uint16BitsToHalf(u16vec3);" + "f16vec4 uint16BitsToHalf(u16vec4);" + + "int packInt2x16(i16vec2);" + "uint packUint2x16(u16vec2);" + "int64_t packInt4x16(i16vec4);" + "uint64_t packUint4x16(u16vec4);" + "i16vec2 unpackInt2x16(int);" + "u16vec2 unpackUint2x16(uint);" + "i16vec4 unpackInt4x16(int64_t);" + "u16vec4 unpackUint4x16(uint64_t);" + + "bvec2 lessThan(i16vec2, i16vec2);" + "bvec3 lessThan(i16vec3, i16vec3);" + "bvec4 lessThan(i16vec4, i16vec4);" + "bvec2 lessThan(u16vec2, u16vec2);" + "bvec3 lessThan(u16vec3, u16vec3);" + "bvec4 lessThan(u16vec4, u16vec4);" + + "bvec2 lessThanEqual(i16vec2, i16vec2);" + "bvec3 lessThanEqual(i16vec3, i16vec3);" + "bvec4 lessThanEqual(i16vec4, i16vec4);" + "bvec2 lessThanEqual(u16vec2, u16vec2);" + "bvec3 lessThanEqual(u16vec3, u16vec3);" + "bvec4 lessThanEqual(u16vec4, u16vec4);" + + "bvec2 greaterThan(i16vec2, i16vec2);" + "bvec3 greaterThan(i16vec3, i16vec3);" + "bvec4 greaterThan(i16vec4, i16vec4);" + "bvec2 greaterThan(u16vec2, u16vec2);" + "bvec3 greaterThan(u16vec3, u16vec3);" + "bvec4 greaterThan(u16vec4, u16vec4);" + + "bvec2 greaterThanEqual(i16vec2, i16vec2);" + "bvec3 greaterThanEqual(i16vec3, i16vec3);" + "bvec4 greaterThanEqual(i16vec4, i16vec4);" + "bvec2 greaterThanEqual(u16vec2, u16vec2);" + "bvec3 greaterThanEqual(u16vec3, u16vec3);" + "bvec4 greaterThanEqual(u16vec4, u16vec4);" + + "bvec2 equal(i16vec2, i16vec2);" + "bvec3 equal(i16vec3, i16vec3);" + "bvec4 equal(i16vec4, i16vec4);" + "bvec2 equal(u16vec2, u16vec2);" + "bvec3 equal(u16vec3, u16vec3);" + "bvec4 equal(u16vec4, u16vec4);" + + "bvec2 notEqual(i16vec2, i16vec2);" + "bvec3 notEqual(i16vec3, i16vec3);" + "bvec4 notEqual(i16vec4, i16vec4);" + "bvec2 notEqual(u16vec2, u16vec2);" + "bvec3 notEqual(u16vec3, u16vec3);" + "bvec4 notEqual(u16vec4, u16vec4);" + + " int16_t bitfieldExtract( int16_t, int16_t, int16_t);" + "i16vec2 bitfieldExtract(i16vec2, int16_t, int16_t);" + "i16vec3 bitfieldExtract(i16vec3, int16_t, int16_t);" + "i16vec4 bitfieldExtract(i16vec4, int16_t, int16_t);" + + " uint16_t bitfieldExtract( uint16_t, int16_t, int16_t);" + "u16vec2 bitfieldExtract(u16vec2, int16_t, int16_t);" + "u16vec3 bitfieldExtract(u16vec3, int16_t, int16_t);" + "u16vec4 bitfieldExtract(u16vec4, int16_t, int16_t);" + + " int16_t bitfieldInsert( int16_t base, int16_t, int16_t, int16_t);" + "i16vec2 bitfieldInsert(i16vec2 base, i16vec2, int16_t, int16_t);" + "i16vec3 bitfieldInsert(i16vec3 base, i16vec3, int16_t, int16_t);" + "i16vec4 bitfieldInsert(i16vec4 base, i16vec4, int16_t, int16_t);" + + " uint16_t bitfieldInsert( uint16_t base, uint16_t, int16_t, int16_t);" + "u16vec2 bitfieldInsert(u16vec2 base, u16vec2, int16_t, int16_t);" + "u16vec3 bitfieldInsert(u16vec3 base, u16vec3, int16_t, int16_t);" + "u16vec4 bitfieldInsert(u16vec4 base, u16vec4, int16_t, int16_t);" + + " int16_t bitCount( int16_t);" + "i16vec2 bitCount(i16vec2);" + "i16vec3 bitCount(i16vec3);" + "i16vec4 bitCount(i16vec4);" + + " int16_t bitCount( 
uint16_t);" + "i16vec2 bitCount(u16vec2);" + "i16vec3 bitCount(u16vec3);" + "i16vec4 bitCount(u16vec4);" + + " int16_t findLSB( int16_t);" + "i16vec2 findLSB(i16vec2);" + "i16vec3 findLSB(i16vec3);" + "i16vec4 findLSB(i16vec4);" + + " int16_t findLSB( uint16_t);" + "i16vec2 findLSB(u16vec2);" + "i16vec3 findLSB(u16vec3);" + "i16vec4 findLSB(u16vec4);" + + " int16_t findMSB( int16_t);" + "i16vec2 findMSB(i16vec2);" + "i16vec3 findMSB(i16vec3);" + "i16vec4 findMSB(i16vec4);" + + " int16_t findMSB( uint16_t);" + "i16vec2 findMSB(u16vec2);" + "i16vec3 findMSB(u16vec3);" + "i16vec4 findMSB(u16vec4);" + + "int16_t pack16(i8vec2);" + "uint16_t pack16(u8vec2);" + "int32_t pack32(i8vec4);" + "uint32_t pack32(u8vec4);" + "int32_t pack32(i16vec2);" + "uint32_t pack32(u16vec2);" + "int64_t pack64(i16vec4);" + "uint64_t pack64(u16vec4);" + "int64_t pack64(i32vec2);" + "uint64_t pack64(u32vec2);" + + "i8vec2 unpack8(int16_t);" + "u8vec2 unpack8(uint16_t);" + "i8vec4 unpack8(int32_t);" + "u8vec4 unpack8(uint32_t);" + "i16vec2 unpack16(int32_t);" + "u16vec2 unpack16(uint32_t);" + "i16vec4 unpack16(int64_t);" + "u16vec4 unpack16(uint64_t);" + "i32vec2 unpack32(int64_t);" + "u32vec2 unpack32(uint64_t);" + + "float64_t radians(float64_t);" + "f64vec2 radians(f64vec2);" + "f64vec3 radians(f64vec3);" + "f64vec4 radians(f64vec4);" + + "float64_t degrees(float64_t);" + "f64vec2 degrees(f64vec2);" + "f64vec3 degrees(f64vec3);" + "f64vec4 degrees(f64vec4);" + + "float64_t sin(float64_t);" + "f64vec2 sin(f64vec2);" + "f64vec3 sin(f64vec3);" + "f64vec4 sin(f64vec4);" + + "float64_t cos(float64_t);" + "f64vec2 cos(f64vec2);" + "f64vec3 cos(f64vec3);" + "f64vec4 cos(f64vec4);" + + "float64_t tan(float64_t);" + "f64vec2 tan(f64vec2);" + "f64vec3 tan(f64vec3);" + "f64vec4 tan(f64vec4);" + + "float64_t asin(float64_t);" + "f64vec2 asin(f64vec2);" + "f64vec3 asin(f64vec3);" + "f64vec4 asin(f64vec4);" + + "float64_t acos(float64_t);" + "f64vec2 acos(f64vec2);" + "f64vec3 acos(f64vec3);" + "f64vec4 acos(f64vec4);" + + "float64_t atan(float64_t, float64_t);" + "f64vec2 atan(f64vec2, f64vec2);" + "f64vec3 atan(f64vec3, f64vec3);" + "f64vec4 atan(f64vec4, f64vec4);" + + "float64_t atan(float64_t);" + "f64vec2 atan(f64vec2);" + "f64vec3 atan(f64vec3);" + "f64vec4 atan(f64vec4);" + + "float64_t sinh(float64_t);" + "f64vec2 sinh(f64vec2);" + "f64vec3 sinh(f64vec3);" + "f64vec4 sinh(f64vec4);" + + "float64_t cosh(float64_t);" + "f64vec2 cosh(f64vec2);" + "f64vec3 cosh(f64vec3);" + "f64vec4 cosh(f64vec4);" + + "float64_t tanh(float64_t);" + "f64vec2 tanh(f64vec2);" + "f64vec3 tanh(f64vec3);" + "f64vec4 tanh(f64vec4);" + + "float64_t asinh(float64_t);" + "f64vec2 asinh(f64vec2);" + "f64vec3 asinh(f64vec3);" + "f64vec4 asinh(f64vec4);" + + "float64_t acosh(float64_t);" + "f64vec2 acosh(f64vec2);" + "f64vec3 acosh(f64vec3);" + "f64vec4 acosh(f64vec4);" + + "float64_t atanh(float64_t);" + "f64vec2 atanh(f64vec2);" + "f64vec3 atanh(f64vec3);" + "f64vec4 atanh(f64vec4);" + + "float64_t pow(float64_t, float64_t);" + "f64vec2 pow(f64vec2, f64vec2);" + "f64vec3 pow(f64vec3, f64vec3);" + "f64vec4 pow(f64vec4, f64vec4);" + + "float64_t exp(float64_t);" + "f64vec2 exp(f64vec2);" + "f64vec3 exp(f64vec3);" + "f64vec4 exp(f64vec4);" + + "float64_t log(float64_t);" + "f64vec2 log(f64vec2);" + "f64vec3 log(f64vec3);" + "f64vec4 log(f64vec4);" + + "float64_t exp2(float64_t);" + "f64vec2 exp2(f64vec2);" + "f64vec3 exp2(f64vec3);" + "f64vec4 exp2(f64vec4);" + + "float64_t log2(float64_t);" + "f64vec2 log2(f64vec2);" + "f64vec3 log2(f64vec3);" + 
"f64vec4 log2(f64vec4);" + "\n"); + } + if (profile != EEsProfile && version >= 450) { + stageBuiltins[EShLangFragment].append(derivativesAndControl64bits); + stageBuiltins[EShLangFragment].append( + "float64_t interpolateAtCentroid(float64_t);" + "f64vec2 interpolateAtCentroid(f64vec2);" + "f64vec3 interpolateAtCentroid(f64vec3);" + "f64vec4 interpolateAtCentroid(f64vec4);" + + "float64_t interpolateAtSample(float64_t, int);" + "f64vec2 interpolateAtSample(f64vec2, int);" + "f64vec3 interpolateAtSample(f64vec3, int);" + "f64vec4 interpolateAtSample(f64vec4, int);" + + "float64_t interpolateAtOffset(float64_t, f64vec2);" + "f64vec2 interpolateAtOffset(f64vec2, f64vec2);" + "f64vec3 interpolateAtOffset(f64vec3, f64vec2);" + "f64vec4 interpolateAtOffset(f64vec4, f64vec2);" + + "\n"); + + } + + //============================================================================ + // + // Prototypes for built-in functions seen by vertex shaders only. + // (Except legacy lod functions, where it depends which release they are + // vertex only.) + // + //============================================================================ + + // + // Geometric Functions. + // + if (spvVersion.vulkan == 0 && IncludeLegacy(version, profile, spvVersion)) + stageBuiltins[EShLangVertex].append("vec4 ftransform();"); + + // + // Original-style texture Functions with lod. + // + TString* s; + if (version == 100) + s = &stageBuiltins[EShLangVertex]; + else + s = &commonBuiltins; + if ((profile == EEsProfile && version == 100) || + profile == ECompatibilityProfile || + (profile == ECoreProfile && version < 420) || + profile == ENoProfile) { + if (spvVersion.spv == 0) { + s->append( + "vec4 texture2DLod(sampler2D, vec2, float);" // GL_ARB_shader_texture_lod + "vec4 texture2DProjLod(sampler2D, vec3, float);" // GL_ARB_shader_texture_lod + "vec4 texture2DProjLod(sampler2D, vec4, float);" // GL_ARB_shader_texture_lod + "vec4 texture3DLod(sampler3D, vec3, float);" // GL_ARB_shader_texture_lod // OES_texture_3D, but caught by keyword check + "vec4 texture3DProjLod(sampler3D, vec4, float);" // GL_ARB_shader_texture_lod // OES_texture_3D, but caught by keyword check + "vec4 textureCubeLod(samplerCube, vec3, float);" // GL_ARB_shader_texture_lod + + "\n"); + } + } + if ( profile == ECompatibilityProfile || + (profile == ECoreProfile && version < 420) || + profile == ENoProfile) { + if (spvVersion.spv == 0) { + s->append( + "vec4 texture1DLod(sampler1D, float, float);" // GL_ARB_shader_texture_lod + "vec4 texture1DProjLod(sampler1D, vec2, float);" // GL_ARB_shader_texture_lod + "vec4 texture1DProjLod(sampler1D, vec4, float);" // GL_ARB_shader_texture_lod + "vec4 shadow1DLod(sampler1DShadow, vec3, float);" // GL_ARB_shader_texture_lod + "vec4 shadow2DLod(sampler2DShadow, vec3, float);" // GL_ARB_shader_texture_lod + "vec4 shadow1DProjLod(sampler1DShadow, vec4, float);" // GL_ARB_shader_texture_lod + "vec4 shadow2DProjLod(sampler2DShadow, vec4, float);" // GL_ARB_shader_texture_lod + + "vec4 texture1DGradARB(sampler1D, float, float, float);" // GL_ARB_shader_texture_lod + "vec4 texture1DProjGradARB(sampler1D, vec2, float, float);" // GL_ARB_shader_texture_lod + "vec4 texture1DProjGradARB(sampler1D, vec4, float, float);" // GL_ARB_shader_texture_lod + "vec4 texture2DGradARB(sampler2D, vec2, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DProjGradARB(sampler2D, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DProjGradARB(sampler2D, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 
texture3DGradARB(sampler3D, vec3, vec3, vec3);" // GL_ARB_shader_texture_lod + "vec4 texture3DProjGradARB(sampler3D, vec4, vec3, vec3);" // GL_ARB_shader_texture_lod + "vec4 textureCubeGradARB(samplerCube, vec3, vec3, vec3);" // GL_ARB_shader_texture_lod + "vec4 shadow1DGradARB(sampler1DShadow, vec3, float, float);" // GL_ARB_shader_texture_lod + "vec4 shadow1DProjGradARB( sampler1DShadow, vec4, float, float);" // GL_ARB_shader_texture_lod + "vec4 shadow2DGradARB(sampler2DShadow, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 shadow2DProjGradARB( sampler2DShadow, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DRectGradARB(sampler2DRect, vec2, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DRectProjGradARB( sampler2DRect, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DRectProjGradARB( sampler2DRect, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 shadow2DRectGradARB( sampler2DRectShadow, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 shadow2DRectProjGradARB(sampler2DRectShadow, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod + + "\n"); + } + } + + if ((profile != EEsProfile && version >= 150) || + (profile == EEsProfile && version >= 310)) { + //============================================================================ + // + // Prototypes for built-in functions seen by geometry shaders only. + // + //============================================================================ + + if (profile != EEsProfile && version >= 400) { + stageBuiltins[EShLangGeometry].append( + "void EmitStreamVertex(int);" + "void EndStreamPrimitive(int);" + ); + } + stageBuiltins[EShLangGeometry].append( + "void EmitVertex();" + "void EndPrimitive();" + "\n"); + } +#endif + + //============================================================================ + // + // Prototypes for all control functions. 
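For orientation, the legacy lod entry points above are what pre-core shaders call directly; a minimal compatibility-profile sketch (illustrative only):

    #version 120
    #extension GL_ARB_shader_texture_lod : require

    uniform sampler2D heightMap;

    void main()
    {
        // an explicit-lod fetch is required in a vertex shader, which has no derivatives
        vec4 h = texture2DLod(heightMap, gl_MultiTexCoord0.xy, 0.0);
        gl_Position = gl_ModelViewProjectionMatrix * (gl_Vertex + vec4(0.0, h.x, 0.0, 0.0));
    }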
+
+    //============================================================================
+    //
+    // Prototypes for all control functions.
+    //
+    //============================================================================
+    bool esBarrier = (profile == EEsProfile && version >= 310);
+    if ((profile != EEsProfile && version >= 150) || esBarrier)
+        stageBuiltins[EShLangTessControl].append(
+            "void barrier();"
+            );
+    if ((profile != EEsProfile && version >= 420) || esBarrier)
+        stageBuiltins[EShLangCompute].append(
+            "void barrier();"
+            );
+    if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+        stageBuiltins[EShLangMeshNV].append(
+            "void barrier();"
+            );
+        stageBuiltins[EShLangTaskNV].append(
+            "void barrier();"
+            );
+    }
+    if ((profile != EEsProfile && version >= 130) || esBarrier)
+        commonBuiltins.append(
+            "void memoryBarrier();"
+            );
+    if ((profile != EEsProfile && version >= 420) || esBarrier) {
+        commonBuiltins.append(
+            "void memoryBarrierBuffer();"
+            );
+        stageBuiltins[EShLangCompute].append(
+            "void memoryBarrierShared();"
+            "void groupMemoryBarrier();"
+            );
+    }
+#ifndef GLSLANG_WEB
+    if ((profile != EEsProfile && version >= 420) || esBarrier) {
+        if (spvVersion.vulkan == 0) {
+            commonBuiltins.append("void memoryBarrierAtomicCounter();");
+        }
+        commonBuiltins.append("void memoryBarrierImage();");
+    }
+    if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+        stageBuiltins[EShLangMeshNV].append(
+            "void memoryBarrierShared();"
+            "void groupMemoryBarrier();"
+            );
+        stageBuiltins[EShLangTaskNV].append(
+            "void memoryBarrierShared();"
+            "void groupMemoryBarrier();"
+            );
+    }
+
+    commonBuiltins.append("void controlBarrier(int, int, int, int);\n"
+                          "void memoryBarrier(int, int, int);\n");
+
+    commonBuiltins.append("void debugPrintfEXT();\n");
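A small compute-shader sketch of how these barrier calls pair up in practice (illustrative only):

    #version 450

    layout(local_size_x = 64) in;
    shared float tile[64];

    void main()
    {
        tile[gl_LocalInvocationIndex] = float(gl_GlobalInvocationID.x);
        memoryBarrierShared(); // make the shared-memory write visible to the workgroup...
        barrier();             // ...and wait until every invocation reaches this point
        float neighbor = tile[(gl_LocalInvocationIndex + 1u) % 64u]; // now safe to read
    }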
+
+    if (profile != EEsProfile && version >= 450) {
+        // coopMatStoreNV perhaps ought to have "out" on the buf parameter, but
+        // adding it introduces undesirable tempArgs on the stack. What we want
+        // is more like "buf" thought of as a pointer value being an in parameter.
+        stageBuiltins[EShLangCompute].append(
+            "void coopMatLoadNV(out fcoopmatNV m, volatile coherent float16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out fcoopmatNV m, volatile coherent float[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent float16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent float[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent float64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(fcoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "fcoopmatNV coopMatMulAddNV(fcoopmatNV A, fcoopmatNV B, fcoopmatNV C);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent int8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent int16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent int[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent int64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent ivec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent ivec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out icoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent int8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent int16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent int[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent int64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent ivec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent ivec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent int8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent int16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent int[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent int64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent ivec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent ivec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(icoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent int8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent int16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent int[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent int64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent ivec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent ivec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+            "void coopMatStoreNV(ucoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+            "icoopmatNV coopMatMulAddNV(icoopmatNV A, icoopmatNV B, icoopmatNV C);\n"
+            "ucoopmatNV coopMatMulAddNV(ucoopmatNV A, ucoopmatNV B, ucoopmatNV C);\n"
+            );
+    }
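A sketch of the intended load/multiply-add/store pattern (Vulkan compute, assuming GL_NV_cooperative_matrix and a 16x16 fp16 configuration; the buffer names and bindings are hypothetical):

    #version 450
    #extension GL_NV_cooperative_matrix : enable
    #extension GL_KHR_memory_scope_semantics : enable
    #extension GL_EXT_shader_explicit_arithmetic_types_float16 : enable

    layout(local_size_x = 32) in;
    layout(set = 0, binding = 0) buffer BufA { float16_t a[]; };
    layout(set = 0, binding = 1) buffer BufB { float16_t b[]; };
    layout(set = 0, binding = 2) buffer BufC { float16_t c[]; };

    void main()
    {
        fcoopmatNV<16, gl_ScopeSubgroup, 16, 16> A, B, C;
        coopMatLoadNV(A, a, 0u, 16u, false); // element offset 0, stride 16, row-major
        coopMatLoadNV(B, b, 0u, 16u, false);
        C = fcoopmatNV<16, gl_ScopeSubgroup, 16, 16>(0.0);
        C = coopMatMulAddNV(A, B, C);        // C = A * B + C
        coopMatStoreNV(C, c, 0u, 16u, false);
    }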
+
+    //============================================================================
+    //
+    // Prototypes for built-in functions seen by fragment shaders only.
+    //
+    //============================================================================
+
+    //
+    // Original-style texture Functions with bias.
+    //
+    if (spvVersion.spv == 0 && (profile != EEsProfile || version == 100)) {
+        stageBuiltins[EShLangFragment].append(
+            "vec4 texture2D(sampler2D, vec2, float);"
+            "vec4 texture2DProj(sampler2D, vec3, float);"
+            "vec4 texture2DProj(sampler2D, vec4, float);"
+            "vec4 texture3D(sampler3D, vec3, float);" // OES_texture_3D
+            "vec4 texture3DProj(sampler3D, vec4, float);" // OES_texture_3D
+            "vec4 textureCube(samplerCube, vec3, float);"
+            "\n");
+    }
+    if (spvVersion.spv == 0 && (profile != EEsProfile && version > 100)) {
+        stageBuiltins[EShLangFragment].append(
+            "vec4 texture1D(sampler1D, float, float);"
+            "vec4 texture1DProj(sampler1D, vec2, float);"
+            "vec4 texture1DProj(sampler1D, vec4, float);"
+            "vec4 shadow1D(sampler1DShadow, vec3, float);"
+            "vec4 shadow2D(sampler2DShadow, vec3, float);"
+            "vec4 shadow1DProj(sampler1DShadow, vec4, float);"
+            "vec4 shadow2DProj(sampler2DShadow, vec4, float);"
+            "\n");
+    }
+    if (spvVersion.spv == 0 && profile == EEsProfile) {
+        stageBuiltins[EShLangFragment].append(
+            "vec4 texture2DLodEXT(sampler2D, vec2, float);" // GL_EXT_shader_texture_lod
+            "vec4 texture2DProjLodEXT(sampler2D, vec3, float);" // GL_EXT_shader_texture_lod
+            "vec4 texture2DProjLodEXT(sampler2D, vec4, float);" // GL_EXT_shader_texture_lod
+            "vec4 textureCubeLodEXT(samplerCube, vec3, float);" // GL_EXT_shader_texture_lod
+            "\n");
+    }
+
+    // GL_ARB_derivative_control
+    if (profile != EEsProfile && version >= 400) {
+        stageBuiltins[EShLangFragment].append(derivativeControls);
+        stageBuiltins[EShLangFragment].append("\n");
+    }
+
+    // GL_OES_shader_multisample_interpolation
+    if ((profile == EEsProfile && version >= 310) ||
+        (profile != EEsProfile && version >= 400)) {
+        stageBuiltins[EShLangFragment].append(
+            "float interpolateAtCentroid(float);" "vec2 interpolateAtCentroid(vec2);" "vec3 interpolateAtCentroid(vec3);" "vec4 interpolateAtCentroid(vec4);"
+            "float interpolateAtSample(float, int);" "vec2 interpolateAtSample(vec2, int);" "vec3 interpolateAtSample(vec3, int);" "vec4 interpolateAtSample(vec4, int);"
+            "float interpolateAtOffset(float, vec2);" "vec2 interpolateAtOffset(vec2, vec2);" "vec3 interpolateAtOffset(vec3, vec2);" "vec4 interpolateAtOffset(vec4, vec2);"
+            "\n");
+    }
+
+    stageBuiltins[EShLangFragment].append(
+        "void beginInvocationInterlockARB(void);"
+        "void endInvocationInterlockARB(void);");
+
+    stageBuiltins[EShLangFragment].append(
+        "bool helperInvocationEXT();"
+        "\n");
+
+    // GL_AMD_shader_explicit_vertex_parameter
+    if (profile != EEsProfile && version >= 450) {
+        stageBuiltins[EShLangFragment].append(
+            "float interpolateAtVertexAMD(float, uint);" "vec2 interpolateAtVertexAMD(vec2, uint);" "vec3 interpolateAtVertexAMD(vec3, uint);" "vec4 interpolateAtVertexAMD(vec4, uint);"
+            "int interpolateAtVertexAMD(int, uint);" "ivec2 interpolateAtVertexAMD(ivec2, uint);" "ivec3 interpolateAtVertexAMD(ivec3, uint);" "ivec4 interpolateAtVertexAMD(ivec4, uint);"
+            "uint interpolateAtVertexAMD(uint, uint);" "uvec2 interpolateAtVertexAMD(uvec2, uint);" "uvec3 interpolateAtVertexAMD(uvec3, uint);" "uvec4 interpolateAtVertexAMD(uvec4, uint);"
+            "float16_t interpolateAtVertexAMD(float16_t, uint);" "f16vec2 interpolateAtVertexAMD(f16vec2, uint);" "f16vec3 interpolateAtVertexAMD(f16vec3, uint);" "f16vec4 interpolateAtVertexAMD(f16vec4, uint);"
+            "\n");
+    }
+
+    // GL_AMD_gpu_shader_half_float
+    if (profile != EEsProfile && version >= 450) {
+        stageBuiltins[EShLangFragment].append(derivativesAndControl16bits);
+        stageBuiltins[EShLangFragment].append("\n");
+
+        stageBuiltins[EShLangFragment].append(
+            "float16_t interpolateAtCentroid(float16_t);" "f16vec2 interpolateAtCentroid(f16vec2);" "f16vec3 interpolateAtCentroid(f16vec3);" "f16vec4 interpolateAtCentroid(f16vec4);"
+            "float16_t interpolateAtSample(float16_t, int);" "f16vec2 interpolateAtSample(f16vec2, int);" "f16vec3 interpolateAtSample(f16vec3, int);" "f16vec4 interpolateAtSample(f16vec4, int);"
+            "float16_t interpolateAtOffset(float16_t, f16vec2);" "f16vec2 interpolateAtOffset(f16vec2, f16vec2);" "f16vec3 interpolateAtOffset(f16vec3, f16vec2);" "f16vec4 interpolateAtOffset(f16vec4, f16vec2);"
+            "\n");
+    }
+
+    // GL_ARB_shader_clock & GL_EXT_shader_realtime_clock
+    if (profile != EEsProfile && version >= 450) {
+        commonBuiltins.append(
+            "uvec2 clock2x32ARB();"
+            "uint64_t clockARB();"
+            "uvec2 clockRealtime2x32EXT();"
+            "uint64_t clockRealtimeEXT();"
+            "\n");
+    }
+
+    // GL_AMD_shader_fragment_mask
+    if (profile != EEsProfile && version >= 450 && spvVersion.vulkan > 0) {
+        stageBuiltins[EShLangFragment].append(
+            "uint fragmentMaskFetchAMD(subpassInputMS);"
+            "uint fragmentMaskFetchAMD(isubpassInputMS);"
+            "uint fragmentMaskFetchAMD(usubpassInputMS);"
+            "vec4 fragmentFetchAMD(subpassInputMS, uint);"
+            "ivec4 fragmentFetchAMD(isubpassInputMS, uint);"
+            "uvec4 fragmentFetchAMD(usubpassInputMS, uint);"
+            "\n");
+    }
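The clock builtins above read a free-running counter within a single invocation; a hedged profiling sketch (timer resolution and semantics vary by driver):

    #version 450
    #extension GL_EXT_shader_realtime_clock : require

    layout(location = 0) out vec4 color;

    void main()
    {
        uvec2 t0 = clockRealtime2x32EXT();
        // ... shading work being timed ...
        uvec2 t1 = clockRealtime2x32EXT();
        color = vec4(float(t1.x - t0.x) * 1e-6); // low 32 bits suffice for short spans
    }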
"int rayQueryGetIntersectionPrimitiveIndexEXT(rayQueryEXT, bool);" + "vec2 rayQueryGetIntersectionBarycentricsEXT(rayQueryEXT, bool);" + "bool rayQueryGetIntersectionFrontFaceEXT(rayQueryEXT, bool);" + "bool rayQueryGetIntersectionCandidateAABBOpaqueEXT(rayQueryEXT);" + "vec3 rayQueryGetIntersectionObjectRayDirectionEXT(rayQueryEXT, bool);" + "vec3 rayQueryGetIntersectionObjectRayOriginEXT(rayQueryEXT, bool);" + "mat4x3 rayQueryGetIntersectionObjectToWorldEXT(rayQueryEXT, bool);" + "mat4x3 rayQueryGetIntersectionWorldToObjectEXT(rayQueryEXT, bool);" + "\n"); + + stageBuiltins[EShLangRayGen].append( + "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void traceRayEXT(accelerationStructureEXT,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void executeCallableNV(uint, int);" + "void executeCallableEXT(uint, int);" + "\n"); + stageBuiltins[EShLangIntersect].append( + "bool reportIntersectionNV(float, uint);" + "bool reportIntersectionEXT(float, uint);" + "\n"); + stageBuiltins[EShLangAnyHit].append( + "void ignoreIntersectionNV();" + "void ignoreIntersectionEXT();" + "void terminateRayNV();" + "void terminateRayEXT();" + "\n"); + stageBuiltins[EShLangClosestHit].append( + "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void traceRayEXT(accelerationStructureEXT,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void executeCallableNV(uint, int);" + "void executeCallableEXT(uint, int);" + "\n"); + stageBuiltins[EShLangMiss].append( + "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void traceRayEXT(accelerationStructureEXT,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void executeCallableNV(uint, int);" + "void executeCallableEXT(uint, int);" + "\n"); + stageBuiltins[EShLangCallable].append( + "void executeCallableNV(uint, int);" + "void executeCallableEXT(uint, int);" + "\n"); + } + + //E_SPV_NV_compute_shader_derivatives + if ((profile == EEsProfile && version >= 320) || (profile != EEsProfile && version >= 450)) { + stageBuiltins[EShLangCompute].append(derivativeControls); + stageBuiltins[EShLangCompute].append("\n"); + } + if (profile != EEsProfile && version >= 450) { + stageBuiltins[EShLangCompute].append(derivativesAndControl16bits); + stageBuiltins[EShLangCompute].append(derivativesAndControl64bits); + stageBuiltins[EShLangCompute].append("\n"); + } + + // Builtins for GL_NV_mesh_shader + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + stageBuiltins[EShLangMeshNV].append( + "void writePackedPrimitiveIndices4x8NV(uint, uint);" + "\n"); + } +#endif + + //============================================================================ + // + // Standard Uniforms + // + //============================================================================ + + // + // Depth range in window coordinates, p. 
+
+    //============================================================================
+    //
+    // Standard Uniforms
+    //
+    //============================================================================
+
+    //
+    // Depth range in window coordinates, p. 33
+    //
+    if (spvVersion.spv == 0) {
+        commonBuiltins.append(
+            "struct gl_DepthRangeParameters {"
+            );
+        if (profile == EEsProfile) {
+            commonBuiltins.append(
+                "highp float near;" // n
+                "highp float far;" // f
+                "highp float diff;" // f - n
+                );
+        } else {
+#ifndef GLSLANG_WEB
+            commonBuiltins.append(
+                "float near;" // n
+                "float far;" // f
+                "float diff;" // f - n
+                );
+#endif
+        }
+
+        commonBuiltins.append(
+            "};"
+            "uniform gl_DepthRangeParameters gl_DepthRange;"
+            "\n");
+    }
+
+#ifndef GLSLANG_WEB
+    if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) {
+        //
+        // Matrix state. p. 31, 32, 37, 39, 40.
+        //
+        commonBuiltins.append(
+            "uniform mat4 gl_ModelViewMatrix;"
+            "uniform mat4 gl_ProjectionMatrix;"
+            "uniform mat4 gl_ModelViewProjectionMatrix;"
+
+            //
+            // Derived matrix state that provides inverse and transposed versions
+            // of the matrices above.
+            //
+            "uniform mat3 gl_NormalMatrix;"
+
+            "uniform mat4 gl_ModelViewMatrixInverse;"
+            "uniform mat4 gl_ProjectionMatrixInverse;"
+            "uniform mat4 gl_ModelViewProjectionMatrixInverse;"
+
+            "uniform mat4 gl_ModelViewMatrixTranspose;"
+            "uniform mat4 gl_ProjectionMatrixTranspose;"
+            "uniform mat4 gl_ModelViewProjectionMatrixTranspose;"
+
+            "uniform mat4 gl_ModelViewMatrixInverseTranspose;"
+            "uniform mat4 gl_ProjectionMatrixInverseTranspose;"
+            "uniform mat4 gl_ModelViewProjectionMatrixInverseTranspose;"
+
+            //
+            // Normal scaling p. 39.
+            //
+            "uniform float gl_NormalScale;"
+
+            //
+            // Point Size, p. 66, 67.
+            //
+            "struct gl_PointParameters {"
+                "float size;"
+                "float sizeMin;"
+                "float sizeMax;"
+                "float fadeThresholdSize;"
+                "float distanceConstantAttenuation;"
+                "float distanceLinearAttenuation;"
+                "float distanceQuadraticAttenuation;"
+            "};"
+
+            "uniform gl_PointParameters gl_Point;"
+
+            //
+            // Material State p. 50, 55.
+            //
+            "struct gl_MaterialParameters {"
+                "vec4 emission;" // Ecm
+                "vec4 ambient;" // Acm
+                "vec4 diffuse;" // Dcm
+                "vec4 specular;" // Scm
+                "float shininess;" // Srm
+            "};"
+            "uniform gl_MaterialParameters gl_FrontMaterial;"
+            "uniform gl_MaterialParameters gl_BackMaterial;"
+
+            //
+            // Light State p 50, 53, 55.
+            //
+            "struct gl_LightSourceParameters {"
+                "vec4 ambient;" // Acli
+                "vec4 diffuse;" // Dcli
+                "vec4 specular;" // Scli
+                "vec4 position;" // Ppli
+                "vec4 halfVector;" // Derived: Hi
+                "vec3 spotDirection;" // Sdli
+                "float spotExponent;" // Srli
+                "float spotCutoff;" // Crli (range: [0.0,90.0], 180.0)
+                "float spotCosCutoff;" // Derived: cos(Crli) (range: [1.0,0.0],-1.0)
+                "float constantAttenuation;" // K0
+                "float linearAttenuation;" // K1
+                "float quadraticAttenuation;" // K2
+            "};"
+
+            "struct gl_LightModelParameters {"
+                "vec4 ambient;" // Acs
+            "};"
+
+            "uniform gl_LightModelParameters gl_LightModel;"
+
+            //
+            // Derived state from products of light and material.
+            //
+            "struct gl_LightModelProducts {"
+                "vec4 sceneColor;" // Derived. Ecm + Acm * Acs
+            "};"
+
+            "uniform gl_LightModelProducts gl_FrontLightModelProduct;"
+            "uniform gl_LightModelProducts gl_BackLightModelProduct;"
+
+            "struct gl_LightProducts {"
+                "vec4 ambient;" // Acm * Acli
+                "vec4 diffuse;" // Dcm * Dcli
+                "vec4 specular;" // Scm * Scli
+            "};"
+
+            //
+            // Fog p. 161
+            //
+            "struct gl_FogParameters {"
+                "vec4 color;"
+                "float density;"
+                "float start;"
+                "float end;"
+                "float scale;" // 1 / (gl_FogEnd - gl_FogStart)
+            "};"
+
+            "uniform gl_FogParameters gl_Fog;"
+
+            "\n");
+    }
+#endif
+
+    //============================================================================
+    //
+    // Define the interface to the compute shader.
+    //
+    //============================================================================
+
+    if ((profile != EEsProfile && version >= 420) ||
+        (profile == EEsProfile && version >= 310)) {
+        stageBuiltins[EShLangCompute].append(
+            "in highp uvec3 gl_NumWorkGroups;"
+            "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+            "in highp uvec3 gl_WorkGroupID;"
+            "in highp uvec3 gl_LocalInvocationID;"
+            "in highp uvec3 gl_GlobalInvocationID;"
+            "in highp uint gl_LocalInvocationIndex;"
+            "\n");
+    }
+
+    if ((profile != EEsProfile && version >= 140) ||
+        (profile == EEsProfile && version >= 310)) {
+        stageBuiltins[EShLangCompute].append(
+            "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+            "\n");
+    }
+
+#ifndef GLSLANG_WEB
+    //============================================================================
+    //
+    // Define the interface to the mesh/task shader.
+    //
+    //============================================================================
+
+    if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+        // per-vertex attributes
+        stageBuiltins[EShLangMeshNV].append(
+            "out gl_MeshPerVertexNV {"
+                "vec4 gl_Position;"
+                "float gl_PointSize;"
+                "float gl_ClipDistance[];"
+                "float gl_CullDistance[];"
+                "perviewNV vec4 gl_PositionPerViewNV[];"
+                "perviewNV float gl_ClipDistancePerViewNV[][];"
+                "perviewNV float gl_CullDistancePerViewNV[][];"
+            "} gl_MeshVerticesNV[];"
+            );
+
+        // per-primitive attributes
+        stageBuiltins[EShLangMeshNV].append(
+            "perprimitiveNV out gl_MeshPerPrimitiveNV {"
+                "int gl_PrimitiveID;"
+                "int gl_Layer;"
+                "int gl_ViewportIndex;"
+                "int gl_ViewportMask[];"
+                "perviewNV int gl_LayerPerViewNV[];"
+                "perviewNV int gl_ViewportMaskPerViewNV[][];"
+            "} gl_MeshPrimitivesNV[];"
+            );
+
+        stageBuiltins[EShLangMeshNV].append(
+            "out uint gl_PrimitiveCountNV;"
+            "out uint gl_PrimitiveIndicesNV[];"
+            "in uint gl_MeshViewCountNV;"
+            "in uint gl_MeshViewIndicesNV[4];"
+            "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+            "in highp uvec3 gl_WorkGroupID;"
+            "in highp uvec3 gl_LocalInvocationID;"
+            "in highp uvec3 gl_GlobalInvocationID;"
+            "in highp uint gl_LocalInvocationIndex;"
+            "\n");
+
+        stageBuiltins[EShLangTaskNV].append(
+            "out uint gl_TaskCountNV;"
+            "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+            "in highp uvec3 gl_WorkGroupID;"
+            "in highp uvec3 gl_LocalInvocationID;"
+            "in highp uvec3 gl_GlobalInvocationID;"
+            "in highp uint gl_LocalInvocationIndex;"
+            "in uint gl_MeshViewCountNV;"
+            "in uint gl_MeshViewIndicesNV[4];"
+            "\n");
+    }
+
+    if (profile != EEsProfile && version >= 450) {
+        stageBuiltins[EShLangMeshNV].append(
+            "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+            "in int gl_DrawIDARB;" // GL_ARB_shader_draw_parameters
+            "\n");
+
+        stageBuiltins[EShLangTaskNV].append(
+            "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+            "in int gl_DrawIDARB;" // GL_ARB_shader_draw_parameters
+            "\n");
+
+        if (version >= 460) {
+            stageBuiltins[EShLangMeshNV].append(
+                "in int gl_DrawID;"
+                "\n");
+
+            stageBuiltins[EShLangTaskNV].append(
+                "in int gl_DrawID;"
+                "\n");
+        }
+    }
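Putting the compute-stage identifiers declared above to work, a canonical indexing sketch (buffer layout is hypothetical):

    #version 450

    layout(local_size_x = 256) in;
    layout(set = 0, binding = 0) buffer Data { float values[]; };

    void main()
    {
        // gl_GlobalInvocationID = gl_WorkGroupID * gl_WorkGroupSize + gl_LocalInvocationID
        uint i = gl_GlobalInvocationID.x;
        if (i < uint(values.length()))
            values[i] *= 2.0;
    }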
+
+    //============================================================================
+    //
+    // Define the interface to the vertex shader.
+    //
+    //============================================================================
+
+    if (profile != EEsProfile) {
+        if (version < 130) {
+            stageBuiltins[EShLangVertex].append(
+                "attribute vec4 gl_Color;" "attribute vec4 gl_SecondaryColor;" "attribute vec3 gl_Normal;" "attribute vec4 gl_Vertex;"
+                "attribute vec4 gl_MultiTexCoord0;" "attribute vec4 gl_MultiTexCoord1;" "attribute vec4 gl_MultiTexCoord2;" "attribute vec4 gl_MultiTexCoord3;"
+                "attribute vec4 gl_MultiTexCoord4;" "attribute vec4 gl_MultiTexCoord5;" "attribute vec4 gl_MultiTexCoord6;" "attribute vec4 gl_MultiTexCoord7;"
+                "attribute float gl_FogCoord;"
+                "\n");
+        } else if (IncludeLegacy(version, profile, spvVersion)) {
+            stageBuiltins[EShLangVertex].append(
+                "in vec4 gl_Color;" "in vec4 gl_SecondaryColor;" "in vec3 gl_Normal;" "in vec4 gl_Vertex;"
+                "in vec4 gl_MultiTexCoord0;" "in vec4 gl_MultiTexCoord1;" "in vec4 gl_MultiTexCoord2;" "in vec4 gl_MultiTexCoord3;"
+                "in vec4 gl_MultiTexCoord4;" "in vec4 gl_MultiTexCoord5;" "in vec4 gl_MultiTexCoord6;" "in vec4 gl_MultiTexCoord7;"
+                "in float gl_FogCoord;"
+                "\n");
+        }
+
+        if (version < 150) {
+            if (version < 130) {
+                stageBuiltins[EShLangVertex].append(
+                    " vec4 gl_ClipVertex;" // needs qualifier fixed later
+                    "varying vec4 gl_FrontColor;"
+                    "varying vec4 gl_BackColor;"
+                    "varying vec4 gl_FrontSecondaryColor;"
+                    "varying vec4 gl_BackSecondaryColor;"
+                    "varying vec4 gl_TexCoord[];"
+                    "varying float gl_FogFragCoord;"
+                    "\n");
+            } else if (IncludeLegacy(version, profile, spvVersion)) {
+                stageBuiltins[EShLangVertex].append(
+                    " vec4 gl_ClipVertex;" // needs qualifier fixed later
+                    "out vec4 gl_FrontColor;"
+                    "out vec4 gl_BackColor;"
+                    "out vec4 gl_FrontSecondaryColor;"
+                    "out vec4 gl_BackSecondaryColor;"
+                    "out vec4 gl_TexCoord[];"
+                    "out float gl_FogFragCoord;"
+                    "\n");
+            }
+            stageBuiltins[EShLangVertex].append(
+                "vec4 gl_Position;" // needs qualifier fixed later
+                "float gl_PointSize;" // needs qualifier fixed later
+                );
+
+            if (version == 130 || version == 140)
+                stageBuiltins[EShLangVertex].append(
+                    "out float gl_ClipDistance[];"
+                    );
+        } else {
+            // version >= 150
+            stageBuiltins[EShLangVertex].append(
+                "out gl_PerVertex {"
+                    "vec4 gl_Position;" // needs qualifier fixed later
+                    "float gl_PointSize;" // needs qualifier fixed later
+                    "float gl_ClipDistance[];"
+                );
+            if (IncludeLegacy(version, profile, spvVersion))
+                stageBuiltins[EShLangVertex].append(
+                    "vec4 gl_ClipVertex;" // needs qualifier fixed later
+                    "vec4 gl_FrontColor;"
+                    "vec4 gl_BackColor;"
+                    "vec4 gl_FrontSecondaryColor;"
+                    "vec4 gl_BackSecondaryColor;"
+                    "vec4 gl_TexCoord[];"
+                    "float gl_FogFragCoord;"
+                    );
+            if (version >= 450)
+                stageBuiltins[EShLangVertex].append(
+                    "float gl_CullDistance[];"
+                    );
+            stageBuiltins[EShLangVertex].append(
+                "};"
+                "\n");
+        }
+        if (version >= 130 && spvVersion.vulkan == 0)
+            stageBuiltins[EShLangVertex].append(
+                "int gl_VertexID;" // needs qualifier fixed later
+                );
+        if (version >= 140 && spvVersion.vulkan == 0)
+            stageBuiltins[EShLangVertex].append(
+                "int gl_InstanceID;" // needs qualifier fixed later
+                );
+        if (spvVersion.vulkan > 0 && version >= 140)
+            stageBuiltins[EShLangVertex].append(
+                "in int gl_VertexIndex;"
+                "in int gl_InstanceIndex;"
+                );
+        if (version >= 440) {
+            stageBuiltins[EShLangVertex].append(
+                "in int gl_BaseVertexARB;"
+                "in int gl_BaseInstanceARB;"
+                "in int gl_DrawIDARB;"
+                );
+        }
+        if (version >= 410) {
+            stageBuiltins[EShLangVertex].append(
+                "out int gl_ViewportIndex;"
+                "out int gl_Layer;"
+                );
+        }
+        if (version >= 460) {
+            stageBuiltins[EShLangVertex].append(
+                "in int gl_BaseVertex;"
+                "in int gl_BaseInstance;"
+                "in int gl_DrawID;"
+                );
+        }
+
+        if (version >= 450)
+            stageBuiltins[EShLangVertex].append(
+                "out int gl_ViewportMask[];" // GL_NV_viewport_array2
+                "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+                "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+                "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+                "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+                );
+    } else {
+        // ES profile
+        if (version == 100) {
+            stageBuiltins[EShLangVertex].append(
+                "highp vec4 gl_Position;" // needs qualifier fixed later
+                "mediump float gl_PointSize;" // needs qualifier fixed later
+                );
+        } else {
+            if (spvVersion.vulkan == 0)
+                stageBuiltins[EShLangVertex].append(
+                    "in highp int gl_VertexID;" // needs qualifier fixed later
+                    "in highp int gl_InstanceID;" // needs qualifier fixed later
+                    );
+            if (spvVersion.vulkan > 0)
+#endif
+                stageBuiltins[EShLangVertex].append(
+                    "in highp int gl_VertexIndex;"
+                    "in highp int gl_InstanceIndex;"
+                    );
+#ifndef GLSLANG_WEB
+            if (version < 310)
+#endif
+                stageBuiltins[EShLangVertex].append(
+                    "highp vec4 gl_Position;" // needs qualifier fixed later
+                    "highp float gl_PointSize;" // needs qualifier fixed later
+                    );
+#ifndef GLSLANG_WEB
+            else
+                stageBuiltins[EShLangVertex].append(
+                    "out gl_PerVertex {"
+                        "highp vec4 gl_Position;" // needs qualifier fixed later
+                        "highp float gl_PointSize;" // needs qualifier fixed later
+                    "};"
+                    );
+        }
+    }
+
+    if ((profile != EEsProfile && version >= 140) ||
+        (profile == EEsProfile && version >= 310)) {
+        stageBuiltins[EShLangVertex].append(
+            "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+            "in highp int gl_ViewIndex;" // GL_EXT_multiview
+            "\n");
+    }
+
+    if (version >= 300 /* both ES and non-ES */) {
+        stageBuiltins[EShLangVertex].append(
+            "in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2
+            "\n");
+    }
+
+    //============================================================================
+    //
+    // Define the interface to the geometry shader.
+    //
+    //============================================================================
+
+    if (profile == ECoreProfile || profile == ECompatibilityProfile) {
+        stageBuiltins[EShLangGeometry].append(
+            "in gl_PerVertex {"
+                "vec4 gl_Position;"
+                "float gl_PointSize;"
+                "float gl_ClipDistance[];"
+            );
+        if (profile == ECompatibilityProfile)
+            stageBuiltins[EShLangGeometry].append(
+                "vec4 gl_ClipVertex;"
+                "vec4 gl_FrontColor;"
+                "vec4 gl_BackColor;"
+                "vec4 gl_FrontSecondaryColor;"
+                "vec4 gl_BackSecondaryColor;"
+                "vec4 gl_TexCoord[];"
+                "float gl_FogFragCoord;"
+                );
+        if (version >= 450)
+            stageBuiltins[EShLangGeometry].append(
+                "float gl_CullDistance[];"
+                "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+                "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+                );
+        stageBuiltins[EShLangGeometry].append(
+            "} gl_in[];"
+
+            "in int gl_PrimitiveIDIn;"
+            "out gl_PerVertex {"
+                "vec4 gl_Position;"
+                "float gl_PointSize;"
+                "float gl_ClipDistance[];"
+            "\n");
+        if (profile == ECompatibilityProfile && version >= 400)
+            stageBuiltins[EShLangGeometry].append(
+                "vec4 gl_ClipVertex;"
+                "vec4 gl_FrontColor;"
+                "vec4 gl_BackColor;"
+                "vec4 gl_FrontSecondaryColor;"
+                "vec4 gl_BackSecondaryColor;"
+                "vec4 gl_TexCoord[];"
+                "float gl_FogFragCoord;"
+                );
+        if (version >= 450)
+            stageBuiltins[EShLangGeometry].append(
+                "float gl_CullDistance[];"
+                );
+        stageBuiltins[EShLangGeometry].append(
+            "};"
+
+            "out int gl_PrimitiveID;"
+            "out int gl_Layer;");
+
+        if (version >= 150)
+            stageBuiltins[EShLangGeometry].append(
+                "out int gl_ViewportIndex;"
+                );
+
+        if (profile == ECompatibilityProfile && version < 400)
+            stageBuiltins[EShLangGeometry].append(
+                "out vec4 gl_ClipVertex;"
+                );
+
+        if (version >= 400)
+            stageBuiltins[EShLangGeometry].append(
+                "in int gl_InvocationID;"
+                );
+
+        if (version >= 450)
+            stageBuiltins[EShLangGeometry].append(
+                "out int gl_ViewportMask[];" // GL_NV_viewport_array2
+                "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+                "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+                "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+                "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+                );
+
+        stageBuiltins[EShLangGeometry].append("\n");
+    } else if (profile == EEsProfile && version >= 310) {
+        stageBuiltins[EShLangGeometry].append(
+            "in gl_PerVertex {"
+                "highp vec4 gl_Position;"
+                "highp float gl_PointSize;"
+            "} gl_in[];"
+            "\n"
+            "in highp int gl_PrimitiveIDIn;"
+            "in highp int gl_InvocationID;"
+            "\n"
+            "out gl_PerVertex {"
+                "highp vec4 gl_Position;"
+                "highp float gl_PointSize;"
+            "};"
+            "\n"
+            "out highp int gl_PrimitiveID;"
+            "out highp int gl_Layer;"
+            "\n"
+            );
+    }
+
+    if ((profile != EEsProfile && version >= 140) ||
+        (profile == EEsProfile && version >= 310)) {
+        stageBuiltins[EShLangGeometry].append(
+            "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+            "in highp int gl_ViewIndex;" // GL_EXT_multiview
+            "\n");
+    }
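A minimal geometry-shader sketch against this interface (point-to-quad expansion, illustrative only):

    #version 330

    layout(points) in;
    layout(triangle_strip, max_vertices = 4) out;

    void main()
    {
        for (int i = 0; i < 4; ++i) {
            vec2 corner = vec2((i & 1) == 0 ? -0.01 : 0.01,
                               (i & 2) == 0 ? -0.01 : 0.01);
            gl_Position = gl_in[0].gl_Position + vec4(corner, 0.0, 0.0);
            EmitVertex();
        }
        EndPrimitive();
    }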
+ + stageBuiltins[EShLangTessControl].append( + "in int gl_PatchVerticesIn;" + "in int gl_PrimitiveID;" + "in int gl_InvocationID;" + + "out gl_PerVertex {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + ); + if (profile == ECompatibilityProfile) + stageBuiltins[EShLangTessControl].append( + "vec4 gl_ClipVertex;" + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (version >= 450) + stageBuiltins[EShLangTessControl].append( + "float gl_CullDistance[];" + "int gl_ViewportMask[];" // GL_NV_viewport_array2 + "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering + "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + "int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + stageBuiltins[EShLangTessControl].append( + "} gl_out[];" + + "patch out float gl_TessLevelOuter[4];" + "patch out float gl_TessLevelInner[2];" + "\n"); + + if (version >= 410) + stageBuiltins[EShLangTessControl].append( + "out int gl_ViewportIndex;" + "out int gl_Layer;" + "\n"); + + } else { + // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below, + // as it depends on the resource sizing of gl_MaxPatchVertices. + + stageBuiltins[EShLangTessControl].append( + "in highp int gl_PatchVerticesIn;" + "in highp int gl_PrimitiveID;" + "in highp int gl_InvocationID;" + + "out gl_PerVertex {" + "highp vec4 gl_Position;" + "highp float gl_PointSize;" + ); + stageBuiltins[EShLangTessControl].append( + "} gl_out[];" + + "patch out highp float gl_TessLevelOuter[4];" + "patch out highp float gl_TessLevelInner[2];" + "patch out highp vec4 gl_BoundingBoxOES[2];" + "patch out highp vec4 gl_BoundingBoxEXT[2];" + "\n"); + if (profile == EEsProfile && version >= 320) { + stageBuiltins[EShLangTessControl].append( + "patch out highp vec4 gl_BoundingBox[2];" + "\n" + ); + } + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangTessControl].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "in highp int gl_ViewIndex;" // GL_EXT_multiview + "\n"); + } + + //============================================================================ + // + // Define the interface to the tessellation evaluation shader. + // + //============================================================================ + + if (profile != EEsProfile && version >= 150) { + // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below, + // as it depends on the resource sizing of gl_MaxPatchVertices. 
+ + stageBuiltins[EShLangTessEvaluation].append( + "in int gl_PatchVerticesIn;" + "in int gl_PrimitiveID;" + "in vec3 gl_TessCoord;" + + "patch in float gl_TessLevelOuter[4];" + "patch in float gl_TessLevelInner[2];" + + "out gl_PerVertex {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + ); + if (version >= 400 && profile == ECompatibilityProfile) + stageBuiltins[EShLangTessEvaluation].append( + "vec4 gl_ClipVertex;" + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (version >= 450) + stageBuiltins[EShLangTessEvaluation].append( + "float gl_CullDistance[];" + ); + stageBuiltins[EShLangTessEvaluation].append( + "};" + "\n"); + + if (version >= 410) + stageBuiltins[EShLangTessEvaluation].append( + "out int gl_ViewportIndex;" + "out int gl_Layer;" + "\n"); + + if (version >= 450) + stageBuiltins[EShLangTessEvaluation].append( + "out int gl_ViewportMask[];" // GL_NV_viewport_array2 + "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering + "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + + } else if (profile == EEsProfile && version >= 310) { + // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below, + // as it depends on the resource sizing of gl_MaxPatchVertices. + + stageBuiltins[EShLangTessEvaluation].append( + "in highp int gl_PatchVerticesIn;" + "in highp int gl_PrimitiveID;" + "in highp vec3 gl_TessCoord;" + + "patch in highp float gl_TessLevelOuter[4];" + "patch in highp float gl_TessLevelInner[2];" + + "out gl_PerVertex {" + "highp vec4 gl_Position;" + "highp float gl_PointSize;" + ); + stageBuiltins[EShLangTessEvaluation].append( + "};" + "\n"); + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangTessEvaluation].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "in highp int gl_ViewIndex;" // GL_EXT_multiview + "\n"); + } + + //============================================================================ + // + // Define the interface to the fragment shader. + // + //============================================================================ + +#ifndef GLSLANG_WEB + if (profile != EEsProfile) { + + stageBuiltins[EShLangFragment].append( + "vec4 gl_FragCoord;" // needs qualifier fixed later + "bool gl_FrontFacing;" // needs qualifier fixed later + "float gl_FragDepth;" // needs qualifier fixed later + ); + if (version >= 120) + stageBuiltins[EShLangFragment].append( + "vec2 gl_PointCoord;" // needs qualifier fixed later + ); + if (version >= 140) + stageBuiltins[EShLangFragment].append( + "out int gl_FragStencilRefARB;" + ); + if (IncludeLegacy(version, profile, spvVersion) || (!
ForwardCompatibility && version < 420)) + stageBuiltins[EShLangFragment].append( + "vec4 gl_FragColor;" // needs qualifier fixed later + ); + + if (version < 130) { + stageBuiltins[EShLangFragment].append( + "varying vec4 gl_Color;" + "varying vec4 gl_SecondaryColor;" + "varying vec4 gl_TexCoord[];" + "varying float gl_FogFragCoord;" + ); + } else { + stageBuiltins[EShLangFragment].append( + "in float gl_ClipDistance[];" + ); + + if (IncludeLegacy(version, profile, spvVersion)) { + if (version < 150) + stageBuiltins[EShLangFragment].append( + "in float gl_FogFragCoord;" + "in vec4 gl_TexCoord[];" + "in vec4 gl_Color;" + "in vec4 gl_SecondaryColor;" + ); + else + stageBuiltins[EShLangFragment].append( + "in gl_PerFragment {" + "in float gl_FogFragCoord;" + "in vec4 gl_TexCoord[];" + "in vec4 gl_Color;" + "in vec4 gl_SecondaryColor;" + "};" + ); + } + } + + if (version >= 150) + stageBuiltins[EShLangFragment].append( + "flat in int gl_PrimitiveID;" + ); + + if (version >= 130) { // ARB_sample_shading + stageBuiltins[EShLangFragment].append( + "flat in int gl_SampleID;" + " in vec2 gl_SamplePosition;" + " out int gl_SampleMask[];" + ); + + if (spvVersion.spv == 0) { + stageBuiltins[EShLangFragment].append( + "uniform int gl_NumSamples;" + ); + } + } + + if (version >= 400) + stageBuiltins[EShLangFragment].append( + "flat in int gl_SampleMaskIn[];" + ); + + if (version >= 430) + stageBuiltins[EShLangFragment].append( + "flat in int gl_Layer;" + "flat in int gl_ViewportIndex;" + ); + + if (version >= 450) + stageBuiltins[EShLangFragment].append( + "in float gl_CullDistance[];" + "bool gl_HelperInvocation;" // needs qualifier fixed later + ); + + if (version >= 450) + stageBuiltins[EShLangFragment].append( // GL_EXT_fragment_invocation_density + "flat in ivec2 gl_FragSizeEXT;" + "flat in int gl_FragInvocationCountEXT;" + ); + + if (version >= 450) + stageBuiltins[EShLangFragment].append( + "in vec2 gl_BaryCoordNoPerspAMD;" + "in vec2 gl_BaryCoordNoPerspCentroidAMD;" + "in vec2 gl_BaryCoordNoPerspSampleAMD;" + "in vec2 gl_BaryCoordSmoothAMD;" + "in vec2 gl_BaryCoordSmoothCentroidAMD;" + "in vec2 gl_BaryCoordSmoothSampleAMD;" + "in vec3 gl_BaryCoordPullModelAMD;" + ); + + if (version >= 430) + stageBuiltins[EShLangFragment].append( + "in bool gl_FragFullyCoveredNV;" + ); + if (version >= 450) + stageBuiltins[EShLangFragment].append( + "flat in ivec2 gl_FragmentSizeNV;" // GL_NV_shading_rate_image + "flat in int gl_InvocationsPerPixelNV;" + "in vec3 gl_BaryCoordNV;" // GL_NV_fragment_shader_barycentric + "in vec3 gl_BaryCoordNoPerspNV;" + ); + + } else { + // ES profile + + if (version == 100) { + stageBuiltins[EShLangFragment].append( + "mediump vec4 gl_FragCoord;" // needs qualifier fixed later + " bool gl_FrontFacing;" // needs qualifier fixed later + "mediump vec4 gl_FragColor;" // needs qualifier fixed later + "mediump vec2 gl_PointCoord;" // needs qualifier fixed later + ); + } +#endif + if (version >= 300) { + stageBuiltins[EShLangFragment].append( + "highp vec4 gl_FragCoord;" // needs qualifier fixed later + " bool gl_FrontFacing;" // needs qualifier fixed later + "mediump vec2 gl_PointCoord;" // needs qualifier fixed later + "highp float gl_FragDepth;" // needs qualifier fixed later + ); + } +#ifndef GLSLANG_WEB + if (version >= 310) { + stageBuiltins[EShLangFragment].append( + "bool gl_HelperInvocation;" // needs qualifier fixed later + "flat in highp int gl_PrimitiveID;" // needs qualifier fixed later + "flat in highp int gl_Layer;" // needs qualifier fixed later + ); + + 
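+            // ("needs qualifier fixed later": these built-ins are declared here
+            // without a storage qualifier so one table can serve many versions; e.g.
+            //     "highp vec4 gl_FragCoord;"
+            // is effectively
+            //     "in highp vec4 gl_FragCoord;"
+            // after the qualifier is corrected in a later pass.)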
stageBuiltins[EShLangFragment].append( // GL_OES_sample_variables + "flat in lowp int gl_SampleID;" + " in mediump vec2 gl_SamplePosition;" + "flat in highp int gl_SampleMaskIn[];" + " out highp int gl_SampleMask[];" + ); + if (spvVersion.spv == 0) + stageBuiltins[EShLangFragment].append( // GL_OES_sample_variables + "uniform lowp int gl_NumSamples;" + ); + } + stageBuiltins[EShLangFragment].append( + "highp float gl_FragDepthEXT;" // GL_EXT_frag_depth + ); + + if (version >= 310) + stageBuiltins[EShLangFragment].append( // GL_EXT_fragment_invocation_density + "flat in ivec2 gl_FragSizeEXT;" + "flat in int gl_FragInvocationCountEXT;" + ); + if (version >= 320) + stageBuiltins[EShLangFragment].append( // GL_NV_shading_rate_image + "flat in ivec2 gl_FragmentSizeNV;" + "flat in int gl_InvocationsPerPixelNV;" + ); + if (version >= 320) + stageBuiltins[EShLangFragment].append( + "in vec3 gl_BaryCoordNV;" + "in vec3 gl_BaryCoordNoPerspNV;" + ); + } +#endif + + stageBuiltins[EShLangFragment].append("\n"); + + if (version >= 130) + add2ndGenerationSamplingImaging(version, profile, spvVersion); + +#ifndef GLSLANG_WEB + + // GL_ARB_shader_ballot + if (profile != EEsProfile && version >= 450) { + const char* ballotDecls = + "uniform uint gl_SubGroupSizeARB;" + "in uint gl_SubGroupInvocationARB;" + "in uint64_t gl_SubGroupEqMaskARB;" + "in uint64_t gl_SubGroupGeMaskARB;" + "in uint64_t gl_SubGroupGtMaskARB;" + "in uint64_t gl_SubGroupLeMaskARB;" + "in uint64_t gl_SubGroupLtMaskARB;" + "\n"; + const char* fragmentBallotDecls = + "uniform uint gl_SubGroupSizeARB;" + "flat in uint gl_SubGroupInvocationARB;" + "flat in uint64_t gl_SubGroupEqMaskARB;" + "flat in uint64_t gl_SubGroupGeMaskARB;" + "flat in uint64_t gl_SubGroupGtMaskARB;" + "flat in uint64_t gl_SubGroupLeMaskARB;" + "flat in uint64_t gl_SubGroupLtMaskARB;" + "\n"; + stageBuiltins[EShLangVertex] .append(ballotDecls); + stageBuiltins[EShLangTessControl] .append(ballotDecls); + stageBuiltins[EShLangTessEvaluation].append(ballotDecls); + stageBuiltins[EShLangGeometry] .append(ballotDecls); + stageBuiltins[EShLangCompute] .append(ballotDecls); + stageBuiltins[EShLangFragment] .append(fragmentBallotDecls); + stageBuiltins[EShLangMeshNV] .append(ballotDecls); + stageBuiltins[EShLangTaskNV] .append(ballotDecls); + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangFragment].append( + "flat in highp int gl_DeviceIndex;" // GL_EXT_device_group + "flat in highp int gl_ViewIndex;" // GL_EXT_multiview + "\n"); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + const char* subgroupDecls = + "in mediump uint gl_SubgroupSize;" + "in mediump uint gl_SubgroupInvocationID;" + "in highp uvec4 gl_SubgroupEqMask;" + "in highp uvec4 gl_SubgroupGeMask;" + "in highp uvec4 gl_SubgroupGtMask;" + "in highp uvec4 gl_SubgroupLeMask;" + "in highp uvec4 gl_SubgroupLtMask;" + // GL_NV_shader_sm_builtins + "in highp uint gl_WarpsPerSMNV;" + "in highp uint gl_SMCountNV;" + "in highp uint gl_WarpIDNV;" + "in highp uint gl_SMIDNV;" + "\n"; + const char* fragmentSubgroupDecls = + "flat in mediump uint gl_SubgroupSize;" + "flat in mediump uint gl_SubgroupInvocationID;" + "flat in highp uvec4 gl_SubgroupEqMask;" + "flat in highp uvec4 gl_SubgroupGeMask;" + "flat in highp uvec4 gl_SubgroupGtMask;" + "flat in highp uvec4 gl_SubgroupLeMask;" + "flat in highp uvec4 gl_SubgroupLtMask;" + // GL_NV_shader_sm_builtins + "flat in highp 
uint gl_WarpsPerSMNV;" + "flat in highp uint gl_SMCountNV;" + "flat in highp uint gl_WarpIDNV;" + "flat in highp uint gl_SMIDNV;" + "\n"; + const char* computeSubgroupDecls = + "in highp uint gl_NumSubgroups;" + "in highp uint gl_SubgroupID;" + "\n"; + + stageBuiltins[EShLangVertex] .append(subgroupDecls); + stageBuiltins[EShLangTessControl] .append(subgroupDecls); + stageBuiltins[EShLangTessEvaluation].append(subgroupDecls); + stageBuiltins[EShLangGeometry] .append(subgroupDecls); + stageBuiltins[EShLangCompute] .append(subgroupDecls); + stageBuiltins[EShLangCompute] .append(computeSubgroupDecls); + stageBuiltins[EShLangFragment] .append(fragmentSubgroupDecls); + stageBuiltins[EShLangMeshNV] .append(subgroupDecls); + stageBuiltins[EShLangMeshNV] .append(computeSubgroupDecls); + stageBuiltins[EShLangTaskNV] .append(subgroupDecls); + stageBuiltins[EShLangTaskNV] .append(computeSubgroupDecls); + stageBuiltins[EShLangRayGen] .append(subgroupDecls); + stageBuiltins[EShLangIntersect] .append(subgroupDecls); + stageBuiltins[EShLangAnyHit] .append(subgroupDecls); + stageBuiltins[EShLangClosestHit] .append(subgroupDecls); + stageBuiltins[EShLangMiss] .append(subgroupDecls); + stageBuiltins[EShLangCallable] .append(subgroupDecls); + } + + // GL_NV_ray_tracing/GL_EXT_ray_tracing + if (profile != EEsProfile && version >= 460) { + + const char *constRayFlags = + "const uint gl_RayFlagsNoneNV = 0U;" + "const uint gl_RayFlagsNoneEXT = 0U;" + "const uint gl_RayFlagsOpaqueNV = 1U;" + "const uint gl_RayFlagsOpaqueEXT = 1U;" + "const uint gl_RayFlagsNoOpaqueNV = 2U;" + "const uint gl_RayFlagsNoOpaqueEXT = 2U;" + "const uint gl_RayFlagsTerminateOnFirstHitNV = 4U;" + "const uint gl_RayFlagsTerminateOnFirstHitEXT = 4U;" + "const uint gl_RayFlagsSkipClosestHitShaderNV = 8U;" + "const uint gl_RayFlagsSkipClosestHitShaderEXT = 8U;" + "const uint gl_RayFlagsCullBackFacingTrianglesNV = 16U;" + "const uint gl_RayFlagsCullBackFacingTrianglesEXT = 16U;" + "const uint gl_RayFlagsCullFrontFacingTrianglesNV = 32U;" + "const uint gl_RayFlagsCullFrontFacingTrianglesEXT = 32U;" + "const uint gl_RayFlagsCullOpaqueNV = 64U;" + "const uint gl_RayFlagsCullOpaqueEXT = 64U;" + "const uint gl_RayFlagsCullNoOpaqueNV = 128U;" + "const uint gl_RayFlagsCullNoOpaqueEXT = 128U;" + "const uint gl_RayFlagsSkipTrianglesEXT = 256U;" + "const uint gl_RayFlagsSkipAABBEXT = 512U;" + "const uint gl_HitKindFrontFacingTriangleEXT = 254U;" + "const uint gl_HitKindBackFacingTriangleEXT = 255U;" + "\n"; + + const char *constRayQueryIntersection = + "const uint gl_RayQueryCandidateIntersectionEXT = 0U;" + "const uint gl_RayQueryCommittedIntersectionEXT = 1U;" + "const uint gl_RayQueryCommittedIntersectionNoneEXT = 0U;" + "const uint gl_RayQueryCommittedIntersectionTriangleEXT = 1U;" + "const uint gl_RayQueryCommittedIntersectionGeneratedEXT = 2U;" + "const uint gl_RayQueryCandidateIntersectionTriangleEXT = 0U;" + "const uint gl_RayQueryCandidateIntersectionAABBEXT = 1U;" + "\n"; + + const char *rayGenDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "\n"; + const char *intersectDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "in int gl_PrimitiveID;" + "in int gl_InstanceID;" + "in int gl_InstanceCustomIndexNV;" + "in int gl_InstanceCustomIndexEXT;" + "in int gl_GeometryIndexEXT;" + "in vec3 gl_WorldRayOriginNV;" + "in vec3 gl_WorldRayOriginEXT;" + "in vec3 gl_WorldRayDirectionNV;" + "in vec3 
gl_WorldRayDirectionEXT;" + "in vec3 gl_ObjectRayOriginNV;" + "in vec3 gl_ObjectRayOriginEXT;" + "in vec3 gl_ObjectRayDirectionNV;" + "in vec3 gl_ObjectRayDirectionEXT;" + "in float gl_RayTminNV;" + "in float gl_RayTminEXT;" + "in float gl_RayTmaxNV;" + "in float gl_RayTmaxEXT;" + "in mat4x3 gl_ObjectToWorldNV;" + "in mat4x3 gl_ObjectToWorldEXT;" + "in mat3x4 gl_ObjectToWorld3x4EXT;" + "in mat4x3 gl_WorldToObjectNV;" + "in mat4x3 gl_WorldToObjectEXT;" + "in mat3x4 gl_WorldToObject3x4EXT;" + "in uint gl_IncomingRayFlagsNV;" + "in uint gl_IncomingRayFlagsEXT;" + "\n"; + const char *hitDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "in int gl_PrimitiveID;" + "in int gl_InstanceID;" + "in int gl_InstanceCustomIndexNV;" + "in int gl_InstanceCustomIndexEXT;" + "in int gl_GeometryIndexEXT;" + "in vec3 gl_WorldRayOriginNV;" + "in vec3 gl_WorldRayOriginEXT;" + "in vec3 gl_WorldRayDirectionNV;" + "in vec3 gl_WorldRayDirectionEXT;" + "in vec3 gl_ObjectRayOriginNV;" + "in vec3 gl_ObjectRayOriginEXT;" + "in vec3 gl_ObjectRayDirectionNV;" + "in vec3 gl_ObjectRayDirectionEXT;" + "in float gl_RayTminNV;" + "in float gl_RayTminEXT;" + "in float gl_RayTmaxNV;" + "in float gl_RayTmaxEXT;" + "in float gl_HitTNV;" + "in float gl_HitTEXT;" + "in uint gl_HitKindNV;" + "in uint gl_HitKindEXT;" + "in mat4x3 gl_ObjectToWorldNV;" + "in mat4x3 gl_ObjectToWorldEXT;" + "in mat3x4 gl_ObjectToWorld3x4EXT;" + "in mat4x3 gl_WorldToObjectNV;" + "in mat4x3 gl_WorldToObjectEXT;" + "in mat3x4 gl_WorldToObject3x4EXT;" + "in uint gl_IncomingRayFlagsNV;" + "in uint gl_IncomingRayFlagsEXT;" + "\n"; + const char *missDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "in vec3 gl_WorldRayOriginNV;" + "in vec3 gl_WorldRayOriginEXT;" + "in vec3 gl_WorldRayDirectionNV;" + "in vec3 gl_WorldRayDirectionEXT;" + "in vec3 gl_ObjectRayOriginNV;" + "in vec3 gl_ObjectRayDirectionNV;" + "in float gl_RayTminNV;" + "in float gl_RayTminEXT;" + "in float gl_RayTmaxNV;" + "in float gl_RayTmaxEXT;" + "in uint gl_IncomingRayFlagsNV;" + "in uint gl_IncomingRayFlagsEXT;" + "\n"; + + const char *callableDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "\n"; + + + commonBuiltins.append(constRayQueryIntersection); + commonBuiltins.append(constRayFlags); + + stageBuiltins[EShLangRayGen].append(rayGenDecls); + stageBuiltins[EShLangIntersect].append(intersectDecls); + stageBuiltins[EShLangAnyHit].append(hitDecls); + stageBuiltins[EShLangClosestHit].append(hitDecls); + stageBuiltins[EShLangMiss].append(missDecls); + stageBuiltins[EShLangCallable].append(callableDecls); + + } + if ((profile != EEsProfile && version >= 140)) { + const char *deviceIndex = + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "\n"; + + stageBuiltins[EShLangRayGen].append(deviceIndex); + stageBuiltins[EShLangIntersect].append(deviceIndex); + stageBuiltins[EShLangAnyHit].append(deviceIndex); + stageBuiltins[EShLangClosestHit].append(deviceIndex); + stageBuiltins[EShLangMiss].append(deviceIndex); + } + + if (version >= 300 /* both ES and non-ES */) { + stageBuiltins[EShLangFragment].append( + "flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2 + "\n"); + } + + if ((profile != EEsProfile && version >= 420) || + (profile == EEsProfile && version >= 310)) { + commonBuiltins.append("const int gl_ScopeDevice = 1;\n"); + 
commonBuiltins.append("const int gl_ScopeWorkgroup = 2;\n"); + commonBuiltins.append("const int gl_ScopeSubgroup = 3;\n"); + commonBuiltins.append("const int gl_ScopeInvocation = 4;\n"); + commonBuiltins.append("const int gl_ScopeQueueFamily = 5;\n"); + commonBuiltins.append("const int gl_ScopeShaderCallEXT = 6;\n"); + + commonBuiltins.append("const int gl_SemanticsRelaxed = 0x0;\n"); + commonBuiltins.append("const int gl_SemanticsAcquire = 0x2;\n"); + commonBuiltins.append("const int gl_SemanticsRelease = 0x4;\n"); + commonBuiltins.append("const int gl_SemanticsAcquireRelease = 0x8;\n"); + commonBuiltins.append("const int gl_SemanticsMakeAvailable = 0x2000;\n"); + commonBuiltins.append("const int gl_SemanticsMakeVisible = 0x4000;\n"); + commonBuiltins.append("const int gl_SemanticsVolatile = 0x8000;\n"); + + commonBuiltins.append("const int gl_StorageSemanticsNone = 0x0;\n"); + commonBuiltins.append("const int gl_StorageSemanticsBuffer = 0x40;\n"); + commonBuiltins.append("const int gl_StorageSemanticsShared = 0x100;\n"); + commonBuiltins.append("const int gl_StorageSemanticsImage = 0x800;\n"); + commonBuiltins.append("const int gl_StorageSemanticsOutput = 0x1000;\n"); + } +#endif + + // printf("%s\n", commonBuiltins.c_str()); + // printf("%s\n", stageBuiltins[EShLangFragment].c_str()); +} + +// +// Helper function for initialize(), to add the second set of names for texturing, +// when adding context-independent built-in functions. +// +void TBuiltIns::add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion) +{ + // + // In this function proper, enumerate the types, then calls the next set of functions + // to enumerate all the uses for that type. + // + + // enumerate all the types +#ifdef GLSLANG_WEB + const TBasicType bTypes[] = { EbtFloat, EbtInt, EbtUint }; + bool skipBuffer = true; + bool skipCubeArrayed = true; + const int image = 0; +#else + const TBasicType bTypes[] = { EbtFloat, EbtInt, EbtUint, EbtFloat16 }; + bool skipBuffer = (profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 140); + bool skipCubeArrayed = (profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 130); + for (int image = 0; image <= 1; ++image) // loop over "bool" image vs sampler +#endif + { + for (int shadow = 0; shadow <= 1; ++shadow) { // loop over "bool" shadow or not +#ifdef GLSLANG_WEB + const int ms = 0; +#else + for (int ms = 0; ms <= 1; ++ms) // loop over "bool" multisample or not +#endif + { + if ((ms || image) && shadow) + continue; + if (ms && profile != EEsProfile && version < 150) + continue; + if (ms && image && profile == EEsProfile) + continue; + if (ms && profile == EEsProfile && version < 310) + continue; + + for (int arrayed = 0; arrayed <= 1; ++arrayed) { // loop over "bool" arrayed or not +#ifdef GLSLANG_WEB + for (int dim = Esd2D; dim <= EsdCube; ++dim) { // 2D, 3D, and Cube +#else + for (int dim = Esd1D; dim < EsdNumDims; ++dim) { // 1D, ..., buffer, subpass + if (dim == EsdSubpass && spvVersion.vulkan == 0) + continue; + if (dim == EsdSubpass && (image || shadow || arrayed)) + continue; + if ((dim == Esd1D || dim == EsdRect) && profile == EEsProfile) + continue; + if (dim == EsdSubpass && spvVersion.vulkan == 0) + continue; + if (dim == EsdSubpass && (image || shadow || arrayed)) + continue; + if ((dim == Esd1D || dim == EsdRect) && profile == EEsProfile) + continue; + if (dim != Esd2D && dim != EsdSubpass && ms) + continue; + if (dim == EsdBuffer && skipBuffer) + continue; + if (dim == EsdBuffer 
&& (shadow || arrayed || ms)) + continue; + if (ms && arrayed && profile == EEsProfile && version < 310) + continue; +#endif + if (dim == Esd3D && shadow) + continue; + if (dim == EsdCube && arrayed && skipCubeArrayed) + continue; + if ((dim == Esd3D || dim == EsdRect) && arrayed) + continue; + + // Loop over the bTypes + for (size_t bType = 0; bType < sizeof(bTypes)/sizeof(TBasicType); ++bType) { +#ifndef GLSLANG_WEB + if (bTypes[bType] == EbtFloat16 && (profile == EEsProfile || version < 450)) + continue; + if (dim == EsdRect && version < 140 && bType > 0) + continue; +#endif + if (shadow && (bTypes[bType] == EbtInt || bTypes[bType] == EbtUint)) + continue; + + // + // Now, make all the function prototypes for the type we just built... + // + TSampler sampler; +#ifndef GLSLANG_WEB + if (dim == EsdSubpass) { + sampler.setSubpass(bTypes[bType], ms ? true : false); + } else +#endif + if (image) { + sampler.setImage(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false, + shadow ? true : false, + ms ? true : false); + } else { + sampler.set(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false, + shadow ? true : false, + ms ? true : false); + } + + TString typeName = sampler.getString(); + +#ifndef GLSLANG_WEB + if (dim == EsdSubpass) { + addSubpassSampling(sampler, typeName, version, profile); + continue; + } +#endif + + addQueryFunctions(sampler, typeName, version, profile); + + if (image) + addImageFunctions(sampler, typeName, version, profile); + else { + addSamplingFunctions(sampler, typeName, version, profile); +#ifndef GLSLANG_WEB + addGatherFunctions(sampler, typeName, version, profile); + if (spvVersion.vulkan > 0 && sampler.isCombined() && !sampler.shadow) { + // Base Vulkan allows texelFetch() for + // textureBuffer (i.e. without sampler). + // + // GL_EXT_samplerless_texture_functions + // allows texelFetch() and query functions + // (other than textureQueryLod()) for all + // texture types. + sampler.setTexture(sampler.type, sampler.dim, sampler.arrayed, sampler.shadow, + sampler.ms); + TString textureTypeName = sampler.getString(); + addSamplingFunctions(sampler, textureTypeName, version, profile); + addQueryFunctions(sampler, textureTypeName, version, profile); + } +#endif + } + } + } + } + } + } + } + + // + // sparseTexelsResidentARB() + // + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append("bool sparseTexelsResidentARB(int code);\n"); + } +} + +// +// Helper function for add2ndGenerationSamplingImaging(), +// when adding context-independent built-in functions. +// +// Add all the query functions for the given type. +// +void TBuiltIns::addQueryFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile) +{ + // + // textureSize() and imageSize() + // + + int sizeDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0) - (sampler.dim == EsdCube ? 
1 : 0); + +#ifdef GLSLANG_WEB + commonBuiltins.append("highp "); + commonBuiltins.append("ivec"); + commonBuiltins.append(postfixes[sizeDims]); + commonBuiltins.append(" textureSize("); + commonBuiltins.append(typeName); + commonBuiltins.append(",int);\n"); + return; +#endif + + if (sampler.isImage() && ((profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 420))) + return; + + if (profile == EEsProfile) + commonBuiltins.append("highp "); + if (sizeDims == 1) + commonBuiltins.append("int"); + else { + commonBuiltins.append("ivec"); + commonBuiltins.append(postfixes[sizeDims]); + } + if (sampler.isImage()) + commonBuiltins.append(" imageSize(readonly writeonly volatile coherent "); + else + commonBuiltins.append(" textureSize("); + commonBuiltins.append(typeName); + if (! sampler.isImage() && ! sampler.isRect() && ! sampler.isBuffer() && ! sampler.isMultiSample()) + commonBuiltins.append(",int);\n"); + else + commonBuiltins.append(");\n"); + + // + // textureSamples() and imageSamples() + // + + // GL_ARB_shader_texture_image_samples + // TODO: spec issue? there are no memory qualifiers; how to query a writeonly/readonly image, etc? + if (profile != EEsProfile && version >= 430 && sampler.isMultiSample()) { + commonBuiltins.append("int "); + if (sampler.isImage()) + commonBuiltins.append("imageSamples(readonly writeonly volatile coherent "); + else + commonBuiltins.append("textureSamples("); + commonBuiltins.append(typeName); + commonBuiltins.append(");\n"); + } + + // + // textureQueryLod(), fragment stage only + // Also enabled with extension GL_ARB_texture_query_lod + + if (profile != EEsProfile && version >= 150 && sampler.isCombined() && sampler.dim != EsdRect && + ! sampler.isMultiSample() && ! sampler.isBuffer()) { + for (int f16TexAddr = 0; f16TexAddr < 2; ++f16TexAddr) { + if (f16TexAddr && sampler.type != EbtFloat16) + continue; + stageBuiltins[EShLangFragment].append("vec2 textureQueryLod("); + stageBuiltins[EShLangFragment].append(typeName); + if (dimMap[sampler.dim] == 1) + if (f16TexAddr) + stageBuiltins[EShLangFragment].append(", float16_t"); + else + stageBuiltins[EShLangFragment].append(", float"); + else { + if (f16TexAddr) + stageBuiltins[EShLangFragment].append(", f16vec"); + else + stageBuiltins[EShLangFragment].append(", vec"); + stageBuiltins[EShLangFragment].append(postfixes[dimMap[sampler.dim]]); + } + stageBuiltins[EShLangFragment].append(");\n"); + } + + stageBuiltins[EShLangCompute].append("vec2 textureQueryLod("); + stageBuiltins[EShLangCompute].append(typeName); + if (dimMap[sampler.dim] == 1) + stageBuiltins[EShLangCompute].append(", float"); + else { + stageBuiltins[EShLangCompute].append(", vec"); + stageBuiltins[EShLangCompute].append(postfixes[dimMap[sampler.dim]]); + } + stageBuiltins[EShLangCompute].append(");\n"); + } + + // + // textureQueryLevels() + // + + if (profile != EEsProfile && version >= 430 && ! sampler.isImage() && sampler.dim != EsdRect && + ! sampler.isMultiSample() && ! sampler.isBuffer()) { + commonBuiltins.append("int textureQueryLevels("); + commonBuiltins.append(typeName); + commonBuiltins.append(");\n"); + } +} + +// +// Helper function for add2ndGenerationSamplingImaging(), +// when adding context-independent built-in functions. +// +// Add all the image access functions for the given type. 
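+// For example, an iimage2D yields, among others:
+//     ivec4 imageLoad(readonly volatile coherent iimage2D, ivec2);
+//     void  imageStore(writeonly volatile coherent iimage2D, ivec2, ivec4);
+// (the ES path additionally prefixes the imageLoad return type with "highp").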
+// +void TBuiltIns::addImageFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile) +{ + int dims = dimMap[sampler.dim]; + // most things with an array add a dimension, except for cubemaps + if (sampler.arrayed && sampler.dim != EsdCube) + ++dims; + + TString imageParams = typeName; + if (dims == 1) + imageParams.append(", int"); + else { + imageParams.append(", ivec"); + imageParams.append(postfixes[dims]); + } + if (sampler.isMultiSample()) + imageParams.append(", int"); + + if (profile == EEsProfile) + commonBuiltins.append("highp "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4 imageLoad(readonly volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(");\n"); + + commonBuiltins.append("void imageStore(writeonly volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4);\n"); + + if (! sampler.is1D() && ! sampler.isBuffer() && profile != EEsProfile && version >= 450) { + commonBuiltins.append("int sparseImageLoadARB(readonly volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", out "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4"); + commonBuiltins.append(");\n"); + } + + if ( profile != EEsProfile || + (profile == EEsProfile && version >= 310)) { + if (sampler.type == EbtInt || sampler.type == EbtUint) { + const char* dataType = sampler.type == EbtInt ? "highp int" : "highp uint"; + + const int numBuiltins = 7; + + static const char* atomicFunc[numBuiltins] = { + " imageAtomicAdd(volatile coherent ", + " imageAtomicMin(volatile coherent ", + " imageAtomicMax(volatile coherent ", + " imageAtomicAnd(volatile coherent ", + " imageAtomicOr(volatile coherent ", + " imageAtomicXor(volatile coherent ", + " imageAtomicExchange(volatile coherent " + }; + + // Loop twice to add prototypes with/without scope/semantics + for (int j = 0; j < 2; ++j) { + for (size_t i = 0; i < numBuiltins; ++i) { + commonBuiltins.append(dataType); + commonBuiltins.append(atomicFunc[i]); + commonBuiltins.append(imageParams); + commonBuiltins.append(", "); + commonBuiltins.append(dataType); + if (j == 1) { + commonBuiltins.append(", int, int, int"); + } + commonBuiltins.append(");\n"); + } + + commonBuiltins.append(dataType); + commonBuiltins.append(" imageAtomicCompSwap(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", "); + commonBuiltins.append(dataType); + commonBuiltins.append(", "); + commonBuiltins.append(dataType); + if (j == 1) { + commonBuiltins.append(", int, int, int, int, int"); + } + commonBuiltins.append(");\n"); + } + + commonBuiltins.append(dataType); + commonBuiltins.append(" imageAtomicLoad(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", int, int, int);\n"); + + commonBuiltins.append("void imageAtomicStore(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", "); + commonBuiltins.append(dataType); + commonBuiltins.append(", int, int, int);\n"); + + } else { + // not int or uint + // GL_ARB_ES3_1_compatibility + // TODO: spec issue: are there restrictions on the kind of layout() that can be used? what about dropping memory qualifiers? 
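+                // e.g. for a float-texel image2D only the exchange form is emitted:
+                //     float imageAtomicExchange(volatile coherent image2D, ivec2, float);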
+ if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 310)) { + commonBuiltins.append("float imageAtomicExchange(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", float);\n"); + } + } + } + + if (sampler.dim == EsdRect || sampler.dim == EsdBuffer || sampler.shadow || sampler.isMultiSample()) + return; + + if (profile == EEsProfile || version < 450) + return; + + TString imageLodParams = typeName; + if (dims == 1) + imageLodParams.append(", int"); + else { + imageLodParams.append(", ivec"); + imageLodParams.append(postfixes[dims]); + } + imageLodParams.append(", int"); + + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4 imageLoadLodAMD(readonly volatile coherent "); + commonBuiltins.append(imageLodParams); + commonBuiltins.append(");\n"); + + commonBuiltins.append("void imageStoreLodAMD(writeonly volatile coherent "); + commonBuiltins.append(imageLodParams); + commonBuiltins.append(", "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4);\n"); + + if (! sampler.is1D()) { + commonBuiltins.append("int sparseImageLoadLodAMD(readonly volatile coherent "); + commonBuiltins.append(imageLodParams); + commonBuiltins.append(", out "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4"); + commonBuiltins.append(");\n"); + } +} + +// +// Helper function for initialize(), +// when adding context-independent built-in functions. +// +// Add all the subpass access functions for the given type. +// +void TBuiltIns::addSubpassSampling(TSampler sampler, const TString& typeName, int /*version*/, EProfile /*profile*/) +{ + stageBuiltins[EShLangFragment].append(prefixes[sampler.type]); + stageBuiltins[EShLangFragment].append("vec4 subpassLoad"); + stageBuiltins[EShLangFragment].append("("); + stageBuiltins[EShLangFragment].append(typeName.c_str()); + if (sampler.isMultiSample()) + stageBuiltins[EShLangFragment].append(", int"); + stageBuiltins[EShLangFragment].append(");\n"); +} + +// +// Helper function for add2ndGenerationSamplingImaging(), +// when adding context-independent built-in functions. +// +// Add all the texture lookup functions for the given type. 
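+// For example, for sampler2D the name/signature composition below produces
+// entries such as:
+//     vec4 texture(sampler2D, vec2);
+//     vec4 textureProjLodOffset(sampler2D, vec3, float, ivec2);
+//     int  sparseTextureClampARB(sampler2D, vec2, float, out vec4);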
+// +void TBuiltIns::addSamplingFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile) +{ +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#endif + + // + // texturing + // + for (int proj = 0; proj <= 1; ++proj) { // loop over "bool" projective or not + + if (proj && (sampler.dim == EsdCube || sampler.isBuffer() || sampler.arrayed || sampler.isMultiSample() + || !sampler.isCombined())) + continue; + + for (int lod = 0; lod <= 1; ++lod) { + + if (lod && (sampler.isBuffer() || sampler.isRect() || sampler.isMultiSample() || !sampler.isCombined())) + continue; + if (lod && sampler.dim == Esd2D && sampler.arrayed && sampler.shadow) + continue; + if (lod && sampler.dim == EsdCube && sampler.shadow) + continue; + + for (int bias = 0; bias <= 1; ++bias) { + + if (bias && (lod || sampler.isMultiSample() || !sampler.isCombined())) + continue; + if (bias && (sampler.dim == Esd2D || sampler.dim == EsdCube) && sampler.shadow && sampler.arrayed) + continue; + if (bias && (sampler.isRect() || sampler.isBuffer())) + continue; + + for (int offset = 0; offset <= 1; ++offset) { // loop over "bool" offset or not + + if (proj + offset + bias + lod > 3) + continue; + if (offset && (sampler.dim == EsdCube || sampler.isBuffer() || sampler.isMultiSample())) + continue; + + for (int fetch = 0; fetch <= 1; ++fetch) { // loop over "bool" fetch or not + + if (proj + offset + fetch + bias + lod > 3) + continue; + if (fetch && (lod || bias)) + continue; + if (fetch && (sampler.shadow || sampler.dim == EsdCube)) + continue; + if (fetch == 0 && (sampler.isMultiSample() || sampler.isBuffer() + || !sampler.isCombined())) + continue; + + for (int grad = 0; grad <= 1; ++grad) { // loop over "bool" grad or not + + if (grad && (lod || bias || sampler.isMultiSample() || !sampler.isCombined())) + continue; + if (grad && sampler.isBuffer()) + continue; + if (proj + offset + fetch + grad + bias + lod > 3) + continue; + + for (int extraProj = 0; extraProj <= 1; ++extraProj) { + bool compare = false; + int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0); + // skip dummy unused second component for 1D non-array shadows + if (sampler.shadow && totalDims < 2) + totalDims = 2; + totalDims += (sampler.shadow ? 1 : 0) + proj; + if (totalDims > 4 && sampler.shadow) { + compare = true; + totalDims = 4; + } + assert(totalDims <= 4); + + if (extraProj && ! proj) + continue; + if (extraProj && (sampler.dim == Esd3D || sampler.shadow || !sampler.isCombined())) + continue; + + // loop over 16-bit floating-point texel addressing +#ifdef GLSLANG_WEB + const int f16TexAddr = 0; +#else + for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) +#endif + { + if (f16TexAddr && sampler.type != EbtFloat16) + continue; + if (f16TexAddr && sampler.shadow && ! 
compare) { + compare = true; // compare argument is always present + totalDims--; + } + // loop over "bool" lod clamp +#ifdef GLSLANG_WEB + const int lodClamp = 0; +#else + for (int lodClamp = 0; lodClamp <= 1 ;++lodClamp) +#endif + { + if (lodClamp && (profile == EEsProfile || version < 450)) + continue; + if (lodClamp && (proj || lod || fetch)) + continue; + + // loop over "bool" sparse or not +#ifdef GLSLANG_WEB + const int sparse = 0; +#else + for (int sparse = 0; sparse <= 1; ++sparse) +#endif + { + if (sparse && (profile == EEsProfile || version < 450)) + continue; + // Sparse sampling is not for 1D/1D array texture, buffer texture, and + // projective texture + if (sparse && (sampler.is1D() || sampler.isBuffer() || proj)) + continue; + + TString s; + + // return type + if (sparse) + s.append("int "); + else { + if (sampler.shadow) + if (sampler.type == EbtFloat16) + s.append("float16_t "); + else + s.append("float "); + else { + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + } + + // name + if (sparse) { + if (fetch) + s.append("sparseTexel"); + else + s.append("sparseTexture"); + } + else { + if (fetch) + s.append("texel"); + else + s.append("texture"); + } + if (proj) + s.append("Proj"); + if (lod) + s.append("Lod"); + if (grad) + s.append("Grad"); + if (fetch) + s.append("Fetch"); + if (offset) + s.append("Offset"); + if (lodClamp) + s.append("Clamp"); + if (lodClamp || sparse) + s.append("ARB"); + s.append("("); + + // sampler type + s.append(typeName); + // P coordinate + if (extraProj) { + if (f16TexAddr) + s.append(",f16vec4"); + else + s.append(",vec4"); + } else { + s.append(","); + TBasicType t = fetch ? EbtInt : (f16TexAddr ? EbtFloat16 : EbtFloat); + if (totalDims == 1) + s.append(TType::getBasicString(t)); + else { + s.append(prefixes[t]); + s.append("vec"); + s.append(postfixes[totalDims]); + } + } + // non-optional compare + if (compare) + s.append(",float"); + + // non-optional lod argument (lod that's not driven by lod loop) or sample + if ((fetch && !sampler.isBuffer() && + !sampler.isRect() && !sampler.isMultiSample()) + || (sampler.isMultiSample() && fetch)) + s.append(",int"); + // non-optional lod + if (lod) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + + // gradient arguments + if (grad) { + if (dimMap[sampler.dim] == 1) { + if (f16TexAddr) + s.append(",float16_t,float16_t"); + else + s.append(",float,float"); + } else { + if (f16TexAddr) + s.append(",f16vec"); + else + s.append(",vec"); + s.append(postfixes[dimMap[sampler.dim]]); + if (f16TexAddr) + s.append(",f16vec"); + else + s.append(",vec"); + s.append(postfixes[dimMap[sampler.dim]]); + } + } + // offset + if (offset) { + if (dimMap[sampler.dim] == 1) + s.append(",int"); + else { + s.append(",ivec"); + s.append(postfixes[dimMap[sampler.dim]]); + } + } + + // lod clamp + if (lodClamp) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + // texel out (for sparse texture) + if (sparse) { + s.append(",out "); + if (sampler.shadow) + if (sampler.type == EbtFloat16) + s.append("float16_t"); + else + s.append("float"); + else { + s.append(prefixes[sampler.type]); + s.append("vec4"); + } + } + // optional bias + if (bias) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + s.append(");\n"); + + // Add to the per-language set of built-ins + if (bias || lodClamp) { + stageBuiltins[EShLangFragment].append(s); + stageBuiltins[EShLangCompute].append(s); + } else + commonBuiltins.append(s); + + } + } + } + } + } + 
} + } + } + } + } +} + +// +// Helper function for add2ndGenerationSamplingImaging(), +// when adding context-independent built-in functions. +// +// Add all the texture gather functions for the given type. +// +void TBuiltIns::addGatherFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile) +{ +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#endif + + switch (sampler.dim) { + case Esd2D: + case EsdRect: + case EsdCube: + break; + default: + return; + } + + if (sampler.isMultiSample()) + return; + + if (version < 140 && sampler.dim == EsdRect && sampler.type != EbtFloat) + return; + + for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing + + if (f16TexAddr && sampler.type != EbtFloat16) + continue; + for (int offset = 0; offset < 3; ++offset) { // loop over three forms of offset in the call name: none, Offset, and Offsets + + for (int comp = 0; comp < 2; ++comp) { // loop over presence of comp argument + + if (comp > 0 && sampler.shadow) + continue; + + if (offset > 0 && sampler.dim == EsdCube) + continue; + + for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not + if (sparse && (profile == EEsProfile || version < 450)) + continue; + + TString s; + + // return type + if (sparse) + s.append("int "); + else { + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + + // name + if (sparse) + s.append("sparseTextureGather"); + else + s.append("textureGather"); + switch (offset) { + case 1: + s.append("Offset"); + break; + case 2: + s.append("Offsets"); + break; + default: + break; + } + if (sparse) + s.append("ARB"); + s.append("("); + + // sampler type argument + s.append(typeName); + + // P coordinate argument + if (f16TexAddr) + s.append(",f16vec"); + else + s.append(",vec"); + int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 
1 : 0); + s.append(postfixes[totalDims]); + + // refZ argument + if (sampler.shadow) + s.append(",float"); + + // offset argument + if (offset > 0) { + s.append(",ivec2"); + if (offset == 2) + s.append("[4]"); + } + + // texel out (for sparse texture) + if (sparse) { + s.append(",out "); + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + + // comp argument + if (comp) + s.append(",int"); + + s.append(");\n"); + commonBuiltins.append(s); + } + } + } + } + + if (sampler.dim == EsdRect || sampler.shadow) + return; + + if (profile == EEsProfile || version < 450) + return; + + for (int bias = 0; bias < 2; ++bias) { // loop over presence of bias argument + + for (int lod = 0; lod < 2; ++lod) { // loop over presence of lod argument + + if ((lod && bias) || (lod == 0 && bias == 0)) + continue; + + for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing + + if (f16TexAddr && sampler.type != EbtFloat16) + continue; + + for (int offset = 0; offset < 3; ++offset) { // loop over three forms of offset in the call name: none, Offset, and Offsets + + for (int comp = 0; comp < 2; ++comp) { // loop over presence of comp argument + + if (comp == 0 && bias) + continue; + + if (offset > 0 && sampler.dim == EsdCube) + continue; + + for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not + if (sparse && (profile == EEsProfile || version < 450)) + continue; + + TString s; + + // return type + if (sparse) + s.append("int "); + else { + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + + // name + if (sparse) + s.append("sparseTextureGather"); + else + s.append("textureGather"); + + if (lod) + s.append("Lod"); + + switch (offset) { + case 1: + s.append("Offset"); + break; + case 2: + s.append("Offsets"); + break; + default: + break; + } + + if (lod) + s.append("AMD"); + else if (sparse) + s.append("ARB"); + + s.append("("); + + // sampler type argument + s.append(typeName); + + // P coordinate argument + if (f16TexAddr) + s.append(",f16vec"); + else + s.append(",vec"); + int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0); + s.append(postfixes[totalDims]); + + // lod argument + if (lod) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + + // offset argument + if (offset > 0) { + s.append(",ivec2"); + if (offset == 2) + s.append("[4]"); + } + + // texel out (for sparse texture) + if (sparse) { + s.append(",out "); + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + + // comp argument + if (comp) + s.append(",int"); + + // bias argument + if (bias) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + + s.append(");\n"); + if (bias) + stageBuiltins[EShLangFragment].append(s); + else + commonBuiltins.append(s); + } + } + } + } + } + } +} + +// +// Add context-dependent built-in functions and variables that are present +// for the given version and profile. All the results are put into just the +// commonBuiltins, because it is called for just a specific stage. So, +// add stage-specific entries to the commonBuiltins, and only if that stage +// was requested. +// +void TBuiltIns::initialize(const TBuiltInResource &resources, int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language) +{ +#ifdef GLSLANG_WEB + version = 310; + profile = EEsProfile; +#endif + + // + // Initialize the context-dependent (resource-dependent) built-in strings for parsing. 
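+    // (e.g. with the default resource table, where maxVertexAttribs is 64, the
+    // ES path below appends "const mediump int gl_MaxVertexAttribs = 64;")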
+ // + + //============================================================================ + // + // Standard Uniforms + // + //============================================================================ + + TString& s = commonBuiltins; + const int maxSize = 200; + char builtInConstant[maxSize]; + + // + // Build string of implementation dependent constants. + // + + if (profile == EEsProfile) { + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexAttribs = %d;", resources.maxVertexAttribs); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexUniformVectors = %d;", resources.maxVertexUniformVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexTextureImageUnits = %d;", resources.maxVertexTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxCombinedTextureImageUnits = %d;", resources.maxCombinedTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxTextureImageUnits = %d;", resources.maxTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxFragmentUniformVectors = %d;", resources.maxFragmentUniformVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxDrawBuffers = %d;", resources.maxDrawBuffers); + s.append(builtInConstant); + + if (version == 100) { + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVaryingVectors = %d;", resources.maxVaryingVectors); + s.append(builtInConstant); + } else { + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexOutputVectors = %d;", resources.maxVertexOutputVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxFragmentInputVectors = %d;", resources.maxFragmentInputVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MinProgramTexelOffset = %d;", resources.minProgramTexelOffset); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxProgramTexelOffset = %d;", resources.maxProgramTexelOffset); + s.append(builtInConstant); + } + +#ifndef GLSLANG_WEB + if (version >= 310) { + // geometry + + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryInputComponents = %d;", resources.maxGeometryInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputComponents = %d;", resources.maxGeometryOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryImageUniforms = %d;", resources.maxGeometryImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTextureImageUnits = %d;", resources.maxGeometryTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputVertices = %d;", resources.maxGeometryOutputVertices); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTotalOutputComponents = %d;", resources.maxGeometryTotalOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryUniformComponents = %d;", resources.maxGeometryUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounters = %d;", 
resources.maxGeometryAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounterBuffers = %d;", resources.maxGeometryAtomicCounterBuffers); + s.append(builtInConstant); + + // tessellation + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlInputComponents = %d;", resources.maxTessControlInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlOutputComponents = %d;", resources.maxTessControlOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTextureImageUnits = %d;", resources.maxTessControlTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlUniformComponents = %d;", resources.maxTessControlUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationTextureImageUnits = %d;", resources.maxTessEvaluationTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxPatchVertices = %d;", resources.maxPatchVertices); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel); + s.append(builtInConstant); + + // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxPatchVertices + if (language == EShLangTessControl || language == EShLangTessEvaluation) { + s.append( + "in gl_PerVertex {" + "highp vec4 gl_Position;" + "highp float gl_PointSize;" + "highp vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "highp vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + "} gl_in[gl_MaxPatchVertices];" + "\n"); + } + } + + if (version >= 320) { + // tessellation + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlImageUniforms = %d;", resources.maxTessControlImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationImageUniforms = %d;", resources.maxTessEvaluationImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounters = %d;", resources.maxTessControlAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounters = %d;", resources.maxTessEvaluationAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounterBuffers = %d;", resources.maxTessControlAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, 
"const int gl_MaxTessEvaluationAtomicCounterBuffers = %d;", resources.maxTessEvaluationAtomicCounterBuffers); + s.append(builtInConstant); + } + + if (version >= 100) { + // GL_EXT_blend_func_extended + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxDualSourceDrawBuffersEXT = %d;", resources.maxDualSourceDrawBuffersEXT); + s.append(builtInConstant); + // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxDualSourceDrawBuffersEXT + if (language == EShLangFragment) { + s.append( + "mediump vec4 gl_SecondaryFragColorEXT;" + "mediump vec4 gl_SecondaryFragDataEXT[gl_MaxDualSourceDrawBuffersEXT];" + "\n"); + } + } + } else { + // non-ES profile + + if (version > 400) { + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexUniformVectors = %d;", resources.maxVertexUniformVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentUniformVectors = %d;", resources.maxFragmentUniformVectors); + s.append(builtInConstant); + } + + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAttribs = %d;", resources.maxVertexAttribs); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexTextureImageUnits = %d;", resources.maxVertexTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedTextureImageUnits = %d;", resources.maxCombinedTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTextureImageUnits = %d;", resources.maxTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxDrawBuffers = %d;", resources.maxDrawBuffers); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxLights = %d;", resources.maxLights); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxClipPlanes = %d;", resources.maxClipPlanes); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTextureUnits = %d;", resources.maxTextureUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTextureCoords = %d;", resources.maxTextureCoords); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexUniformComponents = %d;", resources.maxVertexUniformComponents); + s.append(builtInConstant); + + if (version < 150 || ARBCompatibility) { + snprintf(builtInConstant, maxSize, "const int gl_MaxVaryingFloats = %d;", resources.maxVaryingFloats); + s.append(builtInConstant); + } + + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentUniformComponents = %d;", resources.maxFragmentUniformComponents); + s.append(builtInConstant); + + if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) { + // + // OpenGL'uniform' state. Page numbers are in reference to version + // 1.4 of the OpenGL specification. + // + + // + // Matrix state. p. 31, 32, 37, 39, 40. + // + s.append("uniform mat4 gl_TextureMatrix[gl_MaxTextureCoords];" + + // + // Derived matrix state that provides inverse and transposed versions + // of the matrices above. + // + "uniform mat4 gl_TextureMatrixInverse[gl_MaxTextureCoords];" + + "uniform mat4 gl_TextureMatrixTranspose[gl_MaxTextureCoords];" + + "uniform mat4 gl_TextureMatrixInverseTranspose[gl_MaxTextureCoords];" + + // + // Clip planes p. 42. + // + "uniform vec4 gl_ClipPlane[gl_MaxClipPlanes];" + + // + // Light State p 50, 53, 55. 
+ // + "uniform gl_LightSourceParameters gl_LightSource[gl_MaxLights];" + + // + // Derived state from products of light. + // + "uniform gl_LightProducts gl_FrontLightProduct[gl_MaxLights];" + "uniform gl_LightProducts gl_BackLightProduct[gl_MaxLights];" + + // + // Texture Environment and Generation, p. 152, p. 40-42. + // + "uniform vec4 gl_TextureEnvColor[gl_MaxTextureImageUnits];" + "uniform vec4 gl_EyePlaneS[gl_MaxTextureCoords];" + "uniform vec4 gl_EyePlaneT[gl_MaxTextureCoords];" + "uniform vec4 gl_EyePlaneR[gl_MaxTextureCoords];" + "uniform vec4 gl_EyePlaneQ[gl_MaxTextureCoords];" + "uniform vec4 gl_ObjectPlaneS[gl_MaxTextureCoords];" + "uniform vec4 gl_ObjectPlaneT[gl_MaxTextureCoords];" + "uniform vec4 gl_ObjectPlaneR[gl_MaxTextureCoords];" + "uniform vec4 gl_ObjectPlaneQ[gl_MaxTextureCoords];"); + } + + if (version >= 130) { + snprintf(builtInConstant, maxSize, "const int gl_MaxClipDistances = %d;", resources.maxClipDistances); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxVaryingComponents = %d;", resources.maxVaryingComponents); + s.append(builtInConstant); + + // GL_ARB_shading_language_420pack + snprintf(builtInConstant, maxSize, "const mediump int gl_MinProgramTexelOffset = %d;", resources.minProgramTexelOffset); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxProgramTexelOffset = %d;", resources.maxProgramTexelOffset); + s.append(builtInConstant); + } + + // geometry + if (version >= 150) { + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryInputComponents = %d;", resources.maxGeometryInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputComponents = %d;", resources.maxGeometryOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTextureImageUnits = %d;", resources.maxGeometryTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputVertices = %d;", resources.maxGeometryOutputVertices); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTotalOutputComponents = %d;", resources.maxGeometryTotalOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryUniformComponents = %d;", resources.maxGeometryUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryVaryingComponents = %d;", resources.maxGeometryVaryingComponents); + s.append(builtInConstant); + + } + + if (version >= 150) { + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexOutputComponents = %d;", resources.maxVertexOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentInputComponents = %d;", resources.maxFragmentInputComponents); + s.append(builtInConstant); + } + + // tessellation + if (version >= 150) { + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlInputComponents = %d;", resources.maxTessControlInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlOutputComponents = %d;", resources.maxTessControlOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTextureImageUnits = %d;", resources.maxTessControlTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int 
gl_MaxTessControlUniformComponents = %d;", resources.maxTessControlUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationTextureImageUnits = %d;", resources.maxTessEvaluationTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxPatchVertices = %d;", resources.maxPatchVertices); + s.append(builtInConstant); + + // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxPatchVertices + if (language == EShLangTessControl || language == EShLangTessEvaluation) { + s.append( + "in gl_PerVertex {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + ); + if (profile == ECompatibilityProfile) + s.append( + "vec4 gl_ClipVertex;" + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (profile != EEsProfile && version >= 450) + s.append( + "float gl_CullDistance[];" + "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + s.append( + "} gl_in[gl_MaxPatchVertices];" + "\n"); + } + } + + if (version >= 150) { + snprintf(builtInConstant, maxSize, "const int gl_MaxViewports = %d;", resources.maxViewports); + s.append(builtInConstant); + } + + // images + if (version >= 130) { + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedImageUnitsAndFragmentOutputs = %d;", resources.maxCombinedImageUnitsAndFragmentOutputs); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxImageSamples = %d;", resources.maxImageSamples); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlImageUniforms = %d;", resources.maxTessControlImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationImageUniforms = %d;", resources.maxTessEvaluationImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryImageUniforms = %d;", resources.maxGeometryImageUniforms); + s.append(builtInConstant); + } + + // enhanced layouts + if (version >= 430) { + snprintf(builtInConstant, maxSize, "const int gl_MaxTransformFeedbackBuffers = %d;", resources.maxTransformFeedbackBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTransformFeedbackInterleavedComponents = %d;", 
resources.maxTransformFeedbackInterleavedComponents); + s.append(builtInConstant); + } +#endif + } + + // compute + if ((profile == EEsProfile && version >= 310) || (profile != EEsProfile && version >= 420)) { + snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupCount = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupCountX, + resources.maxComputeWorkGroupCountY, + resources.maxComputeWorkGroupCountZ); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupSize = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupSizeX, + resources.maxComputeWorkGroupSizeY, + resources.maxComputeWorkGroupSizeZ); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeUniformComponents = %d;", resources.maxComputeUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeTextureImageUnits = %d;", resources.maxComputeTextureImageUnits); + s.append(builtInConstant); + + s.append("\n"); + } + +#ifndef GLSLANG_WEB + // images (some in compute below) + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 130)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxImageUnits = %d;", resources.maxImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedShaderOutputResources = %d;", resources.maxCombinedShaderOutputResources); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexImageUniforms = %d;", resources.maxVertexImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentImageUniforms = %d;", resources.maxFragmentImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedImageUniforms = %d;", resources.maxCombinedImageUniforms); + s.append(builtInConstant); + } + + // compute + if ((profile == EEsProfile && version >= 310) || (profile != EEsProfile && version >= 420)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeImageUniforms = %d;", resources.maxComputeImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeAtomicCounters = %d;", resources.maxComputeAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeAtomicCounterBuffers = %d;", resources.maxComputeAtomicCounterBuffers); + s.append(builtInConstant); + + s.append("\n"); + } + + // atomic counters (some in compute below) + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 420)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounters = %d;", resources.maxVertexAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentAtomicCounters = %d;", resources.maxFragmentAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedAtomicCounters = %d;", resources.maxCombinedAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxAtomicCounterBindings = %d;", resources.maxAtomicCounterBindings); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounterBuffers = %d;", resources.maxVertexAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentAtomicCounterBuffers = %d;", resources.maxFragmentAtomicCounterBuffers);
+ s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedAtomicCounterBuffers = %d;", resources.maxCombinedAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxAtomicCounterBufferSize = %d;", resources.maxAtomicCounterBufferSize); + s.append(builtInConstant); + } + if (profile != EEsProfile && version >= 420) { + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounters = %d;", resources.maxTessControlAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounters = %d;", resources.maxTessEvaluationAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounters = %d;", resources.maxGeometryAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounterBuffers = %d;", resources.maxTessControlAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounterBuffers = %d;", resources.maxTessEvaluationAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounterBuffers = %d;", resources.maxGeometryAtomicCounterBuffers); + s.append(builtInConstant); + + s.append("\n"); + } + + // GL_ARB_cull_distance + if (profile != EEsProfile && version >= 450) { + snprintf(builtInConstant, maxSize, "const int gl_MaxCullDistances = %d;", resources.maxCullDistances); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedClipAndCullDistances = %d;", resources.maxCombinedClipAndCullDistances); + s.append(builtInConstant); + } + + // GL_ARB_ES3_1_compatibility + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 310)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxSamples = %d;", resources.maxSamples); + s.append(builtInConstant); + } + + // SPV_NV_mesh_shader + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxMeshOutputVerticesNV = %d;", resources.maxMeshOutputVerticesNV); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxMeshOutputPrimitivesNV = %d;", resources.maxMeshOutputPrimitivesNV); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxMeshWorkGroupSizeNV = ivec3(%d,%d,%d);", resources.maxMeshWorkGroupSizeX_NV, + resources.maxMeshWorkGroupSizeY_NV, + resources.maxMeshWorkGroupSizeZ_NV); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxTaskWorkGroupSizeNV = ivec3(%d,%d,%d);", resources.maxTaskWorkGroupSizeX_NV, + resources.maxTaskWorkGroupSizeY_NV, + resources.maxTaskWorkGroupSizeZ_NV); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxMeshViewCountNV = %d;", resources.maxMeshViewCountNV); + s.append(builtInConstant); + + s.append("\n"); + } +#endif + + s.append("\n"); +} + +// +// To support special built-ins that have a special qualifier that cannot be declared textually +// in a shader, like gl_Position. +// +// This lets the type of the built-in be declared textually, and then have just its qualifier be +// updated afterward. +// +// Safe to call even if name is not present.
+// + // Only use this for built-in variables that have a special qualifier in TStorageQualifier. + // New built-in variables should use a generic (textually declarable) qualifier in + // TStorageQualifier and only call BuiltInVariable(). + // +static void SpecialQualifier(const char* name, TStorageQualifier qualifier, TBuiltInVariable builtIn, TSymbolTable& symbolTable) +{ + TSymbol* symbol = symbolTable.find(name); + if (symbol == nullptr) + return; + + TQualifier& symQualifier = symbol->getWritableType().getQualifier(); + symQualifier.storage = qualifier; + symQualifier.builtIn = builtIn; +} + +// +// To tag built-in variables with their TBuiltInVariable enum. Use this when the +// normal declaration text already gets the qualifier right, and all that's needed +// is setting the builtIn field. This should be the normal way for all new +// built-in variables. +// +// If SpecialQualifier() was called, this does not need to be called. +// +// Safe to call even if name is not present. +// +static void BuiltInVariable(const char* name, TBuiltInVariable builtIn, TSymbolTable& symbolTable) +{ + TSymbol* symbol = symbolTable.find(name); + if (symbol == nullptr) + return; + + TQualifier& symQualifier = symbol->getWritableType().getQualifier(); + symQualifier.builtIn = builtIn; +} + +// +// For built-in variables inside a named block. +// SpecialQualifier() won't ever go inside a block; a member's qualifier comes +// from the qualification of the block. +// +// See comments above for other details. +// +static void BuiltInVariable(const char* blockName, const char* name, TBuiltInVariable builtIn, TSymbolTable& symbolTable) +{ + TSymbol* symbol = symbolTable.find(blockName); + if (symbol == nullptr) + return; + + TTypeList& structure = *symbol->getWritableType().getWritableStruct(); + for (int i = 0; i < (int)structure.size(); ++i) { + if (structure[i].type->getFieldName().compare(name) == 0) { + structure[i].type->getQualifier().builtIn = builtIn; + return; + } + } +} + +// +// Finish adding/processing context-independent built-in symbols. +// 1) Programmatically add symbols that could not be added by simple text strings above. +// 2) Map built-in functions to operators, for those that will turn into an operation node +// instead of remaining a function call. +// 3) Tag extension-related symbols added to their base version with their extensions, so +// that if an early version has the extension turned off, there is an error reported on use. +// +void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) +{ +#ifdef GLSLANG_WEB + version = 310; + profile = EEsProfile; +#endif + + // + // Tag built-in variables and functions with additional qualifier and extension information + // that cannot be declared with the text strings. + // + + // N.B.: a symbol should only be tagged once, and this function is called multiple times, once + // per stage that's used for this profile.
So + // - generally, stick common ones in the fragment stage to ensure they are tagged exactly once + // - for ES, which has different precisions for different stages, the coarsest-grained tagging + // for a built-in used in many stages needs to be once for the fragment stage and once for + // the vertex stage + + switch(language) { + case EShLangVertex: + if (spvVersion.vulkan > 0) { + BuiltInVariable("gl_VertexIndex", EbvVertexIndex, symbolTable); + BuiltInVariable("gl_InstanceIndex", EbvInstanceIndex, symbolTable); + } + +#ifndef GLSLANG_WEB + if (spvVersion.vulkan == 0) { + SpecialQualifier("gl_VertexID", EvqVertexId, EbvVertexId, symbolTable); + SpecialQualifier("gl_InstanceID", EvqInstanceId, EbvInstanceId, symbolTable); + } + + if (profile != EEsProfile) { + if (version >= 440) { + symbolTable.setVariableExtensions("gl_BaseVertexARB", 1, &E_GL_ARB_shader_draw_parameters); + symbolTable.setVariableExtensions("gl_BaseInstanceARB", 1, &E_GL_ARB_shader_draw_parameters); + symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters); + BuiltInVariable("gl_BaseVertexARB", EbvBaseVertex, symbolTable); + BuiltInVariable("gl_BaseInstanceARB", EbvBaseInstance, symbolTable); + BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable); + } + if (version >= 460) { + BuiltInVariable("gl_BaseVertex", EbvBaseVertex, symbolTable); + BuiltInVariable("gl_BaseInstance", EbvBaseInstance, symbolTable); + BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable); + } + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + symbolTable.setFunctionExtensions("ballotARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setFunctionExtensions("readInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setFunctionExtensions("readFirstInvocationARB", 1, &E_GL_ARB_shader_ballot); + + if (version >= 430) { + symbolTable.setFunctionExtensions("anyInvocationARB", 1, &E_GL_ARB_shader_group_vote); + symbolTable.setFunctionExtensions("allInvocationsARB", 1, &E_GL_ARB_shader_group_vote); + symbolTable.setFunctionExtensions("allInvocationsEqualARB", 1, &E_GL_ARB_shader_group_vote); + } + } + + + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("minInvocationsAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("swizzleInvocationsAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("swizzleInvocationsWithPatternAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("writeInvocationAMD", 1, &E_GL_AMD_shader_ballot); + 
symbolTable.setFunctionExtensions("mbcntAMD", 1, &E_GL_AMD_shader_ballot); + + symbolTable.setFunctionExtensions("minInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + } + + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("min3", 1, &E_GL_AMD_shader_trinary_minmax); + symbolTable.setFunctionExtensions("max3", 1, &E_GL_AMD_shader_trinary_minmax); + symbolTable.setFunctionExtensions("mid3", 1, &E_GL_AMD_shader_trinary_minmax); + } + + if (profile != EEsProfile) { + symbolTable.setVariableExtensions("gl_SIMDGroupSizeAMD", 1, &E_GL_AMD_gcn_shader); + SpecialQualifier("gl_SIMDGroupSizeAMD", EvqVaryingIn, EbvSubGroupSize, symbolTable); + + symbolTable.setFunctionExtensions("cubeFaceIndexAMD", 1, &E_GL_AMD_gcn_shader); + symbolTable.setFunctionExtensions("cubeFaceCoordAMD", 1, &E_GL_AMD_gcn_shader); + symbolTable.setFunctionExtensions("timeAMD", 1, &E_GL_AMD_gcn_shader); + } + + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("fragmentMaskFetchAMD", 1, &E_GL_AMD_shader_fragment_mask); + symbolTable.setFunctionExtensions("fragmentFetchAMD", 1, &E_GL_AMD_shader_fragment_mask); + } + + symbolTable.setFunctionExtensions("countLeadingZeros", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("countTrailingZeros", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("absoluteDifference", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("addSaturate", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("subtractSaturate", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("average", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("averageRounded", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("multiply32x16", 1, &E_GL_INTEL_shader_integer_functions2); + + symbolTable.setFunctionExtensions("textureFootprintNV", 1, &E_GL_NV_shader_texture_footprint); + symbolTable.setFunctionExtensions("textureFootprintClampNV", 1, &E_GL_NV_shader_texture_footprint); + symbolTable.setFunctionExtensions("textureFootprintLodNV", 1, &E_GL_NV_shader_texture_footprint); + symbolTable.setFunctionExtensions("textureFootprintGradNV", 1, &E_GL_NV_shader_texture_footprint); + symbolTable.setFunctionExtensions("textureFootprintGradClampNV", 1, 
&E_GL_NV_shader_texture_footprint); + // Compatibility variables, vertex only + if (spvVersion.spv == 0) { + BuiltInVariable("gl_Color", EbvColor, symbolTable); + BuiltInVariable("gl_SecondaryColor", EbvSecondaryColor, symbolTable); + BuiltInVariable("gl_Normal", EbvNormal, symbolTable); + BuiltInVariable("gl_Vertex", EbvVertex, symbolTable); + BuiltInVariable("gl_MultiTexCoord0", EbvMultiTexCoord0, symbolTable); + BuiltInVariable("gl_MultiTexCoord1", EbvMultiTexCoord1, symbolTable); + BuiltInVariable("gl_MultiTexCoord2", EbvMultiTexCoord2, symbolTable); + BuiltInVariable("gl_MultiTexCoord3", EbvMultiTexCoord3, symbolTable); + BuiltInVariable("gl_MultiTexCoord4", EbvMultiTexCoord4, symbolTable); + BuiltInVariable("gl_MultiTexCoord5", EbvMultiTexCoord5, symbolTable); + BuiltInVariable("gl_MultiTexCoord6", EbvMultiTexCoord6, symbolTable); + BuiltInVariable("gl_MultiTexCoord7", EbvMultiTexCoord7, symbolTable); + BuiltInVariable("gl_FogCoord", EbvFogFragCoord, symbolTable); + } + + if (profile == EEsProfile) { + if (spvVersion.spv == 0) { + symbolTable.setFunctionExtensions("texture2DGradEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjGradEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeGradEXT", 1, &E_GL_EXT_shader_texture_lod); + if (version == 310) + symbolTable.setFunctionExtensions("textureGatherOffsets", Num_AEP_gpu_shader5, AEP_gpu_shader5); + } + if (version == 310) + symbolTable.setFunctionExtensions("fma", Num_AEP_gpu_shader5, AEP_gpu_shader5); + } + + if (profile == EEsProfile && version < 320) { + symbolTable.setFunctionExtensions("imageAtomicAdd", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicMin", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicMax", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicAnd", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicOr", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicXor", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicExchange", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicCompSwap", 1, &E_GL_OES_shader_image_atomic); + } + + if (version >= 300 /* both ES and non-ES */) { + symbolTable.setVariableExtensions("gl_ViewID_OVR", Num_OVR_multiview_EXTs, OVR_multiview_EXTs); + BuiltInVariable("gl_ViewID_OVR", EbvViewIndex, symbolTable); + } + + if (profile == EEsProfile) { + symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers); + symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers); + } + // Fall through + + case EShLangTessControl: + if (profile == EEsProfile && version >= 310) { + BuiltInVariable("gl_BoundingBoxEXT", EbvBoundingBox, symbolTable); + symbolTable.setVariableExtensions("gl_BoundingBoxEXT", 1, + &E_GL_EXT_primitive_bounding_box); + BuiltInVariable("gl_BoundingBoxOES", EbvBoundingBox, symbolTable); + symbolTable.setVariableExtensions("gl_BoundingBoxOES", 1, + &E_GL_OES_primitive_bounding_box); + + if (version >= 320) { + BuiltInVariable("gl_BoundingBox", EbvBoundingBox, symbolTable); + } + } + // Fall through + + case EShLangTessEvaluation: + case EShLangGeometry: +#endif + SpecialQualifier("gl_Position", EvqPosition, EbvPosition, symbolTable); + SpecialQualifier("gl_PointSize", EvqPointSize, EbvPointSize, symbolTable); + + BuiltInVariable("gl_in", 
"gl_Position", EbvPosition, symbolTable); + BuiltInVariable("gl_in", "gl_PointSize", EbvPointSize, symbolTable); + + BuiltInVariable("gl_out", "gl_Position", EbvPosition, symbolTable); + BuiltInVariable("gl_out", "gl_PointSize", EbvPointSize, symbolTable); + +#ifndef GLSLANG_WEB + SpecialQualifier("gl_ClipVertex", EvqClipVertex, EbvClipVertex, symbolTable); + + BuiltInVariable("gl_in", "gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_in", "gl_CullDistance", EbvCullDistance, symbolTable); + + BuiltInVariable("gl_out", "gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_out", "gl_CullDistance", EbvCullDistance, symbolTable); + + BuiltInVariable("gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_CullDistance", EbvCullDistance, symbolTable); + BuiltInVariable("gl_PrimitiveIDIn", EbvPrimitiveId, symbolTable); + BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable); + BuiltInVariable("gl_InvocationID", EbvInvocationId, symbolTable); + BuiltInVariable("gl_Layer", EbvLayer, symbolTable); + BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable); + + if (language != EShLangGeometry) { + symbolTable.setVariableExtensions("gl_Layer", Num_viewportEXTs, viewportEXTs); + symbolTable.setVariableExtensions("gl_ViewportIndex", Num_viewportEXTs, viewportEXTs); + } + symbolTable.setVariableExtensions("gl_ViewportMask", 1, &E_GL_NV_viewport_array2); + symbolTable.setVariableExtensions("gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_SecondaryViewportMaskNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + symbolTable.setVariableExtensions("gl_ViewportMaskPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + + BuiltInVariable("gl_ViewportMask", EbvViewportMaskNV, symbolTable); + BuiltInVariable("gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable); + BuiltInVariable("gl_SecondaryViewportMaskNV", EbvSecondaryViewportMaskNV, symbolTable); + BuiltInVariable("gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + BuiltInVariable("gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable); + + if (language == EShLangVertex || language == EShLangGeometry) { + symbolTable.setVariableExtensions("gl_in", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_in", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + + BuiltInVariable("gl_in", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable); + BuiltInVariable("gl_in", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + } + symbolTable.setVariableExtensions("gl_out", "gl_ViewportMask", 1, &E_GL_NV_viewport_array2); + symbolTable.setVariableExtensions("gl_out", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_out", "gl_SecondaryViewportMaskNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_out", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + symbolTable.setVariableExtensions("gl_out", "gl_ViewportMaskPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + + BuiltInVariable("gl_out", "gl_ViewportMask", EbvViewportMaskNV, symbolTable); + BuiltInVariable("gl_out", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable); + BuiltInVariable("gl_out", "gl_SecondaryViewportMaskNV", EbvSecondaryViewportMaskNV, symbolTable); 
+ BuiltInVariable("gl_out", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + BuiltInVariable("gl_out", "gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable); + + BuiltInVariable("gl_PatchVerticesIn", EbvPatchVertices, symbolTable); + BuiltInVariable("gl_TessLevelOuter", EbvTessLevelOuter, symbolTable); + BuiltInVariable("gl_TessLevelInner", EbvTessLevelInner, symbolTable); + BuiltInVariable("gl_TessCoord", EbvTessCoord, symbolTable); + + if (version < 410) + symbolTable.setVariableExtensions("gl_ViewportIndex", 1, &E_GL_ARB_viewport_array); + + // Compatibility variables + + BuiltInVariable("gl_in", "gl_ClipVertex", EbvClipVertex, symbolTable); + BuiltInVariable("gl_in", "gl_FrontColor", EbvFrontColor, symbolTable); + BuiltInVariable("gl_in", "gl_BackColor", EbvBackColor, symbolTable); + BuiltInVariable("gl_in", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable); + BuiltInVariable("gl_in", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable); + BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable); + + BuiltInVariable("gl_out", "gl_ClipVertex", EbvClipVertex, symbolTable); + BuiltInVariable("gl_out", "gl_FrontColor", EbvFrontColor, symbolTable); + BuiltInVariable("gl_out", "gl_BackColor", EbvBackColor, symbolTable); + BuiltInVariable("gl_out", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable); + BuiltInVariable("gl_out", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable); + BuiltInVariable("gl_out", "gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_out", "gl_FogFragCoord", EbvFogFragCoord, symbolTable); + + BuiltInVariable("gl_ClipVertex", EbvClipVertex, symbolTable); + BuiltInVariable("gl_FrontColor", EbvFrontColor, symbolTable); + BuiltInVariable("gl_BackColor", EbvBackColor, symbolTable); + BuiltInVariable("gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable); + BuiltInVariable("gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable); + BuiltInVariable("gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_FogFragCoord", EbvFogFragCoord, symbolTable); + + // gl_PointSize, when it needs to be tied to an extension, is always a member of a block. + // (Sometimes with an instance name, sometimes anonymous). 
+ if (profile == EEsProfile) { + if (language == EShLangGeometry) { + symbolTable.setVariableExtensions("gl_PointSize", Num_AEP_geometry_point_size, AEP_geometry_point_size); + symbolTable.setVariableExtensions("gl_in", "gl_PointSize", Num_AEP_geometry_point_size, AEP_geometry_point_size); + } else if (language == EShLangTessEvaluation || language == EShLangTessControl) { + // gl_in tessellation settings of gl_PointSize are in the context-dependent paths + symbolTable.setVariableExtensions("gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size); + symbolTable.setVariableExtensions("gl_out", "gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size); + } + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview); + BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable); + } + + if (profile != EEsProfile) { + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + 
BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } +#endif + break; + + case EShLangFragment: + SpecialQualifier("gl_FrontFacing", EvqFace, EbvFace, symbolTable); + SpecialQualifier("gl_FragCoord", EvqFragCoord, EbvFragCoord, symbolTable); + SpecialQualifier("gl_PointCoord", EvqPointCoord, EbvPointCoord, symbolTable); + if (spvVersion.spv == 0) + SpecialQualifier("gl_FragColor", EvqFragColor, EbvFragColor, symbolTable); + else { + TSymbol* symbol = symbolTable.find("gl_FragColor"); + if (symbol) { + symbol->getWritableType().getQualifier().storage = EvqVaryingOut; + symbol->getWritableType().getQualifier().layoutLocation = 0; + } + } + SpecialQualifier("gl_FragDepth", EvqFragDepth, EbvFragDepth, symbolTable); +#ifndef GLSLANG_WEB + SpecialQualifier("gl_FragDepthEXT", EvqFragDepth, EbvFragDepth, symbolTable); + SpecialQualifier("gl_HelperInvocation", EvqVaryingIn, EbvHelperInvocation, symbolTable); + + BuiltInVariable("gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_CullDistance", EbvCullDistance, symbolTable); + BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable); + + if (profile != EEsProfile && version >= 140) { + symbolTable.setVariableExtensions("gl_FragStencilRefARB", 1, &E_GL_ARB_shader_stencil_export); + BuiltInVariable("gl_FragStencilRefARB", EbvFragStencilRef, symbolTable); + } + + if (profile != EEsProfile && version < 400) { + symbolTable.setFunctionExtensions("textureQueryLod", 1, &E_GL_ARB_texture_query_lod); + } + + if (profile != EEsProfile && version >= 460) { + symbolTable.setFunctionExtensions("rayQueryInitializeEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryTerminateEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGenerateIntersectionEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryConfirmIntersectionEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryProceedEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionTypeEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionTEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetRayFlagsEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetRayTMinEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionInstanceCustomIndexEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionInstanceIdEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionGeometryIndexEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionPrimitiveIndexEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionBarycentricsEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionFrontFaceEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionCandidateAABBOpaqueEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionObjectRayDirectionEXT", 1, &E_GL_EXT_ray_query); + 
symbolTable.setFunctionExtensions("rayQueryGetIntersectionObjectRayOriginEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionObjectToWorldEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionWorldToObjectEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetWorldRayOriginEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetWorldRayDirectionEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setVariableExtensions("gl_RayFlagsSkipAABBEXT", 1, &E_GL_EXT_ray_flags_primitive_culling); + symbolTable.setVariableExtensions("gl_RayFlagsSkipTrianglesEXT", 1, &E_GL_EXT_ray_flags_primitive_culling); + } + + if ((profile != EEsProfile && version >= 130) || + (profile == EEsProfile && version >= 310)) { + BuiltInVariable("gl_SampleID", EbvSampleId, symbolTable); + BuiltInVariable("gl_SamplePosition", EbvSamplePosition, symbolTable); + BuiltInVariable("gl_SampleMask", EbvSampleMask, symbolTable); + + if (profile != EEsProfile && version < 400) { + BuiltInVariable("gl_NumSamples", EbvSampleMask, symbolTable); + + symbolTable.setVariableExtensions("gl_SampleMask", 1, &E_GL_ARB_sample_shading); + symbolTable.setVariableExtensions("gl_SampleID", 1, &E_GL_ARB_sample_shading); + symbolTable.setVariableExtensions("gl_SamplePosition", 1, &E_GL_ARB_sample_shading); + symbolTable.setVariableExtensions("gl_NumSamples", 1, &E_GL_ARB_sample_shading); + } else { + BuiltInVariable("gl_SampleMaskIn", EbvSampleMask, symbolTable); + + if (profile == EEsProfile && version < 320) { + symbolTable.setVariableExtensions("gl_SampleID", 1, &E_GL_OES_sample_variables); + symbolTable.setVariableExtensions("gl_SamplePosition", 1, &E_GL_OES_sample_variables); + symbolTable.setVariableExtensions("gl_SampleMaskIn", 1, &E_GL_OES_sample_variables); + symbolTable.setVariableExtensions("gl_SampleMask", 1, &E_GL_OES_sample_variables); + symbolTable.setVariableExtensions("gl_NumSamples", 1, &E_GL_OES_sample_variables); + } + } + } + + BuiltInVariable("gl_Layer", EbvLayer, symbolTable); + BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable); + + // Compatibility variables + + BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable); + BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_in", "gl_Color", EbvColor, symbolTable); + BuiltInVariable("gl_in", "gl_SecondaryColor", EbvSecondaryColor, symbolTable); + + BuiltInVariable("gl_FogFragCoord", EbvFogFragCoord, symbolTable); + BuiltInVariable("gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_Color", EbvColor, symbolTable); + BuiltInVariable("gl_SecondaryColor", EbvSecondaryColor, symbolTable); + + // built-in functions + + if (profile == EEsProfile) { + if (spvVersion.spv == 0) { + symbolTable.setFunctionExtensions("texture2DLodEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjLodEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeLodEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DGradEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjGradEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeGradEXT", 1, &E_GL_EXT_shader_texture_lod); + if (version < 320) + symbolTable.setFunctionExtensions("textureGatherOffsets", Num_AEP_gpu_shader5, AEP_gpu_shader5); + } + if (version == 100) { + 
symbolTable.setFunctionExtensions("dFdx", 1, &E_GL_OES_standard_derivatives); + symbolTable.setFunctionExtensions("dFdy", 1, &E_GL_OES_standard_derivatives); + symbolTable.setFunctionExtensions("fwidth", 1, &E_GL_OES_standard_derivatives); + } + if (version == 310) { + symbolTable.setFunctionExtensions("fma", Num_AEP_gpu_shader5, AEP_gpu_shader5); + symbolTable.setFunctionExtensions("interpolateAtCentroid", 1, &E_GL_OES_shader_multisample_interpolation); + symbolTable.setFunctionExtensions("interpolateAtSample", 1, &E_GL_OES_shader_multisample_interpolation); + symbolTable.setFunctionExtensions("interpolateAtOffset", 1, &E_GL_OES_shader_multisample_interpolation); + } + } else if (version < 130) { + if (spvVersion.spv == 0) { + symbolTable.setFunctionExtensions("texture1DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture3DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture1DProjLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture3DProjLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow1DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow1DProjLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DProjLod", 1, &E_GL_ARB_shader_texture_lod); + } + } + + // E_GL_ARB_shader_texture_lod functions usable only with the extension enabled + if (profile != EEsProfile && spvVersion.spv == 0) { + symbolTable.setFunctionExtensions("texture1DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture1DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture3DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture3DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow1DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow1DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DRectGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DRectProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DRectGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DRectProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + } + + // E_GL_ARB_shader_image_load_store + if (profile != EEsProfile && version < 420) + symbolTable.setFunctionExtensions("memoryBarrier", 1, &E_GL_ARB_shader_image_load_store); + // All the image access functions are protected by checks on the type of the first argument. 
+ + // E_GL_ARB_shader_atomic_counters + if (profile != EEsProfile && version < 420) { + symbolTable.setFunctionExtensions("atomicCounterIncrement", 1, &E_GL_ARB_shader_atomic_counters); + symbolTable.setFunctionExtensions("atomicCounterDecrement", 1, &E_GL_ARB_shader_atomic_counters); + symbolTable.setFunctionExtensions("atomicCounter" , 1, &E_GL_ARB_shader_atomic_counters); + } + + // E_GL_ARB_derivative_control + if (profile != EEsProfile && version < 450) { + symbolTable.setFunctionExtensions("dFdxFine", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("dFdyFine", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("fwidthFine", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("dFdxCoarse", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_ARB_derivative_control); + } + + // E_GL_ARB_sparse_texture2 + if (profile != EEsProfile) + { + symbolTable.setFunctionExtensions("sparseTextureARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureLodARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTexelFetchARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTexelFetchOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureLodOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGradARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGradOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGatherARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGatherOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGatherOffsetsARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseImageLoadARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTexelsResident", 1, &E_GL_ARB_sparse_texture2); + } + + // E_GL_ARB_sparse_texture_clamp + if (profile != EEsProfile) + { + symbolTable.setFunctionExtensions("sparseTextureClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("sparseTextureOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("sparseTextureGradClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("sparseTextureGradOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("textureClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("textureOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("textureGradClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("textureGradOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + } + + // E_GL_AMD_shader_explicit_vertex_parameter + if (profile != EEsProfile) { + symbolTable.setVariableExtensions("gl_BaryCoordNoPerspAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordNoPerspCentroidAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordNoPerspSampleAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + 
symbolTable.setVariableExtensions("gl_BaryCoordSmoothAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordSmoothCentroidAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordSmoothSampleAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordPullModelAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + + symbolTable.setFunctionExtensions("interpolateAtVertexAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + + BuiltInVariable("gl_BaryCoordNoPerspAMD", EbvBaryCoordNoPersp, symbolTable); + BuiltInVariable("gl_BaryCoordNoPerspCentroidAMD", EbvBaryCoordNoPerspCentroid, symbolTable); + BuiltInVariable("gl_BaryCoordNoPerspSampleAMD", EbvBaryCoordNoPerspSample, symbolTable); + BuiltInVariable("gl_BaryCoordSmoothAMD", EbvBaryCoordSmooth, symbolTable); + BuiltInVariable("gl_BaryCoordSmoothCentroidAMD", EbvBaryCoordSmoothCentroid, symbolTable); + BuiltInVariable("gl_BaryCoordSmoothSampleAMD", EbvBaryCoordSmoothSample, symbolTable); + BuiltInVariable("gl_BaryCoordPullModelAMD", EbvBaryCoordPullModel, symbolTable); + } + + // E_GL_AMD_texture_gather_bias_lod + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("textureGatherLodAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("textureGatherLodOffsetAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("textureGatherLodOffsetsAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("sparseTextureGatherLodAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("sparseTextureGatherLodOffsetAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("sparseTextureGatherLodOffsetsAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + } + + // E_GL_AMD_shader_image_load_store_lod + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("imageLoadLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod); + symbolTable.setFunctionExtensions("imageStoreLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod); + symbolTable.setFunctionExtensions("sparseImageLoadLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod); + } + if (profile != EEsProfile && version >= 430) { + symbolTable.setVariableExtensions("gl_FragFullyCoveredNV", 1, &E_GL_NV_conservative_raster_underestimation); + BuiltInVariable("gl_FragFullyCoveredNV", EbvFragFullyCoveredNV, symbolTable); + } + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) { + symbolTable.setVariableExtensions("gl_FragmentSizeNV", 1, &E_GL_NV_shading_rate_image); + symbolTable.setVariableExtensions("gl_InvocationsPerPixelNV", 1, &E_GL_NV_shading_rate_image); + BuiltInVariable("gl_FragmentSizeNV", EbvFragmentSizeNV, symbolTable); + BuiltInVariable("gl_InvocationsPerPixelNV", EbvInvocationsPerPixelNV, symbolTable); + symbolTable.setVariableExtensions("gl_BaryCoordNV", 1, &E_GL_NV_fragment_shader_barycentric); + symbolTable.setVariableExtensions("gl_BaryCoordNoPerspNV", 1, &E_GL_NV_fragment_shader_barycentric); + BuiltInVariable("gl_BaryCoordNV", EbvBaryCoordNV, symbolTable); + BuiltInVariable("gl_BaryCoordNoPerspNV", EbvBaryCoordNoPerspNV, symbolTable); + } + + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 310)) { + symbolTable.setVariableExtensions("gl_FragSizeEXT", 1, &E_GL_EXT_fragment_invocation_density); + 
symbolTable.setVariableExtensions("gl_FragInvocationCountEXT", 1, &E_GL_EXT_fragment_invocation_density); + BuiltInVariable("gl_FragSizeEXT", EbvFragSizeEXT, symbolTable); + BuiltInVariable("gl_FragInvocationCountEXT", EbvFragInvocationCountEXT, symbolTable); + } + + symbolTable.setVariableExtensions("gl_FragDepthEXT", 1, &E_GL_EXT_frag_depth); + + symbolTable.setFunctionExtensions("clockARB", 1, &E_GL_ARB_shader_clock); + symbolTable.setFunctionExtensions("clock2x32ARB", 1, &E_GL_ARB_shader_clock); + + symbolTable.setFunctionExtensions("clockRealtimeEXT", 1, &E_GL_EXT_shader_realtime_clock); + symbolTable.setFunctionExtensions("clockRealtime2x32EXT", 1, &E_GL_EXT_shader_realtime_clock); + + if (profile == EEsProfile && version < 320) { + symbolTable.setVariableExtensions("gl_PrimitiveID", Num_AEP_geometry_shader, AEP_geometry_shader); + symbolTable.setVariableExtensions("gl_Layer", Num_AEP_geometry_shader, AEP_geometry_shader); + } + + if (profile == EEsProfile && version < 320) { + symbolTable.setFunctionExtensions("imageAtomicAdd", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicMin", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicMax", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicAnd", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicOr", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicXor", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicExchange", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicCompSwap", 1, &E_GL_OES_shader_image_atomic); + } + + if (profile != EEsProfile && version < 330 ) { + symbolTable.setFunctionExtensions("floatBitsToInt", 1, &E_GL_ARB_shader_bit_encoding); + symbolTable.setFunctionExtensions("floatBitsToUint", 1, &E_GL_ARB_shader_bit_encoding); + symbolTable.setFunctionExtensions("intBitsToFloat", 1, &E_GL_ARB_shader_bit_encoding); + symbolTable.setFunctionExtensions("uintBitsToFloat", 1, &E_GL_ARB_shader_bit_encoding); + } + + if (profile != EEsProfile && version < 430 ) { + symbolTable.setFunctionExtensions("imageSize", 1, &E_GL_ARB_shader_image_size); + } + + // GL_ARB_shader_storage_buffer_object + if (profile != EEsProfile && version < 430 ) { + symbolTable.setFunctionExtensions("atomicAdd", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicMin", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicMax", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicAnd", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicOr", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicXor", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicExchange", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicCompSwap", 1, &E_GL_ARB_shader_storage_buffer_object); + } + + // GL_ARB_shading_language_packing + if (profile != EEsProfile && version < 400 ) { + symbolTable.setFunctionExtensions("packUnorm2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackUnorm2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("packSnorm4x8", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("packUnorm4x8", 1, 
&E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackSnorm4x8", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackUnorm4x8", 1, &E_GL_ARB_shading_language_packing); + } + if (profile != EEsProfile && version < 420 ) { + symbolTable.setFunctionExtensions("packSnorm2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackSnorm2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackHalf2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("packHalf2x16", 1, &E_GL_ARB_shading_language_packing); + } + + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview); + BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable); + if (version >= 300 /* both ES and non-ES */) { + symbolTable.setVariableExtensions("gl_ViewID_OVR", Num_OVR_multiview_EXTs, OVR_multiview_EXTs); + BuiltInVariable("gl_ViewID_OVR", EbvViewIndex, symbolTable); + } + + // GL_ARB_shader_ballot + if (profile != EEsProfile) { + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + 
BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + symbolTable.setFunctionExtensions("subgroupBarrier", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupMemoryBarrier", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupMemoryBarrierBuffer", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupMemoryBarrierImage", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupElect", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupAll", 1, &E_GL_KHR_shader_subgroup_vote); + symbolTable.setFunctionExtensions("subgroupAny", 1, &E_GL_KHR_shader_subgroup_vote); + symbolTable.setFunctionExtensions("subgroupAllEqual", 1, &E_GL_KHR_shader_subgroup_vote); + symbolTable.setFunctionExtensions("subgroupBroadcast", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBroadcastFirst", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallot", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupInverseBallot", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotBitExtract", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotBitCount", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotInclusiveBitCount", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotExclusiveBitCount", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotFindLSB", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotFindMSB", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupShuffle", 1, &E_GL_KHR_shader_subgroup_shuffle); + symbolTable.setFunctionExtensions("subgroupShuffleXor", 1, &E_GL_KHR_shader_subgroup_shuffle); + symbolTable.setFunctionExtensions("subgroupShuffleUp", 1, &E_GL_KHR_shader_subgroup_shuffle_relative); + symbolTable.setFunctionExtensions("subgroupShuffleDown", 1, &E_GL_KHR_shader_subgroup_shuffle_relative); + symbolTable.setFunctionExtensions("subgroupAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupMul", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupMin", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupMax", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupOr", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupXor", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveMul", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveMin", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveMax", 1, 
&E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveOr", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveXor", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveMul", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveMin", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveMax", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveOr", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveXor", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupClusteredAdd", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredMul", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredMin", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredMax", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredAnd", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredOr", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredXor", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupQuadBroadcast", 1, &E_GL_KHR_shader_subgroup_quad); + symbolTable.setFunctionExtensions("subgroupQuadSwapHorizontal", 1, &E_GL_KHR_shader_subgroup_quad); + symbolTable.setFunctionExtensions("subgroupQuadSwapVertical", 1, &E_GL_KHR_shader_subgroup_quad); + symbolTable.setFunctionExtensions("subgroupQuadSwapDiagonal", 1, &E_GL_KHR_shader_subgroup_quad); + symbolTable.setFunctionExtensions("subgroupPartitionNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedAddNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedMulNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedMinNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedAndNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedOrNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedXorNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveAddNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMulNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMinNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveAndNV", 1, 
&E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveOrNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveXorNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveAddNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMulNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMinNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveAndNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveOrNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveXorNV", 1, &E_GL_NV_shader_subgroup_partitioned); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + + if (profile == EEsProfile) { + symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers); + symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers); + } + + if (spvVersion.vulkan > 0) { + symbolTable.setVariableExtensions("gl_ScopeDevice", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_ScopeWorkgroup", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_ScopeSubgroup", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_ScopeInvocation", 1, &E_GL_KHR_memory_scope_semantics); + + symbolTable.setVariableExtensions("gl_SemanticsRelaxed", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsAcquire", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsRelease", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsAcquireRelease", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsMakeAvailable", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsMakeVisible", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsVolatile", 1, &E_GL_KHR_memory_scope_semantics); + + symbolTable.setVariableExtensions("gl_StorageSemanticsNone", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_StorageSemanticsBuffer", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_StorageSemanticsShared", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_StorageSemanticsImage", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_StorageSemanticsOutput", 1, &E_GL_KHR_memory_scope_semantics); + } + + 
symbolTable.setFunctionExtensions("helperInvocationEXT", 1, &E_GL_EXT_demote_to_helper_invocation); +#endif + break; + + case EShLangCompute: + BuiltInVariable("gl_NumWorkGroups", EbvNumWorkGroups, symbolTable); + BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable); + BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable); + BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable); + BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable); + BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable); + +#ifndef GLSLANG_WEB + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview); + } + + if (profile != EEsProfile && version < 430) { + symbolTable.setVariableExtensions("gl_NumWorkGroups", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_ARB_compute_shader); + + symbolTable.setVariableExtensions("gl_MaxComputeWorkGroupCount", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeWorkGroupSize", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeUniformComponents", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeTextureImageUnits", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeImageUniforms", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeAtomicCounters", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeAtomicCounterBuffers", 1, &E_GL_ARB_compute_shader); + + symbolTable.setFunctionExtensions("barrier", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("memoryBarrierAtomicCounter", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("memoryBarrierBuffer", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("memoryBarrierImage", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_ARB_compute_shader); + } + + + symbolTable.setFunctionExtensions("controlBarrier", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setFunctionExtensions("debugPrintfEXT", 1, &E_GL_EXT_debug_printf); + + // GL_ARB_shader_ballot + if (profile != EEsProfile) { + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, 
&E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic); + + BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable); + BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable); + + symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic); + } + + { + const char *coopExt[2] = { E_GL_NV_cooperative_matrix, E_GL_NV_integer_cooperative_matrix }; + symbolTable.setFunctionExtensions("coopMatLoadNV", 2, coopExt); + symbolTable.setFunctionExtensions("coopMatStoreNV", 
2, coopExt); + symbolTable.setFunctionExtensions("coopMatMulAddNV", 2, coopExt); + } + + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + symbolTable.setFunctionExtensions("dFdx", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdy", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("fwidth", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdxFine", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdyFine", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("fwidthFine", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdxCoarse", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_NV_compute_shader_derivatives); + } +#endif + break; + +#ifndef GLSLANG_WEB + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + if (profile != EEsProfile && version >= 460) { + const char *rtexts[] = { E_GL_NV_ray_tracing, E_GL_EXT_ray_tracing }; + symbolTable.setVariableExtensions("gl_LaunchIDNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_LaunchIDEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_LaunchSizeNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_LaunchSizeEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_PrimitiveID", 2, rtexts); + symbolTable.setVariableExtensions("gl_InstanceID", 2, rtexts); + symbolTable.setVariableExtensions("gl_InstanceCustomIndexNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_InstanceCustomIndexEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_GeometryIndexEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldRayOriginNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldRayOriginEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldRayDirectionNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldRayDirectionEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectRayOriginNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectRayOriginEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectRayDirectionNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectRayDirectionEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_RayTminNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_RayTminEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_RayTmaxNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_RayTmaxEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_HitTNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_HitTEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_HitKindNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_HitKindEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectToWorldNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectToWorldEXT", 1, &E_GL_EXT_ray_tracing); + 
symbolTable.setVariableExtensions("gl_ObjectToWorld3x4EXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldToObjectNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldToObjectEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldToObject3x4EXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_IncomingRayFlagsNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_IncomingRayFlagsEXT", 1, &E_GL_EXT_ray_tracing); + + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + + + symbolTable.setFunctionExtensions("traceNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("traceRayEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setFunctionExtensions("reportIntersectionNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("reportIntersectionEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setFunctionExtensions("ignoreIntersectionNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("ignoreIntersectionEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setFunctionExtensions("terminateRayNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("terminateRayEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setFunctionExtensions("executeCallableNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("executeCallableEXT", 1, &E_GL_EXT_ray_tracing); + + + BuiltInVariable("gl_LaunchIDNV", EbvLaunchId, symbolTable); + BuiltInVariable("gl_LaunchIDEXT", EbvLaunchId, symbolTable); + BuiltInVariable("gl_LaunchSizeNV", EbvLaunchSize, symbolTable); + BuiltInVariable("gl_LaunchSizeEXT", EbvLaunchSize, symbolTable); + BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable); + BuiltInVariable("gl_InstanceID", EbvInstanceId, symbolTable); + BuiltInVariable("gl_InstanceCustomIndexNV", EbvInstanceCustomIndex,symbolTable); + BuiltInVariable("gl_InstanceCustomIndexEXT", EbvInstanceCustomIndex,symbolTable); + BuiltInVariable("gl_GeometryIndexEXT", EbvGeometryIndex, symbolTable); + BuiltInVariable("gl_WorldRayOriginNV", EbvWorldRayOrigin, symbolTable); + BuiltInVariable("gl_WorldRayOriginEXT", EbvWorldRayOrigin, symbolTable); + BuiltInVariable("gl_WorldRayDirectionNV", EbvWorldRayDirection, symbolTable); + BuiltInVariable("gl_WorldRayDirectionEXT", EbvWorldRayDirection, symbolTable); + BuiltInVariable("gl_ObjectRayOriginNV", EbvObjectRayOrigin, symbolTable); + BuiltInVariable("gl_ObjectRayOriginEXT", EbvObjectRayOrigin, symbolTable); + BuiltInVariable("gl_ObjectRayDirectionNV", EbvObjectRayDirection, symbolTable); + BuiltInVariable("gl_ObjectRayDirectionEXT", EbvObjectRayDirection, symbolTable); + BuiltInVariable("gl_RayTminNV", EbvRayTmin, symbolTable); + BuiltInVariable("gl_RayTminEXT", EbvRayTmin, symbolTable); + BuiltInVariable("gl_RayTmaxNV", EbvRayTmax, symbolTable); + BuiltInVariable("gl_RayTmaxEXT", EbvRayTmax, symbolTable); + BuiltInVariable("gl_HitTNV", EbvHitT, symbolTable); + BuiltInVariable("gl_HitTEXT", EbvHitT, symbolTable); + BuiltInVariable("gl_HitKindNV", EbvHitKind, symbolTable); + BuiltInVariable("gl_HitKindEXT", EbvHitKind, symbolTable); + BuiltInVariable("gl_ObjectToWorldNV", EbvObjectToWorld, symbolTable); + BuiltInVariable("gl_ObjectToWorldEXT", EbvObjectToWorld, symbolTable); + BuiltInVariable("gl_ObjectToWorld3x4EXT", EbvObjectToWorld3x4, symbolTable); + BuiltInVariable("gl_WorldToObjectNV", EbvWorldToObject, symbolTable); + BuiltInVariable("gl_WorldToObjectEXT", EbvWorldToObject, symbolTable); + 
BuiltInVariable("gl_WorldToObject3x4EXT", EbvWorldToObject3x4, symbolTable); + BuiltInVariable("gl_IncomingRayFlagsNV", EbvIncomingRayFlags, symbolTable); + BuiltInVariable("gl_IncomingRayFlagsEXT", EbvIncomingRayFlags, symbolTable); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + + // GL_ARB_shader_ballot + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + + // GL_KHR_shader_subgroup + symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable); + BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable); + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 
1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + break; + + case EShLangMeshNV: + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + // per-vertex builtins + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_Position", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_PointSize", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_ClipDistance", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_CullDistance", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_MeshVerticesNV", "gl_Position", EbvPosition, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_PointSize", EbvPointSize, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_CullDistance", EbvCullDistance, symbolTable); + + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_PositionPerViewNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_ClipDistancePerViewNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_CullDistancePerViewNV", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_MeshVerticesNV", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_ClipDistancePerViewNV", EbvClipDistancePerViewNV, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_CullDistancePerViewNV", EbvCullDistancePerViewNV, symbolTable); + + // per-primitive builtins + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_PrimitiveID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_Layer", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportIndex", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportMask", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_MeshPrimitivesNV", "gl_PrimitiveID", EbvPrimitiveId, symbolTable); + BuiltInVariable("gl_MeshPrimitivesNV", "gl_Layer", EbvLayer, symbolTable); + BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportIndex", EbvViewportIndex, symbolTable); + BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportMask", EbvViewportMaskNV, symbolTable); + + // per-view per-primitive builtins + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_LayerPerViewNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportMaskPerViewNV", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_MeshPrimitivesNV", "gl_LayerPerViewNV", EbvLayerPerViewNV, symbolTable); + BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable); + + // other builtins + symbolTable.setVariableExtensions("gl_PrimitiveCountNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_PrimitiveIndicesNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshViewCountNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshViewIndicesNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_NV_mesh_shader); + 
symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_PrimitiveCountNV", EbvPrimitiveCountNV, symbolTable); + BuiltInVariable("gl_PrimitiveIndicesNV", EbvPrimitiveIndicesNV, symbolTable); + BuiltInVariable("gl_MeshViewCountNV", EbvMeshViewCountNV, symbolTable); + BuiltInVariable("gl_MeshViewIndicesNV", EbvMeshViewIndicesNV, symbolTable); + BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable); + BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable); + BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable); + BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable); + BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable); + + // builtin constants + symbolTable.setVariableExtensions("gl_MaxMeshOutputVerticesNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MaxMeshOutputPrimitivesNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MaxMeshWorkGroupSizeNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MaxMeshViewCountNV", 1, &E_GL_NV_mesh_shader); + + // builtin functions + symbolTable.setFunctionExtensions("barrier", 1, &E_GL_NV_mesh_shader); + symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_NV_mesh_shader); + symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_NV_mesh_shader); + } + + if (profile != EEsProfile && version >= 450) { + // GL_EXT_device_group + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + + // GL_ARB_shader_draw_parameters + symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters); + BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable); + if (version >= 460) { + BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable); + } + + // GL_ARB_shader_ballot + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, 
symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable); + BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable); + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + break; + + case EShLangTaskNV: + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + symbolTable.setVariableExtensions("gl_TaskCountNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshViewCountNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshViewIndicesNV", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_TaskCountNV", EbvTaskCountNV, symbolTable); + BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable); + BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable); + BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable); + BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable); + 
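A matching task-stage sketch for the `gl_TaskCountNV` built-in registered above (the launch count of 4 is arbitrary):

```glsl
#version 450
#extension GL_NV_mesh_shader : require

layout(local_size_x = 32) in;

void main()
{
    // gl_TaskCountNV (bound to EbvTaskCountNV above) tells the pipeline how
    // many mesh-shader workgroups this task workgroup launches.
    if (gl_LocalInvocationID.x == 0u)
        gl_TaskCountNV = 4u;
}
```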
BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable); + BuiltInVariable("gl_MeshViewCountNV", EbvMeshViewCountNV, symbolTable); + BuiltInVariable("gl_MeshViewIndicesNV", EbvMeshViewIndicesNV, symbolTable); + + symbolTable.setVariableExtensions("gl_MaxTaskWorkGroupSizeNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MaxMeshViewCountNV", 1, &E_GL_NV_mesh_shader); + + symbolTable.setFunctionExtensions("barrier", 1, &E_GL_NV_mesh_shader); + symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_NV_mesh_shader); + symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_NV_mesh_shader); + } + + if (profile != EEsProfile && version >= 450) { + // GL_EXT_device_group + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + + // GL_ARB_shader_draw_parameters + symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters); + BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable); + if (version >= 460) { + BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable); + } + + // GL_ARB_shader_ballot + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + 
BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable); + BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable); + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + break; +#endif + + default: + assert(false && "Language not supported"); + break; + } + + // + // Next, identify which built-ins have a mapping to an operator. + // If PureOperatorBuiltins is false, those that are not identified as such are + // expected to be resolved through a library of functions, versus as + // operations. + // + + relateTabledBuiltins(version, profile, spvVersion, language, symbolTable); + +#ifndef GLSLANG_WEB + symbolTable.relateToOperator("doubleBitsToInt64", EOpDoubleBitsToInt64); + symbolTable.relateToOperator("doubleBitsToUint64", EOpDoubleBitsToUint64); + symbolTable.relateToOperator("int64BitsToDouble", EOpInt64BitsToDouble); + symbolTable.relateToOperator("uint64BitsToDouble", EOpUint64BitsToDouble); + symbolTable.relateToOperator("halfBitsToInt16", EOpFloat16BitsToInt16); + symbolTable.relateToOperator("halfBitsToUint16", EOpFloat16BitsToUint16); + symbolTable.relateToOperator("float16BitsToInt16", EOpFloat16BitsToInt16); + symbolTable.relateToOperator("float16BitsToUint16", EOpFloat16BitsToUint16); + symbolTable.relateToOperator("int16BitsToFloat16", EOpInt16BitsToFloat16); + symbolTable.relateToOperator("uint16BitsToFloat16", EOpUint16BitsToFloat16); + + symbolTable.relateToOperator("int16BitsToHalf", EOpInt16BitsToFloat16); + symbolTable.relateToOperator("uint16BitsToHalf", EOpUint16BitsToFloat16); + + symbolTable.relateToOperator("packSnorm4x8", EOpPackSnorm4x8); + symbolTable.relateToOperator("unpackSnorm4x8", EOpUnpackSnorm4x8); + symbolTable.relateToOperator("packUnorm4x8", EOpPackUnorm4x8); + symbolTable.relateToOperator("unpackUnorm4x8", EOpUnpackUnorm4x8); + + symbolTable.relateToOperator("packDouble2x32", EOpPackDouble2x32); + symbolTable.relateToOperator("unpackDouble2x32", EOpUnpackDouble2x32); + + symbolTable.relateToOperator("packInt2x32", EOpPackInt2x32); + symbolTable.relateToOperator("unpackInt2x32", EOpUnpackInt2x32); + symbolTable.relateToOperator("packUint2x32", EOpPackUint2x32); + symbolTable.relateToOperator("unpackUint2x32", EOpUnpackUint2x32); + + symbolTable.relateToOperator("packInt2x16", EOpPackInt2x16); + symbolTable.relateToOperator("unpackInt2x16", EOpUnpackInt2x16); + 
symbolTable.relateToOperator("packUint2x16", EOpPackUint2x16); + symbolTable.relateToOperator("unpackUint2x16", EOpUnpackUint2x16); + + symbolTable.relateToOperator("packInt4x16", EOpPackInt4x16); + symbolTable.relateToOperator("unpackInt4x16", EOpUnpackInt4x16); + symbolTable.relateToOperator("packUint4x16", EOpPackUint4x16); + symbolTable.relateToOperator("unpackUint4x16", EOpUnpackUint4x16); + symbolTable.relateToOperator("packFloat2x16", EOpPackFloat2x16); + symbolTable.relateToOperator("unpackFloat2x16", EOpUnpackFloat2x16); + + symbolTable.relateToOperator("pack16", EOpPack16); + symbolTable.relateToOperator("pack32", EOpPack32); + symbolTable.relateToOperator("pack64", EOpPack64); + + symbolTable.relateToOperator("unpack32", EOpUnpack32); + symbolTable.relateToOperator("unpack16", EOpUnpack16); + symbolTable.relateToOperator("unpack8", EOpUnpack8); + + symbolTable.relateToOperator("controlBarrier", EOpBarrier); + symbolTable.relateToOperator("memoryBarrierAtomicCounter", EOpMemoryBarrierAtomicCounter); + symbolTable.relateToOperator("memoryBarrierImage", EOpMemoryBarrierImage); + + symbolTable.relateToOperator("atomicLoad", EOpAtomicLoad); + symbolTable.relateToOperator("atomicStore", EOpAtomicStore); + + symbolTable.relateToOperator("atomicCounterIncrement", EOpAtomicCounterIncrement); + symbolTable.relateToOperator("atomicCounterDecrement", EOpAtomicCounterDecrement); + symbolTable.relateToOperator("atomicCounter", EOpAtomicCounter); + + symbolTable.relateToOperator("clockARB", EOpReadClockSubgroupKHR); + symbolTable.relateToOperator("clock2x32ARB", EOpReadClockSubgroupKHR); + + symbolTable.relateToOperator("clockRealtimeEXT", EOpReadClockDeviceKHR); + symbolTable.relateToOperator("clockRealtime2x32EXT", EOpReadClockDeviceKHR); + + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("atomicCounterAdd", EOpAtomicCounterAdd); + symbolTable.relateToOperator("atomicCounterSubtract", EOpAtomicCounterSubtract); + symbolTable.relateToOperator("atomicCounterMin", EOpAtomicCounterMin); + symbolTable.relateToOperator("atomicCounterMax", EOpAtomicCounterMax); + symbolTable.relateToOperator("atomicCounterAnd", EOpAtomicCounterAnd); + symbolTable.relateToOperator("atomicCounterOr", EOpAtomicCounterOr); + symbolTable.relateToOperator("atomicCounterXor", EOpAtomicCounterXor); + symbolTable.relateToOperator("atomicCounterExchange", EOpAtomicCounterExchange); + symbolTable.relateToOperator("atomicCounterCompSwap", EOpAtomicCounterCompSwap); + } + + symbolTable.relateToOperator("fma", EOpFma); + symbolTable.relateToOperator("frexp", EOpFrexp); + symbolTable.relateToOperator("ldexp", EOpLdexp); + symbolTable.relateToOperator("uaddCarry", EOpAddCarry); + symbolTable.relateToOperator("usubBorrow", EOpSubBorrow); + symbolTable.relateToOperator("umulExtended", EOpUMulExtended); + symbolTable.relateToOperator("imulExtended", EOpIMulExtended); + symbolTable.relateToOperator("bitfieldExtract", EOpBitfieldExtract); + symbolTable.relateToOperator("bitfieldInsert", EOpBitfieldInsert); + symbolTable.relateToOperator("bitfieldReverse", EOpBitFieldReverse); + symbolTable.relateToOperator("bitCount", EOpBitCount); + symbolTable.relateToOperator("findLSB", EOpFindLSB); + symbolTable.relateToOperator("findMSB", EOpFindMSB); + + symbolTable.relateToOperator("helperInvocationEXT", EOpIsHelperInvocation); + + symbolTable.relateToOperator("countLeadingZeros", EOpCountLeadingZeros); + symbolTable.relateToOperator("countTrailingZeros", EOpCountTrailingZeros); + 
symbolTable.relateToOperator("absoluteDifference", EOpAbsDifference); + symbolTable.relateToOperator("addSaturate", EOpAddSaturate); + symbolTable.relateToOperator("subtractSaturate", EOpSubSaturate); + symbolTable.relateToOperator("average", EOpAverage); + symbolTable.relateToOperator("averageRounded", EOpAverageRounded); + symbolTable.relateToOperator("multiply32x16", EOpMul32x16); + symbolTable.relateToOperator("debugPrintfEXT", EOpDebugPrintf); + + + if (PureOperatorBuiltins) { + symbolTable.relateToOperator("imageSize", EOpImageQuerySize); + symbolTable.relateToOperator("imageSamples", EOpImageQuerySamples); + symbolTable.relateToOperator("imageLoad", EOpImageLoad); + symbolTable.relateToOperator("imageStore", EOpImageStore); + symbolTable.relateToOperator("imageAtomicAdd", EOpImageAtomicAdd); + symbolTable.relateToOperator("imageAtomicMin", EOpImageAtomicMin); + symbolTable.relateToOperator("imageAtomicMax", EOpImageAtomicMax); + symbolTable.relateToOperator("imageAtomicAnd", EOpImageAtomicAnd); + symbolTable.relateToOperator("imageAtomicOr", EOpImageAtomicOr); + symbolTable.relateToOperator("imageAtomicXor", EOpImageAtomicXor); + symbolTable.relateToOperator("imageAtomicExchange", EOpImageAtomicExchange); + symbolTable.relateToOperator("imageAtomicCompSwap", EOpImageAtomicCompSwap); + symbolTable.relateToOperator("imageAtomicLoad", EOpImageAtomicLoad); + symbolTable.relateToOperator("imageAtomicStore", EOpImageAtomicStore); + + symbolTable.relateToOperator("subpassLoad", EOpSubpassLoad); + symbolTable.relateToOperator("subpassLoadMS", EOpSubpassLoadMS); + + symbolTable.relateToOperator("textureGather", EOpTextureGather); + symbolTable.relateToOperator("textureGatherOffset", EOpTextureGatherOffset); + symbolTable.relateToOperator("textureGatherOffsets", EOpTextureGatherOffsets); + + symbolTable.relateToOperator("noise1", EOpNoise); + symbolTable.relateToOperator("noise2", EOpNoise); + symbolTable.relateToOperator("noise3", EOpNoise); + symbolTable.relateToOperator("noise4", EOpNoise); + + symbolTable.relateToOperator("textureFootprintNV", EOpImageSampleFootprintNV); + symbolTable.relateToOperator("textureFootprintClampNV", EOpImageSampleFootprintClampNV); + symbolTable.relateToOperator("textureFootprintLodNV", EOpImageSampleFootprintLodNV); + symbolTable.relateToOperator("textureFootprintGradNV", EOpImageSampleFootprintGradNV); + symbolTable.relateToOperator("textureFootprintGradClampNV", EOpImageSampleFootprintGradClampNV); + + if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) + symbolTable.relateToOperator("ftransform", EOpFtransform); + + if (spvVersion.spv == 0 && (IncludeLegacy(version, profile, spvVersion) || + (profile == EEsProfile && version == 100))) { + + symbolTable.relateToOperator("texture1D", EOpTexture); + symbolTable.relateToOperator("texture1DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("texture1DProj", EOpTextureProj); + symbolTable.relateToOperator("texture1DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("texture1DLod", EOpTextureLod); + symbolTable.relateToOperator("texture1DProjLod", EOpTextureProjLod); + + symbolTable.relateToOperator("texture2DRect", EOpTexture); + symbolTable.relateToOperator("texture2DRectProj", EOpTextureProj); + symbolTable.relateToOperator("texture2DRectGradARB", EOpTextureGrad); + symbolTable.relateToOperator("texture2DRectProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("shadow2DRect", EOpTexture); + symbolTable.relateToOperator("shadow2DRectProj", EOpTextureProj); + 
symbolTable.relateToOperator("shadow2DRectGradARB", EOpTextureGrad); + symbolTable.relateToOperator("shadow2DRectProjGradARB", EOpTextureProjGrad); + + symbolTable.relateToOperator("texture2D", EOpTexture); + symbolTable.relateToOperator("texture2DProj", EOpTextureProj); + symbolTable.relateToOperator("texture2DGradEXT", EOpTextureGrad); + symbolTable.relateToOperator("texture2DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("texture2DProjGradEXT", EOpTextureProjGrad); + symbolTable.relateToOperator("texture2DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("texture2DLod", EOpTextureLod); + symbolTable.relateToOperator("texture2DLodEXT", EOpTextureLod); + symbolTable.relateToOperator("texture2DProjLod", EOpTextureProjLod); + symbolTable.relateToOperator("texture2DProjLodEXT", EOpTextureProjLod); + + symbolTable.relateToOperator("texture3D", EOpTexture); + symbolTable.relateToOperator("texture3DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("texture3DProj", EOpTextureProj); + symbolTable.relateToOperator("texture3DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("texture3DLod", EOpTextureLod); + symbolTable.relateToOperator("texture3DProjLod", EOpTextureProjLod); + symbolTable.relateToOperator("textureCube", EOpTexture); + symbolTable.relateToOperator("textureCubeGradEXT", EOpTextureGrad); + symbolTable.relateToOperator("textureCubeGradARB", EOpTextureGrad); + symbolTable.relateToOperator("textureCubeLod", EOpTextureLod); + symbolTable.relateToOperator("textureCubeLodEXT", EOpTextureLod); + symbolTable.relateToOperator("shadow1D", EOpTexture); + symbolTable.relateToOperator("shadow1DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("shadow2D", EOpTexture); + symbolTable.relateToOperator("shadow2DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("shadow1DProj", EOpTextureProj); + symbolTable.relateToOperator("shadow2DProj", EOpTextureProj); + symbolTable.relateToOperator("shadow1DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("shadow2DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("shadow1DLod", EOpTextureLod); + symbolTable.relateToOperator("shadow2DLod", EOpTextureLod); + symbolTable.relateToOperator("shadow1DProjLod", EOpTextureProjLod); + symbolTable.relateToOperator("shadow2DProjLod", EOpTextureProjLod); + } + + if (profile != EEsProfile) { + symbolTable.relateToOperator("sparseTextureARB", EOpSparseTexture); + symbolTable.relateToOperator("sparseTextureLodARB", EOpSparseTextureLod); + symbolTable.relateToOperator("sparseTextureOffsetARB", EOpSparseTextureOffset); + symbolTable.relateToOperator("sparseTexelFetchARB", EOpSparseTextureFetch); + symbolTable.relateToOperator("sparseTexelFetchOffsetARB", EOpSparseTextureFetchOffset); + symbolTable.relateToOperator("sparseTextureLodOffsetARB", EOpSparseTextureLodOffset); + symbolTable.relateToOperator("sparseTextureGradARB", EOpSparseTextureGrad); + symbolTable.relateToOperator("sparseTextureGradOffsetARB", EOpSparseTextureGradOffset); + symbolTable.relateToOperator("sparseTextureGatherARB", EOpSparseTextureGather); + symbolTable.relateToOperator("sparseTextureGatherOffsetARB", EOpSparseTextureGatherOffset); + symbolTable.relateToOperator("sparseTextureGatherOffsetsARB", EOpSparseTextureGatherOffsets); + symbolTable.relateToOperator("sparseImageLoadARB", EOpSparseImageLoad); + symbolTable.relateToOperator("sparseTexelsResidentARB", EOpSparseTexelsResident); + + symbolTable.relateToOperator("sparseTextureClampARB", 
EOpSparseTextureClamp); + symbolTable.relateToOperator("sparseTextureOffsetClampARB", EOpSparseTextureOffsetClamp); + symbolTable.relateToOperator("sparseTextureGradClampARB", EOpSparseTextureGradClamp); + symbolTable.relateToOperator("sparseTextureGradOffsetClampARB", EOpSparseTextureGradOffsetClamp); + symbolTable.relateToOperator("textureClampARB", EOpTextureClamp); + symbolTable.relateToOperator("textureOffsetClampARB", EOpTextureOffsetClamp); + symbolTable.relateToOperator("textureGradClampARB", EOpTextureGradClamp); + symbolTable.relateToOperator("textureGradOffsetClampARB", EOpTextureGradOffsetClamp); + + symbolTable.relateToOperator("ballotARB", EOpBallot); + symbolTable.relateToOperator("readInvocationARB", EOpReadInvocation); + symbolTable.relateToOperator("readFirstInvocationARB", EOpReadFirstInvocation); + + if (version >= 430) { + symbolTable.relateToOperator("anyInvocationARB", EOpAnyInvocation); + symbolTable.relateToOperator("allInvocationsARB", EOpAllInvocations); + symbolTable.relateToOperator("allInvocationsEqualARB", EOpAllInvocationsEqual); + } + if (version >= 460) { + symbolTable.relateToOperator("anyInvocation", EOpAnyInvocation); + symbolTable.relateToOperator("allInvocations", EOpAllInvocations); + symbolTable.relateToOperator("allInvocationsEqual", EOpAllInvocationsEqual); + } + symbolTable.relateToOperator("minInvocationsAMD", EOpMinInvocations); + symbolTable.relateToOperator("maxInvocationsAMD", EOpMaxInvocations); + symbolTable.relateToOperator("addInvocationsAMD", EOpAddInvocations); + symbolTable.relateToOperator("minInvocationsNonUniformAMD", EOpMinInvocationsNonUniform); + symbolTable.relateToOperator("maxInvocationsNonUniformAMD", EOpMaxInvocationsNonUniform); + symbolTable.relateToOperator("addInvocationsNonUniformAMD", EOpAddInvocationsNonUniform); + symbolTable.relateToOperator("minInvocationsInclusiveScanAMD", EOpMinInvocationsInclusiveScan); + symbolTable.relateToOperator("maxInvocationsInclusiveScanAMD", EOpMaxInvocationsInclusiveScan); + symbolTable.relateToOperator("addInvocationsInclusiveScanAMD", EOpAddInvocationsInclusiveScan); + symbolTable.relateToOperator("minInvocationsInclusiveScanNonUniformAMD", EOpMinInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("maxInvocationsInclusiveScanNonUniformAMD", EOpMaxInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("addInvocationsInclusiveScanNonUniformAMD", EOpAddInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("minInvocationsExclusiveScanAMD", EOpMinInvocationsExclusiveScan); + symbolTable.relateToOperator("maxInvocationsExclusiveScanAMD", EOpMaxInvocationsExclusiveScan); + symbolTable.relateToOperator("addInvocationsExclusiveScanAMD", EOpAddInvocationsExclusiveScan); + symbolTable.relateToOperator("minInvocationsExclusiveScanNonUniformAMD", EOpMinInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("maxInvocationsExclusiveScanNonUniformAMD", EOpMaxInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("addInvocationsExclusiveScanNonUniformAMD", EOpAddInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("swizzleInvocationsAMD", EOpSwizzleInvocations); + symbolTable.relateToOperator("swizzleInvocationsMaskedAMD", EOpSwizzleInvocationsMasked); + symbolTable.relateToOperator("writeInvocationAMD", EOpWriteInvocation); + symbolTable.relateToOperator("mbcntAMD", EOpMbcnt); + + symbolTable.relateToOperator("min3", EOpMin3); + symbolTable.relateToOperator("max3", EOpMax3); + symbolTable.relateToOperator("mid3", 
EOpMid3); + + symbolTable.relateToOperator("cubeFaceIndexAMD", EOpCubeFaceIndex); + symbolTable.relateToOperator("cubeFaceCoordAMD", EOpCubeFaceCoord); + symbolTable.relateToOperator("timeAMD", EOpTime); + + symbolTable.relateToOperator("textureGatherLodAMD", EOpTextureGatherLod); + symbolTable.relateToOperator("textureGatherLodOffsetAMD", EOpTextureGatherLodOffset); + symbolTable.relateToOperator("textureGatherLodOffsetsAMD", EOpTextureGatherLodOffsets); + symbolTable.relateToOperator("sparseTextureGatherLodAMD", EOpSparseTextureGatherLod); + symbolTable.relateToOperator("sparseTextureGatherLodOffsetAMD", EOpSparseTextureGatherLodOffset); + symbolTable.relateToOperator("sparseTextureGatherLodOffsetsAMD", EOpSparseTextureGatherLodOffsets); + + symbolTable.relateToOperator("imageLoadLodAMD", EOpImageLoadLod); + symbolTable.relateToOperator("imageStoreLodAMD", EOpImageStoreLod); + symbolTable.relateToOperator("sparseImageLoadLodAMD", EOpSparseImageLoadLod); + + symbolTable.relateToOperator("fragmentMaskFetchAMD", EOpFragmentMaskFetch); + symbolTable.relateToOperator("fragmentFetchAMD", EOpFragmentFetch); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.relateToOperator("subgroupBarrier", EOpSubgroupBarrier); + symbolTable.relateToOperator("subgroupMemoryBarrier", EOpSubgroupMemoryBarrier); + symbolTable.relateToOperator("subgroupMemoryBarrierBuffer", EOpSubgroupMemoryBarrierBuffer); + symbolTable.relateToOperator("subgroupMemoryBarrierImage", EOpSubgroupMemoryBarrierImage); + symbolTable.relateToOperator("subgroupElect", EOpSubgroupElect); + symbolTable.relateToOperator("subgroupAll", EOpSubgroupAll); + symbolTable.relateToOperator("subgroupAny", EOpSubgroupAny); + symbolTable.relateToOperator("subgroupAllEqual", EOpSubgroupAllEqual); + symbolTable.relateToOperator("subgroupBroadcast", EOpSubgroupBroadcast); + symbolTable.relateToOperator("subgroupBroadcastFirst", EOpSubgroupBroadcastFirst); + symbolTable.relateToOperator("subgroupBallot", EOpSubgroupBallot); + symbolTable.relateToOperator("subgroupInverseBallot", EOpSubgroupInverseBallot); + symbolTable.relateToOperator("subgroupBallotBitExtract", EOpSubgroupBallotBitExtract); + symbolTable.relateToOperator("subgroupBallotBitCount", EOpSubgroupBallotBitCount); + symbolTable.relateToOperator("subgroupBallotInclusiveBitCount", EOpSubgroupBallotInclusiveBitCount); + symbolTable.relateToOperator("subgroupBallotExclusiveBitCount", EOpSubgroupBallotExclusiveBitCount); + symbolTable.relateToOperator("subgroupBallotFindLSB", EOpSubgroupBallotFindLSB); + symbolTable.relateToOperator("subgroupBallotFindMSB", EOpSubgroupBallotFindMSB); + symbolTable.relateToOperator("subgroupShuffle", EOpSubgroupShuffle); + symbolTable.relateToOperator("subgroupShuffleXor", EOpSubgroupShuffleXor); + symbolTable.relateToOperator("subgroupShuffleUp", EOpSubgroupShuffleUp); + symbolTable.relateToOperator("subgroupShuffleDown", EOpSubgroupShuffleDown); + symbolTable.relateToOperator("subgroupAdd", EOpSubgroupAdd); + symbolTable.relateToOperator("subgroupMul", EOpSubgroupMul); + symbolTable.relateToOperator("subgroupMin", EOpSubgroupMin); + symbolTable.relateToOperator("subgroupMax", EOpSubgroupMax); + symbolTable.relateToOperator("subgroupAnd", EOpSubgroupAnd); + symbolTable.relateToOperator("subgroupOr", EOpSubgroupOr); + symbolTable.relateToOperator("subgroupXor", EOpSubgroupXor); + symbolTable.relateToOperator("subgroupInclusiveAdd", EOpSubgroupInclusiveAdd); + 
symbolTable.relateToOperator("subgroupInclusiveMul", EOpSubgroupInclusiveMul); + symbolTable.relateToOperator("subgroupInclusiveMin", EOpSubgroupInclusiveMin); + symbolTable.relateToOperator("subgroupInclusiveMax", EOpSubgroupInclusiveMax); + symbolTable.relateToOperator("subgroupInclusiveAnd", EOpSubgroupInclusiveAnd); + symbolTable.relateToOperator("subgroupInclusiveOr", EOpSubgroupInclusiveOr); + symbolTable.relateToOperator("subgroupInclusiveXor", EOpSubgroupInclusiveXor); + symbolTable.relateToOperator("subgroupExclusiveAdd", EOpSubgroupExclusiveAdd); + symbolTable.relateToOperator("subgroupExclusiveMul", EOpSubgroupExclusiveMul); + symbolTable.relateToOperator("subgroupExclusiveMin", EOpSubgroupExclusiveMin); + symbolTable.relateToOperator("subgroupExclusiveMax", EOpSubgroupExclusiveMax); + symbolTable.relateToOperator("subgroupExclusiveAnd", EOpSubgroupExclusiveAnd); + symbolTable.relateToOperator("subgroupExclusiveOr", EOpSubgroupExclusiveOr); + symbolTable.relateToOperator("subgroupExclusiveXor", EOpSubgroupExclusiveXor); + symbolTable.relateToOperator("subgroupClusteredAdd", EOpSubgroupClusteredAdd); + symbolTable.relateToOperator("subgroupClusteredMul", EOpSubgroupClusteredMul); + symbolTable.relateToOperator("subgroupClusteredMin", EOpSubgroupClusteredMin); + symbolTable.relateToOperator("subgroupClusteredMax", EOpSubgroupClusteredMax); + symbolTable.relateToOperator("subgroupClusteredAnd", EOpSubgroupClusteredAnd); + symbolTable.relateToOperator("subgroupClusteredOr", EOpSubgroupClusteredOr); + symbolTable.relateToOperator("subgroupClusteredXor", EOpSubgroupClusteredXor); + symbolTable.relateToOperator("subgroupQuadBroadcast", EOpSubgroupQuadBroadcast); + symbolTable.relateToOperator("subgroupQuadSwapHorizontal", EOpSubgroupQuadSwapHorizontal); + symbolTable.relateToOperator("subgroupQuadSwapVertical", EOpSubgroupQuadSwapVertical); + symbolTable.relateToOperator("subgroupQuadSwapDiagonal", EOpSubgroupQuadSwapDiagonal); + + symbolTable.relateToOperator("subgroupPartitionNV", EOpSubgroupPartition); + symbolTable.relateToOperator("subgroupPartitionedAddNV", EOpSubgroupPartitionedAdd); + symbolTable.relateToOperator("subgroupPartitionedMulNV", EOpSubgroupPartitionedMul); + symbolTable.relateToOperator("subgroupPartitionedMinNV", EOpSubgroupPartitionedMin); + symbolTable.relateToOperator("subgroupPartitionedMaxNV", EOpSubgroupPartitionedMax); + symbolTable.relateToOperator("subgroupPartitionedAndNV", EOpSubgroupPartitionedAnd); + symbolTable.relateToOperator("subgroupPartitionedOrNV", EOpSubgroupPartitionedOr); + symbolTable.relateToOperator("subgroupPartitionedXorNV", EOpSubgroupPartitionedXor); + symbolTable.relateToOperator("subgroupPartitionedInclusiveAddNV", EOpSubgroupPartitionedInclusiveAdd); + symbolTable.relateToOperator("subgroupPartitionedInclusiveMulNV", EOpSubgroupPartitionedInclusiveMul); + symbolTable.relateToOperator("subgroupPartitionedInclusiveMinNV", EOpSubgroupPartitionedInclusiveMin); + symbolTable.relateToOperator("subgroupPartitionedInclusiveMaxNV", EOpSubgroupPartitionedInclusiveMax); + symbolTable.relateToOperator("subgroupPartitionedInclusiveAndNV", EOpSubgroupPartitionedInclusiveAnd); + symbolTable.relateToOperator("subgroupPartitionedInclusiveOrNV", EOpSubgroupPartitionedInclusiveOr); + symbolTable.relateToOperator("subgroupPartitionedInclusiveXorNV", EOpSubgroupPartitionedInclusiveXor); + symbolTable.relateToOperator("subgroupPartitionedExclusiveAddNV", EOpSubgroupPartitionedExclusiveAdd); + 
symbolTable.relateToOperator("subgroupPartitionedExclusiveMulNV", EOpSubgroupPartitionedExclusiveMul); + symbolTable.relateToOperator("subgroupPartitionedExclusiveMinNV", EOpSubgroupPartitionedExclusiveMin); + symbolTable.relateToOperator("subgroupPartitionedExclusiveMaxNV", EOpSubgroupPartitionedExclusiveMax); + symbolTable.relateToOperator("subgroupPartitionedExclusiveAndNV", EOpSubgroupPartitionedExclusiveAnd); + symbolTable.relateToOperator("subgroupPartitionedExclusiveOrNV", EOpSubgroupPartitionedExclusiveOr); + symbolTable.relateToOperator("subgroupPartitionedExclusiveXorNV", EOpSubgroupPartitionedExclusiveXor); + } + + if (profile == EEsProfile) { + symbolTable.relateToOperator("shadow2DEXT", EOpTexture); + symbolTable.relateToOperator("shadow2DProjEXT", EOpTextureProj); + } + } + + switch(language) { + case EShLangVertex: + break; + + case EShLangTessControl: + case EShLangTessEvaluation: + break; + + case EShLangGeometry: + symbolTable.relateToOperator("EmitStreamVertex", EOpEmitStreamVertex); + symbolTable.relateToOperator("EndStreamPrimitive", EOpEndStreamPrimitive); + symbolTable.relateToOperator("EmitVertex", EOpEmitVertex); + symbolTable.relateToOperator("EndPrimitive", EOpEndPrimitive); + break; + + case EShLangFragment: + if (profile != EEsProfile && version >= 400) { + symbolTable.relateToOperator("dFdxFine", EOpDPdxFine); + symbolTable.relateToOperator("dFdyFine", EOpDPdyFine); + symbolTable.relateToOperator("fwidthFine", EOpFwidthFine); + symbolTable.relateToOperator("dFdxCoarse", EOpDPdxCoarse); + symbolTable.relateToOperator("dFdyCoarse", EOpDPdyCoarse); + symbolTable.relateToOperator("fwidthCoarse", EOpFwidthCoarse); + } + + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("rayQueryInitializeEXT", EOpRayQueryInitialize); + symbolTable.relateToOperator("rayQueryTerminateEXT", EOpRayQueryTerminate); + symbolTable.relateToOperator("rayQueryGenerateIntersectionEXT", EOpRayQueryGenerateIntersection); + symbolTable.relateToOperator("rayQueryConfirmIntersectionEXT", EOpRayQueryConfirmIntersection); + symbolTable.relateToOperator("rayQueryProceedEXT", EOpRayQueryProceed); + symbolTable.relateToOperator("rayQueryGetIntersectionTypeEXT", EOpRayQueryGetIntersectionType); + symbolTable.relateToOperator("rayQueryGetRayTMinEXT", EOpRayQueryGetRayTMin); + symbolTable.relateToOperator("rayQueryGetRayFlagsEXT", EOpRayQueryGetRayFlags); + symbolTable.relateToOperator("rayQueryGetIntersectionTEXT", EOpRayQueryGetIntersectionT); + symbolTable.relateToOperator("rayQueryGetIntersectionInstanceCustomIndexEXT", EOpRayQueryGetIntersectionInstanceCustomIndex); + symbolTable.relateToOperator("rayQueryGetIntersectionInstanceIdEXT", EOpRayQueryGetIntersectionInstanceId); + symbolTable.relateToOperator("rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT", EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset); + symbolTable.relateToOperator("rayQueryGetIntersectionGeometryIndexEXT", EOpRayQueryGetIntersectionGeometryIndex); + symbolTable.relateToOperator("rayQueryGetIntersectionPrimitiveIndexEXT", EOpRayQueryGetIntersectionPrimitiveIndex); + symbolTable.relateToOperator("rayQueryGetIntersectionBarycentricsEXT", EOpRayQueryGetIntersectionBarycentrics); + symbolTable.relateToOperator("rayQueryGetIntersectionFrontFaceEXT", EOpRayQueryGetIntersectionFrontFace); + symbolTable.relateToOperator("rayQueryGetIntersectionCandidateAABBOpaqueEXT", EOpRayQueryGetIntersectionCandidateAABBOpaque); + 
symbolTable.relateToOperator("rayQueryGetIntersectionObjectRayDirectionEXT", EOpRayQueryGetIntersectionObjectRayDirection); + symbolTable.relateToOperator("rayQueryGetIntersectionObjectRayOriginEXT", EOpRayQueryGetIntersectionObjectRayOrigin); + symbolTable.relateToOperator("rayQueryGetWorldRayDirectionEXT", EOpRayQueryGetWorldRayDirection); + symbolTable.relateToOperator("rayQueryGetWorldRayOriginEXT", EOpRayQueryGetWorldRayOrigin); + symbolTable.relateToOperator("rayQueryGetIntersectionObjectToWorldEXT", EOpRayQueryGetIntersectionObjectToWorld); + symbolTable.relateToOperator("rayQueryGetIntersectionWorldToObjectEXT", EOpRayQueryGetIntersectionWorldToObject); + } + + symbolTable.relateToOperator("interpolateAtCentroid", EOpInterpolateAtCentroid); + symbolTable.relateToOperator("interpolateAtSample", EOpInterpolateAtSample); + symbolTable.relateToOperator("interpolateAtOffset", EOpInterpolateAtOffset); + + if (profile != EEsProfile) + symbolTable.relateToOperator("interpolateAtVertexAMD", EOpInterpolateAtVertex); + + symbolTable.relateToOperator("beginInvocationInterlockARB", EOpBeginInvocationInterlock); + symbolTable.relateToOperator("endInvocationInterlockARB", EOpEndInvocationInterlock); + + break; + + case EShLangCompute: + symbolTable.relateToOperator("subgroupMemoryBarrierShared", EOpSubgroupMemoryBarrierShared); + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) { + symbolTable.relateToOperator("dFdx", EOpDPdx); + symbolTable.relateToOperator("dFdy", EOpDPdy); + symbolTable.relateToOperator("fwidth", EOpFwidth); + symbolTable.relateToOperator("dFdxFine", EOpDPdxFine); + symbolTable.relateToOperator("dFdyFine", EOpDPdyFine); + symbolTable.relateToOperator("fwidthFine", EOpFwidthFine); + symbolTable.relateToOperator("dFdxCoarse", EOpDPdxCoarse); + symbolTable.relateToOperator("dFdyCoarse", EOpDPdyCoarse); + symbolTable.relateToOperator("fwidthCoarse",EOpFwidthCoarse); + } + symbolTable.relateToOperator("coopMatLoadNV", EOpCooperativeMatrixLoad); + symbolTable.relateToOperator("coopMatStoreNV", EOpCooperativeMatrixStore); + symbolTable.relateToOperator("coopMatMulAddNV", EOpCooperativeMatrixMulAdd); + break; + + case EShLangRayGen: + case EShLangClosestHit: + case EShLangMiss: + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("traceNV", EOpTrace); + symbolTable.relateToOperator("traceRayEXT", EOpTrace); + symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallable); + symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallable); + } + break; + case EShLangIntersect: + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("reportIntersectionNV", EOpReportIntersection); + symbolTable.relateToOperator("reportIntersectionEXT", EOpReportIntersection); + } + break; + case EShLangAnyHit: + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("ignoreIntersectionNV", EOpIgnoreIntersection); + symbolTable.relateToOperator("ignoreIntersectionEXT", EOpIgnoreIntersection); + symbolTable.relateToOperator("terminateRayNV", EOpTerminateRay); + symbolTable.relateToOperator("terminateRayEXT", EOpTerminateRay); + } + break; + case EShLangCallable: + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallable); + symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallable); + } + break; + case EShLangMeshNV: + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 
320)) { + symbolTable.relateToOperator("writePackedPrimitiveIndices4x8NV", EOpWritePackedPrimitiveIndices4x8NV); + } + // fall through + case EShLangTaskNV: + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + symbolTable.relateToOperator("memoryBarrierShared", EOpMemoryBarrierShared); + symbolTable.relateToOperator("groupMemoryBarrier", EOpGroupMemoryBarrier); + symbolTable.relateToOperator("subgroupMemoryBarrierShared", EOpSubgroupMemoryBarrierShared); + } + break; + + default: + assert(false && "Language not supported"); + } +#endif +} + +// +// Add context-dependent (resource-specific) built-ins not handled by the above. These +// would be ones that need to be programmatically added because they cannot +// be added by simple text strings. For these, also +// 1) Map built-in functions to operators, for those that will turn into an operation node +// instead of remaining a function call. +// 2) Tag extension-related symbols added to their base version with their extensions, so +// that if an early version has the extension turned off, there is an error reported on use. +// +void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) +{ +#ifndef GLSLANG_WEB + if (profile != EEsProfile && version >= 430 && version < 440) { + symbolTable.setVariableExtensions("gl_MaxTransformFeedbackBuffers", 1, &E_GL_ARB_enhanced_layouts); + symbolTable.setVariableExtensions("gl_MaxTransformFeedbackInterleavedComponents", 1, &E_GL_ARB_enhanced_layouts); + } + if (profile != EEsProfile && version >= 130 && version < 420) { + symbolTable.setVariableExtensions("gl_MinProgramTexelOffset", 1, &E_GL_ARB_shading_language_420pack); + symbolTable.setVariableExtensions("gl_MaxProgramTexelOffset", 1, &E_GL_ARB_shading_language_420pack); + } + if (profile != EEsProfile && version >= 150 && version < 410) + symbolTable.setVariableExtensions("gl_MaxViewports", 1, &E_GL_ARB_viewport_array); + + switch(language) { + case EShLangFragment: + // Set up gl_FragData based on current array size. + if (version == 100 || IncludeLegacy(version, profile, spvVersion) || (! ForwardCompatibility && profile != EEsProfile && version < 420)) { + TPrecisionQualifier pq = profile == EEsProfile ? EpqMedium : EpqNone; + TType fragData(EbtFloat, EvqFragColor, pq, 4); + TArraySizes* arraySizes = new TArraySizes; + arraySizes->addInnerSize(resources.maxDrawBuffers); + fragData.transferArraySizes(arraySizes); + symbolTable.insert(*new TVariable(NewPoolTString("gl_FragData"), fragData)); + SpecialQualifier("gl_FragData", EvqFragColor, EbvFragData, symbolTable); + } + + // GL_EXT_blend_func_extended + if (profile == EEsProfile && version >= 100) { + symbolTable.setVariableExtensions("gl_MaxDualSourceDrawBuffersEXT", 1, &E_GL_EXT_blend_func_extended); + symbolTable.setVariableExtensions("gl_SecondaryFragColorEXT", 1, &E_GL_EXT_blend_func_extended); + symbolTable.setVariableExtensions("gl_SecondaryFragDataEXT", 1, &E_GL_EXT_blend_func_extended); + SpecialQualifier("gl_SecondaryFragColorEXT", EvqVaryingOut, EbvSecondaryFragColorEXT, symbolTable); + SpecialQualifier("gl_SecondaryFragDataEXT", EvqVaryingOut, EbvSecondaryFragDataEXT, symbolTable); + } + + break; + + case EShLangTessControl: + case EShLangTessEvaluation: + // Because of the context-dependent array size (gl_MaxPatchVertices), + // these variables were added later than the others and need to be mapped now. 
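+        // (A hedged sketch of where that context comes from, for illustration
+        //  only: the host fills a TBuiltInResource and hands it to the front
+        //  end, which routes it into this resources overload of
+        //  identifyBuiltIns(); driver-independent defaults live in
+        //  StandAlone/ResourceLimits.h:
+        //
+        //      TBuiltInResource res = glslang::DefaultTBuiltInResource;
+        //      res.maxPatchVertices = 32; // sizes gl_MaxPatchVertices/gl_in[] here
+        //      res.maxDrawBuffers = 8;    // sizes gl_FragData[] in the fragment case
+        //      // shader.parse(&res, 110, false, EShMsgDefault);
+        //
+        //  The numeric values are illustrative, not mandated.)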
+ + // standard members + BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable); + BuiltInVariable("gl_in", "gl_PointSize", EbvPointSize, symbolTable); + BuiltInVariable("gl_in", "gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_in", "gl_CullDistance", EbvCullDistance, symbolTable); + + // compatibility members + BuiltInVariable("gl_in", "gl_ClipVertex", EbvClipVertex, symbolTable); + BuiltInVariable("gl_in", "gl_FrontColor", EbvFrontColor, symbolTable); + BuiltInVariable("gl_in", "gl_BackColor", EbvBackColor, symbolTable); + BuiltInVariable("gl_in", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable); + BuiltInVariable("gl_in", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable); + BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable); + + symbolTable.setVariableExtensions("gl_in", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_in", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + + BuiltInVariable("gl_in", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable); + BuiltInVariable("gl_in", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + + // extension requirements + if (profile == EEsProfile) { + symbolTable.setVariableExtensions("gl_in", "gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size); + } + + break; + + default: + break; + } +#endif +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/Initialize.h b/dep/glslang/glslang/MachineIndependent/Initialize.h new file mode 100644 index 000000000..ac8ec33e9 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/Initialize.h @@ -0,0 +1,112 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013-2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#ifndef _INITIALIZE_INCLUDED_ +#define _INITIALIZE_INCLUDED_ + +#include "../Include/ResourceLimits.h" +#include "../Include/Common.h" +#include "../Include/ShHandle.h" +#include "SymbolTable.h" +#include "Versions.h" + +namespace glslang { + +// +// This is made to hold parseable strings for almost all the built-in +// functions and variables for one specific combination of version +// and profile. (Some still need to be added programmatically.) +// This is a base class for language-specific derivations, which +// can be used for language independent builtins. +// +// The strings are organized by +// commonBuiltins: intersection of all stages' built-ins, processed just once +// stageBuiltins[]: anything a stage needs that's not in commonBuiltins +// +class TBuiltInParseables { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + TBuiltInParseables(); + virtual ~TBuiltInParseables(); + virtual void initialize(int version, EProfile, const SpvVersion& spvVersion) = 0; + virtual void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage) = 0; + virtual const TString& getCommonString() const { return commonBuiltins; } + virtual const TString& getStageString(EShLanguage language) const { return stageBuiltins[language]; } + + virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) = 0; + virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) = 0; + +protected: + TString commonBuiltins; + TString stageBuiltins[EShLangCount]; +}; + +// +// This is a GLSL specific derivation of TBuiltInParseables. To present a stable +// interface and match other similar code, it is called TBuiltIns, rather +// than TBuiltInParseablesGlsl. +// +class TBuiltIns : public TBuiltInParseables { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + TBuiltIns(); + virtual ~TBuiltIns(); + void initialize(int version, EProfile, const SpvVersion& spvVersion); + void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage); + + void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable); + void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources); + +protected: + void addTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion); + void relateTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage, TSymbolTable&); + void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion); + void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile); + void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile); + void addImageFunctions(TSampler, const TString& typeName, int version, EProfile profile); + void addSamplingFunctions(TSampler, const TString& typeName, int version, EProfile profile); + void addGatherFunctions(TSampler, const TString& typeName, int version, EProfile profile); + + // Helpers for making textual representations of the permutations + // of texturing/imaging functions. 
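+    // (Hedged illustration of how these tables are consumed: the
+    //  add*Functions() members declared above splice prefix/postfix/dimension
+    //  fragments into plain GLSL declarations and append them to
+    //  commonBuiltins or stageBuiltins[], which the parser later compiles
+    //  like ordinary source text, along the lines of
+    //
+    //      commonBuiltins.append("vec4 texture(sampler2D, vec2);\n");
+    //
+    //  so the built-in signatures are generated rather than hand-written.)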
+ const char* postfixes[5]; + const char* prefixes[EbtNumTypes]; + int dimMap[EsdNumDims]; +}; + +} // end namespace glslang + +#endif // _INITIALIZE_INCLUDED_ diff --git a/dep/glslang/glslang/MachineIndependent/IntermTraverse.cpp b/dep/glslang/glslang/MachineIndependent/IntermTraverse.cpp new file mode 100644 index 000000000..f46010b71 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/IntermTraverse.cpp @@ -0,0 +1,302 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (c) 2002-2010 The ANGLE Project Authors. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "../Include/intermediate.h" + +namespace glslang { + +// +// Traverse the intermediate representation tree, and +// call a node type specific function for each node. +// Done recursively through the member function Traverse(). +// Node types can be skipped if their function to call is 0, +// but their subtree will still be traversed. +// Nodes with children can have their whole subtree skipped +// if preVisit is turned on and the type specific function +// returns false. +// +// preVisit, postVisit, and rightToLeft control what order +// nodes are visited in. +// + +// +// Traversal functions for terminals are straightforward.... +// +void TIntermMethod::traverse(TIntermTraverser*) +{ + // Tree should always resolve all methods as a non-method. +} + +void TIntermSymbol::traverse(TIntermTraverser *it) +{ + it->visitSymbol(this); +} + +void TIntermConstantUnion::traverse(TIntermTraverser *it) +{ + it->visitConstantUnion(this); +} + +// +// Traverse a binary node. +// +void TIntermBinary::traverse(TIntermTraverser *it) +{ + bool visit = true; + + // + // visit the node before children if pre-visiting. + // + if (it->preVisit) + visit = it->visitBinary(EvPreVisit, this); + + // + // Visit the children, in the right order. 
+ // + if (visit) { + it->incrementDepth(this); + + if (it->rightToLeft) { + if (right) + right->traverse(it); + + if (it->inVisit) + visit = it->visitBinary(EvInVisit, this); + + if (visit && left) + left->traverse(it); + } else { + if (left) + left->traverse(it); + + if (it->inVisit) + visit = it->visitBinary(EvInVisit, this); + + if (visit && right) + right->traverse(it); + } + + it->decrementDepth(); + } + + // + // Visit the node after the children, if requested and the traversal + // hasn't been canceled yet. + // + if (visit && it->postVisit) + it->visitBinary(EvPostVisit, this); +} + +// +// Traverse a unary node. Same comments in binary node apply here. +// +void TIntermUnary::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitUnary(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + operand->traverse(it); + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitUnary(EvPostVisit, this); +} + +// +// Traverse an aggregate node. Same comments in binary node apply here. +// +void TIntermAggregate::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitAggregate(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + + if (it->rightToLeft) { + for (TIntermSequence::reverse_iterator sit = sequence.rbegin(); sit != sequence.rend(); sit++) { + (*sit)->traverse(it); + + if (visit && it->inVisit) { + if (*sit != sequence.front()) + visit = it->visitAggregate(EvInVisit, this); + } + } + } else { + for (TIntermSequence::iterator sit = sequence.begin(); sit != sequence.end(); sit++) { + (*sit)->traverse(it); + + if (visit && it->inVisit) { + if (*sit != sequence.back()) + visit = it->visitAggregate(EvInVisit, this); + } + } + } + + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitAggregate(EvPostVisit, this); +} + +// +// Traverse a selection node. Same comments in binary node apply here. +// +void TIntermSelection::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitSelection(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + if (it->rightToLeft) { + if (falseBlock) + falseBlock->traverse(it); + if (trueBlock) + trueBlock->traverse(it); + condition->traverse(it); + } else { + condition->traverse(it); + if (trueBlock) + trueBlock->traverse(it); + if (falseBlock) + falseBlock->traverse(it); + } + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitSelection(EvPostVisit, this); +} + +// +// Traverse a loop node. Same comments in binary node apply here. +// +void TIntermLoop::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitLoop(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + + if (it->rightToLeft) { + if (terminal) + terminal->traverse(it); + + if (body) + body->traverse(it); + + if (test) + test->traverse(it); + } else { + if (test) + test->traverse(it); + + if (body) + body->traverse(it); + + if (terminal) + terminal->traverse(it); + } + + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitLoop(EvPostVisit, this); +} + +// +// Traverse a branch node. Same comments in binary node apply here. 
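+// (A hedged usage sketch of the visitor protocol implemented in this file,
+//  assuming only the TIntermTraverser interface from ../Include/intermediate.h:
+//
+//      class SymbolCounter : public TIntermTraverser {
+//      public:
+//          int count = 0;
+//          void visitSymbol(TIntermSymbol*) override { ++count; }
+//      };
+//
+//      SymbolCounter counter;
+//      root->traverse(&counter); // 'root' is any TIntermNode*
+//
+//  Hooks left un-overridden keep their defaults, so the rest of the tree is
+//  still walked even for node types the client ignores.)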
+// +void TIntermBranch::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitBranch(EvPreVisit, this); + + if (visit && expression) { + it->incrementDepth(this); + expression->traverse(it); + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitBranch(EvPostVisit, this); +} + +// +// Traverse a switch node. +// +void TIntermSwitch::traverse(TIntermTraverser* it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitSwitch(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + if (it->rightToLeft) { + body->traverse(it); + condition->traverse(it); + } else { + condition->traverse(it); + body->traverse(it); + } + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitSwitch(EvPostVisit, this); +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/Intermediate.cpp b/dep/glslang/glslang/MachineIndependent/Intermediate.cpp new file mode 100644 index 000000000..52ec6115f --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/Intermediate.cpp @@ -0,0 +1,3994 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2015 LunarG, Inc. +// Copyright (C) 2015-2020 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Build the intermediate representation. +// + +#include "localintermediate.h" +#include "RemoveTree.h" +#include "SymbolTable.h" +#include "propagateNoContraction.h" + +#include <cfloat> +#include <utility> +#include <tuple> + +namespace glslang { + +//////////////////////////////////////////////////////////////////////////// +// +// First set of functions are to help build the intermediate representation. +// These functions are not member functions of the nodes. +// They are called from parser productions. +// +///////////////////////////////////////////////////////////////////////////// + +// +// Add a terminal node for an identifier in an expression. +// +// Returns the added node.
+// + +TIntermSymbol* TIntermediate::addSymbol(int id, const TString& name, const TType& type, const TConstUnionArray& constArray, + TIntermTyped* constSubtree, const TSourceLoc& loc) +{ + TIntermSymbol* node = new TIntermSymbol(id, name, type); + node->setLoc(loc); + node->setConstArray(constArray); + node->setConstSubtree(constSubtree); + + return node; +} + +TIntermSymbol* TIntermediate::addSymbol(const TIntermSymbol& intermSymbol) +{ + return addSymbol(intermSymbol.getId(), + intermSymbol.getName(), + intermSymbol.getType(), + intermSymbol.getConstArray(), + intermSymbol.getConstSubtree(), + intermSymbol.getLoc()); +} + +TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable) +{ + glslang::TSourceLoc loc; // just a null location + loc.init(); + + return addSymbol(variable, loc); +} + +TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable, const TSourceLoc& loc) +{ + return addSymbol(variable.getUniqueId(), variable.getName(), variable.getType(), variable.getConstArray(), variable.getConstSubtree(), loc); +} + +TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc) +{ + TConstUnionArray unionArray; // just a null constant + + return addSymbol(0, "", type, unionArray, nullptr, loc); +} + +// +// Connect two nodes with a new parent that does a binary operation on the nodes. +// +// Returns the added node. +// +// Returns nullptr if the working conversions and promotions could not be found. +// +TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) +{ + // No operations work on blocks + if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock) + return nullptr; + + // Convert "reference +/- int" and "reference - reference" to integer math + if ((op == EOpAdd || op == EOpSub) && extensionRequested(E_GL_EXT_buffer_reference2)) { + + // No addressing math on struct with unsized array. 
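+        // (Hedged sketch of the rewrite this branch performs, with S standing
+        //  for computeBufferReferenceTypeSize() of the referenced type:
+        //
+        //      ref + i  =>  uint64ToPtr(ptrToUint64(ref) + int64(i) * S)
+        //      r1 - r2  =>  (int64(ptrToUint64(r1)) - int64(ptrToUint64(r2))) / S
+        //
+        //  The guard just below bails out first when the referent contains an
+        //  unsized array, since S cannot be computed for it.)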
+ if ((left->isReference() && left->getType().getReferentType()->containsUnsizedArray()) || + (right->isReference() && right->getType().getReferentType()->containsUnsizedArray())) { + return nullptr; + } + + if (left->isReference() && isTypeInt(right->getBasicType())) { + const TType& referenceType = left->getType(); + TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true); + left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64)); + + right = createConversion(EbtInt64, right); + right = addBinaryMath(EOpMul, right, size, loc); + + TIntermTyped *node = addBinaryMath(op, left, right, loc); + node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType); + return node; + } + + if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) { + const TType& referenceType = right->getType(); + TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true); + right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64)); + + left = createConversion(EbtInt64, left); + left = addBinaryMath(EOpMul, left, size, loc); + + TIntermTyped *node = addBinaryMath(op, left, right, loc); + node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType); + return node; + } + + if (op == EOpSub && left->isReference() && right->isReference()) { + TIntermConstantUnion* size = addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true); + + left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64)); + right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64)); + + left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64)); + right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64)); + + left = addBinaryMath(EOpSub, left, right, loc); + + TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc); + return node; + } + + // No other math operators supported on references + if (left->isReference() || right->isReference()) { + return nullptr; + } + } + + // Try converting the children's base types to compatible types. + auto children = addConversion(op, left, right); + left = std::get<0>(children); + right = std::get<1>(children); + + if (left == nullptr || right == nullptr) + return nullptr; + + // Convert the children's type shape to be compatible. + addBiShapeConversion(op, left, right); + if (left == nullptr || right == nullptr) + return nullptr; + + // + // Need a new node holding things together. Make + // one and promote it to the right type. + // + TIntermBinary* node = addBinaryNode(op, left, right, loc); + if (! promote(node)) + return nullptr; + + node->updatePrecision(); + + // + // If they are both (non-specialization) constants, they must be folded. + // (Unless it's the sequence (comma) operator, but that's handled in addComma().) + // + TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion(); + TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion(); + if (leftTempConstant && rightTempConstant) { + TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant); + if (folded) + return folded; + } + + // If can propagate spec-constantness and if the operation is an allowed + // specialization-constant operation, make a spec-constant. 
+ if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node)) + node->getWritableType().getQualifier().makeSpecConstant(); + + // If must propagate nonuniform, make a nonuniform. + if ((node->getLeft()->getQualifier().isNonUniform() || node->getRight()->getQualifier().isNonUniform()) && + isNonuniformPropagating(node->getOp())) + node->getWritableType().getQualifier().nonUniform = true; + + return node; +} + +// +// Low level: add binary node (no promotions or other argument modifications) +// +TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) const +{ + // build the node + TIntermBinary* node = new TIntermBinary(op); + if (loc.line == 0) + loc = left->getLoc(); + node->setLoc(loc); + node->setLeft(left); + node->setRight(right); + + return node; +} + +// +// like non-type form, but sets node's type. +// +TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc, const TType& type) const +{ + TIntermBinary* node = addBinaryNode(op, left, right, loc); + node->setType(type); + return node; +} + +// +// Low level: add unary node (no promotions or other argument modifications) +// +TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc) const +{ + TIntermUnary* node = new TIntermUnary(op); + if (loc.line == 0) + loc = child->getLoc(); + node->setLoc(loc); + node->setOperand(child); + + return node; +} + +// +// like non-type form, but sets node's type. +// +TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc, const TType& type) const +{ + TIntermUnary* node = addUnaryNode(op, child, loc); + node->setType(type); + return node; +} + +// +// Connect two nodes through an assignment. +// +// Returns the added node. +// +// Returns nullptr if the 'right' type could not be converted to match the 'left' type, +// or the resulting operation cannot be properly promoted. +// +TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) +{ + // No block assignment + if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock) + return nullptr; + + // Convert "reference += int" to "reference = reference + int". We need this because the + // "reference + int" calculation involves a cast back to the original type, which makes it + // not an lvalue. + if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference() && + extensionRequested(E_GL_EXT_buffer_reference2)) { + + if (!(right->getType().isScalar() && right->getType().isIntegerDomain())) + return nullptr; + + TIntermTyped* node = addBinaryMath(op == EOpAddAssign ? EOpAdd : EOpSub, left, right, loc); + if (!node) + return nullptr; + + TIntermSymbol* symbol = left->getAsSymbolNode(); + left = addSymbol(*symbol); + + node = addAssign(EOpAssign, left, node, loc); + return node; + } + + // + // Like adding binary math, except the conversion can only go + // from right to left. + // + + // convert base types, nullptr return means not possible + right = addConversion(op, left->getType(), right); + if (right == nullptr) + return nullptr; + + // convert shape + right = addUniShapeConversion(op, left->getType(), right); + + // build the node + TIntermBinary* node = addBinaryNode(op, left, right, loc); + + if (! 
promote(node)) + return nullptr; + + node->updatePrecision(); + + return node; +} + +// +// Connect two nodes through an index operator, where the left node is the base +// of an array or struct, and the right node is a direct or indirect offset. +// +// Returns the added node. +// The caller should set the type of the returned node. +// +TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc loc) +{ + // caller should set the type + return addBinaryNode(op, base, index, loc); +} + +// +// Add one node as the parent of another that it operates on. +// +// Returns the added node. +// +TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSourceLoc loc) +{ + if (child == 0) + return nullptr; + + if (child->getType().getBasicType() == EbtBlock) + return nullptr; + + switch (op) { + case EOpLogicalNot: + if (getSource() == EShSourceHlsl) { + break; // HLSL can promote logical not + } + + if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) { + return nullptr; + } + break; + + case EOpPostIncrement: + case EOpPreIncrement: + case EOpPostDecrement: + case EOpPreDecrement: + case EOpNegative: + if (child->getType().getBasicType() == EbtStruct || child->getType().isArray()) + return nullptr; + default: break; // some compilers want this + } + + // + // Do we need to promote the operand? + // + TBasicType newType = EbtVoid; + switch (op) { + case EOpConstructBool: newType = EbtBool; break; + case EOpConstructFloat: newType = EbtFloat; break; + case EOpConstructInt: newType = EbtInt; break; + case EOpConstructUint: newType = EbtUint; break; +#ifndef GLSLANG_WEB + case EOpConstructInt8: newType = EbtInt8; break; + case EOpConstructUint8: newType = EbtUint8; break; + case EOpConstructInt16: newType = EbtInt16; break; + case EOpConstructUint16: newType = EbtUint16; break; + case EOpConstructInt64: newType = EbtInt64; break; + case EOpConstructUint64: newType = EbtUint64; break; + case EOpConstructDouble: newType = EbtDouble; break; + case EOpConstructFloat16: newType = EbtFloat16; break; +#endif + default: break; // some compilers want this + } + + if (newType != EbtVoid) { + child = addConversion(op, TType(newType, EvqTemporary, child->getVectorSize(), + child->getMatrixCols(), + child->getMatrixRows(), + child->isVector()), + child); + if (child == nullptr) + return nullptr; + } + + // + // For constructors, we are now done, it was all in the conversion. + // TODO: but, did this bypass constant folding? + // + switch (op) { + case EOpConstructInt8: + case EOpConstructUint8: + case EOpConstructInt16: + case EOpConstructUint16: + case EOpConstructInt: + case EOpConstructUint: + case EOpConstructInt64: + case EOpConstructUint64: + case EOpConstructBool: + case EOpConstructFloat: + case EOpConstructDouble: + case EOpConstructFloat16: + return child; + default: break; // some compilers want this + } + + // + // Make a new node for the operator. + // + TIntermUnary* node = addUnaryNode(op, child, loc); + + if (! promote(node)) + return nullptr; + + node->updatePrecision(); + + // If it's a (non-specialization) constant, it must be folded. + if (node->getOperand()->getAsConstantUnion()) + return node->getOperand()->getAsConstantUnion()->fold(op, node->getType()); + + // If it's a specialization constant, the result is too, + // if the operation is allowed for specialization constants. 
+ if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node)) + node->getWritableType().getQualifier().makeSpecConstant(); + + // If must propagate nonuniform, make a nonuniform. + if (node->getOperand()->getQualifier().isNonUniform() && isNonuniformPropagating(node->getOp())) + node->getWritableType().getQualifier().nonUniform = true; + + return node; +} + +TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOperator op, bool unary, + TIntermNode* childNode, const TType& returnType) +{ + if (unary) { + // + // Treat it like a unary operator. + // addUnaryMath() should get the type correct on its own; + // including constness (which would differ from the prototype). + // + TIntermTyped* child = childNode->getAsTyped(); + if (child == nullptr) + return nullptr; + + if (child->getAsConstantUnion()) { + TIntermTyped* folded = child->getAsConstantUnion()->fold(op, returnType); + if (folded) + return folded; + } + + return addUnaryNode(op, child, child->getLoc(), returnType); + } else { + // setAggregateOperater() calls fold() for constant folding + TIntermTyped* node = setAggregateOperator(childNode, op, returnType, loc); + + return node; + } +} + +// +// This is the safe way to change the operator on an aggregate, as it +// does lots of error checking and fixing. Especially for establishing +// a function call's operation on its set of parameters. Sequences +// of instructions are also aggregates, but they just directly set +// their operator to EOpSequence. +// +// Returns an aggregate node, which could be the one passed in if +// it was already an aggregate. +// +TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type, TSourceLoc loc) +{ + TIntermAggregate* aggNode; + + // + // Make sure we have an aggregate. If not turn it into one. + // + if (node != nullptr) { + aggNode = node->getAsAggregate(); + if (aggNode == nullptr || aggNode->getOp() != EOpNull) { + // + // Make an aggregate containing this node. + // + aggNode = new TIntermAggregate(); + aggNode->getSequence().push_back(node); + if (loc.line == 0) + loc = node->getLoc(); + } + } else + aggNode = new TIntermAggregate(); + + // + // Set the operator. + // + aggNode->setOperator(op); + if (loc.line != 0) + aggNode->setLoc(loc); + + aggNode->setType(type); + + return fold(aggNode); +} + +bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const +{ + // + // Does the base type even allow the operation? 
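+    // (Hedged illustration of the policy encoded in the switch below, not
+    //  additional logic:
+    //
+    //      isConversionAllowed(EOpAdd,      floatNode)   // true: default case
+    //      isConversionAllowed(EOpFunction, samplerNode) // true: opaque types
+    //                                                    // may be passed to functions
+    //      isConversionAllowed(EOpAdd,      samplerNode) // false for GLSL: opaque
+    //      isConversionAllowed(EOpAdd,      voidNode)    // false: void never converts
+    //  )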
+ // + switch (node->getBasicType()) { + case EbtVoid: + return false; + case EbtAtomicUint: + case EbtSampler: + case EbtAccStruct: + // opaque types can be passed to functions + if (op == EOpFunction) + break; + + // HLSL can assign samplers directly (no constructor) + if (getSource() == EShSourceHlsl && node->getBasicType() == EbtSampler) + break; + + // samplers can get assigned via a sampler constructor + // (well, not yet, but code in the rest of this function is ready for it) + if (node->getBasicType() == EbtSampler && op == EOpAssign && + node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler) + break; + + // otherwise, opaque types can't even be operated on, let alone converted + return false; + default: + break; + } + + return true; +} + +bool TIntermediate::buildConvertOp(TBasicType dst, TBasicType src, TOperator& newOp) const +{ + switch (dst) { +#ifndef GLSLANG_WEB + case EbtDouble: + switch (src) { + case EbtUint: newOp = EOpConvUintToDouble; break; + case EbtBool: newOp = EOpConvBoolToDouble; break; + case EbtFloat: newOp = EOpConvFloatToDouble; break; + case EbtInt: newOp = EOpConvIntToDouble; break; + case EbtInt8: newOp = EOpConvInt8ToDouble; break; + case EbtUint8: newOp = EOpConvUint8ToDouble; break; + case EbtInt16: newOp = EOpConvInt16ToDouble; break; + case EbtUint16: newOp = EOpConvUint16ToDouble; break; + case EbtFloat16: newOp = EOpConvFloat16ToDouble; break; + case EbtInt64: newOp = EOpConvInt64ToDouble; break; + case EbtUint64: newOp = EOpConvUint64ToDouble; break; + default: + return false; + } + break; +#endif + case EbtFloat: + switch (src) { + case EbtInt: newOp = EOpConvIntToFloat; break; + case EbtUint: newOp = EOpConvUintToFloat; break; + case EbtBool: newOp = EOpConvBoolToFloat; break; +#ifndef GLSLANG_WEB + case EbtDouble: newOp = EOpConvDoubleToFloat; break; + case EbtInt8: newOp = EOpConvInt8ToFloat; break; + case EbtUint8: newOp = EOpConvUint8ToFloat; break; + case EbtInt16: newOp = EOpConvInt16ToFloat; break; + case EbtUint16: newOp = EOpConvUint16ToFloat; break; + case EbtFloat16: newOp = EOpConvFloat16ToFloat; break; + case EbtInt64: newOp = EOpConvInt64ToFloat; break; + case EbtUint64: newOp = EOpConvUint64ToFloat; break; +#endif + default: + return false; + } + break; +#ifndef GLSLANG_WEB + case EbtFloat16: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToFloat16; break; + case EbtUint8: newOp = EOpConvUint8ToFloat16; break; + case EbtInt16: newOp = EOpConvInt16ToFloat16; break; + case EbtUint16: newOp = EOpConvUint16ToFloat16; break; + case EbtInt: newOp = EOpConvIntToFloat16; break; + case EbtUint: newOp = EOpConvUintToFloat16; break; + case EbtBool: newOp = EOpConvBoolToFloat16; break; + case EbtFloat: newOp = EOpConvFloatToFloat16; break; + case EbtDouble: newOp = EOpConvDoubleToFloat16; break; + case EbtInt64: newOp = EOpConvInt64ToFloat16; break; + case EbtUint64: newOp = EOpConvUint64ToFloat16; break; + default: + return false; + } + break; +#endif + case EbtBool: + switch (src) { + case EbtInt: newOp = EOpConvIntToBool; break; + case EbtUint: newOp = EOpConvUintToBool; break; + case EbtFloat: newOp = EOpConvFloatToBool; break; +#ifndef GLSLANG_WEB + case EbtDouble: newOp = EOpConvDoubleToBool; break; + case EbtInt8: newOp = EOpConvInt8ToBool; break; + case EbtUint8: newOp = EOpConvUint8ToBool; break; + case EbtInt16: newOp = EOpConvInt16ToBool; break; + case EbtUint16: newOp = EOpConvUint16ToBool; break; + case EbtFloat16: newOp = EOpConvFloat16ToBool; break; + case EbtInt64: newOp = 
EOpConvInt64ToBool; break; + case EbtUint64: newOp = EOpConvUint64ToBool; break; +#endif + default: + return false; + } + break; +#ifndef GLSLANG_WEB + case EbtInt8: + switch (src) { + case EbtUint8: newOp = EOpConvUint8ToInt8; break; + case EbtInt16: newOp = EOpConvInt16ToInt8; break; + case EbtUint16: newOp = EOpConvUint16ToInt8; break; + case EbtInt: newOp = EOpConvIntToInt8; break; + case EbtUint: newOp = EOpConvUintToInt8; break; + case EbtInt64: newOp = EOpConvInt64ToInt8; break; + case EbtUint64: newOp = EOpConvUint64ToInt8; break; + case EbtBool: newOp = EOpConvBoolToInt8; break; + case EbtFloat: newOp = EOpConvFloatToInt8; break; + case EbtDouble: newOp = EOpConvDoubleToInt8; break; + case EbtFloat16: newOp = EOpConvFloat16ToInt8; break; + default: + return false; + } + break; + case EbtUint8: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToUint8; break; + case EbtInt16: newOp = EOpConvInt16ToUint8; break; + case EbtUint16: newOp = EOpConvUint16ToUint8; break; + case EbtInt: newOp = EOpConvIntToUint8; break; + case EbtUint: newOp = EOpConvUintToUint8; break; + case EbtInt64: newOp = EOpConvInt64ToUint8; break; + case EbtUint64: newOp = EOpConvUint64ToUint8; break; + case EbtBool: newOp = EOpConvBoolToUint8; break; + case EbtFloat: newOp = EOpConvFloatToUint8; break; + case EbtDouble: newOp = EOpConvDoubleToUint8; break; + case EbtFloat16: newOp = EOpConvFloat16ToUint8; break; + default: + return false; + } + break; + + case EbtInt16: + switch (src) { + case EbtUint8: newOp = EOpConvUint8ToInt16; break; + case EbtInt8: newOp = EOpConvInt8ToInt16; break; + case EbtUint16: newOp = EOpConvUint16ToInt16; break; + case EbtInt: newOp = EOpConvIntToInt16; break; + case EbtUint: newOp = EOpConvUintToInt16; break; + case EbtInt64: newOp = EOpConvInt64ToInt16; break; + case EbtUint64: newOp = EOpConvUint64ToInt16; break; + case EbtBool: newOp = EOpConvBoolToInt16; break; + case EbtFloat: newOp = EOpConvFloatToInt16; break; + case EbtDouble: newOp = EOpConvDoubleToInt16; break; + case EbtFloat16: newOp = EOpConvFloat16ToInt16; break; + default: + return false; + } + break; + case EbtUint16: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToUint16; break; + case EbtUint8: newOp = EOpConvUint8ToUint16; break; + case EbtInt16: newOp = EOpConvInt16ToUint16; break; + case EbtInt: newOp = EOpConvIntToUint16; break; + case EbtUint: newOp = EOpConvUintToUint16; break; + case EbtInt64: newOp = EOpConvInt64ToUint16; break; + case EbtUint64: newOp = EOpConvUint64ToUint16; break; + case EbtBool: newOp = EOpConvBoolToUint16; break; + case EbtFloat: newOp = EOpConvFloatToUint16; break; + case EbtDouble: newOp = EOpConvDoubleToUint16; break; + case EbtFloat16: newOp = EOpConvFloat16ToUint16; break; + default: + return false; + } + break; +#endif + + case EbtInt: + switch (src) { + case EbtUint: newOp = EOpConvUintToInt; break; + case EbtBool: newOp = EOpConvBoolToInt; break; + case EbtFloat: newOp = EOpConvFloatToInt; break; +#ifndef GLSLANG_WEB + case EbtInt8: newOp = EOpConvInt8ToInt; break; + case EbtUint8: newOp = EOpConvUint8ToInt; break; + case EbtInt16: newOp = EOpConvInt16ToInt; break; + case EbtUint16: newOp = EOpConvUint16ToInt; break; + case EbtDouble: newOp = EOpConvDoubleToInt; break; + case EbtFloat16: newOp = EOpConvFloat16ToInt; break; + case EbtInt64: newOp = EOpConvInt64ToInt; break; + case EbtUint64: newOp = EOpConvUint64ToInt; break; +#endif + default: + return false; + } + break; + case EbtUint: + switch (src) { + case EbtInt: newOp = EOpConvIntToUint; break; + case EbtBool: 
newOp = EOpConvBoolToUint; break; + case EbtFloat: newOp = EOpConvFloatToUint; break; +#ifndef GLSLANG_WEB + case EbtInt8: newOp = EOpConvInt8ToUint; break; + case EbtUint8: newOp = EOpConvUint8ToUint; break; + case EbtInt16: newOp = EOpConvInt16ToUint; break; + case EbtUint16: newOp = EOpConvUint16ToUint; break; + case EbtDouble: newOp = EOpConvDoubleToUint; break; + case EbtFloat16: newOp = EOpConvFloat16ToUint; break; + case EbtInt64: newOp = EOpConvInt64ToUint; break; + case EbtUint64: newOp = EOpConvUint64ToUint; break; +#endif + default: + return false; + } + break; +#ifndef GLSLANG_WEB + case EbtInt64: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToInt64; break; + case EbtUint8: newOp = EOpConvUint8ToInt64; break; + case EbtInt16: newOp = EOpConvInt16ToInt64; break; + case EbtUint16: newOp = EOpConvUint16ToInt64; break; + case EbtInt: newOp = EOpConvIntToInt64; break; + case EbtUint: newOp = EOpConvUintToInt64; break; + case EbtBool: newOp = EOpConvBoolToInt64; break; + case EbtFloat: newOp = EOpConvFloatToInt64; break; + case EbtDouble: newOp = EOpConvDoubleToInt64; break; + case EbtFloat16: newOp = EOpConvFloat16ToInt64; break; + case EbtUint64: newOp = EOpConvUint64ToInt64; break; + default: + return false; + } + break; + case EbtUint64: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToUint64; break; + case EbtUint8: newOp = EOpConvUint8ToUint64; break; + case EbtInt16: newOp = EOpConvInt16ToUint64; break; + case EbtUint16: newOp = EOpConvUint16ToUint64; break; + case EbtInt: newOp = EOpConvIntToUint64; break; + case EbtUint: newOp = EOpConvUintToUint64; break; + case EbtBool: newOp = EOpConvBoolToUint64; break; + case EbtFloat: newOp = EOpConvFloatToUint64; break; + case EbtDouble: newOp = EOpConvDoubleToUint64; break; + case EbtFloat16: newOp = EOpConvFloat16ToUint64; break; + case EbtInt64: newOp = EOpConvInt64ToUint64; break; + default: + return false; + } + break; +#endif + default: + return false; + } + return true; +} + +// This is 'mechanism' here, it does any conversion told. +// It is about basic type, not about shape. +// The policy comes from the shader or the calling code. +TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped* node) const +{ + // + // Add a new newNode for the conversion. + // + +#ifndef GLSLANG_WEB + bool convertToIntTypes = (convertTo == EbtInt8 || convertTo == EbtUint8 || + convertTo == EbtInt16 || convertTo == EbtUint16 || + convertTo == EbtInt || convertTo == EbtUint || + convertTo == EbtInt64 || convertTo == EbtUint64); + + bool convertFromIntTypes = (node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8 || + node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16 || + node->getBasicType() == EbtInt || node->getBasicType() == EbtUint || + node->getBasicType() == EbtInt64 || node->getBasicType() == EbtUint64); + + bool convertToFloatTypes = (convertTo == EbtFloat16 || convertTo == EbtFloat || convertTo == EbtDouble); + + bool convertFromFloatTypes = (node->getBasicType() == EbtFloat16 || + node->getBasicType() == EbtFloat || + node->getBasicType() == EbtDouble); + + if (! getArithemeticInt8Enabled()) { + if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) || + ((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes)) + return nullptr; + } + + if (! getArithemeticInt16Enabled()) { + if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! 
convertFromIntTypes) ||
+            ((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes))
+            return nullptr;
+    }
+
+    if (! getArithemeticFloat16Enabled()) {
+        if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) ||
+            (node->getBasicType() == EbtFloat16 && ! convertToFloatTypes))
+            return nullptr;
+    }
+#endif
+
+    TIntermUnary* newNode = nullptr;
+    TOperator newOp = EOpNull;
+    if (!buildConvertOp(convertTo, node->getBasicType(), newOp)) {
+        return nullptr;
+    }
+
+    TType newType(convertTo, EvqTemporary, node->getVectorSize(), node->getMatrixCols(), node->getMatrixRows());
+    newNode = addUnaryNode(newOp, node, node->getLoc(), newType);
+
+    if (node->getAsConstantUnion()) {
+#ifndef GLSLANG_WEB
+        // 8/16-bit storage extensions don't support 8/16-bit constants, so don't fold conversions
+        // to those types
+        if ((getArithemeticInt8Enabled() || !(convertTo == EbtInt8 || convertTo == EbtUint8)) &&
+            (getArithemeticInt16Enabled() || !(convertTo == EbtInt16 || convertTo == EbtUint16)) &&
+            (getArithemeticFloat16Enabled() || !(convertTo == EbtFloat16)))
+#endif
+        {
+            TIntermTyped* folded = node->getAsConstantUnion()->fold(newOp, newType);
+            if (folded)
+                return folded;
+        }
+    }
+
+    // Propagate specialization-constant-ness, if allowed
+    if (node->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*newNode))
+        newNode->getWritableType().getQualifier().makeSpecConstant();
+
+    return newNode;
+}
+
+TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* node) const
+{
+    return createConversion(convertTo, node);
+}
+
+// For converting a pair of operands to a binary operation to compatible
+// types with each other, relative to the operation in 'op'.
+// This does not cover assignment operations, which are asymmetric in that the
+// left type is not changeable.
+// See addConversion(op, type, node) for assignments and unary operation
+// conversions.
+//
+// Generally, this is focused on basic type conversion, not shape conversion.
+// See addShapeConversion() for shape conversions.
+//
+// Returns the converted pair of nodes.
+// Returns <nullptr, nullptr> when there is no conversion.
+std::tuple<TIntermTyped*, TIntermTyped*>
+TIntermediate::addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1)
+{
+    if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1))
+        return std::make_tuple(nullptr, nullptr);
+
+    if (node0->getType() != node1->getType()) {
+        // If differing structure, then no conversions.
+        if (node0->isStruct() || node1->isStruct())
+            return std::make_tuple(nullptr, nullptr);
+
+        // If differing arrays, then no conversions.
+        if (node0->getType().isArray() || node1->getType().isArray())
+            return std::make_tuple(nullptr, nullptr);
+
+        // No implicit conversions for operations involving cooperative matrices
+        if (node0->getType().isCoopMat() || node1->getType().isCoopMat())
+            return std::make_tuple(node0, node1);
+    }
+
+    auto promoteTo = std::make_tuple(EbtNumTypes, EbtNumTypes);
+
+    switch (op) {
+    //
+    // List all the binary ops that can implicitly convert one operand to the other's type;
+    // This implements the 'policy' for implicit type conversion.
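+    // (e.g. for `1 + 2.0` the operands arrive as EbtInt and EbtFloat; the
+    // policy below picks EbtFloat for both sides, and the int side receives
+    // a conversion node.)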
+ // + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + case EOpEqual: + case EOpNotEqual: + + case EOpAdd: + case EOpSub: + case EOpMul: + case EOpDiv: + case EOpMod: + + case EOpVectorTimesScalar: + case EOpVectorTimesMatrix: + case EOpMatrixTimesVector: + case EOpMatrixTimesScalar: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + + case EOpSequence: // used by ?: + + if (node0->getBasicType() == node1->getBasicType()) + return std::make_tuple(node0, node1); + + promoteTo = getConversionDestinatonType(node0->getBasicType(), node1->getBasicType(), op); + if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes) + return std::make_tuple(nullptr, nullptr); + + break; + + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + if (getSource() == EShSourceHlsl) + promoteTo = std::make_tuple(EbtBool, EbtBool); + else + return std::make_tuple(node0, node1); + break; + + // There are no conversions needed for GLSL; the shift amount just needs to be an + // integer type, as does the base. + // HLSL can promote bools to ints to make this work. + case EOpLeftShift: + case EOpRightShift: + if (getSource() == EShSourceHlsl) { + TBasicType node0BasicType = node0->getBasicType(); + if (node0BasicType == EbtBool) + node0BasicType = EbtInt; + if (node1->getBasicType() == EbtBool) + promoteTo = std::make_tuple(node0BasicType, EbtInt); + else + promoteTo = std::make_tuple(node0BasicType, node1->getBasicType()); + } else { + if (isTypeInt(node0->getBasicType()) && isTypeInt(node1->getBasicType())) + return std::make_tuple(node0, node1); + else + return std::make_tuple(nullptr, nullptr); + } + break; + + default: + if (node0->getType() == node1->getType()) + return std::make_tuple(node0, node1); + + return std::make_tuple(nullptr, nullptr); + } + + TIntermTyped* newNode0; + TIntermTyped* newNode1; + + if (std::get<0>(promoteTo) != node0->getType().getBasicType()) { + if (node0->getAsConstantUnion()) + newNode0 = promoteConstantUnion(std::get<0>(promoteTo), node0->getAsConstantUnion()); + else + newNode0 = createConversion(std::get<0>(promoteTo), node0); + } else + newNode0 = node0; + + if (std::get<1>(promoteTo) != node1->getType().getBasicType()) { + if (node1->getAsConstantUnion()) + newNode1 = promoteConstantUnion(std::get<1>(promoteTo), node1->getAsConstantUnion()); + else + newNode1 = createConversion(std::get<1>(promoteTo), node1); + } else + newNode1 = node1; + + return std::make_tuple(newNode0, newNode1); +} + +// +// Convert the node's type to the given type, as allowed by the operation involved: 'op'. +// For implicit conversions, 'op' is not the requested conversion, it is the explicit +// operation requiring the implicit conversion. +// +// Binary operation conversions should be handled by addConversion(op, node, node), not here. +// +// Returns a node representing the conversion, which could be the same +// node passed in if no conversion was needed. +// +// Generally, this is focused on basic type conversion, not shape conversion. +// See addShapeConversion() for shape conversions. +// +// Return nullptr if a conversion can't be done. +// +TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TIntermTyped* node) +{ + if (!isConversionAllowed(op, node)) + return nullptr; + + // Otherwise, if types are identical, no problem + if (type == node->getType()) + return node; + + // If one's a structure, then no conversions. 
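+    // (e.g. a vec4 can never be implicitly produced from a struct operand,
+    // and two differently-declared struct types are never interconverted.)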
+ if (type.isStruct() || node->isStruct()) + return nullptr; + + // If one's an array, then no conversions. + if (type.isArray() || node->getType().isArray()) + return nullptr; + + // Note: callers are responsible for other aspects of shape, + // like vector and matrix sizes. + + TBasicType promoteTo; + // GL_EXT_shader_16bit_storage can't do OpConstantComposite with + // 16-bit types, so disable promotion for those types. + bool canPromoteConstant = true; + + switch (op) { + // + // Explicit conversions (unary operations) + // + case EOpConstructBool: + promoteTo = EbtBool; + break; + case EOpConstructFloat: + promoteTo = EbtFloat; + break; + case EOpConstructInt: + promoteTo = EbtInt; + break; + case EOpConstructUint: + promoteTo = EbtUint; + break; +#ifndef GLSLANG_WEB + case EOpConstructDouble: + promoteTo = EbtDouble; + break; + case EOpConstructFloat16: + promoteTo = EbtFloat16; + canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16); + break; + case EOpConstructInt8: + promoteTo = EbtInt8; + canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8); + break; + case EOpConstructUint8: + promoteTo = EbtUint8; + canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8); + break; + case EOpConstructInt16: + promoteTo = EbtInt16; + canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16); + break; + case EOpConstructUint16: + promoteTo = EbtUint16; + canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16); + break; + case EOpConstructInt64: + promoteTo = EbtInt64; + break; + case EOpConstructUint64: + promoteTo = EbtUint64; + break; +#endif + + case EOpLogicalNot: + + case EOpFunctionCall: + + case EOpReturn: + case EOpAssign: + case EOpAddAssign: + case EOpSubAssign: + case EOpMulAssign: + case EOpVectorTimesScalarAssign: + case EOpMatrixTimesScalarAssign: + case EOpDivAssign: + case EOpModAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + + case EOpAtan: + case EOpClamp: + case EOpCross: + case EOpDistance: + case EOpDot: + case EOpDst: + case EOpFaceForward: + case EOpFma: + case EOpFrexp: + case EOpLdexp: + case EOpMix: + case EOpLit: + case EOpMax: + case EOpMin: + case EOpMod: + case EOpModf: + case EOpPow: + case EOpReflect: + case EOpRefract: + case EOpSmoothStep: + case EOpStep: + + case EOpSequence: + case EOpConstructStruct: + case EOpConstructCooperativeMatrix: + + if (type.isReference() || node->getType().isReference()) { + // types must match to assign a reference + if (type == node->getType()) + return node; + else + return nullptr; + } + + if (type.getBasicType() == node->getType().getBasicType()) + return node; + + if (canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op)) + promoteTo = type.getBasicType(); + else + return nullptr; + break; + + // For GLSL, there are no conversions needed; the shift amount just needs to be an + // integer type, as do the base/result. + // HLSL can convert the shift from a bool to an int. 
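+    // e.g. HLSL accepts `i <<= b` with a bool b by promoting b to int,
+    // whereas GLSL requires both base and shift amount to be integers already.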
+    case EOpLeftShiftAssign:
+    case EOpRightShiftAssign:
+    {
+        if (getSource() == EShSourceHlsl && node->getType().getBasicType() == EbtBool)
+            promoteTo = type.getBasicType();
+        else {
+            if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType()))
+                return node;
+            else
+                return nullptr;
+        }
+        break;
+    }
+
+    default:
+        // default is to require a match; all exceptions should have case statements above
+
+        if (type.getBasicType() == node->getType().getBasicType())
+            return node;
+        else
+            return nullptr;
+    }
+
+    if (canPromoteConstant && node->getAsConstantUnion())
+        return promoteConstantUnion(promoteTo, node->getAsConstantUnion());
+
+    //
+    // Add a new newNode for the conversion.
+    //
+    TIntermTyped* newNode = createConversion(promoteTo, node);
+
+    return newNode;
+}
+
+// Convert the node's shape to match the given type, as allowed by the
+// operation involved: 'op'. This is for situations where there is only one
+// direction to consider doing the shape conversion.
+//
+// This implements policy; it calls addShapeConversion() for the mechanism.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+// Return 'node' if no conversion was done. Promotion handles final shape
+// checking.
+//
+TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node)
+{
+    // some source languages don't do this
+    switch (getSource()) {
+    case EShSourceHlsl:
+        break;
+    case EShSourceGlsl:
+    default:
+        return node;
+    }
+
+    // some operations don't do this
+    switch (op) {
+    case EOpFunctionCall:
+    case EOpReturn:
+        break;
+
+    case EOpMulAssign:
+        // want to support vector *= scalar native ops in AST and lower, not smear, similarly for
+        // matrix *= scalar, etc.
+
+    case EOpAddAssign:
+    case EOpSubAssign:
+    case EOpDivAssign:
+    case EOpAndAssign:
+    case EOpInclusiveOrAssign:
+    case EOpExclusiveOrAssign:
+    case EOpRightShiftAssign:
+    case EOpLeftShiftAssign:
+        if (node->getVectorSize() == 1)
+            return node;
+        break;
+
+    case EOpAssign:
+        break;
+
+    case EOpMix:
+        break;
+
+    default:
+        return node;
+    }
+
+    return addShapeConversion(type, node);
+}
+
+// Convert the nodes' shapes to be compatible for the operation 'op'.
+//
+// This implements policy; it calls addShapeConversion() for the mechanism.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode)
+{
+    // some source languages don't do this
+    switch (getSource()) {
+    case EShSourceHlsl:
+        break;
+    case EShSourceGlsl:
+    default:
+        return;
+    }
+
+    // some operations don't do this
+    // 'break' will mean attempt bidirectional conversion
+    switch (op) {
+    case EOpMulAssign:
+    case EOpAssign:
+    case EOpAddAssign:
+    case EOpSubAssign:
+    case EOpDivAssign:
+    case EOpAndAssign:
+    case EOpInclusiveOrAssign:
+    case EOpExclusiveOrAssign:
+    case EOpRightShiftAssign:
+    case EOpLeftShiftAssign:
+        // switch to unidirectional conversion (the lhs can't change)
+        rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode);
+        return;
+
+    case EOpMul:
+        // matrix multiply does not change shapes
+        if (lhsNode->isMatrix() && rhsNode->isMatrix())
+            return;
+    case EOpAdd:
+    case EOpSub:
+    case EOpDiv:
+        // want to support vector * scalar native ops in AST and lower, not smear, similarly for
+        // matrix * vector, etc.
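+        // e.g. `float3 v; float s; v * s` keeps the scalar operand as-is and
+        // lowers to a vector-times-scalar op instead of smearing s to float3.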
+        if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1)
+            return;
+        break;
+
+    case EOpRightShift:
+    case EOpLeftShift:
+        // can natively support the right operand being a scalar and the left a vector,
+        // but not the reverse
+        if (rhsNode->getVectorSize() == 1)
+            return;
+        break;
+
+    case EOpLessThan:
+    case EOpGreaterThan:
+    case EOpLessThanEqual:
+    case EOpGreaterThanEqual:
+
+    case EOpEqual:
+    case EOpNotEqual:
+
+    case EOpLogicalAnd:
+    case EOpLogicalOr:
+    case EOpLogicalXor:
+
+    case EOpAnd:
+    case EOpInclusiveOr:
+    case EOpExclusiveOr:
+
+    case EOpMix:
+        break;
+
+    default:
+        return;
+    }
+
+    // Do bidirectional conversions
+    if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) {
+        if (lhsNode->getType().isScalarOrVec1())
+            lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
+        else
+            rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
+    }
+    lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
+    rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
+}
+
+// Convert the node's shape to match the given type, as allowed by the
+// operation involved: 'op'.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+// Return 'node' if no conversion was done. Promotion handles final shape
+// checking.
+//
+TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node)
+{
+    // no conversion needed
+    if (node->getType() == type)
+        return node;
+
+    // structures and arrays don't change shape, either to or from
+    if (node->getType().isStruct() || node->getType().isArray() ||
+        type.isStruct() || type.isArray())
+        return node;
+
+    // The new node that handles the conversion
+    TOperator constructorOp = mapTypeToConstructorOp(type);
+
+    if (getSource() == EShSourceHlsl) {
+        // HLSL rules for scalar, vector and matrix conversions:
+        // 1) scalar can become anything, initializing every component with its value
+        // 2) vector and matrix can become scalar, first element is used (warning: truncation)
+        // 3) matrix can become matrix with fewer rows and/or columns (warning: truncation)
+        // 4) vector can become vector with a smaller size (warning: truncation)
+        // 5a) vector 4 can become 2x2 matrix (special case) (same packing layout, it's a reinterpret)
+        // 5b) 2x2 matrix can become vector 4 (special case) (same packing layout, it's a reinterpret)
+
+        const TType &sourceType = node->getType();
+
+        // rule 1 for scalar to matrix is special
+        if (sourceType.isScalarOrVec1() && type.isMatrix()) {
+
+            // HLSL semantics: the scalar (or vec1) is replicated to every component of the matrix. Left to its
+            // own devices, the constructor from a scalar would populate the diagonal. This forces replication
+            // to every matrix element.
+
+            // Note that if the node is complex (e.g., a function call), we don't want to duplicate it here
+            // repeatedly, so we copy it to a temp, then use the temp.
+            const int matSize = type.computeNumComponents();
+            TIntermAggregate* rhsAggregate = new TIntermAggregate();
+
+            const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr);
+
+            if (!isSimple) {
+                assert(0); // TODO: use node replicator service when available.
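+                // (Hitting this assert means a non-trivial expression, such
+                // as a function call, would be pushed, and thus re-evaluated,
+                // once per matrix component by the loop below.)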
+ } + + for (int x = 0; x < matSize; ++x) + rhsAggregate->getSequence().push_back(node); + + return setAggregateOperator(rhsAggregate, constructorOp, type, node->getLoc()); + } + + // rule 1 and 2 + if ((sourceType.isScalar() && !type.isScalar()) || (!sourceType.isScalar() && type.isScalar())) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + + // rule 3 and 5b + if (sourceType.isMatrix()) { + // rule 3 + if (type.isMatrix()) { + if ((sourceType.getMatrixCols() != type.getMatrixCols() || sourceType.getMatrixRows() != type.getMatrixRows()) && + sourceType.getMatrixCols() >= type.getMatrixCols() && sourceType.getMatrixRows() >= type.getMatrixRows()) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + // rule 5b + } else if (type.isVector()) { + if (type.getVectorSize() == 4 && sourceType.getMatrixCols() == 2 && sourceType.getMatrixRows() == 2) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + } + } + + // rule 4 and 5a + if (sourceType.isVector()) { + // rule 4 + if (type.isVector()) + { + if (sourceType.getVectorSize() > type.getVectorSize()) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + // rule 5a + } else if (type.isMatrix()) { + if (sourceType.getVectorSize() == 4 && type.getMatrixCols() == 2 && type.getMatrixRows() == 2) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + } + } + } + + // scalar -> vector or vec1 -> vector or + // vector -> scalar or + // bigger vector -> smaller vector + if ((node->getType().isScalarOrVec1() && type.isVector()) || + (node->getType().isVector() && type.isScalar()) || + (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize())) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + + return node; +} + +bool TIntermediate::isIntegralPromotion(TBasicType from, TBasicType to) const +{ + // integral promotions + if (to == EbtInt) { + switch(from) { + case EbtInt8: + case EbtInt16: + case EbtUint8: + case EbtUint16: + return true; + default: + break; + } + } + return false; +} + +bool TIntermediate::isFPPromotion(TBasicType from, TBasicType to) const +{ + // floating-point promotions + if (to == EbtDouble) { + switch(from) { + case EbtFloat16: + case EbtFloat: + return true; + default: + break; + } + } + return false; +} + +bool TIntermediate::isIntegralConversion(TBasicType from, TBasicType to) const +{ +#ifdef GLSLANG_WEB + return false; +#endif + + switch (from) { + case EbtInt: + switch(to) { + case EbtUint: + return version >= 400 || getSource() == EShSourceHlsl; + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtUint: + switch(to) { + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtInt8: + switch (to) { + case EbtUint8: + case EbtInt16: + case EbtUint16: + case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtUint8: + switch (to) { + case EbtInt16: + case EbtUint16: + case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtInt16: + switch(to) { + case EbtUint16: + case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtUint16: + switch(to) { + case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case 
EbtInt64: + if (to == EbtUint64) { + return true; + } + break; + default: + break; + } + return false; +} + +bool TIntermediate::isFPConversion(TBasicType from, TBasicType to) const +{ +#ifdef GLSLANG_WEB + return false; +#endif + + if (to == EbtFloat && from == EbtFloat16) { + return true; + } else { + return false; + } +} + +bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const +{ + switch (from) { + case EbtInt: + case EbtUint: + switch(to) { + case EbtFloat: + case EbtDouble: + return true; + default: + break; + } + break; +#ifndef GLSLANG_WEB + case EbtInt8: + case EbtUint8: + case EbtInt16: + case EbtUint16: + switch (to) { + case EbtFloat16: + case EbtFloat: + case EbtDouble: + return true; + default: + break; + } + break; + case EbtInt64: + case EbtUint64: + if (to == EbtDouble) { + return true; + } + break; +#endif + default: + break; + } + return false; +} + +// +// See if the 'from' type is allowed to be implicitly converted to the +// 'to' type. This is not about vector/array/struct, only about basic type. +// +bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const +{ + if ((isEsProfile() && version < 310 ) || version == 110) + return false; + + if (from == to) + return true; + + // TODO: Move more policies into language-specific handlers. + // Some languages allow more general (or potentially, more specific) conversions under some conditions. + if (getSource() == EShSourceHlsl) { + const bool fromConvertable = (from == EbtFloat || from == EbtDouble || from == EbtInt || from == EbtUint || from == EbtBool); + const bool toConvertable = (to == EbtFloat || to == EbtDouble || to == EbtInt || to == EbtUint || to == EbtBool); + + if (fromConvertable && toConvertable) { + switch (op) { + case EOpAndAssign: // assignments can perform arbitrary conversions + case EOpInclusiveOrAssign: // ... + case EOpExclusiveOrAssign: // ... + case EOpAssign: // ... + case EOpAddAssign: // ... + case EOpSubAssign: // ... + case EOpMulAssign: // ... + case EOpVectorTimesScalarAssign: // ... + case EOpMatrixTimesScalarAssign: // ... + case EOpDivAssign: // ... + case EOpModAssign: // ... 
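+            // e.g. HLSL accepts `int i = 1.5;`, implicitly truncating float
+            // to int on assignment; the cases below extend the same latitude
+            // to returns, call arguments, and logical operators.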
+ case EOpReturn: // function returns can also perform arbitrary conversions + case EOpFunctionCall: // conversion of a calling parameter + case EOpLogicalNot: + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + case EOpConstructStruct: + return true; + default: + break; + } + } + } + + bool explicitTypesEnabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int32) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int64) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float32) || + extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float64); + + if (explicitTypesEnabled) { + // integral promotions + if (isIntegralPromotion(from, to)) { + return true; + } + + // floating-point promotions + if (isFPPromotion(from, to)) { + return true; + } + + // integral conversions + if (isIntegralConversion(from, to)) { + return true; + } + + // floating-point conversions + if (isFPConversion(from, to)) { + return true; + } + + // floating-integral conversions + if (isFPIntegralConversion(from, to)) { + return true; + } + + // hlsl supported conversions + if (getSource() == EShSourceHlsl) { + if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat)) + return true; + } + } else if (isEsProfile()) { + switch (to) { + case EbtFloat: + switch (from) { + case EbtInt: + case EbtUint: + return extensionRequested(E_GL_EXT_shader_implicit_conversions); + case EbtFloat: + return true; + default: + return false; + } + case EbtUint: + switch (from) { + case EbtInt: + return extensionRequested(E_GL_EXT_shader_implicit_conversions); + case EbtUint: + return true; + default: + return false; + } + default: + return false; + } + } else { + switch (to) { + case EbtDouble: + switch (from) { + case EbtInt: + case EbtUint: + case EbtInt64: + case EbtUint64: + case EbtFloat: + case EbtDouble: + return version >= 400 || extensionRequested(E_GL_ARB_gpu_shader_fp64); + case EbtInt16: + case EbtUint16: + return (version >= 400 || extensionRequested(E_GL_ARB_gpu_shader_fp64)) && + extensionRequested(E_GL_AMD_gpu_shader_int16); + case EbtFloat16: + return (version >= 400 || extensionRequested(E_GL_ARB_gpu_shader_fp64)) && + extensionRequested(E_GL_AMD_gpu_shader_half_float); + default: + return false; + } + case EbtFloat: + switch (from) { + case EbtInt: + case EbtUint: + case EbtFloat: + return true; + case EbtBool: + return getSource() == EShSourceHlsl; + case EbtInt16: + case EbtUint16: + return extensionRequested(E_GL_AMD_gpu_shader_int16); + case EbtFloat16: + return + extensionRequested(E_GL_AMD_gpu_shader_half_float) || getSource() == EShSourceHlsl; + default: + return false; + } + case EbtUint: + switch (from) { + case EbtInt: + return version >= 400 || getSource() == EShSourceHlsl; + case EbtUint: + return true; + case EbtBool: + return getSource() == EShSourceHlsl; + case EbtInt16: + case EbtUint16: + return extensionRequested(E_GL_AMD_gpu_shader_int16); + default: + return false; + } + case EbtInt: + switch (from) { + case EbtInt: + return true; + case EbtBool: + return getSource() == EShSourceHlsl; + case EbtInt16: + return extensionRequested(E_GL_AMD_gpu_shader_int16); + default: + return false; + } + case EbtUint64: + switch (from) { + case EbtInt: 
+ case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + case EbtInt16: + case EbtUint16: + return extensionRequested(E_GL_AMD_gpu_shader_int16); + default: + return false; + } + case EbtInt64: + switch (from) { + case EbtInt: + case EbtInt64: + return true; + case EbtInt16: + return extensionRequested(E_GL_AMD_gpu_shader_int16); + default: + return false; + } + case EbtFloat16: + switch (from) { + case EbtInt16: + case EbtUint16: + return extensionRequested(E_GL_AMD_gpu_shader_int16); + case EbtFloat16: + return extensionRequested(E_GL_AMD_gpu_shader_half_float); + default: + break; + } + return false; + case EbtUint16: + switch (from) { + case EbtInt16: + case EbtUint16: + return extensionRequested(E_GL_AMD_gpu_shader_int16); + default: + break; + } + return false; + default: + return false; + } + } + + return false; +} + +static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBasicType uintType) +{ +#ifdef GLSLANG_WEB + return false; +#endif + + switch(sintType) { + case EbtInt8: + switch(uintType) { + case EbtUint8: + case EbtUint16: + case EbtUint: + case EbtUint64: + return false; + default: + assert(false); + return false; + } + break; + case EbtInt16: + switch(uintType) { + case EbtUint8: + return true; + case EbtUint16: + case EbtUint: + case EbtUint64: + return false; + default: + assert(false); + return false; + } + break; + case EbtInt: + switch(uintType) { + case EbtUint8: + case EbtUint16: + return true; + case EbtUint: + return false; + default: + assert(false); + return false; + } + break; + case EbtInt64: + switch(uintType) { + case EbtUint8: + case EbtUint16: + case EbtUint: + return true; + case EbtUint64: + return false; + default: + assert(false); + return false; + } + break; + default: + assert(false); + return false; + } +} + + +static TBasicType getCorrespondingUnsignedType(TBasicType type) +{ +#ifdef GLSLANG_WEB + assert(type == EbtInt); + return EbtUint; +#endif + + switch(type) { + case EbtInt8: + return EbtUint8; + case EbtInt16: + return EbtUint16; + case EbtInt: + return EbtUint; + case EbtInt64: + return EbtUint64; + default: + assert(false); + return EbtNumTypes; + } +} + +// Implements the following rules +// - If either operand has type float64_t or derived from float64_t, +// the other shall be converted to float64_t or derived type. +// - Otherwise, if either operand has type float32_t or derived from +// float32_t, the other shall be converted to float32_t or derived type. +// - Otherwise, if either operand has type float16_t or derived from +// float16_t, the other shall be converted to float16_t or derived type. +// - Otherwise, if both operands have integer types the following rules +// shall be applied to the operands: +// - If both operands have the same type, no further conversion +// is needed. +// - Otherwise, if both operands have signed integer types or both +// have unsigned integer types, the operand with the type of lesser +// integer conversion rank shall be converted to the type of the +// operand with greater rank. +// - Otherwise, if the operand that has unsigned integer type has rank +// greater than or equal to the rank of the type of the other +// operand, the operand with signed integer type shall be converted +// to the type of the operand with unsigned integer type. 
+// - Otherwise, if the type of the operand with signed integer type can
+//   represent all of the values of the type of the operand with
+//   unsigned integer type, the operand with unsigned integer type
+//   shall be converted to the type of the operand with signed
+//   integer type.
+// - Otherwise, both operands shall be converted to the unsigned
+//   integer type corresponding to the type of the operand with signed
+//   integer type.
+
+std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const
+{
+    TBasicType res0 = EbtNumTypes;
+    TBasicType res1 = EbtNumTypes;
+
+    if ((isEsProfile() &&
+        (version < 310 || !extensionRequested(E_GL_EXT_shader_implicit_conversions))) ||
+        version == 110)
+        return std::make_tuple(res0, res1);
+
+    if (getSource() == EShSourceHlsl) {
+        if (canImplicitlyPromote(type1, type0, op)) {
+            res0 = type0;
+            res1 = type0;
+        } else if (canImplicitlyPromote(type0, type1, op)) {
+            res0 = type1;
+            res1 = type1;
+        }
+        return std::make_tuple(res0, res1);
+    }
+
+    if ((type0 == EbtDouble && canImplicitlyPromote(type1, EbtDouble, op)) ||
+        (type1 == EbtDouble && canImplicitlyPromote(type0, EbtDouble, op)) ) {
+        res0 = EbtDouble;
+        res1 = EbtDouble;
+    } else if ((type0 == EbtFloat && canImplicitlyPromote(type1, EbtFloat, op)) ||
+               (type1 == EbtFloat && canImplicitlyPromote(type0, EbtFloat, op)) ) {
+        res0 = EbtFloat;
+        res1 = EbtFloat;
+    } else if ((type0 == EbtFloat16 && canImplicitlyPromote(type1, EbtFloat16, op)) ||
+               (type1 == EbtFloat16 && canImplicitlyPromote(type0, EbtFloat16, op)) ) {
+        res0 = EbtFloat16;
+        res1 = EbtFloat16;
+    } else if (isTypeInt(type0) && isTypeInt(type1) &&
+               (canImplicitlyPromote(type0, type1, op) || canImplicitlyPromote(type1, type0, op))) {
+        if ((isTypeSignedInt(type0) && isTypeSignedInt(type1)) ||
+            (isTypeUnsignedInt(type0) && isTypeUnsignedInt(type1))) {
+            if (getTypeRank(type0) < getTypeRank(type1)) {
+                res0 = type1;
+                res1 = type1;
+            } else {
+                res0 = type0;
+                res1 = type0;
+            }
+        } else if (isTypeUnsignedInt(type0) && (getTypeRank(type0) > getTypeRank(type1))) {
+            res0 = type0;
+            res1 = type0;
+        } else if (isTypeUnsignedInt(type1) && (getTypeRank(type1) > getTypeRank(type0))) {
+            res0 = type1;
+            res1 = type1;
+        } else if (isTypeSignedInt(type0)) {
+            if (canSignedIntTypeRepresentAllUnsignedValues(type0, type1)) {
+                res0 = type0;
+                res1 = type0;
+            } else {
+                res0 = getCorrespondingUnsignedType(type0);
+                res1 = getCorrespondingUnsignedType(type0);
+            }
+        } else if (isTypeSignedInt(type1)) {
+            if (canSignedIntTypeRepresentAllUnsignedValues(type1, type0)) {
+                res0 = type1;
+                res1 = type1;
+            } else {
+                res0 = getCorrespondingUnsignedType(type1);
+                res1 = getCorrespondingUnsignedType(type1);
+            }
+        }
+    }
+
+    return std::make_tuple(res0, res1);
+}
+
+//
+// Given a type, find what operation would fully construct it.
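+// e.g. a vec3 maps to EOpConstructVec3, a mat2x4 to EOpConstructMat2x4, and a
+// struct to EOpConstructStruct; EOpNull is returned if nothing matches.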
+// +TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const +{ + TOperator op = EOpNull; + + if (type.getQualifier().isNonUniform()) + return EOpConstructNonuniform; + + if (type.isCoopMat()) + return EOpConstructCooperativeMatrix; + + switch (type.getBasicType()) { + case EbtStruct: + op = EOpConstructStruct; + break; + case EbtSampler: + if (type.getSampler().isCombined()) + op = EOpConstructTextureSampler; + break; + case EbtFloat: + if (type.isMatrix()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructMat2x2; break; + case 3: op = EOpConstructMat2x3; break; + case 4: op = EOpConstructMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructMat3x2; break; + case 3: op = EOpConstructMat3x3; break; + case 4: op = EOpConstructMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructMat4x2; break; + case 3: op = EOpConstructMat4x3; break; + case 4: op = EOpConstructMat4x4; break; + default: break; // some compilers want this + } + break; + default: break; // some compilers want this + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructFloat; break; + case 2: op = EOpConstructVec2; break; + case 3: op = EOpConstructVec3; break; + case 4: op = EOpConstructVec4; break; + default: break; // some compilers want this + } + } + break; + case EbtInt: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructIMat2x2; break; + case 3: op = EOpConstructIMat2x3; break; + case 4: op = EOpConstructIMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructIMat3x2; break; + case 3: op = EOpConstructIMat3x3; break; + case 4: op = EOpConstructIMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructIMat4x2; break; + case 3: op = EOpConstructIMat4x3; break; + case 4: op = EOpConstructIMat4x4; break; + default: break; // some compilers want this + } + break; + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructInt; break; + case 2: op = EOpConstructIVec2; break; + case 3: op = EOpConstructIVec3; break; + case 4: op = EOpConstructIVec4; break; + default: break; // some compilers want this + } + } + break; + case EbtUint: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructUMat2x2; break; + case 3: op = EOpConstructUMat2x3; break; + case 4: op = EOpConstructUMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructUMat3x2; break; + case 3: op = EOpConstructUMat3x3; break; + case 4: op = EOpConstructUMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructUMat4x2; break; + case 3: op = EOpConstructUMat4x3; break; + case 4: op = EOpConstructUMat4x4; break; + default: break; // some compilers want this + } + break; + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructUint; break; + case 2: op = EOpConstructUVec2; break; + case 3: op = EOpConstructUVec3; break; + case 4: op = EOpConstructUVec4; 
break; + default: break; // some compilers want this + } + } + break; + case EbtBool: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructBMat2x2; break; + case 3: op = EOpConstructBMat2x3; break; + case 4: op = EOpConstructBMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructBMat3x2; break; + case 3: op = EOpConstructBMat3x3; break; + case 4: op = EOpConstructBMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructBMat4x2; break; + case 3: op = EOpConstructBMat4x3; break; + case 4: op = EOpConstructBMat4x4; break; + default: break; // some compilers want this + } + break; + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructBool; break; + case 2: op = EOpConstructBVec2; break; + case 3: op = EOpConstructBVec3; break; + case 4: op = EOpConstructBVec4; break; + default: break; // some compilers want this + } + } + break; +#ifndef GLSLANG_WEB + case EbtDouble: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructDMat2x2; break; + case 3: op = EOpConstructDMat2x3; break; + case 4: op = EOpConstructDMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructDMat3x2; break; + case 3: op = EOpConstructDMat3x3; break; + case 4: op = EOpConstructDMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructDMat4x2; break; + case 3: op = EOpConstructDMat4x3; break; + case 4: op = EOpConstructDMat4x4; break; + default: break; // some compilers want this + } + break; + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructDouble; break; + case 2: op = EOpConstructDVec2; break; + case 3: op = EOpConstructDVec3; break; + case 4: op = EOpConstructDVec4; break; + default: break; // some compilers want this + } + } + break; + case EbtFloat16: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructF16Mat2x2; break; + case 3: op = EOpConstructF16Mat2x3; break; + case 4: op = EOpConstructF16Mat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructF16Mat3x2; break; + case 3: op = EOpConstructF16Mat3x3; break; + case 4: op = EOpConstructF16Mat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructF16Mat4x2; break; + case 3: op = EOpConstructF16Mat4x3; break; + case 4: op = EOpConstructF16Mat4x4; break; + default: break; // some compilers want this + } + break; + } + } + else { + switch (type.getVectorSize()) { + case 1: op = EOpConstructFloat16; break; + case 2: op = EOpConstructF16Vec2; break; + case 3: op = EOpConstructF16Vec3; break; + case 4: op = EOpConstructF16Vec4; break; + default: break; // some compilers want this + } + } + break; + case EbtInt8: + switch(type.getVectorSize()) { + case 1: op = EOpConstructInt8; break; + case 2: op = EOpConstructI8Vec2; break; + case 3: op = EOpConstructI8Vec3; break; + case 4: op = EOpConstructI8Vec4; break; + default: break; // some compilers want this + } + break; + 
case EbtUint8:
+        switch(type.getVectorSize()) {
+        case 1: op = EOpConstructUint8;  break;
+        case 2: op = EOpConstructU8Vec2; break;
+        case 3: op = EOpConstructU8Vec3; break;
+        case 4: op = EOpConstructU8Vec4; break;
+        default: break; // some compilers want this
+        }
+        break;
+    case EbtInt16:
+        switch(type.getVectorSize()) {
+        case 1: op = EOpConstructInt16;   break;
+        case 2: op = EOpConstructI16Vec2; break;
+        case 3: op = EOpConstructI16Vec3; break;
+        case 4: op = EOpConstructI16Vec4; break;
+        default: break; // some compilers want this
+        }
+        break;
+    case EbtUint16:
+        switch(type.getVectorSize()) {
+        case 1: op = EOpConstructUint16;  break;
+        case 2: op = EOpConstructU16Vec2; break;
+        case 3: op = EOpConstructU16Vec3; break;
+        case 4: op = EOpConstructU16Vec4; break;
+        default: break; // some compilers want this
+        }
+        break;
+    case EbtInt64:
+        switch(type.getVectorSize()) {
+        case 1: op = EOpConstructInt64;   break;
+        case 2: op = EOpConstructI64Vec2; break;
+        case 3: op = EOpConstructI64Vec3; break;
+        case 4: op = EOpConstructI64Vec4; break;
+        default: break; // some compilers want this
+        }
+        break;
+    case EbtUint64:
+        switch(type.getVectorSize()) {
+        case 1: op = EOpConstructUint64;  break;
+        case 2: op = EOpConstructU64Vec2; break;
+        case 3: op = EOpConstructU64Vec3; break;
+        case 4: op = EOpConstructU64Vec4; break;
+        default: break; // some compilers want this
+        }
+        break;
+    case EbtReference:
+        op = EOpConstructReference;
+        break;
+#endif
+    default:
+        break;
+    }
+
+    return op;
+}
+
+//
+// Safe way to combine two nodes into an aggregate. Works with null pointers,
+// a node that's not an aggregate yet, etc.
+//
+// Returns the resulting aggregate, unless nullptr was passed in for
+// both existing nodes.
+//
+TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right)
+{
+    if (left == nullptr && right == nullptr)
+        return nullptr;
+
+    TIntermAggregate* aggNode = nullptr;
+    if (left != nullptr)
+        aggNode = left->getAsAggregate();
+    if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
+        aggNode = new TIntermAggregate;
+        if (left != nullptr)
+            aggNode->getSequence().push_back(left);
+    }
+
+    if (right != nullptr)
+        aggNode->getSequence().push_back(right);
+
+    return aggNode;
+}
+
+TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc& loc)
+{
+    TIntermAggregate* aggNode = growAggregate(left, right);
+    if (aggNode)
+        aggNode->setLoc(loc);
+
+    return aggNode;
+}
+
+//
+// Turn an existing node into an aggregate.
+//
+// Returns an aggregate, unless nullptr was passed in for the existing node.
+//
+TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node)
+{
+    if (node == nullptr)
+        return nullptr;
+
+    TIntermAggregate* aggNode = new TIntermAggregate;
+    aggNode->getSequence().push_back(node);
+    aggNode->setLoc(node->getLoc());
+
+    return aggNode;
+}
+
+TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node, const TSourceLoc& loc)
+{
+    if (node == nullptr)
+        return nullptr;
+
+    TIntermAggregate* aggNode = new TIntermAggregate;
+    aggNode->getSequence().push_back(node);
+    aggNode->setLoc(loc);
+
+    return aggNode;
+}
+
+//
+// Make an aggregate with an empty sequence.
+//
+TIntermAggregate* TIntermediate::makeAggregate(const TSourceLoc& loc)
+{
+    TIntermAggregate* aggNode = new TIntermAggregate;
+    aggNode->setLoc(loc);
+
+    return aggNode;
+}
+
+//
+// For "if" test nodes. There are three children; a condition,
+// a true path, and a false path. The two paths are in the
+// nodePair.
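+// (e.g. `if (c) f(); else g();` arrives with cond = c and the two calls in
+// nodePair; an "if" without an "else" simply carries a null false path.)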
+// +// Returns the selection node created. +// +TIntermSelection* TIntermediate::addSelection(TIntermTyped* cond, TIntermNodePair nodePair, const TSourceLoc& loc) +{ + // + // Don't prune the false path for compile-time constants; it's needed + // for static access analysis. + // + + TIntermSelection* node = new TIntermSelection(cond, nodePair.node1, nodePair.node2); + node->setLoc(loc); + + return node; +} + +TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc) +{ + // However, the lowest precedence operators of the sequence operator ( , ) and the assignment operators + // ... are not included in the operators that can create a constant expression. + // + // if (left->getType().getQualifier().storage == EvqConst && + // right->getType().getQualifier().storage == EvqConst) { + + // return right; + //} + + TIntermTyped *commaAggregate = growAggregate(left, right, loc); + commaAggregate->getAsAggregate()->setOperator(EOpComma); + commaAggregate->setType(right->getType()); + commaAggregate->getWritableType().getQualifier().makeTemporary(); + + return commaAggregate; +} + +TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type, const TString* name, const TSourceLoc& loc) +{ + TIntermMethod* method = new TIntermMethod(object, type, *name); + method->setLoc(loc); + + return method; +} + +// +// For "?:" test nodes. There are three children; a condition, +// a true path, and a false path. The two paths are specified +// as separate parameters. For vector 'cond', the true and false +// are not paths, but vectors to mix. +// +// Specialization constant operations include +// - The ternary operator ( ? : ) +// +// Returns the selection node created, or nullptr if one could not be. +// +TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, + const TSourceLoc& loc) +{ + // If it's void, go to the if-then-else selection() + if (trueBlock->getBasicType() == EbtVoid && falseBlock->getBasicType() == EbtVoid) { + TIntermNodePair pair = { trueBlock, falseBlock }; + TIntermSelection* selection = addSelection(cond, pair, loc); + if (getSource() == EShSourceHlsl) + selection->setNoShortCircuit(); + + return selection; + } + + // + // Get compatible types. + // + auto children = addConversion(EOpSequence, trueBlock, falseBlock); + trueBlock = std::get<0>(children); + falseBlock = std::get<1>(children); + + if (trueBlock == nullptr || falseBlock == nullptr) + return nullptr; + + // Handle a vector condition as a mix + if (!cond->getType().isScalarOrVec1()) { + TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary, + cond->getType().getVectorSize()); + // smear true/false operands as needed + trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock); + falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock); + + // After conversion, types have to match. + if (falseBlock->getType() != trueBlock->getType()) + return nullptr; + + // make the mix operation + TIntermAggregate* mix = makeAggregate(loc); + mix = growAggregate(mix, falseBlock); + mix = growAggregate(mix, trueBlock); + mix = growAggregate(mix, cond); + mix->setType(targetVectorType); + mix->setOp(EOpMix); + + return mix; + } + + // Now have a scalar condition... + + // Convert true and false expressions to matching types + addBiShapeConversion(EOpMix, trueBlock, falseBlock); + + // After conversion, types have to match. 
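+    // e.g. `cond ? 1 : 2.0` had its int branch converted to float above; if
+    // the branches still differ (say vec2 vs. vec3), the ?: is rejected here.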
+ if (falseBlock->getType() != trueBlock->getType()) + return nullptr; + + // Eliminate the selection when the condition is a scalar and all operands are constant. + if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) { + if (cond->getAsConstantUnion()->getConstArray()[0].getBConst()) + return trueBlock; + else + return falseBlock; + } + + // + // Make a selection node. + // + TIntermSelection* node = new TIntermSelection(cond, trueBlock, falseBlock, trueBlock->getType()); + node->setLoc(loc); + node->getQualifier().precision = std::max(trueBlock->getQualifier().precision, falseBlock->getQualifier().precision); + + if ((cond->getQualifier().isConstant() && specConstantPropagates(*trueBlock, *falseBlock)) || + (cond->getQualifier().isSpecConstant() && trueBlock->getQualifier().isConstant() && + falseBlock->getQualifier().isConstant())) + node->getQualifier().makeSpecConstant(); + else + node->getQualifier().makeTemporary(); + + if (getSource() == EShSourceHlsl) + node->setNoShortCircuit(); + + return node; +} + +// +// Constant terminal nodes. Has a union that contains bool, float or int constants +// +// Returns the constant union node created. +// + +TIntermConstantUnion* TIntermediate::addConstantUnion(const TConstUnionArray& unionArray, const TType& t, const TSourceLoc& loc, bool literal) const +{ + TIntermConstantUnion* node = new TIntermConstantUnion(unionArray, t); + node->getQualifier().storage = EvqConst; + node->setLoc(loc); + if (literal) + node->setLiteral(); + + return node; +} +TIntermConstantUnion* TIntermediate::addConstantUnion(signed char i8, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setI8Const(i8); + + return addConstantUnion(unionArray, TType(EbtInt8, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned char u8, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setUConst(u8); + + return addConstantUnion(unionArray, TType(EbtUint8, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(signed short i16, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setI16Const(i16); + + return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setU16Const(u16); + + return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(int i, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setIConst(i); + + return addConstantUnion(unionArray, TType(EbtInt, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned int u, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setUConst(u); + + return addConstantUnion(unionArray, TType(EbtUint, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(long long i64, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setI64Const(i64); + + return addConstantUnion(unionArray, TType(EbtInt64, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, const TSourceLoc& loc, 
bool literal) const
+{
+    TConstUnionArray unionArray(1);
+    unionArray[0].setU64Const(u64);
+
+    return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const
+{
+    TConstUnionArray unionArray(1);
+    unionArray[0].setBConst(b);
+
+    return addConstantUnion(unionArray, TType(EbtBool, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(double d, TBasicType baseType, const TSourceLoc& loc, bool literal) const
+{
+    assert(baseType == EbtFloat || baseType == EbtDouble || baseType == EbtFloat16);
+
+    TConstUnionArray unionArray(1);
+    unionArray[0].setDConst(d);
+
+    return addConstantUnion(unionArray, TType(baseType, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(const TString* s, const TSourceLoc& loc, bool literal) const
+{
+    TConstUnionArray unionArray(1);
+    unionArray[0].setSConst(s);
+
+    return addConstantUnion(unionArray, TType(EbtString, EvqConst), loc, literal);
+}
+
+// Put vector swizzle selectors onto the given sequence
+void TIntermediate::pushSelector(TIntermSequence& sequence, const TVectorSelector& selector, const TSourceLoc& loc)
+{
+    TIntermConstantUnion* constIntNode = addConstantUnion(selector, loc);
+    sequence.push_back(constIntNode);
+}
+
+// Put matrix swizzle selectors onto the given sequence
+void TIntermediate::pushSelector(TIntermSequence& sequence, const TMatrixSelector& selector, const TSourceLoc& loc)
+{
+    TIntermConstantUnion* constIntNode = addConstantUnion(selector.coord1, loc);
+    sequence.push_back(constIntNode);
+    constIntNode = addConstantUnion(selector.coord2, loc);
+    sequence.push_back(constIntNode);
+}
+
+// Make an aggregate node that has a sequence of all selectors.
+template TIntermTyped* TIntermediate::addSwizzle<TVectorSelector>(TSwizzleSelectors<TVectorSelector>& selector, const TSourceLoc& loc);
+template TIntermTyped* TIntermediate::addSwizzle<TMatrixSelector>(TSwizzleSelectors<TMatrixSelector>& selector, const TSourceLoc& loc);
+template<class selectorType>
+TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selector, const TSourceLoc& loc)
+{
+    TIntermAggregate* node = new TIntermAggregate(EOpSequence);
+
+    node->setLoc(loc);
+    TIntermSequence &sequenceVector = node->getSequence();
+
+    for (int i = 0; i < selector.size(); i++)
+        pushSelector(sequenceVector, selector[i], loc);
+
+    return node;
+}
+
+//
+// Follow the left branches down to the root of an l-value
+// expression (just "." and []).
+//
+// Return the base of the l-value (where following indexing quits working).
+// Return nullptr if a chain following dereferences cannot be followed.
+//
+// 'swizzleOkay' says whether or not it is okay to consider a swizzle
+// a valid part of the dereference chain.
+//
+const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay)
+{
+    do {
+        const TIntermBinary* binary = node->getAsBinaryNode();
+        if (binary == nullptr)
+            return node;
+        TOperator op = binary->getOp();
+        if (op != EOpIndexDirect && op != EOpIndexIndirect && op != EOpIndexDirectStruct && op != EOpVectorSwizzle && op != EOpMatrixSwizzle)
+            return nullptr;
+        if (! swizzleOkay) {
+            if (op == EOpVectorSwizzle || op == EOpMatrixSwizzle)
+                return nullptr;
+            if ((op == EOpIndexDirect || op == EOpIndexIndirect) &&
+                (binary->getLeft()->getType().isVector() || binary->getLeft()->getType().isScalar()) &&
+                ! 
binary->getLeft()->getType().isArray()) + return nullptr; + } + node = node->getAsBinaryNode()->getLeft(); + } while (true); +} + +// +// Create while and do-while loop nodes. +// +TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst, + const TSourceLoc& loc) +{ + TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst); + node->setLoc(loc); + + return node; +} + +// +// Create a for-loop sequence. +// +TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test, + TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TIntermLoop*& node) +{ + node = new TIntermLoop(body, test, terminal, testFirst); + node->setLoc(loc); + + // make a sequence of the initializer and statement, but try to reuse the + // aggregate already created for whatever is in the initializer, if there is one + TIntermAggregate* loopSequence = (initializer == nullptr || + initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc) + : initializer->getAsAggregate(); + if (loopSequence != nullptr && loopSequence->getOp() == EOpSequence) + loopSequence->setOp(EOpNull); + loopSequence = growAggregate(loopSequence, node); + loopSequence->setOperator(EOpSequence); + + return loopSequence; +} + +// +// Add branches. +// +TIntermBranch* TIntermediate::addBranch(TOperator branchOp, const TSourceLoc& loc) +{ + return addBranch(branchOp, nullptr, loc); +} + +TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expression, const TSourceLoc& loc) +{ + TIntermBranch* node = new TIntermBranch(branchOp, expression); + node->setLoc(loc); + + return node; +} + +// +// This is to be executed after the final root is put on top by the parsing +// process. +// +bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/) +{ + if (root == nullptr) + return true; + + // Finish off the top-level sequence + TIntermAggregate* aggRoot = root->getAsAggregate(); + if (aggRoot && aggRoot->getOp() == EOpNull) + aggRoot->setOperator(EOpSequence); + +#ifndef GLSLANG_WEB + // Propagate 'noContraction' label in backward from 'precise' variables. + glslang::PropagateNoContraction(*this); + + switch (textureSamplerTransformMode) { + case EShTexSampTransKeep: + break; + case EShTexSampTransUpgradeTextureRemoveSampler: + performTextureUpgradeAndSamplerRemovalTransformation(root); + break; + case EShTexSampTransCount: + assert(0); + break; + } +#endif + + return true; +} + +void TIntermediate::addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage language, TSymbolTable& symbolTable) +{ + // Add top-level nodes for declarations that must be checked cross + // compilation unit by a linker, yet might not have been referenced + // by the AST. + // + // Almost entirely, translation of symbols is driven by what's present + // in the AST traversal, not by translating the symbol table. + // + // However, there are some special cases: + // - From the specification: "Special built-in inputs gl_VertexID and + // gl_InstanceID are also considered active vertex attributes." + // - Linker-based type mismatch error reporting needs to see all + // uniforms/ins/outs variables and blocks. + // - ftransform() can make gl_Vertex and gl_ModelViewProjectionMatrix active. 
+    //
+
+    // if (ftransformUsed) {
+        // TODO: 1.1 lowering functionality: track ftransform() usage
+    //    addSymbolLinkageNode(root, symbolTable, "gl_Vertex");
+    //    addSymbolLinkageNode(root, symbolTable, "gl_ModelViewProjectionMatrix");
+    //}
+
+    if (language == EShLangVertex) {
+        // the names won't be found in the symbol table unless the versions are right,
+        // so version logic does not need to be repeated here
+        addSymbolLinkageNode(linkage, symbolTable, "gl_VertexID");
+        addSymbolLinkageNode(linkage, symbolTable, "gl_InstanceID");
+    }
+
+    // Add a child to the root node for the linker objects
+    linkage->setOperator(EOpLinkerObjects);
+    treeRoot = growAggregate(treeRoot, linkage);
+}
+
+//
+// Add the given name or symbol to the list of nodes at the end of the tree used
+// for link-time checking and external linkage.
+//
+
+void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable& symbolTable, const TString& name)
+{
+    TSymbol* symbol = symbolTable.find(name);
+    if (symbol)
+        addSymbolLinkageNode(linkage, *symbol->getAsVariable());
+}
+
+void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol& symbol)
+{
+    const TVariable* variable = symbol.getAsVariable();
+    if (! variable) {
+        // This must be a member of an anonymous block, and we need to add the whole block
+        const TAnonMember* anon = symbol.getAsAnonMember();
+        variable = &anon->getAnonContainer();
+    }
+    TIntermSymbol* node = addSymbol(*variable);
+    linkage = growAggregate(linkage, node);
+}
+
+//
+// Add a caller->callee relationship to the call graph.
+// Assumes the strings are unique per signature.
+//
+void TIntermediate::addToCallGraph(TInfoSink& /*infoSink*/, const TString& caller, const TString& callee)
+{
+    // Duplicates are okay, but faster to not keep them, and they come grouped by caller,
+    // as long as new ones are pushed on the same end we check on for duplicates
+    for (TGraph::const_iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+        if (call->caller != caller)
+            break;
+        if (call->callee == callee)
+            return;
+    }
+
+    callGraph.push_front(TCall(caller, callee));
+}
+
+//
+// This deletes the tree.
+//
+void TIntermediate::removeTree()
+{
+    if (treeRoot)
+        RemoveAllTreeNodes(treeRoot);
+}
+
+//
+// Implement the part of KHR_vulkan_glsl that lists the set of operations
+// that can result in a specialization constant operation.
+//
+// "5.x Specialization Constant Operations"
+//
+//    Only some operations discussed in this section may be applied to a
+//    specialization constant and still yield a result that is a
+//    specialization constant. The operations allowed are listed below.
+//    When a specialization constant is operated on with one of these
+//    operators and with another constant or specialization constant, the
+//    result is implicitly a specialization constant.
+//  - int(), uint(), and bool() constructors for type conversions
+//    from any of the following types to any of the following types:
+//      * int
+//      * uint
+//      * bool
+//  - vector versions of the above conversion constructors
+//  - allowed implicit conversions of the above
+//  - swizzles (e.g., foo.yx)
+//  - The following when applied to integer or unsigned integer types:
+//      * unary negative ( - )
+//      * binary operations ( + , - , * , / , % )
+//      * shift ( <<, >> )
+//      * bitwise operations ( & , | , ^ )
+//  - The following when applied to integer or unsigned integer scalar types:
+//      * comparison ( == , != , > , >= , < , <= )
+//  - The following when applied to the Boolean scalar type:
+//      * not ( ! )
+//      * logical operations ( && , || , ^^ )
+//      * comparison ( == , != )"
+//
+// This function just handles binary and unary nodes. Construction
+// rules are handled in construction paths that are not covered by the unary
+// and binary paths, while required conversions will still show up here
+// as unary converters in the path from a construction operator.
+//
+bool TIntermediate::isSpecializationOperation(const TIntermOperator& node) const
+{
+    // The operations resulting in floating point are quite limited
+    // (However, some floating-point operations result in bool, like ">",
+    // so are handled later.)
+    if (node.getType().isFloatingDomain()) {
+        switch (node.getOp()) {
+        case EOpIndexDirect:
+        case EOpIndexIndirect:
+        case EOpIndexDirectStruct:
+        case EOpVectorSwizzle:
+        case EOpConvFloatToDouble:
+        case EOpConvDoubleToFloat:
+        case EOpConvFloat16ToFloat:
+        case EOpConvFloatToFloat16:
+        case EOpConvFloat16ToDouble:
+        case EOpConvDoubleToFloat16:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    // Check for floating-point arguments
+    if (const TIntermBinary* bin = node.getAsBinaryNode())
+        if (bin->getLeft() ->getType().isFloatingDomain() ||
+            bin->getRight()->getType().isFloatingDomain())
+            return false;
+
+    // So, for now, we can assume everything left is non-floating-point...
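+    //
+    // Informal example of the split above: given
+    //     layout(constant_id = 0) const int N = 8;
+    // an expression like 'N * 2 + 1' is built entirely from cases accepted by
+    // the switch below, so it remains a specialization constant, whereas
+    // 'float(N) * 0.5' falls under the floating-point restrictions above.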
+ + // Now check for integer/bool-based operations + switch (node.getOp()) { + + // dereference/swizzle + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + case EOpVectorSwizzle: + + // (u)int* -> bool + case EOpConvInt8ToBool: + case EOpConvInt16ToBool: + case EOpConvIntToBool: + case EOpConvInt64ToBool: + case EOpConvUint8ToBool: + case EOpConvUint16ToBool: + case EOpConvUintToBool: + case EOpConvUint64ToBool: + + // bool -> (u)int* + case EOpConvBoolToInt8: + case EOpConvBoolToInt16: + case EOpConvBoolToInt: + case EOpConvBoolToInt64: + case EOpConvBoolToUint8: + case EOpConvBoolToUint16: + case EOpConvBoolToUint: + case EOpConvBoolToUint64: + + // int8_t -> (u)int* + case EOpConvInt8ToInt16: + case EOpConvInt8ToInt: + case EOpConvInt8ToInt64: + case EOpConvInt8ToUint8: + case EOpConvInt8ToUint16: + case EOpConvInt8ToUint: + case EOpConvInt8ToUint64: + + // int16_t -> (u)int* + case EOpConvInt16ToInt8: + case EOpConvInt16ToInt: + case EOpConvInt16ToInt64: + case EOpConvInt16ToUint8: + case EOpConvInt16ToUint16: + case EOpConvInt16ToUint: + case EOpConvInt16ToUint64: + + // int32_t -> (u)int* + case EOpConvIntToInt8: + case EOpConvIntToInt16: + case EOpConvIntToInt64: + case EOpConvIntToUint8: + case EOpConvIntToUint16: + case EOpConvIntToUint: + case EOpConvIntToUint64: + + // int64_t -> (u)int* + case EOpConvInt64ToInt8: + case EOpConvInt64ToInt16: + case EOpConvInt64ToInt: + case EOpConvInt64ToUint8: + case EOpConvInt64ToUint16: + case EOpConvInt64ToUint: + case EOpConvInt64ToUint64: + + // uint8_t -> (u)int* + case EOpConvUint8ToInt8: + case EOpConvUint8ToInt16: + case EOpConvUint8ToInt: + case EOpConvUint8ToInt64: + case EOpConvUint8ToUint16: + case EOpConvUint8ToUint: + case EOpConvUint8ToUint64: + + // uint16_t -> (u)int* + case EOpConvUint16ToInt8: + case EOpConvUint16ToInt16: + case EOpConvUint16ToInt: + case EOpConvUint16ToInt64: + case EOpConvUint16ToUint8: + case EOpConvUint16ToUint: + case EOpConvUint16ToUint64: + + // uint32_t -> (u)int* + case EOpConvUintToInt8: + case EOpConvUintToInt16: + case EOpConvUintToInt: + case EOpConvUintToInt64: + case EOpConvUintToUint8: + case EOpConvUintToUint16: + case EOpConvUintToUint64: + + // uint64_t -> (u)int* + case EOpConvUint64ToInt8: + case EOpConvUint64ToInt16: + case EOpConvUint64ToInt: + case EOpConvUint64ToInt64: + case EOpConvUint64ToUint8: + case EOpConvUint64ToUint16: + case EOpConvUint64ToUint: + + // unary operations + case EOpNegative: + case EOpLogicalNot: + case EOpBitwiseNot: + + // binary operations + case EOpAdd: + case EOpSub: + case EOpMul: + case EOpVectorTimesScalar: + case EOpDiv: + case EOpMod: + case EOpRightShift: + case EOpLeftShift: + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpLogicalOr: + case EOpLogicalXor: + case EOpLogicalAnd: + case EOpEqual: + case EOpNotEqual: + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + return true; + default: + return false; + } +} + +// Is the operation one that must propagate nonuniform? +bool TIntermediate::isNonuniformPropagating(TOperator op) const +{ + // "* All Operators in Section 5.1 (Operators), except for assignment, + // arithmetic assignment, and sequence + // * Component selection in Section 5.5 + // * Matrix components in Section 5.6 + // * Structure and Array Operations in Section 5.7, except for the length + // method." 
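+// For illustration: given 'nonuniformEXT(i)', expressions such as 'arr[i]' or
+// 'v[i].x' use the indexing/swizzle cases below and so keep the nonuniform
+// decoration, while plain assignment ('j = i') is excluded above and does not
+// propagate it.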
+ switch (op) { + case EOpPostIncrement: + case EOpPostDecrement: + case EOpPreIncrement: + case EOpPreDecrement: + + case EOpNegative: + case EOpLogicalNot: + case EOpVectorLogicalNot: + case EOpBitwiseNot: + + case EOpAdd: + case EOpSub: + case EOpMul: + case EOpDiv: + case EOpMod: + case EOpRightShift: + case EOpLeftShift: + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpEqual: + case EOpNotEqual: + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + case EOpVectorTimesScalar: + case EOpVectorTimesMatrix: + case EOpMatrixTimesVector: + case EOpMatrixTimesScalar: + + case EOpLogicalOr: + case EOpLogicalXor: + case EOpLogicalAnd: + + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + case EOpVectorSwizzle: + return true; + + default: + break; + } + + return false; +} + +//////////////////////////////////////////////////////////////// +// +// Member functions of the nodes used for building the tree. +// +//////////////////////////////////////////////////////////////// + +// +// Say whether or not an operation node changes the value of a variable. +// +// Returns true if state is modified. +// +bool TIntermOperator::modifiesState() const +{ + switch (op) { + case EOpPostIncrement: + case EOpPostDecrement: + case EOpPreIncrement: + case EOpPreDecrement: + case EOpAssign: + case EOpAddAssign: + case EOpSubAssign: + case EOpMulAssign: + case EOpVectorTimesMatrixAssign: + case EOpVectorTimesScalarAssign: + case EOpMatrixTimesScalarAssign: + case EOpMatrixTimesMatrixAssign: + case EOpDivAssign: + case EOpModAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + case EOpLeftShiftAssign: + case EOpRightShiftAssign: + return true; + default: + return false; + } +} + +// +// returns true if the operator is for one of the constructors +// +bool TIntermOperator::isConstructor() const +{ + return op > EOpConstructGuardStart && op < EOpConstructGuardEnd; +} + +// +// Make sure the type of an operator is appropriate for its +// combination of operation and operand type. This will invoke +// promoteUnary, promoteBinary, etc as needed. +// +// Returns false if nothing makes sense. +// +bool TIntermediate::promote(TIntermOperator* node) +{ + if (node == nullptr) + return false; + + if (node->getAsUnaryNode()) + return promoteUnary(*node->getAsUnaryNode()); + + if (node->getAsBinaryNode()) + return promoteBinary(*node->getAsBinaryNode()); + + if (node->getAsAggregate()) + return promoteAggregate(*node->getAsAggregate()); + + return false; +} + +// +// See TIntermediate::promote +// +bool TIntermediate::promoteUnary(TIntermUnary& node) +{ + const TOperator op = node.getOp(); + TIntermTyped* operand = node.getOperand(); + + switch (op) { + case EOpLogicalNot: + // Convert operand to a boolean type + if (operand->getBasicType() != EbtBool) { + // Add constructor to boolean type. If that fails, we can't do it, so return false. + TIntermTyped* converted = addConversion(op, TType(EbtBool), operand); + if (converted == nullptr) + return false; + + // Use the result of converting the node to a bool. 
+            node.setOperand(operand = converted); // also updates stack variable
+        }
+        break;
+    case EOpBitwiseNot:
+        if (!isTypeInt(operand->getBasicType()))
+            return false;
+        break;
+    case EOpNegative:
+    case EOpPostIncrement:
+    case EOpPostDecrement:
+    case EOpPreIncrement:
+    case EOpPreDecrement:
+        if (!isTypeInt(operand->getBasicType()) &&
+            operand->getBasicType() != EbtFloat &&
+            operand->getBasicType() != EbtFloat16 &&
+            operand->getBasicType() != EbtDouble)
+
+            return false;
+        break;
+    default:
+        // HLSL uses this path for initial function signature finding for built-ins
+        // taking a single argument, which generally don't participate in
+        // operator-based type promotion (type conversion will occur later).
+        // For now, scalar argument cases are relying on the setType() call below.
+        if (getSource() == EShSourceHlsl)
+            break;
+
+        // GLSL only allows integer arguments for the cases identified above in the
+        // case statements.
+        if (operand->getBasicType() != EbtFloat)
+            return false;
+    }
+
+    node.setType(operand->getType());
+    node.getWritableType().getQualifier().makeTemporary();
+
+    return true;
+}
+
+void TIntermUnary::updatePrecision()
+{
+    if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
+        if (operand->getQualifier().precision > getQualifier().precision)
+            getQualifier().precision = operand->getQualifier().precision;
+    }
+}
+
+//
+// See TIntermediate::promote
+//
+bool TIntermediate::promoteBinary(TIntermBinary& node)
+{
+    TOperator op = node.getOp();
+    TIntermTyped* left = node.getLeft();
+    TIntermTyped* right = node.getRight();
+
+    // Arrays and structures have to be exact matches.
+    if ((left->isArray() || right->isArray() || left->getBasicType() == EbtStruct || right->getBasicType() == EbtStruct)
+        && left->getType() != right->getType())
+        return false;
+
+    // Base assumption: just make the type the same as the left
+    // operand. Only deviations from this will be coded.
+    node.setType(left->getType());
+    node.getWritableType().getQualifier().clear();
+
+    // Composite and opaque types don't have pending operator changes, e.g.,
+    // array, structure, and samplers. Just establish final type and correctness.
+    if (left->isArray() || left->getBasicType() == EbtStruct || left->getBasicType() == EbtSampler) {
+        switch (op) {
+        case EOpEqual:
+        case EOpNotEqual:
+            if (left->getBasicType() == EbtSampler) {
+                // can't compare samplers
+                return false;
+            } else {
+                // Promote to conditional
+                node.setType(TType(EbtBool));
+            }
+
+            return true;
+
+        case EOpAssign:
+            // Keep type from above
+
+            return true;
+
+        default:
+            return false;
+        }
+    }
+
+    //
+    // We now have only scalars, vectors, and matrices to worry about.
+    //
+
+    // HLSL implicitly promotes bool -> int for numeric operations.
+    // (Implicit conversions to make the operands match each other's types were already done.)
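+    // For example, given HLSL source 'b + 1' with 'bool b', the bool operand is
+    // run through createConversion(EbtInt, ...) below, so the addition is
+    // performed as 'int(b) + 1'.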
+ if (getSource() == EShSourceHlsl && + (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)) { + switch (op) { + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + + case EOpRightShift: + case EOpLeftShift: + + case EOpMod: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + + case EOpAdd: + case EOpSub: + case EOpDiv: + case EOpMul: + if (left->getBasicType() == EbtBool) + left = createConversion(EbtInt, left); + if (right->getBasicType() == EbtBool) + right = createConversion(EbtInt, right); + if (left == nullptr || right == nullptr) + return false; + node.setLeft(left); + node.setRight(right); + + // Update the original base assumption on result type.. + node.setType(left->getType()); + node.getWritableType().getQualifier().clear(); + + break; + + default: + break; + } + } + + // Do general type checks against individual operands (comparing left and right is coming up, checking mixed shapes after that) + switch (op) { + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + // Relational comparisons need numeric types and will promote to scalar Boolean. + if (left->getBasicType() == EbtBool) + return false; + + node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize())); + break; + + case EOpEqual: + case EOpNotEqual: + if (getSource() == EShSourceHlsl) { + const int resultWidth = std::max(left->getVectorSize(), right->getVectorSize()); + + // In HLSL, == or != on vectors means component-wise comparison. + if (resultWidth > 1) { + op = (op == EOpEqual) ? EOpVectorEqual : EOpVectorNotEqual; + node.setOp(op); + } + + node.setType(TType(EbtBool, EvqTemporary, resultWidth)); + } else { + // All the above comparisons result in a bool (but not the vector compares) + node.setType(TType(EbtBool)); + } + break; + + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + // logical ops operate only on Booleans or vectors of Booleans. + if (left->getBasicType() != EbtBool || left->isMatrix()) + return false; + + if (getSource() == EShSourceGlsl) { + // logical ops operate only on scalar Booleans and will promote to scalar Boolean. + if (left->isVector()) + return false; + } + + node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize())); + break; + + case EOpRightShift: + case EOpLeftShift: + case EOpRightShiftAssign: + case EOpLeftShiftAssign: + + case EOpMod: + case EOpModAssign: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + if (getSource() == EShSourceHlsl) + break; + + // Check for integer-only operands. 
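+        // (This is a quick filter: at least one side must already be an integer
+        // type here; exact type agreement for these operators is enforced in the
+        // left/right comparison further below.)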
+ if (!isTypeInt(left->getBasicType()) && !isTypeInt(right->getBasicType())) + return false; + if (left->isMatrix() || right->isMatrix()) + return false; + + break; + + case EOpAdd: + case EOpSub: + case EOpDiv: + case EOpMul: + case EOpAddAssign: + case EOpSubAssign: + case EOpMulAssign: + case EOpDivAssign: + // check for non-Boolean operands + if (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool) + return false; + + default: + break; + } + + // Compare left and right, and finish with the cases where the operand types must match + switch (op) { + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + + case EOpEqual: + case EOpNotEqual: + case EOpVectorEqual: + case EOpVectorNotEqual: + + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + return left->getType() == right->getType(); + + case EOpMod: + case EOpModAssign: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + + case EOpAdd: + case EOpSub: + case EOpDiv: + + case EOpAddAssign: + case EOpSubAssign: + case EOpDivAssign: + // Quick out in case the types do match + if (left->getType() == right->getType()) + return true; + + // Fall through + + case EOpMul: + case EOpMulAssign: + // At least the basic type has to match + if (left->getBasicType() != right->getBasicType()) + return false; + + default: + break; + } + + if (left->getType().isCoopMat() || right->getType().isCoopMat()) { + if (left->getType().isCoopMat() && right->getType().isCoopMat() && + *left->getType().getTypeParameters() != *right->getType().getTypeParameters()) { + return false; + } + switch (op) { + case EOpMul: + case EOpMulAssign: + if (left->getType().isCoopMat() && right->getType().isCoopMat()) { + return false; + } + if (op == EOpMulAssign && right->getType().isCoopMat()) { + return false; + } + node.setOp(op == EOpMulAssign ? EOpMatrixTimesScalarAssign : EOpMatrixTimesScalar); + if (right->getType().isCoopMat()) { + node.setType(right->getType()); + } + return true; + case EOpAdd: + case EOpSub: + case EOpDiv: + case EOpAssign: + // These require both to be cooperative matrices + if (!left->getType().isCoopMat() || !right->getType().isCoopMat()) { + return false; + } + return true; + default: + break; + } + return false; + } + + // Finish handling the case, for all ops, where both operands are scalars. + if (left->isScalar() && right->isScalar()) + return true; + + // Finish handling the case, for all ops, where there are two vectors of different sizes + if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize() && right->getVectorSize() > 1) + return false; + + // + // We now have a mix of scalars, vectors, or matrices, for non-relational operations. + // + + // Can these two operands be combined, what is the resulting type? 
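+    // Shape examples for the '*' cases below (GLSL matCxR = C columns, R rows):
+    //     vec4 * mat3x4   -> EOpVectorTimesMatrix, vec3
+    //     mat3x4 * vec3   -> EOpMatrixTimesVector, vec4
+    //     mat3x4 * mat2x3 -> EOpMatrixTimesMatrix, mat2x4
+    //     vec3 * float    -> EOpVectorTimesScalar, vec3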
+ TBasicType basicType = left->getBasicType(); + switch (op) { + case EOpMul: + if (!left->isMatrix() && right->isMatrix()) { + if (left->isVector()) { + if (left->getVectorSize() != right->getMatrixRows()) + return false; + node.setOp(op = EOpVectorTimesMatrix); + node.setType(TType(basicType, EvqTemporary, right->getMatrixCols())); + } else { + node.setOp(op = EOpMatrixTimesScalar); + node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), right->getMatrixRows())); + } + } else if (left->isMatrix() && !right->isMatrix()) { + if (right->isVector()) { + if (left->getMatrixCols() != right->getVectorSize()) + return false; + node.setOp(op = EOpMatrixTimesVector); + node.setType(TType(basicType, EvqTemporary, left->getMatrixRows())); + } else { + node.setOp(op = EOpMatrixTimesScalar); + } + } else if (left->isMatrix() && right->isMatrix()) { + if (left->getMatrixCols() != right->getMatrixRows()) + return false; + node.setOp(op = EOpMatrixTimesMatrix); + node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), left->getMatrixRows())); + } else if (! left->isMatrix() && ! right->isMatrix()) { + if (left->isVector() && right->isVector()) { + ; // leave as component product + } else if (left->isVector() || right->isVector()) { + node.setOp(op = EOpVectorTimesScalar); + if (right->isVector()) + node.setType(TType(basicType, EvqTemporary, right->getVectorSize())); + } + } else { + return false; + } + break; + case EOpMulAssign: + if (! left->isMatrix() && right->isMatrix()) { + if (left->isVector()) { + if (left->getVectorSize() != right->getMatrixRows() || left->getVectorSize() != right->getMatrixCols()) + return false; + node.setOp(op = EOpVectorTimesMatrixAssign); + } else { + return false; + } + } else if (left->isMatrix() && !right->isMatrix()) { + if (right->isVector()) { + return false; + } else { + node.setOp(op = EOpMatrixTimesScalarAssign); + } + } else if (left->isMatrix() && right->isMatrix()) { + if (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixCols() != right->getMatrixRows()) + return false; + node.setOp(op = EOpMatrixTimesMatrixAssign); + } else if (!left->isMatrix() && !right->isMatrix()) { + if (left->isVector() && right->isVector()) { + // leave as component product + } else if (left->isVector() || right->isVector()) { + if (! left->isVector()) + return false; + node.setOp(op = EOpVectorTimesScalarAssign); + } + } else { + return false; + } + break; + + case EOpRightShift: + case EOpLeftShift: + case EOpRightShiftAssign: + case EOpLeftShiftAssign: + if (right->isVector() && (! 
left->isVector() || right->getVectorSize() != left->getVectorSize())) + return false; + break; + + case EOpAssign: + if (left->getVectorSize() != right->getVectorSize() || left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows()) + return false; + // fall through + + case EOpAdd: + case EOpSub: + case EOpDiv: + case EOpMod: + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpAddAssign: + case EOpSubAssign: + case EOpDivAssign: + case EOpModAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + + if ((left->isMatrix() && right->isVector()) || + (left->isVector() && right->isMatrix()) || + left->getBasicType() != right->getBasicType()) + return false; + if (left->isMatrix() && right->isMatrix() && (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows())) + return false; + if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize()) + return false; + if (right->isVector() || right->isMatrix()) { + node.getWritableType().shallowCopy(right->getType()); + node.getWritableType().getQualifier().makeTemporary(); + } + break; + + default: + return false; + } + + // + // One more check for assignment. + // + switch (op) { + // The resulting type has to match the left operand. + case EOpAssign: + case EOpAddAssign: + case EOpSubAssign: + case EOpMulAssign: + case EOpDivAssign: + case EOpModAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + case EOpLeftShiftAssign: + case EOpRightShiftAssign: + if (node.getType() != left->getType()) + return false; + break; + default: + break; + } + + return true; +} + +// +// See TIntermediate::promote +// +bool TIntermediate::promoteAggregate(TIntermAggregate& node) +{ + TOperator op = node.getOp(); + TIntermSequence& args = node.getSequence(); + const int numArgs = static_cast(args.size()); + + // Presently, only hlsl does intrinsic promotions. + if (getSource() != EShSourceHlsl) + return true; + + // set of opcodes that can be promoted in this manner. + switch (op) { + case EOpAtan: + case EOpClamp: + case EOpCross: + case EOpDistance: + case EOpDot: + case EOpDst: + case EOpFaceForward: + // case EOpFindMSB: TODO: + // case EOpFindLSB: TODO: + case EOpFma: + case EOpMod: + case EOpFrexp: + case EOpLdexp: + case EOpMix: + case EOpLit: + case EOpMax: + case EOpMin: + case EOpModf: + // case EOpGenMul: TODO: + case EOpPow: + case EOpReflect: + case EOpRefract: + // case EOpSinCos: TODO: + case EOpSmoothStep: + case EOpStep: + break; + default: + return true; + } + + // TODO: array and struct behavior + + // Try converting all nodes to the given node's type + TIntermSequence convertedArgs(numArgs, nullptr); + + // Try to convert all types to the nonConvArg type. + for (int nonConvArg = 0; nonConvArg < numArgs; ++nonConvArg) { + // Try converting all args to this arg's type + for (int convArg = 0; convArg < numArgs; ++convArg) { + convertedArgs[convArg] = addConversion(op, args[nonConvArg]->getAsTyped()->getType(), + args[convArg]->getAsTyped()); + } + + // If we successfully converted all the args, use the result. 
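+        // (i.e., no addConversion() call above returned nullptr, so the swap
+        // below can safely commit the promoted argument list.)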
+ if (std::all_of(convertedArgs.begin(), convertedArgs.end(), + [](const TIntermNode* node) { return node != nullptr; })) { + + std::swap(args, convertedArgs); + return true; + } + } + + return false; +} + +void TIntermBinary::updatePrecision() +{ + if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) { + getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision); + if (getQualifier().precision != EpqNone) { + left->propagatePrecision(getQualifier().precision); + right->propagatePrecision(getQualifier().precision); + } + } +} + +void TIntermTyped::propagatePrecision(TPrecisionQualifier newPrecision) +{ + if (getQualifier().precision != EpqNone || (getBasicType() != EbtInt && getBasicType() != EbtUint && getBasicType() != EbtFloat && getBasicType() != EbtFloat16)) + return; + + getQualifier().precision = newPrecision; + + TIntermBinary* binaryNode = getAsBinaryNode(); + if (binaryNode) { + binaryNode->getLeft()->propagatePrecision(newPrecision); + binaryNode->getRight()->propagatePrecision(newPrecision); + + return; + } + + TIntermUnary* unaryNode = getAsUnaryNode(); + if (unaryNode) { + unaryNode->getOperand()->propagatePrecision(newPrecision); + + return; + } + + TIntermAggregate* aggregateNode = getAsAggregate(); + if (aggregateNode) { + TIntermSequence operands = aggregateNode->getSequence(); + for (unsigned int i = 0; i < operands.size(); ++i) { + TIntermTyped* typedNode = operands[i]->getAsTyped(); + if (! typedNode) + break; + typedNode->propagatePrecision(newPrecision); + } + + return; + } + + TIntermSelection* selectionNode = getAsSelectionNode(); + if (selectionNode) { + TIntermTyped* typedNode = selectionNode->getTrueBlock()->getAsTyped(); + if (typedNode) { + typedNode->propagatePrecision(newPrecision); + typedNode = selectionNode->getFalseBlock()->getAsTyped(); + if (typedNode) + typedNode->propagatePrecision(newPrecision); + } + + return; + } +} + +TIntermTyped* TIntermediate::promoteConstantUnion(TBasicType promoteTo, TIntermConstantUnion* node) const +{ + const TConstUnionArray& rightUnionArray = node->getConstArray(); + int size = node->getType().computeNumComponents(); + + TConstUnionArray leftUnionArray(size); + + for (int i=0; i < size; i++) { + +#define PROMOTE(Set, CType, Get) leftUnionArray[i].Set(static_cast(rightUnionArray[i].Get())) +#define PROMOTE_TO_BOOL(Get) leftUnionArray[i].setBConst(rightUnionArray[i].Get() != 0) + +#ifdef GLSLANG_WEB +#define TO_ALL(Get) \ + switch (promoteTo) { \ + case EbtFloat: PROMOTE(setDConst, double, Get); break; \ + case EbtInt: PROMOTE(setIConst, int, Get); break; \ + case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \ + case EbtBool: PROMOTE_TO_BOOL(Get); break; \ + default: return node; \ + } +#else +#define TO_ALL(Get) \ + switch (promoteTo) { \ + case EbtFloat16: PROMOTE(setDConst, double, Get); break; \ + case EbtFloat: PROMOTE(setDConst, double, Get); break; \ + case EbtDouble: PROMOTE(setDConst, double, Get); break; \ + case EbtInt8: PROMOTE(setI8Const, char, Get); break; \ + case EbtInt16: PROMOTE(setI16Const, short, Get); break; \ + case EbtInt: PROMOTE(setIConst, int, Get); break; \ + case EbtInt64: PROMOTE(setI64Const, long long, Get); break; \ + case EbtUint8: PROMOTE(setU8Const, unsigned char, Get); break; \ + case EbtUint16: PROMOTE(setU16Const, unsigned short, Get); break; \ + case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \ + case EbtUint64: PROMOTE(setU64Const, unsigned long long, 
Get); break; \ + case EbtBool: PROMOTE_TO_BOOL(Get); break; \ + default: return node; \ + } +#endif + + switch (node->getType().getBasicType()) { + case EbtFloat: TO_ALL(getDConst); break; + case EbtInt: TO_ALL(getIConst); break; + case EbtUint: TO_ALL(getUConst); break; + case EbtBool: TO_ALL(getBConst); break; +#ifndef GLSLANG_WEB + case EbtFloat16: TO_ALL(getDConst); break; + case EbtDouble: TO_ALL(getDConst); break; + case EbtInt8: TO_ALL(getI8Const); break; + case EbtInt16: TO_ALL(getI16Const); break; + case EbtInt64: TO_ALL(getI64Const); break; + case EbtUint8: TO_ALL(getU8Const); break; + case EbtUint16: TO_ALL(getU16Const); break; + case EbtUint64: TO_ALL(getU64Const); break; +#endif + default: return node; + } + } + + const TType& t = node->getType(); + + return addConstantUnion(leftUnionArray, TType(promoteTo, t.getQualifier().storage, t.getVectorSize(), t.getMatrixCols(), t.getMatrixRows()), + node->getLoc()); +} + +void TIntermAggregate::setPragmaTable(const TPragmaTable& pTable) +{ + assert(pragmaTable == nullptr); + pragmaTable = new TPragmaTable; + *pragmaTable = pTable; +} + +// If either node is a specialization constant, while the other is +// a constant (or specialization constant), the result is still +// a specialization constant. +bool TIntermediate::specConstantPropagates(const TIntermTyped& node1, const TIntermTyped& node2) +{ + return (node1.getType().getQualifier().isSpecConstant() && node2.getType().getQualifier().isConstant()) || + (node2.getType().getQualifier().isSpecConstant() && node1.getType().getQualifier().isConstant()); +} + +struct TextureUpgradeAndSamplerRemovalTransform : public TIntermTraverser { + void visitSymbol(TIntermSymbol* symbol) override { + if (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isTexture()) { + symbol->getWritableType().getSampler().setCombined(true); + } + } + bool visitAggregate(TVisit, TIntermAggregate* ag) override { + using namespace std; + TIntermSequence& seq = ag->getSequence(); + TQualifierList& qual = ag->getQualifierList(); + + // qual and seq are indexed using the same indices, so we have to modify both in lock-step + assert(seq.size() == qual.size() || qual.empty()); + + size_t write = 0; + for (size_t i = 0; i < seq.size(); ++i) { + TIntermSymbol* symbol = seq[i]->getAsSymbolNode(); + if (symbol && symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isPureSampler()) { + // remove pure sampler variables + continue; + } + + TIntermNode* result = seq[i]; + + // replace constructors with sampler/textures + TIntermAggregate *constructor = seq[i]->getAsAggregate(); + if (constructor && constructor->getOp() == EOpConstructTextureSampler) { + if (!constructor->getSequence().empty()) + result = constructor->getSequence()[0]; + } + + // write new node & qualifier + seq[write] = result; + if (!qual.empty()) + qual[write] = qual[i]; + write++; + } + + seq.resize(write); + if (!qual.empty()) + qual.resize(write); + + return true; + } +}; + +void TIntermediate::performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root) +{ + TextureUpgradeAndSamplerRemovalTransform transform; + root->traverse(&transform); +} + +const char* TIntermediate::getResourceName(TResourceType res) +{ + switch (res) { + case EResSampler: return "shift-sampler-binding"; + case EResTexture: return "shift-texture-binding"; + case EResImage: return "shift-image-binding"; + case EResUbo: return "shift-UBO-binding"; + case EResSsbo: return "shift-ssbo-binding"; + case EResUav: return "shift-uav-binding"; + 
default: + assert(0); // internal error: should only be called with valid resource types. + return nullptr; + } +} + + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/LiveTraverser.h b/dep/glslang/glslang/MachineIndependent/LiveTraverser.h new file mode 100644 index 000000000..7333bc964 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/LiveTraverser.h @@ -0,0 +1,138 @@ +// +// Copyright (C) 2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#pragma once + +#include "../Include/Common.h" +#include "reflection.h" +#include "localintermediate.h" + +#include "gl_types.h" + +#include +#include + +namespace glslang { + +// +// The traverser: mostly pass through, except +// - processing function-call nodes to push live functions onto the stack of functions to process +// - processing selection nodes to trim semantically dead code +// +// This is in the glslang namespace directly so it can be a friend of TReflection. +// This can be derived from to implement reflection database traversers or +// binding mappers: anything that wants to traverse the live subset of the tree. +// + +class TLiveTraverser : public TIntermTraverser { +public: + TLiveTraverser(const TIntermediate& i, bool traverseAll = false, + bool preVisit = true, bool inVisit = false, bool postVisit = false) : + TIntermTraverser(preVisit, inVisit, postVisit), + intermediate(i), traverseAll(traverseAll) + { } + + // + // Given a function name, find its subroot in the tree, and push it onto the stack of + // functions left to process. 
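+    // (Traversal is typically seeded by pushing the entry point's function
+    // node first; everything reachable from it is then considered live.)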
+    //
+    void pushFunction(const TString& name)
+    {
+        TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
+        for (unsigned int f = 0; f < globals.size(); ++f) {
+            TIntermAggregate* candidate = globals[f]->getAsAggregate();
+            if (candidate && candidate->getOp() == EOpFunction && candidate->getName() == name) {
+                functions.push_back(candidate);
+                break;
+            }
+        }
+    }
+
+    typedef std::list<TIntermAggregate*> TFunctionStack;
+    TFunctionStack functions;
+
+protected:
+    // To catch which function calls are not dead, and hence which functions must be visited.
+    virtual bool visitAggregate(TVisit, TIntermAggregate* node)
+    {
+        if (!traverseAll)
+            if (node->getOp() == EOpFunctionCall)
+                addFunctionCall(node);
+
+        return true; // traverse this subtree
+    }
+
+    // To prune semantically dead paths.
+    virtual bool visitSelection(TVisit /* visit */, TIntermSelection* node)
+    {
+        if (traverseAll)
+            return true; // traverse all code
+
+        TIntermConstantUnion* constant = node->getCondition()->getAsConstantUnion();
+        if (constant) {
+            // cull the path that is dead
+            if (constant->getConstArray()[0].getBConst() == true && node->getTrueBlock())
+                node->getTrueBlock()->traverse(this);
+            if (constant->getConstArray()[0].getBConst() == false && node->getFalseBlock())
+                node->getFalseBlock()->traverse(this);
+
+            return false; // don't traverse any more, we did it all above
+        } else
+            return true; // traverse the whole subtree
+    }
+
+    // Track live functions as well as uniforms, so that we don't visit dead functions
+    // and only visit each function once.
+    void addFunctionCall(TIntermAggregate* call)
+    {
+        // just use the map to ensure we process each function at most once
+        if (liveFunctions.find(call->getName()) == liveFunctions.end()) {
+            liveFunctions.insert(call->getName());
+            pushFunction(call->getName());
+        }
+    }
+
+    const TIntermediate& intermediate;
+    typedef std::unordered_set<TString> TLiveFunctions;
+    TLiveFunctions liveFunctions;
+    bool traverseAll;
+
+private:
+    // prevent copy & copy construct
+    TLiveTraverser(TLiveTraverser&);
+    TLiveTraverser& operator=(TLiveTraverser&);
+};
+
+} // namespace glslang
diff --git a/dep/glslang/glslang/MachineIndependent/ParseContextBase.cpp b/dep/glslang/glslang/MachineIndependent/ParseContextBase.cpp
new file mode 100644
index 000000000..b46400914
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/ParseContextBase.cpp
@@ -0,0 +1,641 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// Implement the TParseContextBase class.
+
+#include <cstdarg>
+
+#include "ParseHelper.h"
+
+extern int yyparse(glslang::TParseContext*);
+
+namespace glslang {
+
+//
+// Used to output syntax, parsing, and semantic errors.
+//
+
+void TParseContextBase::outputMessage(const TSourceLoc& loc, const char* szReason,
+                                      const char* szToken,
+                                      const char* szExtraInfoFormat,
+                                      TPrefixType prefix, va_list args)
+{
+    const int maxSize = MaxTokenLength + 200;
+    char szExtraInfo[maxSize];
+
+    safe_vsprintf(szExtraInfo, maxSize, szExtraInfoFormat, args);
+
+    infoSink.info.prefix(prefix);
+    infoSink.info.location(loc);
+    infoSink.info << "'" << szToken << "' : " << szReason << " " << szExtraInfo << "\n";
+
+    if (prefix == EPrefixError) {
+        ++numErrors;
+    }
+}
+
+#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
+
+void C_DECL TParseContextBase::error(const TSourceLoc& loc, const char* szReason, const char* szToken,
+                                     const char* szExtraInfoFormat, ...)
+{
+    if (messages & EShMsgOnlyPreprocessor)
+        return;
+    va_list args;
+    va_start(args, szExtraInfoFormat);
+    outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
+    va_end(args);
+
+    if ((messages & EShMsgCascadingErrors) == 0)
+        currentScanner->setEndOfInput();
+}
+
+void C_DECL TParseContextBase::warn(const TSourceLoc& loc, const char* szReason, const char* szToken,
+                                    const char* szExtraInfoFormat, ...)
+{
+    if (suppressWarnings())
+        return;
+    va_list args;
+    va_start(args, szExtraInfoFormat);
+    outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
+    va_end(args);
+}
+
+void C_DECL TParseContextBase::ppError(const TSourceLoc& loc, const char* szReason, const char* szToken,
+                                       const char* szExtraInfoFormat, ...)
+{
+    va_list args;
+    va_start(args, szExtraInfoFormat);
+    outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
+    va_end(args);
+
+    if ((messages & EShMsgCascadingErrors) == 0)
+        currentScanner->setEndOfInput();
+}
+
+void C_DECL TParseContextBase::ppWarn(const TSourceLoc& loc, const char* szReason, const char* szToken,
+                                      const char* szExtraInfoFormat, ...)
+{
+    va_list args;
+    va_start(args, szExtraInfoFormat);
+    outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
+    va_end(args);
+}
+
+#endif
+
+//
+// Both test and, if necessary, spit out an error, to see if the node is really
+// an l-value that can be operated on this way.
+//
+// Returns true if there was an error.
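+// (For instance, 'uniform float u; u = 1.0;' reaches the EvqUniform case and is
+// reported as "can't modify a uniform", while a swizzled target such as 'v.xy'
+// recurses through the swizzle to validate the underlying symbol.)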
+// +bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) +{ + TIntermBinary* binaryNode = node->getAsBinaryNode(); + + if (binaryNode) { + switch(binaryNode->getOp()) { + case EOpIndexDirect: + case EOpIndexIndirect: // fall through + case EOpIndexDirectStruct: // fall through + case EOpVectorSwizzle: + case EOpMatrixSwizzle: + return lValueErrorCheck(loc, op, binaryNode->getLeft()); + default: + break; + } + error(loc, " l-value required", op, "", ""); + + return true; + } + + const char* symbol = nullptr; + TIntermSymbol* symNode = node->getAsSymbolNode(); + if (symNode != nullptr) + symbol = symNode->getName().c_str(); + + const char* message = nullptr; + switch (node->getQualifier().storage) { + case EvqConst: message = "can't modify a const"; break; + case EvqConstReadOnly: message = "can't modify a const"; break; + case EvqUniform: message = "can't modify a uniform"; break; +#ifndef GLSLANG_WEB + case EvqBuffer: + if (node->getQualifier().isReadOnly()) + message = "can't modify a readonly buffer"; + if (node->getQualifier().isShaderRecord()) + message = "can't modify a shaderrecordnv qualified buffer"; + break; + case EvqHitAttr: + if (language != EShLangIntersect) + message = "cannot modify hitAttributeNV in this stage"; + break; +#endif + + default: + // + // Type that can't be written to? + // + switch (node->getBasicType()) { + case EbtSampler: + message = "can't modify a sampler"; + break; + case EbtVoid: + message = "can't modify void"; + break; +#ifndef GLSLANG_WEB + case EbtAtomicUint: + message = "can't modify an atomic_uint"; + break; + case EbtAccStruct: + message = "can't modify accelerationStructureNV"; + break; + case EbtRayQuery: + message = "can't modify rayQueryEXT"; + break; +#endif + default: + break; + } + } + + if (message == nullptr && binaryNode == nullptr && symNode == nullptr) { + error(loc, " l-value required", op, "", ""); + + return true; + } + + // + // Everything else is okay, no error. + // + if (message == nullptr) + return false; + + // + // If we get here, we have an error and a message. + // + if (symNode) + error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message); + else + error(loc, " l-value required", op, "(%s)", message); + + return true; +} + +// Test for and give an error if the node can't be read from. +void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) +{ + if (! node) + return; + + TIntermBinary* binaryNode = node->getAsBinaryNode(); + if (binaryNode) { + switch(binaryNode->getOp()) { + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + case EOpVectorSwizzle: + case EOpMatrixSwizzle: + rValueErrorCheck(loc, op, binaryNode->getLeft()); + default: + break; + } + + return; + } + + TIntermSymbol* symNode = node->getAsSymbolNode(); + if (symNode && symNode->getQualifier().isWriteOnly()) + error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str()); +} + +// Add 'symbol' to the list of deferred linkage symbols, which +// are later processed in finish(), at which point the symbol +// must still be valid. +// It is okay if the symbol's type will be subsequently edited; +// the modifications will be tracked. +// Order is preserved, to avoid creating novel forward references. +void TParseContextBase::trackLinkage(TSymbol& symbol) +{ + if (!parsingBuiltins) + linkageSymbols.push_back(&symbol); +} + +// Ensure index is in bounds, correct if necessary. +// Give an error if not. 
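+// (e.g., indexing a vec3 with 3 is diagnosed and clamped to 2, and a negative
+// index is clamped to 0, so downstream code always sees a usable index.)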
+void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index)
+{
+    const auto sizeIsSpecializationExpression = [&type]() {
+        return type.containsSpecializationSize() &&
+               type.getArraySizes()->getOuterNode() != nullptr &&
+               type.getArraySizes()->getOuterNode()->getAsSymbolNode() == nullptr; };
+
+    if (index < 0) {
+        error(loc, "", "[", "index out of range '%d'", index);
+        index = 0;
+    } else if (type.isArray()) {
+        if (type.isSizedArray() && !sizeIsSpecializationExpression() &&
+            index >= type.getOuterArraySize()) {
+            error(loc, "", "[", "array index out of range '%d'", index);
+            index = type.getOuterArraySize() - 1;
+        }
+    } else if (type.isVector()) {
+        if (index >= type.getVectorSize()) {
+            error(loc, "", "[", "vector index out of range '%d'", index);
+            index = type.getVectorSize() - 1;
+        }
+    } else if (type.isMatrix()) {
+        if (index >= type.getMatrixCols()) {
+            error(loc, "", "[", "matrix index out of range '%d'", index);
+            index = type.getMatrixCols() - 1;
+        }
+    }
+}
+
+// Make a shared symbol have a non-shared version that can be edited by the current
+// compile, such that editing its type will not change the shared version and will
+// affect all nodes already sharing it (non-shallow type),
+// or adopting its full type after being edited (shallow type).
+void TParseContextBase::makeEditable(TSymbol*& symbol)
+{
+    // copyUp() does a deep copy of the type.
+    symbol = symbolTable.copyUp(symbol);
+
+    // Save it (deferred, so it can be edited first) in the AST for linker use.
+    if (symbol)
+        trackLinkage(*symbol);
+}
+
+// Return a writable version of the variable 'name'.
+//
+// Return nullptr if 'name' is not found. This should mean
+// something is seriously wrong (e.g., compiler asking self for
+// built-in that doesn't exist).
+TVariable* TParseContextBase::getEditableVariable(const char* name)
+{
+    bool builtIn;
+    TSymbol* symbol = symbolTable.find(name, &builtIn);
+
+    assert(symbol != nullptr);
+    if (symbol == nullptr)
+        return nullptr;
+
+    if (builtIn)
+        makeEditable(symbol);
+
+    return symbol->getAsVariable();
+}
+
+// Select the best matching function for 'call' from 'candidateList'.
+//
+// Assumptions
+//
+// There is no exact match, so a selection algorithm needs to run. That is, the
+// language-specific handler should check for exact match first, to
+// decide what to do, before calling this selector.
+//
+// Input
+//
+//  * list of candidate signatures to select from
+//  * the call
+//  * a predicate function convertible(from, to) that says whether or not type
+//    'from' can implicitly convert to type 'to' (it includes the case of what
+//    the calling language would consider a matching type with no conversion
+//    needed)
+//  * a predicate function better(from1, from2, to1, to2) that says whether or
+//    not a conversion from <-> to2 is considered better than a conversion
+//    from <-> to1 (both in and out directions need testing, as declared by the
+//    formal parameter)
+//
+// Output
+//
+//  * best matching candidate (or none, if no viable candidates found)
+//  * whether there was a tie for the best match (ambiguous overload selection,
+//    caller's choice for how to report)
+//
+const TFunction* TParseContextBase::selectFunction(
+    const TVector<const TFunction*> candidateList,
+    const TFunction& call,
+    std::function<bool(const TType&, const TType&, TOperator, int)> convertible,
+    std::function<bool(const TType&, const TType&, const TType&)> better,
+    /* output */ bool& tie)
+{
+//
+// Operation
+//
+// 1. Prune the input list of candidates down to a list of viable candidates,
+//    where each viable candidate has
+//
+//     * at least as many parameters as there are calling arguments, with any
+//       remaining parameters being optional or having default values
+//     * each parameter is true under convertible(A, B), where A is the calling
+//       type for in and B is the formal type, and in addition, for out B is the
+//       calling type and A is the formal type
+//
+// 2. If there are no viable candidates, return with no match.
+//
+// 3. If there is only one viable candidate, it is the best match.
+//
+// 4. If there are multiple viable candidates, select the first viable candidate
+// as the incumbent. Compare the incumbent to the next viable candidate, and if
+// that candidate is better (bullets below), make it the incumbent. Repeat, with
+// a linear walk through the viable candidate list. The final incumbent will be
+// returned as the best match. A viable candidate is better than the incumbent if
+//
+//  * it has a function argument with a better(...) conversion than the incumbent,
+//    for all directions needed by in and out
+//  * the incumbent has no argument with a better(...) conversion than the
+//    candidate, for either in or out (as needed)
+//
+// 5. Check for ambiguity by comparing the best match against all other viable
+// candidates. If any other viable candidate has a function argument with a
+// better(...) conversion than the best candidate (for either in or out
+// directions), return that there was a tie for best.
+//
+
+    tie = false;
+
+    // 1. prune to viable...
+    TVector<const TFunction*> viableCandidates;
+    for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
+        const TFunction& candidate = *(*it);
+
+        // to even be a potential match, number of arguments must be >= the number of
+        // fixed (non-default) parameters, and <= the total (including parameters with defaults).
+        if (call.getParamCount() < candidate.getFixedParamCount() ||
+            call.getParamCount() > candidate.getParamCount())
+            continue;
+
+        // see if arguments are convertible
+        bool viable = true;
+
+        // The call can have fewer parameters than the candidate, if some have defaults.
+        const int paramCount = std::min(call.getParamCount(), candidate.getParamCount());
+        for (int param = 0; param < paramCount; ++param) {
+            if (candidate[param].type->getQualifier().isParamInput()) {
+                if (! convertible(*call[param].type, *candidate[param].type, candidate.getBuiltInOp(), param)) {
+                    viable = false;
+                    break;
+                }
+            }
+            if (candidate[param].type->getQualifier().isParamOutput()) {
+                if (! convertible(*candidate[param].type, *call[param].type, candidate.getBuiltInOp(), param)) {
+                    viable = false;
+                    break;
+                }
+            }
+        }
+
+        if (viable)
+            viableCandidates.push_back(&candidate);
+    }
+
+    // 2. none viable...
+    if (viableCandidates.size() == 0)
+        return nullptr;
+
+    // 3. only one viable...
+    if (viableCandidates.size() == 1)
+        return viableCandidates.front();
+
+    // 4. find best...
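+    // (Sketch of the rule implemented below: a candidate replaces the incumbent
+    // only if some call argument converts better to the candidate's parameter
+    // and no argument converts better to the incumbent's.)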
+ const auto betterParam = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool { + // is call -> can2 better than call -> can1 for any parameter + bool hasBetterParam = false; + for (int param = 0; param < call.getParamCount(); ++param) { + if (better(*call[param].type, *can1[param].type, *can2[param].type)) { + hasBetterParam = true; + break; + } + } + return hasBetterParam; + }; + + const auto equivalentParams = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool { + // is call -> can2 equivalent to call -> can1 for all the call parameters? + for (int param = 0; param < call.getParamCount(); ++param) { + if (better(*call[param].type, *can1[param].type, *can2[param].type) || + better(*call[param].type, *can2[param].type, *can1[param].type)) + return false; + } + return true; + }; + + const TFunction* incumbent = viableCandidates.front(); + for (auto it = viableCandidates.begin() + 1; it != viableCandidates.end(); ++it) { + const TFunction& candidate = *(*it); + if (betterParam(*incumbent, candidate) && ! betterParam(candidate, *incumbent)) + incumbent = &candidate; + } + + // 5. ambiguity... + for (auto it = viableCandidates.begin(); it != viableCandidates.end(); ++it) { + if (incumbent == *it) + continue; + const TFunction& candidate = *(*it); + + // In the case of default parameters, it may have an identical initial set, which is + // also ambiguous + if (betterParam(*incumbent, candidate) || equivalentParams(*incumbent, candidate)) + tie = true; + } + + return incumbent; +} + +// +// Look at a '.' field selector string and change it into numerical selectors +// for a vector or scalar. +// +// Always return some form of swizzle, so the result is always usable. +// +void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TString& compString, int vecSize, + TSwizzleSelectors& selector) +{ + // Too long? + if (compString.size() > MaxSwizzleSelectors) + error(loc, "vector swizzle too long", compString.c_str(), ""); + + // Use this to test that all swizzle characters are from the same swizzle-namespace-set + enum { + exyzw, + ergba, + estpq, + } fieldSet[MaxSwizzleSelectors]; + + // Decode the swizzle string. + int size = std::min(MaxSwizzleSelectors, (int)compString.size()); + for (int i = 0; i < size; ++i) { + switch (compString[i]) { + case 'x': + selector.push_back(0); + fieldSet[i] = exyzw; + break; + case 'r': + selector.push_back(0); + fieldSet[i] = ergba; + break; + case 's': + selector.push_back(0); + fieldSet[i] = estpq; + break; + + case 'y': + selector.push_back(1); + fieldSet[i] = exyzw; + break; + case 'g': + selector.push_back(1); + fieldSet[i] = ergba; + break; + case 't': + selector.push_back(1); + fieldSet[i] = estpq; + break; + + case 'z': + selector.push_back(2); + fieldSet[i] = exyzw; + break; + case 'b': + selector.push_back(2); + fieldSet[i] = ergba; + break; + case 'p': + selector.push_back(2); + fieldSet[i] = estpq; + break; + + case 'w': + selector.push_back(3); + fieldSet[i] = exyzw; + break; + case 'a': + selector.push_back(3); + fieldSet[i] = ergba; + break; + case 'q': + selector.push_back(3); + fieldSet[i] = estpq; + break; + + default: + error(loc, "unknown swizzle selection", compString.c_str(), ""); + break; + } + } + + // Additional error checking. 
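+    // (e.g., 'v.xq' mixes the xyzw and stpq selector sets, and 'v.z' on a vec2
+    // is out of range; both are diagnosed below and the selector truncated.)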
+ for (int i = 0; i < selector.size(); ++i) { + if (selector[i] >= vecSize) { + error(loc, "vector swizzle selection out of range", compString.c_str(), ""); + selector.resize(i); + break; + } + + if (i > 0 && fieldSet[i] != fieldSet[i-1]) { + error(loc, "vector swizzle selectors not from the same set", compString.c_str(), ""); + selector.resize(i); + break; + } + } + + // Ensure it is valid. + if (selector.size() == 0) + selector.push_back(0); +} + +#ifdef ENABLE_HLSL +// +// Make the passed-in variable information become a member of the +// global uniform block. If this doesn't exist yet, make it. +// +void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& memberType, const TString& memberName, TTypeList* typeList) +{ + // Make the global block, if not yet made. + if (globalUniformBlock == nullptr) { + TQualifier blockQualifier; + blockQualifier.clear(); + blockQualifier.storage = EvqUniform; + TType blockType(new TTypeList, *NewPoolTString(getGlobalUniformBlockName()), blockQualifier); + setUniformBlockDefaults(blockType); + globalUniformBlock = new TVariable(NewPoolTString(""), blockType, true); + firstNewMember = 0; + } + + // Update with binding and set + globalUniformBlock->getWritableType().getQualifier().layoutBinding = globalUniformBinding; + globalUniformBlock->getWritableType().getQualifier().layoutSet = globalUniformSet; + + // Add the requested member as a member to the global block. + TType* type = new TType; + type->shallowCopy(memberType); + type->setFieldName(memberName); + if (typeList) + type->setStruct(typeList); + TTypeLoc typeLoc = {type, loc}; + globalUniformBlock->getType().getWritableStruct()->push_back(typeLoc); + + // Insert into the symbol table. + if (firstNewMember == 0) { + // This is the first request; we need a normal symbol table insert + if (symbolTable.insert(*globalUniformBlock)) + trackLinkage(*globalUniformBlock); + else + error(loc, "failed to insert the global constant buffer", "uniform", ""); + } else { + // This is a follow-on request; we need to amend the first insert + symbolTable.amend(*globalUniformBlock, firstNewMember); + } + + ++firstNewMember; +} +#endif + +void TParseContextBase::finish() +{ + if (parsingBuiltins) + return; + + // Transfer the linkage symbols to AST nodes, preserving order. + TIntermAggregate* linkage = new TIntermAggregate; + for (auto i = linkageSymbols.begin(); i != linkageSymbols.end(); ++i) + intermediate.addSymbolLinkageNode(linkage, **i); + intermediate.addSymbolLinkageNodes(linkage, getLanguage(), symbolTable); +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/ParseHelper.cpp b/dep/glslang/glslang/MachineIndependent/ParseHelper.cpp new file mode 100644 index 000000000..a35d41a05 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/ParseHelper.cpp @@ -0,0 +1,8578 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2015 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// Copyright (C) 2017, 2019 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "ParseHelper.h" +#include "Scan.h" + +#include "../OSDependent/osinclude.h" +#include <algorithm> + +#include "preprocessor/PpContext.h" + +extern int yyparse(glslang::TParseContext*); + +namespace glslang { + +TParseContext::TParseContext(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, + int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, + TInfoSink& infoSink, bool forwardCompatible, EShMessages messages, + const TString* entryPoint) : + TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language, + infoSink, forwardCompatible, messages, entryPoint), + inMain(false), + blockName(nullptr), + limits(resources.limits) +#ifndef GLSLANG_WEB + , + atomicUintOffsets(nullptr), anyIndexLimits(false) +#endif +{ + // decide whether precision qualifiers should be ignored or respected + if (isEsProfile() || spvVersion.vulkan > 0) { + precisionManager.respectPrecisionQualifiers(); + if (! parsingBuiltins && language == EShLangFragment && !isEsProfile() && spvVersion.vulkan > 0) + precisionManager.warnAboutDefaults(); + } + + setPrecisionDefaults(); + + globalUniformDefaults.clear(); + globalUniformDefaults.layoutMatrix = ElmColumnMajor; + globalUniformDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd140 : ElpShared; + + globalBufferDefaults.clear(); + globalBufferDefaults.layoutMatrix = ElmColumnMajor; + globalBufferDefaults.layoutPacking = spvVersion.spv != 0 ?
ElpStd430 : ElpShared; + + // use storage buffer on SPIR-V 1.3 and up + if (spvVersion.spv >= EShTargetSpv_1_3) + intermediate.setUseStorageBuffer(); + + globalInputDefaults.clear(); + globalOutputDefaults.clear(); + +#ifndef GLSLANG_WEB + // "Shaders in the transform + // feedback capturing mode have an initial global default of + // layout(xfb_buffer = 0) out;" + if (language == EShLangVertex || + language == EShLangTessControl || + language == EShLangTessEvaluation || + language == EShLangGeometry) + globalOutputDefaults.layoutXfbBuffer = 0; + + if (language == EShLangGeometry) + globalOutputDefaults.layoutStream = 0; +#endif + + if (entryPoint != nullptr && entryPoint->size() > 0 && *entryPoint != "main") + infoSink.info.message(EPrefixError, "Source entry point must be \"main\""); +} + +TParseContext::~TParseContext() +{ +#ifndef GLSLANG_WEB + delete [] atomicUintOffsets; +#endif +} + +// Set up all default precisions as needed by the current environment. +// Intended just as a TParseContext constructor helper. +void TParseContext::setPrecisionDefaults() +{ + // Set all precision defaults to EpqNone, which is correct for all types + // when not obeying precision qualifiers, and correct for types that don't + // have defaults (thus getting an error on use) when obeying precision + // qualifiers. + + for (int type = 0; type < EbtNumTypes; ++type) + defaultPrecision[type] = EpqNone; + + for (int type = 0; type < maxSamplerIndex; ++type) + defaultSamplerPrecision[type] = EpqNone; + + // replace with real precision defaults for those that have them + if (obeyPrecisionQualifiers()) { + if (isEsProfile()) { + // Most don't have defaults, a few default to lowp. + TSampler sampler; + sampler.set(EbtFloat, Esd2D); + defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; + sampler.set(EbtFloat, EsdCube); + defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; + sampler.set(EbtFloat, Esd2D); + sampler.setExternal(true); + defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; + } + + // If we are parsing built-in computational variables/functions, it is meaningful to record + // whether the built-in has no precision qualifier, as that ambiguity + // is used to resolve the precision from the supplied arguments/operands instead. + // So, we don't actually want to replace EpqNone with a default precision for built-ins. + if (! parsingBuiltins) { + if (isEsProfile() && language == EShLangFragment) { + defaultPrecision[EbtInt] = EpqMedium; + defaultPrecision[EbtUint] = EpqMedium; + } else { + defaultPrecision[EbtInt] = EpqHigh; + defaultPrecision[EbtUint] = EpqHigh; + defaultPrecision[EbtFloat] = EpqHigh; + } + + if (!isEsProfile()) { + // Non-ES profile + // All sampler precisions default to highp. + for (int type = 0; type < maxSamplerIndex; ++type) + defaultSamplerPrecision[type] = EpqHigh; + } + } + + defaultPrecision[EbtSampler] = EpqLow; + defaultPrecision[EbtAtomicUint] = EpqHigh; + } +} + +void TParseContext::setLimits(const TBuiltInResource& r) +{ + resources = r; + intermediate.setLimits(r); + +#ifndef GLSLANG_WEB + anyIndexLimits = ! limits.generalAttributeMatrixVectorIndexing || + ! limits.generalConstantMatrixVectorIndexing || + ! limits.generalSamplerIndexing || + ! limits.generalUniformIndexing || + ! limits.generalVariableIndexing || + ! limits.generalVaryingIndexing; + + + // "Each binding point tracks its own current default offset for + // inheritance of subsequent variables using the same binding. 
The initial state of compilation is that all + // binding points have an offset of 0." + atomicUintOffsets = new int[resources.maxAtomicCounterBindings]; + for (int b = 0; b < resources.maxAtomicCounterBindings; ++b) + atomicUintOffsets[b] = 0; +#endif +} + +// +// Parse an array of strings using yyparse, going through the +// preprocessor to tokenize the shader strings, then through +// the GLSL scanner. +// +// Returns true for successful acceptance of the shader, false if any errors. +// +bool TParseContext::parseShaderStrings(TPpContext& ppContext, TInputScanner& input, bool versionWillBeError) +{ + currentScanner = &input; + ppContext.setInput(input, versionWillBeError); + yyparse(this); + + finish(); + + return numErrors == 0; +} + +// This is called from bison when it has a parse (syntax) error. +// Note though that, to stop cascading errors, we set EOF, which +// will usually cause a syntax error; so, to be more accurate, report that +// compilation is terminating. +void TParseContext::parserError(const char* s) +{ + if (! getScanner()->atEndOfInput() || numErrors == 0) + error(getCurrentLoc(), "", "", s, ""); + else + error(getCurrentLoc(), "compilation terminated", "", ""); +} + +void TParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>& tokens) +{ +#ifndef GLSLANG_WEB + if (pragmaCallback) + pragmaCallback(loc.line, tokens); + + if (tokens.size() == 0) + return; + + if (tokens[0].compare("optimize") == 0) { + if (tokens.size() != 4) { + error(loc, "optimize pragma syntax is incorrect", "#pragma", ""); + return; + } + + if (tokens[1].compare("(") != 0) { + error(loc, "\"(\" expected after 'optimize' keyword", "#pragma", ""); + return; + } + + if (tokens[2].compare("on") == 0) + contextPragma.optimize = true; + else if (tokens[2].compare("off") == 0) + contextPragma.optimize = false; + else { + error(loc, "\"on\" or \"off\" expected after '(' for 'optimize' pragma", "#pragma", ""); + return; + } + + if (tokens[3].compare(")") != 0) { + error(loc, "\")\" expected to end 'optimize' pragma", "#pragma", ""); + return; + } + } else if (tokens[0].compare("debug") == 0) { + if (tokens.size() != 4) { + error(loc, "debug pragma syntax is incorrect", "#pragma", ""); + return; + } + + if (tokens[1].compare("(") != 0) { + error(loc, "\"(\" expected after 'debug' keyword", "#pragma", ""); + return; + } + + if (tokens[2].compare("on") == 0) + contextPragma.debug = true; + else if (tokens[2].compare("off") == 0) + contextPragma.debug = false; + else { + error(loc, "\"on\" or \"off\" expected after '(' for 'debug' pragma", "#pragma", ""); + return; + } + + if (tokens[3].compare(")") != 0) { + error(loc, "\")\" expected to end 'debug' pragma", "#pragma", ""); + return; + } + } else if (spvVersion.spv > 0 && tokens[0].compare("use_storage_buffer") == 0) { + if (tokens.size() != 1) + error(loc, "extra tokens", "#pragma", ""); + intermediate.setUseStorageBuffer(); + } else if (spvVersion.spv > 0 && tokens[0].compare("use_vulkan_memory_model") == 0) { + if (tokens.size() != 1) + error(loc, "extra tokens", "#pragma", ""); + intermediate.setUseVulkanMemoryModel(); + } else if (spvVersion.spv > 0 && tokens[0].compare("use_variable_pointers") == 0) { + if (tokens.size() != 1) + error(loc, "extra tokens", "#pragma", ""); + if (spvVersion.spv < glslang::EShTargetSpv_1_3) + error(loc, "requires SPIR-V 1.3", "#pragma use_variable_pointers", ""); + intermediate.setUseVariablePointers(); + } else if (tokens[0].compare("once") == 0) { + warn(loc, "not implemented", "#pragma once", ""); + } else if
(tokens[0].compare("glslang_binary_double_output") == 0) + intermediate.setBinaryDoubleOutput(); +#endif +} + +// +// Handle seeing a variable identifier in the grammar. +// +TIntermTyped* TParseContext::handleVariable(const TSourceLoc& loc, TSymbol* symbol, const TString* string) +{ + TIntermTyped* node = nullptr; + + // Error check for requiring specific extensions present. + if (symbol && symbol->getNumExtensions()) + requireExtensions(loc, symbol->getNumExtensions(), symbol->getExtensions(), symbol->getName().c_str()); + +#ifndef GLSLANG_WEB + if (symbol && symbol->isReadOnly()) { + // All shared things containing an unsized array must be copied up + // on first use, so that all future references will share its array structure, + // so that editing the implicit size will affect all nodes consuming it, + // and so that editing the implicit size won't change the shared one. + // + // If this is a variable or a block, check it and all it contains, but if this + // is a member of an anonymous block, check the whole block, as the whole block + // will need to be copied up if it contains an unsized array. + // + // This check is being done before the block-name check further down, so guard + // for that too. + if (!symbol->getType().isUnusableName()) { + if (symbol->getType().containsUnsizedArray() || + (symbol->getAsAnonMember() && + symbol->getAsAnonMember()->getAnonContainer().getType().containsUnsizedArray())) + makeEditable(symbol); + } + } +#endif + + const TVariable* variable; + const TAnonMember* anon = symbol ? symbol->getAsAnonMember() : nullptr; + if (anon) { + // It was a member of an anonymous container. + + // Create a subtree for its dereference. + variable = anon->getAnonContainer().getAsVariable(); + TIntermTyped* container = intermediate.addSymbol(*variable, loc); + TIntermTyped* constNode = intermediate.addConstantUnion(anon->getMemberNumber(), loc); + node = intermediate.addIndex(EOpIndexDirectStruct, container, constNode, loc); + + node->setType(*(*variable->getType().getStruct())[anon->getMemberNumber()].type); + if (node->getType().hiddenMember()) + error(loc, "member of nameless block was not redeclared", string->c_str(), ""); + } else { + // Not a member of an anonymous container. + + // The symbol table search was done in the lexical phase. + // See if it was a variable. + variable = symbol ? symbol->getAsVariable() : nullptr; + if (variable) { + if (variable->getType().isUnusableName()) { + error(loc, "cannot be used (maybe an instance name is needed)", string->c_str(), ""); + variable = nullptr; + } + } else { + if (symbol) + error(loc, "variable name expected", string->c_str(), ""); + } + + // Recovery, if it wasn't found or was not a variable. + if (! variable) + variable = new TVariable(string, TType(EbtVoid)); + + if (variable->getType().getQualifier().isFrontEndConstant()) + node = intermediate.addConstantUnion(variable->getConstArray(), variable->getType(), loc); + else + node = intermediate.addSymbol(*variable, loc); + } + + if (variable->getType().getQualifier().isIo()) + intermediate.addIoAccessed(*string); + + if (variable->getType().isReference() && + variable->getType().getQualifier().bufferReferenceNeedsVulkanMemoryModel()) { + intermediate.setUseVulkanMemoryModel(); + } + + return node; +} + +// +// Handle seeing a base[index] dereference in the grammar.
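The function below resolves a[b] along three different paths depending on which operands are front-end constants. A hypothetical classifier summarizing that decision (illustrative names, not glslang API):

```cpp
// Mirrors the three paths handleBracketDereference takes below: fold the
// access at compile time, emit a direct index, or emit an indirect index.
enum class IndexKind { FoldNow, Direct, Indirect };

static IndexKind classifyIndex(bool baseIsFrontEndConst, bool indexIsFrontEndConst)
{
    if (baseIsFrontEndConst && indexIsFrontEndConst)
        return IndexKind::FoldNow;     // foldDereference(), no index node emitted
    if (indexIsFrontEndConst)
        return IndexKind::Direct;      // EOpIndexDirect, bounds-checked now
    return IndexKind::Indirect;        // EOpIndexIndirect, checked at runtime
}
```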
+// +TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index) +{ + int indexValue = 0; + if (index->getQualifier().isFrontEndConstant()) + indexValue = index->getAsConstantUnion()->getConstArray()[0].getIConst(); + + // basic type checks... + variableCheck(base); + + if (! base->isArray() && ! base->isMatrix() && ! base->isVector() && ! base->getType().isCoopMat() && + ! base->isReference()) { + if (base->getAsSymbolNode()) + error(loc, " left of '[' is not of type array, matrix, or vector ", base->getAsSymbolNode()->getName().c_str(), ""); + else + error(loc, " left of '[' is not of type array, matrix, or vector ", "expression", ""); + + // Insert dummy error-recovery result + return intermediate.addConstantUnion(0.0, EbtFloat, loc); + } + + if (!base->isArray() && base->isVector()) { + if (base->getType().contains16BitFloat()) + requireFloat16Arithmetic(loc, "[", "does not operate on types containing float16"); + if (base->getType().contains16BitInt()) + requireInt16Arithmetic(loc, "[", "does not operate on types containing (u)int16"); + if (base->getType().contains8BitInt()) + requireInt8Arithmetic(loc, "[", "does not operate on types containing (u)int8"); + } + + // check for constant folding + if (base->getType().getQualifier().isFrontEndConstant() && index->getQualifier().isFrontEndConstant()) { + // both base and index are front-end constants + checkIndex(loc, base->getType(), indexValue); + return intermediate.foldDereference(base, indexValue, loc); + } + + // at least one of base and index is not a front-end constant variable... + TIntermTyped* result = nullptr; + +#ifndef GLSLANG_WEB + if (base->isReference() && ! base->isArray()) { + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference indexing"); + if (base->getType().getReferentType()->containsUnsizedArray()) { + error(loc, "cannot index reference to buffer containing an unsized array", "", ""); + result = nullptr; + } else { + result = intermediate.addBinaryMath(EOpAdd, base, index, loc); + if (result != nullptr) + result->setType(base->getType()); + } + if (result == nullptr) { + error(loc, "cannot index buffer reference", "", ""); + result = intermediate.addConstantUnion(0.0, EbtFloat, loc); + } + return result; + } + if (base->getAsSymbolNode() && isIoResizeArray(base->getType())) + handleIoResizeArrayAccess(loc, base); +#endif + + if (index->getQualifier().isFrontEndConstant()) + checkIndex(loc, base->getType(), indexValue); + + if (index->getQualifier().isFrontEndConstant()) { +#ifndef GLSLANG_WEB + if (base->getType().isUnsizedArray()) { + base->getWritableType().updateImplicitArraySize(indexValue + 1); + // For 2D per-view builtin arrays, update the inner dimension size in parent type + if (base->getQualifier().isPerView() && base->getQualifier().builtIn != EbvNone) { + TIntermBinary* binaryNode = base->getAsBinaryNode(); + if (binaryNode) { + TType& leftType = binaryNode->getLeft()->getWritableType(); + TArraySizes& arraySizes = *leftType.getArraySizes(); + assert(arraySizes.getNumDims() == 2); + arraySizes.setDimSize(1, std::max(arraySizes.getDimSize(1), indexValue + 1)); + } + } + } else +#endif + checkIndex(loc, base->getType(), indexValue); + result = intermediate.addIndex(EOpIndexDirect, base, index, loc); + } else { +#ifndef GLSLANG_WEB + if (base->getType().isUnsizedArray()) { + // we have a variable index into an unsized array, which is okay, + // depending on the situation + if (base->getAsSymbolNode() && 
isIoResizeArray(base->getType())) + error(loc, "", "[", "array must be sized by a redeclaration or layout qualifier before being indexed with a variable"); + else { + // it is okay for a run-time sized array + checkRuntimeSizable(loc, *base); + } + base->getWritableType().setArrayVariablyIndexed(); + } +#endif + if (base->getBasicType() == EbtBlock) { + if (base->getQualifier().storage == EvqBuffer) + requireProfile(base->getLoc(), ~EEsProfile, "variable indexing buffer block array"); + else if (base->getQualifier().storage == EvqUniform) + profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, + "variable indexing uniform block array"); + else { + // input/output blocks either don't exist or can't be variably indexed + } + } else if (language == EShLangFragment && base->getQualifier().isPipeOutput()) + requireProfile(base->getLoc(), ~EEsProfile, "variable indexing fragment shader output array"); + else if (base->getBasicType() == EbtSampler && version >= 130) { + const char* explanation = "variable indexing sampler array"; + requireProfile(base->getLoc(), EEsProfile | ECoreProfile | ECompatibilityProfile, explanation); + profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, explanation); + profileRequires(base->getLoc(), ECoreProfile | ECompatibilityProfile, 400, nullptr, explanation); + } + + result = intermediate.addIndex(EOpIndexIndirect, base, index, loc); + } + + // Insert valid dereferenced result type + TType newType(base->getType(), 0); + if (base->getType().getQualifier().isConstant() && index->getQualifier().isConstant()) { + newType.getQualifier().storage = EvqConst; + // If base or index is a specialization constant, the result should also be a specialization constant. + if (base->getType().getQualifier().isSpecConstant() || index->getQualifier().isSpecConstant()) { + newType.getQualifier().makeSpecConstant(); + } + } else { + newType.getQualifier().storage = EvqTemporary; + newType.getQualifier().specConstant = false; + } + result->setType(newType); + +#ifndef GLSLANG_WEB + inheritMemoryQualifiers(base->getQualifier(), result->getWritableType().getQualifier()); + + // Propagate nonuniform + if (base->getQualifier().isNonUniform() || index->getQualifier().isNonUniform()) + result->getWritableType().getQualifier().nonUniform = true; + + if (anyIndexLimits) + handleIndexLimits(loc, base, index); +#endif + + return result; +} + +#ifndef GLSLANG_WEB + +// for ES 2.0 (version 100) limitations for almost all index operations except vertex-shader uniforms +void TParseContext::handleIndexLimits(const TSourceLoc& /*loc*/, TIntermTyped* base, TIntermTyped* index) +{ + if ((! limits.generalSamplerIndexing && base->getBasicType() == EbtSampler) || + (! limits.generalUniformIndexing && base->getQualifier().isUniformOrBuffer() && language != EShLangVertex) || + (! limits.generalAttributeMatrixVectorIndexing && base->getQualifier().isPipeInput() && language == EShLangVertex && (base->getType().isMatrix() || base->getType().isVector())) || + (! limits.generalConstantMatrixVectorIndexing && base->getAsConstantUnion()) || + (! limits.generalVariableIndexing && ! base->getType().getQualifier().isUniformOrBuffer() && + ! base->getType().getQualifier().isPipeInput() && + ! base->getType().getQualifier().isPipeOutput() && + ! base->getType().getQualifier().isConstant()) || + (! 
limits.generalVaryingIndexing && (base->getType().getQualifier().isPipeInput() || + base->getType().getQualifier().isPipeOutput()))) { + // it's too early to know what the inductive variables are, save it for post processing + needsIndexLimitationChecking.push_back(index); + } +} + +// Make a shared symbol have a non-shared version that can be edited by the current +// compile, such that editing its type will not change the shared version and will +// affect all nodes sharing it. +void TParseContext::makeEditable(TSymbol*& symbol) +{ + TParseContextBase::makeEditable(symbol); + + // See if it's tied to IO resizing + if (isIoResizeArray(symbol->getType())) + ioArraySymbolResizeList.push_back(symbol); +} + +// Return true if this is a geometry shader input array or tessellation control output array +// or mesh shader output array. +bool TParseContext::isIoResizeArray(const TType& type) const +{ + return type.isArray() && + ((language == EShLangGeometry && type.getQualifier().storage == EvqVaryingIn) || + (language == EShLangTessControl && type.getQualifier().storage == EvqVaryingOut && + ! type.getQualifier().patch) || + (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn && + type.getQualifier().pervertexNV) || + (language == EShLangMeshNV && type.getQualifier().storage == EvqVaryingOut && + !type.getQualifier().perTaskNV)); +} + +// If an array is not isIoResizeArray() but is an io array, make sure it has the right size +void TParseContext::fixIoArraySize(const TSourceLoc& loc, TType& type) +{ + if (! type.isArray() || type.getQualifier().patch || symbolTable.atBuiltInLevel()) + return; + + assert(! isIoResizeArray(type)); + + if (type.getQualifier().storage != EvqVaryingIn || type.getQualifier().patch) + return; + + if (language == EShLangTessControl || language == EShLangTessEvaluation) { + if (type.getOuterArraySize() != resources.maxPatchVertices) { + if (type.isSizedArray()) + error(loc, "tessellation input array size must be gl_MaxPatchVertices or implicitly sized", "[]", ""); + type.changeOuterArraySize(resources.maxPatchVertices); + } + } +} + +// Issue any errors if the non-array object is missing arrayness WRT +// shader I/O that has array requirements. +// All arrayness checking is handled in array paths, this is for the non-array case. +void TParseContext::ioArrayCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) +{ + if (! type.isArray() && ! symbolTable.atBuiltInLevel()) { + if (type.getQualifier().isArrayedIo(language) && !type.getQualifier().layoutPassthrough) + error(loc, "type must be an array:", type.getStorageQualifierString(), identifier.c_str()); + } +} + +// Handle a dereference of a geometry shader input array or tessellation control output array. +// See ioArraySymbolResizeList comment in ParseHelper.h. +// +void TParseContext::handleIoResizeArrayAccess(const TSourceLoc& /*loc*/, TIntermTyped* base) +{ + TIntermSymbol* symbolNode = base->getAsSymbolNode(); + assert(symbolNode); + if (! symbolNode) + return; + + // fix array size, if it can be fixed and needs to be fixed (will allow variable indexing) + if (symbolNode->getType().isUnsizedArray()) { + int newSize = getIoArrayImplicitSize(symbolNode->getType().getQualifier()); + if (newSize > 0) + symbolNode->getWritableType().changeOuterArraySize(newSize); + } +} + +// If there has been an input primitive declaration (geometry shader) or an output +// number of vertices declaration (tessellation shader), make sure all input array types +// match it in size.
Types come either from nodes in the AST or symbols in the +// symbol table. +// +// Types without an array size will be given one. +// Types already having a size that is wrong will get an error. +// +void TParseContext::checkIoArraysConsistency(const TSourceLoc &loc, bool tailOnly) +{ + int requiredSize = 0; + TString featureString; + size_t listSize = ioArraySymbolResizeList.size(); + size_t i = 0; + + // If tailOnly = true, only check the last array symbol in the list. + if (tailOnly) { + i = listSize - 1; + } + for (bool firstIteration = true; i < listSize; ++i) { + TType &type = ioArraySymbolResizeList[i]->getWritableType(); + + // As I/O array sizes don't change, fetch requiredSize only once, + // except for mesh shaders which could have different I/O array sizes based on type qualifiers. + if (firstIteration || (language == EShLangMeshNV)) { + requiredSize = getIoArrayImplicitSize(type.getQualifier(), &featureString); + if (requiredSize == 0) + break; + firstIteration = false; + } + + checkIoArrayConsistency(loc, requiredSize, featureString.c_str(), type, + ioArraySymbolResizeList[i]->getName()); + } +} + +int TParseContext::getIoArrayImplicitSize(const TQualifier &qualifier, TString *featureString) const +{ + int expectedSize = 0; + TString str = "unknown"; + unsigned int maxVertices = intermediate.getVertices() != TQualifier::layoutNotSet ? intermediate.getVertices() : 0; + + if (language == EShLangGeometry) { + expectedSize = TQualifier::mapGeometryToSize(intermediate.getInputPrimitive()); + str = TQualifier::getGeometryString(intermediate.getInputPrimitive()); + } + else if (language == EShLangTessControl) { + expectedSize = maxVertices; + str = "vertices"; + } else if (language == EShLangFragment) { + // Number of vertices for Fragment shader is always three. + expectedSize = 3; + str = "vertices"; + } else if (language == EShLangMeshNV) { + unsigned int maxPrimitives = + intermediate.getPrimitives() != TQualifier::layoutNotSet ? intermediate.getPrimitives() : 0; + if (qualifier.builtIn == EbvPrimitiveIndicesNV) { + expectedSize = maxPrimitives * TQualifier::mapGeometryToSize(intermediate.getOutputPrimitive()); + str = "max_primitives*"; + str += TQualifier::getGeometryString(intermediate.getOutputPrimitive()); + } + else if (qualifier.isPerPrimitive()) { + expectedSize = maxPrimitives; + str = "max_primitives"; + } + else { + expectedSize = maxVertices; + str = "max_vertices"; + } + } + if (featureString) + *featureString = str; + return expectedSize; +} + +void TParseContext::checkIoArrayConsistency(const TSourceLoc& loc, int requiredSize, const char* feature, TType& type, const TString& name) +{ + if (type.isUnsizedArray()) + type.changeOuterArraySize(requiredSize); + else if (type.getOuterArraySize() != requiredSize) { + if (language == EShLangGeometry) + error(loc, "inconsistent input primitive for array size of", feature, name.c_str()); + else if (language == EShLangTessControl) + error(loc, "inconsistent output number of vertices for array size of", feature, name.c_str()); + else if (language == EShLangFragment) { + if (type.getOuterArraySize() > requiredSize) + error(loc, " cannot be greater than 3 for pervertexNV", feature, name.c_str()); + } + else if (language == EShLangMeshNV) + error(loc, "inconsistent output array size of", feature, name.c_str()); + else + assert(0); + } +} + +#endif // GLSLANG_WEB + +// Handle seeing a binary node with a math operation. +// Returns nullptr if not semantically allowed. 
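One restriction the function after this comment enforces is that the relational operators only accept scalar operands; GLSL offers lessThan(), greaterThan(), and friends for component-wise vector comparison instead. A compact sketch of that guard, with hypothetical stand-in names:

```cpp
// Stand-in operator tags mirroring the scalar-only guard on <, >, <=, >=
// in handleBinaryMath; other binary ops defer to the intermediate builder.
enum Op { OpLessThan, OpGreaterThan, OpLessThanEqual, OpGreaterThanEqual, OpAdd, OpMul };

static bool binaryOperandsAllowed(Op op, bool leftIsScalar, bool rightIsScalar)
{
    switch (op) {
    case OpLessThan:
    case OpGreaterThan:
    case OpLessThanEqual:
    case OpGreaterThanEqual:
        return leftIsScalar && rightIsScalar;   // e.g. vec3 < vec3 is rejected
    default:
        return true;
    }
}
```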
+TIntermTyped* TParseContext::handleBinaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right) +{ + rValueErrorCheck(loc, str, left->getAsTyped()); + rValueErrorCheck(loc, str, right->getAsTyped()); + + bool allowed = true; + switch (op) { + // TODO: Bring more source language-specific checks up from intermediate.cpp + // to the specific parse helpers for that source language. + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + if (! left->isScalar() || ! right->isScalar()) + allowed = false; + break; + default: + break; + } + + if (((left->getType().contains16BitFloat() || right->getType().contains16BitFloat()) && !float16Arithmetic()) || + ((left->getType().contains16BitInt() || right->getType().contains16BitInt()) && !int16Arithmetic()) || + ((left->getType().contains8BitInt() || right->getType().contains8BitInt()) && !int8Arithmetic())) { + allowed = false; + } + + TIntermTyped* result = nullptr; + if (allowed) + result = intermediate.addBinaryMath(op, left, right, loc); + + if (result == nullptr) + binaryOpError(loc, str, left->getCompleteString(), right->getCompleteString()); + + return result; +} + +// Handle seeing a unary node with a math operation. +TIntermTyped* TParseContext::handleUnaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* childNode) +{ + rValueErrorCheck(loc, str, childNode); + + bool allowed = true; + if ((childNode->getType().contains16BitFloat() && !float16Arithmetic()) || + (childNode->getType().contains16BitInt() && !int16Arithmetic()) || + (childNode->getType().contains8BitInt() && !int8Arithmetic())) { + allowed = false; + } + + TIntermTyped* result = nullptr; + if (allowed) + result = intermediate.addUnaryMath(op, childNode, loc); + + if (result) + return result; + else + unaryOpError(loc, str, childNode->getCompleteString()); + + return childNode; +} + +// +// Handle seeing a base.field dereference in the grammar. +// +TIntermTyped* TParseContext::handleDotDereference(const TSourceLoc& loc, TIntermTyped* base, const TString& field) +{ + variableCheck(base); + + // + // .length() can't be resolved until we later see the function-calling syntax. + // Save away the name in the AST for now. Processing is completed in + // handleLengthMethod(). + // + if (field == "length") { + if (base->isArray()) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, ".length"); + profileRequires(loc, EEsProfile, 300, nullptr, ".length"); + } else if (base->isVector() || base->isMatrix()) { + const char* feature = ".length() on vectors and matrices"; + requireProfile(loc, ~EEsProfile, feature); + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, feature); + } else if (!base->getType().isCoopMat()) { + error(loc, "does not operate on this type:", field.c_str(), base->getType().getCompleteString().c_str()); + + return base; + } + + return intermediate.addMethod(base, TType(EbtInt), &field, loc); + } + + // It's not .length() if we get to here. + + if (base->isArray()) { + error(loc, "cannot apply to an array:", ".", field.c_str()); + + return base; + } + + if (base->getType().isCoopMat()) { + error(loc, "cannot apply to a cooperative matrix type:", ".", field.c_str()); + return base; + } + + // It's neither an array nor .length() if we get here, + // leaving swizzles and struct/block dereferences. 
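In the struct/block path that follows, member resolution is a plain linear scan over the type's field list. A minimal standalone version, with a hypothetical Field record in place of glslang's TTypeList:

```cpp
#include <string>
#include <vector>

// Hypothetical field record; glslang walks a TTypeList the same way.
struct Field { std::string name; };

// Returns the member index used to build an EOpIndexDirectStruct node,
// or -1 for the "no such field in structure" error path.
static int findMember(const std::vector<Field>& fields, const std::string& field)
{
    for (int member = 0; member < (int)fields.size(); ++member)
        if (fields[member].name == field)
            return member;
    return -1;
}
```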
+ + TIntermTyped* result = base; + if ((base->isVector() || base->isScalar()) && + (base->isFloatingDomain() || base->isIntegerDomain() || base->getBasicType() == EbtBool)) { + result = handleDotSwizzle(loc, base, field); + } else if (base->isStruct() || base->isReference()) { + const TTypeList* fields = base->isReference() ? + base->getType().getReferentType()->getStruct() : + base->getType().getStruct(); + bool fieldFound = false; + int member; + for (member = 0; member < (int)fields->size(); ++member) { + if ((*fields)[member].type->getFieldName() == field) { + fieldFound = true; + break; + } + } + if (fieldFound) { + if (base->getType().getQualifier().isFrontEndConstant()) + result = intermediate.foldDereference(base, member, loc); + else { + blockMemberExtensionCheck(loc, base, member, field); + TIntermTyped* index = intermediate.addConstantUnion(member, loc); + result = intermediate.addIndex(EOpIndexDirectStruct, base, index, loc); + result->setType(*(*fields)[member].type); + if ((*fields)[member].type->getQualifier().isIo()) + intermediate.addIoAccessed(field); + } + inheritMemoryQualifiers(base->getQualifier(), result->getWritableType().getQualifier()); + } else + error(loc, "no such field in structure", field.c_str(), ""); + } else + error(loc, "does not apply to this type:", field.c_str(), base->getType().getCompleteString().c_str()); + + // Propagate noContraction up the dereference chain + if (base->getQualifier().isNoContraction()) + result->getWritableType().getQualifier().setNoContraction(); + + // Propagate nonuniform + if (base->getQualifier().isNonUniform()) + result->getWritableType().getQualifier().nonUniform = true; + + return result; +} + +// +// Handle seeing a base.swizzle, a subset of base.identifier in the grammar. +// +TIntermTyped* TParseContext::handleDotSwizzle(const TSourceLoc& loc, TIntermTyped* base, const TString& field) +{ + TIntermTyped* result = base; + if (base->isScalar()) { + const char* dotFeature = "scalar swizzle"; + requireProfile(loc, ~EEsProfile, dotFeature); + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, dotFeature); + } + + TSwizzleSelectors<TVectorSelector> selectors; + parseSwizzleSelector(loc, field, base->getVectorSize(), selectors); + + if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitFloat()) + requireFloat16Arithmetic(loc, ".", "can't swizzle types containing float16"); + if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitInt()) + requireInt16Arithmetic(loc, ".", "can't swizzle types containing (u)int16"); + if (base->isVector() && selectors.size() != 1 && base->getType().contains8BitInt()) + requireInt8Arithmetic(loc, ".", "can't swizzle types containing (u)int8"); + + if (base->isScalar()) { + if (selectors.size() == 1) + return result; + else { + TType type(base->getBasicType(), EvqTemporary, selectors.size()); + // Swizzle operations propagate specialization-constantness + if (base->getQualifier().isSpecConstant()) + type.getQualifier().makeSpecConstant(); + return addConstructor(loc, base, type); + } + } + + if (base->getType().getQualifier().isFrontEndConstant()) + result = intermediate.foldSwizzle(base, selectors, loc); + else { + if (selectors.size() == 1) { + TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc); + result = intermediate.addIndex(EOpIndexDirect, base, index, loc); + result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision)); + } else { + TIntermTyped* index =
intermediate.addSwizzle(selectors, loc); + result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc); + result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision, selectors.size())); + } + // Swizzle operations propagate specialization-constantness + if (base->getType().getQualifier().isSpecConstant()) + result->getWritableType().getQualifier().makeSpecConstant(); + } + + return result; +} + +void TParseContext::blockMemberExtensionCheck(const TSourceLoc& loc, const TIntermTyped* base, int member, const TString& memberName) +{ + // a block that needs extension checking is either 'base', or if arrayed, + // one level removed to the left + const TIntermSymbol* baseSymbol = nullptr; + if (base->getAsBinaryNode() == nullptr) + baseSymbol = base->getAsSymbolNode(); + else + baseSymbol = base->getAsBinaryNode()->getLeft()->getAsSymbolNode(); + if (baseSymbol == nullptr) + return; + const TSymbol* symbol = symbolTable.find(baseSymbol->getName()); + if (symbol == nullptr) + return; + const TVariable* variable = symbol->getAsVariable(); + if (variable == nullptr) + return; + if (!variable->hasMemberExtensions()) + return; + + // We now have a variable that is the base of a dot reference + // with members that need extension checking. + if (variable->getNumMemberExtensions(member) > 0) + requireExtensions(loc, variable->getNumMemberExtensions(member), variable->getMemberExtensions(member), memberName.c_str()); +} + +// +// Handle seeing a function declarator in the grammar. This is the precursor +// to recognizing a function prototype or function definition. +// +TFunction* TParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype) +{ + // ES can't declare prototypes inside functions + if (! symbolTable.atGlobalLevel()) + requireProfile(loc, ~EEsProfile, "local function declaration"); + + // + // Multiple declarations of the same function name are allowed. + // + // If this is a definition, the definition production code will check for redefinitions + // (we don't know at this point if it's a definition or not). + // + // Redeclarations (full signature match) are allowed. But, return types and parameter qualifiers must also match. + // - except ES 100, which only allows a single prototype + // + // ES 100 does not allow redefining, but does allow overloading of built-in functions. + // ES 300 does not allow redefining or overloading of built-in functions. + // + bool builtIn; + TSymbol* symbol = symbolTable.find(function.getMangledName(), &builtIn); + if (symbol && symbol->getAsFunction() && builtIn) + requireProfile(loc, ~EEsProfile, "redefinition of built-in function"); + const TFunction* prevDec = symbol ? 
symbol->getAsFunction() : 0; + if (prevDec) { + if (prevDec->isPrototyped() && prototype) + profileRequires(loc, EEsProfile, 300, nullptr, "multiple prototypes for same function"); + if (prevDec->getType() != function.getType()) + error(loc, "overloaded functions must have the same return type", function.getName().c_str(), ""); + for (int i = 0; i < prevDec->getParamCount(); ++i) { + if ((*prevDec)[i].type->getQualifier().storage != function[i].type->getQualifier().storage) + error(loc, "overloaded functions must have the same parameter storage qualifiers for argument", function[i].type->getStorageQualifierString(), "%d", i+1); + + if ((*prevDec)[i].type->getQualifier().precision != function[i].type->getQualifier().precision) + error(loc, "overloaded functions must have the same parameter precision qualifiers for argument", function[i].type->getPrecisionQualifierString(), "%d", i+1); + } + } + + arrayObjectCheck(loc, function.getType(), "array in function return type"); + + if (prototype) { + // All built-in functions are defined, even though they don't have a body. + // Count their prototype as a definition instead. + if (symbolTable.atBuiltInLevel()) + function.setDefined(); + else { + if (prevDec && ! builtIn) + symbol->getAsFunction()->setPrototyped(); // need a writable one, but like having prevDec as a const + function.setPrototyped(); + } + } + + // This insert won't actually insert it if it's a duplicate signature, but it will still check for + // other forms of name collisions. + if (! symbolTable.insert(function)) + error(loc, "function name is redeclaration of existing name", function.getName().c_str(), ""); + + // + // If this is a redeclaration, it could also be a definition, + // in which case, we need to use the parameter names from this one, and not the one that's + // being redeclared. So, pass back this declaration, not the one in the symbol table. + // + return &function; +} + +// +// Handle seeing the function prototype in front of a function definition in the grammar. +// The body is handled after this function returns. +// +TIntermAggregate* TParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function) +{ + currentCaller = function.getMangledName(); + TSymbol* symbol = symbolTable.find(function.getMangledName()); + TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr; + + if (! prevDec) + error(loc, "can't find function", function.getName().c_str(), ""); + // Note: 'prevDec' could be 'function' if this is the first time we've seen function + // as it would have just been put in the symbol table. Otherwise, we're looking up + // an earlier occurrence. + + if (prevDec && prevDec->isDefined()) { + // Then this function already has a body. + error(loc, "function already has a body", function.getName().c_str(), ""); + } + if (prevDec && ! prevDec->isDefined()) { + prevDec->setDefined(); + + // Remember the return type for later checking for RETURN statements. 
+ currentFunctionType = &(prevDec->getType()); + } else + currentFunctionType = new TType(EbtVoid); + functionReturnsValue = false; + + // Check for entry point + if (function.getName().compare(intermediate.getEntryPointName().c_str()) == 0) { + intermediate.setEntryPointMangledName(function.getMangledName().c_str()); + intermediate.incrementEntryPointCount(); + inMain = true; + } else + inMain = false; + + // + // Raise error message if main function takes any parameters or returns anything other than void + // + if (inMain) { + if (function.getParamCount() > 0) + error(loc, "function cannot take any parameter(s)", function.getName().c_str(), ""); + if (function.getType().getBasicType() != EbtVoid) + error(loc, "", function.getType().getBasicTypeString().c_str(), "entry point cannot return a value"); + } + + // + // New symbol table scope for body of function plus its arguments + // + symbolTable.push(); + + // + // Insert parameters into the symbol table. + // If the parameter has no name, it's not an error, just don't insert it + // (could be used for unused args). + // + // Also, accumulate the list of parameters into the HIL, so lower level code + // knows where to find parameters. + // + TIntermAggregate* paramNodes = new TIntermAggregate; + for (int i = 0; i < function.getParamCount(); i++) { + TParameter& param = function[i]; + if (param.name != nullptr) { + TVariable *variable = new TVariable(param.name, *param.type); + + // Insert the parameters with name in the symbol table. + if (! symbolTable.insert(*variable)) + error(loc, "redefinition", variable->getName().c_str(), ""); + else { + // Transfer ownership of name pointer to symbol table. + param.name = nullptr; + + // Add the parameter to the HIL + paramNodes = intermediate.growAggregate(paramNodes, + intermediate.addSymbol(*variable, loc), + loc); + } + } else + paramNodes = intermediate.growAggregate(paramNodes, intermediate.addSymbol(*param.type, loc), loc); + } + intermediate.setAggregateOperator(paramNodes, EOpParameters, TType(EbtVoid), loc); + loopNestingLevel = 0; + statementNestingLevel = 0; + controlFlowNestingLevel = 0; + postEntryPointReturn = false; + + return paramNodes; +} + +// +// Handle seeing function call syntax in the grammar, which could be any of +// - .length() method +// - constructor +// - a call to a built-in function mapped to an operator +// - a call to a built-in function that will remain a function call (e.g., texturing) +// - user function +// - subroutine call (not implemented yet) +// +TIntermTyped* TParseContext::handleFunctionCall(const TSourceLoc& loc, TFunction* function, TIntermNode* arguments) +{ + TIntermTyped* result = nullptr; + + if (function->getBuiltInOp() == EOpArrayLength) + result = handleLengthMethod(loc, function, arguments); + else if (function->getBuiltInOp() != EOpNull) { + // + // Then this should be a constructor. + // Don't go through the symbol table for constructors. + // Their parameters will be verified algorithmically. + // + TType type(EbtVoid); // use this to get the type back + if (! constructorError(loc, arguments, *function, function->getBuiltInOp(), type)) { + // + // It's a constructor, of type 'type'. + // + result = addConstructor(loc, arguments, type); + if (result == nullptr) + error(loc, "cannot construct with these arguments", type.getCompleteString().c_str(), ""); + } + } else { + // + // Find it in the symbol table. 
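The cascade above amounts to a three-way dispatch on the function's built-in op before any lookup happens; a toy summary, with hypothetical tags in place of glslang's TOperator values:

```cpp
// Toy operator tags; glslang's real dispatch keys off TOperator.
enum Op { OpNull, OpArrayLength, OpConstructSomething };

// .length() is handled first, then constructors (no symbol-table search;
// their arguments are verified algorithmically), and only the remainder
// goes through the symbol-table lookup that follows.
static const char* classifyCall(Op builtInOp)
{
    if (builtInOp == OpArrayLength)
        return "length-method";
    if (builtInOp != OpNull)
        return "constructor";
    return "symbol-table lookup";
}
```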
+ // + const TFunction* fnCandidate; + bool builtIn; + fnCandidate = findFunction(loc, *function, builtIn); + if (fnCandidate) { + // This is a declared function that might map to + // - a built-in operator, + // - a built-in function not mapped to an operator, or + // - a user function. + + // Error check for a function requiring specific extensions present. + if (builtIn && fnCandidate->getNumExtensions()) + requireExtensions(loc, fnCandidate->getNumExtensions(), fnCandidate->getExtensions(), fnCandidate->getName().c_str()); + + if (builtIn && fnCandidate->getType().contains16BitFloat()) + requireFloat16Arithmetic(loc, "built-in function", "float16 types can only be in uniform block or buffer storage"); + if (builtIn && fnCandidate->getType().contains16BitInt()) + requireInt16Arithmetic(loc, "built-in function", "(u)int16 types can only be in uniform block or buffer storage"); + if (builtIn && fnCandidate->getType().contains8BitInt()) + requireInt8Arithmetic(loc, "built-in function", "(u)int8 types can only be in uniform block or buffer storage"); + + if (arguments != nullptr) { + // Make sure qualifications work for these arguments. + TIntermAggregate* aggregate = arguments->getAsAggregate(); + for (int i = 0; i < fnCandidate->getParamCount(); ++i) { + // At this early point there is a slight ambiguity between whether an aggregate 'arguments' + // is the single argument itself or its children are the arguments. Only one argument + // means take 'arguments' itself as the one argument. + TIntermNode* arg = fnCandidate->getParamCount() == 1 ? arguments : (aggregate ? aggregate->getSequence()[i] : arguments); + TQualifier& formalQualifier = (*fnCandidate)[i].type->getQualifier(); + if (formalQualifier.isParamOutput()) { + if (lValueErrorCheck(arguments->getLoc(), "assign", arg->getAsTyped())) + error(arguments->getLoc(), "Non-L-value cannot be passed for 'out' or 'inout' parameters.", "out", ""); + } + const TType& argType = arg->getAsTyped()->getType(); + const TQualifier& argQualifier = argType.getQualifier(); + if (argQualifier.isMemory() && (argType.containsOpaque() || argType.isReference())) { + const char* message = "argument cannot drop memory qualifier when passed to formal parameter"; +#ifndef GLSLANG_WEB + if (argQualifier.volatil && ! formalQualifier.volatil) + error(arguments->getLoc(), message, "volatile", ""); + if (argQualifier.coherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "coherent", ""); + if (argQualifier.devicecoherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "devicecoherent", ""); + if (argQualifier.queuefamilycoherent && ! (formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "queuefamilycoherent", ""); + if (argQualifier.workgroupcoherent && ! (formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "workgroupcoherent", ""); + if (argQualifier.subgroupcoherent && ! (formalQualifier.subgroupcoherent || formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "subgroupcoherent", ""); + if (argQualifier.readonly && ! 
formalQualifier.readonly) + error(arguments->getLoc(), message, "readonly", ""); + if (argQualifier.writeonly && ! formalQualifier.writeonly) + error(arguments->getLoc(), message, "writeonly", ""); + // Don't check 'restrict', it is different than the rest: + // "...but only restrict can be taken away from a calling argument, by a formal parameter that + // lacks the restrict qualifier..." +#endif + } + if (!builtIn && argQualifier.getFormat() != formalQualifier.getFormat()) { + // we have mismatched formats, which should only be allowed if writeonly + // and at least one format is unknown + if (!formalQualifier.isWriteOnly() || (formalQualifier.getFormat() != ElfNone && + argQualifier.getFormat() != ElfNone)) + error(arguments->getLoc(), "image formats must match", "format", ""); + } + if (builtIn && arg->getAsTyped()->getType().contains16BitFloat()) + requireFloat16Arithmetic(arguments->getLoc(), "built-in function", "float16 types can only be in uniform block or buffer storage"); + if (builtIn && arg->getAsTyped()->getType().contains16BitInt()) + requireInt16Arithmetic(arguments->getLoc(), "built-in function", "(u)int16 types can only be in uniform block or buffer storage"); + if (builtIn && arg->getAsTyped()->getType().contains8BitInt()) + requireInt8Arithmetic(arguments->getLoc(), "built-in function", "(u)int8 types can only be in uniform block or buffer storage"); + + // TODO 4.5 functionality: A shader will fail to compile + // if the value passed to the memargument of an atomic memory function does not correspond to a buffer or + // shared variable. It is acceptable to pass an element of an array or a single component of a vector to the + // memargument of an atomic memory function, as long as the underlying array or vector is a buffer or + // shared variable. + } + + // Convert 'in' arguments + addInputArgumentConversions(*fnCandidate, arguments); // arguments may be modified if it's just a single argument node + } + + if (builtIn && fnCandidate->getBuiltInOp() != EOpNull) { + // A function call mapped to a built-in operation. + result = handleBuiltInFunctionCall(loc, arguments, *fnCandidate); + } else { + // This is a function call not mapped to built-in operator. + // It could still be a built-in function, but only if PureOperatorBuiltins == false. + result = intermediate.setAggregateOperator(arguments, EOpFunctionCall, fnCandidate->getType(), loc); + TIntermAggregate* call = result->getAsAggregate(); + call->setName(fnCandidate->getMangledName()); + + // this is how we know whether the given function is a built-in function or a user-defined function + // if builtIn == false, it's a userDefined -> could be an overloaded built-in function also + // if builtIn == true, it's definitely a built-in function with EOpNull + if (! builtIn) { + call->setUserDefined(); + if (symbolTable.atGlobalLevel()) { + requireProfile(loc, ~EEsProfile, "calling user function from global scope"); + intermediate.addToCallGraph(infoSink, "main(", fnCandidate->getMangledName()); + } else + intermediate.addToCallGraph(infoSink, currentCaller, fnCandidate->getMangledName()); + } + +#ifndef GLSLANG_WEB + if (builtIn) + nonOpBuiltInCheck(loc, *fnCandidate, *call); + else +#endif + userFunctionCallCheck(loc, *call); + } + + // Convert 'out' arguments. If it was a constant folded built-in, it won't be an aggregate anymore. + // Built-ins with a single argument aren't called with an aggregate, but they also don't have an output. 
+ // Also, build the qualifier list for user function calls, which are always called with an aggregate. + if (result->getAsAggregate()) { + TQualifierList& qualifierList = result->getAsAggregate()->getQualifierList(); + for (int i = 0; i < fnCandidate->getParamCount(); ++i) { + TStorageQualifier qual = (*fnCandidate)[i].type->getQualifier().storage; + qualifierList.push_back(qual); + } + result = addOutputArgumentConversions(*fnCandidate, *result->getAsAggregate()); + } + + if (result->getAsTyped()->getType().isCoopMat() && + !result->getAsTyped()->getType().isParameterized()) { + assert(fnCandidate->getBuiltInOp() == EOpCooperativeMatrixMulAdd); + + result->setType(result->getAsAggregate()->getSequence()[2]->getAsTyped()->getType()); + } + } + } + + // generic error recovery + // TODO: simplification: localize all the error recoveries that look like this, and taking type into account to reduce cascades + if (result == nullptr) + result = intermediate.addConstantUnion(0.0, EbtFloat, loc); + + return result; +} + +TIntermTyped* TParseContext::handleBuiltInFunctionCall(TSourceLoc loc, TIntermNode* arguments, + const TFunction& function) +{ + checkLocation(loc, function.getBuiltInOp()); + TIntermTyped *result = intermediate.addBuiltInFunctionCall(loc, function.getBuiltInOp(), + function.getParamCount() == 1, + arguments, function.getType()); + if (obeyPrecisionQualifiers()) + computeBuiltinPrecisions(*result, function); + + if (result == nullptr) { + if (arguments == nullptr) + error(loc, " wrong operand type", "Internal Error", + "built in unary operator function. Type: %s", ""); + else + error(arguments->getLoc(), " wrong operand type", "Internal Error", + "built in unary operator function. Type: %s", + static_cast<TIntermTyped*>(arguments)->getCompleteString().c_str()); + } else if (result->getAsOperator()) + builtInOpCheck(loc, function, *result->getAsOperator()); + + return result; +} + +// "The operation of a built-in function can have a different precision +// qualification than the precision qualification of the resulting value. +// These two precision qualifications are established as follows. +// +// The precision qualification of the operation of a built-in function is +// based on the precision qualification of its input arguments and formal +// parameters: When a formal parameter specifies a precision qualifier, +// that is used, otherwise, the precision qualification of the calling +// argument is used. The highest precision of these will be the precision +// qualification of the operation of the built-in function. Generally, +// this is applied across all arguments to a built-in function, with the +// exceptions being: +// - bitfieldExtract and bitfieldInsert ignore the 'offset' and 'bits' +// arguments. +// - interpolateAt* functions only look at the 'interpolant' argument. +// +// The precision qualification of the result of a built-in function is +// determined in one of the following ways: +// +// - For the texture sampling, image load, and image store functions, +// the precision of the return type matches the precision of the +// sampler type +// +// Otherwise: +// +// - For prototypes that do not specify a resulting precision qualifier, +// the precision will be the same as the precision of the operation. +// +// - For prototypes that do specify a resulting precision qualifier, +// the specified precision qualifier is the precision qualification of +// the result."
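The quoted rule reduces to a max() over each argument's precision and its formal parameter's precision, with the prototype's own qualifier overriding the result precision when present. A toy model of the computation the function below performs, assuming an ordered precision enum as in glslang (EpqNone < EpqLow < EpqMedium < EpqHigh):

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

enum Precision { PrecNone, PrecLow, PrecMedium, PrecHigh };  // ordered, so max() is meaningful

// Operation precision: highest among every calling argument and its formal parameter.
static Precision operationPrecision(const std::vector<Precision>& args,
                                    const std::vector<Precision>& formals)
{
    Precision op = PrecNone;
    for (std::size_t i = 0; i < args.size() && i < formals.size(); ++i)
        op = std::max({ op, args[i], formals[i] });
    return op;
}

// Result precision: the prototype's qualifier if it specifies one, else the operation's.
static Precision resultPrecision(Precision prototype, Precision operation)
{
    return prototype == PrecNone ? operation : prototype;
}
```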
+// +void TParseContext::computeBuiltinPrecisions(TIntermTyped& node, const TFunction& function) +{ + TPrecisionQualifier operationPrecision = EpqNone; + TPrecisionQualifier resultPrecision = EpqNone; + + TIntermOperator* opNode = node.getAsOperator(); + if (opNode == nullptr) + return; + + if (TIntermUnary* unaryNode = node.getAsUnaryNode()) { + operationPrecision = std::max(function[0].type->getQualifier().precision, + unaryNode->getOperand()->getType().getQualifier().precision); + if (function.getType().getBasicType() != EbtBool) + resultPrecision = function.getType().getQualifier().precision == EpqNone ? + operationPrecision : + function.getType().getQualifier().precision; + } else if (TIntermAggregate* agg = node.getAsAggregate()) { + TIntermSequence& sequence = agg->getSequence(); + unsigned int numArgs = (unsigned int)sequence.size(); + switch (agg->getOp()) { + case EOpBitfieldExtract: + numArgs = 1; + break; + case EOpBitfieldInsert: + numArgs = 2; + break; + case EOpInterpolateAtCentroid: + case EOpInterpolateAtOffset: + case EOpInterpolateAtSample: + numArgs = 1; + break; + case EOpDebugPrintf: + numArgs = 0; + break; + default: + break; + } + // find the maximum precision from the arguments and parameters + for (unsigned int arg = 0; arg < numArgs; ++arg) { + operationPrecision = std::max(operationPrecision, sequence[arg]->getAsTyped()->getQualifier().precision); + operationPrecision = std::max(operationPrecision, function[arg].type->getQualifier().precision); + } + // compute the result precision + if (agg->isSampling() || + agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore || + agg->getOp() == EOpImageLoadLod || agg->getOp() == EOpImageStoreLod) + resultPrecision = sequence[0]->getAsTyped()->getQualifier().precision; + else if (function.getType().getBasicType() != EbtBool) + resultPrecision = function.getType().getQualifier().precision == EpqNone ? + operationPrecision : + function.getType().getQualifier().precision; + } + + // Propagate precision through this node and its children. 
That algorithm stops + // when a precision is found, so start by clearing this subroot precision + opNode->getQualifier().precision = EpqNone; + if (operationPrecision != EpqNone) { + opNode->propagatePrecision(operationPrecision); + opNode->setOperationPrecision(operationPrecision); + } + // Now, set the result precision, which might not match + opNode->getQualifier().precision = resultPrecision; +} + +TIntermNode* TParseContext::handleReturnValue(const TSourceLoc& loc, TIntermTyped* value) +{ +#ifndef GLSLANG_WEB + storage16BitAssignmentCheck(loc, value->getType(), "return"); +#endif + + functionReturnsValue = true; + if (currentFunctionType->getBasicType() == EbtVoid) { + error(loc, "void function cannot return a value", "return", ""); + return intermediate.addBranch(EOpReturn, loc); + } else if (*currentFunctionType != value->getType()) { + TIntermTyped* converted = intermediate.addConversion(EOpReturn, *currentFunctionType, value); + if (converted) { + if (*currentFunctionType != converted->getType()) + error(loc, "cannot convert return value to function return type", "return", ""); + if (version < 420) + warn(loc, "type conversion on return values was not explicitly allowed until version 420", "return", ""); + return intermediate.addBranch(EOpReturn, converted, loc); + } else { + error(loc, "type does not match, or is not convertible to, the function's return type", "return", ""); + return intermediate.addBranch(EOpReturn, value, loc); + } + } else + return intermediate.addBranch(EOpReturn, value, loc); +} + +// See if the operation is being done in an illegal location. +void TParseContext::checkLocation(const TSourceLoc& loc, TOperator op) +{ +#ifndef GLSLANG_WEB + switch (op) { + case EOpBarrier: + if (language == EShLangTessControl) { + if (controlFlowNestingLevel > 0) + error(loc, "tessellation control barrier() cannot be placed within flow control", "", ""); + if (! inMain) + error(loc, "tessellation control barrier() must be in main()", "", ""); + else if (postEntryPointReturn) + error(loc, "tessellation control barrier() cannot be placed after a return from main()", "", ""); + } + break; + case EOpBeginInvocationInterlock: + if (language != EShLangFragment) + error(loc, "beginInvocationInterlockARB() must be in a fragment shader", "", ""); + if (! inMain) + error(loc, "beginInvocationInterlockARB() must be in main()", "", ""); + else if (postEntryPointReturn) + error(loc, "beginInvocationInterlockARB() cannot be placed after a return from main()", "", ""); + if (controlFlowNestingLevel > 0) + error(loc, "beginInvocationInterlockARB() cannot be placed within flow control", "", ""); + + if (beginInvocationInterlockCount > 0) + error(loc, "beginInvocationInterlockARB() must only be called once", "", ""); + if (endInvocationInterlockCount > 0) + error(loc, "beginInvocationInterlockARB() must be called before endInvocationInterlockARB()", "", ""); + + beginInvocationInterlockCount++; + + // default to pixel_interlock_ordered + if (intermediate.getInterlockOrdering() == EioNone) + intermediate.setInterlockOrdering(EioPixelInterlockOrdered); + break; + case EOpEndInvocationInterlock: + if (language != EShLangFragment) + error(loc, "endInvocationInterlockARB() must be in a fragment shader", "", ""); + if (! 
inMain) + error(loc, "endInvocationInterlockARB() must be in main()", "", ""); + else if (postEntryPointReturn) + error(loc, "endInvocationInterlockARB() cannot be placed after a return from main()", "", ""); + if (controlFlowNestingLevel > 0) + error(loc, "endInvocationInterlockARB() cannot be placed within flow control", "", ""); + + if (endInvocationInterlockCount > 0) + error(loc, "endInvocationInterlockARB() must only be called once", "", ""); + if (beginInvocationInterlockCount == 0) + error(loc, "beginInvocationInterlockARB() must be called before endInvocationInterlockARB()", "", ""); + + endInvocationInterlockCount++; + break; + default: + break; + } +#endif +} + +// Finish processing object.length(). This started earlier in handleDotDereference(), where +// the ".length" part was recognized and semantically checked, and finished here where the +// function syntax "()" is recognized. +// +// Return resulting tree node. +TIntermTyped* TParseContext::handleLengthMethod(const TSourceLoc& loc, TFunction* function, TIntermNode* intermNode) +{ + int length = 0; + + if (function->getParamCount() > 0) + error(loc, "method does not accept any arguments", function->getName().c_str(), ""); + else { + const TType& type = intermNode->getAsTyped()->getType(); + if (type.isArray()) { + if (type.isUnsizedArray()) { +#ifndef GLSLANG_WEB + if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) { + // We could be between a layout declaration that gives a built-in io array implicit size and + // a user redeclaration of that array, meaning we have to substitute its implicit size here + // without actually redeclaring the array. (It is an error to use a member before the + // redeclaration, but not an error to use the array name itself.) + const TString& name = intermNode->getAsSymbolNode()->getName(); + if (name == "gl_in" || name == "gl_out" || name == "gl_MeshVerticesNV" || + name == "gl_MeshPrimitivesNV") { + length = getIoArrayImplicitSize(type.getQualifier()); + } + } +#endif + if (length == 0) { +#ifndef GLSLANG_WEB + if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) + error(loc, "", function->getName().c_str(), "array must first be sized by a redeclaration or layout qualifier"); + else if (isRuntimeLength(*intermNode->getAsTyped())) { + // Create a unary op and let the back end handle it + return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt)); + } else +#endif + error(loc, "", function->getName().c_str(), "array must be declared with a size before using this method"); + } + } else if (type.getOuterArrayNode()) { + // If the array's outer size is specified by an intermediate node, it means the array's length + // was specified by a specialization constant. In such a case, we should return the node of the + // specialization constants to represent the length. 
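+ // For example (illustrative): given a hypothetical
+ //     layout(constant_id = 1) const int N = 3;
+ //     float a[N];
+ // a.length() returns the specialization-constant node for N below,
+ // rather than folding to the literal 3.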
+ return type.getOuterArrayNode(); + } else + length = type.getOuterArraySize(); + } else if (type.isMatrix()) + length = type.getMatrixCols(); + else if (type.isVector()) + length = type.getVectorSize(); + else if (type.isCoopMat()) + return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt)); + else { + // we should not get here, because earlier semantic checking should have prevented this path + error(loc, ".length()", "unexpected use of .length()", ""); + } + } + + if (length == 0) + length = 1; + + return intermediate.addConstantUnion(length, loc); +} + +// +// Add any needed implicit conversions for function-call arguments to input parameters. +// +void TParseContext::addInputArgumentConversions(const TFunction& function, TIntermNode*& arguments) const +{ +#ifndef GLSLANG_WEB + TIntermAggregate* aggregate = arguments->getAsAggregate(); + + // Process each argument's conversion + for (int i = 0; i < function.getParamCount(); ++i) { + // At this early point there is a slight ambiguity between whether an aggregate 'arguments' + // is the single argument itself or its children are the arguments. Only one argument + // means take 'arguments' itself as the one argument. + TIntermTyped* arg = function.getParamCount() == 1 ? arguments->getAsTyped() : (aggregate ? aggregate->getSequence()[i]->getAsTyped() : arguments->getAsTyped()); + if (*function[i].type != arg->getType()) { + if (function[i].type->getQualifier().isParamInput() && + !function[i].type->isCoopMat()) { + // In-qualified arguments just need an extra node added above the argument to + // convert to the correct type. + arg = intermediate.addConversion(EOpFunctionCall, *function[i].type, arg); + if (arg) { + if (function.getParamCount() == 1) + arguments = arg; + else { + if (aggregate) + aggregate->getSequence()[i] = arg; + else + arguments = arg; + } + } + } + } + } +#endif +} + +// +// Add any needed implicit output conversions for function-call arguments. This +// can require a new tree topology, complicated further by whether the function +// has a return value. +// +// Returns a node of a subtree that evaluates to the return value of the function. +// +TIntermTyped* TParseContext::addOutputArgumentConversions(const TFunction& function, TIntermAggregate& intermNode) const +{ +#ifdef GLSLANG_WEB + return &intermNode; +#else + TIntermSequence& arguments = intermNode.getSequence(); + + // Will there be any output conversions? + bool outputConversions = false; + for (int i = 0; i < function.getParamCount(); ++i) { + if (*function[i].type != arguments[i]->getAsTyped()->getType() && function[i].type->getQualifier().isParamOutput()) { + outputConversions = true; + break; + } + } + + if (! outputConversions) + return &intermNode; + + // Setup for the new tree, if needed: + // + // Output conversions need a different tree topology. + // Out-qualified arguments need a temporary of the correct type, with the call + // followed by an assignment of the temporary to the original argument: + // void: function(arg, ...) -> ( function(tempArg, ...), arg = tempArg, ...) + // ret = function(arg, ...) -> ret = (tempRet = function(tempArg, ...), arg = tempArg, ..., tempRet) + // Where the "tempArg" type needs no conversion as an argument, but will convert on assignment. 
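+ // For example (illustrative): a hypothetical "void f(out highp float x)"
+ // called as "f(v[i])" conceptually becomes ( f(tempArg), v[i] = tempArg ),
+ // so any needed conversion happens on the assignment back to v[i].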
+ TIntermTyped* conversionTree = nullptr; + TVariable* tempRet = nullptr; + if (intermNode.getBasicType() != EbtVoid) { + // do the "tempRet = function(...), " bit from above + tempRet = makeInternalVariable("tempReturn", intermNode.getType()); + TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc()); + conversionTree = intermediate.addAssign(EOpAssign, tempRetNode, &intermNode, intermNode.getLoc()); + } else + conversionTree = &intermNode; + + conversionTree = intermediate.makeAggregate(conversionTree); + + // Process each argument's conversion + for (int i = 0; i < function.getParamCount(); ++i) { + if (*function[i].type != arguments[i]->getAsTyped()->getType()) { + if (function[i].type->getQualifier().isParamOutput()) { + // Out-qualified arguments need to use the topology set up above. + // do the " ...(tempArg, ...), arg = tempArg" bit from above + TType paramType; + paramType.shallowCopy(*function[i].type); + if (arguments[i]->getAsTyped()->getType().isParameterized() && + !paramType.isParameterized()) { + paramType.shallowCopy(arguments[i]->getAsTyped()->getType()); + paramType.copyTypeParameters(*arguments[i]->getAsTyped()->getType().getTypeParameters()); + } + TVariable* tempArg = makeInternalVariable("tempArg", paramType); + tempArg->getWritableType().getQualifier().makeTemporary(); + TIntermSymbol* tempArgNode = intermediate.addSymbol(*tempArg, intermNode.getLoc()); + TIntermTyped* tempAssign = intermediate.addAssign(EOpAssign, arguments[i]->getAsTyped(), tempArgNode, arguments[i]->getLoc()); + conversionTree = intermediate.growAggregate(conversionTree, tempAssign, arguments[i]->getLoc()); + // replace the argument with another node for the same tempArg variable + arguments[i] = intermediate.addSymbol(*tempArg, intermNode.getLoc()); + } + } + } + + // Finalize the tree topology (see bigger comment above). 
+ if (tempRet) { + // do the "..., tempRet" bit from above + TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc()); + conversionTree = intermediate.growAggregate(conversionTree, tempRetNode, intermNode.getLoc()); + } + conversionTree = intermediate.setAggregateOperator(conversionTree, EOpComma, intermNode.getType(), intermNode.getLoc()); + + return conversionTree; +#endif +} + +void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction& fnCandidate, const TIntermOperator& callNode) +{ + const TIntermSequence* argp = &callNode.getAsAggregate()->getSequence(); + + //const int gl_SemanticsRelaxed = 0x0; + const int gl_SemanticsAcquire = 0x2; + const int gl_SemanticsRelease = 0x4; + const int gl_SemanticsAcquireRelease = 0x8; + const int gl_SemanticsMakeAvailable = 0x2000; + const int gl_SemanticsMakeVisible = 0x4000; + const int gl_SemanticsVolatile = 0x8000; + + //const int gl_StorageSemanticsNone = 0x0; + const int gl_StorageSemanticsBuffer = 0x40; + const int gl_StorageSemanticsShared = 0x100; + const int gl_StorageSemanticsImage = 0x800; + const int gl_StorageSemanticsOutput = 0x1000; + + + unsigned int semantics = 0, storageClassSemantics = 0; + unsigned int semantics2 = 0, storageClassSemantics2 = 0; + + const TIntermTyped* arg0 = (*argp)[0]->getAsTyped(); + const bool isMS = arg0->getBasicType() == EbtSampler && arg0->getType().getSampler().isMultiSample(); + + // Grab the semantics and storage class semantics from the operands, based on opcode + switch (callNode.getOp()) { + case EOpAtomicAdd: + case EOpAtomicMin: + case EOpAtomicMax: + case EOpAtomicAnd: + case EOpAtomicOr: + case EOpAtomicXor: + case EOpAtomicExchange: + case EOpAtomicStore: + storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpAtomicLoad: + storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpAtomicCompSwap: + storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst(); + storageClassSemantics2 = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + + case EOpImageAtomicAdd: + case EOpImageAtomicMin: + case EOpImageAtomicMax: + case EOpImageAtomicAnd: + case EOpImageAtomicOr: + case EOpImageAtomicXor: + case EOpImageAtomicExchange: + case EOpImageAtomicStore: + storageClassSemantics = (*argp)[isMS ? 5 : 4]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[isMS ? 6 : 5]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpImageAtomicLoad: + storageClassSemantics = (*argp)[isMS ? 4 : 3]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[isMS ? 5 : 4]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpImageAtomicCompSwap: + storageClassSemantics = (*argp)[isMS ? 6 : 5]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[isMS ? 7 : 6]->getAsConstantUnion()->getConstArray()[0].getIConst(); + storageClassSemantics2 = (*argp)[isMS ? 8 : 7]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics2 = (*argp)[isMS ? 
9 : 8]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + + case EOpBarrier: + storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpMemoryBarrier: + storageClassSemantics = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + default: + break; + } + + if ((semantics & gl_SemanticsAcquire) && + (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore)) { + error(loc, "gl_SemanticsAcquire must not be used with (image) atomic store", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsRelease) && + (callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) { + error(loc, "gl_SemanticsRelease must not be used with (image) atomic load", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsAcquireRelease) && + (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore || + callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) { + error(loc, "gl_SemanticsAcquireRelease must not be used with (image) atomic load/store", + fnCandidate.getName().c_str(), ""); + } + if (((semantics | semantics2) & ~(gl_SemanticsAcquire | + gl_SemanticsRelease | + gl_SemanticsAcquireRelease | + gl_SemanticsMakeAvailable | + gl_SemanticsMakeVisible | + gl_SemanticsVolatile))) { + error(loc, "Invalid semantics value", fnCandidate.getName().c_str(), ""); + } + if (((storageClassSemantics | storageClassSemantics2) & ~(gl_StorageSemanticsBuffer | + gl_StorageSemanticsShared | + gl_StorageSemanticsImage | + gl_StorageSemanticsOutput))) { + error(loc, "Invalid storage class semantics value", fnCandidate.getName().c_str(), ""); + } + + if (callNode.getOp() == EOpMemoryBarrier) { + if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or " + "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); + } + } else { + if (semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) { + if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "Semantics must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or " + "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); + } + } + if (semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) { + if (!IsPow2(semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "semUnequal must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or " + "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); + } + } + } + if (callNode.getOp() == EOpMemoryBarrier) { + if (storageClassSemantics == 0) { + error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), ""); + } + } + if (callNode.getOp() == EOpBarrier && semantics != 0 && storageClassSemantics == 0) { + error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), ""); + } + if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) && + (semantics2 & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "semUnequal must 
not be gl_SemanticsRelease or gl_SemanticsAcquireRelease", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsMakeAvailable) && + !(semantics & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "gl_SemanticsMakeAvailable requires gl_SemanticsRelease or gl_SemanticsAcquireRelease", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsMakeVisible) && + !(semantics & (gl_SemanticsAcquire | gl_SemanticsAcquireRelease))) { + error(loc, "gl_SemanticsMakeVisible requires gl_SemanticsAcquire or gl_SemanticsAcquireRelease", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsVolatile) && + (callNode.getOp() == EOpMemoryBarrier || callNode.getOp() == EOpBarrier)) { + error(loc, "gl_SemanticsVolatile must not be used with memoryBarrier or controlBarrier", + fnCandidate.getName().c_str(), ""); + } + if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) && + ((semantics ^ semantics2) & gl_SemanticsVolatile)) { + error(loc, "semEqual and semUnequal must either both include gl_SemanticsVolatile or neither", + fnCandidate.getName().c_str(), ""); + } +} + +// +// Do additional checking of built-in function calls that is not caught +// by normal semantic checks on argument type, extension tagging, etc. +// +// Assumes there has been a semantically correct match to a built-in function prototype. +// +void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermOperator& callNode) +{ + // Set up convenience accessors to the argument(s). There are almost always + // multiple arguments for the cases below, but when there might be one, + // check the unaryArg first. + const TIntermSequence* argp = nullptr; // confusing to use [] syntax on a pointer, so this is to help get a reference + const TIntermTyped* unaryArg = nullptr; + const TIntermTyped* arg0 = nullptr; + if (callNode.getAsAggregate()) { + argp = &callNode.getAsAggregate()->getSequence(); + if (argp->size() > 0) + arg0 = (*argp)[0]->getAsTyped(); + } else { + assert(callNode.getAsUnaryNode()); + unaryArg = callNode.getAsUnaryNode()->getOperand(); + arg0 = unaryArg; + } + + TString featureString; + const char* feature = nullptr; + switch (callNode.getOp()) { +#ifndef GLSLANG_WEB + case EOpTextureGather: + case EOpTextureGatherOffset: + case EOpTextureGatherOffsets: + { + // Figure out which variants are allowed by what extensions, + // and what arguments must be constant for which situations. + + featureString = fnCandidate.getName(); + featureString += "(...)"; + feature = featureString.c_str(); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + int compArg = -1; // track which argument, if any, is the constant component argument + switch (callNode.getOp()) { + case EOpTextureGather: + // More than two arguments needs gpu_shader5, and rectangular or shadow needs gpu_shader5, + // otherwise, need GL_ARB_texture_gather. + if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) { + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 2; + } else + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); + break; + case EOpTextureGatherOffset: + // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument + if (fnCandidate[0].type->getSampler().dim == Esd2D && !
fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3) + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); + else + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion()) + profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, + "non-constant offset argument"); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 3; + break; + case EOpTextureGatherOffsets: + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 3; + // check for constant offsets + if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion()) + error(loc, "must be a compile-time constant:", feature, "offsets argument"); + break; + default: + break; + } + + if (compArg > 0 && compArg < fnCandidate.getParamCount()) { + if ((*argp)[compArg]->getAsConstantUnion()) { + int value = (*argp)[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (value < 0 || value > 3) + error(loc, "must be 0, 1, 2, or 3:", feature, "component argument"); + } else + error(loc, "must be a compile-time constant:", feature, "component argument"); + } + + bool bias = false; + if (callNode.getOp() == EOpTextureGather) + bias = fnCandidate.getParamCount() > 3; + else if (callNode.getOp() == EOpTextureGatherOffset || + callNode.getOp() == EOpTextureGatherOffsets) + bias = fnCandidate.getParamCount() > 4; + + if (bias) { + featureString = fnCandidate.getName(); + featureString += "with bias argument"; + feature = featureString.c_str(); + profileRequires(loc, ~EEsProfile, 450, nullptr, feature); + requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature); + } + break; + } + case EOpSparseTextureGather: + case EOpSparseTextureGatherOffset: + case EOpSparseTextureGatherOffsets: + { + bool bias = false; + if (callNode.getOp() == EOpSparseTextureGather) + bias = fnCandidate.getParamCount() > 4; + else if (callNode.getOp() == EOpSparseTextureGatherOffset || + callNode.getOp() == EOpSparseTextureGatherOffsets) + bias = fnCandidate.getParamCount() > 5; + + if (bias) { + featureString = fnCandidate.getName(); + featureString += "with bias argument"; + feature = featureString.c_str(); + profileRequires(loc, ~EEsProfile, 450, nullptr, feature); + requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature); + } + + break; + } + + case EOpSparseTextureGatherLod: + case EOpSparseTextureGatherLodOffset: + case EOpSparseTextureGatherLodOffsets: + { + requireExtensions(loc, 1, &E_GL_ARB_sparse_texture2, fnCandidate.getName().c_str()); + break; + } + + case EOpSwizzleInvocations: + { + if (! (*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "offset", ""); + else { + unsigned offset[4] = {}; + offset[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); + offset[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst(); + offset[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst(); + offset[3] = (*argp)[1]->getAsConstantUnion()->getConstArray()[3].getUConst(); + if (offset[0] > 3 || offset[1] > 3 || offset[2] > 3 || offset[3] > 3) + error(loc, "components must be in the range [0, 3]", "offset", ""); + } + + break; + } + + case EOpSwizzleInvocationsMasked: + { + if (! 
(*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "mask", ""); + else { + unsigned mask[3] = {}; + mask[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); + mask[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst(); + mask[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst(); + if (mask[0] > 31 || mask[1] > 31 || mask[2] > 31) + error(loc, "components must be in the range [0, 31]", "mask", ""); + } + + break; + } +#endif + + case EOpTextureOffset: + case EOpTextureFetchOffset: + case EOpTextureProjOffset: + case EOpTextureLodOffset: + case EOpTextureProjLodOffset: + case EOpTextureGradOffset: + case EOpTextureProjGradOffset: + { + // Handle texture-offset limits checking + // Pick which argument has to hold constant offsets + int arg = -1; + switch (callNode.getOp()) { + case EOpTextureOffset: arg = 2; break; + case EOpTextureFetchOffset: arg = (arg0->getType().getSampler().isRect()) ? 2 : 3; break; + case EOpTextureProjOffset: arg = 2; break; + case EOpTextureLodOffset: arg = 3; break; + case EOpTextureProjLodOffset: arg = 3; break; + case EOpTextureGradOffset: arg = 4; break; + case EOpTextureProjGradOffset: arg = 4; break; + default: + assert(0); + break; + } + + if (arg > 0) { + +#ifndef GLSLANG_WEB + bool f16ShadowCompare = (*argp)[1]->getAsTyped()->getBasicType() == EbtFloat16 && + arg0->getType().getSampler().shadow; + if (f16ShadowCompare) + ++arg; +#endif + if (! (*argp)[arg]->getAsTyped()->getQualifier().isConstant()) + error(loc, "argument must be compile-time constant", "texel offset", ""); + else if ((*argp)[arg]->getAsConstantUnion()) { + const TType& type = (*argp)[arg]->getAsTyped()->getType(); + for (int c = 0; c < type.getVectorSize(); ++c) { + int offset = (*argp)[arg]->getAsConstantUnion()->getConstArray()[c].getIConst(); + if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset) + error(loc, "value is out of range:", "texel offset", + "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]"); + } + } + } + + break; + } + +#ifndef GLSLANG_WEB + case EOpTrace: + if (!(*argp)[10]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "payload number", ""); + break; + case EOpExecuteCallable: + if (!(*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "callable data number", ""); + break; + + case EOpRayQueryGetIntersectionType: + case EOpRayQueryGetIntersectionT: + case EOpRayQueryGetIntersectionInstanceCustomIndex: + case EOpRayQueryGetIntersectionInstanceId: + case EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: + case EOpRayQueryGetIntersectionGeometryIndex: + case EOpRayQueryGetIntersectionPrimitiveIndex: + case EOpRayQueryGetIntersectionBarycentrics: + case EOpRayQueryGetIntersectionFrontFace: + case EOpRayQueryGetIntersectionObjectRayDirection: + case EOpRayQueryGetIntersectionObjectRayOrigin: + case EOpRayQueryGetIntersectionObjectToWorld: + case EOpRayQueryGetIntersectionWorldToObject: + if (!(*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "committed", ""); + break; + + case EOpTextureQuerySamples: + case EOpImageQuerySamples: + // GL_ARB_shader_texture_image_samples + profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples"); + break; + + case EOpImageAtomicAdd: + case EOpImageAtomicMin: + case EOpImageAtomicMax: + case EOpImageAtomicAnd: + case EOpImageAtomicOr: + case 
EOpImageAtomicXor: + case EOpImageAtomicExchange: + case EOpImageAtomicCompSwap: + case EOpImageAtomicLoad: + case EOpImageAtomicStore: + { + // Make sure the image types have the correct layout() format and correct argument types + const TType& imageType = arg0->getType(); + if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) { + if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui) + error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), ""); + } else { + if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0) + error(loc, "only supported on integer images", fnCandidate.getName().c_str(), ""); + else if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile()) + error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), ""); + } + + const size_t maxArgs = imageType.getSampler().isMultiSample() ? 5 : 4; + if (argp->size() > maxArgs) { + requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); + memorySemanticsCheck(loc, fnCandidate, callNode); + } + + break; + } + + case EOpAtomicAdd: + case EOpAtomicMin: + case EOpAtomicMax: + case EOpAtomicAnd: + case EOpAtomicOr: + case EOpAtomicXor: + case EOpAtomicExchange: + case EOpAtomicCompSwap: + case EOpAtomicLoad: + case EOpAtomicStore: + { + if (argp->size() > 3) { + requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); + memorySemanticsCheck(loc, fnCandidate, callNode); + } else if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64) { + const char* const extensions[2] = { E_GL_NV_shader_atomic_int64, + E_GL_EXT_shader_atomic_int64 }; + requireExtensions(loc, 2, extensions, fnCandidate.getName().c_str()); + } + break; + } + + case EOpInterpolateAtCentroid: + case EOpInterpolateAtSample: + case EOpInterpolateAtOffset: + case EOpInterpolateAtVertex: + // Make sure the first argument is an interpolant, or an array element of an interpolant + if (arg0->getType().getQualifier().storage != EvqVaryingIn) { + // It might still be an array element. + // + // We could check more, but the semantics of the first argument are already met; the + // only way to turn an array into a float/vec* is array dereference and swizzle. + // + // ES and desktop 4.3 and earlier: swizzles may not be used + // desktop 4.4 and later: swizzles may be used + bool swizzleOkay = (!isEsProfile()) && (version >= 440); + const TIntermTyped* base = TIntermediate::findLValueBase(arg0, swizzleOkay); + if (base == nullptr || base->getType().getQualifier().storage != EvqVaryingIn) + error(loc, "first argument must be an interpolant, or interpolant-array element", fnCandidate.getName().c_str(), ""); + } + + if (callNode.getOp() == EOpInterpolateAtVertex) { + if (!arg0->getType().getQualifier().isExplicitInterpolation()) + error(loc, "argument must be qualified as __explicitInterpAMD in", "interpolant", ""); + else { + if (! 
(*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "vertex index", ""); + else { + unsigned vertexIdx = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); + if (vertexIdx > 2) + error(loc, "must be in the range [0, 2]", "vertex index", ""); + } + } + } + break; + + case EOpEmitStreamVertex: + case EOpEndStreamPrimitive: + intermediate.setMultiStream(); + break; + + case EOpSubgroupClusteredAdd: + case EOpSubgroupClusteredMul: + case EOpSubgroupClusteredMin: + case EOpSubgroupClusteredMax: + case EOpSubgroupClusteredAnd: + case EOpSubgroupClusteredOr: + case EOpSubgroupClusteredXor: + // The <clusterSize> as used in the subgroupClustered<op>() operations must be: + // - An integral constant expression. + // - At least 1. + // - A power of 2. + if ((*argp)[1]->getAsConstantUnion() == nullptr) + error(loc, "argument must be compile-time constant", "cluster size", ""); + else { + int size = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (size < 1) + error(loc, "argument must be at least 1", "cluster size", ""); + else if (!IsPow2(size)) + error(loc, "argument must be a power of 2", "cluster size", ""); + } + break; + + case EOpSubgroupBroadcast: + case EOpSubgroupQuadBroadcast: + if (spvVersion.spv < EShTargetSpv_1_5) { + // must be an integral constant expression. + if ((*argp)[1]->getAsConstantUnion() == nullptr) + error(loc, "argument must be compile-time constant", "id", ""); + } + break; + + case EOpBarrier: + case EOpMemoryBarrier: + if (argp->size() > 0) { + requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); + memorySemanticsCheck(loc, fnCandidate, callNode); + } + break; + + case EOpMix: + if (profile == EEsProfile && version < 310) { + // Look for specific signatures + if ((*argp)[0]->getAsTyped()->getBasicType() != EbtFloat && + (*argp)[1]->getAsTyped()->getBasicType() != EbtFloat && + (*argp)[2]->getAsTyped()->getBasicType() == EbtBool) { + requireExtensions(loc, 1, &E_GL_EXT_shader_integer_mix, "specific signature of builtin mix"); + } + } + + if (profile != EEsProfile && version < 450) { + if ((*argp)[0]->getAsTyped()->getBasicType() != EbtFloat && + (*argp)[0]->getAsTyped()->getBasicType() != EbtDouble && + (*argp)[1]->getAsTyped()->getBasicType() != EbtFloat && + (*argp)[1]->getAsTyped()->getBasicType() != EbtDouble && + (*argp)[2]->getAsTyped()->getBasicType() == EbtBool) { + requireExtensions(loc, 1, &E_GL_EXT_shader_integer_mix, fnCandidate.getName().c_str()); + } + } + + break; +#endif + + default: + break; + } + + // Texture operations on texture objects (aside from texelFetch on a + // textureBuffer) require EXT_samplerless_texture_functions.
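+ // For example (illustrative): with "uniform texture2D uTex;" (a texture
+ // with no combined sampler), textureSize(uTex, 0) and texelFetch(uTex, p, 0)
+ // require GL_EXT_samplerless_texture_functions; texelFetch on a
+ // textureBuffer is the one exception.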
+ switch (callNode.getOp()) { + case EOpTextureQuerySize: + case EOpTextureQueryLevels: + case EOpTextureQuerySamples: + case EOpTextureFetch: + case EOpTextureFetchOffset: + { + const TSampler& sampler = fnCandidate[0].type->getSampler(); + + const bool isTexture = sampler.isTexture() && !sampler.isCombined(); + const bool isBuffer = sampler.isBuffer(); + const bool isFetch = callNode.getOp() == EOpTextureFetch || callNode.getOp() == EOpTextureFetchOffset; + + if (isTexture && (!isBuffer || !isFetch)) + requireExtensions(loc, 1, &E_GL_EXT_samplerless_texture_functions, fnCandidate.getName().c_str()); + + break; + } + + default: + break; + } + + if (callNode.isSubgroup()) { + // these require SPIR-V 1.3 + if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_3) + error(loc, "requires SPIR-V 1.3", "subgroup op", ""); + + // Check that if extended types are being used that the correct extensions are enabled. + if (arg0 != nullptr) { + const TType& type = arg0->getType(); + switch (type.getBasicType()) { + default: + break; + case EbtInt8: + case EbtUint8: + requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int8, type.getCompleteString().c_str()); + break; + case EbtInt16: + case EbtUint16: + requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int16, type.getCompleteString().c_str()); + break; + case EbtInt64: + case EbtUint64: + requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int64, type.getCompleteString().c_str()); + break; + case EbtFloat16: + requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_float16, type.getCompleteString().c_str()); + break; + } + } + } +} + +#ifndef GLSLANG_WEB + +extern bool PureOperatorBuiltins; + +// Deprecated! Use PureOperatorBuiltins == true instead, in which case this +// functionality is handled in builtInOpCheck() instead of here. +// +// Do additional checking of built-in function calls that were not mapped +// to built-in operations (e.g., texturing functions). +// +// Assumes there has been a semantically correct match to a built-in function. +// +void TParseContext::nonOpBuiltInCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermAggregate& callNode) +{ + // Further maintenance of this function is deprecated, because the "correct" + // future-oriented design is to not have to do string compares on function names. + + // If PureOperatorBuiltins == true, then all built-ins should be mapped + // to a TOperator, and this function would then never get called. + + assert(PureOperatorBuiltins == false); + + // built-in texturing functions get their return value precision from the precision of the sampler + if (fnCandidate.getType().getQualifier().precision == EpqNone && + fnCandidate.getParamCount() > 0 && fnCandidate[0].type->getBasicType() == EbtSampler) + callNode.getQualifier().precision = callNode.getSequence()[0]->getAsTyped()->getQualifier().precision; + + if (fnCandidate.getName().compare(0, 7, "texture") == 0) { + if (fnCandidate.getName().compare(0, 13, "textureGather") == 0) { + TString featureString = fnCandidate.getName() + "(...)"; + const char* feature = featureString.c_str(); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + + int compArg = -1; // track which argument, if any, is the constant component argument + if (fnCandidate.getName().compare("textureGatherOffset") == 0) { + // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument + if (fnCandidate[0].type->getSampler().dim == Esd2D && ! 
fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3) + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); + else + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2; + if (! callNode.getSequence()[offsetArg]->getAsConstantUnion()) + profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, + "non-constant offset argument"); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 3; + } else if (fnCandidate.getName().compare("textureGatherOffsets") == 0) { + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 3; + // check for constant offsets + int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2; + if (! callNode.getSequence()[offsetArg]->getAsConstantUnion()) + error(loc, "must be a compile-time constant:", feature, "offsets argument"); + } else if (fnCandidate.getName().compare("textureGather") == 0) { + // More than two arguments needs gpu_shader5, and rectangular or shadow needs gpu_shader5, + // otherwise, need GL_ARB_texture_gather. + if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) { + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 2; + } else + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); + } + + if (compArg > 0 && compArg < fnCandidate.getParamCount()) { + if (callNode.getSequence()[compArg]->getAsConstantUnion()) { + int value = callNode.getSequence()[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (value < 0 || value > 3) + error(loc, "must be 0, 1, 2, or 3:", feature, "component argument"); + } else + error(loc, "must be a compile-time constant:", feature, "component argument"); + } + } else { + // this is only for functions not starting "textureGather"... + if (fnCandidate.getName().find("Offset") != TString::npos) { + + // Handle texture-offset limits checking + int arg = -1; + if (fnCandidate.getName().compare("textureOffset") == 0) + arg = 2; + else if (fnCandidate.getName().compare("texelFetchOffset") == 0) + arg = 3; + else if (fnCandidate.getName().compare("textureProjOffset") == 0) + arg = 2; + else if (fnCandidate.getName().compare("textureLodOffset") == 0) + arg = 3; + else if (fnCandidate.getName().compare("textureProjLodOffset") == 0) + arg = 3; + else if (fnCandidate.getName().compare("textureGradOffset") == 0) + arg = 4; + else if (fnCandidate.getName().compare("textureProjGradOffset") == 0) + arg = 4; + + if (arg > 0) { + if (! 
callNode.getSequence()[arg]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "texel offset", ""); + else { + const TType& type = callNode.getSequence()[arg]->getAsTyped()->getType(); + for (int c = 0; c < type.getVectorSize(); ++c) { + int offset = callNode.getSequence()[arg]->getAsConstantUnion()->getConstArray()[c].getIConst(); + if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset) + error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]"); + } + } + } + } + } + } + + // GL_ARB_shader_texture_image_samples + if (fnCandidate.getName().compare(0, 14, "textureSamples") == 0 || fnCandidate.getName().compare(0, 12, "imageSamples") == 0) + profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples"); + + if (fnCandidate.getName().compare(0, 11, "imageAtomic") == 0) { + const TType& imageType = callNode.getSequence()[0]->getAsTyped()->getType(); + if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) { + if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui) + error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), ""); + } else { + if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0) + error(loc, "only supported on integer images", fnCandidate.getName().c_str(), ""); + else if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile()) + error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), ""); + } + } +} + +#endif + +// +// Do any extra checking for a user function call. +// +void TParseContext::userFunctionCallCheck(const TSourceLoc& loc, TIntermAggregate& callNode) +{ + TIntermSequence& arguments = callNode.getSequence(); + + for (int i = 0; i < (int)arguments.size(); ++i) + samplerConstructorLocationCheck(loc, "call argument", arguments[i]); +} + +// +// Emit an error if this is a sampler constructor +// +void TParseContext::samplerConstructorLocationCheck(const TSourceLoc& loc, const char* token, TIntermNode* node) +{ + if (node->getAsOperator() && node->getAsOperator()->getOp() == EOpConstructTextureSampler) + error(loc, "sampler constructor must appear at point of use", token, ""); +} + +// +// Handle seeing a built-in constructor in a grammar production. +// +TFunction* TParseContext::handleConstructorCall(const TSourceLoc& loc, const TPublicType& publicType) +{ + TType type(publicType); + type.getQualifier().precision = EpqNone; + + if (type.isArray()) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed constructor"); + profileRequires(loc, EEsProfile, 300, nullptr, "arrayed constructor"); + } + + TOperator op = intermediate.mapTypeToConstructorOp(type); + + if (op == EOpNull) { + error(loc, "cannot construct this type", type.getBasicString(), ""); + op = EOpConstructFloat; + TType errorType(EbtFloat); + type.shallowCopy(errorType); + } + + TString empty(""); + + return new TFunction(&empty, type, op); +} + +// Handle seeing a precision qualifier in the grammar. +void TParseContext::handlePrecisionQualifier(const TSourceLoc& /*loc*/, TQualifier& qualifier, TPrecisionQualifier precision) +{ + if (obeyPrecisionQualifiers()) + qualifier.precision = precision; +} + +// Check for messages to give on seeing a precision qualifier used in a +// declaration in the grammar. 
+void TParseContext::checkPrecisionQualifier(const TSourceLoc& loc, TPrecisionQualifier) +{ + if (precisionManager.shouldWarnAboutDefaults()) { + warn(loc, "all default precisions are highp; use precision statements to quiet warning, e.g.:\n" + " \"precision mediump int; precision highp float;\"", "", ""); + precisionManager.defaultWarningGiven(); + } +} + +// +// Same error message for all places assignments don't work. +// +void TParseContext::assignError(const TSourceLoc& loc, const char* op, TString left, TString right) +{ + error(loc, "", op, "cannot convert from '%s' to '%s'", + right.c_str(), left.c_str()); +} + +// +// Same error message for all places unary operations don't work. +// +void TParseContext::unaryOpError(const TSourceLoc& loc, const char* op, TString operand) +{ + error(loc, " wrong operand type", op, + "no operation '%s' exists that takes an operand of type %s (or there is no acceptable conversion)", + op, operand.c_str()); +} + +// +// Same error message for all places binary operations don't work. +// +void TParseContext::binaryOpError(const TSourceLoc& loc, const char* op, TString left, TString right) +{ + error(loc, " wrong operand types:", op, + "no operation '%s' exists that takes a left-hand operand of type '%s' and " + "a right operand of type '%s' (or there is no acceptable conversion)", + op, left.c_str(), right.c_str()); +} + +// +// A basic type of EbtVoid is a key that the name string was seen in the source, but +// it was not found as a variable in the symbol table. If so, give the error +// message and insert a dummy variable in the symbol table to prevent future errors. +// +void TParseContext::variableCheck(TIntermTyped*& nodePtr) +{ + TIntermSymbol* symbol = nodePtr->getAsSymbolNode(); + if (! symbol) + return; + + if (symbol->getType().getBasicType() == EbtVoid) { + const char *extraInfoFormat = ""; + if (spvVersion.vulkan != 0 && symbol->getName() == "gl_VertexID") { + extraInfoFormat = "(Did you mean gl_VertexIndex?)"; + } else if (spvVersion.vulkan != 0 && symbol->getName() == "gl_InstanceID") { + extraInfoFormat = "(Did you mean gl_InstanceIndex?)"; + } + error(symbol->getLoc(), "undeclared identifier", symbol->getName().c_str(), extraInfoFormat); + + // Add to symbol table to prevent future error messages on the same name + if (symbol->getName().size() > 0) { + TVariable* fakeVariable = new TVariable(&symbol->getName(), TType(EbtFloat)); + symbolTable.insert(*fakeVariable); + + // substitute a symbol node for this new variable + nodePtr = intermediate.addSymbol(*fakeVariable, symbol->getLoc()); + } + } else { + switch (symbol->getQualifier().storage) { + case EvqPointCoord: + profileRequires(symbol->getLoc(), ENoProfile, 120, nullptr, "gl_PointCoord"); + break; + default: break; // some compilers want this + } + } +} + +// +// Both test and if necessary, spit out an error, to see if the node is really +// an l-value that can be operated on this way. +// +// Returns true if there was an error. +// +bool TParseContext::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) +{ + TIntermBinary* binaryNode = node->getAsBinaryNode(); + + if (binaryNode) { + bool errorReturn = false; + + switch(binaryNode->getOp()) { +#ifndef GLSLANG_WEB + case EOpIndexDirect: + case EOpIndexIndirect: + // ... tessellation control shader ... + // If a per-vertex output variable is used as an l-value, it is a + // compile-time or link-time error if the expression indicating the + // vertex index is not the identifier gl_InvocationID.
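+ // For example (illustrative), in a tessellation control shader:
+ //     gl_out[gl_InvocationID].gl_Position = p; // allowed
+ //     gl_out[0].gl_Position = p; // error: index is not gl_InvocationID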
+ if (language == EShLangTessControl) { + const TType& leftType = binaryNode->getLeft()->getType(); + if (leftType.getQualifier().storage == EvqVaryingOut && ! leftType.getQualifier().patch && binaryNode->getLeft()->getAsSymbolNode()) { + // we have a per-vertex output + const TIntermSymbol* rightSymbol = binaryNode->getRight()->getAsSymbolNode(); + if (! rightSymbol || rightSymbol->getQualifier().builtIn != EbvInvocationId) + error(loc, "tessellation-control per-vertex output l-value must be indexed with gl_InvocationID", "[]", ""); + } + } + break; // left node is checked by base class +#endif + case EOpVectorSwizzle: + errorReturn = lValueErrorCheck(loc, op, binaryNode->getLeft()); + if (!errorReturn) { + int offset[4] = {0,0,0,0}; + + TIntermTyped* rightNode = binaryNode->getRight(); + TIntermAggregate *aggrNode = rightNode->getAsAggregate(); + + for (TIntermSequence::iterator p = aggrNode->getSequence().begin(); + p != aggrNode->getSequence().end(); p++) { + int value = (*p)->getAsTyped()->getAsConstantUnion()->getConstArray()[0].getIConst(); + offset[value]++; + if (offset[value] > 1) { + error(loc, " l-value of swizzle cannot have duplicate components", op, "", ""); + + return true; + } + } + } + + return errorReturn; + default: + break; + } + + if (errorReturn) { + error(loc, " l-value required", op, "", ""); + return true; + } + } + + if (binaryNode && binaryNode->getOp() == EOpIndexDirectStruct && binaryNode->getLeft()->isReference()) + return false; + + // Let the base class check errors + if (TParseContextBase::lValueErrorCheck(loc, op, node)) + return true; + + const char* symbol = nullptr; + TIntermSymbol* symNode = node->getAsSymbolNode(); + if (symNode != nullptr) + symbol = symNode->getName().c_str(); + + const char* message = nullptr; + switch (node->getQualifier().storage) { + case EvqVaryingIn: message = "can't modify shader input"; break; + case EvqInstanceId: message = "can't modify gl_InstanceID"; break; + case EvqVertexId: message = "can't modify gl_VertexID"; break; + case EvqFace: message = "can't modify gl_FrontFace"; break; + case EvqFragCoord: message = "can't modify gl_FragCoord"; break; + case EvqPointCoord: message = "can't modify gl_PointCoord"; break; + case EvqFragDepth: + intermediate.setDepthReplacing(); + // "In addition, it is an error to statically write to gl_FragDepth in the fragment shader." + if (isEsProfile() && intermediate.getEarlyFragmentTests()) + message = "can't modify gl_FragDepth if using early_fragment_tests"; + break; + + default: + break; + } + + if (message == nullptr && binaryNode == nullptr && symNode == nullptr) { + error(loc, " l-value required", op, "", ""); + + return true; + } + + // + // Everything else is okay, no error. + // + if (message == nullptr) + return false; + + // + // If we get here, we have an error and a message. + // + if (symNode) + error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message); + else + error(loc, " l-value required", op, "(%s)", message); + + return true; +} + +// Test for and give an error if the node can't be read from. 
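+// For example (illustrative): reading an input qualified with
+// __explicitInterpAMD outside the interpolateAt* built-ins is rejected here,
+// while reads of writeonly objects are caught by the base-class check.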
+void TParseContext::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) +{ + // Let the base class check errors + TParseContextBase::rValueErrorCheck(loc, op, node); + + TIntermSymbol* symNode = node->getAsSymbolNode(); + if (!(symNode && symNode->getQualifier().isWriteOnly())) // base class checks + if (symNode && symNode->getQualifier().isExplicitInterpolation()) + error(loc, "can't read from explicitly-interpolated object: ", op, symNode->getName().c_str()); +} + +// +// Both test, and if necessary spit out an error, to see if the node is really +// a constant. +// +void TParseContext::constantValueCheck(TIntermTyped* node, const char* token) +{ + if (! node->getQualifier().isConstant()) + error(node->getLoc(), "constant expression required", token, ""); +} + +// +// Both test, and if necessary spit out an error, to see if the node is really +// an integer. +// +void TParseContext::integerCheck(const TIntermTyped* node, const char* token) +{ + if ((node->getBasicType() == EbtInt || node->getBasicType() == EbtUint) && node->isScalar()) + return; + + error(node->getLoc(), "scalar integer expression required", token, ""); +} + +// +// Both test, and if necessary spit out an error, to see if we are currently +// globally scoped. +// +void TParseContext::globalCheck(const TSourceLoc& loc, const char* token) +{ + if (! symbolTable.atGlobalLevel()) + error(loc, "not allowed in nested scope", token, ""); +} + +// +// Reserved errors for GLSL. +// +void TParseContext::reservedErrorCheck(const TSourceLoc& loc, const TString& identifier) +{ + // "Identifiers starting with "gl_" are reserved for use by OpenGL, and may not be + // declared in a shader; this results in a compile-time error." + if (! symbolTable.atBuiltInLevel()) { + if (builtInName(identifier)) + error(loc, "identifiers starting with \"gl_\" are reserved", identifier.c_str(), ""); + + // "__" are not supposed to be an error. ES 300 (and desktop) added the clarification: + // "In addition, all identifiers containing two consecutive underscores (__) are + // reserved; using such a name does not itself result in an error, but may result + // in undefined behavior." + // however, before that, ES tests required an error. + if (identifier.find("__") != TString::npos) { + if (isEsProfile() && version < 300) + error(loc, "identifiers containing consecutive underscores (\"__\") are reserved, and an error if version < 300", identifier.c_str(), ""); + else + warn(loc, "identifiers containing consecutive underscores (\"__\") are reserved", identifier.c_str(), ""); + } + } +} + +// +// Reserved errors for the preprocessor. +// +void TParseContext::reservedPpErrorCheck(const TSourceLoc& loc, const char* identifier, const char* op) +{ + // "__" are not supposed to be an error. ES 300 (and desktop) added the clarification: + // "All macro names containing two consecutive underscores ( __ ) are reserved; + // defining such a name does not itself result in an error, but may result in + // undefined behavior. All macro names prefixed with "GL_" ("GL" followed by a + // single underscore) are also reserved, and defining such a name results in a + // compile-time error." + // however, before that, ES tests required an error. 
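+ // For example (illustrative): "#define GL_MAGIC 1" is always an error,
+ // while "#define __MAGIC 1" is an error on ES before 300 and a warning
+ // otherwise, per the checks below.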
+ if (strncmp(identifier, "GL_", 3) == 0) + ppError(loc, "names beginning with \"GL_\" can't be (un)defined:", op, identifier); + else if (strncmp(identifier, "defined", 8) == 0) + ppError(loc, "\"defined\" can't be (un)defined:", op, identifier); + else if (strstr(identifier, "__") != 0) { + if (isEsProfile() && version >= 300 && + (strcmp(identifier, "__LINE__") == 0 || + strcmp(identifier, "__FILE__") == 0 || + strcmp(identifier, "__VERSION__") == 0)) + ppError(loc, "predefined names can't be (un)defined:", op, identifier); + else { + if (isEsProfile() && version < 300) + ppError(loc, "names containing consecutive underscores are reserved, and an error if version < 300:", op, identifier); + else + ppWarn(loc, "names containing consecutive underscores are reserved:", op, identifier); + } + } +} + +// +// See if this version/profile allows use of the line-continuation character '\'. +// +// Returns true if a line continuation should be done. +// +bool TParseContext::lineContinuationCheck(const TSourceLoc& loc, bool endOfComment) +{ +#ifdef GLSLANG_WEB + return true; +#endif + + const char* message = "line continuation"; + + bool lineContinuationAllowed = (isEsProfile() && version >= 300) || + (!isEsProfile() && (version >= 420 || extensionTurnedOn(E_GL_ARB_shading_language_420pack))); + + if (endOfComment) { + if (lineContinuationAllowed) + warn(loc, "used at end of comment; the following line is still part of the comment", message, ""); + else + warn(loc, "used at end of comment, but this version does not provide line continuation", message, ""); + + return lineContinuationAllowed; + } + + if (relaxedErrors()) { + if (! lineContinuationAllowed) + warn(loc, "not allowed in this version", message, ""); + return true; + } else { + profileRequires(loc, EEsProfile, 300, nullptr, message); + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, message); + } + + return lineContinuationAllowed; +} + +bool TParseContext::builtInName(const TString& identifier) +{ + return identifier.compare(0, 3, "gl_") == 0; +} + +// +// Make sure there is enough data and not too many arguments provided to the +// constructor to build something of the type of the constructor. Also returns +// the type of the constructor. +// +// Part of establishing type is establishing specialization-constness. +// We don't yet know "top down" whether type is a specialization constant, +// but a const constructor can become a specialization constant if any of +// its children are, subject to KHR_vulkan_glsl rules: +// +// - int(), uint(), and bool() constructors for type conversions +// from any of the following types to any of the following types: +// * int +// * uint +// * bool +// - vector versions of the above conversion constructors +// +// Returns true if there was an error in construction. +// +bool TParseContext::constructorError(const TSourceLoc& loc, TIntermNode* node, TFunction& function, TOperator op, TType& type) +{ + // See if the constructor does not establish the main type, only requalifies + // it, in which case the type comes from the argument instead of from the + // constructor function.
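+ // For example (illustrative): nonuniformEXT(idx) keeps idx's type and only
+ // adds the nonUniform qualifier, as the EOpConstructNonuniform case below
+ // shows.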
+ switch (op) { +#ifndef GLSLANG_WEB + case EOpConstructNonuniform: + if (node != nullptr && node->getAsTyped() != nullptr) { + type.shallowCopy(node->getAsTyped()->getType()); + type.getQualifier().makeTemporary(); + type.getQualifier().nonUniform = true; + } + break; +#endif + default: + type.shallowCopy(function.getType()); + break; + } + + // See if it's a matrix + bool constructingMatrix = false; + switch (op) { + case EOpConstructTextureSampler: + return constructorTextureSamplerError(loc, function); + case EOpConstructMat2x2: + case EOpConstructMat2x3: + case EOpConstructMat2x4: + case EOpConstructMat3x2: + case EOpConstructMat3x3: + case EOpConstructMat3x4: + case EOpConstructMat4x2: + case EOpConstructMat4x3: + case EOpConstructMat4x4: +#ifndef GLSLANG_WEB + case EOpConstructDMat2x2: + case EOpConstructDMat2x3: + case EOpConstructDMat2x4: + case EOpConstructDMat3x2: + case EOpConstructDMat3x3: + case EOpConstructDMat3x4: + case EOpConstructDMat4x2: + case EOpConstructDMat4x3: + case EOpConstructDMat4x4: + case EOpConstructF16Mat2x2: + case EOpConstructF16Mat2x3: + case EOpConstructF16Mat2x4: + case EOpConstructF16Mat3x2: + case EOpConstructF16Mat3x3: + case EOpConstructF16Mat3x4: + case EOpConstructF16Mat4x2: + case EOpConstructF16Mat4x3: + case EOpConstructF16Mat4x4: +#endif + constructingMatrix = true; + break; + default: + break; + } + + // + // Walk the arguments for first-pass checks and collection of information. + // + + int size = 0; + bool constType = true; + bool specConstType = false; // value is only valid if constType is true + bool full = false; + bool overFull = false; + bool matrixInMatrix = false; + bool arrayArg = false; + bool floatArgument = false; + for (int arg = 0; arg < function.getParamCount(); ++arg) { + if (function[arg].type->isArray()) { + if (function[arg].type->isUnsizedArray()) { + // Can't construct from an unsized array. + error(loc, "array argument must be sized", "constructor", ""); + return true; + } + arrayArg = true; + } + if (constructingMatrix && function[arg].type->isMatrix()) + matrixInMatrix = true; + + // 'full' will go to true when enough args have been seen. If we loop + // again, there is an extra argument. + if (full) { + // For vectors and matrices, it's okay to have too many components + // available, but not okay to have unused arguments. + overFull = true; + } + + size += function[arg].type->computeNumComponents(); + if (op != EOpConstructStruct && ! type.isArray() && size >= type.computeNumComponents()) + full = true; + + if (! 
function[arg].type->getQualifier().isConstant()) + constType = false; + if (function[arg].type->getQualifier().isSpecConstant()) + specConstType = true; + if (function[arg].type->isFloatingDomain()) + floatArgument = true; + if (type.isStruct()) { + if (function[arg].type->contains16BitFloat()) { + requireFloat16Arithmetic(loc, "constructor", "can't construct structure containing 16-bit type"); + } + if (function[arg].type->contains16BitInt()) { + requireInt16Arithmetic(loc, "constructor", "can't construct structure containing 16-bit type"); + } + if (function[arg].type->contains8BitInt()) { + requireInt8Arithmetic(loc, "constructor", "can't construct structure containing 8-bit type"); + } + } + } + if (op == EOpConstructNonuniform) + constType = false; + +#ifndef GLSLANG_WEB + switch (op) { + case EOpConstructFloat16: + case EOpConstructF16Vec2: + case EOpConstructF16Vec3: + case EOpConstructF16Vec4: + if (type.isArray()) + requireFloat16Arithmetic(loc, "constructor", "16-bit arrays not supported"); + if (type.isVector() && function.getParamCount() != 1) + requireFloat16Arithmetic(loc, "constructor", "16-bit vectors only take vector types"); + break; + case EOpConstructUint16: + case EOpConstructU16Vec2: + case EOpConstructU16Vec3: + case EOpConstructU16Vec4: + case EOpConstructInt16: + case EOpConstructI16Vec2: + case EOpConstructI16Vec3: + case EOpConstructI16Vec4: + if (type.isArray()) + requireInt16Arithmetic(loc, "constructor", "16-bit arrays not supported"); + if (type.isVector() && function.getParamCount() != 1) + requireInt16Arithmetic(loc, "constructor", "16-bit vectors only take vector types"); + break; + case EOpConstructUint8: + case EOpConstructU8Vec2: + case EOpConstructU8Vec3: + case EOpConstructU8Vec4: + case EOpConstructInt8: + case EOpConstructI8Vec2: + case EOpConstructI8Vec3: + case EOpConstructI8Vec4: + if (type.isArray()) + requireInt8Arithmetic(loc, "constructor", "8-bit arrays not supported"); + if (type.isVector() && function.getParamCount() != 1) + requireInt8Arithmetic(loc, "constructor", "8-bit vectors only take vector types"); + break; + default: + break; + } +#endif + + // inherit constness from children + if (constType) { + bool makeSpecConst; + // Finish pinning down spec-const semantics + if (specConstType) { + switch (op) { + case EOpConstructInt8: + case EOpConstructInt: + case EOpConstructUint: + case EOpConstructBool: + case EOpConstructBVec2: + case EOpConstructBVec3: + case EOpConstructBVec4: + case EOpConstructIVec2: + case EOpConstructIVec3: + case EOpConstructIVec4: + case EOpConstructUVec2: + case EOpConstructUVec3: + case EOpConstructUVec4: +#ifndef GLSLANG_WEB + case EOpConstructUint8: + case EOpConstructInt16: + case EOpConstructUint16: + case EOpConstructInt64: + case EOpConstructUint64: + case EOpConstructI8Vec2: + case EOpConstructI8Vec3: + case EOpConstructI8Vec4: + case EOpConstructU8Vec2: + case EOpConstructU8Vec3: + case EOpConstructU8Vec4: + case EOpConstructI16Vec2: + case EOpConstructI16Vec3: + case EOpConstructI16Vec4: + case EOpConstructU16Vec2: + case EOpConstructU16Vec3: + case EOpConstructU16Vec4: + case EOpConstructI64Vec2: + case EOpConstructI64Vec3: + case EOpConstructI64Vec4: + case EOpConstructU64Vec2: + case EOpConstructU64Vec3: + case EOpConstructU64Vec4: +#endif + // This was the list of valid ones, if they aren't converting from float + // and aren't making an array. + makeSpecConst = ! floatArgument && ! 
type.isArray(); + break; + default: + // anything else wasn't white-listed in the spec as a conversion + makeSpecConst = false; + break; + } + } else + makeSpecConst = false; + + if (makeSpecConst) + type.getQualifier().makeSpecConstant(); + else if (specConstType) + type.getQualifier().makeTemporary(); + else + type.getQualifier().storage = EvqConst; + } + + if (type.isArray()) { + if (function.getParamCount() == 0) { + error(loc, "array constructor must have at least one argument", "constructor", ""); + return true; + } + + if (type.isUnsizedArray()) { + // auto adapt the constructor type to the number of arguments + type.changeOuterArraySize(function.getParamCount()); + } else if (type.getOuterArraySize() != function.getParamCount()) { + error(loc, "array constructor needs one argument per array element", "constructor", ""); + return true; + } + + if (type.isArrayOfArrays()) { + // Types have to match, but we're still making the type. + // Finish making the type, and the comparison is done later + // when checking for conversion. + TArraySizes& arraySizes = *type.getArraySizes(); + + // At least the dimensionalities have to match. + if (! function[0].type->isArray() || + arraySizes.getNumDims() != function[0].type->getArraySizes()->getNumDims() + 1) { + error(loc, "array constructor argument not correct type to construct array element", "constructor", ""); + return true; + } + + if (arraySizes.isInnerUnsized()) { + // "Arrays of arrays ..., and the size for any dimension is optional" + // That means we need to adopt (from the first argument) the other array sizes into the type. + for (int d = 1; d < arraySizes.getNumDims(); ++d) { + if (arraySizes.getDimSize(d) == UnsizedArraySize) { + arraySizes.setDimSize(d, function[0].type->getArraySizes()->getDimSize(d - 1)); + } + } + } + } + } + + if (arrayArg && op != EOpConstructStruct && ! type.isArrayOfArrays()) { + error(loc, "constructing non-array constituent from array argument", "constructor", ""); + return true; + } + + if (matrixInMatrix && ! type.isArray()) { + profileRequires(loc, ENoProfile, 120, nullptr, "constructing matrix from matrix"); + + // "If a matrix argument is given to a matrix constructor, + // it is a compile-time error to have any other arguments." + if (function.getParamCount() != 1) + error(loc, "matrix constructed from matrix can only have one argument", "constructor", ""); + return false; + } + + if (overFull) { + error(loc, "too many arguments", "constructor", ""); + return true; + } + + if (op == EOpConstructStruct && ! 
type.isArray() && (int)type.getStruct()->size() != function.getParamCount()) { + error(loc, "Number of constructor parameters does not match the number of structure fields", "constructor", ""); + return true; + } + + if ((op != EOpConstructStruct && size != 1 && size < type.computeNumComponents()) || + (op == EOpConstructStruct && size < type.computeNumComponents())) { + error(loc, "not enough data provided for construction", "constructor", ""); + return true; + } + + if (type.isCoopMat() && function.getParamCount() != 1) { + error(loc, "wrong number of arguments", "constructor", ""); + return true; + } + if (type.isCoopMat() && + !(function[0].type->isScalar() || function[0].type->isCoopMat())) { + error(loc, "Cooperative matrix constructor argument must be scalar or cooperative matrix", "constructor", ""); + return true; + } + + TIntermTyped* typed = node->getAsTyped(); + if (typed == nullptr) { + error(loc, "constructor argument does not have a type", "constructor", ""); + return true; + } + if (op != EOpConstructStruct && op != EOpConstructNonuniform && typed->getBasicType() == EbtSampler) { + error(loc, "cannot convert a sampler", "constructor", ""); + return true; + } + if (op != EOpConstructStruct && typed->isAtomic()) { + error(loc, "cannot convert an atomic_uint", "constructor", ""); + return true; + } + if (typed->getBasicType() == EbtVoid) { + error(loc, "cannot convert a void", "constructor", ""); + return true; + } + + return false; +} + +// Verify all the correct semantics for constructing a combined texture/sampler. +// Return true if the semantics are incorrect. +bool TParseContext::constructorTextureSamplerError(const TSourceLoc& loc, const TFunction& function) +{ + TString constructorName = function.getType().getBasicTypeString(); // TODO: performance: should not be making copy; interface needs to change + const char* token = constructorName.c_str(); + + // exactly two arguments needed + if (function.getParamCount() != 2) { + error(loc, "sampler-constructor requires two arguments", token, ""); + return true; + } + + // For now, not allowing arrayed constructors, the rest of this function + // is set up to allow them, if this test is removed: + if (function.getType().isArray()) { + error(loc, "sampler-constructor cannot make an array of samplers", token, ""); + return true; + } + + // first argument + // * the constructor's first argument must be a texture type + // * the dimensionality (1D, 2D, 3D, Cube, Rect, Buffer, MS, and Array) + // of the texture type must match that of the constructed sampler type + // (that is, the suffixes of the type of the first argument and the + // type of the constructor will be spelled the same way) + if (function[0].type->getBasicType() != EbtSampler || + ! 
function[0].type->getSampler().isTexture() ||
+ function[0].type->isArray()) {
+ error(loc, "sampler-constructor first argument must be a scalar *texture* type", token, "");
+ return true;
+ }
+ // simulate the first argument's impact on the result type, so it can be compared with the encapsulated operator!=()
+ TSampler texture = function.getType().getSampler();
+ texture.setCombined(false);
+ texture.shadow = false;
+ if (texture != function[0].type->getSampler()) {
+ error(loc, "sampler-constructor first argument must be a *texture* type"
+ " matching the dimensionality and sampled type of the constructor", token, "");
+ return true;
+ }
+
+ // second argument
+ // * the constructor's second argument must be a scalar of type
+ // *sampler* or *samplerShadow*
+ if ( function[1].type->getBasicType() != EbtSampler ||
+ ! function[1].type->getSampler().isPureSampler() ||
+ function[1].type->isArray()) {
+ error(loc, "sampler-constructor second argument must be a scalar sampler or samplerShadow", token, "");
+ return true;
+ }
+
+ return false;
+}
+
+// Checks to see if a void variable has been declared and raises an error message for such a case
+//
+// returns true in case of an error
+//
+bool TParseContext::voidErrorCheck(const TSourceLoc& loc, const TString& identifier, const TBasicType basicType)
+{
+ if (basicType == EbtVoid) {
+ error(loc, "illegal use of type 'void'", identifier.c_str(), "");
+ return true;
+ }
+
+ return false;
+}
+
+// Checks to see if the node (for the expression) contains a scalar boolean expression or not
+void TParseContext::boolCheck(const TSourceLoc& loc, const TIntermTyped* type)
+{
+ if (type->getBasicType() != EbtBool || type->isArray() || type->isMatrix() || type->isVector())
+ error(loc, "boolean expression expected", "", "");
+}
+
+// This function checks to see if the node (for the expression) contains a scalar boolean expression or not
+void TParseContext::boolCheck(const TSourceLoc& loc, const TPublicType& pType)
+{
+ if (pType.basicType != EbtBool || pType.arraySizes || pType.matrixCols > 1 || (pType.vectorSize > 1))
+ error(loc, "boolean expression expected", "", "");
+}
+
+void TParseContext::samplerCheck(const TSourceLoc& loc, const TType& type, const TString& identifier, TIntermTyped* /*initializer*/)
+{
+ // Check that the appropriate extension is enabled if external sampler is used.
+ // There are two extensions. The correct one must be used based on GLSL version.
+ if (type.getBasicType() == EbtSampler && type.getSampler().isExternal()) {
+ if (version < 300) {
+ requireExtensions(loc, 1, &E_GL_OES_EGL_image_external, "samplerExternalOES");
+ } else {
+ requireExtensions(loc, 1, &E_GL_OES_EGL_image_external_essl3, "samplerExternalOES");
+ }
+ }
+ if (type.getSampler().isYuv()) {
+ requireExtensions(loc, 1, &E_GL_EXT_YUV_target, "__samplerExternal2DY2YEXT");
+ }
+
+ if (type.getQualifier().storage == EvqUniform)
+ return;
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtSampler))
+ error(loc, "non-uniform struct contains a sampler or image:", type.getBasicTypeString().c_str(), identifier.c_str());
+ else if (type.getBasicType() == EbtSampler && type.getQualifier().storage != EvqUniform) {
+ // non-uniform sampler
+ // not yet: okay if it has an initializer
+ // if (!
initializer) + error(loc, "sampler/image types can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str()); + } +} + +#ifndef GLSLANG_WEB + +void TParseContext::atomicUintCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) +{ + if (type.getQualifier().storage == EvqUniform) + return; + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAtomicUint)) + error(loc, "non-uniform struct contains an atomic_uint:", type.getBasicTypeString().c_str(), identifier.c_str()); + else if (type.getBasicType() == EbtAtomicUint && type.getQualifier().storage != EvqUniform) + error(loc, "atomic_uints can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str()); +} + +void TParseContext::accStructCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) +{ + if (type.getQualifier().storage == EvqUniform) + return; + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAccStruct)) + error(loc, "non-uniform struct contains an accelerationStructureNV:", type.getBasicTypeString().c_str(), identifier.c_str()); + else if (type.getBasicType() == EbtAccStruct && type.getQualifier().storage != EvqUniform) + error(loc, "accelerationStructureNV can only be used in uniform variables or function parameters:", + type.getBasicTypeString().c_str(), identifier.c_str()); + +} + +#endif // GLSLANG_WEB + +void TParseContext::transparentOpaqueCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) +{ + if (parsingBuiltins) + return; + + if (type.getQualifier().storage != EvqUniform) + return; + + if (type.containsNonOpaque()) { + // Vulkan doesn't allow transparent uniforms outside of blocks + if (spvVersion.vulkan > 0) + vulkanRemoved(loc, "non-opaque uniforms outside a block"); + // OpenGL wants locations on these (unless they are getting automapped) + if (spvVersion.openGl > 0 && !type.getQualifier().hasLocation() && !intermediate.getAutoMapLocations()) + error(loc, "non-opaque uniform variables need a layout(location=L)", identifier.c_str(), ""); + } +} + +// +// Qualifier checks knowing the qualifier and that it is a member of a struct/block. +// +void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType) +{ + globalQualifierFixCheck(publicType.loc, publicType.qualifier); + checkNoShaderLayouts(publicType.loc, publicType.shaderQualifiers); + if (publicType.qualifier.isNonUniform()) { + error(publicType.loc, "not allowed on block or structure members", "nonuniformEXT", ""); + publicType.qualifier.nonUniform = false; + } +} + +// +// Check/fix just a full qualifier (no variables or types yet, but qualifier is complete) at global level. 
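+// For example, a global "in vec4 v;" is moved below from the generic 'in'
+// storage (EvqIn) to the pipeline input qualifier (EvqVaryingIn), while a
+// global "inout" declaration is rejected outright.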
+// +void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier) +{ + bool nonuniformOkay = false; + + // move from parameter/unknown qualifiers to pipeline in/out qualifiers + switch (qualifier.storage) { + case EvqIn: + profileRequires(loc, ENoProfile, 130, nullptr, "in for stage inputs"); + profileRequires(loc, EEsProfile, 300, nullptr, "in for stage inputs"); + qualifier.storage = EvqVaryingIn; + nonuniformOkay = true; + break; + case EvqOut: + profileRequires(loc, ENoProfile, 130, nullptr, "out for stage outputs"); + profileRequires(loc, EEsProfile, 300, nullptr, "out for stage outputs"); + qualifier.storage = EvqVaryingOut; + break; + case EvqInOut: + qualifier.storage = EvqVaryingIn; + error(loc, "cannot use 'inout' at global scope", "", ""); + break; + case EvqGlobal: + case EvqTemporary: + nonuniformOkay = true; + break; + default: + break; + } + + if (!nonuniformOkay && qualifier.isNonUniform()) + error(loc, "for non-parameter, can only apply to 'in' or no storage qualifier", "nonuniformEXT", ""); + + invariantCheck(loc, qualifier); +} + +// +// Check a full qualifier and type (no variable yet) at global level. +// +void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQualifier& qualifier, const TPublicType& publicType) +{ + if (! symbolTable.atGlobalLevel()) + return; + + if (!(publicType.userDef && publicType.userDef->isReference())) { + if (qualifier.isMemoryQualifierImageAndSSBOOnly() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer) { + error(loc, "memory qualifiers cannot be used on this type", "", ""); + } else if (qualifier.isMemory() && (publicType.basicType != EbtSampler) && !publicType.qualifier.isUniformOrBuffer()) { + error(loc, "memory qualifiers cannot be used on this type", "", ""); + } + } + + if (qualifier.storage == EvqBuffer && + publicType.basicType != EbtBlock && + !qualifier.hasBufferReference()) + error(loc, "buffers can be declared only as blocks", "buffer", ""); + + if (qualifier.storage != EvqVaryingIn && publicType.basicType == EbtDouble && + extensionTurnedOn(E_GL_ARB_vertex_attrib_64bit) && language == EShLangVertex && + version < 400) { + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 410, E_GL_ARB_gpu_shader_fp64, "vertex-shader `double` type"); + } + if (qualifier.storage != EvqVaryingIn && qualifier.storage != EvqVaryingOut) + return; + + if (publicType.shaderQualifiers.hasBlendEquation()) + error(loc, "can only be applied to a standalone 'out'", "blend equation", ""); + + // now, knowing it is a shader in/out, do all the in/out semantic checks + + if (publicType.basicType == EbtBool && !parsingBuiltins) { + error(loc, "cannot be bool", GetStorageQualifierString(qualifier.storage), ""); + return; + } + + if (isTypeInt(publicType.basicType) || publicType.basicType == EbtDouble) + profileRequires(loc, EEsProfile, 300, nullptr, "shader input/output"); + + if (!qualifier.flat && !qualifier.isExplicitInterpolation() && !qualifier.isPervertexNV()) { + if (isTypeInt(publicType.basicType) || + publicType.basicType == EbtDouble || + (publicType.userDef && ( publicType.userDef->containsBasicType(EbtInt) + || publicType.userDef->containsBasicType(EbtUint) + || publicType.userDef->contains16BitInt() + || publicType.userDef->contains8BitInt() + || publicType.userDef->contains64BitInt() + || publicType.userDef->containsDouble()))) { + if (qualifier.storage == EvqVaryingIn && language == EShLangFragment) + error(loc, "must be qualified as flat", 
TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage)); + else if (qualifier.storage == EvqVaryingOut && language == EShLangVertex && version == 300) + error(loc, "must be qualified as flat", TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage)); + } + } + + if (qualifier.isPatch() && qualifier.isInterpolation()) + error(loc, "cannot use interpolation qualifiers with patch", "patch", ""); + + if (qualifier.isTaskMemory() && publicType.basicType != EbtBlock) + error(loc, "taskNV variables can be declared only as blocks", "taskNV", ""); + + if (qualifier.storage == EvqVaryingIn) { + switch (language) { + case EShLangVertex: + if (publicType.basicType == EbtStruct) { + error(loc, "cannot be a structure or array", GetStorageQualifierString(qualifier.storage), ""); + return; + } + if (publicType.arraySizes) { + requireProfile(loc, ~EEsProfile, "vertex input arrays"); + profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays"); + } + if (publicType.basicType == EbtDouble) + profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_vertex_attrib_64bit, "vertex-shader `double` type input"); + if (qualifier.isAuxiliary() || qualifier.isInterpolation() || qualifier.isMemory() || qualifier.invariant) + error(loc, "vertex input cannot be further qualified", "", ""); + break; + case EShLangFragment: + if (publicType.userDef) { + profileRequires(loc, EEsProfile, 300, nullptr, "fragment-shader struct input"); + profileRequires(loc, ~EEsProfile, 150, nullptr, "fragment-shader struct input"); + if (publicType.userDef->containsStructure()) + requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing structure"); + if (publicType.userDef->containsArray()) + requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing an array"); + } + break; + case EShLangCompute: + if (! 
symbolTable.atBuiltInLevel()) + error(loc, "global storage input qualifier cannot be used in a compute shader", "in", ""); + break; +#ifndef GLSLANG_WEB + case EShLangTessControl: + if (qualifier.patch) + error(loc, "can only use on output in tessellation-control shader", "patch", ""); + break; +#endif + default: + break; + } + } else { + // qualifier.storage == EvqVaryingOut + switch (language) { + case EShLangVertex: + if (publicType.userDef) { + profileRequires(loc, EEsProfile, 300, nullptr, "vertex-shader struct output"); + profileRequires(loc, ~EEsProfile, 150, nullptr, "vertex-shader struct output"); + if (publicType.userDef->containsStructure()) + requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing structure"); + if (publicType.userDef->containsArray()) + requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing an array"); + } + + break; + case EShLangFragment: + profileRequires(loc, EEsProfile, 300, nullptr, "fragment shader output"); + if (publicType.basicType == EbtStruct) { + error(loc, "cannot be a structure", GetStorageQualifierString(qualifier.storage), ""); + return; + } + if (publicType.matrixRows > 0) { + error(loc, "cannot be a matrix", GetStorageQualifierString(qualifier.storage), ""); + return; + } + if (qualifier.isAuxiliary()) + error(loc, "can't use auxiliary qualifier on a fragment output", "centroid/sample/patch", ""); + if (qualifier.isInterpolation()) + error(loc, "can't use interpolation qualifier on a fragment output", "flat/smooth/noperspective", ""); + if (publicType.basicType == EbtDouble || publicType.basicType == EbtInt64 || publicType.basicType == EbtUint64) + error(loc, "cannot contain a double, int64, or uint64", GetStorageQualifierString(qualifier.storage), ""); + break; + + case EShLangCompute: + error(loc, "global storage output qualifier cannot be used in a compute shader", "out", ""); + break; +#ifndef GLSLANG_WEB + case EShLangTessEvaluation: + if (qualifier.patch) + error(loc, "can only use on input in tessellation-evaluation shader", "patch", ""); + break; +#endif + default: + break; + } + } +} + +// +// Merge characteristics of the 'src' qualifier into the 'dst'. +// If there is duplication, issue error messages, unless 'force' +// is specified, which means to just override default settings. +// +// Also, when force is false, it will be assumed that 'src' follows +// 'dst', for the purpose of error checking order for versions +// that require specific orderings of qualifiers. +// +void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, const TQualifier& src, bool force) +{ + // Multiple auxiliary qualifiers (mostly done later by 'individual qualifiers') + if (src.isAuxiliary() && dst.isAuxiliary()) + error(loc, "can only have one auxiliary qualifier (centroid, patch, and sample)", "", ""); + + // Multiple interpolation qualifiers (mostly done later by 'individual qualifiers') + if (src.isInterpolation() && dst.isInterpolation()) + error(loc, "can only have one interpolation qualifier (flat, smooth, noperspective, __explicitInterpAMD)", "", ""); + + // Ordering + if (! force && ((!isEsProfile() && version < 420) || + (isEsProfile() && version < 310)) + && ! 
extensionTurnedOn(E_GL_ARB_shading_language_420pack)) { + // non-function parameters + if (src.isNoContraction() && (dst.invariant || dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone)) + error(loc, "precise qualifier must appear first", "", ""); + if (src.invariant && (dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone)) + error(loc, "invariant qualifier must appear before interpolation, storage, and precision qualifiers ", "", ""); + else if (src.isInterpolation() && (dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone)) + error(loc, "interpolation qualifiers must appear before storage and precision qualifiers", "", ""); + else if (src.isAuxiliary() && (dst.storage != EvqTemporary || dst.precision != EpqNone)) + error(loc, "Auxiliary qualifiers (centroid, patch, and sample) must appear before storage and precision qualifiers", "", ""); + else if (src.storage != EvqTemporary && (dst.precision != EpqNone)) + error(loc, "precision qualifier must appear as last qualifier", "", ""); + + // function parameters + if (src.isNoContraction() && (dst.storage == EvqConst || dst.storage == EvqIn || dst.storage == EvqOut)) + error(loc, "precise qualifier must appear first", "", ""); + if (src.storage == EvqConst && (dst.storage == EvqIn || dst.storage == EvqOut)) + error(loc, "in/out must appear before const", "", ""); + } + + // Storage qualification + if (dst.storage == EvqTemporary || dst.storage == EvqGlobal) + dst.storage = src.storage; + else if ((dst.storage == EvqIn && src.storage == EvqOut) || + (dst.storage == EvqOut && src.storage == EvqIn)) + dst.storage = EvqInOut; + else if ((dst.storage == EvqIn && src.storage == EvqConst) || + (dst.storage == EvqConst && src.storage == EvqIn)) + dst.storage = EvqConstReadOnly; + else if (src.storage != EvqTemporary && + src.storage != EvqGlobal) + error(loc, "too many storage qualifiers", GetStorageQualifierString(src.storage), ""); + + // Precision qualifiers + if (! 
force && src.precision != EpqNone && dst.precision != EpqNone) + error(loc, "only one precision qualifier allowed", GetPrecisionQualifierString(src.precision), ""); + if (dst.precision == EpqNone || (force && src.precision != EpqNone)) + dst.precision = src.precision; + +#ifndef GLSLANG_WEB + if (!force && ((src.coherent && (dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) || + (src.devicecoherent && (dst.coherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) || + (src.queuefamilycoherent && (dst.coherent || dst.devicecoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) || + (src.workgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.subgroupcoherent || dst.shadercallcoherent)) || + (src.subgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.shadercallcoherent)) || + (src.shadercallcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)))) { + error(loc, "only one coherent/devicecoherent/queuefamilycoherent/workgroupcoherent/subgroupcoherent/shadercallcoherent qualifier allowed", + GetPrecisionQualifierString(src.precision), ""); + } +#endif + // Layout qualifiers + mergeObjectLayoutQualifiers(dst, src, false); + + // individual qualifiers + bool repeated = false; + #define MERGE_SINGLETON(field) repeated |= dst.field && src.field; dst.field |= src.field; + MERGE_SINGLETON(invariant); + MERGE_SINGLETON(centroid); + MERGE_SINGLETON(smooth); + MERGE_SINGLETON(flat); + MERGE_SINGLETON(specConstant); +#ifndef GLSLANG_WEB + MERGE_SINGLETON(noContraction); + MERGE_SINGLETON(nopersp); + MERGE_SINGLETON(explicitInterp); + MERGE_SINGLETON(perPrimitiveNV); + MERGE_SINGLETON(perViewNV); + MERGE_SINGLETON(perTaskNV); + MERGE_SINGLETON(patch); + MERGE_SINGLETON(sample); + MERGE_SINGLETON(coherent); + MERGE_SINGLETON(devicecoherent); + MERGE_SINGLETON(queuefamilycoherent); + MERGE_SINGLETON(workgroupcoherent); + MERGE_SINGLETON(subgroupcoherent); + MERGE_SINGLETON(shadercallcoherent); + MERGE_SINGLETON(nonprivate); + MERGE_SINGLETON(volatil); + MERGE_SINGLETON(restrict); + MERGE_SINGLETON(readonly); + MERGE_SINGLETON(writeonly); + MERGE_SINGLETON(nonUniform); +#endif + + if (repeated) + error(loc, "replicated qualifiers", "", ""); +} + +void TParseContext::setDefaultPrecision(const TSourceLoc& loc, TPublicType& publicType, TPrecisionQualifier qualifier) +{ + TBasicType basicType = publicType.basicType; + + if (basicType == EbtSampler) { + defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)] = qualifier; + + return; // all is well + } + + if (basicType == EbtInt || basicType == EbtFloat) { + if (publicType.isScalar()) { + defaultPrecision[basicType] = qualifier; + if (basicType == EbtInt) { + defaultPrecision[EbtUint] = qualifier; + precisionManager.explicitIntDefaultSeen(); + } else + precisionManager.explicitFloatDefaultSeen(); + + return; // all is well + } + } + + if (basicType == EbtAtomicUint) { + if (qualifier != EpqHigh) + error(loc, "can only apply highp to atomic_uint", "precision", ""); + + return; + } + + error(loc, "cannot apply precision statement to this type; use 'float', 'int' or a sampler type", TType::getBasicString(basicType), ""); +} + +// used to flatten the sampler type space into a single dimension +// correlates with the declaration of 
defaultSamplerPrecision[] +int TParseContext::computeSamplerTypeIndex(TSampler& sampler) +{ + int arrayIndex = sampler.arrayed ? 1 : 0; + int shadowIndex = sampler.shadow ? 1 : 0; + int externalIndex = sampler.isExternal() ? 1 : 0; + int imageIndex = sampler.isImageClass() ? 1 : 0; + int msIndex = sampler.isMultiSample() ? 1 : 0; + + int flattened = EsdNumDims * (EbtNumTypes * (2 * (2 * (2 * (2 * arrayIndex + msIndex) + imageIndex) + shadowIndex) + + externalIndex) + sampler.type) + sampler.dim; + assert(flattened < maxSamplerIndex); + + return flattened; +} + +TPrecisionQualifier TParseContext::getDefaultPrecision(TPublicType& publicType) +{ + if (publicType.basicType == EbtSampler) + return defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)]; + else + return defaultPrecision[publicType.basicType]; +} + +void TParseContext::precisionQualifierCheck(const TSourceLoc& loc, TBasicType baseType, TQualifier& qualifier) +{ + // Built-in symbols are allowed some ambiguous precisions, to be pinned down + // later by context. + if (! obeyPrecisionQualifiers() || parsingBuiltins) + return; + +#ifndef GLSLANG_WEB + if (baseType == EbtAtomicUint && qualifier.precision != EpqNone && qualifier.precision != EpqHigh) + error(loc, "atomic counters can only be highp", "atomic_uint", ""); +#endif + + if (baseType == EbtFloat || baseType == EbtUint || baseType == EbtInt || baseType == EbtSampler || baseType == EbtAtomicUint) { + if (qualifier.precision == EpqNone) { + if (relaxedErrors()) + warn(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), "substituting 'mediump'"); + else + error(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), ""); + qualifier.precision = EpqMedium; + defaultPrecision[baseType] = EpqMedium; + } + } else if (qualifier.precision != EpqNone) + error(loc, "type cannot have precision qualifier", TType::getBasicString(baseType), ""); +} + +void TParseContext::parameterTypeCheck(const TSourceLoc& loc, TStorageQualifier qualifier, const TType& type) +{ + if ((qualifier == EvqOut || qualifier == EvqInOut) && type.isOpaque()) + error(loc, "samplers and atomic_uints cannot be output parameters", type.getBasicTypeString().c_str(), ""); + if (!parsingBuiltins && type.contains16BitFloat()) + requireFloat16Arithmetic(loc, type.getBasicTypeString().c_str(), "float16 types can only be in uniform block or buffer storage"); + if (!parsingBuiltins && type.contains16BitInt()) + requireInt16Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int16 types can only be in uniform block or buffer storage"); + if (!parsingBuiltins && type.contains8BitInt()) + requireInt8Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int8 types can only be in uniform block or buffer storage"); +} + +bool TParseContext::containsFieldWithBasicType(const TType& type, TBasicType basicType) +{ + if (type.getBasicType() == basicType) + return true; + + if (type.getBasicType() == EbtStruct) { + const TTypeList& structure = *type.getStruct(); + for (unsigned int i = 0; i < structure.size(); ++i) { + if (containsFieldWithBasicType(*structure[i].type, basicType)) + return true; + } + } + + return false; +} + +// +// Do size checking for an array type's size. 
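+// For example, "float a[3];" or a specialization-constant size is accepted
+// below, while a non-constant size or a size of zero draws an error.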
+// +void TParseContext::arraySizeCheck(const TSourceLoc& loc, TIntermTyped* expr, TArraySize& sizePair, const char *sizeType) +{ + bool isConst = false; + sizePair.node = nullptr; + + int size = 1; + + TIntermConstantUnion* constant = expr->getAsConstantUnion(); + if (constant) { + // handle true (non-specialization) constant + size = constant->getConstArray()[0].getIConst(); + isConst = true; + } else { + // see if it's a specialization constant instead + if (expr->getQualifier().isSpecConstant()) { + isConst = true; + sizePair.node = expr; + TIntermSymbol* symbol = expr->getAsSymbolNode(); + if (symbol && symbol->getConstArray().size() > 0) + size = symbol->getConstArray()[0].getIConst(); + } else if (expr->getAsUnaryNode() && + expr->getAsUnaryNode()->getOp() == glslang::EOpArrayLength && + expr->getAsUnaryNode()->getOperand()->getType().isCoopMat()) { + isConst = true; + size = 1; + sizePair.node = expr->getAsUnaryNode(); + } + } + + sizePair.size = size; + + if (! isConst || (expr->getBasicType() != EbtInt && expr->getBasicType() != EbtUint)) { + error(loc, sizeType, "", "must be a constant integer expression"); + return; + } + + if (size <= 0) { + error(loc, sizeType, "", "must be a positive integer"); + return; + } +} + +// +// See if this qualifier can be an array. +// +// Returns true if there is an error. +// +bool TParseContext::arrayQualifierError(const TSourceLoc& loc, const TQualifier& qualifier) +{ + if (qualifier.storage == EvqConst) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "const array"); + profileRequires(loc, EEsProfile, 300, nullptr, "const array"); + } + + if (qualifier.storage == EvqVaryingIn && language == EShLangVertex) { + requireProfile(loc, ~EEsProfile, "vertex input arrays"); + profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays"); + } + + return false; +} + +// +// See if this qualifier and type combination can be an array. +// Assumes arrayQualifierError() was also called to catch the type-invariant tests. +// +// Returns true if there is an error. 
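+// For example, a vertex-shader output declared "out float x[2][2];" (an array
+// of arrays) is restricted below to non-ES profiles, as is a fragment-shader
+// array-of-struct input.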
+// +bool TParseContext::arrayError(const TSourceLoc& loc, const TType& type) +{ + if (type.getQualifier().storage == EvqVaryingOut && language == EShLangVertex) { + if (type.isArrayOfArrays()) + requireProfile(loc, ~EEsProfile, "vertex-shader array-of-array output"); + else if (type.isStruct()) + requireProfile(loc, ~EEsProfile, "vertex-shader array-of-struct output"); + } + if (type.getQualifier().storage == EvqVaryingIn && language == EShLangFragment) { + if (type.isArrayOfArrays()) + requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array input"); + else if (type.isStruct()) + requireProfile(loc, ~EEsProfile, "fragment-shader array-of-struct input"); + } + if (type.getQualifier().storage == EvqVaryingOut && language == EShLangFragment) { + if (type.isArrayOfArrays()) + requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array output"); + } + + return false; +} + +// +// Require array to be completely sized +// +void TParseContext::arraySizeRequiredCheck(const TSourceLoc& loc, const TArraySizes& arraySizes) +{ + if (!parsingBuiltins && arraySizes.hasUnsized()) + error(loc, "array size required", "", ""); +} + +void TParseContext::structArrayCheck(const TSourceLoc& /*loc*/, const TType& type) +{ + const TTypeList& structure = *type.getStruct(); + for (int m = 0; m < (int)structure.size(); ++m) { + const TType& member = *structure[m].type; + if (member.isArray()) + arraySizeRequiredCheck(structure[m].loc, *member.getArraySizes()); + } +} + +void TParseContext::arraySizesCheck(const TSourceLoc& loc, const TQualifier& qualifier, TArraySizes* arraySizes, + const TIntermTyped* initializer, bool lastMember) +{ + assert(arraySizes); + + // always allow special built-in ins/outs sized to topologies + if (parsingBuiltins) + return; + + // initializer must be a sized array, in which case + // allow the initializer to set any unknown array sizes + if (initializer != nullptr) { + if (initializer->getType().isUnsizedArray()) + error(loc, "array initializer must be sized", "[]", ""); + return; + } + + // No environment allows any non-outer-dimension to be implicitly sized + if (arraySizes->isInnerUnsized()) { + error(loc, "only outermost dimension of an array of arrays can be implicitly sized", "[]", ""); + arraySizes->clearInnerUnsized(); + } + + if (arraySizes->isInnerSpecialization() && + (qualifier.storage != EvqTemporary && qualifier.storage != EvqGlobal && qualifier.storage != EvqShared && qualifier.storage != EvqConst)) + error(loc, "only outermost dimension of an array of arrays can be a specialization constant", "[]", ""); + +#ifndef GLSLANG_WEB + + // desktop always allows outer-dimension-unsized variable arrays, + if (!isEsProfile()) + return; + + // for ES, if size isn't coming from an initializer, it has to be explicitly declared now, + // with very few exceptions + + // implicitly-sized io exceptions: + switch (language) { + case EShLangGeometry: + if (qualifier.storage == EvqVaryingIn) + if ((isEsProfile() && version >= 320) || + extensionsTurnedOn(Num_AEP_geometry_shader, AEP_geometry_shader)) + return; + break; + case EShLangTessControl: + if ( qualifier.storage == EvqVaryingIn || + (qualifier.storage == EvqVaryingOut && ! qualifier.isPatch())) + if ((isEsProfile() && version >= 320) || + extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader)) + return; + break; + case EShLangTessEvaluation: + if ((qualifier.storage == EvqVaryingIn && ! 
qualifier.isPatch()) ||
+ qualifier.storage == EvqVaryingOut)
+ if ((isEsProfile() && version >= 320) ||
+ extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))
+ return;
+ break;
+ case EShLangMeshNV:
+ if (qualifier.storage == EvqVaryingOut)
+ if ((isEsProfile() && version >= 320) ||
+ extensionTurnedOn(E_GL_NV_mesh_shader))
+ return;
+ break;
+ default:
+ break;
+ }
+
+#endif
+
+ // last member of ssbo block exception:
+ if (qualifier.storage == EvqBuffer && lastMember)
+ return;
+
+ arraySizeRequiredCheck(loc, *arraySizes);
+}
+
+void TParseContext::arrayOfArrayVersionCheck(const TSourceLoc& loc, const TArraySizes* sizes)
+{
+ if (sizes == nullptr || sizes->getNumDims() == 1)
+ return;
+
+ const char* feature = "arrays of arrays";
+
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, feature);
+}
+
+//
+// Do all the semantic checking for declaring or redeclaring an array, with and
+// without a size, and make the right changes to the symbol table.
+//
+void TParseContext::declareArray(const TSourceLoc& loc, const TString& identifier, const TType& type, TSymbol*& symbol)
+{
+ if (symbol == nullptr) {
+ bool currentScope;
+ symbol = symbolTable.find(identifier, nullptr, &currentScope);
+
+ if (symbol && builtInName(identifier) && ! symbolTable.atBuiltInLevel()) {
+ // bad shader (errors already reported) trying to redeclare a built-in name as an array
+ symbol = nullptr;
+ return;
+ }
+ if (symbol == nullptr || ! currentScope) {
+ //
+ // Successfully process a new definition.
+ // (Redeclarations have to take place at the same scope; otherwise they are hiding declarations)
+ //
+ symbol = new TVariable(&identifier, type);
+ symbolTable.insert(*symbol);
+ if (symbolTable.atGlobalLevel())
+ trackLinkage(*symbol);
+
+#ifndef GLSLANG_WEB
+ if (! symbolTable.atBuiltInLevel()) {
+ if (isIoResizeArray(type)) {
+ ioArraySymbolResizeList.push_back(symbol);
+ checkIoArraysConsistency(loc, true);
+ } else
+ fixIoArraySize(loc, symbol->getWritableType());
+ }
+#endif
+
+ return;
+ }
+ if (symbol->getAsAnonMember()) {
+ error(loc, "cannot redeclare a user-block member array", identifier.c_str(), "");
+ symbol = nullptr;
+ return;
+ }
+ }
+
+ //
+ // Process a redeclaration.
+ //
+
+ if (symbol == nullptr) {
+ error(loc, "array variable name expected", identifier.c_str(), "");
+ return;
+ }
+
+ // redeclareBuiltinVariable() should have already done the copyUp()
+ TType& existingType = symbol->getWritableType();
+
+ if (! existingType.isArray()) {
+ error(loc, "redeclaring non-array as array", identifier.c_str(), "");
+ return;
+ }
+
+ if (! existingType.sameElementType(type)) {
+ error(loc, "redeclaration of array with a different element type", identifier.c_str(), "");
+ return;
+ }
+
+ if (! existingType.sameInnerArrayness(type)) {
+ error(loc, "redeclaration of array with a different array dimensions or sizes", identifier.c_str(), "");
+ return;
+ }
+
+#ifndef GLSLANG_WEB
+ if (existingType.isSizedArray()) {
+ // be more lenient for input arrays to geometry shaders and tessellation control outputs, where the redeclaration is the same size
+ if (!
(isIoResizeArray(type) && existingType.getOuterArraySize() == type.getOuterArraySize())) + error(loc, "redeclaration of array with size", identifier.c_str(), ""); + return; + } + + arrayLimitCheck(loc, identifier, type.getOuterArraySize()); + + existingType.updateArraySizes(type); + + if (isIoResizeArray(type)) + checkIoArraysConsistency(loc); +#endif +} + +#ifndef GLSLANG_WEB + +// Policy and error check for needing a runtime sized array. +void TParseContext::checkRuntimeSizable(const TSourceLoc& loc, const TIntermTyped& base) +{ + // runtime length implies runtime sizeable, so no problem + if (isRuntimeLength(base)) + return; + + // Check for last member of a bufferreference type, which is runtime sizeable + // but doesn't support runtime length + if (base.getType().getQualifier().storage == EvqBuffer) { + const TIntermBinary* binary = base.getAsBinaryNode(); + if (binary != nullptr && + binary->getOp() == EOpIndexDirectStruct && + binary->getLeft()->isReference()) { + + const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst(); + const int memberCount = (int)binary->getLeft()->getType().getReferentType()->getStruct()->size(); + if (index == memberCount - 1) + return; + } + } + + // check for additional things allowed by GL_EXT_nonuniform_qualifier + if (base.getBasicType() == EbtSampler || base.getBasicType() == EbtAccStruct || base.getBasicType() == EbtRayQuery || + (base.getBasicType() == EbtBlock && base.getType().getQualifier().isUniformOrBuffer())) + requireExtensions(loc, 1, &E_GL_EXT_nonuniform_qualifier, "variable index"); + else + error(loc, "", "[", "array must be redeclared with a size before being indexed with a variable"); +} + +// Policy decision for whether a run-time .length() is allowed. +bool TParseContext::isRuntimeLength(const TIntermTyped& base) const +{ + if (base.getType().getQualifier().storage == EvqBuffer) { + // in a buffer block + const TIntermBinary* binary = base.getAsBinaryNode(); + if (binary != nullptr && binary->getOp() == EOpIndexDirectStruct) { + // is it the last member? + const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst(); + + if (binary->getLeft()->isReference()) + return false; + + const int memberCount = (int)binary->getLeft()->getType().getStruct()->size(); + if (index == memberCount - 1) + return true; + } + } + + return false; +} + +// Check if mesh perviewNV attributes have a view dimension +// and resize it to gl_MaxMeshViewCountNV when implicitly sized. +void TParseContext::checkAndResizeMeshViewDim(const TSourceLoc& loc, TType& type, bool isBlockMember) +{ + // see if member is a per-view attribute + if (!type.getQualifier().isPerView()) + return; + + if ((isBlockMember && type.isArray()) || (!isBlockMember && type.isArrayOfArrays())) { + // since we don't have the maxMeshViewCountNV set during parsing builtins, we hardcode the value. + int maxViewCount = parsingBuiltins ? 4 : resources.maxMeshViewCountNV; + // For block members, outermost array dimension is the view dimension. + // For non-block members, outermost array dimension is the vertex/primitive dimension + // and 2nd outermost is the view dimension. + int viewDim = isBlockMember ? 
0 : 1;
+ int viewDimSize = type.getArraySizes()->getDimSize(viewDim);
+
+ if (viewDimSize != UnsizedArraySize && viewDimSize != maxViewCount)
+ error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", "");
+ else if (viewDimSize == UnsizedArraySize)
+ type.getArraySizes()->setDimSize(viewDim, maxViewCount);
+ }
+ else {
+ error(loc, "requires a view array dimension", "perviewNV", "");
+ }
+}
+
+#endif // GLSLANG_WEB
+
+// Returns true if the first argument to the #line directive is the line number for the next line.
+//
+// Desktop, pre-version 3.30: "After processing this directive
+// (including its new-line), the implementation will behave as if it is compiling at line number line+1 and
+// source string number source-string-number."
+//
+// Desktop, version 3.30 and later, and ES: "After processing this directive
+// (including its new-line), the implementation will behave as if it is compiling at line number line and
+// source string number source-string-number."
+bool TParseContext::lineDirectiveShouldSetNextLine() const
+{
+ return isEsProfile() || version >= 330;
+}
+
+//
+// Enforce non-initializer type/qualifier rules.
+//
+void TParseContext::nonInitConstCheck(const TSourceLoc& loc, TString& identifier, TType& type)
+{
+ //
+ // Make the qualifier make sense, given that there is not an initializer.
+ //
+ if (type.getQualifier().storage == EvqConst ||
+ type.getQualifier().storage == EvqConstReadOnly) {
+ type.getQualifier().makeTemporary();
+ error(loc, "variables with qualifier 'const' must be initialized", identifier.c_str(), "");
+ }
+}
+
+//
+// See if the identifier is a built-in symbol that can be redeclared, and if so,
+// copy the symbol table's read-only built-in variable to the current
+// global level, where it can be modified based on the passed-in type.
+//
+// Returns nullptr if no redeclaration took place, meaning a normal declaration still
+// needs to occur for it; this is not necessarily an error.
+//
+// Returns a redeclared and type-modified variable if a redeclaration occurred.
+//
+TSymbol* TParseContext::redeclareBuiltinVariable(const TSourceLoc& loc, const TString& identifier,
+ const TQualifier& qualifier, const TShaderQualifiers& publicType)
+{
+#ifndef GLSLANG_WEB
+ if (! builtInName(identifier) || symbolTable.atBuiltInLevel() || ! symbolTable.atGlobalLevel())
+ return nullptr;
+
+ bool nonEsRedecls = (!isEsProfile() && (version >= 130 || identifier == "gl_TexCoord"));
+ bool esRedecls = (isEsProfile() &&
+ (version >= 320 || extensionsTurnedOn(Num_AEP_shader_io_blocks, AEP_shader_io_blocks)));
+ if (! esRedecls && ! nonEsRedecls)
+ return nullptr;
+
+ // Special case when using GL_ARB_separate_shader_objects
+ bool ssoPre150 = false; // means the only reason this variable is redeclared is due to this combination
+ if (!isEsProfile() && version <= 140 && extensionTurnedOn(E_GL_ARB_separate_shader_objects)) {
+ if (identifier == "gl_Position" ||
+ identifier == "gl_PointSize" ||
+ identifier == "gl_ClipVertex" ||
+ identifier == "gl_FogFragCoord")
+ ssoPre150 = true;
+ }
+
+ // Potentially redeclaring a built-in variable...
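+ // e.g. "out vec4 gl_FrontColor;" in a vertex shader, or "gl_FragCoord"
+ // redeclared with layout(origin_upper_left) in a fragment shader, can
+ // legitimately reach the checks below.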
+ + if (ssoPre150 || + (identifier == "gl_FragDepth" && ((nonEsRedecls && version >= 420) || esRedecls)) || + (identifier == "gl_FragCoord" && ((nonEsRedecls && version >= 150) || esRedecls)) || + identifier == "gl_ClipDistance" || + identifier == "gl_CullDistance" || + identifier == "gl_FrontColor" || + identifier == "gl_BackColor" || + identifier == "gl_FrontSecondaryColor" || + identifier == "gl_BackSecondaryColor" || + identifier == "gl_SecondaryColor" || + (identifier == "gl_Color" && language == EShLangFragment) || + (identifier == "gl_FragStencilRefARB" && (nonEsRedecls && version >= 140) + && language == EShLangFragment) || + identifier == "gl_SampleMask" || + identifier == "gl_Layer" || + identifier == "gl_PrimitiveIndicesNV" || + identifier == "gl_TexCoord") { + + // Find the existing symbol, if any. + bool builtIn; + TSymbol* symbol = symbolTable.find(identifier, &builtIn); + + // If the symbol was not found, this must be a version/profile/stage + // that doesn't have it. + if (! symbol) + return nullptr; + + // If it wasn't at a built-in level, then it's already been redeclared; + // that is, this is a redeclaration of a redeclaration; reuse that initial + // redeclaration. Otherwise, make the new one. + if (builtIn) + makeEditable(symbol); + + // Now, modify the type of the copy, as per the type of the current redeclaration. + + TQualifier& symbolQualifier = symbol->getWritableType().getQualifier(); + if (ssoPre150) { + if (intermediate.inIoAccessed(identifier)) + error(loc, "cannot redeclare after use", identifier.c_str(), ""); + if (qualifier.hasLayout()) + error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str()); + if (qualifier.isMemory() || qualifier.isAuxiliary() || (language == EShLangVertex && qualifier.storage != EvqVaryingOut) || + (language == EShLangFragment && qualifier.storage != EvqVaryingIn)) + error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str()); + if (! 
qualifier.smooth) + error(loc, "cannot change interpolation qualification of", "redeclaration", symbol->getName().c_str()); + } else if (identifier == "gl_FrontColor" || + identifier == "gl_BackColor" || + identifier == "gl_FrontSecondaryColor" || + identifier == "gl_BackSecondaryColor" || + identifier == "gl_SecondaryColor" || + identifier == "gl_Color") { + symbolQualifier.flat = qualifier.flat; + symbolQualifier.smooth = qualifier.smooth; + symbolQualifier.nopersp = qualifier.nopersp; + if (qualifier.hasLayout()) + error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str()); + if (qualifier.isMemory() || qualifier.isAuxiliary() || symbol->getType().getQualifier().storage != qualifier.storage) + error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str()); + } else if (identifier == "gl_TexCoord" || + identifier == "gl_ClipDistance" || + identifier == "gl_CullDistance") { + if (qualifier.hasLayout() || qualifier.isMemory() || qualifier.isAuxiliary() || + qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat || + symbolQualifier.storage != qualifier.storage) + error(loc, "cannot change qualification of", "redeclaration", symbol->getName().c_str()); + } else if (identifier == "gl_FragCoord") { + if (intermediate.inIoAccessed("gl_FragCoord")) + error(loc, "cannot redeclare after use", "gl_FragCoord", ""); + if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat || + qualifier.isMemory() || qualifier.isAuxiliary()) + error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str()); + if (qualifier.storage != EvqVaryingIn) + error(loc, "cannot change input storage qualification of", "redeclaration", symbol->getName().c_str()); + if (! builtIn && (publicType.pixelCenterInteger != intermediate.getPixelCenterInteger() || + publicType.originUpperLeft != intermediate.getOriginUpperLeft())) + error(loc, "cannot redeclare with different qualification:", "redeclaration", symbol->getName().c_str()); + if (publicType.pixelCenterInteger) + intermediate.setPixelCenterInteger(); + if (publicType.originUpperLeft) + intermediate.setOriginUpperLeft(); + } else if (identifier == "gl_FragDepth") { + if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat || + qualifier.isMemory() || qualifier.isAuxiliary()) + error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str()); + if (qualifier.storage != EvqVaryingOut) + error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str()); + if (publicType.layoutDepth != EldNone) { + if (intermediate.inIoAccessed("gl_FragDepth")) + error(loc, "cannot redeclare after use", "gl_FragDepth", ""); + if (! 
intermediate.setDepth(publicType.layoutDepth))
+ error(loc, "all redeclarations must use the same depth layout on", "redeclaration", symbol->getName().c_str());
+ }
+ }
+ else if (
+ identifier == "gl_PrimitiveIndicesNV" ||
+ identifier == "gl_FragStencilRefARB") {
+ if (qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
+ if (qualifier.storage != EvqVaryingOut)
+ error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str());
+ }
+ else if (identifier == "gl_SampleMask") {
+ if (!publicType.layoutOverrideCoverage) {
+ error(loc, "redeclaration only allowed for override_coverage layout", "redeclaration", symbol->getName().c_str());
+ }
+ intermediate.setLayoutOverrideCoverage();
+ }
+ else if (identifier == "gl_Layer") {
+ if (!qualifier.layoutViewportRelative && qualifier.layoutSecondaryViewportRelativeOffset == -2048)
+ error(loc, "redeclaration only allowed for viewport_relative or secondary_view_offset layout", "redeclaration", symbol->getName().c_str());
+ symbolQualifier.layoutViewportRelative = qualifier.layoutViewportRelative;
+ symbolQualifier.layoutSecondaryViewportRelativeOffset = qualifier.layoutSecondaryViewportRelativeOffset;
+ }
+
+ // TODO: semantics quality: separate smooth from nothing declared, then use IsInterpolation for several tests above
+
+ return symbol;
+ }
+#endif
+
+ return nullptr;
+}
+
+//
+// Either redeclare the requested block, or give an error message explaining why it can't be done.
+//
+// TODO: functionality: explicitly sizing members of redeclared blocks is not giving them an explicit size
+void TParseContext::redeclareBuiltinBlock(const TSourceLoc& loc, TTypeList& newTypeList, const TString& blockName,
+ const TString* instanceName, TArraySizes* arraySizes)
+{
+#ifndef GLSLANG_WEB
+ const char* feature = "built-in block redeclaration";
+ profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature);
+ profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
+
+ if (blockName != "gl_PerVertex" && blockName != "gl_PerFragment" &&
+ blockName != "gl_MeshPerVertexNV" && blockName != "gl_MeshPerPrimitiveNV") {
+ error(loc, "cannot redeclare block: ", "block declaration", blockName.c_str());
+ return;
+ }
+
+ // Redeclaring a built-in block...
+
+ if (instanceName && ! builtInName(*instanceName)) {
+ error(loc, "cannot redeclare a built-in block with a user name", instanceName->c_str(), "");
+ return;
+ }
+
+ // Blocks with instance names are easy to find: look up the instance name.
+ // Anonymous blocks need to be found via a member.
+ bool builtIn;
+ TSymbol* block;
+ if (instanceName)
+ block = symbolTable.find(*instanceName, &builtIn);
+ else
+ block = symbolTable.find(newTypeList.front().type->getFieldName(), &builtIn);
+
+ // If the block was not found, this must be a version/profile/stage
+ // that doesn't have it, or the instance name is wrong.
+ const char* errorName = instanceName ? instanceName->c_str() : newTypeList.front().type->getFieldName().c_str();
+ if (! block) {
+ error(loc, "no declaration found for redeclaration", errorName, "");
+ return;
+ }
+ // Built-in blocks cannot be redeclared more than once; if that had happened,
+ // we'd be finding the already redeclared one here, rather than the built-in.
+ if (!
builtIn) { + error(loc, "can only redeclare a built-in block once, and before any use", blockName.c_str(), ""); + return; + } + + // Copy the block to make a writable version, to insert into the block table after editing. + block = symbolTable.copyUpDeferredInsert(block); + + if (block->getType().getBasicType() != EbtBlock) { + error(loc, "cannot redeclare a non-block as a block", errorName, ""); + return; + } + + // Fix up the XFB layout; it applies to the order of the redeclaration, not + // the order of the original members. + if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) { + if (!currentBlockQualifier.hasXfbBuffer()) + currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer; + if (!currentBlockQualifier.hasStream()) + currentBlockQualifier.layoutStream = globalOutputDefaults.layoutStream; + fixXfbOffsets(currentBlockQualifier, newTypeList); + } + + // Edit and error check the container against the redeclaration + // - remove unused members + // - ensure remaining qualifiers/types match + + TType& type = block->getWritableType(); + + // If gl_PerVertex is redeclared for the purpose of passing through "gl_Position", + // the redeclared block should have the same qualifiers as + // the current one + if (currentBlockQualifier.layoutPassthrough) { + type.getQualifier().layoutPassthrough = currentBlockQualifier.layoutPassthrough; + type.getQualifier().storage = currentBlockQualifier.storage; + type.getQualifier().layoutStream = currentBlockQualifier.layoutStream; + type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer; + } + + TTypeList::iterator member = type.getWritableStruct()->begin(); + size_t numOriginalMembersFound = 0; + while (member != type.getStruct()->end()) { + // look for match + bool found = false; + TTypeList::const_iterator newMember; + TSourceLoc memberLoc; + memberLoc.init(); + for (newMember = newTypeList.begin(); newMember != newTypeList.end(); ++newMember) { + if (member->type->getFieldName() == newMember->type->getFieldName()) { + found = true; + memberLoc = newMember->loc; + break; + } + } + + if (found) { + ++numOriginalMembersFound; + // - ensure match between redeclared members' types + // - check for things that can't be changed + // - update things that can be changed + TType& oldType = *member->type; + const TType& newType = *newMember->type; + if (! newType.sameElementType(oldType)) + error(memberLoc, "cannot redeclare block member with a different type", member->type->getFieldName().c_str(), ""); + if (oldType.isArray() != newType.isArray()) + error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! oldType.getQualifier().isPerView() && ! oldType.sameArrayness(newType) && oldType.isSizedArray()) + error(memberLoc, "cannot change array size of redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! oldType.getQualifier().isPerView() && newType.isArray()) + arrayLimitCheck(loc, member->type->getFieldName(), newType.getOuterArraySize()); + if (oldType.getQualifier().isPerView() && ! newType.getQualifier().isPerView()) + error(memberLoc, "missing perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + else if (!
oldType.getQualifier().isPerView() && newType.getQualifier().isPerView()) + error(memberLoc, "cannot add perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + else if (newType.getQualifier().isPerView()) { + if (oldType.getArraySizes()->getNumDims() != newType.getArraySizes()->getNumDims()) + error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! newType.isUnsizedArray() && newType.getOuterArraySize() != resources.maxMeshViewCountNV) + error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", ""); + else if (newType.getArraySizes()->getNumDims() == 2) { + int innerDimSize = newType.getArraySizes()->getDimSize(1); + arrayLimitCheck(memberLoc, member->type->getFieldName(), innerDimSize); + oldType.getArraySizes()->setDimSize(1, innerDimSize); + } + } + if (oldType.getQualifier().isPerPrimitive() && ! newType.getQualifier().isPerPrimitive()) + error(memberLoc, "missing perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! oldType.getQualifier().isPerPrimitive() && newType.getQualifier().isPerPrimitive()) + error(memberLoc, "cannot add perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + if (newType.getQualifier().isMemory()) + error(memberLoc, "cannot add memory qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + if (newType.getQualifier().hasNonXfbLayout()) + error(memberLoc, "cannot add non-XFB layout to redeclared block member", member->type->getFieldName().c_str(), ""); + if (newType.getQualifier().patch) + error(memberLoc, "cannot add patch to redeclared block member", member->type->getFieldName().c_str(), ""); + if (newType.getQualifier().hasXfbBuffer() && + newType.getQualifier().layoutXfbBuffer != currentBlockQualifier.layoutXfbBuffer) + error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", ""); + if (newType.getQualifier().hasStream() && + newType.getQualifier().layoutStream != currentBlockQualifier.layoutStream) + error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_stream", ""); + oldType.getQualifier().centroid = newType.getQualifier().centroid; + oldType.getQualifier().sample = newType.getQualifier().sample; + oldType.getQualifier().invariant = newType.getQualifier().invariant; + oldType.getQualifier().noContraction = newType.getQualifier().noContraction; + oldType.getQualifier().smooth = newType.getQualifier().smooth; + oldType.getQualifier().flat = newType.getQualifier().flat; + oldType.getQualifier().nopersp = newType.getQualifier().nopersp; + oldType.getQualifier().layoutXfbOffset = newType.getQualifier().layoutXfbOffset; + oldType.getQualifier().layoutXfbBuffer = newType.getQualifier().layoutXfbBuffer; + oldType.getQualifier().layoutXfbStride = newType.getQualifier().layoutXfbStride; + if (oldType.getQualifier().layoutXfbOffset != TQualifier::layoutXfbBufferEnd) { + // If any member has an xfb_offset, then the block's xfb_buffer inherits the current xfb_buffer, + // and for xfb processing, the member needs it as well, along with xfb_stride.
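+ // (Both the block's qualifier and the member's are given the current xfb_buffer just below.)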
+ type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer; + oldType.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer; + } + if (oldType.isUnsizedArray() && newType.isSizedArray()) + oldType.changeOuterArraySize(newType.getOuterArraySize()); + + // check and process the member's type, which will include managing xfb information + layoutTypeCheck(loc, oldType); + + // go to next member + ++member; + } else { + // For missing members of anonymous blocks that have been redeclared, + // hide the original (shared) declaration. + // Instance-named blocks can just have the member removed. + if (instanceName) + member = type.getWritableStruct()->erase(member); + else { + member->type->hideMember(); + ++member; + } + } + } + + if (spvVersion.vulkan > 0) { + // ...then streams apply to built-in blocks, instead of them being only on stream 0 + type.getQualifier().layoutStream = currentBlockQualifier.layoutStream; + } + + if (numOriginalMembersFound < newTypeList.size()) + error(loc, "block redeclaration has extra members", blockName.c_str(), ""); + if (type.isArray() != (arraySizes != nullptr) || + (type.isArray() && arraySizes != nullptr && type.getArraySizes()->getNumDims() != arraySizes->getNumDims())) + error(loc, "cannot change arrayness of redeclared block", blockName.c_str(), ""); + else if (type.isArray()) { + // At this point, we know both are arrays and both have the same number of dimensions. + + // It is okay for a built-in block redeclaration to be unsized, and keep the size of the + // original block declaration. + if (!arraySizes->isSized() && type.isSizedArray()) + arraySizes->changeOuterSize(type.getOuterArraySize()); + + // And, okay to be giving a size to the array, by the redeclaration + if (!type.isSizedArray() && arraySizes->isSized()) + type.changeOuterArraySize(arraySizes->getOuterSize()); + + // Now, they must match in all dimensions. + if (type.isSizedArray() && *type.getArraySizes() != *arraySizes) + error(loc, "cannot change array size of redeclared block", blockName.c_str(), ""); + } + + symbolTable.insert(*block); + + // Check for general layout qualifier errors + layoutObjectCheck(loc, *block); + + // Tracking for implicit sizing of array + if (isIoResizeArray(block->getType())) { + ioArraySymbolResizeList.push_back(block); + checkIoArraysConsistency(loc, true); + } else if (block->getType().isArray()) + fixIoArraySize(loc, block->getWritableType()); + + // Save it in the AST for linker use. 
+ trackLinkage(*block); +#endif // GLSLANG_WEB +} + +void TParseContext::paramCheckFixStorage(const TSourceLoc& loc, const TStorageQualifier& qualifier, TType& type) +{ + switch (qualifier) { + case EvqConst: + case EvqConstReadOnly: + type.getQualifier().storage = EvqConstReadOnly; + break; + case EvqIn: + case EvqOut: + case EvqInOut: + type.getQualifier().storage = qualifier; + break; + case EvqGlobal: + case EvqTemporary: + type.getQualifier().storage = EvqIn; + break; + default: + type.getQualifier().storage = EvqIn; + error(loc, "storage qualifier not allowed on function parameter", GetStorageQualifierString(qualifier), ""); + break; + } +} + +void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& qualifier, TType& type) +{ +#ifndef GLSLANG_WEB + if (qualifier.isMemory()) { + type.getQualifier().volatil = qualifier.volatil; + type.getQualifier().coherent = qualifier.coherent; + type.getQualifier().devicecoherent = qualifier.devicecoherent ; + type.getQualifier().queuefamilycoherent = qualifier.queuefamilycoherent; + type.getQualifier().workgroupcoherent = qualifier.workgroupcoherent; + type.getQualifier().subgroupcoherent = qualifier.subgroupcoherent; + type.getQualifier().shadercallcoherent = qualifier.shadercallcoherent; + type.getQualifier().nonprivate = qualifier.nonprivate; + type.getQualifier().readonly = qualifier.readonly; + type.getQualifier().writeonly = qualifier.writeonly; + type.getQualifier().restrict = qualifier.restrict; + } +#endif + + if (qualifier.isAuxiliary() || + qualifier.isInterpolation()) + error(loc, "cannot use auxiliary or interpolation qualifiers on a function parameter", "", ""); + if (qualifier.hasLayout()) + error(loc, "cannot use layout qualifiers on a function parameter", "", ""); + if (qualifier.invariant) + error(loc, "cannot use invariant qualifier on a function parameter", "", ""); + if (qualifier.isNoContraction()) { + if (qualifier.isParamOutput()) + type.getQualifier().setNoContraction(); + else + warn(loc, "qualifier has no effect on non-output parameters", "precise", ""); + } + if (qualifier.isNonUniform()) + type.getQualifier().nonUniform = qualifier.nonUniform; + + paramCheckFixStorage(loc, qualifier.storage, type); +} + +void TParseContext::nestedBlockCheck(const TSourceLoc& loc) +{ + if (structNestingLevel > 0) + error(loc, "cannot nest a block definition inside a structure or block", "", ""); + ++structNestingLevel; +} + +void TParseContext::nestedStructCheck(const TSourceLoc& loc) +{ + if (structNestingLevel > 0) + error(loc, "cannot nest a structure definition inside a structure or block", "", ""); + ++structNestingLevel; +} + +void TParseContext::arrayObjectCheck(const TSourceLoc& loc, const TType& type, const char* op) +{ + // Some versions don't allow comparing arrays or structures containing arrays + if (type.containsArray()) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, op); + profileRequires(loc, EEsProfile, 300, nullptr, op); + } +} + +void TParseContext::opaqueCheck(const TSourceLoc& loc, const TType& type, const char* op) +{ + if (containsFieldWithBasicType(type, EbtSampler)) + error(loc, "can't use with samplers or structs containing samplers", op, ""); +} + +void TParseContext::referenceCheck(const TSourceLoc& loc, const TType& type, const char* op) +{ +#ifndef GLSLANG_WEB + if (containsFieldWithBasicType(type, EbtReference)) + error(loc, "can't use with reference types", op, ""); +#endif +} + +void TParseContext::storage16BitAssignmentCheck(const TSourceLoc& loc, const TType& type, 
const char* op) +{ +#ifndef GLSLANG_WEB + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtFloat16)) + requireFloat16Arithmetic(loc, op, "can't use with structs containing float16"); + + if (type.isArray() && type.getBasicType() == EbtFloat16) + requireFloat16Arithmetic(loc, op, "can't use with arrays containing float16"); + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt16)) + requireInt16Arithmetic(loc, op, "can't use with structs containing int16"); + + if (type.isArray() && type.getBasicType() == EbtInt16) + requireInt16Arithmetic(loc, op, "can't use with arrays containing int16"); + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint16)) + requireInt16Arithmetic(loc, op, "can't use with structs containing uint16"); + + if (type.isArray() && type.getBasicType() == EbtUint16) + requireInt16Arithmetic(loc, op, "can't use with arrays containing uint16"); + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt8)) + requireInt8Arithmetic(loc, op, "can't use with structs containing int8"); + + if (type.isArray() && type.getBasicType() == EbtInt8) + requireInt8Arithmetic(loc, op, "can't use with arrays containing int8"); + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint8)) + requireInt8Arithmetic(loc, op, "can't use with structs containing uint8"); + + if (type.isArray() && type.getBasicType() == EbtUint8) + requireInt8Arithmetic(loc, op, "can't use with arrays containing uint8"); +#endif +} + +void TParseContext::specializationCheck(const TSourceLoc& loc, const TType& type, const char* op) +{ + if (type.containsSpecializationSize()) + error(loc, "can't use with types containing arrays sized with a specialization constant", op, ""); +} + +void TParseContext::structTypeCheck(const TSourceLoc& /*loc*/, TPublicType& publicType) +{ + const TTypeList& typeList = *publicType.userDef->getStruct(); + + // fix and check for member storage qualifiers and types that don't belong within a structure + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; + if (memberQualifier.isAuxiliary() || + memberQualifier.isInterpolation() || + (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal)) + error(memberLoc, "cannot use storage or interpolation qualifiers on structure members", typeList[member].type->getFieldName().c_str(), ""); + if (memberQualifier.isMemory()) + error(memberLoc, "cannot use memory qualifiers on structure members", typeList[member].type->getFieldName().c_str(), ""); + if (memberQualifier.hasLayout()) { + error(memberLoc, "cannot use layout qualifiers on structure members", typeList[member].type->getFieldName().c_str(), ""); + memberQualifier.clearLayout(); + } + if (memberQualifier.invariant) + error(memberLoc, "cannot use invariant qualifier on structure members", typeList[member].type->getFieldName().c_str(), ""); + } +} + +// +// See if this loop satisfies the limitations for ES 2.0 (version 100) for loops in Appendix A: +// +// "The loop index has type int or float.
+// +// "The for statement has the form: +// for ( init-declaration ; condition ; expression ) +// init-declaration has the form: type-specifier identifier = constant-expression +// condition has the form: loop-index relational_operator constant-expression +// where relational_operator is one of: > >= < <= == or != +// expression [sic] has one of the following forms: +// loop-index++ +// loop-index-- +// loop-index += constant-expression +// loop-index -= constant-expression +// +// The body is handled in an AST traversal. +// +void TParseContext::inductiveLoopCheck(const TSourceLoc& loc, TIntermNode* init, TIntermLoop* loop) +{ +#ifndef GLSLANG_WEB + // loop index init must exist and be a declaration, which shows up in the AST as an aggregate of size 1 of the declaration + bool badInit = false; + if (! init || ! init->getAsAggregate() || init->getAsAggregate()->getSequence().size() != 1) + badInit = true; + TIntermBinary* binaryInit = 0; + if (! badInit) { + // get the declaration assignment + binaryInit = init->getAsAggregate()->getSequence()[0]->getAsBinaryNode(); + if (! binaryInit) + badInit = true; + } + if (badInit) { + error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", ""); + return; + } + + // loop index must be type int or float + if (! binaryInit->getType().isScalar() || (binaryInit->getBasicType() != EbtInt && binaryInit->getBasicType() != EbtFloat)) { + error(loc, "inductive loop requires a scalar 'int' or 'float' loop index", "limitations", ""); + return; + } + + // init is the form "loop-index = constant" + if (binaryInit->getOp() != EOpAssign || ! binaryInit->getLeft()->getAsSymbolNode() || ! binaryInit->getRight()->getAsConstantUnion()) { + error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", ""); + return; + } + + // get the unique id of the loop index + int loopIndex = binaryInit->getLeft()->getAsSymbolNode()->getId(); + inductiveLoopIds.insert(loopIndex); + + // condition's form must be "loop-index relational-operator constant-expression" + bool badCond = ! loop->getTest(); + if (! badCond) { + TIntermBinary* binaryCond = loop->getTest()->getAsBinaryNode(); + badCond = ! binaryCond; + if (! badCond) { + switch (binaryCond->getOp()) { + case EOpGreaterThan: + case EOpGreaterThanEqual: + case EOpLessThan: + case EOpLessThanEqual: + case EOpEqual: + case EOpNotEqual: + break; + default: + badCond = true; + } + } + if (binaryCond && (! binaryCond->getLeft()->getAsSymbolNode() || + binaryCond->getLeft()->getAsSymbolNode()->getId() != loopIndex || + ! binaryCond->getRight()->getAsConstantUnion())) + badCond = true; + } + if (badCond) { + error(loc, "inductive-loop condition requires the form \"loop-index relational-operator constant-expression\"", "limitations", ""); + return; + } + + // loop-index++ + // loop-index-- + // loop-index += constant-expression + // loop-index -= constant-expression + bool badTerminal = ! loop->getTerminal(); + if (! badTerminal) { + TIntermUnary* unaryTerminal = loop->getTerminal()->getAsUnaryNode(); + TIntermBinary* binaryTerminal = loop->getTerminal()->getAsBinaryNode(); + if (unaryTerminal || binaryTerminal) { + switch(loop->getTerminal()->getAsOperator()->getOp()) { + case EOpPostDecrement: + case EOpPostIncrement: + case EOpAddAssign: + case EOpSubAssign: + break; + default: + badTerminal = true; + } + } else + badTerminal = true; + if (binaryTerminal && (!
binaryTerminal->getLeft()->getAsSymbolNode() || + binaryTerminal->getLeft()->getAsSymbolNode()->getId() != loopIndex || + ! binaryTerminal->getRight()->getAsConstantUnion())) + badTerminal = true; + if (unaryTerminal && (! unaryTerminal->getOperand()->getAsSymbolNode() || + unaryTerminal->getOperand()->getAsSymbolNode()->getId() != loopIndex)) + badTerminal = true; + } + if (badTerminal) { + error(loc, "inductive-loop termination requires the form \"loop-index++, loop-index--, loop-index += constant-expression, or loop-index -= constant-expression\"", "limitations", ""); + return; + } + + // the body + inductiveLoopBodyCheck(loop->getBody(), loopIndex, symbolTable); +#endif +} + +#ifndef GLSLANG_WEB +// Do limit checks for built-in arrays. +void TParseContext::arrayLimitCheck(const TSourceLoc& loc, const TString& identifier, int size) +{ + if (identifier.compare("gl_TexCoord") == 0) + limitCheck(loc, size, "gl_MaxTextureCoords", "gl_TexCoord array size"); + else if (identifier.compare("gl_ClipDistance") == 0) + limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistance array size"); + else if (identifier.compare("gl_CullDistance") == 0) + limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistance array size"); + else if (identifier.compare("gl_ClipDistancePerViewNV") == 0) + limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistancePerViewNV array size"); + else if (identifier.compare("gl_CullDistancePerViewNV") == 0) + limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistancePerViewNV array size"); +} +#endif // GLSLANG_WEB + +// See if the provided value is less than or equal to the symbol indicated by limit, +// which should be a constant in the symbol table. +void TParseContext::limitCheck(const TSourceLoc& loc, int value, const char* limit, const char* feature) +{ + TSymbol* symbol = symbolTable.find(limit); + assert(symbol->getAsVariable()); + const TConstUnionArray& constArray = symbol->getAsVariable()->getConstArray(); + assert(! constArray.empty()); + if (value > constArray[0].getIConst()) + error(loc, "must be less than or equal to", feature, "%s (%d)", limit, constArray[0].getIConst()); +} + +#ifndef GLSLANG_WEB + +// +// Do any additional error checking, etc., once we know the parsing is done. +// +void TParseContext::finish() +{ + TParseContextBase::finish(); + + if (parsingBuiltins) + return; + + // Check on array indexes for ES 2.0 (version 100) limitations. + for (size_t i = 0; i < needsIndexLimitationChecking.size(); ++i) + constantIndexExpressionCheck(needsIndexLimitationChecking[i]); + + // Check for stages that are enabled by extension. + // Can't do this at the beginning; adding a stage by extension is a + // chicken-and-egg problem. + // Stage-specific features were already tested for correctly; this is just + // about the stage itself.
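+ // For example, an ES 3.10 geometry shader is only legal when one of the + // AEP_geometry_shader extensions has been enabled by the shader text.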
+ switch (language) { + case EShLangGeometry: + if (isEsProfile() && version == 310) + requireExtensions(getCurrentLoc(), Num_AEP_geometry_shader, AEP_geometry_shader, "geometry shaders"); + break; + case EShLangTessControl: + case EShLangTessEvaluation: + if (isEsProfile() && version == 310) + requireExtensions(getCurrentLoc(), Num_AEP_tessellation_shader, AEP_tessellation_shader, "tessellation shaders"); + else if (!isEsProfile() && version < 400) + requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_tessellation_shader, "tessellation shaders"); + break; + case EShLangCompute: + if (!isEsProfile() && version < 430) + requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_compute_shader, "compute shaders"); + break; + case EShLangTaskNV: + requireExtensions(getCurrentLoc(), 1, &E_GL_NV_mesh_shader, "task shaders"); + break; + case EShLangMeshNV: + requireExtensions(getCurrentLoc(), 1, &E_GL_NV_mesh_shader, "mesh shaders"); + break; + default: + break; + } + + // Set default outputs for GL_NV_geometry_shader_passthrough + if (language == EShLangGeometry && extensionTurnedOn(E_SPV_NV_geometry_shader_passthrough)) { + if (intermediate.getOutputPrimitive() == ElgNone) { + switch (intermediate.getInputPrimitive()) { + case ElgPoints: intermediate.setOutputPrimitive(ElgPoints); break; + case ElgLines: intermediate.setOutputPrimitive(ElgLineStrip); break; + case ElgTriangles: intermediate.setOutputPrimitive(ElgTriangleStrip); break; + default: break; + } + } + if (intermediate.getVertices() == TQualifier::layoutNotSet) { + switch (intermediate.getInputPrimitive()) { + case ElgPoints: intermediate.setVertices(1); break; + case ElgLines: intermediate.setVertices(2); break; + case ElgTriangles: intermediate.setVertices(3); break; + default: break; + } + } + } +} +#endif // GLSLANG_WEB + +// +// Layout qualifier stuff. +// + +// Put the id's layout qualification into the public type, for qualifiers not having a number set. +// This is before we know any type information for error checking. 
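+// For example, in "layout(row_major, std140) uniform;" both "row_major" and +// "std140" arrive here as bare identifiers with no associated value.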
+void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id) +{ + std::transform(id.begin(), id.end(), id.begin(), ::tolower); + + if (id == TQualifier::getLayoutMatrixString(ElmColumnMajor)) { + publicType.qualifier.layoutMatrix = ElmColumnMajor; + return; + } + if (id == TQualifier::getLayoutMatrixString(ElmRowMajor)) { + publicType.qualifier.layoutMatrix = ElmRowMajor; + return; + } + if (id == TQualifier::getLayoutPackingString(ElpPacked)) { + if (spvVersion.spv != 0) + spvRemoved(loc, "packed"); + publicType.qualifier.layoutPacking = ElpPacked; + return; + } + if (id == TQualifier::getLayoutPackingString(ElpShared)) { + if (spvVersion.spv != 0) + spvRemoved(loc, "shared"); + publicType.qualifier.layoutPacking = ElpShared; + return; + } + if (id == TQualifier::getLayoutPackingString(ElpStd140)) { + publicType.qualifier.layoutPacking = ElpStd140; + return; + } +#ifndef GLSLANG_WEB + if (id == TQualifier::getLayoutPackingString(ElpStd430)) { + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "std430"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_shader_storage_buffer_object, "std430"); + profileRequires(loc, EEsProfile, 310, nullptr, "std430"); + publicType.qualifier.layoutPacking = ElpStd430; + return; + } + if (id == TQualifier::getLayoutPackingString(ElpScalar)) { + requireVulkan(loc, "scalar"); + requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "scalar block layout"); + publicType.qualifier.layoutPacking = ElpScalar; + return; + } + // TODO: compile-time performance: may need to stop doing linear searches + for (TLayoutFormat format = (TLayoutFormat)(ElfNone + 1); format < ElfCount; format = (TLayoutFormat)(format + 1)) { + if (id == TQualifier::getLayoutFormatString(format)) { + if ((format > ElfEsFloatGuard && format < ElfFloatGuard) || + (format > ElfEsIntGuard && format < ElfIntGuard) || + (format > ElfEsUintGuard && format < ElfCount)) + requireProfile(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, "image load-store format"); + profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "image load store"); + profileRequires(loc, EEsProfile, 310, E_GL_ARB_shader_image_load_store, "image load store"); + publicType.qualifier.layoutFormat = format; + return; + } + } + if (id == "push_constant") { + requireVulkan(loc, "push_constant"); + publicType.qualifier.layoutPushConstant = true; + return; + } + if (id == "buffer_reference") { + requireVulkan(loc, "buffer_reference"); + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference"); + publicType.qualifier.layoutBufferReference = true; + intermediate.setUseStorageBuffer(); + intermediate.setUsePhysicalStorageBuffer(); + return; + } + if (language == EShLangGeometry || language == EShLangTessEvaluation || language == EShLangMeshNV) { + if (id == TQualifier::getGeometryString(ElgTriangles)) { + publicType.shaderQualifiers.geometry = ElgTriangles; + return; + } + if (language == EShLangGeometry || language == EShLangMeshNV) { + if (id == TQualifier::getGeometryString(ElgPoints)) { + publicType.shaderQualifiers.geometry = ElgPoints; + return; + } + if (id == TQualifier::getGeometryString(ElgLines)) { + publicType.shaderQualifiers.geometry = ElgLines; + return; + } + if (language == EShLangGeometry) { + if (id == TQualifier::getGeometryString(ElgLineStrip)) { + publicType.shaderQualifiers.geometry = ElgLineStrip; + return; + } + if (id == 
TQualifier::getGeometryString(ElgLinesAdjacency)) { + publicType.shaderQualifiers.geometry = ElgLinesAdjacency; + return; + } + if (id == TQualifier::getGeometryString(ElgTrianglesAdjacency)) { + publicType.shaderQualifiers.geometry = ElgTrianglesAdjacency; + return; + } + if (id == TQualifier::getGeometryString(ElgTriangleStrip)) { + publicType.shaderQualifiers.geometry = ElgTriangleStrip; + return; + } + if (id == "passthrough") { + requireExtensions(loc, 1, &E_SPV_NV_geometry_shader_passthrough, "geometry shader passthrough"); + publicType.qualifier.layoutPassthrough = true; + intermediate.setGeoPassthroughEXT(); + return; + } + } + } else { + assert(language == EShLangTessEvaluation); + + // input primitive + if (id == TQualifier::getGeometryString(ElgTriangles)) { + publicType.shaderQualifiers.geometry = ElgTriangles; + return; + } + if (id == TQualifier::getGeometryString(ElgQuads)) { + publicType.shaderQualifiers.geometry = ElgQuads; + return; + } + if (id == TQualifier::getGeometryString(ElgIsolines)) { + publicType.shaderQualifiers.geometry = ElgIsolines; + return; + } + + // vertex spacing + if (id == TQualifier::getVertexSpacingString(EvsEqual)) { + publicType.shaderQualifiers.spacing = EvsEqual; + return; + } + if (id == TQualifier::getVertexSpacingString(EvsFractionalEven)) { + publicType.shaderQualifiers.spacing = EvsFractionalEven; + return; + } + if (id == TQualifier::getVertexSpacingString(EvsFractionalOdd)) { + publicType.shaderQualifiers.spacing = EvsFractionalOdd; + return; + } + + // triangle order + if (id == TQualifier::getVertexOrderString(EvoCw)) { + publicType.shaderQualifiers.order = EvoCw; + return; + } + if (id == TQualifier::getVertexOrderString(EvoCcw)) { + publicType.shaderQualifiers.order = EvoCcw; + return; + } + + // point mode + if (id == "point_mode") { + publicType.shaderQualifiers.pointMode = true; + return; + } + } + } + if (language == EShLangFragment) { + if (id == "origin_upper_left") { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "origin_upper_left"); + publicType.shaderQualifiers.originUpperLeft = true; + return; + } + if (id == "pixel_center_integer") { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "pixel_center_integer"); + publicType.shaderQualifiers.pixelCenterInteger = true; + return; + } + if (id == "early_fragment_tests") { + profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "early_fragment_tests"); + profileRequires(loc, EEsProfile, 310, nullptr, "early_fragment_tests"); + publicType.shaderQualifiers.earlyFragmentTests = true; + return; + } + if (id == "post_depth_coverage") { + requireExtensions(loc, Num_post_depth_coverageEXTs, post_depth_coverageEXTs, "post depth coverage"); + if (extensionTurnedOn(E_GL_ARB_post_depth_coverage)) { + publicType.shaderQualifiers.earlyFragmentTests = true; + } + publicType.shaderQualifiers.postDepthCoverage = true; + return; + } + for (TLayoutDepth depth = (TLayoutDepth)(EldNone + 1); depth < EldCount; depth = (TLayoutDepth)(depth+1)) { + if (id == TQualifier::getLayoutDepthString(depth)) { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "depth layout qualifier"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, nullptr, "depth layout qualifier"); + publicType.shaderQualifiers.layoutDepth = depth; + return; + } + } + for (TInterlockOrdering order = (TInterlockOrdering)(EioNone + 1); order < EioCount; order = (TInterlockOrdering)(order+1)) { + if (id == 
TQualifier::getInterlockOrderingString(order)) { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "fragment shader interlock layout qualifier"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 450, nullptr, "fragment shader interlock layout qualifier"); + requireExtensions(loc, 1, &E_GL_ARB_fragment_shader_interlock, TQualifier::getInterlockOrderingString(order)); + if (order == EioShadingRateInterlockOrdered || order == EioShadingRateInterlockUnordered) + requireExtensions(loc, 1, &E_GL_NV_shading_rate_image, TQualifier::getInterlockOrderingString(order)); + publicType.shaderQualifiers.interlockOrdering = order; + return; + } + } + if (id.compare(0, 13, "blend_support") == 0) { + bool found = false; + for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) { + if (id == TQualifier::getBlendEquationString(be)) { + profileRequires(loc, EEsProfile, 320, E_GL_KHR_blend_equation_advanced, "blend equation"); + profileRequires(loc, ~EEsProfile, 0, E_GL_KHR_blend_equation_advanced, "blend equation"); + intermediate.addBlendEquation(be); + publicType.shaderQualifiers.blendEquation = true; + found = true; + break; + } + } + if (! found) + error(loc, "unknown blend equation", "blend_support", ""); + return; + } + if (id == "override_coverage") { + requireExtensions(loc, 1, &E_GL_NV_sample_mask_override_coverage, "sample mask override coverage"); + publicType.shaderQualifiers.layoutOverrideCoverage = true; + return; + } + } + if (language == EShLangVertex || + language == EShLangTessControl || + language == EShLangTessEvaluation || + language == EShLangGeometry ) { + if (id == "viewport_relative") { + requireExtensions(loc, 1, &E_GL_NV_viewport_array2, "view port array2"); + publicType.qualifier.layoutViewportRelative = true; + return; + } + } else { + if (language == EShLangRayGen || language == EShLangIntersect || + language == EShLangAnyHit || language == EShLangClosestHit || + language == EShLangMiss || language == EShLangCallable) { + if (id == "shaderrecordnv" || id == "shaderrecordext") { + if (id == "shaderrecordnv") { + requireExtensions(loc, 1, &E_GL_NV_ray_tracing, "shader record NV"); + } else { + requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "shader record EXT"); + } + publicType.qualifier.layoutShaderRecord = true; + return; + } + + } + } + if (language == EShLangCompute) { + if (id.compare(0, 17, "derivative_group_") == 0) { + requireExtensions(loc, 1, &E_GL_NV_compute_shader_derivatives, "compute shader derivatives"); + if (id == "derivative_group_quadsnv") { + publicType.shaderQualifiers.layoutDerivativeGroupQuads = true; + return; + } else if (id == "derivative_group_linearnv") { + publicType.shaderQualifiers.layoutDerivativeGroupLinear = true; + return; + } + } + } + + if (id == "primitive_culling") { + requireExtensions(loc, 1, &E_GL_EXT_ray_flags_primitive_culling, "primitive culling"); + publicType.shaderQualifiers.layoutPrimitiveCulling = true; + return; + } +#endif + + error(loc, "unrecognized layout identifier, or qualifier requires assignment (e.g., binding = 4)", id.c_str(), ""); +} + +// Put the id's layout qualifier value into the public type, for qualifiers having a number set. +// This is before we know any type information for error checking. 
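+// For example, in "layout(location = 3, binding = 1) uniform sampler2D s;" the +// "location" and "binding" qualifiers each arrive here with their value node.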
+void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id, const TIntermTyped* node) +{ + const char* feature = "layout-id value"; + const char* nonLiteralFeature = "non-literal layout-id value"; + + integerCheck(node, feature); + const TIntermConstantUnion* constUnion = node->getAsConstantUnion(); + int value; + bool nonLiteral = false; + if (constUnion) { + value = constUnion->getConstArray()[0].getIConst(); + if (! constUnion->isLiteral()) { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, nonLiteralFeature); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, nonLiteralFeature); + } + } else { + // the grammar should have given out the error message + value = 0; + nonLiteral = true; + } + + if (value < 0) { + error(loc, "cannot be negative", feature, ""); + return; + } + + std::transform(id.begin(), id.end(), id.begin(), ::tolower); + + if (id == "offset") { + // "offset" can be for either + // - uniform offsets + // - atomic_uint offsets + const char* feature = "offset"; + if (spvVersion.spv == 0) { + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature); + const char* exts[2] = { E_GL_ARB_enhanced_layouts, E_GL_ARB_shader_atomic_counters }; + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, 2, exts, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + } + publicType.qualifier.layoutOffset = value; + publicType.qualifier.explicitOffset = true; + if (nonLiteral) + error(loc, "needs a literal integer", "offset", ""); + return; + } else if (id == "align") { + const char* feature = "uniform buffer-member align"; + if (spvVersion.spv == 0) { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature); + } + // "The specified alignment must be a power of 2, or a compile-time error results." + if (!
IsPow2(value)) + error(loc, "must be a power of 2", "align", ""); + else + publicType.qualifier.layoutAlign = value; + if (nonLiteral) + error(loc, "needs a literal integer", "align", ""); + return; + } else if (id == "location") { + profileRequires(loc, EEsProfile, 300, nullptr, "location"); + const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location }; + // GL_ARB_explicit_uniform_location requires 330 or GL_ARB_explicit_attrib_location, so we do not need to add it here + profileRequires(loc, ~EEsProfile, 330, 2, exts, "location"); + if ((unsigned int)value >= TQualifier::layoutLocationEnd) + error(loc, "location is too large", id.c_str(), ""); + else + publicType.qualifier.layoutLocation = value; + if (nonLiteral) + error(loc, "needs a literal integer", "location", ""); + return; + } else if (id == "set") { + if ((unsigned int)value >= TQualifier::layoutSetEnd) + error(loc, "set is too large", id.c_str(), ""); + else + publicType.qualifier.layoutSet = value; + if (value != 0) + requireVulkan(loc, "descriptor set"); + if (nonLiteral) + error(loc, "needs a literal integer", "set", ""); + return; + } else if (id == "binding") { +#ifndef GLSLANG_WEB + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, "binding"); + profileRequires(loc, EEsProfile, 310, nullptr, "binding"); +#endif + if ((unsigned int)value >= TQualifier::layoutBindingEnd) + error(loc, "binding is too large", id.c_str(), ""); + else + publicType.qualifier.layoutBinding = value; + if (nonLiteral) + error(loc, "needs a literal integer", "binding", ""); + return; + } + if (id == "constant_id") { + requireSpv(loc, "constant_id"); + if (value >= (int)TQualifier::layoutSpecConstantIdEnd) { + error(loc, "specialization-constant id is too large", id.c_str(), ""); + } else { + publicType.qualifier.layoutSpecConstantId = value; + publicType.qualifier.specConstant = true; + if (! intermediate.addUsedConstantId(value)) + error(loc, "specialization-constant id already used", id.c_str(), ""); + } + if (nonLiteral) + error(loc, "needs a literal integer", "constant_id", ""); + return; + } +#ifndef GLSLANG_WEB + if (id == "component") { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "component"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "component"); + if ((unsigned)value >= TQualifier::layoutComponentEnd) + error(loc, "component is too large", id.c_str(), ""); + else + publicType.qualifier.layoutComponent = value; + if (nonLiteral) + error(loc, "needs a literal integer", "component", ""); + return; + } + if (id.compare(0, 4, "xfb_") == 0) { + // "Any shader making any static use (after preprocessing) of any of these + // *xfb_* qualifiers will cause the shader to be in a transform feedback + // capturing mode and hence responsible for describing the transform feedback + // setup." + intermediate.setXfbMode(); + const char* feature = "transform feedback qualifier"; + requireStage(loc, (EShLanguageMask)(EShLangVertexMask | EShLangGeometryMask | EShLangTessControlMask | EShLangTessEvaluationMask), feature); + requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature); + if (id == "xfb_buffer") { + // "It is a compile-time error to specify an *xfb_buffer* that is greater than + // the implementation-dependent constant gl_MaxTransformFeedbackBuffers."
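+ // (Both this implementation-dependent limit and glslang's internal + // TQualifier::layoutXfbBufferEnd bound are checked below.)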
+ if (value >= resources.maxTransformFeedbackBuffers) + error(loc, "buffer is too large:", id.c_str(), "gl_MaxTransformFeedbackBuffers is %d", resources.maxTransformFeedbackBuffers); + if (value >= (int)TQualifier::layoutXfbBufferEnd) + error(loc, "buffer is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbBufferEnd-1); + else + publicType.qualifier.layoutXfbBuffer = value; + if (nonLiteral) + error(loc, "needs a literal integer", "xfb_buffer", ""); + return; + } else if (id == "xfb_offset") { + if (value >= (int)TQualifier::layoutXfbOffsetEnd) + error(loc, "offset is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbOffsetEnd-1); + else + publicType.qualifier.layoutXfbOffset = value; + if (nonLiteral) + error(loc, "needs a literal integer", "xfb_offset", ""); + return; + } else if (id == "xfb_stride") { + // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the + // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents." + if (value > 4 * resources.maxTransformFeedbackInterleavedComponents) { + error(loc, "1/4 stride is too large:", id.c_str(), "gl_MaxTransformFeedbackInterleavedComponents is %d", + resources.maxTransformFeedbackInterleavedComponents); + } + if (value >= (int)TQualifier::layoutXfbStrideEnd) + error(loc, "stride is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbStrideEnd-1); + else + publicType.qualifier.layoutXfbStride = value; + if (nonLiteral) + error(loc, "needs a literal integer", "xfb_stride", ""); + return; + } + } + if (id == "input_attachment_index") { + requireVulkan(loc, "input_attachment_index"); + if (value >= (int)TQualifier::layoutAttachmentEnd) + error(loc, "attachment index is too large", id.c_str(), ""); + else + publicType.qualifier.layoutAttachment = value; + if (nonLiteral) + error(loc, "needs a literal integer", "input_attachment_index", ""); + return; + } + if (id == "num_views") { + requireExtensions(loc, Num_OVR_multiview_EXTs, OVR_multiview_EXTs, "num_views"); + publicType.shaderQualifiers.numViews = value; + if (nonLiteral) + error(loc, "needs a literal integer", "num_views", ""); + return; + } + if (language == EShLangVertex || + language == EShLangTessControl || + language == EShLangTessEvaluation || + language == EShLangGeometry) { + if (id == "secondary_view_offset") { + requireExtensions(loc, 1, &E_GL_NV_stereo_view_rendering, "stereo view rendering"); + publicType.qualifier.layoutSecondaryViewportRelativeOffset = value; + if (nonLiteral) + error(loc, "needs a literal integer", "secondary_view_offset", ""); + return; + } + } + + if (id == "buffer_reference_align") { + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference_align"); + if (! 
IsPow2(value)) + error(loc, "must be a power of 2", "buffer_reference_align", ""); + else + publicType.qualifier.layoutBufferReferenceAlign = (unsigned int)std::log2(value); + if (nonLiteral) + error(loc, "needs a literal integer", "buffer_reference_align", ""); + return; + } +#endif + + switch (language) { +#ifndef GLSLANG_WEB + case EShLangTessControl: + if (id == "vertices") { + if (value == 0) + error(loc, "must be greater than 0", "vertices", ""); + else + publicType.shaderQualifiers.vertices = value; + if (nonLiteral) + error(loc, "needs a literal integer", "vertices", ""); + return; + } + break; + + case EShLangGeometry: + if (id == "invocations") { + profileRequires(loc, ECompatibilityProfile | ECoreProfile, 400, nullptr, "invocations"); + if (value == 0) + error(loc, "must be at least 1", "invocations", ""); + else + publicType.shaderQualifiers.invocations = value; + if (nonLiteral) + error(loc, "needs a literal integer", "invocations", ""); + return; + } + if (id == "max_vertices") { + publicType.shaderQualifiers.vertices = value; + if (value > resources.maxGeometryOutputVertices) + error(loc, "too large, must be less than gl_MaxGeometryOutputVertices", "max_vertices", ""); + if (nonLiteral) + error(loc, "needs a literal integer", "max_vertices", ""); + return; + } + if (id == "stream") { + requireProfile(loc, ~EEsProfile, "selecting output stream"); + publicType.qualifier.layoutStream = value; + if (value > 0) + intermediate.setMultiStream(); + if (nonLiteral) + error(loc, "needs a literal integer", "stream", ""); + return; + } + break; + + case EShLangFragment: + if (id == "index") { + requireProfile(loc, ECompatibilityProfile | ECoreProfile | EEsProfile, "index layout qualifier on fragment output"); + const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location }; + profileRequires(loc, ECompatibilityProfile | ECoreProfile, 330, 2, exts, "index layout qualifier on fragment output"); + profileRequires(loc, EEsProfile ,310, E_GL_EXT_blend_func_extended, "index layout qualifier on fragment output"); + // "It is also a compile-time error if a fragment shader sets a layout index to less than 0 or greater than 1." 
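+ // (On error, the value is reset to 0 below so processing can continue.)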
+ if (value < 0 || value > 1) { + value = 0; + error(loc, "value must be 0 or 1", "index", ""); + } + + publicType.qualifier.layoutIndex = value; + if (nonLiteral) + error(loc, "needs a literal integer", "index", ""); + return; + } + break; + + case EShLangMeshNV: + if (id == "max_vertices") { + requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "max_vertices"); + publicType.shaderQualifiers.vertices = value; + if (value > resources.maxMeshOutputVerticesNV) + error(loc, "too large, must be less than gl_MaxMeshOutputVerticesNV", "max_vertices", ""); + if (nonLiteral) + error(loc, "needs a literal integer", "max_vertices", ""); + return; + } + if (id == "max_primitives") { + requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "max_primitives"); + publicType.shaderQualifiers.primitives = value; + if (value > resources.maxMeshOutputPrimitivesNV) + error(loc, "too large, must be less than gl_MaxMeshOutputPrimitivesNV", "max_primitives", ""); + if (nonLiteral) + error(loc, "needs a literal integer", "max_primitives", ""); + return; + } + // Fall through + + case EShLangTaskNV: + // Fall through +#endif + case EShLangCompute: + if (id.compare(0, 11, "local_size_") == 0) { +#ifndef GLSLANG_WEB + if (language == EShLangMeshNV || language == EShLangTaskNV) { + requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "gl_WorkGroupSize"); + } else { + profileRequires(loc, EEsProfile, 310, 0, "gl_WorkGroupSize"); + profileRequires(loc, ~EEsProfile, 430, E_GL_ARB_compute_shader, "gl_WorkGroupSize"); + } +#endif + if (nonLiteral) + error(loc, "needs a literal integer", "local_size", ""); + if (id.size() == 12 && value == 0) { + error(loc, "must be at least 1", id.c_str(), ""); + return; + } + if (id == "local_size_x") { + publicType.shaderQualifiers.localSize[0] = value; + publicType.shaderQualifiers.localSizeNotDefault[0] = true; + return; + } + if (id == "local_size_y") { + publicType.shaderQualifiers.localSize[1] = value; + publicType.shaderQualifiers.localSizeNotDefault[1] = true; + return; + } + if (id == "local_size_z") { + publicType.shaderQualifiers.localSize[2] = value; + publicType.shaderQualifiers.localSizeNotDefault[2] = true; + return; + } + if (spvVersion.spv != 0) { + if (id == "local_size_x_id") { + publicType.shaderQualifiers.localSizeSpecId[0] = value; + return; + } + if (id == "local_size_y_id") { + publicType.shaderQualifiers.localSizeSpecId[1] = value; + return; + } + if (id == "local_size_z_id") { + publicType.shaderQualifiers.localSizeSpecId[2] = value; + return; + } + } + } + break; + + default: + break; + } + + error(loc, "there is no such layout identifier for this stage taking an assigned value", id.c_str(), ""); +} + +// Merge any layout qualifier information from src into dst, leaving everything else in dst alone +// +// "More than one layout qualifier may appear in a single declaration. +// Additionally, the same layout-qualifier-name can occur multiple times +// within a layout qualifier or across multiple layout qualifiers in the +// same declaration. When the same layout-qualifier-name occurs +// multiple times, in a single declaration, the last occurrence overrides +// the former occurrence(s). Further, if such a layout-qualifier-name +// will effect subsequent declarations or other observable behavior, it +// is only the last occurrence that will have any effect, behaving as if +// the earlier occurrence(s) within the declaration are not present. +// This is also true for overriding layout-qualifier-names, where one +// overrides the other (e.g., row_major vs. 
column_major); only the last +// occurrence has any effect." +void TParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifier& src, bool inheritOnly) +{ + if (src.hasMatrix()) + dst.layoutMatrix = src.layoutMatrix; + if (src.hasPacking()) + dst.layoutPacking = src.layoutPacking; + +#ifndef GLSLANG_WEB + if (src.hasStream()) + dst.layoutStream = src.layoutStream; + if (src.hasFormat()) + dst.layoutFormat = src.layoutFormat; + if (src.hasXfbBuffer()) + dst.layoutXfbBuffer = src.layoutXfbBuffer; + if (src.hasBufferReferenceAlign()) + dst.layoutBufferReferenceAlign = src.layoutBufferReferenceAlign; +#endif + + if (src.hasAlign()) + dst.layoutAlign = src.layoutAlign; + + if (! inheritOnly) { + if (src.hasLocation()) + dst.layoutLocation = src.layoutLocation; + if (src.hasOffset()) + dst.layoutOffset = src.layoutOffset; + if (src.hasSet()) + dst.layoutSet = src.layoutSet; + if (src.layoutBinding != TQualifier::layoutBindingEnd) + dst.layoutBinding = src.layoutBinding; + + if (src.hasSpecConstantId()) + dst.layoutSpecConstantId = src.layoutSpecConstantId; + +#ifndef GLSLANG_WEB + if (src.hasComponent()) + dst.layoutComponent = src.layoutComponent; + if (src.hasIndex()) + dst.layoutIndex = src.layoutIndex; + if (src.hasXfbStride()) + dst.layoutXfbStride = src.layoutXfbStride; + if (src.hasXfbOffset()) + dst.layoutXfbOffset = src.layoutXfbOffset; + if (src.hasAttachment()) + dst.layoutAttachment = src.layoutAttachment; + if (src.layoutPushConstant) + dst.layoutPushConstant = true; + + if (src.layoutBufferReference) + dst.layoutBufferReference = true; + + if (src.layoutPassthrough) + dst.layoutPassthrough = true; + if (src.layoutViewportRelative) + dst.layoutViewportRelative = true; + if (src.layoutSecondaryViewportRelativeOffset != -2048) + dst.layoutSecondaryViewportRelativeOffset = src.layoutSecondaryViewportRelativeOffset; + if (src.layoutShaderRecord) + dst.layoutShaderRecord = true; + if (src.pervertexNV) + dst.pervertexNV = true; +#endif + } +} + +// Do layout error checking given a full variable/block declaration.
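+// This first cross-checks the type itself via layoutTypeCheck(), then adds checks +// that need the whole object, e.g. that "location" on a uniform or buffer is only +// used on a variable declaration.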
+void TParseContext::layoutObjectCheck(const TSourceLoc& loc, const TSymbol& symbol) +{ + const TType& type = symbol.getType(); + const TQualifier& qualifier = type.getQualifier(); + + // first, cross check WRT to just the type + layoutTypeCheck(loc, type); + + // now, any remaining error checking based on the object itself + + if (qualifier.hasAnyLocation()) { + switch (qualifier.storage) { + case EvqUniform: + case EvqBuffer: + if (symbol.getAsVariable() == nullptr) + error(loc, "can only be used on variable declaration", "location", ""); + break; + default: + break; + } + } + + // user-variable location check, which are required for SPIR-V in/out: + // - variables have it directly, + // - blocks have it on each member (already enforced), so check first one + if (spvVersion.spv > 0 && !parsingBuiltins && qualifier.builtIn == EbvNone && + !qualifier.hasLocation() && !intermediate.getAutoMapLocations()) { + + switch (qualifier.storage) { + case EvqVaryingIn: + case EvqVaryingOut: + if (!type.getQualifier().isTaskMemory() && + (type.getBasicType() != EbtBlock || + (!(*type.getStruct())[0].type->getQualifier().hasLocation() && + (*type.getStruct())[0].type->getQualifier().builtIn == EbvNone))) + error(loc, "SPIR-V requires location for user input/output", "location", ""); + break; + default: + break; + } + } + + // Check packing and matrix + if (qualifier.hasUniformLayout()) { + switch (qualifier.storage) { + case EvqUniform: + case EvqBuffer: + if (type.getBasicType() != EbtBlock) { + if (qualifier.hasMatrix()) + error(loc, "cannot specify matrix layout on a variable declaration", "layout", ""); + if (qualifier.hasPacking()) + error(loc, "cannot specify packing on a variable declaration", "layout", ""); + // "The offset qualifier can only be used on block members of blocks..." + if (qualifier.hasOffset() && !type.isAtomic()) + error(loc, "cannot specify on a variable declaration", "offset", ""); + // "The align qualifier can only be used on blocks or block members..." + if (qualifier.hasAlign()) + error(loc, "cannot specify on a variable declaration", "align", ""); + if (qualifier.isPushConstant()) + error(loc, "can only specify on a uniform block", "push_constant", ""); + if (qualifier.isShaderRecord()) + error(loc, "can only specify on a buffer block", "shaderRecordNV", ""); + } + break; + default: + // these were already filtered by layoutTypeCheck() (or its callees) + break; + } + } +} + +// "For some blocks declared as arrays, the location can only be applied at the block level: +// When a block is declared as an array where additional locations are needed for each member +// for each block array element, it is a compile-time error to specify locations on the block +// members. That is, when locations would be under specified by applying them on block members, +// they are not allowed on block members. For arrayed interfaces (those generally having an +// extra level of arrayness due to interface expansion), the outer array is stripped before +// applying this rule." +void TParseContext::layoutMemberLocationArrayCheck(const TSourceLoc& loc, bool memberWithLocation, + TArraySizes* arraySizes) +{ + if (memberWithLocation && arraySizes != nullptr) { + if (arraySizes->getNumDims() > (currentBlockQualifier.isArrayedIo(language) ? 1 : 0)) + error(loc, "cannot use in a block array where new locations are needed for each block element", + "location", ""); + } +} + +// Do layout error checking with respect to a type. 
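+// For example, this is where "component" overflow within a location, overlapping +// location use, and xfb_offset alignment requirements are diagnosed.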
+void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type) +{ + const TQualifier& qualifier = type.getQualifier(); + + // first, intra-layout qualifier-only error checking + layoutQualifierCheck(loc, qualifier); + + // now, error checking combining type and qualifier + + if (qualifier.hasAnyLocation()) { + if (qualifier.hasLocation()) { + if (qualifier.storage == EvqVaryingOut && language == EShLangFragment) { + if (qualifier.layoutLocation >= (unsigned int)resources.maxDrawBuffers) + error(loc, "too large for fragment output", "location", ""); + } + } + if (qualifier.hasComponent()) { + // "It is a compile-time error if this sequence of components gets larger than 3." + if (qualifier.layoutComponent + type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1) > 4) + error(loc, "type overflows the available 4 components", "component", ""); + + // "It is a compile-time error to apply the component qualifier to a matrix, a structure, a block, or an array containing any of these." + if (type.isMatrix() || type.getBasicType() == EbtBlock || type.getBasicType() == EbtStruct) + error(loc, "cannot apply to a matrix, structure, or block", "component", ""); + + // " It is a compile-time error to use component 1 or 3 as the beginning of a double or dvec2." + if (type.getBasicType() == EbtDouble) + if (qualifier.layoutComponent & 1) + error(loc, "doubles cannot start on an odd-numbered component", "component", ""); + } + + switch (qualifier.storage) { + case EvqVaryingIn: + case EvqVaryingOut: + if (type.getBasicType() == EbtBlock) + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "location qualifier on in/out block"); + if (type.getQualifier().isTaskMemory()) + error(loc, "cannot apply to taskNV in/out blocks", "location", ""); + break; + case EvqUniform: + case EvqBuffer: + if (type.getBasicType() == EbtBlock) + error(loc, "cannot apply to uniform or buffer block", "location", ""); + break; +#ifndef GLSLANG_WEB + case EvqPayload: + case EvqPayloadIn: + case EvqHitAttr: + case EvqCallableData: + case EvqCallableDataIn: + break; +#endif + default: + error(loc, "can only apply to uniform, buffer, in, or out storage qualifiers", "location", ""); + break; + } + + bool typeCollision; + int repeated = intermediate.addUsedLocation(qualifier, type, typeCollision); + if (repeated >= 0 && ! typeCollision) + error(loc, "overlapping use of location", "location", "%d", repeated); + // "fragment-shader outputs ... if two variables are placed within the same + // location, they must have the same underlying type (floating-point or integer)" + if (typeCollision && language == EShLangFragment && qualifier.isPipeOutput()) + error(loc, "fragment outputs sharing the same location must be the same basic type", "location", "%d", repeated); + } + +#ifndef GLSLANG_WEB + if (qualifier.hasXfbOffset() && qualifier.hasXfbBuffer()) { + int repeated = intermediate.addXfbBufferOffset(type); + if (repeated >= 0) + error(loc, "overlapping offsets at", "xfb_offset", "offset %d in buffer %d", repeated, qualifier.layoutXfbBuffer); + + // "The offset must be a multiple of the size of the first component of the first + // qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate + // containing a double or 64-bit integer, the offset must also be a multiple of 8..." + if ((type.containsBasicType(EbtDouble) || type.containsBasicType(EbtInt64) || type.containsBasicType(EbtUint64)) && + ! 
IsMultipleOfPow2(qualifier.layoutXfbOffset, 8)) + error(loc, "type contains double or 64-bit integer; xfb_offset must be a multiple of 8", "xfb_offset", ""); + else if ((type.containsBasicType(EbtBool) || type.containsBasicType(EbtFloat) || + type.containsBasicType(EbtInt) || type.containsBasicType(EbtUint)) && + ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4)) + error(loc, "must be a multiple of size of first component", "xfb_offset", ""); + // ..., if applied to an aggregate containing a half float or 16-bit integer, the offset must also be a multiple of 2..." + else if ((type.contains16BitFloat() || type.containsBasicType(EbtInt16) || type.containsBasicType(EbtUint16)) && + !IsMultipleOfPow2(qualifier.layoutXfbOffset, 2)) + error(loc, "type contains half float or 16-bit integer; xfb_offset must be a multiple of 2", "xfb_offset", ""); + } + if (qualifier.hasXfbStride() && qualifier.hasXfbBuffer()) { + if (! intermediate.setXfbBufferStride(qualifier.layoutXfbBuffer, qualifier.layoutXfbStride)) + error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer); + } +#endif + + if (qualifier.hasBinding()) { + // Binding checking, from the spec: + // + // "If the binding point for any uniform or shader storage block instance is less than zero, or greater than or + // equal to the implementation-dependent maximum number of uniform buffer bindings, a compile-time + // error will occur. When the binding identifier is used with a uniform or shader storage block instanced as + // an array of size N, all elements of the array from binding through binding + N - 1 must be within this + // range." + // + if (! type.isOpaque() && type.getBasicType() != EbtBlock) + error(loc, "requires block, or sampler/image, or atomic-counter type", "binding", ""); + if (type.getBasicType() == EbtSampler) { + int lastBinding = qualifier.layoutBinding; + if (type.isArray()) { + if (spvVersion.vulkan > 0) + lastBinding += 1; + else { + if (type.isSizedArray()) + lastBinding += type.getCumulativeArraySize(); + else { + lastBinding += 1; +#ifndef GLSLANG_WEB + if (spvVersion.vulkan == 0) + warn(loc, "assuming binding count of one for compile-time checking of binding numbers for unsized array", "[]", ""); +#endif + } + } + } +#ifndef GLSLANG_WEB + if (spvVersion.vulkan == 0 && lastBinding >= resources.maxCombinedTextureImageUnits) + error(loc, "sampler binding not less than gl_MaxCombinedTextureImageUnits", "binding", type.isArray() ? 
"(using array)" : ""); +#endif + } + if (type.isAtomic()) { + if (qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) { + error(loc, "atomic_uint binding is too large; see gl_MaxAtomicCounterBindings", "binding", ""); + return; + } + } + } else if (!intermediate.getAutoMapBindings()) { + // some types require bindings + + // atomic_uint + if (type.isAtomic()) + error(loc, "layout(binding=X) is required", "atomic_uint", ""); + + // SPIR-V + if (spvVersion.spv > 0) { + if (qualifier.isUniformOrBuffer()) { + if (type.getBasicType() == EbtBlock && !qualifier.isPushConstant() && + !qualifier.isShaderRecord() && + !qualifier.hasAttachment() && + !qualifier.hasBufferReference()) + error(loc, "uniform/buffer blocks require layout(binding=X)", "binding", ""); + else if (spvVersion.vulkan > 0 && type.getBasicType() == EbtSampler) + error(loc, "sampler/texture/image requires layout(binding=X)", "binding", ""); + } + } + } + + // some things can't have arrays of arrays + if (type.isArrayOfArrays()) { + if (spvVersion.vulkan > 0) { + if (type.isOpaque() || (type.getQualifier().isUniformOrBuffer() && type.getBasicType() == EbtBlock)) + warn(loc, "Generating SPIR-V array-of-arrays, but Vulkan only supports single array level for this resource", "[][]", ""); + } + } + + // "The offset qualifier can only be used on block members of blocks..." + if (qualifier.hasOffset()) { + if (type.getBasicType() == EbtBlock) + error(loc, "only applies to block members, not blocks", "offset", ""); + } + + // Image format + if (qualifier.hasFormat()) { + if (! type.isImage()) + error(loc, "only apply to images", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + else { + if (type.getSampler().type == EbtFloat && qualifier.getFormat() > ElfFloatGuard) + error(loc, "does not apply to floating point images", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + if (type.getSampler().type == EbtInt && (qualifier.getFormat() < ElfFloatGuard || qualifier.getFormat() > ElfIntGuard)) + error(loc, "does not apply to signed integer images", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + if (type.getSampler().type == EbtUint && qualifier.getFormat() < ElfIntGuard) + error(loc, "does not apply to unsigned integer images", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + + if (isEsProfile()) { + // "Except for image variables qualified with the format qualifiers r32f, r32i, and r32ui, image variables must + // specify either memory qualifier readonly or the memory qualifier writeonly." + if (! (qualifier.getFormat() == ElfR32f || qualifier.getFormat() == ElfR32i || qualifier.getFormat() == ElfR32ui)) { + if (! qualifier.isReadOnly() && ! qualifier.isWriteOnly()) + error(loc, "format requires readonly or writeonly memory qualifier", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + } + } + } + } else if (type.isImage() && ! 
qualifier.isWriteOnly()) { + const char *explanation = "image variables not declared 'writeonly' and without a format layout qualifier"; + requireProfile(loc, ECoreProfile | ECompatibilityProfile, explanation); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 0, E_GL_EXT_shader_image_load_formatted, explanation); + } + + if (qualifier.isPushConstant() && type.getBasicType() != EbtBlock) + error(loc, "can only be used with a block", "push_constant", ""); + + if (qualifier.hasBufferReference() && type.getBasicType() != EbtBlock) + error(loc, "can only be used with a block", "buffer_reference", ""); + + if (qualifier.isShaderRecord() && type.getBasicType() != EbtBlock) + error(loc, "can only be used with a block", "shaderRecordNV", ""); + + // input attachment + if (type.isSubpass()) { + if (! qualifier.hasAttachment()) + error(loc, "requires an input_attachment_index layout qualifier", "subpass", ""); + } else { + if (qualifier.hasAttachment()) + error(loc, "can only be used with a subpass", "input_attachment_index", ""); + } + + // specialization-constant id + if (qualifier.hasSpecConstantId()) { + if (type.getQualifier().storage != EvqConst) + error(loc, "can only be applied to 'const'-qualified scalar", "constant_id", ""); + if (! type.isScalar()) + error(loc, "can only be applied to a scalar", "constant_id", ""); + switch (type.getBasicType()) + { + case EbtInt8: + case EbtUint8: + case EbtInt16: + case EbtUint16: + case EbtInt: + case EbtUint: + case EbtInt64: + case EbtUint64: + case EbtBool: + case EbtFloat: + case EbtDouble: + case EbtFloat16: + break; + default: + error(loc, "cannot be applied to this type", "constant_id", ""); + break; + } + } +} + +// Do layout error checking that can be done within a layout qualifier proper, not needing to know +// if there are blocks, atomic counters, variables, etc. +void TParseContext::layoutQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier) +{ + if (qualifier.storage == EvqShared && qualifier.hasLayout()) + error(loc, "cannot apply layout qualifiers to a shared variable", "shared", ""); + + // "It is a compile-time error to use *component* without also specifying the location qualifier (order does not matter)." + if (qualifier.hasComponent() && ! qualifier.hasLocation()) + error(loc, "must specify 'location' to use 'component'", "component", ""); + + if (qualifier.hasAnyLocation()) { + + // "As with input layout qualifiers, all shaders except compute shaders + // allow *location* layout qualifiers on output variable declarations, + // output block declarations, and output block member declarations." 
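+        // For example (illustrative): a compute shader declaring
+        //     layout(location = 0) out vec4 color;
+        // is rejected by the stage checks below, which exclude EShLangCompute.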
+ + switch (qualifier.storage) { +#ifndef GLSLANG_WEB + case EvqVaryingIn: + { + const char* feature = "location qualifier on input"; + if (isEsProfile() && version < 310) + requireStage(loc, EShLangVertex, feature); + else + requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature); + if (language == EShLangVertex) { + const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location }; + profileRequires(loc, ~EEsProfile, 330, 2, exts, feature); + profileRequires(loc, EEsProfile, 300, nullptr, feature); + } else { + profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + } + break; + } + case EvqVaryingOut: + { + const char* feature = "location qualifier on output"; + if (isEsProfile() && version < 310) + requireStage(loc, EShLangFragment, feature); + else + requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature); + if (language == EShLangFragment) { + const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location }; + profileRequires(loc, ~EEsProfile, 330, 2, exts, feature); + profileRequires(loc, EEsProfile, 300, nullptr, feature); + } else { + profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + } + break; + } +#endif + case EvqUniform: + case EvqBuffer: + { + const char* feature = "location qualifier on uniform or buffer"; + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile | ENoProfile, feature); + profileRequires(loc, ~EEsProfile, 330, E_GL_ARB_explicit_attrib_location, feature); + profileRequires(loc, ~EEsProfile, 430, E_GL_ARB_explicit_uniform_location, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + break; + } + default: + break; + } + if (qualifier.hasIndex()) { + if (qualifier.storage != EvqVaryingOut) + error(loc, "can only be used on an output", "index", ""); + if (! qualifier.hasLocation()) + error(loc, "can only be used with an explicit location", "index", ""); + } + } + + if (qualifier.hasBinding()) { + if (! qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory()) + error(loc, "requires uniform or buffer storage qualifier", "binding", ""); + } + if (qualifier.hasStream()) { + if (!qualifier.isPipeOutput()) + error(loc, "can only be used on an output", "stream", ""); + } + if (qualifier.hasXfb()) { + if (!qualifier.isPipeOutput()) + error(loc, "can only be used on an output", "xfb layout qualifier", ""); + } + if (qualifier.hasUniformLayout()) { + if (! 
qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory()) { + if (qualifier.hasMatrix() || qualifier.hasPacking()) + error(loc, "matrix or packing qualifiers can only be used on a uniform or buffer", "layout", ""); + if (qualifier.hasOffset() || qualifier.hasAlign()) + error(loc, "offset/align can only be used on a uniform or buffer", "layout", ""); + } + } + if (qualifier.isPushConstant()) { + if (qualifier.storage != EvqUniform) + error(loc, "can only be used with a uniform", "push_constant", ""); + if (qualifier.hasSet()) + error(loc, "cannot be used with push_constant", "set", ""); + } + if (qualifier.hasBufferReference()) { + if (qualifier.storage != EvqBuffer) + error(loc, "can only be used with buffer", "buffer_reference", ""); + } + if (qualifier.isShaderRecord()) { + if (qualifier.storage != EvqBuffer) + error(loc, "can only be used with a buffer", "shaderRecordNV", ""); + if (qualifier.hasBinding()) + error(loc, "cannot be used with shaderRecordNV", "binding", ""); + if (qualifier.hasSet()) + error(loc, "cannot be used with shaderRecordNV", "set", ""); + + } + if (qualifier.storage == EvqHitAttr && qualifier.hasLayout()) { + error(loc, "cannot apply layout qualifiers to hitAttributeNV variable", "hitAttributeNV", ""); + } +} + +// For places that can't have shader-level layout qualifiers +void TParseContext::checkNoShaderLayouts(const TSourceLoc& loc, const TShaderQualifiers& shaderQualifiers) +{ +#ifndef GLSLANG_WEB + const char* message = "can only apply to a standalone qualifier"; + + if (shaderQualifiers.geometry != ElgNone) + error(loc, message, TQualifier::getGeometryString(shaderQualifiers.geometry), ""); + if (shaderQualifiers.spacing != EvsNone) + error(loc, message, TQualifier::getVertexSpacingString(shaderQualifiers.spacing), ""); + if (shaderQualifiers.order != EvoNone) + error(loc, message, TQualifier::getVertexOrderString(shaderQualifiers.order), ""); + if (shaderQualifiers.pointMode) + error(loc, message, "point_mode", ""); + if (shaderQualifiers.invocations != TQualifier::layoutNotSet) + error(loc, message, "invocations", ""); + for (int i = 0; i < 3; ++i) { + if (shaderQualifiers.localSize[i] > 1) + error(loc, message, "local_size", ""); + if (shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet) + error(loc, message, "local_size id", ""); + } + if (shaderQualifiers.vertices != TQualifier::layoutNotSet) { + if (language == EShLangGeometry || language == EShLangMeshNV) + error(loc, message, "max_vertices", ""); + else if (language == EShLangTessControl) + error(loc, message, "vertices", ""); + else + assert(0); + } + if (shaderQualifiers.earlyFragmentTests) + error(loc, message, "early_fragment_tests", ""); + if (shaderQualifiers.postDepthCoverage) + error(loc, message, "post_depth_coverage", ""); + if (shaderQualifiers.primitives != TQualifier::layoutNotSet) { + if (language == EShLangMeshNV) + error(loc, message, "max_primitives", ""); + else + assert(0); + } + if (shaderQualifiers.hasBlendEquation()) + error(loc, message, "blend equation", ""); + if (shaderQualifiers.numViews != TQualifier::layoutNotSet) + error(loc, message, "num_views", ""); + if (shaderQualifiers.interlockOrdering != EioNone) + error(loc, message, TQualifier::getInterlockOrderingString(shaderQualifiers.interlockOrdering), ""); + if (shaderQualifiers.layoutPrimitiveCulling) + error(loc, "can only be applied as standalone", "primitive_culling", ""); +#endif +} + +// Correct and/or advance an object's offset layout qualifier. 
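+// For example (illustrative): with the default offset for binding 0 starting at 0,
+//     layout(binding = 0) uniform atomic_uint a, b;
+// gives 'a' offset 0 and advances the running default by 4, so 'b' receives offset 4.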
+void TParseContext::fixOffset(const TSourceLoc& loc, TSymbol& symbol) +{ + const TQualifier& qualifier = symbol.getType().getQualifier(); +#ifndef GLSLANG_WEB + if (symbol.getType().isAtomic()) { + if (qualifier.hasBinding() && (int)qualifier.layoutBinding < resources.maxAtomicCounterBindings) { + + // Set the offset + int offset; + if (qualifier.hasOffset()) + offset = qualifier.layoutOffset; + else + offset = atomicUintOffsets[qualifier.layoutBinding]; + + if (offset % 4 != 0) + error(loc, "atomic counters offset should align based on 4:", "offset", "%d", offset); + + symbol.getWritableType().getQualifier().layoutOffset = offset; + + // Check for overlap + int numOffsets = 4; + if (symbol.getType().isArray()) { + if (symbol.getType().isSizedArray() && !symbol.getType().getArraySizes()->isInnerUnsized()) + numOffsets *= symbol.getType().getCumulativeArraySize(); + else { + // "It is a compile-time error to declare an unsized array of atomic_uint." + error(loc, "array must be explicitly sized", "atomic_uint", ""); + } + } + int repeated = intermediate.addUsedOffsets(qualifier.layoutBinding, offset, numOffsets); + if (repeated >= 0) + error(loc, "atomic counters sharing the same offset:", "offset", "%d", repeated); + + // Bump the default offset + atomicUintOffsets[qualifier.layoutBinding] = offset + numOffsets; + } + } +#endif +} + +// +// Look up a function name in the symbol table, and make sure it is a function. +// +// Return the function symbol if found, otherwise nullptr. +// +const TFunction* TParseContext::findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn) +{ + if (symbolTable.isFunctionNameVariable(call.getName())) { + error(loc, "can't use function syntax on variable", call.getName().c_str(), ""); + return nullptr; + } + +#ifdef GLSLANG_WEB + return findFunctionExact(loc, call, builtIn); +#endif + + const TFunction* function = nullptr; + + // debugPrintfEXT has var args and is in the symbol table as "debugPrintfEXT()", + // mangled to "debugPrintfEXT(" + if (call.getName() == "debugPrintfEXT") { + TSymbol* symbol = symbolTable.find("debugPrintfEXT(", &builtIn); + if (symbol) + return symbol->getAsFunction(); + } + + bool explicitTypesEnabled = extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64); + + if (isEsProfile()) + function = (extensionTurnedOn(E_GL_EXT_shader_implicit_conversions) && version >= 310) ? + findFunction120(loc, call, builtIn) : findFunctionExact(loc, call, builtIn); + else if (version < 120) + function = findFunctionExact(loc, call, builtIn); + else if (version < 400) + function = extensionTurnedOn(E_GL_ARB_gpu_shader_fp64) ? findFunction400(loc, call, builtIn) : findFunction120(loc, call, builtIn); + else if (explicitTypesEnabled) + function = findFunctionExplicitTypes(loc, call, builtIn); + else + function = findFunction400(loc, call, builtIn); + + return function; +} + +// Function finding algorithm for ES and desktop 110. 
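+// For example (illustrative): under exact matching, a call foo(1) resolves only
+// against a declaration like "void foo(int)"; "void foo(float)" is not considered,
+// since no implicit argument conversions are applied at these versions.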
+const TFunction* TParseContext::findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+    TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+    if (symbol == nullptr) {
+        error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+
+        return nullptr;
+    }
+
+    return symbol->getAsFunction();
+}
+
+// Function finding algorithm for desktop versions 120 through 330.
+const TFunction* TParseContext::findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+    // first, look for an exact match
+    TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+    if (symbol)
+        return symbol->getAsFunction();
+
+    // exact match not found, look through a list of overloaded functions of the same name
+
+    // "If no exact match is found, then [implicit conversions] will be applied to find a match. Mismatched types
+    // on input parameters (in or inout or default) must have a conversion from the calling argument type to the
+    // formal parameter type. Mismatched types on output parameters (out or inout) must have a conversion
+    // from the formal parameter type to the calling argument type. When argument conversions are used to find
+    // a match, it is a semantic error if there are multiple ways to apply these conversions to make the call match
+    // more than one function."
+
+    const TFunction* candidate = nullptr;
+    TVector<const TFunction*> candidateList;
+    symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+    for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
+        const TFunction& function = *(*it);
+
+        // to even be a potential match, number of arguments has to match
+        if (call.getParamCount() != function.getParamCount())
+            continue;
+
+        bool possibleMatch = true;
+        for (int i = 0; i < function.getParamCount(); ++i) {
+            // same types is easy
+            if (*function[i].type == *call[i].type)
+                continue;
+
+            // We have a mismatch in type, see if it is implicitly convertible
+
+            if (function[i].type->isArray() || call[i].type->isArray() ||
+                ! function[i].type->sameElementShape(*call[i].type))
+                possibleMatch = false;
+            else {
+                // do direction-specific checks for conversion of basic type
+                if (function[i].type->getQualifier().isParamInput()) {
+                    if (! intermediate.canImplicitlyPromote(call[i].type->getBasicType(), function[i].type->getBasicType()))
+                        possibleMatch = false;
+                }
+                if (function[i].type->getQualifier().isParamOutput()) {
+                    if (! intermediate.canImplicitlyPromote(function[i].type->getBasicType(), call[i].type->getBasicType()))
+                        possibleMatch = false;
+                }
+            }
+            if (! possibleMatch)
+                break;
+        }
+        if (possibleMatch) {
+            if (candidate) {
+                // our second match, meaning ambiguity
+                error(loc, "ambiguous function signature match: multiple signatures match under implicit type conversion", call.getName().c_str(), "");
+            } else
+                candidate = &function;
+        }
+    }
+
+    if (candidate == nullptr)
+        error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+
+    return candidate;
+}
+
+// Function finding algorithm for desktop version 400 and above.
+//
+// "When function calls are resolved, an exact type match for all the arguments
+// is sought. If an exact match is found, all other functions are ignored, and
+// the exact match is used. If no exact match is found, then the implicit
+// conversions in section 4.1.10 Implicit Conversions will be applied to find
+// a match.
+// Mismatched types on input parameters (in or inout or default) must
+// have a conversion from the calling argument type to the formal parameter type.
+// Mismatched types on output parameters (out or inout) must have a conversion
+// from the formal parameter type to the calling argument type.
+//
+// "If implicit conversions can be used to find more than one matching function,
+// a single best-matching function is sought. To determine a best match, the
+// conversions between calling argument and formal parameter types are compared
+// for each function argument and pair of matching functions. After these
+// comparisons are performed, each pair of matching functions are compared.
+// A function declaration A is considered a better match than function
+// declaration B if
+//
+// * for at least one function argument, the conversion for that argument in A
+//   is better than the corresponding conversion in B; and
+// * there is no function argument for which the conversion in B is better than
+//   the corresponding conversion in A.
+//
+// "If a single function declaration is considered a better match than every
+// other matching function declaration, it will be used. Otherwise, a
+// compile-time semantic error for an ambiguous overloaded function call occurs.
+//
+// "To determine whether the conversion for a single argument in one match is
+// better than that for another match, the following rules are applied, in order:
+//
+// 1. An exact match is better than a match involving any implicit conversion.
+// 2. A match involving an implicit conversion from float to double is better
+//    than a match involving any other implicit conversion.
+// 3. A match involving an implicit conversion from either int or uint to float
+//    is better than a match involving an implicit conversion from either int
+//    or uint to double.
+//
+// "If none of the rules above apply to a particular pair of conversions, neither
+// conversion is considered better than the other."
+//
+const TFunction* TParseContext::findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+    // first, look for an exact match
+    TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+    if (symbol)
+        return symbol->getAsFunction();
+
+    // no exact match, use the generic selector, parameterized by the GLSL rules
+
+    // create list of candidates to send
+    TVector<const TFunction*> candidateList;
+    symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+    // can 'from' convert to 'to'?
+    const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
+        if (from == to)
+            return true;
+        if (from.coopMatParameterOK(to))
+            return true;
+        // Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
+        if (builtIn && from.isArray() && to.isUnsizedArray()) {
+            TType fromElementType(from, 0);
+            TType toElementType(to, 0);
+            if (fromElementType == toElementType)
+                return true;
+        }
+        if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
+            return false;
+        if (from.isCoopMat() && to.isCoopMat())
+            return from.sameCoopMatBaseType(to);
+        return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
+    };
+
+    // Is 'to2' a better conversion than 'to1'?
+    // Ties should not be considered as better.
+    // Assumes 'convertible' already said true.
+    const auto better = [](const TType& from, const TType& to1, const TType& to2) -> bool {
+        // 1. exact match
+        if (from == to2)
+            return from != to1;
+        if (from == to1)
+            return false;
+
+        // 2. float -> double is better
+        if (from.getBasicType() == EbtFloat) {
+            if (to2.getBasicType() == EbtDouble && to1.getBasicType() != EbtDouble)
+                return true;
+        }
+
+        // 3. -> float is better than -> double
+        return to2.getBasicType() == EbtFloat && to1.getBasicType() == EbtDouble;
+    };
+
+    // for ambiguity reporting
+    bool tie = false;
+
+    // send to the generic selector
+    const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
+
+    if (bestMatch == nullptr)
+        error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+    else if (tie)
+        error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
+
+    return bestMatch;
+}
+
+// "To determine whether the conversion for a single argument in one match
+// is better than that for another match, the conversion is assigned of the
+// three ranks ordered from best to worst:
+// 1. Exact match: no conversion.
+// 2. Promotion: integral or floating-point promotion.
+// 3. Conversion: integral conversion, floating-point conversion,
+//    floating-integral conversion.
+// A conversion C1 is better than a conversion C2 if the rank of C1 is
+// better than the rank of C2."
+const TFunction* TParseContext::findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+    // first, look for an exact match
+    TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+    if (symbol)
+        return symbol->getAsFunction();
+
+    // no exact match, use the generic selector, parameterized by the GLSL rules
+
+    // create list of candidates to send
+    TVector<const TFunction*> candidateList;
+    symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+    // can 'from' convert to 'to'?
+    const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
+        if (from == to)
+            return true;
+        if (from.coopMatParameterOK(to))
+            return true;
+        // Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
+        if (builtIn && from.isArray() && to.isUnsizedArray()) {
+            TType fromElementType(from, 0);
+            TType toElementType(to, 0);
+            if (fromElementType == toElementType)
+                return true;
+        }
+        if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
+            return false;
+        if (from.isCoopMat() && to.isCoopMat())
+            return from.sameCoopMatBaseType(to);
+        return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
+    };
+
+    // Is 'to2' a better conversion than 'to1'?
+    // Ties should not be considered as better.
+    // Assumes 'convertible' already said true.
+    const auto better = [this](const TType& from, const TType& to1, const TType& to2) -> bool {
+        // 1. exact match
+        if (from == to2)
+            return from != to1;
+        if (from == to1)
+            return false;
+
+        // 2. Promotion (integral, floating-point) is better
+        TBasicType from_type = from.getBasicType();
+        TBasicType to1_type = to1.getBasicType();
+        TBasicType to2_type = to2.getBasicType();
+        bool isPromotion1 = (intermediate.isIntegralPromotion(from_type, to1_type) ||
+                             intermediate.isFPPromotion(from_type, to1_type));
+        bool isPromotion2 = (intermediate.isIntegralPromotion(from_type, to2_type) ||
+                             intermediate.isFPPromotion(from_type, to2_type));
+        if (isPromotion2)
+            return !isPromotion1;
+        if (isPromotion1)
+            return false;
+
+        // 3. Conversion (integral, floating-point, floating-integral)
+        bool isConversion1 = (intermediate.isIntegralConversion(from_type, to1_type) ||
+                              intermediate.isFPConversion(from_type, to1_type) ||
+                              intermediate.isFPIntegralConversion(from_type, to1_type));
+        bool isConversion2 = (intermediate.isIntegralConversion(from_type, to2_type) ||
+                              intermediate.isFPConversion(from_type, to2_type) ||
+                              intermediate.isFPIntegralConversion(from_type, to2_type));
+
+        return isConversion2 && !isConversion1;
+    };
+
+    // for ambiguity reporting
+    bool tie = false;
+
+    // send to the generic selector
+    const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
+
+    if (bestMatch == nullptr)
+        error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+    else if (tie)
+        error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
+
+    return bestMatch;
+}
+
+// When a declaration includes a type, but not a variable name, it can be used
+// to establish defaults.
+void TParseContext::declareTypeDefaults(const TSourceLoc& loc, const TPublicType& publicType)
+{
+#ifndef GLSLANG_WEB
+    if (publicType.basicType == EbtAtomicUint && publicType.qualifier.hasBinding()) {
+        if (publicType.qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) {
+            error(loc, "atomic_uint binding is too large", "binding", "");
+            return;
+        }
+
+        if (publicType.qualifier.hasOffset()) {
+            atomicUintOffsets[publicType.qualifier.layoutBinding] = publicType.qualifier.layoutOffset;
+        }
+        return;
+    }
+
+    if (publicType.qualifier.hasLayout() && !publicType.qualifier.hasBufferReference())
+        warn(loc, "useless application of layout qualifier", "layout", "");
+#endif
+}
+
+//
+// Do everything necessary to handle a variable (non-block) declaration.
+// Either redeclaring a variable, or making a new one, updating the symbol
+// table, and all error checking.
+//
+// Returns a subtree node that computes an initializer, if needed.
+// Returns nullptr if there is no code to execute for initialization.
+//
+// 'publicType' is the type part of the declaration (to the left)
+// 'arraySizes' is the arrayness tagged on the identifier (to the right)
+//
+TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& identifier, const TPublicType& publicType,
+                                            TArraySizes* arraySizes, TIntermTyped* initializer)
+{
+    // Make a fresh type that combines the characteristics from the individual
+    // identifier syntax and the declaration-type syntax.
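+    // For example (illustrative): in "float a[2]", 'publicType' carries the "float"
+    // part while 'arraySizes' carries the "[2]" attached to the identifier.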
+ TType type(publicType); + type.transferArraySizes(arraySizes); + type.copyArrayInnerSizes(publicType.arraySizes); + arrayOfArrayVersionCheck(loc, type.getArraySizes()); + + if (type.isCoopMat()) { + intermediate.setUseVulkanMemoryModel(); + intermediate.setUseStorageBuffer(); + + if (!publicType.typeParameters || publicType.typeParameters->getNumDims() != 4) { + error(loc, "expected four type parameters", identifier.c_str(), ""); + } + if (publicType.typeParameters) { + if (isTypeFloat(publicType.basicType) && + publicType.typeParameters->getDimSize(0) != 16 && + publicType.typeParameters->getDimSize(0) != 32 && + publicType.typeParameters->getDimSize(0) != 64) { + error(loc, "expected 16, 32, or 64 bits for first type parameter", identifier.c_str(), ""); + } + if (isTypeInt(publicType.basicType) && + publicType.typeParameters->getDimSize(0) != 8 && + publicType.typeParameters->getDimSize(0) != 32) { + error(loc, "expected 8 or 32 bits for first type parameter", identifier.c_str(), ""); + } + } + + } else { + if (publicType.typeParameters && publicType.typeParameters->getNumDims() != 0) { + error(loc, "unexpected type parameters", identifier.c_str(), ""); + } + } + + if (voidErrorCheck(loc, identifier, type.getBasicType())) + return nullptr; + + if (initializer) + rValueErrorCheck(loc, "initializer", initializer); + else + nonInitConstCheck(loc, identifier, type); + + samplerCheck(loc, type, identifier, initializer); + transparentOpaqueCheck(loc, type, identifier); +#ifndef GLSLANG_WEB + atomicUintCheck(loc, type, identifier); + accStructCheck(loc, type, identifier); + checkAndResizeMeshViewDim(loc, type, /*isBlockMember*/ false); +#endif + if (type.getQualifier().storage == EvqConst && type.containsReference()) { + error(loc, "variables with reference type can't have qualifier 'const'", "qualifier", ""); + } + + if (type.getQualifier().storage != EvqUniform && type.getQualifier().storage != EvqBuffer) { + if (type.contains16BitFloat()) + requireFloat16Arithmetic(loc, "qualifier", "float16 types can only be in uniform block or buffer storage"); + if (type.contains16BitInt()) + requireInt16Arithmetic(loc, "qualifier", "(u)int16 types can only be in uniform block or buffer storage"); + if (type.contains8BitInt()) + requireInt8Arithmetic(loc, "qualifier", "(u)int8 types can only be in uniform block or buffer storage"); + } + + if (type.getQualifier().storage == EvqShared && type.containsCoopMat()) + error(loc, "qualifier", "Cooperative matrix types must not be used in shared memory", ""); + + if (identifier != "gl_FragCoord" && (publicType.shaderQualifiers.originUpperLeft || publicType.shaderQualifiers.pixelCenterInteger)) + error(loc, "can only apply origin_upper_left and pixel_center_origin to gl_FragCoord", "layout qualifier", ""); + if (identifier != "gl_FragDepth" && publicType.shaderQualifiers.getDepth() != EldNone) + error(loc, "can only apply depth layout to gl_FragDepth", "layout qualifier", ""); + + // Check for redeclaration of built-ins and/or attempting to declare a reserved name + TSymbol* symbol = redeclareBuiltinVariable(loc, identifier, type.getQualifier(), publicType.shaderQualifiers); + if (symbol == nullptr) + reservedErrorCheck(loc, identifier); + + inheritGlobalDefaults(type.getQualifier()); + + // Declare the variable + if (type.isArray()) { + // Check that implicit sizing is only where allowed. + arraySizesCheck(loc, type.getQualifier(), type.getArraySizes(), initializer, false); + + if (! arrayQualifierError(loc, type.getQualifier()) && ! 
arrayError(loc, type)) + declareArray(loc, identifier, type, symbol); + + if (initializer) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "initializer"); + profileRequires(loc, EEsProfile, 300, nullptr, "initializer"); + } + } else { + // non-array case + if (symbol == nullptr) + symbol = declareNonArray(loc, identifier, type); + else if (type != symbol->getType()) + error(loc, "cannot change the type of", "redeclaration", symbol->getName().c_str()); + } + + if (symbol == nullptr) + return nullptr; + + // Deal with initializer + TIntermNode* initNode = nullptr; + if (symbol != nullptr && initializer) { + TVariable* variable = symbol->getAsVariable(); + if (! variable) { + error(loc, "initializer requires a variable, not a member", identifier.c_str(), ""); + return nullptr; + } + initNode = executeInitializer(loc, initializer, variable); + } + + // look for errors in layout qualifier use + layoutObjectCheck(loc, *symbol); + + // fix up + fixOffset(loc, *symbol); + + return initNode; +} + +// Pick up global defaults from the provide global defaults into dst. +void TParseContext::inheritGlobalDefaults(TQualifier& dst) const +{ +#ifndef GLSLANG_WEB + if (dst.storage == EvqVaryingOut) { + if (! dst.hasStream() && language == EShLangGeometry) + dst.layoutStream = globalOutputDefaults.layoutStream; + if (! dst.hasXfbBuffer()) + dst.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer; + } +#endif +} + +// +// Make an internal-only variable whose name is for debug purposes only +// and won't be searched for. Callers will only use the return value to use +// the variable, not the name to look it up. It is okay if the name +// is the same as other names; there won't be any conflict. +// +TVariable* TParseContext::makeInternalVariable(const char* name, const TType& type) const +{ + TString* nameString = NewPoolTString(name); + TVariable* variable = new TVariable(nameString, type); + symbolTable.makeInternalVariable(*variable); + + return variable; +} + +// +// Declare a non-array variable, the main point being there is no redeclaration +// for resizing allowed. +// +// Return the successfully declared variable. +// +TVariable* TParseContext::declareNonArray(const TSourceLoc& loc, const TString& identifier, const TType& type) +{ + // make a new variable + TVariable* variable = new TVariable(&identifier, type); + +#ifndef GLSLANG_WEB + ioArrayCheck(loc, type, identifier); +#endif + + // add variable to symbol table + if (symbolTable.insert(*variable)) { + if (symbolTable.atGlobalLevel()) + trackLinkage(*variable); + return variable; + } + + error(loc, "redefinition", variable->getName().c_str(), ""); + return nullptr; +} + +// +// Handle all types of initializers from the grammar. +// +// Returning nullptr just means there is no code to execute to handle the +// initializer, which will, for example, be the case for constant initializers. +// +TIntermNode* TParseContext::executeInitializer(const TSourceLoc& loc, TIntermTyped* initializer, TVariable* variable) +{ + // + // Identifier must be of type constant, a global, or a temporary, and + // starting at version 120, desktop allows uniforms to have initializers. + // + TStorageQualifier qualifier = variable->getType().getQualifier().storage; + if (! 
(qualifier == EvqTemporary || qualifier == EvqGlobal || qualifier == EvqConst || + (qualifier == EvqUniform && !isEsProfile() && version >= 120))) { + error(loc, " cannot initialize this type of qualifier ", variable->getType().getStorageQualifierString(), ""); + return nullptr; + } + arrayObjectCheck(loc, variable->getType(), "array initializer"); + + // + // If the initializer was from braces { ... }, we convert the whole subtree to a + // constructor-style subtree, allowing the rest of the code to operate + // identically for both kinds of initializers. + // + // Type can't be deduced from the initializer list, so a skeletal type to + // follow has to be passed in. Constness and specialization-constness + // should be deduced bottom up, not dictated by the skeletal type. + // + TType skeletalType; + skeletalType.shallowCopy(variable->getType()); + skeletalType.getQualifier().makeTemporary(); +#ifndef GLSLANG_WEB + initializer = convertInitializerList(loc, skeletalType, initializer); +#endif + if (! initializer) { + // error recovery; don't leave const without constant values + if (qualifier == EvqConst) + variable->getWritableType().getQualifier().makeTemporary(); + return nullptr; + } + + // Fix outer arrayness if variable is unsized, getting size from the initializer + if (initializer->getType().isSizedArray() && variable->getType().isUnsizedArray()) + variable->getWritableType().changeOuterArraySize(initializer->getType().getOuterArraySize()); + + // Inner arrayness can also get set by an initializer + if (initializer->getType().isArrayOfArrays() && variable->getType().isArrayOfArrays() && + initializer->getType().getArraySizes()->getNumDims() == + variable->getType().getArraySizes()->getNumDims()) { + // adopt unsized sizes from the initializer's sizes + for (int d = 1; d < variable->getType().getArraySizes()->getNumDims(); ++d) { + if (variable->getType().getArraySizes()->getDimSize(d) == UnsizedArraySize) { + variable->getWritableType().getArraySizes()->setDimSize(d, + initializer->getType().getArraySizes()->getDimSize(d)); + } + } + } + + // Uniforms require a compile-time constant initializer + if (qualifier == EvqUniform && ! initializer->getType().getQualifier().isFrontEndConstant()) { + error(loc, "uniform initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str()); + variable->getWritableType().getQualifier().makeTemporary(); + return nullptr; + } + // Global consts require a constant initializer (specialization constant is okay) + if (qualifier == EvqConst && symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) { + error(loc, "global const initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str()); + variable->getWritableType().getQualifier().makeTemporary(); + return nullptr; + } + + // Const variables require a constant initializer, depending on version + if (qualifier == EvqConst) { + if (! initializer->getType().getQualifier().isConstant()) { + const char* initFeature = "non-constant initializer"; + requireProfile(loc, ~EEsProfile, initFeature); + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + variable->getWritableType().getQualifier().storage = EvqConstReadOnly; + qualifier = EvqConstReadOnly; + } + } else { + // Non-const global variables in ES need a const initializer. + // + // "In declarations of global variables with no storage qualifier or with a const + // qualifier any initializer must be a constant expression." 
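+    // e.g. (illustrative, ES): "float g = someNonConstExpr;" at global scope is
+    // only accepted with GL_EXT_shader_non_constant_global_initializers.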
+ if (symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) { + const char* initFeature = "non-constant global initializer (needs GL_EXT_shader_non_constant_global_initializers)"; + if (isEsProfile()) { + if (relaxedErrors() && ! extensionTurnedOn(E_GL_EXT_shader_non_constant_global_initializers)) + warn(loc, "not allowed in this version", initFeature, ""); + else + profileRequires(loc, EEsProfile, 0, E_GL_EXT_shader_non_constant_global_initializers, initFeature); + } + } + } + + if (qualifier == EvqConst || qualifier == EvqUniform) { + // Compile-time tagging of the variable with its constant value... + + initializer = intermediate.addConversion(EOpAssign, variable->getType(), initializer); + if (! initializer || ! initializer->getType().getQualifier().isConstant() || variable->getType() != initializer->getType()) { + error(loc, "non-matching or non-convertible constant type for const initializer", + variable->getType().getStorageQualifierString(), ""); + variable->getWritableType().getQualifier().makeTemporary(); + return nullptr; + } + + // We either have a folded constant in getAsConstantUnion, or we have to use + // the initializer's subtree in the AST to represent the computation of a + // specialization constant. + assert(initializer->getAsConstantUnion() || initializer->getType().getQualifier().isSpecConstant()); + if (initializer->getAsConstantUnion()) + variable->setConstArray(initializer->getAsConstantUnion()->getConstArray()); + else { + // It's a specialization constant. + variable->getWritableType().getQualifier().makeSpecConstant(); + + // Keep the subtree that computes the specialization constant with the variable. + // Later, a symbol node will adopt the subtree from the variable. + variable->setConstSubtree(initializer); + } + } else { + // normal assigning of a value to a variable... + specializationCheck(loc, initializer->getType(), "initializer"); + TIntermSymbol* intermSymbol = intermediate.addSymbol(*variable, loc); + TIntermTyped* initNode = intermediate.addAssign(EOpAssign, intermSymbol, initializer, loc); + if (! initNode) + assignError(loc, "=", intermSymbol->getCompleteString(), initializer->getCompleteString()); + + return initNode; + } + + return nullptr; +} + +// +// Reprocess any initializer-list (the "{ ... }" syntax) parts of the +// initializer. +// +// Need to hierarchically assign correct types and implicit +// conversions. Will do this mimicking the same process used for +// creating a constructor-style initializer, ensuring we get the +// same form. However, it has to in parallel walk the 'type' +// passed in, as type cannot be deduced from an initializer list. +// +TIntermTyped* TParseContext::convertInitializerList(const TSourceLoc& loc, const TType& type, TIntermTyped* initializer) +{ + // Will operate recursively. Once a subtree is found that is constructor style, + // everything below it is already good: Only the "top part" of the initializer + // can be an initializer list, where "top part" can extend for several (or all) levels. + + // see if we have bottomed out in the tree within the initializer-list part + TIntermAggregate* initList = initializer->getAsAggregate(); + if (! initList || initList->getOp() != EOpNull) + return initializer; + + // Of the initializer-list set of nodes, need to process bottom up, + // so recurse deep, then process on the way up. + + // Go down the tree here... 
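+    // For example (illustrative): for "float a[2][3] = {{1, 2, 3}, {4, 5, 6}};",
+    // each inner brace list is converted first, then the outer list becomes an
+    // array constructor over the converted elements.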
+ if (type.isArray()) { + // The type's array might be unsized, which could be okay, so base sizes on the size of the aggregate. + // Later on, initializer execution code will deal with array size logic. + TType arrayType; + arrayType.shallowCopy(type); // sharing struct stuff is fine + arrayType.copyArraySizes(*type.getArraySizes()); // but get a fresh copy of the array information, to edit below + + // edit array sizes to fill in unsized dimensions + arrayType.changeOuterArraySize((int)initList->getSequence().size()); + TIntermTyped* firstInit = initList->getSequence()[0]->getAsTyped(); + if (arrayType.isArrayOfArrays() && firstInit->getType().isArray() && + arrayType.getArraySizes()->getNumDims() == firstInit->getType().getArraySizes()->getNumDims() + 1) { + for (int d = 1; d < arrayType.getArraySizes()->getNumDims(); ++d) { + if (arrayType.getArraySizes()->getDimSize(d) == UnsizedArraySize) + arrayType.getArraySizes()->setDimSize(d, firstInit->getType().getArraySizes()->getDimSize(d - 1)); + } + } + + TType elementType(arrayType, 0); // dereferenced type + for (size_t i = 0; i < initList->getSequence().size(); ++i) { + initList->getSequence()[i] = convertInitializerList(loc, elementType, initList->getSequence()[i]->getAsTyped()); + if (initList->getSequence()[i] == nullptr) + return nullptr; + } + + return addConstructor(loc, initList, arrayType); + } else if (type.isStruct()) { + if (type.getStruct()->size() != initList->getSequence().size()) { + error(loc, "wrong number of structure members", "initializer list", ""); + return nullptr; + } + for (size_t i = 0; i < type.getStruct()->size(); ++i) { + initList->getSequence()[i] = convertInitializerList(loc, *(*type.getStruct())[i].type, initList->getSequence()[i]->getAsTyped()); + if (initList->getSequence()[i] == nullptr) + return nullptr; + } + } else if (type.isMatrix()) { + if (type.getMatrixCols() != (int)initList->getSequence().size()) { + error(loc, "wrong number of matrix columns:", "initializer list", type.getCompleteString().c_str()); + return nullptr; + } + TType vectorType(type, 0); // dereferenced type + for (int i = 0; i < type.getMatrixCols(); ++i) { + initList->getSequence()[i] = convertInitializerList(loc, vectorType, initList->getSequence()[i]->getAsTyped()); + if (initList->getSequence()[i] == nullptr) + return nullptr; + } + } else if (type.isVector()) { + if (type.getVectorSize() != (int)initList->getSequence().size()) { + error(loc, "wrong vector size (or rows in a matrix column):", "initializer list", type.getCompleteString().c_str()); + return nullptr; + } + } else { + error(loc, "unexpected initializer-list type:", "initializer list", type.getCompleteString().c_str()); + return nullptr; + } + + // Now that the subtree is processed, process this node as if the + // initializer list is a set of arguments to a constructor. + TIntermNode* emulatedConstructorArguments; + if (initList->getSequence().size() == 1) + emulatedConstructorArguments = initList->getSequence()[0]; + else + emulatedConstructorArguments = initList; + return addConstructor(loc, emulatedConstructorArguments, type); +} + +// +// Test for the correctness of the parameters passed to various constructor functions +// and also convert them to the right data type, if allowed and required. +// +// 'node' is what to construct from. +// 'type' is what type to construct. +// +// Returns nullptr for an error or the constructed node (aggregate or typed) for no error. 
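+//
+// For example (illustrative): "vec4(1.0, v3)" arrives as an aggregate of two
+// arguments; each is checked and, where allowed, converted before the final
+// constructor node is built.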
+// +TIntermTyped* TParseContext::addConstructor(const TSourceLoc& loc, TIntermNode* node, const TType& type) +{ + if (node == nullptr || node->getAsTyped() == nullptr) + return nullptr; + rValueErrorCheck(loc, "constructor", node->getAsTyped()); + + TIntermAggregate* aggrNode = node->getAsAggregate(); + TOperator op = intermediate.mapTypeToConstructorOp(type); + + // Combined texture-sampler constructors are completely semantic checked + // in constructorTextureSamplerError() + if (op == EOpConstructTextureSampler) { + if (aggrNode->getSequence()[1]->getAsTyped()->getType().getSampler().shadow) { + // Transfer depth into the texture (SPIR-V image) type, as a hint + // for tools to know this texture/image is a depth image. + aggrNode->getSequence()[0]->getAsTyped()->getWritableType().getSampler().shadow = true; + } + return intermediate.setAggregateOperator(aggrNode, op, type, loc); + } + + TTypeList::const_iterator memberTypes; + if (op == EOpConstructStruct) + memberTypes = type.getStruct()->begin(); + + TType elementType; + if (type.isArray()) { + TType dereferenced(type, 0); + elementType.shallowCopy(dereferenced); + } else + elementType.shallowCopy(type); + + bool singleArg; + if (aggrNode) { + if (aggrNode->getOp() != EOpNull) + singleArg = true; + else + singleArg = false; + } else + singleArg = true; + + TIntermTyped *newNode; + if (singleArg) { + // If structure constructor or array constructor is being called + // for only one parameter inside the structure, we need to call constructAggregate function once. + if (type.isArray()) + newNode = constructAggregate(node, elementType, 1, node->getLoc()); + else if (op == EOpConstructStruct) + newNode = constructAggregate(node, *(*memberTypes).type, 1, node->getLoc()); + else + newNode = constructBuiltIn(type, op, node->getAsTyped(), node->getLoc(), false); + + if (newNode && (type.isArray() || op == EOpConstructStruct)) + newNode = intermediate.setAggregateOperator(newNode, EOpConstructStruct, type, loc); + + return newNode; + } + + // + // Handle list of arguments. + // + TIntermSequence &sequenceVector = aggrNode->getSequence(); // Stores the information about the parameter to the constructor + // if the structure constructor contains more than one parameter, then construct + // each parameter + + int paramCount = 0; // keeps track of the constructor parameter number being checked + + // for each parameter to the constructor call, check to see if the right type is passed or convert them + // to the right type if possible (and allowed). + // for structure constructors, just check if the right type is passed, no conversion is allowed. + for (TIntermSequence::iterator p = sequenceVector.begin(); + p != sequenceVector.end(); p++, paramCount++) { + if (type.isArray()) + newNode = constructAggregate(*p, elementType, paramCount+1, node->getLoc()); + else if (op == EOpConstructStruct) + newNode = constructAggregate(*p, *(memberTypes[paramCount]).type, paramCount+1, node->getLoc()); + else + newNode = constructBuiltIn(type, op, (*p)->getAsTyped(), node->getLoc(), true); + + if (newNode) + *p = newNode; + else + return nullptr; + } + + return intermediate.setAggregateOperator(aggrNode, op, type, loc); +} + +// Function for constructor implementation. Calls addUnaryMath with appropriate EOp value +// for the parameter to the constructor (passed to this function). Essentially, it converts +// the parameter types correctly. If a constructor expects an int (like ivec2) and is passed a +// float, then float is converted to int. 
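+// For example (illustrative): ivec2(1.9, -1.9) converts each argument via the
+// EOpConstructInt path, dropping the fractional part to give ivec2(1, -1).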
+// +// Returns nullptr for an error or the constructed node. +// +TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, TIntermTyped* node, const TSourceLoc& loc, + bool subset) +{ + // If we are changing a matrix in both domain of basic type and to a non matrix, + // do the shape change first (by default, below, basic type is changed before shape). + // This avoids requesting a matrix of a new type that is going to be discarded anyway. + // TODO: This could be generalized to more type combinations, but that would require + // more extensive testing and full algorithm rework. For now, the need to do two changes makes + // the recursive call work, and avoids the most egregious case of creating integer matrices. + if (node->getType().isMatrix() && (type.isScalar() || type.isVector()) && + type.isFloatingDomain() != node->getType().isFloatingDomain()) { + TType transitionType(node->getBasicType(), glslang::EvqTemporary, type.getVectorSize(), 0, 0, node->isVector()); + TOperator transitionOp = intermediate.mapTypeToConstructorOp(transitionType); + node = constructBuiltIn(transitionType, transitionOp, node, loc, false); + } + + TIntermTyped* newNode; + TOperator basicOp; + + // + // First, convert types as needed. + // + switch (op) { + case EOpConstructVec2: + case EOpConstructVec3: + case EOpConstructVec4: + case EOpConstructMat2x2: + case EOpConstructMat2x3: + case EOpConstructMat2x4: + case EOpConstructMat3x2: + case EOpConstructMat3x3: + case EOpConstructMat3x4: + case EOpConstructMat4x2: + case EOpConstructMat4x3: + case EOpConstructMat4x4: + case EOpConstructFloat: + basicOp = EOpConstructFloat; + break; + + case EOpConstructIVec2: + case EOpConstructIVec3: + case EOpConstructIVec4: + case EOpConstructInt: + basicOp = EOpConstructInt; + break; + + case EOpConstructUVec2: + if (node->getType().getBasicType() == EbtReference) { + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference_uvec2, "reference conversion to uvec2"); + TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvPtrToUvec2, true, node, + type); + return newNode; + } + case EOpConstructUVec3: + case EOpConstructUVec4: + case EOpConstructUint: + basicOp = EOpConstructUint; + break; + + case EOpConstructBVec2: + case EOpConstructBVec3: + case EOpConstructBVec4: + case EOpConstructBool: + basicOp = EOpConstructBool; + break; + +#ifndef GLSLANG_WEB + + case EOpConstructDVec2: + case EOpConstructDVec3: + case EOpConstructDVec4: + case EOpConstructDMat2x2: + case EOpConstructDMat2x3: + case EOpConstructDMat2x4: + case EOpConstructDMat3x2: + case EOpConstructDMat3x3: + case EOpConstructDMat3x4: + case EOpConstructDMat4x2: + case EOpConstructDMat4x3: + case EOpConstructDMat4x4: + case EOpConstructDouble: + basicOp = EOpConstructDouble; + break; + + case EOpConstructF16Vec2: + case EOpConstructF16Vec3: + case EOpConstructF16Vec4: + case EOpConstructF16Mat2x2: + case EOpConstructF16Mat2x3: + case EOpConstructF16Mat2x4: + case EOpConstructF16Mat3x2: + case EOpConstructF16Mat3x3: + case EOpConstructF16Mat3x4: + case EOpConstructF16Mat4x2: + case EOpConstructF16Mat4x3: + case EOpConstructF16Mat4x4: + case EOpConstructFloat16: + basicOp = EOpConstructFloat16; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticFloat16Enabled()) { + TType tempType(EbtFloat, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator 
aggregateOp; + if (op == EOpConstructFloat16) + aggregateOp = EOpConstructFloat; + else + aggregateOp = (TOperator)(EOpConstructVec2 + op - EOpConstructF16Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtFloat16, newNode); + return newNode; + } + break; + + case EOpConstructI8Vec2: + case EOpConstructI8Vec3: + case EOpConstructI8Vec4: + case EOpConstructInt8: + basicOp = EOpConstructInt8; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticInt8Enabled()) { + TType tempType(EbtInt, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator aggregateOp; + if (op == EOpConstructInt8) + aggregateOp = EOpConstructInt; + else + aggregateOp = (TOperator)(EOpConstructIVec2 + op - EOpConstructI8Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtInt8, newNode); + return newNode; + } + break; + + case EOpConstructU8Vec2: + case EOpConstructU8Vec3: + case EOpConstructU8Vec4: + case EOpConstructUint8: + basicOp = EOpConstructUint8; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticInt8Enabled()) { + TType tempType(EbtUint, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator aggregateOp; + if (op == EOpConstructUint8) + aggregateOp = EOpConstructUint; + else + aggregateOp = (TOperator)(EOpConstructUVec2 + op - EOpConstructU8Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtUint8, newNode); + return newNode; + } + break; + + case EOpConstructI16Vec2: + case EOpConstructI16Vec3: + case EOpConstructI16Vec4: + case EOpConstructInt16: + basicOp = EOpConstructInt16; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticInt16Enabled()) { + TType tempType(EbtInt, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator aggregateOp; + if (op == EOpConstructInt16) + aggregateOp = EOpConstructInt; + else + aggregateOp = (TOperator)(EOpConstructIVec2 + op - EOpConstructI16Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtInt16, newNode); + return newNode; + } + break; + + case EOpConstructU16Vec2: + case EOpConstructU16Vec3: + case EOpConstructU16Vec4: + case EOpConstructUint16: + basicOp = EOpConstructUint16; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticInt16Enabled()) { + TType tempType(EbtUint, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator aggregateOp; + if (op == EOpConstructUint16) + aggregateOp = EOpConstructUint; + else + aggregateOp = (TOperator)(EOpConstructUVec2 + op - EOpConstructU16Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtUint16, newNode); + 
+            return newNode;
+        }
+        break;
+
+    case EOpConstructI64Vec2:
+    case EOpConstructI64Vec3:
+    case EOpConstructI64Vec4:
+    case EOpConstructInt64:
+        basicOp = EOpConstructInt64;
+        break;
+
+    case EOpConstructUint64:
+        if (type.isScalar() && node->getType().isReference()) {
+            TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvPtrToUint64, true, node, type);
+            return newNode;
+        }
+        // fall through
+    case EOpConstructU64Vec2:
+    case EOpConstructU64Vec3:
+    case EOpConstructU64Vec4:
+        basicOp = EOpConstructUint64;
+        break;
+
+    case EOpConstructNonuniform:
+        // Make a nonuniform copy of node
+        newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpCopyObject, true, node, type);
+        return newNode;
+
+    case EOpConstructReference:
+        // construct reference from reference
+        if (node->getType().isReference()) {
+            newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConstructReference, true, node, type);
+            return newNode;
+        // construct reference from uint64
+        } else if (node->getType().isScalar() && node->getType().getBasicType() == EbtUint64) {
+            TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUint64ToPtr, true, node,
+                type);
+            return newNode;
+        // construct reference from uvec2
+        } else if (node->getType().isVector() && node->getType().getBasicType() == EbtUint &&
+                   node->getVectorSize() == 2) {
+            requireExtensions(loc, 1, &E_GL_EXT_buffer_reference_uvec2, "uvec2 conversion to reference");
+            TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUvec2ToPtr, true, node,
+                type);
+            return newNode;
+        } else {
+            return nullptr;
+        }
+
+    case EOpConstructCooperativeMatrix:
+        if (!node->getType().isCoopMat()) {
+            if (type.getBasicType() != node->getType().getBasicType()) {
+                node = intermediate.addConversion(type.getBasicType(), node);
+            }
+            node = intermediate.setAggregateOperator(node, EOpConstructCooperativeMatrix, type, node->getLoc());
+        } else {
+            TOperator op = EOpNull;
+            switch (type.getBasicType()) {
+            default:
+                assert(0);
+                break;
+            case EbtInt:
+                switch (node->getType().getBasicType()) {
+                case EbtFloat:   op = EOpConvFloatToInt;    break;
+                case EbtFloat16: op = EOpConvFloat16ToInt;  break;
+                case EbtUint8:   op = EOpConvUint8ToInt;    break;
+                case EbtInt8:    op = EOpConvInt8ToInt;     break;
+                case EbtUint:    op = EOpConvUintToInt;     break;
+                default: assert(0);
+                }
+                break;
+            case EbtUint:
+                switch (node->getType().getBasicType()) {
+                case EbtFloat:   op = EOpConvFloatToUint;   break;
+                case EbtFloat16: op = EOpConvFloat16ToUint; break;
+                case EbtUint8:   op = EOpConvUint8ToUint;   break;
+                case EbtInt8:    op = EOpConvInt8ToUint;    break;
+                case EbtInt:     op = EOpConvIntToUint;     break;
+                default: assert(0);
+                }
+                break;
+            case EbtInt8:
+                switch (node->getType().getBasicType()) {
+                case EbtFloat:   op = EOpConvFloatToInt8;   break;
+                case EbtFloat16: op = EOpConvFloat16ToInt8; break;
+                case EbtUint8:   op = EOpConvUint8ToInt8;   break;
+                case EbtInt:     op = EOpConvIntToInt8;     break;
+                case EbtUint:    op = EOpConvUintToInt8;    break;
+                default: assert(0);
+                }
+                break;
+            case EbtUint8:
+                switch (node->getType().getBasicType()) {
+                case EbtFloat:   op = EOpConvFloatToUint8;  break;
+                case EbtFloat16: op = EOpConvFloat16ToUint8; break;
+                case EbtInt8:    op = EOpConvInt8ToUint8;   break;
+                case EbtInt:     op = EOpConvIntToUint8;    break;
+                case EbtUint:    op = EOpConvUintToUint8;   break;
+                default: assert(0);
+                }
+                break;
+            case EbtFloat:
+                switch (node->getType().getBasicType()) {
+                case EbtFloat16: op =
EOpConvFloat16ToFloat; break; + case EbtInt8: op = EOpConvInt8ToFloat; break; + case EbtUint8: op = EOpConvUint8ToFloat; break; + case EbtInt: op = EOpConvIntToFloat; break; + case EbtUint: op = EOpConvUintToFloat; break; + default: assert(0); + } + break; + case EbtFloat16: + switch (node->getType().getBasicType()) { + case EbtFloat: op = EOpConvFloatToFloat16; break; + case EbtInt8: op = EOpConvInt8ToFloat16; break; + case EbtUint8: op = EOpConvUint8ToFloat16; break; + case EbtInt: op = EOpConvIntToFloat16; break; + case EbtUint: op = EOpConvUintToFloat16; break; + default: assert(0); + } + break; + } + + node = intermediate.addUnaryNode(op, node, node->getLoc(), type); + // If it's a (non-specialization) constant, it must be folded. + if (node->getAsUnaryNode()->getOperand()->getAsConstantUnion()) + return node->getAsUnaryNode()->getOperand()->getAsConstantUnion()->fold(op, node->getType()); + } + + return node; + +#endif // GLSLANG_WEB + + default: + error(loc, "unsupported construction", "", ""); + + return nullptr; + } + newNode = intermediate.addUnaryMath(basicOp, node, node->getLoc()); + if (newNode == nullptr) { + error(loc, "can't convert", "constructor", ""); + return nullptr; + } + + // + // Now, if there still isn't an operation to do the construction, and we need one, add one. + // + + // Otherwise, skip out early. + if (subset || (newNode != node && newNode->getType() == type)) + return newNode; + + // setAggregateOperator will insert a new node for the constructor, as needed. + return intermediate.setAggregateOperator(newNode, op, type, loc); +} + +// This function tests for the type of the parameters to the structure or array constructor. Raises +// an error message if the expected type does not match the parameter passed to the constructor. +// +// Returns nullptr for an error or the input node itself if the expected and the given parameter types match. +// +TIntermTyped* TParseContext::constructAggregate(TIntermNode* node, const TType& type, int paramCount, const TSourceLoc& loc) +{ + TIntermTyped* converted = intermediate.addConversion(EOpConstructStruct, type, node->getAsTyped()); + if (! converted || converted->getType() != type) { + error(loc, "", "constructor", "cannot convert parameter %d from '%s' to '%s'", paramCount, + node->getAsTyped()->getType().getCompleteString().c_str(), type.getCompleteString().c_str()); + + return nullptr; + } + + return converted; +} + +// If a memory qualifier is present in 'to', also make it present in 'from'. +void TParseContext::inheritMemoryQualifiers(const TQualifier& from, TQualifier& to) +{ +#ifndef GLSLANG_WEB + if (from.isReadOnly()) + to.readonly = from.readonly; + if (from.isWriteOnly()) + to.writeonly = from.writeonly; + if (from.coherent) + to.coherent = from.coherent; + if (from.volatil) + to.volatil = from.volatil; + if (from.restrict) + to.restrict = from.restrict; +#endif +} + +// +// Do everything needed to add an interface block. 
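+// For illustration (a hypothetical snippet, not from the upstream sources), a declaration
+// such as
+//
+//     layout(std140, binding = 0) uniform Matrices { mat4 mvp; } u;
+//
+// arrives here with 'Matrices' as the block name and 'u' as the instance name.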
+// +void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, const TString* instanceName, + TArraySizes* arraySizes) +{ + blockStageIoCheck(loc, currentBlockQualifier); + blockQualifierCheck(loc, currentBlockQualifier, instanceName != nullptr); + if (arraySizes != nullptr) { + arraySizesCheck(loc, currentBlockQualifier, arraySizes, nullptr, false); + arrayOfArrayVersionCheck(loc, arraySizes); + if (arraySizes->getNumDims() > 1) + requireProfile(loc, ~EEsProfile, "array-of-array of block"); + } + + // Inherit and check member storage qualifiers WRT to the block-level qualifier. + for (unsigned int member = 0; member < typeList.size(); ++member) { + TType& memberType = *typeList[member].type; + TQualifier& memberQualifier = memberType.getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; + globalQualifierFixCheck(memberLoc, memberQualifier); + if (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal && memberQualifier.storage != currentBlockQualifier.storage) + error(memberLoc, "member storage qualifier cannot contradict block storage qualifier", memberType.getFieldName().c_str(), ""); + memberQualifier.storage = currentBlockQualifier.storage; +#ifndef GLSLANG_WEB + inheritMemoryQualifiers(currentBlockQualifier, memberQualifier); + if (currentBlockQualifier.perPrimitiveNV) + memberQualifier.perPrimitiveNV = currentBlockQualifier.perPrimitiveNV; + if (currentBlockQualifier.perViewNV) + memberQualifier.perViewNV = currentBlockQualifier.perViewNV; + if (currentBlockQualifier.perTaskNV) + memberQualifier.perTaskNV = currentBlockQualifier.perTaskNV; +#endif + if ((currentBlockQualifier.storage == EvqUniform || currentBlockQualifier.storage == EvqBuffer) && (memberQualifier.isInterpolation() || memberQualifier.isAuxiliary())) + error(memberLoc, "member of uniform or buffer block cannot have an auxiliary or interpolation qualifier", memberType.getFieldName().c_str(), ""); + if (memberType.isArray()) + arraySizesCheck(memberLoc, currentBlockQualifier, memberType.getArraySizes(), nullptr, member == typeList.size() - 1); + if (memberQualifier.hasOffset()) { + if (spvVersion.spv == 0) { + requireProfile(memberLoc, ~EEsProfile, "offset on block member"); + profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "offset on block member"); + } + } + + if (memberType.containsOpaque()) + error(memberLoc, "member of block cannot be or contain a sampler, image, or atomic_uint type", typeList[member].type->getFieldName().c_str(), ""); + + if (memberType.containsCoopMat()) + error(memberLoc, "member of block cannot be or contain a cooperative matrix type", typeList[member].type->getFieldName().c_str(), ""); + } + + // This might be a redeclaration of a built-in block. If so, redeclareBuiltinBlock() will + // do all the rest. + if (! symbolTable.atBuiltInLevel() && builtInName(*blockName)) { + redeclareBuiltinBlock(loc, typeList, *blockName, instanceName, arraySizes); + return; + } + + // Not a redeclaration of a built-in; check that all names are user names. 
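+    // (A hypothetical example of the built-in case handled above: the Vulkan-style
+    // redeclaration 'out gl_PerVertex { vec4 gl_Position; };' is routed through
+    // redeclareBuiltinBlock() instead of the user-name checks below.)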
+ reservedErrorCheck(loc, *blockName); + if (instanceName) + reservedErrorCheck(loc, *instanceName); + for (unsigned int member = 0; member < typeList.size(); ++member) + reservedErrorCheck(typeList[member].loc, typeList[member].type->getFieldName()); + + // Make default block qualification, and adjust the member qualifications + + TQualifier defaultQualification; + switch (currentBlockQualifier.storage) { + case EvqUniform: defaultQualification = globalUniformDefaults; break; + case EvqBuffer: defaultQualification = globalBufferDefaults; break; + case EvqVaryingIn: defaultQualification = globalInputDefaults; break; + case EvqVaryingOut: defaultQualification = globalOutputDefaults; break; + default: defaultQualification.clear(); break; + } + + // Special case for "push_constant uniform", which has a default of std430, + // contrary to normal uniform defaults, and can't have a default tracked for it. + if ((currentBlockQualifier.isPushConstant() && !currentBlockQualifier.hasPacking()) || + (currentBlockQualifier.isShaderRecord() && !currentBlockQualifier.hasPacking())) + currentBlockQualifier.layoutPacking = ElpStd430; + + // Special case for "taskNV in/out", which has a default of std430, + if (currentBlockQualifier.isTaskMemory() && !currentBlockQualifier.hasPacking()) + currentBlockQualifier.layoutPacking = ElpStd430; + + // fix and check for member layout qualifiers + + mergeObjectLayoutQualifiers(defaultQualification, currentBlockQualifier, true); + + // "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts." + if (currentBlockQualifier.hasAlign()) { + if (defaultQualification.layoutPacking != ElpStd140 && + defaultQualification.layoutPacking != ElpStd430 && + defaultQualification.layoutPacking != ElpScalar) { + error(loc, "can only be used with std140, std430, or scalar layout packing", "align", ""); + defaultQualification.layoutAlign = -1; + } + } + + bool memberWithLocation = false; + bool memberWithoutLocation = false; + bool memberWithPerViewQualifier = false; + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; +#ifndef GLSLANG_WEB + if (memberQualifier.hasStream()) { + if (defaultQualification.layoutStream != memberQualifier.layoutStream) + error(memberLoc, "member cannot contradict block", "stream", ""); + } + + // "This includes a block's inheritance of the + // current global default buffer, a block member's inheritance of the block's + // buffer, and the requirement that any *xfb_buffer* declared on a block + // member must match the buffer inherited from the block." 
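+        // (Hypothetical example: with 'layout(xfb_buffer = 2) out;' in effect, a member of a
+        // plain 'out' block that itself declares 'layout(xfb_buffer = 1)' contradicts the
+        // buffer the block inherited, which is what the check below diagnoses.)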
+        if (memberQualifier.hasXfbBuffer()) {
+            if (defaultQualification.layoutXfbBuffer != memberQualifier.layoutXfbBuffer)
+                error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
+        }
+#endif
+
+        if (memberQualifier.hasPacking())
+            error(memberLoc, "member of block cannot have a packing layout qualifier", typeList[member].type->getFieldName().c_str(), "");
+        if (memberQualifier.hasLocation()) {
+            const char* feature = "location on block member";
+            switch (currentBlockQualifier.storage) {
+#ifndef GLSLANG_WEB
+            case EvqVaryingIn:
+            case EvqVaryingOut:
+                requireProfile(memberLoc, ECoreProfile | ECompatibilityProfile | EEsProfile, feature);
+                profileRequires(memberLoc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
+                profileRequires(memberLoc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature);
+                memberWithLocation = true;
+                break;
+#endif
+            default:
+                error(memberLoc, "can only use in an in/out block", feature, "");
+                break;
+            }
+        } else
+            memberWithoutLocation = true;
+
+        // "The offset qualifier can only be used on block members of blocks declared with std140 or std430 layouts."
+        // "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts."
+        if (memberQualifier.hasAlign() || memberQualifier.hasOffset()) {
+            if (defaultQualification.layoutPacking != ElpStd140 &&
+                defaultQualification.layoutPacking != ElpStd430 &&
+                defaultQualification.layoutPacking != ElpScalar)
+                error(memberLoc, "can only be used with std140, std430, or scalar layout packing", "offset/align", "");
+        }
+
+        if (memberQualifier.isPerView()) {
+            memberWithPerViewQualifier = true;
+        }
+
+        TQualifier newMemberQualification = defaultQualification;
+        mergeQualifiers(memberLoc, newMemberQualification, memberQualifier, false);
+        memberQualifier = newMemberQualification;
+    }
+
+    layoutMemberLocationArrayCheck(loc, memberWithLocation, arraySizes);
+
+#ifndef GLSLANG_WEB
+    // Ensure that the block has an XfbBuffer assigned. This is needed
+    // because if the block has an XfbOffset assigned, then it is
+    // assumed that it has implicitly assigned the current global
+    // XfbBuffer, and because its members need to be assigned an
+    // XfbOffset if they lack it.
+ if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) { + if (!currentBlockQualifier.hasXfbBuffer() && currentBlockQualifier.hasXfbOffset()) + currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer; + } +#endif + + // Process the members + fixBlockLocations(loc, currentBlockQualifier, typeList, memberWithLocation, memberWithoutLocation); + fixXfbOffsets(currentBlockQualifier, typeList); + fixBlockUniformOffsets(currentBlockQualifier, typeList); + fixBlockUniformLayoutMatrix(currentBlockQualifier, &typeList, nullptr); + fixBlockUniformLayoutPacking(currentBlockQualifier, &typeList, nullptr); + for (unsigned int member = 0; member < typeList.size(); ++member) + layoutTypeCheck(typeList[member].loc, *typeList[member].type); + +#ifndef GLSLANG_WEB + if (memberWithPerViewQualifier) { + for (unsigned int member = 0; member < typeList.size(); ++member) { + checkAndResizeMeshViewDim(typeList[member].loc, *typeList[member].type, /*isBlockMember*/ true); + } + } +#endif + + // reverse merge, so that currentBlockQualifier now has all layout information + // (can't use defaultQualification directly, it's missing other non-layout-default-class qualifiers) + mergeObjectLayoutQualifiers(currentBlockQualifier, defaultQualification, true); + + // + // Build and add the interface block as a new type named 'blockName' + // + + TType blockType(&typeList, *blockName, currentBlockQualifier); + if (arraySizes != nullptr) + blockType.transferArraySizes(arraySizes); + +#ifndef GLSLANG_WEB + if (arraySizes == nullptr) + ioArrayCheck(loc, blockType, instanceName ? *instanceName : *blockName); + if (currentBlockQualifier.hasBufferReference()) { + + if (currentBlockQualifier.storage != EvqBuffer) + error(loc, "can only be used with buffer", "buffer_reference", ""); + + // Create the block reference type. If it was forward-declared, detect that + // as a referent struct type with no members. Replace the referent type with + // blockType. + TType blockNameType(EbtReference, blockType, *blockName); + TVariable* blockNameVar = new TVariable(blockName, blockNameType, true); + if (! symbolTable.insert(*blockNameVar)) { + TSymbol* existingName = symbolTable.find(*blockName); + if (existingName->getType().isReference() && + existingName->getType().getReferentType()->getStruct() && + existingName->getType().getReferentType()->getStruct()->size() == 0 && + existingName->getType().getQualifier().storage == blockType.getQualifier().storage) { + existingName->getType().getReferentType()->deepCopy(blockType); + } else { + error(loc, "block name cannot be redefined", blockName->c_str(), ""); + } + } + if (!instanceName) { + return; + } + } else +#endif + { + // + // Don't make a user-defined type out of block name; that will cause an error + // if the same block name gets reused in a different interface. + // + // "Block names have no other use within a shader + // beyond interface matching; it is a compile-time error to use a block name at global scope for anything + // other than as a block name (e.g., use of a block name for a global variable name or function name is + // currently reserved)." + // + // Use the symbol table to prevent normal reuse of the block's name, as a variable entry, + // whose type is EbtBlock, but without all the structure; that will come from the type + // the instances point to. + // + TType blockNameType(EbtBlock, blockType.getQualifier().storage); + TVariable* blockNameVar = new TVariable(blockName, blockNameType); + if (! 
symbolTable.insert(*blockNameVar)) { + TSymbol* existingName = symbolTable.find(*blockName); + if (existingName->getType().getBasicType() == EbtBlock) { + if (existingName->getType().getQualifier().storage == blockType.getQualifier().storage) { + error(loc, "Cannot reuse block name within the same interface:", blockName->c_str(), blockType.getStorageQualifierString()); + return; + } + } else { + error(loc, "block name cannot redefine a non-block name", blockName->c_str(), ""); + return; + } + } + } + + // Add the variable, as anonymous or named instanceName. + // Make an anonymous variable if no name was provided. + if (! instanceName) + instanceName = NewPoolTString(""); + + TVariable& variable = *new TVariable(instanceName, blockType); + if (! symbolTable.insert(variable)) { + if (*instanceName == "") + error(loc, "nameless block contains a member that already has a name at global scope", blockName->c_str(), ""); + else + error(loc, "block instance name redefinition", variable.getName().c_str(), ""); + + return; + } + + // Check for general layout qualifier errors + layoutObjectCheck(loc, variable); + +#ifndef GLSLANG_WEB + // fix up + if (isIoResizeArray(blockType)) { + ioArraySymbolResizeList.push_back(&variable); + checkIoArraysConsistency(loc, true); + } else + fixIoArraySize(loc, variable.getWritableType()); +#endif + + // Save it in the AST for linker use. + trackLinkage(variable); +} + +// Do all block-declaration checking regarding the combination of in/out/uniform/buffer +// with a particular stage. +void TParseContext::blockStageIoCheck(const TSourceLoc& loc, const TQualifier& qualifier) +{ + const char *extsrt[2] = { E_GL_NV_ray_tracing, E_GL_EXT_ray_tracing }; + switch (qualifier.storage) { + case EvqUniform: + profileRequires(loc, EEsProfile, 300, nullptr, "uniform block"); + profileRequires(loc, ENoProfile, 140, E_GL_ARB_uniform_buffer_object, "uniform block"); + if (currentBlockQualifier.layoutPacking == ElpStd430 && ! currentBlockQualifier.isPushConstant()) + requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "std430 requires the buffer storage qualifier"); + break; + case EvqBuffer: + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "buffer block"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_shader_storage_buffer_object, "buffer block"); + profileRequires(loc, EEsProfile, 310, nullptr, "buffer block"); + break; + case EvqVaryingIn: + profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "input block"); + // It is a compile-time error to have an input block in a vertex shader or an output block in a fragment shader + // "Compute shaders do not permit user-defined input variables..." + requireStage(loc, (EShLanguageMask)(EShLangTessControlMask|EShLangTessEvaluationMask|EShLangGeometryMask| + EShLangFragmentMask|EShLangMeshNVMask), "input block"); + if (language == EShLangFragment) { + profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "fragment input block"); + } else if (language == EShLangMeshNV && ! 
qualifier.isTaskMemory()) { + error(loc, "input blocks cannot be used in a mesh shader", "out", ""); + } + break; + case EvqVaryingOut: + profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "output block"); + requireStage(loc, (EShLanguageMask)(EShLangVertexMask|EShLangTessControlMask|EShLangTessEvaluationMask| + EShLangGeometryMask|EShLangMeshNVMask|EShLangTaskNVMask), "output block"); + // ES 310 can have a block before shader_io is turned on, so skip this test for built-ins + if (language == EShLangVertex && ! parsingBuiltins) { + profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "vertex output block"); + } else if (language == EShLangMeshNV && qualifier.isTaskMemory()) { + error(loc, "can only use on input blocks in mesh shader", "taskNV", ""); + } else if (language == EShLangTaskNV && ! qualifier.isTaskMemory()) { + error(loc, "output blocks cannot be used in a task shader", "out", ""); + } + break; +#ifndef GLSLANG_WEB + case EvqPayload: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "rayPayloadNV block"); + requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangAnyHitMask | EShLangClosestHitMask | EShLangMissMask), + "rayPayloadNV block"); + break; + case EvqPayloadIn: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "rayPayloadInNV block"); + requireStage(loc, (EShLanguageMask)(EShLangAnyHitMask | EShLangClosestHitMask | EShLangMissMask), + "rayPayloadInNV block"); + break; + case EvqHitAttr: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "hitAttributeNV block"); + requireStage(loc, (EShLanguageMask)(EShLangIntersectMask | EShLangAnyHitMask | EShLangClosestHitMask), "hitAttributeNV block"); + break; + case EvqCallableData: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "callableDataNV block"); + requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), + "callableDataNV block"); + break; + case EvqCallableDataIn: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "callableDataInNV block"); + requireStage(loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV block"); + break; +#endif + default: + error(loc, "only uniform, buffer, in, or out blocks are supported", blockName->c_str(), ""); + break; + } +} + +// Do all block-declaration checking regarding its qualifiers. +void TParseContext::blockQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier, bool /*instanceName*/) +{ + // The 4.5 specification says: + // + // interface-block : + // layout-qualifieropt interface-qualifier block-name { member-list } instance-nameopt ; + // + // interface-qualifier : + // in + // out + // patch in + // patch out + // uniform + // buffer + // + // Note however memory qualifiers aren't included, yet the specification also says + // + // "...memory qualifiers may also be used in the declaration of shader storage blocks..." 
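+    // (For illustration, hypothetical snippets: 'readonly buffer Data { float f[]; };' is
+    // allowed by the memory-qualifier rule quoted above, whereas 'flat in Block { vec4 v; };'
+    // trips the interpolation check below.)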
+ + if (qualifier.isInterpolation()) + error(loc, "cannot use interpolation qualifiers on an interface block", "flat/smooth/noperspective", ""); + if (qualifier.centroid) + error(loc, "cannot use centroid qualifier on an interface block", "centroid", ""); + if (qualifier.isSample()) + error(loc, "cannot use sample qualifier on an interface block", "sample", ""); + if (qualifier.invariant) + error(loc, "cannot use invariant qualifier on an interface block", "invariant", ""); + if (qualifier.isPushConstant()) + intermediate.addPushConstantCount(); + if (qualifier.isShaderRecord()) + intermediate.addShaderRecordCount(); + if (qualifier.isTaskMemory()) + intermediate.addTaskNVCount(); +} + +// +// "For a block, this process applies to the entire block, or until the first member +// is reached that has a location layout qualifier. When a block member is declared with a location +// qualifier, its location comes from that qualifier: The member's location qualifier overrides the block-level +// declaration. Subsequent members are again assigned consecutive locations, based on the newest location, +// until the next member declared with a location qualifier. The values used for locations do not have to be +// declared in increasing order." +void TParseContext::fixBlockLocations(const TSourceLoc& loc, TQualifier& qualifier, TTypeList& typeList, bool memberWithLocation, bool memberWithoutLocation) +{ + // "If a block has no block-level location layout qualifier, it is required that either all or none of its members + // have a location layout qualifier, or a compile-time error results." + if (! qualifier.hasLocation() && memberWithLocation && memberWithoutLocation) + error(loc, "either the block needs a location, or all members need a location, or no members have a location", "location", ""); + else { + if (memberWithLocation) { + // remove any block-level location and make it per *every* member + int nextLocation = 0; // by the rule above, initial value is not relevant + if (qualifier.hasAnyLocation()) { + nextLocation = qualifier.layoutLocation; + qualifier.layoutLocation = TQualifier::layoutLocationEnd; + if (qualifier.hasComponent()) { + // "It is a compile-time error to apply the *component* qualifier to a ... block" + error(loc, "cannot apply to a block", "component", ""); + } + if (qualifier.hasIndex()) { + error(loc, "cannot apply to a block", "index", ""); + } + } + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; + if (! memberQualifier.hasLocation()) { + if (nextLocation >= (int)TQualifier::layoutLocationEnd) + error(memberLoc, "location is too large", "location", ""); + memberQualifier.layoutLocation = nextLocation; + memberQualifier.layoutComponent = TQualifier::layoutComponentEnd; + } + nextLocation = memberQualifier.layoutLocation + intermediate.computeTypeLocationSize( + *typeList[member].type, language); + } + } + } +} + +void TParseContext::fixXfbOffsets(TQualifier& qualifier, TTypeList& typeList) +{ +#ifndef GLSLANG_WEB + // "If a block is qualified with xfb_offset, all its + // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any + // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer + // offsets." + + if (! qualifier.hasXfbBuffer() || ! 
qualifier.hasXfbOffset()) + return; + + int nextOffset = qualifier.layoutXfbOffset; + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + bool contains64BitType = false; + bool contains32BitType = false; + bool contains16BitType = false; + int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType, contains32BitType, contains16BitType); + // see if we need to auto-assign an offset to this member + if (! memberQualifier.hasXfbOffset()) { + // "if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8" + if (contains64BitType) + RoundToPow2(nextOffset, 8); + else if (contains32BitType) + RoundToPow2(nextOffset, 4); + else if (contains16BitType) + RoundToPow2(nextOffset, 2); + memberQualifier.layoutXfbOffset = nextOffset; + } else + nextOffset = memberQualifier.layoutXfbOffset; + nextOffset += memberSize; + } + + // The above gave all block members an offset, so we can take it off the block now, + // which will avoid double counting the offset usage. + qualifier.layoutXfbOffset = TQualifier::layoutXfbOffsetEnd; +#endif +} + +// Calculate and save the offset of each block member, using the recursively +// defined block offset rules and the user-provided offset and align. +// +// Also, compute and save the total size of the block. For the block's size, arrayness +// is not taken into account, as each element is backed by a separate buffer. +// +void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typeList) +{ + if (!qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory()) + return; + if (qualifier.layoutPacking != ElpStd140 && qualifier.layoutPacking != ElpStd430 && qualifier.layoutPacking != ElpScalar) + return; + + int offset = 0; + int memberSize; + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; + + // "When align is applied to an array, it effects only the start of the array, not the array's internal stride." + + // modify just the children's view of matrix layout, if there is one for this member + TLayoutMatrix subMatrixLayout = typeList[member].type->getQualifier().layoutMatrix; + int dummyStride; + int memberAlignment = intermediate.getMemberAlignment(*typeList[member].type, memberSize, dummyStride, qualifier.layoutPacking, + subMatrixLayout != ElmNone ? subMatrixLayout == ElmRowMajor : qualifier.layoutMatrix == ElmRowMajor); + if (memberQualifier.hasOffset()) { + // "The specified offset must be a multiple + // of the base alignment of the type of the block member it qualifies, or a compile-time error results." + if (! IsMultipleOfPow2(memberQualifier.layoutOffset, memberAlignment)) + error(memberLoc, "must be a multiple of the member's alignment", "offset", ""); + + // GLSL: "It is a compile-time error to specify an offset that is smaller than the offset of the previous + // member in the block or that lies within the previous member of the block" + if (spvVersion.spv == 0) { + if (memberQualifier.layoutOffset < offset) + error(memberLoc, "cannot lie in previous members", "offset", ""); + + // "The offset qualifier forces the qualified member to start at or after the specified + // integral-constant expression, which will be its byte offset from the beginning of the buffer. 
+ // "The actual offset of a member is computed as + // follows: If offset was declared, start with that offset, otherwise start with the next available offset." + offset = std::max(offset, memberQualifier.layoutOffset); + } else { + // TODO: Vulkan: "It is a compile-time error to have any offset, explicit or assigned, + // that lies within another member of the block." + + offset = memberQualifier.layoutOffset; + } + } + + // "The actual alignment of a member will be the greater of the specified align alignment and the standard + // (e.g., std140) base alignment for the member's type." + if (memberQualifier.hasAlign()) + memberAlignment = std::max(memberAlignment, memberQualifier.layoutAlign); + + // "If the resulting offset is not a multiple of the actual alignment, + // increase it to the first offset that is a multiple of + // the actual alignment." + RoundToPow2(offset, memberAlignment); + typeList[member].type->getQualifier().layoutOffset = offset; + offset += memberSize; + } +} + +// +// Spread LayoutMatrix to uniform block member, if a uniform block member is a struct, +// we need spread LayoutMatrix to this struct member too. and keep this rule for recursive. +// +void TParseContext::fixBlockUniformLayoutMatrix(TQualifier& qualifier, TTypeList* originTypeList, + TTypeList* tmpTypeList) +{ + assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size()); + for (unsigned int member = 0; member < originTypeList->size(); ++member) { + if (qualifier.layoutPacking != ElpNone) { + if (tmpTypeList == nullptr) { + if (((*originTypeList)[member].type->isMatrix() || + (*originTypeList)[member].type->getBasicType() == EbtStruct) && + (*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) { + (*originTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix; + } + } else { + if (((*tmpTypeList)[member].type->isMatrix() || + (*tmpTypeList)[member].type->getBasicType() == EbtStruct) && + (*tmpTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) { + (*tmpTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix; + } + } + } + + if ((*originTypeList)[member].type->getBasicType() == EbtStruct) { + TQualifier* memberQualifier = nullptr; + // block member can be declare a matrix style, so it should be update to the member's style + if ((*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) { + memberQualifier = &qualifier; + } else { + memberQualifier = &((*originTypeList)[member].type->getQualifier()); + } + + const TType* tmpType = tmpTypeList == nullptr ? + (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type; + + fixBlockUniformLayoutMatrix(*memberQualifier, (*originTypeList)[member].type->getWritableStruct(), + tmpType->getWritableStruct()); + + const TTypeList* structure = recordStructCopy(matrixFixRecord, (*originTypeList)[member].type, tmpType); + + if (tmpTypeList == nullptr) { + (*originTypeList)[member].type->setStruct(const_cast(structure)); + } + if (tmpTypeList != nullptr) { + (*tmpTypeList)[member].type->setStruct(const_cast(structure)); + } + } + } +} + +// +// Spread LayoutPacking to block member, if a block member is a struct, we need spread LayoutPacking to +// this struct member too. and keep this rule for recursive. 
+//
+void TParseContext::fixBlockUniformLayoutPacking(TQualifier& qualifier, TTypeList* originTypeList,
+                                                 TTypeList* tmpTypeList)
+{
+    assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
+    for (unsigned int member = 0; member < originTypeList->size(); ++member) {
+        if (qualifier.layoutPacking != ElpNone) {
+            if (tmpTypeList == nullptr) {
+                if ((*originTypeList)[member].type->getQualifier().layoutPacking == ElpNone) {
+                    (*originTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
+                }
+            } else {
+                if ((*tmpTypeList)[member].type->getQualifier().layoutPacking == ElpNone) {
+                    (*tmpTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
+                }
+            }
+        }
+
+        if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
+            // Deep copy the type into the pool: the same struct used in different blocks
+            // may end up with different layout qualifiers, so a new object is needed to
+            // distinguish between them.
+            const TType* tmpType = tmpTypeList == nullptr ?
+                (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;
+
+            fixBlockUniformLayoutPacking(qualifier, (*originTypeList)[member].type->getWritableStruct(),
+                                         tmpType->getWritableStruct());
+
+            const TTypeList* structure = recordStructCopy(packingFixRecord, (*originTypeList)[member].type, tmpType);
+
+            if (tmpTypeList == nullptr) {
+                (*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+            if (tmpTypeList != nullptr) {
+                (*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+        }
+    }
+}
+
+// For an identifier that is already declared, add more qualification to it.
+void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier)
+{
+    TSymbol* symbol = symbolTable.find(identifier);
+
+    // A forward declaration of a block reference looks to the grammar like adding
+    // a qualifier to an existing symbol. Detect this and create the block reference
+    // type with an empty type list, which will be filled in later in
+    // TParseContext::declareBlock.
+    if (!symbol && qualifier.hasBufferReference()) {
+        TTypeList typeList;
+        TType blockType(&typeList, identifier, qualifier);
+        TType blockNameType(EbtReference, blockType, identifier);
+        TVariable* blockNameVar = new TVariable(&identifier, blockNameType, true);
+        if (! symbolTable.insert(*blockNameVar)) {
+            error(loc, "block name cannot redefine a non-block name", blockName->c_str(), "");
+        }
+        return;
+    }
+
+    if (! symbol) {
+        error(loc, "identifier not previously declared", identifier.c_str(), "");
+        return;
+    }
+    if (symbol->getAsFunction()) {
+        error(loc, "cannot re-qualify a function name", identifier.c_str(), "");
+        return;
+    }
+
+    if (qualifier.isAuxiliary() ||
+        qualifier.isMemory() ||
+        qualifier.isInterpolation() ||
+        qualifier.hasLayout() ||
+        qualifier.storage != EvqTemporary ||
+        qualifier.precision != EpqNone) {
+        error(loc, "cannot add storage, auxiliary, memory, interpolation, layout, or precision qualifier to an existing variable", identifier.c_str(), "");
+        return;
+    }
+
+    // For read-only built-ins, add a new symbol for holding the modified qualifier.
+ // This will bring up an entire block, if a block type has to be modified (e.g., gl_Position inside a block) + if (symbol->isReadOnly()) + symbol = symbolTable.copyUp(symbol); + + if (qualifier.invariant) { + if (intermediate.inIoAccessed(identifier)) + error(loc, "cannot change qualification after use", "invariant", ""); + symbol->getWritableType().getQualifier().invariant = true; + invariantCheck(loc, symbol->getType().getQualifier()); + } else if (qualifier.isNoContraction()) { + if (intermediate.inIoAccessed(identifier)) + error(loc, "cannot change qualification after use", "precise", ""); + symbol->getWritableType().getQualifier().setNoContraction(); + } else if (qualifier.specConstant) { + symbol->getWritableType().getQualifier().makeSpecConstant(); + if (qualifier.hasSpecConstantId()) + symbol->getWritableType().getQualifier().layoutSpecConstantId = qualifier.layoutSpecConstantId; + } else + warn(loc, "unknown requalification", "", ""); +} + +void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, TIdentifierList& identifiers) +{ + for (unsigned int i = 0; i < identifiers.size(); ++i) + addQualifierToExisting(loc, qualifier, *identifiers[i]); +} + +// Make sure 'invariant' isn't being applied to a non-allowed object. +void TParseContext::invariantCheck(const TSourceLoc& loc, const TQualifier& qualifier) +{ + if (! qualifier.invariant) + return; + + bool pipeOut = qualifier.isPipeOutput(); + bool pipeIn = qualifier.isPipeInput(); + if (version >= 300 || (!isEsProfile() && version >= 420)) { + if (! pipeOut) + error(loc, "can only apply to an output", "invariant", ""); + } else { + if ((language == EShLangVertex && pipeIn) || (! pipeOut && ! pipeIn)) + error(loc, "can only apply to an output, or to an input in a non-vertex stage\n", "invariant", ""); + } +} + +// +// Updating default qualifier for the case of a declaration with just a qualifier, +// no type, block, or identifier. +// +void TParseContext::updateStandaloneQualifierDefaults(const TSourceLoc& loc, const TPublicType& publicType) +{ +#ifndef GLSLANG_WEB + if (publicType.shaderQualifiers.vertices != TQualifier::layoutNotSet) { + assert(language == EShLangTessControl || language == EShLangGeometry || language == EShLangMeshNV); + const char* id = (language == EShLangTessControl) ? "vertices" : "max_vertices"; + + if (publicType.qualifier.storage != EvqVaryingOut) + error(loc, "can only apply to 'out'", id, ""); + if (! intermediate.setVertices(publicType.shaderQualifiers.vertices)) + error(loc, "cannot change previously set layout value", id, ""); + + if (language == EShLangTessControl) + checkIoArraysConsistency(loc); + } + if (publicType.shaderQualifiers.primitives != TQualifier::layoutNotSet) { + assert(language == EShLangMeshNV); + const char* id = "max_primitives"; + + if (publicType.qualifier.storage != EvqVaryingOut) + error(loc, "can only apply to 'out'", id, ""); + if (! intermediate.setPrimitives(publicType.shaderQualifiers.primitives)) + error(loc, "cannot change previously set layout value", id, ""); + } + if (publicType.shaderQualifiers.invocations != TQualifier::layoutNotSet) { + if (publicType.qualifier.storage != EvqVaryingIn) + error(loc, "can only apply to 'in'", "invocations", ""); + if (! 
intermediate.setInvocations(publicType.shaderQualifiers.invocations)) + error(loc, "cannot change previously set layout value", "invocations", ""); + } + if (publicType.shaderQualifiers.geometry != ElgNone) { + if (publicType.qualifier.storage == EvqVaryingIn) { + switch (publicType.shaderQualifiers.geometry) { + case ElgPoints: + case ElgLines: + case ElgLinesAdjacency: + case ElgTriangles: + case ElgTrianglesAdjacency: + case ElgQuads: + case ElgIsolines: + if (language == EShLangMeshNV) { + error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + break; + } + if (intermediate.setInputPrimitive(publicType.shaderQualifiers.geometry)) { + if (language == EShLangGeometry) + checkIoArraysConsistency(loc); + } else + error(loc, "cannot change previously set input primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + break; + default: + error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + } + } else if (publicType.qualifier.storage == EvqVaryingOut) { + switch (publicType.shaderQualifiers.geometry) { + case ElgLines: + case ElgTriangles: + if (language != EShLangMeshNV) { + error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + break; + } + // Fall through + case ElgPoints: + case ElgLineStrip: + case ElgTriangleStrip: + if (! intermediate.setOutputPrimitive(publicType.shaderQualifiers.geometry)) + error(loc, "cannot change previously set output primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + break; + default: + error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + } + } else + error(loc, "cannot apply to:", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), GetStorageQualifierString(publicType.qualifier.storage)); + } + if (publicType.shaderQualifiers.spacing != EvsNone) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (! intermediate.setVertexSpacing(publicType.shaderQualifiers.spacing)) + error(loc, "cannot change previously set vertex spacing", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), ""); + } else + error(loc, "can only apply to 'in'", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), ""); + } + if (publicType.shaderQualifiers.order != EvoNone) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (! intermediate.setVertexOrder(publicType.shaderQualifiers.order)) + error(loc, "cannot change previously set vertex order", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), ""); + } else + error(loc, "can only apply to 'in'", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), ""); + } + if (publicType.shaderQualifiers.pointMode) { + if (publicType.qualifier.storage == EvqVaryingIn) + intermediate.setPointMode(); + else + error(loc, "can only apply to 'in'", "point_mode", ""); + } +#endif + for (int i = 0; i < 3; ++i) { + if (publicType.shaderQualifiers.localSizeNotDefault[i]) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (! 
intermediate.setLocalSize(i, publicType.shaderQualifiers.localSize[i])) + error(loc, "cannot change previously set size", "local_size", ""); + else { + int max = 0; + if (language == EShLangCompute) { + switch (i) { + case 0: max = resources.maxComputeWorkGroupSizeX; break; + case 1: max = resources.maxComputeWorkGroupSizeY; break; + case 2: max = resources.maxComputeWorkGroupSizeZ; break; + default: break; + } + if (intermediate.getLocalSize(i) > (unsigned int)max) + error(loc, "too large; see gl_MaxComputeWorkGroupSize", "local_size", ""); + } +#ifndef GLSLANG_WEB + else if (language == EShLangMeshNV) { + switch (i) { + case 0: max = resources.maxMeshWorkGroupSizeX_NV; break; + case 1: max = resources.maxMeshWorkGroupSizeY_NV; break; + case 2: max = resources.maxMeshWorkGroupSizeZ_NV; break; + default: break; + } + if (intermediate.getLocalSize(i) > (unsigned int)max) + error(loc, "too large; see gl_MaxMeshWorkGroupSizeNV", "local_size", ""); + } else if (language == EShLangTaskNV) { + switch (i) { + case 0: max = resources.maxTaskWorkGroupSizeX_NV; break; + case 1: max = resources.maxTaskWorkGroupSizeY_NV; break; + case 2: max = resources.maxTaskWorkGroupSizeZ_NV; break; + default: break; + } + if (intermediate.getLocalSize(i) > (unsigned int)max) + error(loc, "too large; see gl_MaxTaskWorkGroupSizeNV", "local_size", ""); + } +#endif + else { + assert(0); + } + + // Fix the existing constant gl_WorkGroupSize with this new information. + TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize"); + if (workGroupSize != nullptr) + workGroupSize->getWritableConstArray()[i].setUConst(intermediate.getLocalSize(i)); + } + } else + error(loc, "can only apply to 'in'", "local_size", ""); + } + if (publicType.shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (! 
intermediate.setLocalSizeSpecId(i, publicType.shaderQualifiers.localSizeSpecId[i]))
+                    error(loc, "cannot change previously set size", "local_size", "");
+            } else
+                error(loc, "can only apply to 'in'", "local_size id", "");
+            // Set the workgroup built-in variable as a specialization constant
+            TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
+            if (workGroupSize != nullptr)
+                workGroupSize->getWritableType().getQualifier().specConstant = true;
+        }
+    }
+
+#ifndef GLSLANG_WEB
+    if (publicType.shaderQualifiers.earlyFragmentTests) {
+        if (publicType.qualifier.storage == EvqVaryingIn)
+            intermediate.setEarlyFragmentTests();
+        else
+            error(loc, "can only apply to 'in'", "early_fragment_tests", "");
+    }
+    if (publicType.shaderQualifiers.postDepthCoverage) {
+        if (publicType.qualifier.storage == EvqVaryingIn)
+            intermediate.setPostDepthCoverage();
+        else
+            error(loc, "can only apply to 'in'", "post_depth_coverage", "");
+    }
+    if (publicType.shaderQualifiers.hasBlendEquation()) {
+        if (publicType.qualifier.storage != EvqVaryingOut)
+            error(loc, "can only apply to 'out'", "blend equation", "");
+    }
+    if (publicType.shaderQualifiers.interlockOrdering) {
+        if (publicType.qualifier.storage == EvqVaryingIn) {
+            if (!intermediate.setInterlockOrdering(publicType.shaderQualifiers.interlockOrdering))
+                error(loc, "cannot change previously set fragment shader interlock ordering", TQualifier::getInterlockOrderingString(publicType.shaderQualifiers.interlockOrdering), "");
+        }
+        else
+            error(loc, "can only apply to 'in'", TQualifier::getInterlockOrderingString(publicType.shaderQualifiers.interlockOrdering), "");
+    }
+
+    if (publicType.shaderQualifiers.layoutDerivativeGroupQuads &&
+        publicType.shaderQualifiers.layoutDerivativeGroupLinear) {
+        error(loc, "cannot be both specified", "derivative_group_quadsNV and derivative_group_linearNV", "");
+    }
+
+    if (publicType.shaderQualifiers.layoutDerivativeGroupQuads) {
+        if (publicType.qualifier.storage == EvqVaryingIn) {
+            if ((intermediate.getLocalSize(0) & 1) ||
+                (intermediate.getLocalSize(1) & 1))
+                error(loc, "requires local_size_x and local_size_y to be multiple of two", "derivative_group_quadsNV", "");
+            else
+                intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupQuads);
+        }
+        else
+            error(loc, "can only apply to 'in'", "derivative_group_quadsNV", "");
+    }
+    if (publicType.shaderQualifiers.layoutDerivativeGroupLinear) {
+        if (publicType.qualifier.storage == EvqVaryingIn) {
+            if ((intermediate.getLocalSize(0) *
+                 intermediate.getLocalSize(1) *
+                 intermediate.getLocalSize(2)) % 4 != 0)
+                error(loc, "requires total group size to be multiple of four", "derivative_group_linearNV", "");
+            else
+                intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupLinear);
+        }
+        else
+            error(loc, "can only apply to 'in'", "derivative_group_linearNV", "");
+    }
+    // Check mesh out array sizes, once all the necessary out qualifiers are defined.
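+    // (e.g., hypothetically, an NV mesh shader declaring
+    //     layout(triangles, max_vertices = 64, max_primitives = 126) out;
+    // has all three pieces set, so the consistency check below can run.)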
+ if ((language == EShLangMeshNV) && + (intermediate.getVertices() != TQualifier::layoutNotSet) && + (intermediate.getPrimitives() != TQualifier::layoutNotSet) && + (intermediate.getOutputPrimitive() != ElgNone)) + { + checkIoArraysConsistency(loc); + } + + if (publicType.shaderQualifiers.layoutPrimitiveCulling) { + if (publicType.qualifier.storage != EvqTemporary) + error(loc, "layout qualifier can not have storage qualifiers", "primitive_culling","", ""); + else { + intermediate.setLayoutPrimitiveCulling(); + } + // Exit early as further checks are not valid + return; + } +#endif + const TQualifier& qualifier = publicType.qualifier; + + if (qualifier.isAuxiliary() || + qualifier.isMemory() || + qualifier.isInterpolation() || + qualifier.precision != EpqNone) + error(loc, "cannot use auxiliary, memory, interpolation, or precision qualifier in a default qualifier declaration (declaration with no type)", "qualifier", ""); + + // "The offset qualifier can only be used on block members of blocks..." + // "The align qualifier can only be used on blocks or block members..." + if (qualifier.hasOffset() || + qualifier.hasAlign()) + error(loc, "cannot use offset or align qualifiers in a default qualifier declaration (declaration with no type)", "layout qualifier", ""); + + layoutQualifierCheck(loc, qualifier); + + switch (qualifier.storage) { + case EvqUniform: + if (qualifier.hasMatrix()) + globalUniformDefaults.layoutMatrix = qualifier.layoutMatrix; + if (qualifier.hasPacking()) + globalUniformDefaults.layoutPacking = qualifier.layoutPacking; + break; + case EvqBuffer: + if (qualifier.hasMatrix()) + globalBufferDefaults.layoutMatrix = qualifier.layoutMatrix; + if (qualifier.hasPacking()) + globalBufferDefaults.layoutPacking = qualifier.layoutPacking; + break; + case EvqVaryingIn: + break; + case EvqVaryingOut: +#ifndef GLSLANG_WEB + if (qualifier.hasStream()) + globalOutputDefaults.layoutStream = qualifier.layoutStream; + if (qualifier.hasXfbBuffer()) + globalOutputDefaults.layoutXfbBuffer = qualifier.layoutXfbBuffer; + if (globalOutputDefaults.hasXfbBuffer() && qualifier.hasXfbStride()) { + if (! 
intermediate.setXfbBufferStride(globalOutputDefaults.layoutXfbBuffer, qualifier.layoutXfbStride))
+                error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer);
+        }
+#endif
+        break;
+    default:
+        error(loc, "default qualifier requires 'uniform', 'buffer', 'in', or 'out' storage qualification", "", "");
+        return;
+    }
+
+    if (qualifier.hasBinding())
+        error(loc, "cannot declare a default, include a type or full declaration", "binding", "");
+    if (qualifier.hasAnyLocation())
+        error(loc, "cannot declare a default, use a full declaration", "location/component/index", "");
+    if (qualifier.hasXfbOffset())
+        error(loc, "cannot declare a default, use a full declaration", "xfb_offset", "");
+    if (qualifier.isPushConstant())
+        error(loc, "cannot declare a default, can only be used on a block", "push_constant", "");
+    if (qualifier.hasBufferReference())
+        error(loc, "cannot declare a default, can only be used on a block", "buffer_reference", "");
+    if (qualifier.hasSpecConstantId())
+        error(loc, "cannot declare a default, can only be used on a scalar", "constant_id", "");
+    if (qualifier.isShaderRecord())
+        error(loc, "cannot declare a default, can only be used on a block", "shaderRecordNV", "");
+}
+
+//
+// Take the sequence of statements that has been built up since the last case/default,
+// put it on the list of top-level nodes for the current (inner-most) switch statement,
+// and follow that by the case/default we are on now. (See switch topology comment on
+// TIntermSwitch.)
+//
+void TParseContext::wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode)
+{
+    TIntermSequence* switchSequence = switchSequenceStack.back();
+
+    if (statements) {
+        if (switchSequence->size() == 0)
+            error(statements->getLoc(), "cannot have statements before first case/default label", "switch", "");
+        statements->setOperator(EOpSequence);
+        switchSequence->push_back(statements);
+    }
+    if (branchNode) {
+        // check all previous cases for the same label (or both are 'default')
+        for (unsigned int s = 0; s < switchSequence->size(); ++s) {
+            TIntermBranch* prevBranch = (*switchSequence)[s]->getAsBranchNode();
+            if (prevBranch) {
+                TIntermTyped* prevExpression = prevBranch->getExpression();
+                TIntermTyped* newExpression = branchNode->getAsBranchNode()->getExpression();
+                if (prevExpression == nullptr && newExpression == nullptr)
+                    error(branchNode->getLoc(), "duplicate label", "default", "");
+                else if (prevExpression != nullptr &&
+                         newExpression != nullptr &&
+                         prevExpression->getAsConstantUnion() &&
+                         newExpression->getAsConstantUnion() &&
+                         prevExpression->getAsConstantUnion()->getConstArray()[0].getIConst() ==
+                         newExpression->getAsConstantUnion()->getConstArray()[0].getIConst())
+                    error(branchNode->getLoc(), "duplicated value", "case", "");
+            }
+        }
+        switchSequence->push_back(branchNode);
+    }
+}
+
+//
+// Turn the top-level node sequence built up by wrapupSwitchSubsequence()
+// into a switch node.
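+// (Illustration, hypothetical: 'switch (3.0) { ... }' is rejected just below because the
+// selector must be a scalar int or uint expression.)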
+//
+TIntermNode* TParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expression, TIntermAggregate* lastStatements)
+{
+    profileRequires(loc, EEsProfile, 300, nullptr, "switch statements");
+    profileRequires(loc, ENoProfile, 130, nullptr, "switch statements");
+
+    wrapupSwitchSubsequence(lastStatements, nullptr);
+
+    if (expression == nullptr ||
+        (expression->getBasicType() != EbtInt && expression->getBasicType() != EbtUint) ||
+        expression->getType().isArray() || expression->getType().isMatrix() || expression->getType().isVector())
+            error(loc, "condition must be a scalar integer expression", "switch", "");
+
+    // If there is nothing to do, drop the switch but still execute the expression
+    TIntermSequence* switchSequence = switchSequenceStack.back();
+    if (switchSequence->size() == 0)
+        return expression;
+
+    if (lastStatements == nullptr) {
+        // This was originally an ERROR, because early versions of the specification said
+        // "it is an error to have no statement between a label and the end of the switch statement."
+        // The specifications were updated to remove this (being ill-defined what a "statement" was),
+        // so, this became a warning. However, 3.0 tests still check for the error.
+        if (isEsProfile() && version <= 300 && ! relaxedErrors())
+            error(loc, "last case/default label not followed by statements", "switch", "");
+        else
+            warn(loc, "last case/default label not followed by statements", "switch", "");
+
+        // emulate a break for error recovery
+        lastStatements = intermediate.makeAggregate(intermediate.addBranch(EOpBreak, loc));
+        lastStatements->setOperator(EOpSequence);
+        switchSequence->push_back(lastStatements);
+    }
+
+    TIntermAggregate* body = new TIntermAggregate(EOpSequence);
+    body->getSequence() = *switchSequenceStack.back();
+    body->setLoc(loc);
+
+    TIntermSwitch* switchNode = new TIntermSwitch(expression, body);
+    switchNode->setLoc(loc);
+
+    return switchNode;
+}
+
+//
+// When a struct is used in a block and has its own layout packing or layout matrix,
+// record the origin structure of the struct in a map, and record the structure copy
+// in the copy table.
+//
+const TTypeList* TParseContext::recordStructCopy(TStructRecord& record, const TType* originType, const TType* tmpType)
+{
+    size_t memberCount = tmpType->getStruct()->size();
+    size_t originHash = 0, tmpHash = 0;
+    std::hash<size_t> hasher;
+    for (size_t i = 0; i < memberCount; i++) {
+        size_t originMemberHash = hasher(originType->getStruct()->at(i).type->getQualifier().layoutPacking +
+                                         originType->getStruct()->at(i).type->getQualifier().layoutMatrix);
+        size_t tmpMemberHash = hasher(tmpType->getStruct()->at(i).type->getQualifier().layoutPacking +
+                                      tmpType->getStruct()->at(i).type->getQualifier().layoutMatrix);
+        originHash = hasher((originHash ^ originMemberHash) << 1);
+        tmpHash = hasher((tmpHash ^ tmpMemberHash) << 1);
+    }
+    const TTypeList* originStruct = originType->getStruct();
+    const TTypeList* tmpStruct = tmpType->getStruct();
+    if (originHash != tmpHash) {
+        auto fixRecords = record.find(originStruct);
+        if (fixRecords != record.end()) {
+            auto fixRecord = fixRecords->second.find(tmpHash);
+            if (fixRecord != fixRecords->second.end()) {
+                return fixRecord->second;
+            } else {
+                record[originStruct][tmpHash] = tmpStruct;
+                return tmpStruct;
+            }
+        } else {
+            record[originStruct] = std::map<size_t, const TTypeList*>();
+            record[originStruct][tmpHash] = tmpStruct;
+            return tmpStruct;
+        }
+    }
+    return originStruct;
+}
+
+} // end namespace glslang
+
diff --git a/dep/glslang/glslang/MachineIndependent/ParseHelper.h b/dep/glslang/glslang/MachineIndependent/ParseHelper.h
diff --git a/dep/glslang/glslang/MachineIndependent/ParseHelper.h b/dep/glslang/glslang/MachineIndependent/ParseHelper.h new file mode 100644 index 000000000..8b262c946 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/ParseHelper.h @@ -0,0 +1,532 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// This header defines a two-level parse-helper hierarchy, derived from +// TParseVersions: +// - TParseContextBase: sharable across multiple parsers +// - TParseContext: GLSL specific helper +// + +#ifndef _PARSER_HELPER_INCLUDED_ +#define _PARSER_HELPER_INCLUDED_ + +#include <cstdarg> +#include <functional> + +#include "parseVersions.h" +#include "../Include/ShHandle.h" +#include "SymbolTable.h" +#include "localintermediate.h" +#include "Scan.h" +#include "attribute.h" + +namespace glslang { + +struct TPragma { + TPragma(bool o, bool d) : optimize(o), debug(d) { } + bool optimize; + bool debug; + TPragmaTable pragmaTable; +}; + +class TScanContext; +class TPpContext; + +typedef std::set<int> TIdSetType; +typedef std::map<const TTypeList*, std::map<size_t, const TTypeList*>> TStructRecord; + +// +// Sharable code (as well as what's in TParseVersions) across +// parse helpers.
+// +class TParseContextBase : public TParseVersions { +public: + TParseContextBase(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, int version, + EProfile profile, const SpvVersion& spvVersion, EShLanguage language, + TInfoSink& infoSink, bool forwardCompatible, EShMessages messages, + const TString* entryPoint = nullptr) + : TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages), + scopeMangler("::"), + symbolTable(symbolTable), + statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0), + postEntryPointReturn(false), + contextPragma(true, false), + beginInvocationInterlockCount(0), endInvocationInterlockCount(0), + parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr), + limits(resources.limits), + globalUniformBlock(nullptr), + globalUniformBinding(TQualifier::layoutBindingEnd), + globalUniformSet(TQualifier::layoutSetEnd) + { + if (entryPoint != nullptr) + sourceEntryPointName = *entryPoint; + } + virtual ~TParseContextBase() { } + +#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL) + virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...); + virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...); + virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...); + virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...); +#endif + + virtual void setLimits(const TBuiltInResource&) = 0; + + void checkIndex(const TSourceLoc&, const TType&, int& index); + + EShLanguage getLanguage() const { return language; } + void setScanContext(TScanContext* c) { scanContext = c; } + TScanContext* getScanContext() const { return scanContext; } + void setPpContext(TPpContext* c) { ppContext = c; } + TPpContext* getPpContext() const { return ppContext; } + + virtual void setLineCallback(const std::function<void(int, int, bool, int, const char*)>& func) { lineCallback = func; } + virtual void setExtensionCallback(const std::function<void(int, const char*, const char*)>& func) { extensionCallback = func; } + virtual void setVersionCallback(const std::function<void(int, int, const char*)>& func) { versionCallback = func; } + virtual void setPragmaCallback(const std::function<void(int, const TVector<TString>&)>& func) { pragmaCallback = func; } + virtual void setErrorCallback(const std::function<void(int, const char*)>& func) { errorCallback = func; } + + virtual void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) = 0; + virtual bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) = 0; + virtual bool lineDirectiveShouldSetNextLine() const = 0; + virtual void handlePragma(const TSourceLoc&, const TVector<TString>&) = 0; + + virtual bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) = 0; + + virtual void notifyVersion(int line, int version, const char* type_string) + { + if (versionCallback) + versionCallback(line, version, type_string); + } + virtual void notifyErrorDirective(int line, const char* error_message) + { + if (errorCallback) + errorCallback(line, error_message); + } + virtual void notifyLineDirective(int curLineNo, int newLineNo, bool hasSource, int sourceNum, const char* sourceName) + { + if (lineCallback) + lineCallback(curLineNo, newLineNo, hasSource, sourceNum, sourceName); + } + virtual void notifyExtensionDirective(int line, const char* extension, const char* behavior) + { + if (extensionCallback) + extensionCallback(line, extension, behavior); + }
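The notify* helpers above forward each preprocessed directive to whatever an embedder registered through the set*Callback functions. A sketch of wiring two of them up (illustrative; installDirectiveLogging is a hypothetical helper, and the lambda shapes match the setter signatures in this header):

```cpp
#include <cstdio>
#include "ParseHelper.h"

// Hypothetical embedder hook-up: log #version and #error directives as the
// preprocessor encounters them.
void installDirectiveLogging(glslang::TParseContextBase& ctx)
{
    ctx.setVersionCallback([](int line, int version, const char* profile) {
        std::printf("line %d: #version %d %s\n", line, version, profile ? profile : "");
    });
    ctx.setErrorCallback([](int line, const char* message) {
        std::fprintf(stderr, "line %d: #error %s\n", line, message);
    });
}
```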
+ +#ifdef ENABLE_HLSL + // Manage the global uniform block (default uniforms in GLSL, $Global in HLSL) + virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr); +#endif + + // Potentially rename shader entry point function + void renameShaderFunction(TString*& name) const + { + // Replace the entry point name given in the shader with the real entry point name, + // if there is a substitution. + if (name != nullptr && *name == sourceEntryPointName && intermediate.getEntryPointName().size() > 0) + name = NewPoolTString(intermediate.getEntryPointName().c_str()); + } + + virtual bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*); + virtual void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*); + + const char* const scopeMangler; + + // Basic parsing state, easily accessible to the grammar + + TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile + int statementNestingLevel; // 0 if outside all flow control or compound statements + int loopNestingLevel; // 0 if outside all loops + int structNestingLevel; // 0 if outside blocks and structures + int controlFlowNestingLevel; // 0 if outside all flow control + const TType* currentFunctionType; // the return type of the function that's currently being parsed + bool functionReturnsValue; // true if a non-void function has a return + // if inside a function, true if the function is the entry point and this is after a return statement + bool postEntryPointReturn; + // case, node, case, case, node, ...; ensure only one node between cases; stack of them for nesting + TList<TIntermSequence*> switchSequenceStack; + // the statementNestingLevel the current switch statement is at, which must match the level of its case statements + TList<int> switchLevel; + struct TPragma contextPragma; + int beginInvocationInterlockCount; + int endInvocationInterlockCount; + +protected: + TParseContextBase(TParseContextBase&); + TParseContextBase& operator=(TParseContextBase&); + + const bool parsingBuiltins; // true if parsing built-in symbols/functions + TVector<TSymbol*> linkageSymbols; // will be transferred to 'linkage', after all editing is done, order preserving + TScanContext* scanContext; + TPpContext* ppContext; + TBuiltInResource resources; + TLimits& limits; + TString sourceEntryPointName; + + // These, if set, will be called when a line, pragma ... is preprocessed. + // They will be called with any parameters to the original directive.
+ std::function<void(int, int, bool, int, const char*)> lineCallback; + std::function<void(int, const TVector<TString>&)> pragmaCallback; + std::function<void(int, int, const char*)> versionCallback; + std::function<void(int, const char*, const char*)> extensionCallback; + std::function<void(int, const char*)> errorCallback; + + // see implementation for detail + const TFunction* selectFunction(const TVector<const TFunction*>, const TFunction&, + std::function<bool(const TType& from, const TType& to, TOperator op, int arg)>, + std::function<bool(const TType& from, const TType& to1, const TType& to2)>, + /* output */ bool& tie); + + virtual void parseSwizzleSelector(const TSourceLoc&, const TString&, int size, + TSwizzleSelectors<TVectorSelector>&); + + // Manage the global uniform block (default uniforms in GLSL, $Global in HLSL) + TVariable* globalUniformBlock; // the actual block, inserted into the symbol table + unsigned int globalUniformBinding; // the block's binding number + unsigned int globalUniformSet; // the block's set number + int firstNewMember; // the index of the first member not yet inserted into the symbol table + // override this to set the language-specific name + virtual const char* getGlobalUniformBlockName() const { return ""; } + virtual void setUniformBlockDefaults(TType&) const { } + virtual void finalizeGlobalUniformBlockLayout(TVariable&) { } + virtual void outputMessage(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, TPrefixType prefix, + va_list args); + virtual void trackLinkage(TSymbol& symbol); + virtual void makeEditable(TSymbol*&); + virtual TVariable* getEditableVariable(const char* name); + virtual void finish(); +}; + +// +// Manage the state for when to respect precision qualifiers and when to warn about +// the defaults being different than might be expected. +// +class TPrecisionManager { +public: + TPrecisionManager() : obey(false), warn(false), explicitIntDefault(false), explicitFloatDefault(false) { } + virtual ~TPrecisionManager() {} + + void respectPrecisionQualifiers() { obey = true; } + bool respectingPrecisionQualifiers() const { return obey; } + bool shouldWarnAboutDefaults() const { return warn; } + void defaultWarningGiven() { warn = false; } + void warnAboutDefaults() { warn = true; } + void explicitIntDefaultSeen() + { + explicitIntDefault = true; + if (explicitFloatDefault) + warn = false; + } + void explicitFloatDefaultSeen() + { + explicitFloatDefault = true; + if (explicitIntDefault) + warn = false; + } + +protected: + bool obey; // respect precision qualifiers + bool warn; // need to give a warning about the defaults + bool explicitIntDefault; // user set the default for int/uint + bool explicitFloatDefault; // user set the default for float +};
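TPrecisionManager is a small state machine: a pending warning about surprising default precisions is cleared only once the shader has set both the integer and float defaults explicitly. A sketch of that flow (illustrative driver, not part of the header):

```cpp
#include "ParseHelper.h"

// The warn flag survives an explicit int default alone; it is cleared only
// when the float default has been made explicit as well.
bool precisionWarningStillPending()
{
    glslang::TPrecisionManager pm;
    pm.warnAboutDefaults();        // a default precision is about to matter
    pm.explicitIntDefaultSeen();   // e.g. "precision highp int;" seen
    pm.explicitFloatDefaultSeen(); // both defaults explicit -> warning cleared
    return pm.shouldWarnAboutDefaults();   // now false
}
```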
+ +// +// GLSL-specific parse helper. Should have GLSL in the name, but that's +// too big of a change for comparing branches at the moment, and perhaps +// impacts downstream consumers as well. +// +class TParseContext : public TParseContextBase { +public: + TParseContext(TSymbolTable&, TIntermediate&, bool parsingBuiltins, int version, EProfile, const SpvVersion& spvVersion, EShLanguage, TInfoSink&, + bool forwardCompatible = false, EShMessages messages = EShMsgDefault, + const TString* entryPoint = nullptr); + virtual ~TParseContext(); + + bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); } + void setPrecisionDefaults(); + + void setLimits(const TBuiltInResource&) override; + bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override; + void parserError(const char* s); // for bison's yyerror + + void reservedErrorCheck(const TSourceLoc&, const TString&); + void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) override; + bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) override; + bool lineDirectiveShouldSetNextLine() const override; + bool builtInName(const TString&); + + void handlePragma(const TSourceLoc&, const TVector<TString>&) override; + TIntermTyped* handleVariable(const TSourceLoc&, TSymbol* symbol, const TString* string); + TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index); + void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index); + +#ifndef GLSLANG_WEB + void makeEditable(TSymbol*&) override; + void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier); +#endif + bool isIoResizeArray(const TType&) const; + void fixIoArraySize(const TSourceLoc&, TType&); + void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base); + void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false); + int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const; + void checkIoArrayConsistency(const TSourceLoc&, int requiredSize, const char* feature, TType&, const TString&); + + TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right); + TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode); + TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field); + TIntermTyped* handleDotSwizzle(const TSourceLoc&, TIntermTyped* base, const TString& field); + void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, int member, const TString& memberName); + TFunction* handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype); + TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&); + TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*); + TIntermTyped* handleBuiltInFunctionCall(TSourceLoc, TIntermNode* arguments, const TFunction& function); + void computeBuiltinPrecisions(TIntermTyped&, const TFunction&); + TIntermNode* handleReturnValue(const TSourceLoc&, TIntermTyped*); + void checkLocation(const TSourceLoc&, TOperator); + TIntermTyped* handleLengthMethod(const TSourceLoc&, TFunction*, TIntermNode*); + void addInputArgumentConversions(const TFunction&, TIntermNode*&) const; + TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermAggregate&) const; + void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&); + void nonOpBuiltInCheck(const TSourceLoc&, const TFunction&, TIntermAggregate&); + void userFunctionCallCheck(const TSourceLoc&, TIntermAggregate&); + void
samplerConstructorLocationCheck(const TSourceLoc&, const char* token, TIntermNode*); + TFunction* handleConstructorCall(const TSourceLoc&, const TPublicType&); + void handlePrecisionQualifier(const TSourceLoc&, TQualifier&, TPrecisionQualifier); + void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier); + void memorySemanticsCheck(const TSourceLoc&, const TFunction&, const TIntermOperator& callNode); + + void assignError(const TSourceLoc&, const char* op, TString left, TString right); + void unaryOpError(const TSourceLoc&, const char* op, TString operand); + void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right); + void variableCheck(TIntermTyped*& nodePtr); + bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override; + void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override; + void constantValueCheck(TIntermTyped* node, const char* token); + void integerCheck(const TIntermTyped* node, const char* token); + void globalCheck(const TSourceLoc&, const char* token); + bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&); + bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&); + void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&, const char *sizeType); + bool arrayQualifierError(const TSourceLoc&, const TQualifier&); + bool arrayError(const TSourceLoc&, const TType&); + void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&); + void structArrayCheck(const TSourceLoc&, const TType& structure); + void arraySizesCheck(const TSourceLoc&, const TQualifier&, TArraySizes*, const TIntermTyped* initializer, bool lastMember); + void arrayOfArrayVersionCheck(const TSourceLoc&, const TArraySizes*); + bool voidErrorCheck(const TSourceLoc&, const TString&, TBasicType); + void boolCheck(const TSourceLoc&, const TIntermTyped*); + void boolCheck(const TSourceLoc&, const TPublicType&); + void samplerCheck(const TSourceLoc&, const TType&, const TString& identifier, TIntermTyped* initializer); + void atomicUintCheck(const TSourceLoc&, const TType&, const TString& identifier); + void accStructCheck(const TSourceLoc & loc, const TType & type, const TString & identifier); + void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier); + void memberQualifierCheck(glslang::TPublicType&); + void globalQualifierFixCheck(const TSourceLoc&, TQualifier&); + void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&); + bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType); + void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force); + void setDefaultPrecision(const TSourceLoc&, TPublicType&, TPrecisionQualifier); + int computeSamplerTypeIndex(TSampler&); + TPrecisionQualifier getDefaultPrecision(TPublicType&); + void precisionQualifierCheck(const TSourceLoc&, TBasicType, TQualifier&); + void parameterTypeCheck(const TSourceLoc&, TStorageQualifier qualifier, const TType& type); + bool containsFieldWithBasicType(const TType& type ,TBasicType basicType); + TSymbol* redeclareBuiltinVariable(const TSourceLoc&, const TString&, const TQualifier&, const TShaderQualifiers&); + void redeclareBuiltinBlock(const TSourceLoc&, TTypeList& typeList, const TString& blockName, const TString* instanceName, TArraySizes* arraySizes); + void paramCheckFixStorage(const TSourceLoc&, const TStorageQualifier&, TType& type); + void paramCheckFix(const TSourceLoc&, const 
TQualifier&, TType& type); + void nestedBlockCheck(const TSourceLoc&); + void nestedStructCheck(const TSourceLoc&); + void arrayObjectCheck(const TSourceLoc&, const TType&, const char* op); + void opaqueCheck(const TSourceLoc&, const TType&, const char* op); + void referenceCheck(const TSourceLoc&, const TType&, const char* op); + void storage16BitAssignmentCheck(const TSourceLoc&, const TType&, const char* op); + void specializationCheck(const TSourceLoc&, const TType&, const char* op); + void structTypeCheck(const TSourceLoc&, TPublicType&); + void inductiveLoopCheck(const TSourceLoc&, TIntermNode* init, TIntermLoop* loop); + void arrayLimitCheck(const TSourceLoc&, const TString&, int size); + void limitCheck(const TSourceLoc&, int value, const char* limit, const char* feature); + + void inductiveLoopBodyCheck(TIntermNode*, int loopIndexId, TSymbolTable&); + void constantIndexExpressionCheck(TIntermNode*); + + void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&); + void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&, const TIntermTyped*); + void mergeObjectLayoutQualifiers(TQualifier& dest, const TQualifier& src, bool inheritOnly); + void layoutObjectCheck(const TSourceLoc&, const TSymbol&); + void layoutMemberLocationArrayCheck(const TSourceLoc&, bool memberWithLocation, TArraySizes* arraySizes); + void layoutTypeCheck(const TSourceLoc&, const TType&); + void layoutQualifierCheck(const TSourceLoc&, const TQualifier&); + void checkNoShaderLayouts(const TSourceLoc&, const TShaderQualifiers&); + void fixOffset(const TSourceLoc&, TSymbol&); + + const TFunction* findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + const TFunction* findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + const TFunction* findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + const TFunction* findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + const TFunction* findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + void declareTypeDefaults(const TSourceLoc&, const TPublicType&); + TIntermNode* declareVariable(const TSourceLoc&, TString& identifier, const TPublicType&, TArraySizes* typeArray = 0, TIntermTyped* initializer = 0); + TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&); + TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&); + TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset); + void inheritMemoryQualifiers(const TQualifier& from, TQualifier& to); + void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0); + void blockStageIoCheck(const TSourceLoc&, const TQualifier&); + void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName); + void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation); + void fixXfbOffsets(TQualifier&, TTypeList&); + void fixBlockUniformOffsets(TQualifier&, TTypeList&); + void fixBlockUniformLayoutMatrix(TQualifier&, TTypeList*, TTypeList*); + void fixBlockUniformLayoutPacking(TQualifier&, TTypeList*, TTypeList*); + void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier); + void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&); + void invariantCheck(const TSourceLoc&, const TQualifier&); + void 
updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&); + void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode); + TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body); + const TTypeList* recordStructCopy(TStructRecord&, const TType*, const TType*); + +#ifndef GLSLANG_WEB + TAttributeType attributeFromName(const TString& name) const; + TAttributes* makeAttributes(const TString& identifier) const; + TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const; + TAttributes* mergeAttributes(TAttributes*, TAttributes*) const; + + // Determine selection control from attributes + void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*); + void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*); + // Determine loop control from attributes + void handleLoopAttributes(const TAttributes& attributes, TIntermNode*); +#endif + + void checkAndResizeMeshViewDim(const TSourceLoc&, TType&, bool isBlockMember); + +protected: + void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type); + void inheritGlobalDefaults(TQualifier& dst) const; + TVariable* makeInternalVariable(const char* name, const TType&) const; + TVariable* declareNonArray(const TSourceLoc&, const TString& identifier, const TType&); + void declareArray(const TSourceLoc&, const TString& identifier, const TType&, TSymbol*&); + void checkRuntimeSizable(const TSourceLoc&, const TIntermTyped&); + bool isRuntimeLength(const TIntermTyped&) const; + TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable); + TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer); +#ifndef GLSLANG_WEB + void finish() override; +#endif + +public: + // + // Generally, bison productions, the scanner, and the PP need read/write access to these; just give them direct access + // + + // Current state of parsing + bool inMain; // if inside a function, true if the function is main + const TString* blockName; + TQualifier currentBlockQualifier; + TPrecisionQualifier defaultPrecision[EbtNumTypes]; + TBuiltInResource resources; + TLimits& limits; + +protected: + TParseContext(TParseContext&); + TParseContext& operator=(TParseContext&); + + static const int maxSamplerIndex = EsdNumDims * (EbtNumTypes * (2 * 2 * 2 * 2 * 2)); // see computeSamplerTypeIndex() + TPrecisionQualifier defaultSamplerPrecision[maxSamplerIndex]; + TPrecisionManager precisionManager; + TQualifier globalBufferDefaults; + TQualifier globalUniformDefaults; + TQualifier globalInputDefaults; + TQualifier globalOutputDefaults; + TString currentCaller; // name of last function body entered (not valid when at global scope) +#ifndef GLSLANG_WEB + int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point + bool anyIndexLimits; + TIdSetType inductiveLoopIds; + TVector<TIntermTyped*> needsIndexLimitationChecking; + TStructRecord matrixFixRecord; + TStructRecord packingFixRecord; + + // + // Geometry shader input arrays: + // - array sizing is based on input primitive and/or explicit size + // + // Tessellation control output arrays: + // - array sizing is based on output layout(vertices=...)
and/or explicit size + // + // Both: + // - array sizing is retroactive + // - built-in block redeclarations interact with this + // + // Design: + // - use a per-context "resize-list", a list of symbols whose array sizes + // can be fixed + // + // - the resize-list starts empty at beginning of user-shader compilation, it does + // not have built-ins in it + // + // - on built-in array use: copyUp() symbol and add it to the resize-list + // + // - on user array declaration: add it to the resize-list + // + // - on block redeclaration: copyUp() symbol and add it to the resize-list + // * note, that appropriately gives an error if redeclaring a block that + // was already used and hence already copied-up + // + // - on seeing a layout declaration that sizes the array, fix everything in the + // resize-list, giving errors for mismatch + // + // - on seeing an array size declaration, give errors on mismatch between it and previous + // array-sizing declarations + // + TVector<TSymbol*> ioArraySymbolResizeList; +#endif +}; + +} // end namespace glslang + +#endif // _PARSER_HELPER_INCLUDED_ diff --git a/dep/glslang/glslang/MachineIndependent/PoolAlloc.cpp b/dep/glslang/glslang/MachineIndependent/PoolAlloc.cpp new file mode 100644 index 000000000..84c40f4e7 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/PoolAlloc.cpp @@ -0,0 +1,315 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "../Include/Common.h" +#include "../Include/PoolAlloc.h" + +#include "../Include/InitializeGlobals.h" +#include "../OSDependent/osinclude.h" + +namespace glslang { + +// Process-wide TLS index +OS_TLSIndex PoolIndex; + +// Return the thread-specific current pool. +TPoolAllocator& GetThreadPoolAllocator() +{ + return *static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex)); +} + +// Set the thread-specific current pool. +void SetThreadPoolAllocator(TPoolAllocator* poolAllocator) +{ + OS_SetTLSValue(PoolIndex, poolAllocator); +}
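These two accessors publish the active pool through the process-wide TLS slot that InitializePoolIndex() (below) allocates once per process. A sketch of the usage discipline the allocator expects (illustrative; assumes the default constructor arguments declared in PoolAlloc.h and that InitializePoolIndex() already ran):

```cpp
#include "../Include/PoolAlloc.h"

// Scope-based pool use: everything allocated after push() is reclaimed
// wholesale by pop(); there are no per-object frees.
void poolScopeExample()
{
    glslang::TPoolAllocator pool;
    glslang::SetThreadPoolAllocator(&pool);  // publish for this thread
    pool.push();                             // open an allocation scope
    void* scratch = glslang::GetThreadPoolAllocator().allocate(256);
    (void)scratch;                           // ... use the memory ...
    pool.pop();                              // bulk-free everything since push()
}
```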
+ +// Process-wide set up of the TLS pool storage. +bool InitializePoolIndex() +{ + // Allocate a TLS index. + if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX) + return false; + + return true; +} + +// +// Implement the functionality of the TPoolAllocator class, which +// is documented in PoolAlloc.h. +// +TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) : + pageSize(growthIncrement), + alignment(allocationAlignment), + freeList(nullptr), + inUseList(nullptr), + numCalls(0) +{ + // + // Don't allow page sizes we know are smaller than all common + // OS page sizes. + // + if (pageSize < 4*1024) + pageSize = 4*1024; + + // + // A large currentPageOffset indicates a new page needs to + // be obtained to allocate memory. + // + currentPageOffset = pageSize; + + // + // Adjust alignment to be at least pointer aligned and + // power of 2. + // + size_t minAlign = sizeof(void*); + alignment &= ~(minAlign - 1); + if (alignment < minAlign) + alignment = minAlign; + size_t a = 1; + while (a < alignment) + a <<= 1; + alignment = a; + alignmentMask = a - 1; + + // + // Align header skip + // + headerSkip = minAlign; + if (headerSkip < sizeof(tHeader)) { + headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask; + } + + push(); +} + +TPoolAllocator::~TPoolAllocator() +{ + while (inUseList) { + tHeader* next = inUseList->nextPage; + inUseList->~tHeader(); + delete [] reinterpret_cast<char*>(inUseList); + inUseList = next; + } + + // + // Always delete the free list memory - it can't be being + // (correctly) referenced, whether the pool allocator was + // global or not. We should not check the guard blocks + // here, because we did it already when the block was + // placed into the free list. + // + while (freeList) { + tHeader* next = freeList->nextPage; + delete [] reinterpret_cast<char*>(freeList); + freeList = next; + } +} + +const unsigned char TAllocation::guardBlockBeginVal = 0xfb; +const unsigned char TAllocation::guardBlockEndVal = 0xfe; +const unsigned char TAllocation::userDataFill = 0xcd; + +# ifdef GUARD_BLOCKS + const size_t TAllocation::guardBlockSize = 16; +# else + const size_t TAllocation::guardBlockSize = 0; +# endif + +// +// Check a single guard block for damage +// +#ifdef GUARD_BLOCKS +void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const +#else +void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const +#endif +{ +#ifdef GUARD_BLOCKS + for (size_t x = 0; x < guardBlockSize; x++) { + if (blockMem[x] != val) { + const int maxSize = 80; + char assertMsg[maxSize]; + + // We don't print the assert message. It's here just to be helpful. + snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", + locText, size, data()); + assert(0 && "PoolAlloc: Damage in guard block"); + } + } +#else + assert(guardBlockSize == 0); +#endif +} + +void TPoolAllocator::push() +{ + tAllocState state = { currentPageOffset, inUseList }; + + stack.push_back(state); + + // + // Indicate there is no current page to allocate from. + // + currentPageOffset = pageSize; +}
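Both the constructor above and allocate() below round offsets with the classic power-of-two mask trick, `(x + mask) & ~mask`. A standalone illustration:

```cpp
#include <cstddef>

// Round x up to the next multiple of a power-of-two alignment, exactly as
// currentPageOffset and headerSkip are rounded in this file.
size_t alignUp(size_t x, size_t alignment)
{
    const size_t mask = alignment - 1;  // e.g. 16 -> 0xF
    return (x + mask) & ~mask;          // alignUp(13, 16) == 16; alignUp(32, 16) == 32
}
```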
+ +// +// Do a mass-deallocation of all the individual allocations +// that have occurred since the last push(), or since the +// last pop(), or since the object's creation. +// +// The deallocated pages are saved for future allocations. +// +void TPoolAllocator::pop() +{ + if (stack.size() < 1) + return; + + tHeader* page = stack.back().page; + currentPageOffset = stack.back().offset; + + while (inUseList != page) { + tHeader* nextInUse = inUseList->nextPage; + size_t pageCount = inUseList->pageCount; + + // This technically ends the lifetime of the header as a C++ object, + // but we will still control the memory and reuse it. + inUseList->~tHeader(); // currently, just a debug allocation checker + + if (pageCount > 1) { + delete [] reinterpret_cast<char*>(inUseList); + } else { + inUseList->nextPage = freeList; + freeList = inUseList; + } + inUseList = nextInUse; + } + + stack.pop_back(); +} + +// +// Do a mass-deallocation of all the individual allocations +// that have occurred. +// +void TPoolAllocator::popAll() +{ + while (stack.size() > 0) + pop(); +} + +void* TPoolAllocator::allocate(size_t numBytes) +{ + // If we are using guard blocks, all allocations are bracketed by + // them: [guardblock][allocation][guardblock]. numBytes is how + // much memory the caller asked for. allocationSize is the total + // size including guard blocks. In release build, + // guardBlockSize=0 and this all gets optimized away. + size_t allocationSize = TAllocation::allocationSize(numBytes); + + // + // Just keep some interesting statistics. + // + ++numCalls; + totalBytes += numBytes; + + // + // Do the allocation, most likely case first, for efficiency. + // This step could be moved to be inline sometime. + // + if (currentPageOffset + allocationSize <= pageSize) { + // + // Safe to allocate from currentPageOffset. + // + unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset; + currentPageOffset += allocationSize; + currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask; + + return initializeAllocation(inUseList, memory, numBytes); + } + + if (allocationSize + headerSkip > pageSize) { + // + // Do a multi-page allocation. Don't mix these with the others. + // The OS is efficient at allocating and freeing multiple pages. + // + size_t numBytesToAlloc = allocationSize + headerSkip; + tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]); + if (memory == 0) + return 0; + + // Use placement-new to initialize header + new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize); + inUseList = memory; + + currentPageOffset = pageSize; // make next allocation come from a new page + + // No guard blocks for multi-page allocations (yet) + return reinterpret_cast<void*>(reinterpret_cast<char*>(memory) + headerSkip); + } + + // + // Need a simple page to allocate from. + // + tHeader* memory; + if (freeList) { + memory = freeList; + freeList = freeList->nextPage; + } else { + memory = reinterpret_cast<tHeader*>(::new char[pageSize]); + if (memory == 0) + return 0; + } + + // Use placement-new to initialize header + new(memory) tHeader(inUseList, 1); + inUseList = memory; + + unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip; + currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask; + + return initializeAllocation(inUseList, ret, numBytes); +} + +// +// Check all allocations in a list for damage by calling check on each.
+// +void TAllocation::checkAllocList() const +{ + for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc) + alloc->check(); +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/RemoveTree.cpp b/dep/glslang/glslang/MachineIndependent/RemoveTree.cpp new file mode 100644 index 000000000..1d33bfd20 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/RemoveTree.cpp @@ -0,0 +1,118 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "../Include/intermediate.h" +#include "RemoveTree.h" + +namespace glslang { + +// +// Code to recursively delete the intermediate tree. +// +struct TRemoveTraverser : TIntermTraverser { + TRemoveTraverser() : TIntermTraverser(false, false, true, false) {} + + virtual void visitSymbol(TIntermSymbol* node) + { + delete node; + } + + virtual bool visitBinary(TVisit /* visit*/ , TIntermBinary* node) + { + delete node; + + return true; + } + + virtual bool visitUnary(TVisit /* visit */, TIntermUnary* node) + { + delete node; + + return true; + } + + virtual bool visitAggregate(TVisit /* visit*/ , TIntermAggregate* node) + { + delete node; + + return true; + } + + virtual bool visitSelection(TVisit /* visit*/ , TIntermSelection* node) + { + delete node; + + return true; + } + + virtual bool visitSwitch(TVisit /* visit*/ , TIntermSwitch* node) + { + delete node; + + return true; + } + + virtual void visitConstantUnion(TIntermConstantUnion* node) + { + delete node; + } + + virtual bool visitLoop(TVisit /* visit*/ , TIntermLoop* node) + { + delete node; + + return true; + } + + virtual bool visitBranch(TVisit /* visit*/ , TIntermBranch* node) + { + delete node; + + return true; + } +}; + +// +// Entry point. 
+// +void RemoveAllTreeNodes(TIntermNode* root) +{ + TRemoveTraverser it; + + root->traverse(&it); +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/RemoveTree.h b/dep/glslang/glslang/MachineIndependent/RemoveTree.h new file mode 100644 index 000000000..1ed015626 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/RemoveTree.h @@ -0,0 +1,41 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#pragma once + +namespace glslang { + +void RemoveAllTreeNodes(TIntermNode*); + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/Scan.cpp b/dep/glslang/glslang/MachineIndependent/Scan.cpp new file mode 100644 index 000000000..bd3181a74 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/Scan.cpp @@ -0,0 +1,1827 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2020 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// GLSL scanning, leveraging the scanning done by the preprocessor. +// + +#include <cstring> +#include <unordered_map> +#include <unordered_set> + +#include "../Include/Types.h" +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "attribute.h" +#include "glslang_tab.cpp.h" +#include "ScanContext.h" +#include "Scan.h" + +// preprocessor includes +#include "preprocessor/PpContext.h" +#include "preprocessor/PpTokens.h" + +// Required to avoid missing prototype warnings for some compilers +int yylex(YYSTYPE*, glslang::TParseContext&); + +namespace glslang { + +// read past any white space +void TInputScanner::consumeWhiteSpace(bool& foundNonSpaceTab) +{ + int c = peek(); // don't accidentally consume anything other than whitespace + while (c == ' ' || c == '\t' || c == '\r' || c == '\n') { + if (c == '\r' || c == '\n') + foundNonSpaceTab = true; + get(); + c = peek(); + } +} + +// return true if a comment was actually consumed +bool TInputScanner::consumeComment() +{ + if (peek() != '/') + return false; + + get(); // consume the '/' + int c = peek(); + if (c == '/') { + + // a '//' style comment + get(); // consume the second '/' + c = get(); + do { + while (c != EndOfInput && c != '\\' && c != '\r' && c != '\n') + c = get(); + + if (c == EndOfInput || c == '\r' || c == '\n') { + while (c == '\r' || c == '\n') + c = get(); + + // we reached the end of the comment + break; + } else { + // it's a '\', so we need to keep going, after skipping what's escaped + + // read the skipped character + c = get(); + + // if it's a two-character newline, skip both characters + if (c == '\r' && peek() == '\n') + get(); + c = get(); + } + } while (true); + + // put back the last non-comment character + if (c != EndOfInput) + unget(); + + return true; + } else if (c == '*') { + + // a '/*' style comment + get(); // consume the '*' + c = get(); + do { + while (c != EndOfInput && c != '*') + c = get(); + if (c == '*') { + c = get(); + if (c == '/') + break; // end of comment + // not end of comment + } else // end of input + break; + } while (true); + + return true; + } else { + // it's not a comment, put the '/' back + unget(); + + return false; + } +} + +// skip whitespace, then skip a comment, rinse, repeat +void TInputScanner::consumeWhitespaceComment(bool& foundNonSpaceTab) +{ + do { + consumeWhiteSpace(foundNonSpaceTab); + + // if not starting a comment now, then done + int c = peek(); + if (c != '/' || c == EndOfInput) + return; + + // skip potential comment + foundNonSpaceTab = true; + if (! consumeComment()) + return; + + } while (true); +} + +// Returns true if there was non-white space (e.g., a comment, newline) before the #version +// or no #version was found; otherwise, returns false.
There is no error case, it always +// succeeds, but will leave version == 0 if no #version was found. +// +// Sets notFirstToken based on whether tokens (beyond white space and comments) +// appeared before the #version. +// +// N.B. does not attempt to leave input in any particular known state. The assumption +// is that scanning will start anew, following the rules for the chosen version/profile, +// and with a corresponding parsing context. +// +bool TInputScanner::scanVersion(int& version, EProfile& profile, bool& notFirstToken) +{ + // This function doesn't have to get all the semantics correct, + // just find the #version if there is a correct one present. + // The preprocessor will have the responsibility of getting all the semantics right. + + bool versionNotFirst = false; // means not first WRT comments and white space, nothing more + notFirstToken = false; // means not first WRT to real tokens + version = 0; // means not found + profile = ENoProfile; + + bool foundNonSpaceTab = false; + bool lookingInMiddle = false; + int c; + do { + if (lookingInMiddle) { + notFirstToken = true; + // make forward progress by finishing off the current line plus extra new lines + if (peek() != '\n' && peek() != '\r') { + do { + c = get(); + } while (c != EndOfInput && c != '\n' && c != '\r'); + } + while (peek() == '\n' || peek() == '\r') + get(); + if (peek() == EndOfInput) + return true; + } + lookingInMiddle = true; + + // Nominal start, skipping the desktop allowed comments and white space, but tracking if + // something else was found for ES: + consumeWhitespaceComment(foundNonSpaceTab); + if (foundNonSpaceTab) + versionNotFirst = true; + + // "#" + if (get() != '#') { + versionNotFirst = true; + continue; + } + + // whitespace + do { + c = get(); + } while (c == ' ' || c == '\t'); + + // "version" + if ( c != 'v' || + get() != 'e' || + get() != 'r' || + get() != 's' || + get() != 'i' || + get() != 'o' || + get() != 'n') { + versionNotFirst = true; + continue; + } + + // whitespace + do { + c = get(); + } while (c == ' ' || c == '\t'); + + // version number + while (c >= '0' && c <= '9') { + version = 10 * version + (c - '0'); + c = get(); + } + if (version == 0) { + versionNotFirst = true; + continue; + } + + // whitespace + while (c == ' ' || c == '\t') + c = get(); + + // profile + const int maxProfileLength = 13; // not including any 0 + char profileString[maxProfileLength]; + int profileLength; + for (profileLength = 0; profileLength < maxProfileLength; ++profileLength) { + if (c == EndOfInput || c == ' ' || c == '\t' || c == '\n' || c == '\r') + break; + profileString[profileLength] = (char)c; + c = get(); + } + if (c != EndOfInput && c != ' ' && c != '\t' && c != '\n' && c != '\r') { + versionNotFirst = true; + continue; + } + + if (profileLength == 2 && strncmp(profileString, "es", profileLength) == 0) + profile = EEsProfile; + else if (profileLength == 4 && strncmp(profileString, "core", profileLength) == 0) + profile = ECoreProfile; + else if (profileLength == 13 && strncmp(profileString, "compatibility", profileLength) == 0) + profile = ECompatibilityProfile; + + return versionNotFirst; + } while (true); +} + +// Fill this in when doing glslang-level scanning, to hand back to the parser. 
+class TParserToken { +public: + explicit TParserToken(YYSTYPE& b) : sType(b) { } + + YYSTYPE& sType; +protected: + TParserToken(TParserToken&); + TParserToken& operator=(TParserToken&); +}; + +} // end namespace glslang + +// This is the function the glslang parser (i.e., bison) calls to get its next token +int yylex(YYSTYPE* glslangTokenDesc, glslang::TParseContext& parseContext) +{ + glslang::TParserToken token(*glslangTokenDesc); + + return parseContext.getScanContext()->tokenize(parseContext.getPpContext(), token); +} + +namespace { + +struct str_eq +{ + bool operator()(const char* lhs, const char* rhs) const + { + return strcmp(lhs, rhs) == 0; + } +}; + +struct str_hash +{ + size_t operator()(const char* str) const + { + // djb2 + unsigned long hash = 5381; + int c; + + while ((c = *str++) != 0) + hash = ((hash << 5) + hash) + c; + + return hash; + } +}; + +// A single global usable by all threads, by all versions, by all languages. +// After a single process-level initialization, this is read only and thread safe +std::unordered_map<const char*, int, str_hash, str_eq>* KeywordMap = nullptr; +#ifndef GLSLANG_WEB +std::unordered_set<const char*, str_hash, str_eq>* ReservedSet = nullptr; +#endif + +}; + +namespace glslang { + +void TScanContext::fillInKeywordMap() +{ + if (KeywordMap != nullptr) { + // this is really an error, as this should be called only once per process + // but, the only risk is if two threads called simultaneously + return; + } + KeywordMap = new std::unordered_map<const char*, int, str_hash, str_eq>; + + (*KeywordMap)["const"] = CONST; + (*KeywordMap)["uniform"] = UNIFORM; + (*KeywordMap)["buffer"] = BUFFER; + (*KeywordMap)["in"] = IN; + (*KeywordMap)["out"] = OUT; + (*KeywordMap)["smooth"] = SMOOTH; + (*KeywordMap)["flat"] = FLAT; + (*KeywordMap)["centroid"] = CENTROID; + (*KeywordMap)["invariant"] = INVARIANT; + (*KeywordMap)["packed"] = PACKED; + (*KeywordMap)["resource"] = RESOURCE; + (*KeywordMap)["inout"] = INOUT; + (*KeywordMap)["struct"] = STRUCT; + (*KeywordMap)["break"] = BREAK; + (*KeywordMap)["continue"] = CONTINUE; + (*KeywordMap)["do"] = DO; + (*KeywordMap)["for"] = FOR; + (*KeywordMap)["while"] = WHILE; + (*KeywordMap)["switch"] = SWITCH; + (*KeywordMap)["case"] = CASE; + (*KeywordMap)["default"] = DEFAULT; + (*KeywordMap)["if"] = IF; + (*KeywordMap)["else"] = ELSE; + (*KeywordMap)["discard"] = DISCARD; + (*KeywordMap)["return"] = RETURN; + (*KeywordMap)["void"] = VOID; + (*KeywordMap)["bool"] = BOOL; + (*KeywordMap)["float"] = FLOAT; + (*KeywordMap)["int"] = INT; + (*KeywordMap)["bvec2"] = BVEC2; + (*KeywordMap)["bvec3"] = BVEC3; + (*KeywordMap)["bvec4"] = BVEC4; + (*KeywordMap)["vec2"] = VEC2; + (*KeywordMap)["vec3"] = VEC3; + (*KeywordMap)["vec4"] = VEC4; + (*KeywordMap)["ivec2"] = IVEC2; + (*KeywordMap)["ivec3"] = IVEC3; + (*KeywordMap)["ivec4"] = IVEC4; + (*KeywordMap)["mat2"] = MAT2; + (*KeywordMap)["mat3"] = MAT3; + (*KeywordMap)["mat4"] = MAT4; + (*KeywordMap)["true"] = BOOLCONSTANT; + (*KeywordMap)["false"] = BOOLCONSTANT; + (*KeywordMap)["layout"] = LAYOUT; + (*KeywordMap)["shared"] = SHARED; + (*KeywordMap)["highp"] = HIGH_PRECISION; + (*KeywordMap)["mediump"] = MEDIUM_PRECISION; + (*KeywordMap)["lowp"] = LOW_PRECISION; + (*KeywordMap)["superp"] = SUPERP; + (*KeywordMap)["precision"] = PRECISION; + (*KeywordMap)["mat2x2"] = MAT2X2; + (*KeywordMap)["mat2x3"] = MAT2X3; + (*KeywordMap)["mat2x4"] = MAT2X4; + (*KeywordMap)["mat3x2"] = MAT3X2; + (*KeywordMap)["mat3x3"] = MAT3X3; + (*KeywordMap)["mat3x4"] = MAT3X4; + (*KeywordMap)["mat4x2"] = MAT4X2; + (*KeywordMap)["mat4x3"] = MAT4X3; + (*KeywordMap)["mat4x4"] = MAT4X4; + (*KeywordMap)["uint"] =
UINT; + (*KeywordMap)["uvec2"] = UVEC2; + (*KeywordMap)["uvec3"] = UVEC3; + (*KeywordMap)["uvec4"] = UVEC4; + +#ifndef GLSLANG_WEB + (*KeywordMap)["nonuniformEXT"] = NONUNIFORM; + (*KeywordMap)["demote"] = DEMOTE; + (*KeywordMap)["attribute"] = ATTRIBUTE; + (*KeywordMap)["varying"] = VARYING; + (*KeywordMap)["noperspective"] = NOPERSPECTIVE; + (*KeywordMap)["coherent"] = COHERENT; + (*KeywordMap)["devicecoherent"] = DEVICECOHERENT; + (*KeywordMap)["queuefamilycoherent"] = QUEUEFAMILYCOHERENT; + (*KeywordMap)["workgroupcoherent"] = WORKGROUPCOHERENT; + (*KeywordMap)["subgroupcoherent"] = SUBGROUPCOHERENT; + (*KeywordMap)["shadercallcoherent"] = SHADERCALLCOHERENT; + (*KeywordMap)["nonprivate"] = NONPRIVATE; + (*KeywordMap)["restrict"] = RESTRICT; + (*KeywordMap)["readonly"] = READONLY; + (*KeywordMap)["writeonly"] = WRITEONLY; + (*KeywordMap)["atomic_uint"] = ATOMIC_UINT; + (*KeywordMap)["volatile"] = VOLATILE; + (*KeywordMap)["patch"] = PATCH; + (*KeywordMap)["sample"] = SAMPLE; + (*KeywordMap)["subroutine"] = SUBROUTINE; + (*KeywordMap)["dmat2"] = DMAT2; + (*KeywordMap)["dmat3"] = DMAT3; + (*KeywordMap)["dmat4"] = DMAT4; + (*KeywordMap)["dmat2x2"] = DMAT2X2; + (*KeywordMap)["dmat2x3"] = DMAT2X3; + (*KeywordMap)["dmat2x4"] = DMAT2X4; + (*KeywordMap)["dmat3x2"] = DMAT3X2; + (*KeywordMap)["dmat3x3"] = DMAT3X3; + (*KeywordMap)["dmat3x4"] = DMAT3X4; + (*KeywordMap)["dmat4x2"] = DMAT4X2; + (*KeywordMap)["dmat4x3"] = DMAT4X3; + (*KeywordMap)["dmat4x4"] = DMAT4X4; + (*KeywordMap)["image1D"] = IMAGE1D; + (*KeywordMap)["iimage1D"] = IIMAGE1D; + (*KeywordMap)["uimage1D"] = UIMAGE1D; + (*KeywordMap)["image2D"] = IMAGE2D; + (*KeywordMap)["iimage2D"] = IIMAGE2D; + (*KeywordMap)["uimage2D"] = UIMAGE2D; + (*KeywordMap)["image3D"] = IMAGE3D; + (*KeywordMap)["iimage3D"] = IIMAGE3D; + (*KeywordMap)["uimage3D"] = UIMAGE3D; + (*KeywordMap)["image2DRect"] = IMAGE2DRECT; + (*KeywordMap)["iimage2DRect"] = IIMAGE2DRECT; + (*KeywordMap)["uimage2DRect"] = UIMAGE2DRECT; + (*KeywordMap)["imageCube"] = IMAGECUBE; + (*KeywordMap)["iimageCube"] = IIMAGECUBE; + (*KeywordMap)["uimageCube"] = UIMAGECUBE; + (*KeywordMap)["imageBuffer"] = IMAGEBUFFER; + (*KeywordMap)["iimageBuffer"] = IIMAGEBUFFER; + (*KeywordMap)["uimageBuffer"] = UIMAGEBUFFER; + (*KeywordMap)["image1DArray"] = IMAGE1DARRAY; + (*KeywordMap)["iimage1DArray"] = IIMAGE1DARRAY; + (*KeywordMap)["uimage1DArray"] = UIMAGE1DARRAY; + (*KeywordMap)["image2DArray"] = IMAGE2DARRAY; + (*KeywordMap)["iimage2DArray"] = IIMAGE2DARRAY; + (*KeywordMap)["uimage2DArray"] = UIMAGE2DARRAY; + (*KeywordMap)["imageCubeArray"] = IMAGECUBEARRAY; + (*KeywordMap)["iimageCubeArray"] = IIMAGECUBEARRAY; + (*KeywordMap)["uimageCubeArray"] = UIMAGECUBEARRAY; + (*KeywordMap)["image2DMS"] = IMAGE2DMS; + (*KeywordMap)["iimage2DMS"] = IIMAGE2DMS; + (*KeywordMap)["uimage2DMS"] = UIMAGE2DMS; + (*KeywordMap)["image2DMSArray"] = IMAGE2DMSARRAY; + (*KeywordMap)["iimage2DMSArray"] = IIMAGE2DMSARRAY; + (*KeywordMap)["uimage2DMSArray"] = UIMAGE2DMSARRAY; + (*KeywordMap)["double"] = DOUBLE; + (*KeywordMap)["dvec2"] = DVEC2; + (*KeywordMap)["dvec3"] = DVEC3; + (*KeywordMap)["dvec4"] = DVEC4; + (*KeywordMap)["int64_t"] = INT64_T; + (*KeywordMap)["uint64_t"] = UINT64_T; + (*KeywordMap)["i64vec2"] = I64VEC2; + (*KeywordMap)["i64vec3"] = I64VEC3; + (*KeywordMap)["i64vec4"] = I64VEC4; + (*KeywordMap)["u64vec2"] = U64VEC2; + (*KeywordMap)["u64vec3"] = U64VEC3; + (*KeywordMap)["u64vec4"] = U64VEC4; + + // GL_EXT_shader_explicit_arithmetic_types + (*KeywordMap)["int8_t"] = INT8_T; + (*KeywordMap)["i8vec2"] = 
I8VEC2; + (*KeywordMap)["i8vec3"] = I8VEC3; + (*KeywordMap)["i8vec4"] = I8VEC4; + (*KeywordMap)["uint8_t"] = UINT8_T; + (*KeywordMap)["u8vec2"] = U8VEC2; + (*KeywordMap)["u8vec3"] = U8VEC3; + (*KeywordMap)["u8vec4"] = U8VEC4; + + (*KeywordMap)["int16_t"] = INT16_T; + (*KeywordMap)["i16vec2"] = I16VEC2; + (*KeywordMap)["i16vec3"] = I16VEC3; + (*KeywordMap)["i16vec4"] = I16VEC4; + (*KeywordMap)["uint16_t"] = UINT16_T; + (*KeywordMap)["u16vec2"] = U16VEC2; + (*KeywordMap)["u16vec3"] = U16VEC3; + (*KeywordMap)["u16vec4"] = U16VEC4; + + (*KeywordMap)["int32_t"] = INT32_T; + (*KeywordMap)["i32vec2"] = I32VEC2; + (*KeywordMap)["i32vec3"] = I32VEC3; + (*KeywordMap)["i32vec4"] = I32VEC4; + (*KeywordMap)["uint32_t"] = UINT32_T; + (*KeywordMap)["u32vec2"] = U32VEC2; + (*KeywordMap)["u32vec3"] = U32VEC3; + (*KeywordMap)["u32vec4"] = U32VEC4; + + (*KeywordMap)["float16_t"] = FLOAT16_T; + (*KeywordMap)["f16vec2"] = F16VEC2; + (*KeywordMap)["f16vec3"] = F16VEC3; + (*KeywordMap)["f16vec4"] = F16VEC4; + (*KeywordMap)["f16mat2"] = F16MAT2; + (*KeywordMap)["f16mat3"] = F16MAT3; + (*KeywordMap)["f16mat4"] = F16MAT4; + (*KeywordMap)["f16mat2x2"] = F16MAT2X2; + (*KeywordMap)["f16mat2x3"] = F16MAT2X3; + (*KeywordMap)["f16mat2x4"] = F16MAT2X4; + (*KeywordMap)["f16mat3x2"] = F16MAT3X2; + (*KeywordMap)["f16mat3x3"] = F16MAT3X3; + (*KeywordMap)["f16mat3x4"] = F16MAT3X4; + (*KeywordMap)["f16mat4x2"] = F16MAT4X2; + (*KeywordMap)["f16mat4x3"] = F16MAT4X3; + (*KeywordMap)["f16mat4x4"] = F16MAT4X4; + + (*KeywordMap)["float32_t"] = FLOAT32_T; + (*KeywordMap)["f32vec2"] = F32VEC2; + (*KeywordMap)["f32vec3"] = F32VEC3; + (*KeywordMap)["f32vec4"] = F32VEC4; + (*KeywordMap)["f32mat2"] = F32MAT2; + (*KeywordMap)["f32mat3"] = F32MAT3; + (*KeywordMap)["f32mat4"] = F32MAT4; + (*KeywordMap)["f32mat2x2"] = F32MAT2X2; + (*KeywordMap)["f32mat2x3"] = F32MAT2X3; + (*KeywordMap)["f32mat2x4"] = F32MAT2X4; + (*KeywordMap)["f32mat3x2"] = F32MAT3X2; + (*KeywordMap)["f32mat3x3"] = F32MAT3X3; + (*KeywordMap)["f32mat3x4"] = F32MAT3X4; + (*KeywordMap)["f32mat4x2"] = F32MAT4X2; + (*KeywordMap)["f32mat4x3"] = F32MAT4X3; + (*KeywordMap)["f32mat4x4"] = F32MAT4X4; + (*KeywordMap)["float64_t"] = FLOAT64_T; + (*KeywordMap)["f64vec2"] = F64VEC2; + (*KeywordMap)["f64vec3"] = F64VEC3; + (*KeywordMap)["f64vec4"] = F64VEC4; + (*KeywordMap)["f64mat2"] = F64MAT2; + (*KeywordMap)["f64mat3"] = F64MAT3; + (*KeywordMap)["f64mat4"] = F64MAT4; + (*KeywordMap)["f64mat2x2"] = F64MAT2X2; + (*KeywordMap)["f64mat2x3"] = F64MAT2X3; + (*KeywordMap)["f64mat2x4"] = F64MAT2X4; + (*KeywordMap)["f64mat3x2"] = F64MAT3X2; + (*KeywordMap)["f64mat3x3"] = F64MAT3X3; + (*KeywordMap)["f64mat3x4"] = F64MAT3X4; + (*KeywordMap)["f64mat4x2"] = F64MAT4X2; + (*KeywordMap)["f64mat4x3"] = F64MAT4X3; + (*KeywordMap)["f64mat4x4"] = F64MAT4X4; +#endif + + (*KeywordMap)["sampler2D"] = SAMPLER2D; + (*KeywordMap)["samplerCube"] = SAMPLERCUBE; + (*KeywordMap)["samplerCubeShadow"] = SAMPLERCUBESHADOW; + (*KeywordMap)["sampler2DArray"] = SAMPLER2DARRAY; + (*KeywordMap)["sampler2DArrayShadow"] = SAMPLER2DARRAYSHADOW; + (*KeywordMap)["isampler2D"] = ISAMPLER2D; + (*KeywordMap)["isampler3D"] = ISAMPLER3D; + (*KeywordMap)["isamplerCube"] = ISAMPLERCUBE; + (*KeywordMap)["isampler2DArray"] = ISAMPLER2DARRAY; + (*KeywordMap)["usampler2D"] = USAMPLER2D; + (*KeywordMap)["usampler3D"] = USAMPLER3D; + (*KeywordMap)["usamplerCube"] = USAMPLERCUBE; + (*KeywordMap)["usampler2DArray"] = USAMPLER2DARRAY; + (*KeywordMap)["sampler3D"] = SAMPLER3D; + (*KeywordMap)["sampler2DShadow"] = SAMPLER2DSHADOW; + + 
(*KeywordMap)["texture2D"] = TEXTURE2D; + (*KeywordMap)["textureCube"] = TEXTURECUBE; + (*KeywordMap)["texture2DArray"] = TEXTURE2DARRAY; + (*KeywordMap)["itexture2D"] = ITEXTURE2D; + (*KeywordMap)["itexture3D"] = ITEXTURE3D; + (*KeywordMap)["itextureCube"] = ITEXTURECUBE; + (*KeywordMap)["itexture2DArray"] = ITEXTURE2DARRAY; + (*KeywordMap)["utexture2D"] = UTEXTURE2D; + (*KeywordMap)["utexture3D"] = UTEXTURE3D; + (*KeywordMap)["utextureCube"] = UTEXTURECUBE; + (*KeywordMap)["utexture2DArray"] = UTEXTURE2DARRAY; + (*KeywordMap)["texture3D"] = TEXTURE3D; + + (*KeywordMap)["sampler"] = SAMPLER; + (*KeywordMap)["samplerShadow"] = SAMPLERSHADOW; + +#ifndef GLSLANG_WEB + (*KeywordMap)["textureCubeArray"] = TEXTURECUBEARRAY; + (*KeywordMap)["itextureCubeArray"] = ITEXTURECUBEARRAY; + (*KeywordMap)["utextureCubeArray"] = UTEXTURECUBEARRAY; + (*KeywordMap)["samplerCubeArray"] = SAMPLERCUBEARRAY; + (*KeywordMap)["samplerCubeArrayShadow"] = SAMPLERCUBEARRAYSHADOW; + (*KeywordMap)["isamplerCubeArray"] = ISAMPLERCUBEARRAY; + (*KeywordMap)["usamplerCubeArray"] = USAMPLERCUBEARRAY; + (*KeywordMap)["sampler1DArrayShadow"] = SAMPLER1DARRAYSHADOW; + (*KeywordMap)["isampler1DArray"] = ISAMPLER1DARRAY; + (*KeywordMap)["usampler1D"] = USAMPLER1D; + (*KeywordMap)["isampler1D"] = ISAMPLER1D; + (*KeywordMap)["usampler1DArray"] = USAMPLER1DARRAY; + (*KeywordMap)["samplerBuffer"] = SAMPLERBUFFER; + (*KeywordMap)["isampler2DRect"] = ISAMPLER2DRECT; + (*KeywordMap)["usampler2DRect"] = USAMPLER2DRECT; + (*KeywordMap)["isamplerBuffer"] = ISAMPLERBUFFER; + (*KeywordMap)["usamplerBuffer"] = USAMPLERBUFFER; + (*KeywordMap)["sampler2DMS"] = SAMPLER2DMS; + (*KeywordMap)["isampler2DMS"] = ISAMPLER2DMS; + (*KeywordMap)["usampler2DMS"] = USAMPLER2DMS; + (*KeywordMap)["sampler2DMSArray"] = SAMPLER2DMSARRAY; + (*KeywordMap)["isampler2DMSArray"] = ISAMPLER2DMSARRAY; + (*KeywordMap)["usampler2DMSArray"] = USAMPLER2DMSARRAY; + (*KeywordMap)["sampler1D"] = SAMPLER1D; + (*KeywordMap)["sampler1DShadow"] = SAMPLER1DSHADOW; + (*KeywordMap)["sampler2DRect"] = SAMPLER2DRECT; + (*KeywordMap)["sampler2DRectShadow"] = SAMPLER2DRECTSHADOW; + (*KeywordMap)["sampler1DArray"] = SAMPLER1DARRAY; + + (*KeywordMap)["samplerExternalOES"] = SAMPLEREXTERNALOES; // GL_OES_EGL_image_external + + (*KeywordMap)["__samplerExternal2DY2YEXT"] = SAMPLEREXTERNAL2DY2YEXT; // GL_EXT_YUV_target + + (*KeywordMap)["itexture1DArray"] = ITEXTURE1DARRAY; + (*KeywordMap)["utexture1D"] = UTEXTURE1D; + (*KeywordMap)["itexture1D"] = ITEXTURE1D; + (*KeywordMap)["utexture1DArray"] = UTEXTURE1DARRAY; + (*KeywordMap)["textureBuffer"] = TEXTUREBUFFER; + (*KeywordMap)["itexture2DRect"] = ITEXTURE2DRECT; + (*KeywordMap)["utexture2DRect"] = UTEXTURE2DRECT; + (*KeywordMap)["itextureBuffer"] = ITEXTUREBUFFER; + (*KeywordMap)["utextureBuffer"] = UTEXTUREBUFFER; + (*KeywordMap)["texture2DMS"] = TEXTURE2DMS; + (*KeywordMap)["itexture2DMS"] = ITEXTURE2DMS; + (*KeywordMap)["utexture2DMS"] = UTEXTURE2DMS; + (*KeywordMap)["texture2DMSArray"] = TEXTURE2DMSARRAY; + (*KeywordMap)["itexture2DMSArray"] = ITEXTURE2DMSARRAY; + (*KeywordMap)["utexture2DMSArray"] = UTEXTURE2DMSARRAY; + (*KeywordMap)["texture1D"] = TEXTURE1D; + (*KeywordMap)["texture2DRect"] = TEXTURE2DRECT; + (*KeywordMap)["texture1DArray"] = TEXTURE1DARRAY; + + (*KeywordMap)["subpassInput"] = SUBPASSINPUT; + (*KeywordMap)["subpassInputMS"] = SUBPASSINPUTMS; + (*KeywordMap)["isubpassInput"] = ISUBPASSINPUT; + (*KeywordMap)["isubpassInputMS"] = ISUBPASSINPUTMS; + (*KeywordMap)["usubpassInput"] = USUBPASSINPUT; + 
(*KeywordMap)["usubpassInputMS"] = USUBPASSINPUTMS; + + (*KeywordMap)["f16sampler1D"] = F16SAMPLER1D; + (*KeywordMap)["f16sampler2D"] = F16SAMPLER2D; + (*KeywordMap)["f16sampler3D"] = F16SAMPLER3D; + (*KeywordMap)["f16sampler2DRect"] = F16SAMPLER2DRECT; + (*KeywordMap)["f16samplerCube"] = F16SAMPLERCUBE; + (*KeywordMap)["f16sampler1DArray"] = F16SAMPLER1DARRAY; + (*KeywordMap)["f16sampler2DArray"] = F16SAMPLER2DARRAY; + (*KeywordMap)["f16samplerCubeArray"] = F16SAMPLERCUBEARRAY; + (*KeywordMap)["f16samplerBuffer"] = F16SAMPLERBUFFER; + (*KeywordMap)["f16sampler2DMS"] = F16SAMPLER2DMS; + (*KeywordMap)["f16sampler2DMSArray"] = F16SAMPLER2DMSARRAY; + (*KeywordMap)["f16sampler1DShadow"] = F16SAMPLER1DSHADOW; + (*KeywordMap)["f16sampler2DShadow"] = F16SAMPLER2DSHADOW; + (*KeywordMap)["f16sampler2DRectShadow"] = F16SAMPLER2DRECTSHADOW; + (*KeywordMap)["f16samplerCubeShadow"] = F16SAMPLERCUBESHADOW; + (*KeywordMap)["f16sampler1DArrayShadow"] = F16SAMPLER1DARRAYSHADOW; + (*KeywordMap)["f16sampler2DArrayShadow"] = F16SAMPLER2DARRAYSHADOW; + (*KeywordMap)["f16samplerCubeArrayShadow"] = F16SAMPLERCUBEARRAYSHADOW; + + (*KeywordMap)["f16image1D"] = F16IMAGE1D; + (*KeywordMap)["f16image2D"] = F16IMAGE2D; + (*KeywordMap)["f16image3D"] = F16IMAGE3D; + (*KeywordMap)["f16image2DRect"] = F16IMAGE2DRECT; + (*KeywordMap)["f16imageCube"] = F16IMAGECUBE; + (*KeywordMap)["f16image1DArray"] = F16IMAGE1DARRAY; + (*KeywordMap)["f16image2DArray"] = F16IMAGE2DARRAY; + (*KeywordMap)["f16imageCubeArray"] = F16IMAGECUBEARRAY; + (*KeywordMap)["f16imageBuffer"] = F16IMAGEBUFFER; + (*KeywordMap)["f16image2DMS"] = F16IMAGE2DMS; + (*KeywordMap)["f16image2DMSArray"] = F16IMAGE2DMSARRAY; + + (*KeywordMap)["f16texture1D"] = F16TEXTURE1D; + (*KeywordMap)["f16texture2D"] = F16TEXTURE2D; + (*KeywordMap)["f16texture3D"] = F16TEXTURE3D; + (*KeywordMap)["f16texture2DRect"] = F16TEXTURE2DRECT; + (*KeywordMap)["f16textureCube"] = F16TEXTURECUBE; + (*KeywordMap)["f16texture1DArray"] = F16TEXTURE1DARRAY; + (*KeywordMap)["f16texture2DArray"] = F16TEXTURE2DARRAY; + (*KeywordMap)["f16textureCubeArray"] = F16TEXTURECUBEARRAY; + (*KeywordMap)["f16textureBuffer"] = F16TEXTUREBUFFER; + (*KeywordMap)["f16texture2DMS"] = F16TEXTURE2DMS; + (*KeywordMap)["f16texture2DMSArray"] = F16TEXTURE2DMSARRAY; + + (*KeywordMap)["f16subpassInput"] = F16SUBPASSINPUT; + (*KeywordMap)["f16subpassInputMS"] = F16SUBPASSINPUTMS; + (*KeywordMap)["__explicitInterpAMD"] = EXPLICITINTERPAMD; + (*KeywordMap)["pervertexNV"] = PERVERTEXNV; + (*KeywordMap)["precise"] = PRECISE; + + (*KeywordMap)["rayPayloadNV"] = PAYLOADNV; + (*KeywordMap)["rayPayloadEXT"] = PAYLOADEXT; + (*KeywordMap)["rayPayloadInNV"] = PAYLOADINNV; + (*KeywordMap)["rayPayloadInEXT"] = PAYLOADINEXT; + (*KeywordMap)["hitAttributeNV"] = HITATTRNV; + (*KeywordMap)["hitAttributeEXT"] = HITATTREXT; + (*KeywordMap)["callableDataNV"] = CALLDATANV; + (*KeywordMap)["callableDataEXT"] = CALLDATAEXT; + (*KeywordMap)["callableDataInNV"] = CALLDATAINNV; + (*KeywordMap)["callableDataInEXT"] = CALLDATAINEXT; + (*KeywordMap)["accelerationStructureNV"] = ACCSTRUCTNV; + (*KeywordMap)["accelerationStructureEXT"] = ACCSTRUCTEXT; + (*KeywordMap)["rayQueryEXT"] = RAYQUERYEXT; + (*KeywordMap)["perprimitiveNV"] = PERPRIMITIVENV; + (*KeywordMap)["perviewNV"] = PERVIEWNV; + (*KeywordMap)["taskNV"] = PERTASKNV; + + (*KeywordMap)["fcoopmatNV"] = FCOOPMATNV; + (*KeywordMap)["icoopmatNV"] = ICOOPMATNV; + (*KeywordMap)["ucoopmatNV"] = UCOOPMATNV; + + ReservedSet = new std::unordered_set; + + ReservedSet->insert("common"); + 
ReservedSet->insert("partition"); + ReservedSet->insert("active"); + ReservedSet->insert("asm"); + ReservedSet->insert("class"); + ReservedSet->insert("union"); + ReservedSet->insert("enum"); + ReservedSet->insert("typedef"); + ReservedSet->insert("template"); + ReservedSet->insert("this"); + ReservedSet->insert("goto"); + ReservedSet->insert("inline"); + ReservedSet->insert("noinline"); + ReservedSet->insert("public"); + ReservedSet->insert("static"); + ReservedSet->insert("extern"); + ReservedSet->insert("external"); + ReservedSet->insert("interface"); + ReservedSet->insert("long"); + ReservedSet->insert("short"); + ReservedSet->insert("half"); + ReservedSet->insert("fixed"); + ReservedSet->insert("unsigned"); + ReservedSet->insert("input"); + ReservedSet->insert("output"); + ReservedSet->insert("hvec2"); + ReservedSet->insert("hvec3"); + ReservedSet->insert("hvec4"); + ReservedSet->insert("fvec2"); + ReservedSet->insert("fvec3"); + ReservedSet->insert("fvec4"); + ReservedSet->insert("sampler3DRect"); + ReservedSet->insert("filter"); + ReservedSet->insert("sizeof"); + ReservedSet->insert("cast"); + ReservedSet->insert("namespace"); + ReservedSet->insert("using"); +#endif +} + +void TScanContext::deleteKeywordMap() +{ + delete KeywordMap; + KeywordMap = nullptr; +#ifndef GLSLANG_WEB + delete ReservedSet; + ReservedSet = nullptr; +#endif +} + +// Called by yylex to get the next token. +// Returning 0 implies end of input. +int TScanContext::tokenize(TPpContext* pp, TParserToken& token) +{ + do { + parserToken = &token; + TPpToken ppToken; + int token = pp->tokenize(ppToken); + if (token == EndOfInput) + return 0; + + tokenText = ppToken.name; + loc = ppToken.loc; + parserToken->sType.lex.loc = loc; + switch (token) { + case ';': afterType = false; afterBuffer = false; return SEMICOLON; + case ',': afterType = false; return COMMA; + case ':': return COLON; + case '=': afterType = false; return EQUAL; + case '(': afterType = false; return LEFT_PAREN; + case ')': afterType = false; return RIGHT_PAREN; + case '.': field = true; return DOT; + case '!': return BANG; + case '-': return DASH; + case '~': return TILDE; + case '+': return PLUS; + case '*': return STAR; + case '/': return SLASH; + case '%': return PERCENT; + case '<': return LEFT_ANGLE; + case '>': return RIGHT_ANGLE; + case '|': return VERTICAL_BAR; + case '^': return CARET; + case '&': return AMPERSAND; + case '?': return QUESTION; + case '[': return LEFT_BRACKET; + case ']': return RIGHT_BRACKET; + case '{': afterStruct = false; afterBuffer = false; return LEFT_BRACE; + case '}': return RIGHT_BRACE; + case '\\': + parseContext.error(loc, "illegal use of escape character", "\\", ""); + break; + + case PPAtomAddAssign: return ADD_ASSIGN; + case PPAtomSubAssign: return SUB_ASSIGN; + case PPAtomMulAssign: return MUL_ASSIGN; + case PPAtomDivAssign: return DIV_ASSIGN; + case PPAtomModAssign: return MOD_ASSIGN; + + case PpAtomRight: return RIGHT_OP; + case PpAtomLeft: return LEFT_OP; + + case PpAtomRightAssign: return RIGHT_ASSIGN; + case PpAtomLeftAssign: return LEFT_ASSIGN; + case PpAtomAndAssign: return AND_ASSIGN; + case PpAtomOrAssign: return OR_ASSIGN; + case PpAtomXorAssign: return XOR_ASSIGN; + + case PpAtomAnd: return AND_OP; + case PpAtomOr: return OR_OP; + case PpAtomXor: return XOR_OP; + + case PpAtomEQ: return EQ_OP; + case PpAtomGE: return GE_OP; + case PpAtomNE: return NE_OP; + case PpAtomLE: return LE_OP; + + case PpAtomDecrement: return DEC_OP; + case PpAtomIncrement: return INC_OP; + + case PpAtomColonColon: + 
parseContext.error(loc, "not supported", "::", ""); + break; + + case PpAtomConstString: parserToken->sType.lex.string = NewPoolTString(tokenText); return STRING_LITERAL; + case PpAtomConstInt: parserToken->sType.lex.i = ppToken.ival; return INTCONSTANT; + case PpAtomConstUint: parserToken->sType.lex.i = ppToken.ival; return UINTCONSTANT; + case PpAtomConstFloat: parserToken->sType.lex.d = ppToken.dval; return FLOATCONSTANT; +#ifndef GLSLANG_WEB + case PpAtomConstInt16: parserToken->sType.lex.i = ppToken.ival; return INT16CONSTANT; + case PpAtomConstUint16: parserToken->sType.lex.i = ppToken.ival; return UINT16CONSTANT; + case PpAtomConstInt64: parserToken->sType.lex.i64 = ppToken.i64val; return INT64CONSTANT; + case PpAtomConstUint64: parserToken->sType.lex.i64 = ppToken.i64val; return UINT64CONSTANT; + case PpAtomConstDouble: parserToken->sType.lex.d = ppToken.dval; return DOUBLECONSTANT; + case PpAtomConstFloat16: parserToken->sType.lex.d = ppToken.dval; return FLOAT16CONSTANT; +#endif + case PpAtomIdentifier: + { + int token = tokenizeIdentifier(); + field = false; + return token; + } + + case EndOfInput: return 0; + + default: + char buf[2]; + buf[0] = (char)token; + buf[1] = 0; + parseContext.error(loc, "unexpected token", buf, ""); + break; + } + } while (true); +} + +int TScanContext::tokenizeIdentifier() +{ +#ifndef GLSLANG_WEB + if (ReservedSet->find(tokenText) != ReservedSet->end()) + return reservedWord(); +#endif + + auto it = KeywordMap->find(tokenText); + if (it == KeywordMap->end()) { + // Should have an identifier of some sort + return identifierOrType(); + } + keyword = it->second; + + switch (keyword) { + case CONST: + case UNIFORM: + case IN: + case OUT: + case INOUT: + case BREAK: + case CONTINUE: + case DO: + case FOR: + case WHILE: + case IF: + case ELSE: + case DISCARD: + case RETURN: + case CASE: + return keyword; + + case BUFFER: + afterBuffer = true; + if ((parseContext.isEsProfile() && parseContext.version < 310) || + (!parseContext.isEsProfile() && (parseContext.version < 430 && + !parseContext.extensionTurnedOn(E_GL_ARB_shader_storage_buffer_object)))) + return identifierOrType(); + return keyword; + + case STRUCT: + afterStruct = true; + return keyword; + + case SWITCH: + case DEFAULT: + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 130)) + reservedWord(); + return keyword; + + case VOID: + case BOOL: + case FLOAT: + case INT: + case BVEC2: + case BVEC3: + case BVEC4: + case VEC2: + case VEC3: + case VEC4: + case IVEC2: + case IVEC3: + case IVEC4: + case MAT2: + case MAT3: + case MAT4: + case SAMPLER2D: + case SAMPLERCUBE: + afterType = true; + return keyword; + + case BOOLCONSTANT: + if (strcmp("true", tokenText) == 0) + parserToken->sType.lex.b = true; + else + parserToken->sType.lex.b = false; + return keyword; + + case SMOOTH: + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 130)) + return identifierOrType(); + return keyword; + case FLAT: + if (parseContext.isEsProfile() && parseContext.version < 300) + reservedWord(); + else if (!parseContext.isEsProfile() && parseContext.version < 130) + return identifierOrType(); + return keyword; + case CENTROID: + if (parseContext.version < 120) + return identifierOrType(); + return keyword; + case INVARIANT: + if (!parseContext.isEsProfile() && parseContext.version < 120) + return identifierOrType(); + return keyword; + case PACKED: + if 
((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 330)) + return reservedWord(); + return identifierOrType(); + + case RESOURCE: + { + bool reserved = (parseContext.isEsProfile() && parseContext.version >= 300) || + (!parseContext.isEsProfile() && parseContext.version >= 420); + return identifierOrReserved(reserved); + } + case SUPERP: + { + bool reserved = parseContext.isEsProfile() || parseContext.version >= 130; + return identifierOrReserved(reserved); + } + +#ifndef GLSLANG_WEB + case NOPERSPECTIVE: + if (parseContext.extensionTurnedOn(E_GL_NV_shader_noperspective_interpolation)) + return keyword; + return es30ReservedFromGLSL(130); + + case NONUNIFORM: + if (parseContext.extensionTurnedOn(E_GL_EXT_nonuniform_qualifier)) + return keyword; + else + return identifierOrType(); + case ATTRIBUTE: + case VARYING: + if (parseContext.isEsProfile() && parseContext.version >= 300) + reservedWord(); + return keyword; + case PAYLOADNV: + case PAYLOADINNV: + case HITATTRNV: + case CALLDATANV: + case CALLDATAINNV: + case ACCSTRUCTNV: + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_NV_ray_tracing)) + return keyword; + return identifierOrType(); + case PAYLOADEXT: + case PAYLOADINEXT: + case HITATTREXT: + case CALLDATAEXT: + case CALLDATAINEXT: + case ACCSTRUCTEXT: + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_ray_tracing) || + parseContext.extensionTurnedOn(E_GL_EXT_ray_query)) + return keyword; + return identifierOrType(); + case RAYQUERYEXT: + if (parseContext.symbolTable.atBuiltInLevel() || + (!parseContext.isEsProfile() && parseContext.version >= 460 + && parseContext.extensionTurnedOn(E_GL_EXT_ray_query))) + return keyword; + return identifierOrType(); + case ATOMIC_UINT: + if ((parseContext.isEsProfile() && parseContext.version >= 310) || + parseContext.extensionTurnedOn(E_GL_ARB_shader_atomic_counters)) + return keyword; + return es30ReservedFromGLSL(420); + + case COHERENT: + case DEVICECOHERENT: + case QUEUEFAMILYCOHERENT: + case WORKGROUPCOHERENT: + case SUBGROUPCOHERENT: + case SHADERCALLCOHERENT: + case NONPRIVATE: + case RESTRICT: + case READONLY: + case WRITEONLY: + if (parseContext.isEsProfile() && parseContext.version >= 310) + return keyword; + return es30ReservedFromGLSL(parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store) ? 130 : 420); + case VOLATILE: + if (parseContext.isEsProfile() && parseContext.version >= 310) + return keyword; + if (! parseContext.symbolTable.atBuiltInLevel() && (parseContext.isEsProfile() || + (parseContext.version < 420 && ! 
parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store)))) + reservedWord(); + return keyword; + case PATCH: + if (parseContext.symbolTable.atBuiltInLevel() || + (parseContext.isEsProfile() && + (parseContext.version >= 320 || + parseContext.extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))) || + (!parseContext.isEsProfile() && parseContext.extensionTurnedOn(E_GL_ARB_tessellation_shader))) + return keyword; + + return es30ReservedFromGLSL(400); + + case SAMPLE: + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(1, &E_GL_OES_shader_multisample_interpolation)) + return keyword; + return es30ReservedFromGLSL(400); + + case SUBROUTINE: + return es30ReservedFromGLSL(400); +#endif + case SHARED: + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 140)) + return identifierOrType(); + return keyword; + case LAYOUT: + { + const int numLayoutExts = 2; + const char* layoutExts[numLayoutExts] = { E_GL_ARB_shading_language_420pack, + E_GL_ARB_explicit_attrib_location }; + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 140 && + ! parseContext.extensionsTurnedOn(numLayoutExts, layoutExts))) + return identifierOrType(); + return keyword; + } + + case HIGH_PRECISION: + case MEDIUM_PRECISION: + case LOW_PRECISION: + case PRECISION: + return precisionKeyword(); + + case MAT2X2: + case MAT2X3: + case MAT2X4: + case MAT3X2: + case MAT3X3: + case MAT3X4: + case MAT4X2: + case MAT4X3: + case MAT4X4: + return matNxM(); + +#ifndef GLSLANG_WEB + case DMAT2: + case DMAT3: + case DMAT4: + case DMAT2X2: + case DMAT2X3: + case DMAT2X4: + case DMAT3X2: + case DMAT3X3: + case DMAT3X4: + case DMAT4X2: + case DMAT4X3: + case DMAT4X4: + return dMat(); + + case IMAGE1D: + case IIMAGE1D: + case UIMAGE1D: + case IMAGE1DARRAY: + case IIMAGE1DARRAY: + case UIMAGE1DARRAY: + case IMAGE2DRECT: + case IIMAGE2DRECT: + case UIMAGE2DRECT: + afterType = true; + return firstGenerationImage(false); + + case IMAGEBUFFER: + case IIMAGEBUFFER: + case UIMAGEBUFFER: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer)) + return keyword; + return firstGenerationImage(false); + + case IMAGE2D: + case IIMAGE2D: + case UIMAGE2D: + case IMAGE3D: + case IIMAGE3D: + case UIMAGE3D: + case IMAGECUBE: + case IIMAGECUBE: + case UIMAGECUBE: + case IMAGE2DARRAY: + case IIMAGE2DARRAY: + case UIMAGE2DARRAY: + afterType = true; + return firstGenerationImage(true); + + case IMAGECUBEARRAY: + case IIMAGECUBEARRAY: + case UIMAGECUBEARRAY: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array)) + return keyword; + return secondGenerationImage(); + + case IMAGE2DMS: + case IIMAGE2DMS: + case UIMAGE2DMS: + case IMAGE2DMSARRAY: + case IIMAGE2DMSARRAY: + case UIMAGE2DMSARRAY: + afterType = true; + return secondGenerationImage(); + + case DOUBLE: + case DVEC2: + case DVEC3: + case DVEC4: + afterType = true; + if (parseContext.isEsProfile() || parseContext.version < 150 || + (!parseContext.symbolTable.atBuiltInLevel() && + (parseContext.version < 400 && !parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_fp64) && + (parseContext.version < 410 && 
!parseContext.extensionTurnedOn(E_GL_ARB_vertex_attrib_64bit))))) + reservedWord(); + return keyword; + + case INT64_T: + case UINT64_T: + case I64VEC2: + case I64VEC3: + case I64VEC4: + case U64VEC2: + case U64VEC3: + case U64VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_int64) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64)) + return keyword; + return identifierOrType(); + + case INT8_T: + case UINT8_T: + case I8VEC2: + case I8VEC3: + case I8VEC4: + case U8VEC2: + case U8VEC3: + case U8VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_8bit_storage) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8)) + return keyword; + return identifierOrType(); + + case INT16_T: + case UINT16_T: + case I16VEC2: + case I16VEC3: + case I16VEC4: + case U16VEC2: + case U16VEC3: + case U16VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_int16) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_16bit_storage) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16)) + return keyword; + return identifierOrType(); + case INT32_T: + case UINT32_T: + case I32VEC2: + case I32VEC3: + case I32VEC4: + case U32VEC2: + case U32VEC3: + case U32VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32)) + return keyword; + return identifierOrType(); + case FLOAT32_T: + case F32VEC2: + case F32VEC3: + case F32VEC4: + case F32MAT2: + case F32MAT3: + case F32MAT4: + case F32MAT2X2: + case F32MAT2X3: + case F32MAT2X4: + case F32MAT3X2: + case F32MAT3X3: + case F32MAT3X4: + case F32MAT4X2: + case F32MAT4X3: + case F32MAT4X4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32)) + return keyword; + return identifierOrType(); + + case FLOAT64_T: + case F64VEC2: + case F64VEC3: + case F64VEC4: + case F64MAT2: + case F64MAT3: + case F64MAT4: + case F64MAT2X2: + case F64MAT2X3: + case F64MAT2X4: + case F64MAT3X2: + case F64MAT3X3: + case F64MAT3X4: + case F64MAT4X2: + case F64MAT4X3: + case F64MAT4X4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64)) + return keyword; + return identifierOrType(); + + case FLOAT16_T: + case F16VEC2: + case F16VEC3: + case F16VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_16bit_storage) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16)) + 
return keyword; + + return identifierOrType(); + + case F16MAT2: + case F16MAT3: + case F16MAT4: + case F16MAT2X2: + case F16MAT2X3: + case F16MAT2X4: + case F16MAT3X2: + case F16MAT3X3: + case F16MAT3X4: + case F16MAT4X2: + case F16MAT4X3: + case F16MAT4X4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16)) + return keyword; + + return identifierOrType(); + + case SAMPLERCUBEARRAY: + case SAMPLERCUBEARRAYSHADOW: + case ISAMPLERCUBEARRAY: + case USAMPLERCUBEARRAY: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array)) + return keyword; + if (parseContext.isEsProfile() || (parseContext.version < 400 && ! parseContext.extensionTurnedOn(E_GL_ARB_texture_cube_map_array))) + reservedWord(); + return keyword; + + case TEXTURECUBEARRAY: + case ITEXTURECUBEARRAY: + case UTEXTURECUBEARRAY: + if (parseContext.spvVersion.vulkan > 0) + return keyword; + else + return identifierOrType(); +#endif + + case UINT: + case UVEC2: + case UVEC3: + case UVEC4: + case SAMPLERCUBESHADOW: + case SAMPLER2DARRAY: + case SAMPLER2DARRAYSHADOW: + case ISAMPLER2D: + case ISAMPLER3D: + case ISAMPLERCUBE: + case ISAMPLER2DARRAY: + case USAMPLER2D: + case USAMPLER3D: + case USAMPLERCUBE: + case USAMPLER2DARRAY: + afterType = true; + return nonreservedKeyword(300, 130); + + case SAMPLER3D: + afterType = true; + if (parseContext.isEsProfile() && parseContext.version < 300) { + if (!parseContext.extensionTurnedOn(E_GL_OES_texture_3D)) + reservedWord(); + } + return keyword; + + case SAMPLER2DSHADOW: + afterType = true; + if (parseContext.isEsProfile() && parseContext.version < 300) { + if (!parseContext.extensionTurnedOn(E_GL_EXT_shadow_samplers)) + reservedWord(); + } + return keyword; + + case TEXTURE2D: + case TEXTURECUBE: + case TEXTURE2DARRAY: + case ITEXTURE2D: + case ITEXTURE3D: + case ITEXTURECUBE: + case ITEXTURE2DARRAY: + case UTEXTURE2D: + case UTEXTURE3D: + case UTEXTURECUBE: + case UTEXTURE2DARRAY: + case TEXTURE3D: + case SAMPLER: + case SAMPLERSHADOW: + if (parseContext.spvVersion.vulkan > 0) + return keyword; + else + return identifierOrType(); + +#ifndef GLSLANG_WEB + case ISAMPLER1D: + case ISAMPLER1DARRAY: + case SAMPLER1DARRAYSHADOW: + case USAMPLER1D: + case USAMPLER1DARRAY: + afterType = true; + return es30ReservedFromGLSL(130); + case ISAMPLER2DRECT: + case USAMPLER2DRECT: + afterType = true; + return es30ReservedFromGLSL(140); + + case SAMPLERBUFFER: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer)) + return keyword; + return es30ReservedFromGLSL(130); + + case ISAMPLERBUFFER: + case USAMPLERBUFFER: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer)) + return keyword; + return es30ReservedFromGLSL(140); + + case SAMPLER2DMS: + case ISAMPLER2DMS: + case USAMPLER2DMS: + afterType = true; + if (parseContext.isEsProfile() && parseContext.version >= 310) + return keyword; + if (!parseContext.isEsProfile() && (parseContext.version > 140 || + (parseContext.version == 140 && 
parseContext.extensionsTurnedOn(1, &E_GL_ARB_texture_multisample)))) + return keyword; + return es30ReservedFromGLSL(150); + + case SAMPLER2DMSARRAY: + case ISAMPLER2DMSARRAY: + case USAMPLER2DMSARRAY: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(1, &E_GL_OES_texture_storage_multisample_2d_array)) + return keyword; + if (!parseContext.isEsProfile() && (parseContext.version > 140 || + (parseContext.version == 140 && parseContext.extensionsTurnedOn(1, &E_GL_ARB_texture_multisample)))) + return keyword; + return es30ReservedFromGLSL(150); + + case SAMPLER1D: + case SAMPLER1DSHADOW: + afterType = true; + if (parseContext.isEsProfile()) + reservedWord(); + return keyword; + + case SAMPLER2DRECT: + case SAMPLER2DRECTSHADOW: + afterType = true; + if (parseContext.isEsProfile()) + reservedWord(); + else if (parseContext.version < 140 && ! parseContext.symbolTable.atBuiltInLevel() && ! parseContext.extensionTurnedOn(E_GL_ARB_texture_rectangle)) { + if (parseContext.relaxedErrors()) + parseContext.requireExtensions(loc, 1, &E_GL_ARB_texture_rectangle, "texture-rectangle sampler keyword"); + else + reservedWord(); + } + return keyword; + + case SAMPLER1DARRAY: + afterType = true; + if (parseContext.isEsProfile() && parseContext.version == 300) + reservedWord(); + else if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 130)) + return identifierOrType(); + return keyword; + + case SAMPLEREXTERNALOES: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_OES_EGL_image_external) || + parseContext.extensionTurnedOn(E_GL_OES_EGL_image_external_essl3)) + return keyword; + return identifierOrType(); + + case SAMPLEREXTERNAL2DY2YEXT: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_YUV_target)) + return keyword; + return identifierOrType(); + + case ITEXTURE1DARRAY: + case UTEXTURE1D: + case ITEXTURE1D: + case UTEXTURE1DARRAY: + case TEXTUREBUFFER: + case ITEXTURE2DRECT: + case UTEXTURE2DRECT: + case ITEXTUREBUFFER: + case UTEXTUREBUFFER: + case TEXTURE2DMS: + case ITEXTURE2DMS: + case UTEXTURE2DMS: + case TEXTURE2DMSARRAY: + case ITEXTURE2DMSARRAY: + case UTEXTURE2DMSARRAY: + case TEXTURE1D: + case TEXTURE2DRECT: + case TEXTURE1DARRAY: + if (parseContext.spvVersion.vulkan > 0) + return keyword; + else + return identifierOrType(); + + case SUBPASSINPUT: + case SUBPASSINPUTMS: + case ISUBPASSINPUT: + case ISUBPASSINPUTMS: + case USUBPASSINPUT: + case USUBPASSINPUTMS: + if (parseContext.spvVersion.vulkan > 0) + return keyword; + else + return identifierOrType(); + + case F16SAMPLER1D: + case F16SAMPLER2D: + case F16SAMPLER3D: + case F16SAMPLER2DRECT: + case F16SAMPLERCUBE: + case F16SAMPLER1DARRAY: + case F16SAMPLER2DARRAY: + case F16SAMPLERCUBEARRAY: + case F16SAMPLERBUFFER: + case F16SAMPLER2DMS: + case F16SAMPLER2DMSARRAY: + case F16SAMPLER1DSHADOW: + case F16SAMPLER2DSHADOW: + case F16SAMPLER1DARRAYSHADOW: + case F16SAMPLER2DARRAYSHADOW: + case F16SAMPLER2DRECTSHADOW: + case F16SAMPLERCUBESHADOW: + case F16SAMPLERCUBEARRAYSHADOW: + + case F16IMAGE1D: + case F16IMAGE2D: + case F16IMAGE3D: + case F16IMAGE2DRECT: + case F16IMAGECUBE: + case F16IMAGE1DARRAY: + case F16IMAGE2DARRAY: + case F16IMAGECUBEARRAY: + case F16IMAGEBUFFER: + case F16IMAGE2DMS: + case F16IMAGE2DMSARRAY: + + case F16TEXTURE1D: + case F16TEXTURE2D: + case F16TEXTURE3D: + case 
F16TEXTURE2DRECT: + case F16TEXTURECUBE: + case F16TEXTURE1DARRAY: + case F16TEXTURE2DARRAY: + case F16TEXTURECUBEARRAY: + case F16TEXTUREBUFFER: + case F16TEXTURE2DMS: + case F16TEXTURE2DMSARRAY: + + case F16SUBPASSINPUT: + case F16SUBPASSINPUTMS: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float_fetch)) + return keyword; + return identifierOrType(); + + case EXPLICITINTERPAMD: + if (parseContext.extensionTurnedOn(E_GL_AMD_shader_explicit_vertex_parameter)) + return keyword; + return identifierOrType(); + + case PERVERTEXNV: + if ((!parseContext.isEsProfile() && parseContext.version >= 450) || + parseContext.extensionTurnedOn(E_GL_NV_fragment_shader_barycentric)) + return keyword; + return identifierOrType(); + + case PRECISE: + if ((parseContext.isEsProfile() && + (parseContext.version >= 320 || parseContext.extensionsTurnedOn(Num_AEP_gpu_shader5, AEP_gpu_shader5))) || + (!parseContext.isEsProfile() && parseContext.version >= 400)) + return keyword; + if (parseContext.isEsProfile() && parseContext.version == 310) { + reservedWord(); + return keyword; + } + return identifierOrType(); + + case PERPRIMITIVENV: + case PERVIEWNV: + case PERTASKNV: + if ((!parseContext.isEsProfile() && parseContext.version >= 450) || + (parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionTurnedOn(E_GL_NV_mesh_shader)) + return keyword; + return identifierOrType(); + + case FCOOPMATNV: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_NV_cooperative_matrix)) + return keyword; + return identifierOrType(); + + case UCOOPMATNV: + case ICOOPMATNV: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_NV_integer_cooperative_matrix)) + return keyword; + return identifierOrType(); + + case DEMOTE: + if (parseContext.extensionTurnedOn(E_GL_EXT_demote_to_helper_invocation)) + return keyword; + else + return identifierOrType(); +#endif + + default: + parseContext.infoSink.info.message(EPrefixInternalError, "Unknown glslang keyword", loc); + return 0; + } +} + +int TScanContext::identifierOrType() +{ + parserToken->sType.lex.string = NewPoolTString(tokenText); + if (field) + return IDENTIFIER; + + parserToken->sType.lex.symbol = parseContext.symbolTable.find(*parserToken->sType.lex.string); + if ((afterType == false && afterStruct == false) && parserToken->sType.lex.symbol != nullptr) { + if (const TVariable* variable = parserToken->sType.lex.symbol->getAsVariable()) { + if (variable->isUserType() && + // treat redeclaration of forward-declared buffer/uniform reference as an identifier + !(variable->getType().isReference() && afterBuffer)) { + afterType = true; + + return TYPE_NAME; + } + } + } + + return IDENTIFIER; +} + +// Give an error for use of a reserved symbol. +// However, allow built-in declarations to use reserved words, to allow +// extension support before the extension is enabled. +int TScanContext::reservedWord() +{ + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.error(loc, "Reserved word.", tokenText, "", ""); + + return 0; +} + +int TScanContext::identifierOrReserved(bool reserved) +{ + if (reserved) { + reservedWord(); + + return 0; + } + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future reserved keyword", tokenText, ""); + + return identifierOrType(); +} + +// For keywords that suddenly showed up on non-ES (not previously reserved) +// but then got reserved by ES 3.0. +int TScanContext::es30ReservedFromGLSL(int version) +{ + if (parseContext.symbolTable.atBuiltInLevel()) + return keyword; + + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < version)) { + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "future reserved word in ES 300 and keyword in GLSL", tokenText, ""); + + return identifierOrType(); + } else if (parseContext.isEsProfile() && parseContext.version >= 300) + reservedWord(); + + return keyword; +} + +// For a keyword that was never reserved, until it suddenly +// showed up, both in an es version and a non-ES version. +int TScanContext::nonreservedKeyword(int esVersion, int nonEsVersion) +{ + if ((parseContext.isEsProfile() && parseContext.version < esVersion) || + (!parseContext.isEsProfile() && parseContext.version < nonEsVersion)) { + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future keyword", tokenText, ""); + + return identifierOrType(); + } + + return keyword; +} + +int TScanContext::precisionKeyword() +{ + if (parseContext.isEsProfile() || parseContext.version >= 130) + return keyword; + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using ES precision qualifier keyword", tokenText, ""); + + return identifierOrType(); +} + +int TScanContext::matNxM() +{ + afterType = true; + + if (parseContext.version > 110) + return keyword; + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future non-square matrix type keyword", tokenText, ""); + + return identifierOrType(); +} + +int TScanContext::dMat() +{ + afterType = true; + + if (parseContext.isEsProfile() && parseContext.version >= 300) { + reservedWord(); + + return keyword; + } + + if (!parseContext.isEsProfile() && (parseContext.version >= 400 || + parseContext.symbolTable.atBuiltInLevel() || + (parseContext.version >= 150 && parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_fp64)) || + (parseContext.version >= 150 && parseContext.extensionTurnedOn(E_GL_ARB_vertex_attrib_64bit) + && parseContext.language == EShLangVertex))) + return keyword; + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future type keyword", tokenText, ""); + + return identifierOrType(); +} + +int TScanContext::firstGenerationImage(bool inEs310) +{ + if (parseContext.symbolTable.atBuiltInLevel() || + (!parseContext.isEsProfile() && (parseContext.version >= 420 || + parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))) || + (inEs310 && parseContext.isEsProfile() && parseContext.version >= 310)) + return keyword; + + if ((parseContext.isEsProfile() && parseContext.version >= 300) || + (!parseContext.isEsProfile() && parseContext.version >= 130)) { + reservedWord(); + + return keyword; + } + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future type keyword", tokenText, ""); + + return identifierOrType(); +} + +int TScanContext::secondGenerationImage() +{ + if (parseContext.isEsProfile() && 
parseContext.version >= 310) { + reservedWord(); + return keyword; + } + + if (parseContext.symbolTable.atBuiltInLevel() || + (!parseContext.isEsProfile() && + (parseContext.version >= 420 || parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store)))) + return keyword; + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future type keyword", tokenText, ""); + + return identifierOrType(); +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/Scan.h b/dep/glslang/glslang/MachineIndependent/Scan.h new file mode 100644 index 000000000..24b75cf7c --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/Scan.h @@ -0,0 +1,276 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +#ifndef _GLSLANG_SCAN_INCLUDED_ +#define _GLSLANG_SCAN_INCLUDED_ + +#include "Versions.h" + +namespace glslang { + +// Use a global end-of-input character, so no translation is needed across +// layers of encapsulation. Characters are all 8 bit, and positive, so there is +// no aliasing of character 255 onto -1, for example. +const int EndOfInput = -1; + +// +// A character scanner that seamlessly, on read-only strings, reads across an +// array of strings without assuming null termination. +// +class TInputScanner { +public: + TInputScanner(int n, const char* const s[], size_t L[], const char* const* names = nullptr, + int b = 0, int f = 0, bool single = false) : + numSources(n), + // up to this point, common usage is "char*", but now we need positive 8-bit characters + sources(reinterpret_cast(s)), + lengths(L), currentSource(0), currentChar(0), stringBias(b), finale(f), singleLogical(single), + endOfFileReached(false) + { + loc = new TSourceLoc[numSources]; + for (int i = 0; i < numSources; ++i) { + loc[i].init(i - stringBias); + } + if (names != nullptr) { + for (int i = 0; i < numSources; ++i) + loc[i].name = names[i] != nullptr ? 
NewPoolTString(names[i]) : nullptr; + } + loc[currentSource].line = 1; + logicalSourceLoc.init(1); + logicalSourceLoc.name = loc[0].name; + } + + virtual ~TInputScanner() + { + delete [] loc; + } + + // retrieve the next character and advance one character + int get() + { + int ret = peek(); + if (ret == EndOfInput) + return ret; + ++loc[currentSource].column; + ++logicalSourceLoc.column; + if (ret == '\n') { + ++loc[currentSource].line; + ++logicalSourceLoc.line; + logicalSourceLoc.column = 0; + loc[currentSource].column = 0; + } + advance(); + + return ret; + } + + // retrieve the next character, no advance + int peek() + { + if (currentSource >= numSources) { + endOfFileReached = true; + return EndOfInput; + } + // Make sure we do not read off the end of a string. + // N.B. Sources can have a length of 0. + int sourceToRead = currentSource; + size_t charToRead = currentChar; + while(charToRead >= lengths[sourceToRead]) { + charToRead = 0; + sourceToRead += 1; + if (sourceToRead >= numSources) { + return EndOfInput; + } + } + + // Here, we care about making negative valued characters positive + return sources[sourceToRead][charToRead]; + } + + // go back one character + void unget() + { + // Do not roll back once we've reached the end of the file. + if (endOfFileReached) + return; + + if (currentChar > 0) { + --currentChar; + --loc[currentSource].column; + --logicalSourceLoc.column; + if (loc[currentSource].column < 0) { + // We've moved back past a new line. Find the + // previous newline (or start of the file) to compute + // the column count on the now current line. + size_t chIndex = currentChar; + while (chIndex > 0) { + if (sources[currentSource][chIndex] == '\n') { + break; + } + --chIndex; + } + logicalSourceLoc.column = (int)(currentChar - chIndex); + loc[currentSource].column = (int)(currentChar - chIndex); + } + } else { + do { + --currentSource; + } while (currentSource > 0 && lengths[currentSource] == 0); + if (lengths[currentSource] == 0) { + // set to 0 if we've backed up to the start of an empty string + currentChar = 0; + } else + currentChar = lengths[currentSource] - 1; + } + if (peek() == '\n') { + --loc[currentSource].line; + --logicalSourceLoc.line; + } + } + + // for #line override + void setLine(int newLine) + { + logicalSourceLoc.line = newLine; + loc[getLastValidSourceIndex()].line = newLine; + } + + // for #line override in filename based parsing + void setFile(const char* filename) + { + TString* fn_tstr = NewPoolTString(filename); + logicalSourceLoc.name = fn_tstr; + loc[getLastValidSourceIndex()].name = fn_tstr; + } + + void setFile(const char* filename, int i) + { + TString* fn_tstr = NewPoolTString(filename); + if (i == getLastValidSourceIndex()) { + logicalSourceLoc.name = fn_tstr; + } + loc[i].name = fn_tstr; + } + + void setString(int newString) + { + logicalSourceLoc.string = newString; + loc[getLastValidSourceIndex()].string = newString; + logicalSourceLoc.name = nullptr; + loc[getLastValidSourceIndex()].name = nullptr; + } + + // for #include content indentation + void setColumn(int col) + { + logicalSourceLoc.column = col; + loc[getLastValidSourceIndex()].column = col; + } + + void setEndOfInput() + { + endOfFileReached = true; + currentSource = numSources; + } + + bool atEndOfInput() const { return endOfFileReached; } + + const TSourceLoc& getSourceLoc() const + { + if (singleLogical) { + return logicalSourceLoc; + } else { + return loc[std::max(0, std::min(currentSource, numSources - finale - 1))]; + } + } + // Returns the index (starting from 0) 
of the most recent valid source string we are reading from. + int getLastValidSourceIndex() const { return std::min(currentSource, numSources - 1); } + + void consumeWhiteSpace(bool& foundNonSpaceTab); + bool consumeComment(); + void consumeWhitespaceComment(bool& foundNonSpaceTab); + bool scanVersion(int& version, EProfile& profile, bool& notFirstToken); + +protected: + + // advance one character + void advance() + { + ++currentChar; + if (currentChar >= lengths[currentSource]) { + ++currentSource; + if (currentSource < numSources) { + loc[currentSource].string = loc[currentSource - 1].string + 1; + loc[currentSource].line = 1; + loc[currentSource].column = 0; + } + while (currentSource < numSources && lengths[currentSource] == 0) { + ++currentSource; + if (currentSource < numSources) { + loc[currentSource].string = loc[currentSource - 1].string + 1; + loc[currentSource].line = 1; + loc[currentSource].column = 0; + } + } + currentChar = 0; + } + } + + int numSources; // number of strings in source + const unsigned char* const *sources; // array of strings; must be converted to positive values on use, to avoid aliasing with -1 as EndOfInput + const size_t *lengths; // length of each string + int currentSource; + size_t currentChar; + + // This is for reporting what string/line an error occurred on, and can be overridden by #line. + // It remembers the last state of each source string as it is left for the next one, so unget() + // can restore that state. + TSourceLoc* loc; // an array + + int stringBias; // the first string that is the user's string number 0 + int finale; // number of internal strings after user's last string + + TSourceLoc logicalSourceLoc; + bool singleLogical; // treats the strings as a single logical string. + // locations will be reported from the first string. + + // Set to true once peek() returns EndOfFile, so that we won't roll back + // once we've reached EndOfFile. + bool endOfFileReached; +}; + +} // end namespace glslang + +#endif // _GLSLANG_SCAN_INCLUDED_ diff --git a/dep/glslang/glslang/MachineIndependent/ScanContext.h b/dep/glslang/glslang/MachineIndependent/ScanContext.h new file mode 100644 index 000000000..74b2b3c74 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/ScanContext.h @@ -0,0 +1,93 @@ +// +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// This holds context specific to the GLSL scanner, which +// sits between the preprocessor scanner and parser. +// + +#pragma once + +#include "ParseHelper.h" + +namespace glslang { + +class TPpContext; +class TPpToken; +class TParserToken; + +class TScanContext { +public: + explicit TScanContext(TParseContextBase& pc) : + parseContext(pc), + afterType(false), afterStruct(false), + field(false), afterBuffer(false) { } + virtual ~TScanContext() { } + + static void fillInKeywordMap(); + static void deleteKeywordMap(); + + int tokenize(TPpContext*, TParserToken&); + +protected: + TScanContext(TScanContext&); + TScanContext& operator=(TScanContext&); + + int tokenizeIdentifier(); + int identifierOrType(); + int reservedWord(); + int identifierOrReserved(bool reserved); + int es30ReservedFromGLSL(int version); + int nonreservedKeyword(int esVersion, int nonEsVersion); + int precisionKeyword(); + int matNxM(); + int dMat(); + int firstGenerationImage(bool inEs310); + int secondGenerationImage(); + + TParseContextBase& parseContext; + bool afterType; // true if we've recognized a type, so can only be looking for an identifier + bool afterStruct; // true if we've recognized the STRUCT keyword, so can only be looking for an identifier + bool field; // true if we're on a field, right after a '.' + bool afterBuffer; // true if we've recognized the BUFFER keyword + TSourceLoc loc; + TParserToken* parserToken; + TPpToken* ppToken; + + const char* tokenText; + int keyword; +}; + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/ShaderLang.cpp b/dep/glslang/glslang/MachineIndependent/ShaderLang.cpp new file mode 100644 index 000000000..a1392dbde --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/ShaderLang.cpp @@ -0,0 +1,2118 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013-2016 LunarG, Inc. +// Copyright (C) 2015-2020 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Implement the top-level of interface to the compiler/linker,
+// as defined in ShaderLang.h
+// This is the platform independent interface between an OGL driver
+// and the shading language compiler/linker.
+//
+#include <cstring>
+#include <iostream>
+#include <sstream>
+#include <memory>
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "Scan.h"
+#include "ScanContext.h"
+
+#ifdef ENABLE_HLSL
+#include "../../hlsl/hlslParseHelper.h"
+#include "../../hlsl/hlslParseables.h"
+#include "../../hlsl/hlslScanContext.h"
+#endif
+
+#include "../Include/ShHandle.h"
+#include "../../OGLCompilersDLL/InitializeDll.h"
+
+#include "preprocessor/PpContext.h"
+
+#define SH_EXPORTING
+#include "../Public/ShaderLang.h"
+#include "reflection.h"
+#include "iomapper.h"
+#include "Initialize.h"
+
+// TODO: this really shouldn't be here, it is only because of the trial addition
+// of printing pre-processed tokens, which requires knowing the string literal
+// token to print ", but none of that seems appropriate for this file.
+#include "preprocessor/PpTokens.h"
+
+namespace { // anonymous namespace for file-local functions and symbols
+
+// Total number of successful initializers of glslang: a refcount
+// Shared global; access should be protected by a global mutex/critical section.
+int NumberOfClients = 0;
+
+using namespace glslang;
+
+// Create a language specific version of parseables.
+TBuiltInParseables* CreateBuiltInParseables(TInfoSink& infoSink, EShSource source)
+{
+    switch (source) {
+    case EShSourceGlsl: return new TBuiltIns();              // GLSL builtIns
+#ifdef ENABLE_HLSL
+    case EShSourceHlsl: return new TBuiltInParseablesHlsl(); // HLSL intrinsics
+#endif
+
+    default:
+        infoSink.info.message(EPrefixInternalError, "Unable to determine source language");
+        return nullptr;
+    }
+}
+
+// Create a language specific version of a parse context.
+TParseContextBase* CreateParseContext(TSymbolTable& symbolTable, TIntermediate& intermediate,
+                                      int version, EProfile profile, EShSource source,
+                                      EShLanguage language, TInfoSink& infoSink,
+                                      SpvVersion spvVersion, bool forwardCompatible, EShMessages messages,
+                                      bool parsingBuiltIns, std::string sourceEntryPointName = "")
+{
+    switch (source) {
+    case EShSourceGlsl: {
+        if (sourceEntryPointName.size() == 0)
+            intermediate.setEntryPointName("main");
+        TString entryPoint = sourceEntryPointName.c_str();
+        return new TParseContext(symbolTable, intermediate, parsingBuiltIns, version, profile, spvVersion,
+                                 language, infoSink, forwardCompatible, messages, &entryPoint);
+    }
+#ifdef ENABLE_HLSL
+    case EShSourceHlsl:
+        return new HlslParseContext(symbolTable, intermediate, parsingBuiltIns, version, profile, spvVersion,
+                                    language, infoSink, sourceEntryPointName.c_str(), forwardCompatible, messages);
+#endif
+    default:
+        infoSink.info.message(EPrefixInternalError, "Unable to determine source language");
+        return nullptr;
+    }
+}
+
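+// (editorial note, not upstream glslang code): these two factories are the only points in
+// this file that pick a front end; everything below works through the TBuiltInParseables
+// and TParseContextBase interfaces. A minimal sketch of how InitializeSymbolTables()
+// further down drives them:
+//
+//     std::unique_ptr<TBuiltInParseables> parseables(CreateBuiltInParseables(infoSink, EShSourceGlsl));
+//     if (parseables != nullptr)
+//         parseables->initialize(version, profile, spvVersion);
+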
+// Local mapping functions for making arrays of symbol tables....
+
+const int VersionCount = 17;  // index range in MapVersionToIndex
+
+int MapVersionToIndex(int version)
+{
+    int index = 0;
+
+    switch (version) {
+    case 100: index = 0;  break;
+    case 110: index = 1;  break;
+    case 120: index = 2;  break;
+    case 130: index = 3;  break;
+    case 140: index = 4;  break;
+    case 150: index = 5;  break;
+    case 300: index = 6;  break;
+    case 330: index = 7;  break;
+    case 400: index = 8;  break;
+    case 410: index = 9;  break;
+    case 420: index = 10; break;
+    case 430: index = 11; break;
+    case 440: index = 12; break;
+    case 310: index = 13; break;
+    case 450: index = 14; break;
+    case 500: index = 0;  break; // HLSL
+    case 320: index = 15; break;
+    case 460: index = 16; break;
+    default:  assert(0);  break;
+    }
+
+    assert(index < VersionCount);
+
+    return index;
+}
+
+const int SpvVersionCount = 3;  // index range in MapSpvVersionToIndex
+
+int MapSpvVersionToIndex(const SpvVersion& spvVersion)
+{
+    int index = 0;
+
+    if (spvVersion.openGl > 0)
+        index = 1;
+    else if (spvVersion.vulkan > 0)
+        index = 2;
+
+    assert(index < SpvVersionCount);
+
+    return index;
+}
+
+const int ProfileCount = 4;  // index range in MapProfileToIndex
+
+int MapProfileToIndex(EProfile profile)
+{
+    int index = 0;
+
+    switch (profile) {
+    case ENoProfile:            index = 0; break;
+    case ECoreProfile:          index = 1; break;
+    case ECompatibilityProfile: index = 2; break;
+    case EEsProfile:            index = 3; break;
+    default:                    break;
+    }
+
+    assert(index < ProfileCount);
+
+    return index;
+}
+
+const int SourceCount = 2;
+
+int MapSourceToIndex(EShSource source)
+{
+    int index = 0;
+
+    switch (source) {
+    case EShSourceGlsl: index = 0; break;
+    case EShSourceHlsl: index = 1; break;
+    default:            break;
+    }
+
+    assert(index < SourceCount);
+
+    return index;
+}
+
+// only one of these needed for non-ES; ES needs 2 for different precision defaults of built-ins
+enum EPrecisionClass {
+    EPcGeneral,
+    EPcFragment,
+    EPcCount
+};
+
+// A process-global symbol table per version per profile for built-ins common
+// to multiple stages (languages), and a process-global symbol table per version
+// per profile per stage for built-ins unique to each stage. They will be sparsely
+// populated, so they will only be generated as needed.
+//
+// Each has a different set of built-ins, and we want to preserve that from
+// compile to compile.
+//
+TSymbolTable* CommonSymbolTable[VersionCount][SpvVersionCount][ProfileCount][SourceCount][EPcCount] = {};
+TSymbolTable* SharedSymbolTables[VersionCount][SpvVersionCount][ProfileCount][SourceCount][EShLangCount] = {};
+
+TPoolAllocator* PerProcessGPA = nullptr;
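+
+// (editorial note, not upstream glslang code): a cached table is addressed by composing
+// the Map*ToIndex() helpers above, one per array dimension, e.g.:
+//
+//     TSymbolTable* common = CommonSymbolTable[MapVersionToIndex(450)]
+//                                             [MapSpvVersionToIndex(spvVersion)]
+//                                             [MapProfileToIndex(ECoreProfile)]
+//                                             [MapSourceToIndex(EShSourceGlsl)]
+//                                             [EPcGeneral];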
This
+    // push should not have a corresponding pop, so that built-ins
+    // are preserved, and the test for an empty table fails.
+    //
+
+    symbolTable.push();
+
+    const char* builtInShaders[2];
+    size_t builtInLengths[2];
+    builtInShaders[0] = builtIns.c_str();
+    builtInLengths[0] = builtIns.size();
+
+    if (builtInLengths[0] == 0)
+        return true;
+
+    TInputScanner input(1, builtInShaders, builtInLengths);
+    if (! parseContext->parseShaderStrings(ppContext, input)) {
+        infoSink.info.message(EPrefixInternalError, "Unable to parse built-ins");
+        printf("Unable to parse built-ins\n%s\n", infoSink.info.c_str());
+        printf("%s\n", builtInShaders[0]);
+
+        return false;
+    }
+
+    return true;
+}
+
+int CommonIndex(EProfile profile, EShLanguage language)
+{
+    return (profile == EEsProfile && language == EShLangFragment) ? EPcFragment : EPcGeneral;
+}
+
+//
+// To initialize per-stage shared tables, with the common table already complete.
+//
+void InitializeStageSymbolTable(TBuiltInParseables& builtInParseables, int version, EProfile profile, const SpvVersion& spvVersion,
+                                EShLanguage language, EShSource source, TInfoSink& infoSink, TSymbolTable** commonTable,
+                                TSymbolTable** symbolTables)
+{
+#ifdef GLSLANG_WEB
+    profile = EEsProfile;
+    version = 310;
+#endif
+
+    (*symbolTables[language]).adoptLevels(*commonTable[CommonIndex(profile, language)]);
+    InitializeSymbolTable(builtInParseables.getStageString(language), version, profile, spvVersion, language, source,
+                          infoSink, *symbolTables[language]);
+    builtInParseables.identifyBuiltIns(version, profile, spvVersion, language, *symbolTables[language]);
+    if (profile == EEsProfile && version >= 300)
+        (*symbolTables[language]).setNoBuiltInRedeclarations();
+    if (version == 110)
+        (*symbolTables[language]).setSeparateNameSpaces();
+}
+
+//
+// Initialize the full set of shareable symbol tables:
+// the common (cross-stage) ones, and those shareable per-stage.
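+// Each per-stage table shares (adopts) the levels of the already-built common
+// table rather than copying them; roughly, per InitializeStageSymbolTable above:
+//
+//     stageTable.adoptLevels(commonTable);      // share cross-stage built-ins
+//     InitializeSymbolTable(stageString, ...);  // then parse stage-only built-ins
+//     builtInParseables.identifyBuiltIns(...);  // finally tag them as built-in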
+// +bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TSymbolTable** symbolTables, int version, EProfile profile, const SpvVersion& spvVersion, EShSource source) +{ +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#endif + + std::unique_ptr builtInParseables(CreateBuiltInParseables(infoSink, source)); + + if (builtInParseables == nullptr) + return false; + + builtInParseables->initialize(version, profile, spvVersion); + + // do the common tables + InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, EShLangVertex, source, + infoSink, *commonTable[EPcGeneral]); + if (profile == EEsProfile) + InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, EShLangFragment, source, + infoSink, *commonTable[EPcFragment]); + + // do the per-stage tables + + // always have vertex and fragment + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangVertex, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangFragment, source, + infoSink, commonTable, symbolTables); + +#ifndef GLSLANG_WEB + // check for tessellation + if ((profile != EEsProfile && version >= 150) || + (profile == EEsProfile && version >= 310)) { + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTessControl, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTessEvaluation, source, + infoSink, commonTable, symbolTables); + } + + // check for geometry + if ((profile != EEsProfile && version >= 150) || + (profile == EEsProfile && version >= 310)) + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangGeometry, source, + infoSink, commonTable, symbolTables); +#endif + + // check for compute + if ((profile != EEsProfile && version >= 420) || + (profile == EEsProfile && version >= 310)) + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCompute, source, + infoSink, commonTable, symbolTables); + + // check for ray tracing stages + if (profile != EEsProfile && version >= 450) { + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangRayGen, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangIntersect, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangAnyHit, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangClosestHit, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMiss, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCallable, source, + infoSink, commonTable, symbolTables); + } + + // check for mesh + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMeshNV, source, + infoSink, commonTable, symbolTables); + + // check for task + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) + InitializeStageSymbolTable(*builtInParseables, 
version, profile, spvVersion, EShLangTaskNV, source, + infoSink, commonTable, symbolTables); + + return true; +} + +bool AddContextSpecificSymbols(const TBuiltInResource* resources, TInfoSink& infoSink, TSymbolTable& symbolTable, int version, + EProfile profile, const SpvVersion& spvVersion, EShLanguage language, EShSource source) +{ + std::unique_ptr builtInParseables(CreateBuiltInParseables(infoSink, source)); + + if (builtInParseables == nullptr) + return false; + + builtInParseables->initialize(*resources, version, profile, spvVersion, language); + InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, language, source, infoSink, symbolTable); + builtInParseables->identifyBuiltIns(version, profile, spvVersion, language, symbolTable, *resources); + + return true; +} + +// +// To do this on the fly, we want to leave the current state of our thread's +// pool allocator intact, so: +// - Switch to a new pool for parsing the built-ins +// - Do the parsing, which builds the symbol table, using the new pool +// - Switch to the process-global pool to save a copy of the resulting symbol table +// - Free up the new pool used to parse the built-ins +// - Switch back to the original thread's pool +// +// This only gets done the first time any thread needs a particular symbol table +// (lazy evaluation). +// +void SetupBuiltinSymbolTable(int version, EProfile profile, const SpvVersion& spvVersion, EShSource source) +{ + TInfoSink infoSink; + + // Make sure only one thread tries to do this at a time + glslang::GetGlobalLock(); + + // See if it's already been done for this version/profile combination + int versionIndex = MapVersionToIndex(version); + int spvVersionIndex = MapSpvVersionToIndex(spvVersion); + int profileIndex = MapProfileToIndex(profile); + int sourceIndex = MapSourceToIndex(source); + if (CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][EPcGeneral]) { + glslang::ReleaseGlobalLock(); + + return; + } + + // Switch to a new pool + TPoolAllocator& previousAllocator = GetThreadPoolAllocator(); + TPoolAllocator* builtInPoolAllocator = new TPoolAllocator; + SetThreadPoolAllocator(builtInPoolAllocator); + + // Dynamically allocate the local symbol tables so we can control when they are deallocated WRT when the pool is popped. + TSymbolTable* commonTable[EPcCount]; + TSymbolTable* stageTables[EShLangCount]; + for (int precClass = 0; precClass < EPcCount; ++precClass) + commonTable[precClass] = new TSymbolTable; + for (int stage = 0; stage < EShLangCount; ++stage) + stageTables[stage] = new TSymbolTable; + + // Generate the local symbol tables using the new pool + InitializeSymbolTables(infoSink, commonTable, stageTables, version, profile, spvVersion, source); + + // Switch to the process-global pool + SetThreadPoolAllocator(PerProcessGPA); + + // Copy the local symbol tables from the new pool to the global tables using the process-global pool + for (int precClass = 0; precClass < EPcCount; ++precClass) { + if (! commonTable[precClass]->isEmpty()) { + CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass] = new TSymbolTable; + CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass]->copyTable(*commonTable[precClass]); + CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass]->readOnly(); + } + } + for (int stage = 0; stage < EShLangCount; ++stage) { + if (! 
stageTables[stage]->isEmpty()) { + SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage] = new TSymbolTable; + SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->adoptLevels(*CommonSymbolTable + [versionIndex][spvVersionIndex][profileIndex][sourceIndex][CommonIndex(profile, (EShLanguage)stage)]); + SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->copyTable(*stageTables[stage]); + SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->readOnly(); + } + } + + // Clean up the local tables before deleting the pool they used. + for (int precClass = 0; precClass < EPcCount; ++precClass) + delete commonTable[precClass]; + for (int stage = 0; stage < EShLangCount; ++stage) + delete stageTables[stage]; + + delete builtInPoolAllocator; + SetThreadPoolAllocator(&previousAllocator); + + glslang::ReleaseGlobalLock(); +} + +// Function to Print all builtins +void DumpBuiltinSymbolTable(TInfoSink& infoSink, const TSymbolTable& symbolTable) +{ +#ifndef GLSLANG_WEB + infoSink.debug << "BuiltinSymbolTable {\n"; + + symbolTable.dump(infoSink, true); + + infoSink.debug << "}\n"; +#endif +} + +// Return true if the shader was correctly specified for version/profile/stage. +bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNotFirst, int defaultVersion, + EShSource source, int& version, EProfile& profile, const SpvVersion& spvVersion) +{ + const int FirstProfileVersion = 150; + bool correct = true; + + if (source == EShSourceHlsl) { + version = 500; // shader model; currently a characteristic of glslang, not the input + profile = ECoreProfile; // allow doubles in prototype parsing + return correct; + } + + // Get a version... + if (version == 0) { + version = defaultVersion; + // infoSink.info.message(EPrefixWarning, "#version: statement missing; use #version on first line of shader"); + } + + // Get a good profile... + if (profile == ENoProfile) { + if (version == 300 || version == 310 || version == 320) { + correct = false; + infoSink.info.message(EPrefixError, "#version: versions 300, 310, and 320 require specifying the 'es' profile"); + profile = EEsProfile; + } else if (version == 100) + profile = EEsProfile; + else if (version >= FirstProfileVersion) + profile = ECoreProfile; + else + profile = ENoProfile; + } else { + // a profile was provided... + if (version < 150) { + correct = false; + infoSink.info.message(EPrefixError, "#version: versions before 150 do not allow a profile token"); + if (version == 100) + profile = EEsProfile; + else + profile = ENoProfile; + } else if (version == 300 || version == 310 || version == 320) { + if (profile != EEsProfile) { + correct = false; + infoSink.info.message(EPrefixError, "#version: versions 300, 310, and 320 support only the es profile"); + } + profile = EEsProfile; + } else { + if (profile == EEsProfile) { + correct = false; + infoSink.info.message(EPrefixError, "#version: only version 300, 310, and 320 support the es profile"); + if (version >= FirstProfileVersion) + profile = ECoreProfile; + else + profile = ENoProfile; + } + // else: typical desktop case... e.g., "#version 410 core" + } + } + + // Fix version... 
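+    // For example, a well-formed "#version 460" falls through the switch below
+    // unchanged, while an unrecognized "#version 425" is reported as an error
+    // and forced to a supported default (310 for es, otherwise 450 core) so
+    // that parsing can still proceed.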
+ switch (version) { + // ES versions + case 100: break; + case 300: break; + case 310: break; + case 320: break; + + // desktop versions + case 110: break; + case 120: break; + case 130: break; + case 140: break; + case 150: break; + case 330: break; + case 400: break; + case 410: break; + case 420: break; + case 430: break; + case 440: break; + case 450: break; + case 460: break; + + // unknown version + default: + correct = false; + infoSink.info.message(EPrefixError, "version not supported"); + if (profile == EEsProfile) + version = 310; + else { + version = 450; + profile = ECoreProfile; + } + break; + } + +#ifndef GLSLANG_WEB + // Correct for stage type... + switch (stage) { + case EShLangGeometry: + if ((profile == EEsProfile && version < 310) || + (profile != EEsProfile && version < 150)) { + correct = false; + infoSink.info.message(EPrefixError, "#version: geometry shaders require es profile with version 310 or non-es profile with version 150 or above"); + version = (profile == EEsProfile) ? 310 : 150; + if (profile == EEsProfile || profile == ENoProfile) + profile = ECoreProfile; + } + break; + case EShLangTessControl: + case EShLangTessEvaluation: + if ((profile == EEsProfile && version < 310) || + (profile != EEsProfile && version < 150)) { + correct = false; + infoSink.info.message(EPrefixError, "#version: tessellation shaders require es profile with version 310 or non-es profile with version 150 or above"); + version = (profile == EEsProfile) ? 310 : 400; // 150 supports the extension, correction is to 400 which does not + if (profile == EEsProfile || profile == ENoProfile) + profile = ECoreProfile; + } + break; + case EShLangCompute: + if ((profile == EEsProfile && version < 310) || + (profile != EEsProfile && version < 420)) { + correct = false; + infoSink.info.message(EPrefixError, "#version: compute shaders require es profile with version 310 or above, or non-es profile with version 420 or above"); + version = profile == EEsProfile ? 310 : 420; + } + break; + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + if (profile == EEsProfile || version < 460) { + correct = false; + infoSink.info.message(EPrefixError, "#version: ray tracing shaders require non-es profile with version 460 or above"); + version = 460; + } + break; + case EShLangMeshNV: + case EShLangTaskNV: + if ((profile == EEsProfile && version < 320) || + (profile != EEsProfile && version < 450)) { + correct = false; + infoSink.info.message(EPrefixError, "#version: mesh/task shaders require es profile with version 320 or above, or non-es profile with version 450 or above"); + version = profile == EEsProfile ? 
320 : 450; + } + default: + break; + } + + if (profile == EEsProfile && version >= 300 && versionNotFirst) { + correct = false; + infoSink.info.message(EPrefixError, "#version: statement must appear first in es-profile shader; before comments or newlines"); + } + + // Check for SPIR-V compatibility + if (spvVersion.spv != 0) { + switch (profile) { + case EEsProfile: + if (version < 310) { + correct = false; + infoSink.info.message(EPrefixError, "#version: ES shaders for SPIR-V require version 310 or higher"); + version = 310; + } + break; + case ECompatibilityProfile: + infoSink.info.message(EPrefixError, "#version: compilation for SPIR-V does not support the compatibility profile"); + break; + default: + if (spvVersion.vulkan > 0 && version < 140) { + correct = false; + infoSink.info.message(EPrefixError, "#version: Desktop shaders for Vulkan SPIR-V require version 140 or higher"); + version = 140; + } + if (spvVersion.openGl >= 100 && version < 330) { + correct = false; + infoSink.info.message(EPrefixError, "#version: Desktop shaders for OpenGL SPIR-V require version 330 or higher"); + version = 330; + } + break; + } + } +#endif + + return correct; +} + +// There are multiple paths in for setting environment stuff. +// TEnvironment takes precedence, for what it sets, so sort all this out. +// Ideally, the internal code could be made to use TEnvironment, but for +// now, translate it to the historically used parameters. +void TranslateEnvironment(const TEnvironment* environment, EShMessages& messages, EShSource& source, + EShLanguage& stage, SpvVersion& spvVersion) +{ + // Set up environmental defaults, first ignoring 'environment'. + if (messages & EShMsgSpvRules) + spvVersion.spv = EShTargetSpv_1_0; + if (messages & EShMsgVulkanRules) { + spvVersion.vulkan = EShTargetVulkan_1_0; + spvVersion.vulkanGlsl = 100; + } else if (spvVersion.spv != 0) + spvVersion.openGl = 100; + + // Now, override, based on any content set in 'environment'. + // 'environment' must be cleared to ESh*None settings when items + // are not being set. + if (environment != nullptr) { + // input language + if (environment->input.languageFamily != EShSourceNone) { + stage = environment->input.stage; + switch (environment->input.dialect) { + case EShClientNone: + break; + case EShClientVulkan: + spvVersion.vulkanGlsl = environment->input.dialectVersion; + break; + case EShClientOpenGL: + spvVersion.openGl = environment->input.dialectVersion; + break; + case EShClientCount: + assert(0); + break; + } + switch (environment->input.languageFamily) { + case EShSourceNone: + break; + case EShSourceGlsl: + source = EShSourceGlsl; + messages = static_cast(messages & ~EShMsgReadHlsl); + break; + case EShSourceHlsl: + source = EShSourceHlsl; + messages = static_cast(messages | EShMsgReadHlsl); + break; + case EShSourceCount: + assert(0); + break; + } + } + + // client + switch (environment->client.client) { + case EShClientVulkan: + spvVersion.vulkan = environment->client.version; + break; + default: + break; + } + + // generated code + switch (environment->target.language) { + case EshTargetSpv: + spvVersion.spv = environment->target.version; + break; + default: + break; + } + } +} + +// Most processes are recorded when set in the intermediate representation, +// These are the few that are not. 
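+// For example, compiling with EShMsgRelaxedErrors and a source entry point of
+// "mainVS" would record (sketch; "mainVS" is a hypothetical name):
+//
+//     relaxed-errors
+//     source-entrypoint mainVS
+//
+// so downstream tools can see which transforms were requested.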
+void RecordProcesses(TIntermediate& intermediate, EShMessages messages, const std::string& sourceEntryPointName) +{ + if ((messages & EShMsgRelaxedErrors) != 0) + intermediate.addProcess("relaxed-errors"); + if ((messages & EShMsgSuppressWarnings) != 0) + intermediate.addProcess("suppress-warnings"); + if ((messages & EShMsgKeepUncalled) != 0) + intermediate.addProcess("keep-uncalled"); + if (sourceEntryPointName.size() > 0) { + intermediate.addProcess("source-entrypoint"); + intermediate.addProcessArgument(sourceEntryPointName); + } +} + +// This is the common setup and cleanup code for PreprocessDeferred and +// CompileDeferred. +// It takes any callable with a signature of +// bool (TParseContextBase& parseContext, TPpContext& ppContext, +// TInputScanner& input, bool versionWillBeError, +// TSymbolTable& , TIntermediate& , +// EShOptimizationLevel , EShMessages ); +// Which returns false if a failure was detected and true otherwise. +// +template +bool ProcessDeferred( + TCompiler* compiler, + const char* const shaderStrings[], + const int numStrings, + const int* inputLengths, + const char* const stringNames[], + const char* customPreamble, + const EShOptimizationLevel optLevel, + const TBuiltInResource* resources, + int defaultVersion, // use 100 for ES environment, 110 for desktop; this is the GLSL version, not SPIR-V or Vulkan + EProfile defaultProfile, + // set version/profile to defaultVersion/defaultProfile regardless of the #version + // directive in the source code + bool forceDefaultVersionAndProfile, + bool forwardCompatible, // give errors for use of deprecated features + EShMessages messages, // warnings/errors/AST; things to print out + TIntermediate& intermediate, // returned tree, etc. + ProcessingContext& processingContext, + bool requireNonempty, + TShader::Includer& includer, + const std::string sourceEntryPointName = "", + const TEnvironment* environment = nullptr) // optional way of fully setting all versions, overriding the above +{ + // This must be undone (.pop()) by the caller, after it finishes consuming the created tree. + GetThreadPoolAllocator().push(); + + if (numStrings == 0) + return true; + + // Move to length-based strings, rather than null-terminated strings. + // Also, add strings to include the preamble and to ensure the shader is not null, + // which lets the grammar accept what was a null (post preprocessing) shader. + // + // Shader will look like + // string 0: system preamble + // string 1: custom preamble + // string 2...numStrings+1: user's shader + // string numStrings+2: "int;" + const int numPre = 2; + const int numPost = requireNonempty? 1 : 0; + const int numTotal = numPre + numStrings + numPost; + std::unique_ptr lengths(new size_t[numTotal]); + std::unique_ptr strings(new const char*[numTotal]); + std::unique_ptr names(new const char*[numTotal]); + for (int s = 0; s < numStrings; ++s) { + strings[s + numPre] = shaderStrings[s]; + if (inputLengths == nullptr || inputLengths[s] < 0) + lengths[s + numPre] = strlen(shaderStrings[s]); + else + lengths[s + numPre] = inputLengths[s]; + } + if (stringNames != nullptr) { + for (int s = 0; s < numStrings; ++s) + names[s + numPre] = stringNames[s]; + } else { + for (int s = 0; s < numStrings; ++s) + names[s + numPre] = nullptr; + } + + // Get all the stages, languages, clients, and other environment + // stuff sorted out. + EShSource sourceGuess = (messages & EShMsgReadHlsl) != 0 ? 
EShSourceHlsl : EShSourceGlsl; + SpvVersion spvVersion; + EShLanguage stage = compiler->getLanguage(); + TranslateEnvironment(environment, messages, sourceGuess, stage, spvVersion); +#ifdef ENABLE_HLSL + EShSource source = sourceGuess; + if (environment != nullptr && environment->target.hlslFunctionality1) + intermediate.setHlslFunctionality1(); +#else + const EShSource source = EShSourceGlsl; +#endif + // First, without using the preprocessor or parser, find the #version, so we know what + // symbol tables, processing rules, etc. to set up. This does not need the extra strings + // outlined above, just the user shader, after the system and user preambles. + glslang::TInputScanner userInput(numStrings, &strings[numPre], &lengths[numPre]); + int version = 0; + EProfile profile = ENoProfile; + bool versionNotFirstToken = false; + bool versionNotFirst = (source == EShSourceHlsl) + ? true + : userInput.scanVersion(version, profile, versionNotFirstToken); + bool versionNotFound = version == 0; + if (forceDefaultVersionAndProfile && source == EShSourceGlsl) { +#ifndef GLSLANG_WEB + if (! (messages & EShMsgSuppressWarnings) && ! versionNotFound && + (version != defaultVersion || profile != defaultProfile)) { + compiler->infoSink.info << "Warning, (version, profile) forced to be (" + << defaultVersion << ", " << ProfileName(defaultProfile) + << "), while in source code it is (" + << version << ", " << ProfileName(profile) << ")\n"; + } +#endif + if (versionNotFound) { + versionNotFirstToken = false; + versionNotFirst = false; + versionNotFound = false; + } + version = defaultVersion; + profile = defaultProfile; + } + + bool goodVersion = DeduceVersionProfile(compiler->infoSink, stage, + versionNotFirst, defaultVersion, source, version, profile, spvVersion); +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#endif + + bool versionWillBeError = (versionNotFound || (profile == EEsProfile && version >= 300 && versionNotFirst)); +#ifndef GLSLANG_WEB + bool warnVersionNotFirst = false; + if (! versionWillBeError && versionNotFirstToken) { + if (messages & EShMsgRelaxedErrors) + warnVersionNotFirst = true; + else + versionWillBeError = true; + } +#endif + + intermediate.setSource(source); + intermediate.setVersion(version); + intermediate.setProfile(profile); + intermediate.setSpv(spvVersion); + RecordProcesses(intermediate, messages, sourceEntryPointName); + if (spvVersion.vulkan > 0) + intermediate.setOriginUpperLeft(); +#ifdef ENABLE_HLSL + if ((messages & EShMsgHlslOffsets) || source == EShSourceHlsl) + intermediate.setHlslOffsets(); +#endif + if (messages & EShMsgDebugInfo) { + intermediate.setSourceFile(names[numPre]); + for (int s = 0; s < numStrings; ++s) { + // The string may not be null-terminated, so make sure we provide + // the length along with the string. + intermediate.addSourceText(strings[numPre + s], lengths[numPre + s]); + } + } + SetupBuiltinSymbolTable(version, profile, spvVersion, source); + + TSymbolTable* cachedTable = SharedSymbolTables[MapVersionToIndex(version)] + [MapSpvVersionToIndex(spvVersion)] + [MapProfileToIndex(profile)] + [MapSourceToIndex(source)] + [stage]; + + // Dynamically allocate the symbol table so we can control when it is deallocated WRT the pool. + std::unique_ptr symbolTable(new TSymbolTable); + if (cachedTable) + symbolTable->adoptLevels(*cachedTable); + + // Add built-in symbols that are potentially context dependent; + // they get popped again further down. + if (! 
AddContextSpecificSymbols(resources, compiler->infoSink, *symbolTable, version, profile, spvVersion, + stage, source)) { + return false; + } + + if (messages & EShMsgBuiltinSymbolTable) + DumpBuiltinSymbolTable(compiler->infoSink, *symbolTable); + + // + // Now we can process the full shader under proper symbols and rules. + // + + std::unique_ptr parseContext(CreateParseContext(*symbolTable, intermediate, version, profile, source, + stage, compiler->infoSink, + spvVersion, forwardCompatible, messages, false, sourceEntryPointName)); + TPpContext ppContext(*parseContext, names[numPre] ? names[numPre] : "", includer); + + // only GLSL (bison triggered, really) needs an externally set scan context + glslang::TScanContext scanContext(*parseContext); + if (source == EShSourceGlsl) + parseContext->setScanContext(&scanContext); + + parseContext->setPpContext(&ppContext); + parseContext->setLimits(*resources); + if (! goodVersion) + parseContext->addError(); +#ifndef GLSLANG_WEB + if (warnVersionNotFirst) { + TSourceLoc loc; + loc.init(); + parseContext->warn(loc, "Illegal to have non-comment, non-whitespace tokens before #version", "#version", ""); + } +#endif + + parseContext->initializeExtensionBehavior(); + + // Fill in the strings as outlined above. + std::string preamble; + parseContext->getPreamble(preamble); + strings[0] = preamble.c_str(); + lengths[0] = strlen(strings[0]); + names[0] = nullptr; + strings[1] = customPreamble; + lengths[1] = strlen(strings[1]); + names[1] = nullptr; + assert(2 == numPre); + if (requireNonempty) { + const int postIndex = numStrings + numPre; + strings[postIndex] = "\n int;"; + lengths[postIndex] = strlen(strings[numStrings + numPre]); + names[postIndex] = nullptr; + } + TInputScanner fullInput(numStrings + numPre + numPost, strings.get(), lengths.get(), names.get(), numPre, numPost); + + // Push a new symbol allocation scope that will get used for the shader's globals. + symbolTable->push(); + + bool success = processingContext(*parseContext, ppContext, fullInput, + versionWillBeError, *symbolTable, + intermediate, optLevel, messages); + return success; +} + +#ifndef GLSLANG_WEB + +// Responsible for keeping track of the most recent source string and line in +// the preprocessor and outputting newlines appropriately if the source string +// or line changes. +class SourceLineSynchronizer { +public: + SourceLineSynchronizer(const std::function& lastSourceIndex, + std::string* output) + : getLastSourceIndex(lastSourceIndex), output(output), lastSource(-1), lastLine(0) {} +// SourceLineSynchronizer(const SourceLineSynchronizer&) = delete; +// SourceLineSynchronizer& operator=(const SourceLineSynchronizer&) = delete; + + // Sets the internally tracked source string index to that of the most + // recently read token. If we switched to a new source string, returns + // true and inserts a newline. Otherwise, returns false and outputs nothing. + bool syncToMostRecentString() { + if (getLastSourceIndex() != lastSource) { + // After switching to a new source string, we need to reset lastLine + // because line number resets every time a new source string is + // used. We also need to output a newline to separate the output + // from the previous source string (if there is one). + if (lastSource != -1 || lastLine != 0) + *output += '\n'; + lastSource = getLastSourceIndex(); + lastLine = -1; + return true; + } + return false; + } + + // Calls syncToMostRecentString() and then sets the internally tracked line + // number to tokenLine. 
If we switched to a new line, returns true and inserts + // newlines appropriately. Otherwise, returns false and outputs nothing. + bool syncToLine(int tokenLine) { + syncToMostRecentString(); + const bool newLineStarted = lastLine < tokenLine; + for (; lastLine < tokenLine; ++lastLine) { + if (lastLine > 0) *output += '\n'; + } + return newLineStarted; + } + + // Sets the internally tracked line number to newLineNum. + void setLineNum(int newLineNum) { lastLine = newLineNum; } + +private: + SourceLineSynchronizer& operator=(const SourceLineSynchronizer&); + + // A function for getting the index of the last valid source string we've + // read tokens from. + const std::function getLastSourceIndex; + // output string for newlines. + std::string* output; + // lastSource is the source string index (starting from 0) of the last token + // processed. It is tracked in order for newlines to be inserted when a new + // source string starts. -1 means we haven't started processing any source + // string. + int lastSource; + // lastLine is the line number (starting from 1) of the last token processed. + // It is tracked in order for newlines to be inserted when a token appears + // on a new line. 0 means we haven't started processing any line in the + // current source string. + int lastLine; +}; + +// DoPreprocessing is a valid ProcessingContext template argument, +// which only performs the preprocessing step of compilation. +// It places the result in the "string" argument to its constructor. +// +// This is not an officially supported or fully working path. +struct DoPreprocessing { + explicit DoPreprocessing(std::string* string): outputString(string) {} + bool operator()(TParseContextBase& parseContext, TPpContext& ppContext, + TInputScanner& input, bool versionWillBeError, + TSymbolTable&, TIntermediate&, + EShOptimizationLevel, EShMessages) + { + // This is a list of tokens that do not require a space before or after. + static const std::string unNeededSpaceTokens = ";()[]"; + static const std::string noSpaceBeforeTokens = ","; + glslang::TPpToken ppToken; + + parseContext.setScanner(&input); + ppContext.setInput(input, versionWillBeError); + + std::string outputBuffer; + SourceLineSynchronizer lineSync( + std::bind(&TInputScanner::getLastValidSourceIndex, &input), &outputBuffer); + + parseContext.setExtensionCallback([&lineSync, &outputBuffer]( + int line, const char* extension, const char* behavior) { + lineSync.syncToLine(line); + outputBuffer += "#extension "; + outputBuffer += extension; + outputBuffer += " : "; + outputBuffer += behavior; + }); + + parseContext.setLineCallback([&lineSync, &outputBuffer, &parseContext]( + int curLineNum, int newLineNum, bool hasSource, int sourceNum, const char* sourceName) { + // SourceNum is the number of the source-string that is being parsed. + lineSync.syncToLine(curLineNum); + outputBuffer += "#line "; + outputBuffer += std::to_string(newLineNum); + if (hasSource) { + outputBuffer += ' '; + if (sourceName != nullptr) { + outputBuffer += '\"'; + outputBuffer += sourceName; + outputBuffer += '\"'; + } else { + outputBuffer += std::to_string(sourceNum); + } + } + if (parseContext.lineDirectiveShouldSetNextLine()) { + // newLineNum is the new line number for the line following the #line + // directive. So the new line number for the current line is + newLineNum -= 1; + } + outputBuffer += '\n'; + // And we are at the next line of the #line directive now. 
+ lineSync.setLineNum(newLineNum + 1); + }); + + parseContext.setVersionCallback( + [&lineSync, &outputBuffer](int line, int version, const char* str) { + lineSync.syncToLine(line); + outputBuffer += "#version "; + outputBuffer += std::to_string(version); + if (str) { + outputBuffer += ' '; + outputBuffer += str; + } + }); + + parseContext.setPragmaCallback([&lineSync, &outputBuffer]( + int line, const glslang::TVector& ops) { + lineSync.syncToLine(line); + outputBuffer += "#pragma "; + for(size_t i = 0; i < ops.size(); ++i) { + outputBuffer += ops[i].c_str(); + } + }); + + parseContext.setErrorCallback([&lineSync, &outputBuffer]( + int line, const char* errorMessage) { + lineSync.syncToLine(line); + outputBuffer += "#error "; + outputBuffer += errorMessage; + }); + + int lastToken = EndOfInput; // lastToken records the last token processed. + do { + int token = ppContext.tokenize(ppToken); + if (token == EndOfInput) + break; + + bool isNewString = lineSync.syncToMostRecentString(); + bool isNewLine = lineSync.syncToLine(ppToken.loc.line); + + if (isNewLine) { + // Don't emit whitespace onto empty lines. + // Copy any whitespace characters at the start of a line + // from the input to the output. + outputBuffer += std::string(ppToken.loc.column - 1, ' '); + } + + // Output a space in between tokens, but not at the start of a line, + // and also not around special tokens. This helps with readability + // and consistency. + if (!isNewString && !isNewLine && lastToken != EndOfInput && + (unNeededSpaceTokens.find((char)token) == std::string::npos) && + (unNeededSpaceTokens.find((char)lastToken) == std::string::npos) && + (noSpaceBeforeTokens.find((char)token) == std::string::npos)) { + outputBuffer += ' '; + } + lastToken = token; + if (token == PpAtomConstString) + outputBuffer += "\""; + outputBuffer += ppToken.name; + if (token == PpAtomConstString) + outputBuffer += "\""; + } while (true); + outputBuffer += '\n'; + *outputString = std::move(outputBuffer); + + bool success = true; + if (parseContext.getNumErrors() > 0) { + success = false; + parseContext.infoSink.info.prefix(EPrefixError); + parseContext.infoSink.info << parseContext.getNumErrors() << " compilation errors. No code generated.\n\n"; + } + return success; + } + std::string* outputString; +}; + +#endif + +// DoFullParse is a valid ProcessingConext template argument for fully +// parsing the shader. It populates the "intermediate" with the AST. +struct DoFullParse{ + bool operator()(TParseContextBase& parseContext, TPpContext& ppContext, + TInputScanner& fullInput, bool versionWillBeError, + TSymbolTable&, TIntermediate& intermediate, + EShOptimizationLevel optLevel, EShMessages messages) + { + bool success = true; + // Parse the full shader. + if (! parseContext.parseShaderStrings(ppContext, fullInput, versionWillBeError)) + success = false; + + if (success && intermediate.getTreeRoot()) { + if (optLevel == EShOptNoGeneration) + parseContext.infoSink.info.message(EPrefixNone, "No errors. No code generation or linking was requested."); + else + success = intermediate.postProcess(intermediate.getTreeRoot(), parseContext.getLanguage()); + } else if (! success) { + parseContext.infoSink.info.prefix(EPrefixError); + parseContext.infoSink.info << parseContext.getNumErrors() << " compilation errors. No code generated.\n\n"; + } + + if (messages & EShMsgAST) + intermediate.output(parseContext.infoSink, true); + + return success; + } +}; + +#ifndef GLSLANG_WEB +// Take a single compilation unit, and run the preprocessor on it. 
+// Return: True if there were no issues found in preprocessing, +// False if during preprocessing any unknown version, pragmas or +// extensions were found. +// +// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string +// is not an officially supported or fully working path. +bool PreprocessDeferred( + TCompiler* compiler, + const char* const shaderStrings[], + const int numStrings, + const int* inputLengths, + const char* const stringNames[], + const char* preamble, + const EShOptimizationLevel optLevel, + const TBuiltInResource* resources, + int defaultVersion, // use 100 for ES environment, 110 for desktop + EProfile defaultProfile, + bool forceDefaultVersionAndProfile, + bool forwardCompatible, // give errors for use of deprecated features + EShMessages messages, // warnings/errors/AST; things to print out + TShader::Includer& includer, + TIntermediate& intermediate, // returned tree, etc. + std::string* outputString) +{ + DoPreprocessing parser(outputString); + return ProcessDeferred(compiler, shaderStrings, numStrings, inputLengths, stringNames, + preamble, optLevel, resources, defaultVersion, + defaultProfile, forceDefaultVersionAndProfile, + forwardCompatible, messages, intermediate, parser, + false, includer); +} +#endif + +// +// do a partial compile on the given strings for a single compilation unit +// for a potential deferred link into a single stage (and deferred full compile of that +// stage through machine-dependent compilation). +// +// all preprocessing, parsing, semantic checks, etc. for a single compilation unit +// are done here. +// +// return: the tree and other information is filled into the intermediate argument, +// and true is returned by the function for success. +// +bool CompileDeferred( + TCompiler* compiler, + const char* const shaderStrings[], + const int numStrings, + const int* inputLengths, + const char* const stringNames[], + const char* preamble, + const EShOptimizationLevel optLevel, + const TBuiltInResource* resources, + int defaultVersion, // use 100 for ES environment, 110 for desktop + EProfile defaultProfile, + bool forceDefaultVersionAndProfile, + bool forwardCompatible, // give errors for use of deprecated features + EShMessages messages, // warnings/errors/AST; things to print out + TIntermediate& intermediate,// returned tree, etc. + TShader::Includer& includer, + const std::string sourceEntryPointName = "", + TEnvironment* environment = nullptr) +{ + DoFullParse parser; + return ProcessDeferred(compiler, shaderStrings, numStrings, inputLengths, stringNames, + preamble, optLevel, resources, defaultVersion, + defaultProfile, forceDefaultVersionAndProfile, + forwardCompatible, messages, intermediate, parser, + true, includer, sourceEntryPointName, environment); +} + +} // end anonymous namespace for local functions + +// +// ShInitialize() should be called exactly once per process, not per thread. +// +int ShInitialize() +{ + glslang::InitGlobalLock(); + + if (! InitProcess()) + return 0; + + glslang::GetGlobalLock(); + ++NumberOfClients; + glslang::ReleaseGlobalLock(); + + if (PerProcessGPA == nullptr) + PerProcessGPA = new TPoolAllocator(); + + glslang::TScanContext::fillInKeywordMap(); +#ifdef ENABLE_HLSL + glslang::HlslScanContext::fillInKeywordMap(); +#endif + + return 1; +} + +// +// Driver calls these to create and destroy compiler/linker +// objects. 
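+// A minimal sketch of the handle lifecycle, assuming ShInitialize() has
+// already succeeded (error checking omitted):
+//
+//     ShHandle compiler = ShConstructCompiler(EShLangFragment, 0 /*debugOptions*/);
+//     // ... ShCompile(compiler, ...), ShGetInfoLog(compiler) ...
+//     ShDestruct(compiler);
+//     ShFinalize();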
+// + +ShHandle ShConstructCompiler(const EShLanguage language, int debugOptions) +{ + if (!InitThread()) + return 0; + + TShHandleBase* base = static_cast(ConstructCompiler(language, debugOptions)); + + return reinterpret_cast(base); +} + +ShHandle ShConstructLinker(const EShExecutable executable, int debugOptions) +{ + if (!InitThread()) + return 0; + + TShHandleBase* base = static_cast(ConstructLinker(executable, debugOptions)); + + return reinterpret_cast(base); +} + +ShHandle ShConstructUniformMap() +{ + if (!InitThread()) + return 0; + + TShHandleBase* base = static_cast(ConstructUniformMap()); + + return reinterpret_cast(base); +} + +void ShDestruct(ShHandle handle) +{ + if (handle == 0) + return; + + TShHandleBase* base = static_cast(handle); + + if (base->getAsCompiler()) + DeleteCompiler(base->getAsCompiler()); + else if (base->getAsLinker()) + DeleteLinker(base->getAsLinker()); + else if (base->getAsUniformMap()) + DeleteUniformMap(base->getAsUniformMap()); +} + +// +// Cleanup symbol tables +// +int ShFinalize() +{ + glslang::GetGlobalLock(); + --NumberOfClients; + assert(NumberOfClients >= 0); + bool finalize = NumberOfClients == 0; + glslang::ReleaseGlobalLock(); + if (! finalize) + return 1; + + for (int version = 0; version < VersionCount; ++version) { + for (int spvVersion = 0; spvVersion < SpvVersionCount; ++spvVersion) { + for (int p = 0; p < ProfileCount; ++p) { + for (int source = 0; source < SourceCount; ++source) { + for (int stage = 0; stage < EShLangCount; ++stage) { + delete SharedSymbolTables[version][spvVersion][p][source][stage]; + SharedSymbolTables[version][spvVersion][p][source][stage] = 0; + } + } + } + } + } + + for (int version = 0; version < VersionCount; ++version) { + for (int spvVersion = 0; spvVersion < SpvVersionCount; ++spvVersion) { + for (int p = 0; p < ProfileCount; ++p) { + for (int source = 0; source < SourceCount; ++source) { + for (int pc = 0; pc < EPcCount; ++pc) { + delete CommonSymbolTable[version][spvVersion][p][source][pc]; + CommonSymbolTable[version][spvVersion][p][source][pc] = 0; + } + } + } + } + } + + if (PerProcessGPA != nullptr) { + delete PerProcessGPA; + PerProcessGPA = nullptr; + } + + glslang::TScanContext::deleteKeywordMap(); +#ifdef ENABLE_HLSL + glslang::HlslScanContext::deleteKeywordMap(); +#endif + + return 1; +} + +// +// Do a full compile on the given strings for a single compilation unit +// forming a complete stage. The result of the machine dependent compilation +// is left in the provided compile object. +// +// Return: The return value is really boolean, indicating +// success (1) or failure (0). 
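+// A usage sketch (assumes 'handle' came from ShConstructCompiler and
+// 'resources' is a filled-in TBuiltInResource):
+//
+//     const char* strings[] = { shaderSource };
+//     int ok = ShCompile(handle, strings, 1, nullptr, EShOptNone, &resources,
+//                        0 /*debugOptions*/, 110, false, EShMsgDefault);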
+// +int ShCompile( + const ShHandle handle, + const char* const shaderStrings[], + const int numStrings, + const int* inputLengths, + const EShOptimizationLevel optLevel, + const TBuiltInResource* resources, + int /*debugOptions*/, + int defaultVersion, // use 100 for ES environment, 110 for desktop + bool forwardCompatible, // give errors for use of deprecated features + EShMessages messages // warnings/errors/AST; things to print out + ) +{ + // Map the generic handle to the C++ object + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + TCompiler* compiler = base->getAsCompiler(); + if (compiler == 0) + return 0; + + SetThreadPoolAllocator(compiler->getPool()); + + compiler->infoSink.info.erase(); + compiler->infoSink.debug.erase(); + + TIntermediate intermediate(compiler->getLanguage()); + TShader::ForbidIncluder includer; + bool success = CompileDeferred(compiler, shaderStrings, numStrings, inputLengths, nullptr, + "", optLevel, resources, defaultVersion, ENoProfile, false, + forwardCompatible, messages, intermediate, includer); + + // + // Call the machine dependent compiler + // + if (success && intermediate.getTreeRoot() && optLevel != EShOptNoGeneration) + success = compiler->compile(intermediate.getTreeRoot(), intermediate.getVersion(), intermediate.getProfile()); + + intermediate.removeTree(); + + // Throw away all the temporary memory used by the compilation process. + // The push was done in the CompileDeferred() call above. + GetThreadPoolAllocator().pop(); + + return success ? 1 : 0; +} + +// +// Link the given compile objects. +// +// Return: The return value of is really boolean, indicating +// success or failure. +// +int ShLinkExt( + const ShHandle linkHandle, + const ShHandle compHandles[], + const int numHandles) +{ + if (linkHandle == 0 || numHandles == 0) + return 0; + + THandleList cObjects; + + for (int i = 0; i < numHandles; ++i) { + if (compHandles[i] == 0) + return 0; + TShHandleBase* base = reinterpret_cast(compHandles[i]); + if (base->getAsLinker()) { + cObjects.push_back(base->getAsLinker()); + } + if (base->getAsCompiler()) + cObjects.push_back(base->getAsCompiler()); + + if (cObjects[i] == 0) + return 0; + } + + TShHandleBase* base = reinterpret_cast(linkHandle); + TLinker* linker = static_cast(base->getAsLinker()); + + SetThreadPoolAllocator(linker->getPool()); + + if (linker == 0) + return 0; + + linker->infoSink.info.erase(); + + for (int i = 0; i < numHandles; ++i) { + if (cObjects[i]->getAsCompiler()) { + if (! cObjects[i]->getAsCompiler()->linkable()) { + linker->infoSink.info.message(EPrefixError, "Not all shaders have valid object code."); + return 0; + } + } + } + + bool ret = linker->link(cObjects); + + return ret ? 1 : 0; +} + +// +// ShSetEncrpytionMethod is a place-holder for specifying +// how source code is encrypted. +// +void ShSetEncryptionMethod(ShHandle handle) +{ + if (handle == 0) + return; +} + +// +// Return any compiler/linker/uniformmap log of messages for the application. +// +const char* ShGetInfoLog(const ShHandle handle) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = static_cast(handle); + TInfoSink* infoSink; + + if (base->getAsCompiler()) + infoSink = &(base->getAsCompiler()->getInfoSink()); + else if (base->getAsLinker()) + infoSink = &(base->getAsLinker()->getInfoSink()); + else + return 0; + + infoSink->info << infoSink->debug.c_str(); + return infoSink->info.c_str(); +} + +// +// Return the resulting binary code from the link process. Structure +// is machine dependent. 
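+// Sketch of the legacy link path (assumes 'compilers' holds handles whose
+// compiles succeeded, and the EShExVertexFragment executable kind from
+// ShaderLang.h):
+//
+//     ShHandle linker = ShConstructLinker(EShExVertexFragment, 0);
+//     if (ShLinkExt(linker, compilers, numCompilers)) {
+//         const void* code = ShGetExecutable(linker);  // machine-dependent blob
+//     }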
+// +const void* ShGetExecutable(const ShHandle handle) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + + TLinker* linker = static_cast(base->getAsLinker()); + if (linker == 0) + return 0; + + return linker->getObjectCode(); +} + +// +// Let the linker know where the application said it's attributes are bound. +// The linker does not use these values, they are remapped by the ICD or +// hardware. It just needs them to know what's aliased. +// +// Return: The return value of is really boolean, indicating +// success or failure. +// +int ShSetVirtualAttributeBindings(const ShHandle handle, const ShBindingTable* table) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + TLinker* linker = static_cast(base->getAsLinker()); + + if (linker == 0) + return 0; + + linker->setAppAttributeBindings(table); + + return 1; +} + +// +// Let the linker know where the predefined attributes have to live. +// +int ShSetFixedAttributeBindings(const ShHandle handle, const ShBindingTable* table) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + TLinker* linker = static_cast(base->getAsLinker()); + + if (linker == 0) + return 0; + + linker->setFixedAttributeBindings(table); + return 1; +} + +// +// Some attribute locations are off-limits to the linker... +// +int ShExcludeAttributes(const ShHandle handle, int *attributes, int count) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + TLinker* linker = static_cast(base->getAsLinker()); + if (linker == 0) + return 0; + + linker->setExcludedAttributes(attributes, count); + + return 1; +} + +// +// Return the index for OpenGL to use for knowing where a uniform lives. +// +// Return: The return value of is really boolean, indicating +// success or failure. +// +int ShGetUniformLocation(const ShHandle handle, const char* name) +{ + if (handle == 0) + return -1; + + TShHandleBase* base = reinterpret_cast(handle); + TUniformMap* uniformMap= base->getAsUniformMap(); + if (uniformMap == 0) + return -1; + + return uniformMap->getLocation(name); +} + +//////////////////////////////////////////////////////////////////////////////////////////// +// +// Deferred-Lowering C++ Interface +// ----------------------------------- +// +// Below is a new alternate C++ interface that might potentially replace the above +// opaque handle-based interface. +// +// See more detailed comment in ShaderLang.h +// + +namespace glslang { + +#include "../Include/revision.h" + +#define QUOTE(s) #s +#define STR(n) QUOTE(n) + +const char* GetEsslVersionString() +{ + return "OpenGL ES GLSL 3.20 glslang Khronos. " STR(GLSLANG_MINOR_VERSION) "." STR(GLSLANG_PATCH_LEVEL); +} + +const char* GetGlslVersionString() +{ + return "4.60 glslang Khronos. " STR(GLSLANG_MINOR_VERSION) "." 
STR(GLSLANG_PATCH_LEVEL); +} + +int GetKhronosToolId() +{ + return 8; +} + +bool InitializeProcess() +{ + return ShInitialize() != 0; +} + +void FinalizeProcess() +{ + ShFinalize(); +} + +class TDeferredCompiler : public TCompiler { +public: + TDeferredCompiler(EShLanguage s, TInfoSink& i) : TCompiler(s, i) { } + virtual bool compile(TIntermNode*, int = 0, EProfile = ENoProfile) { return true; } +}; + +TShader::TShader(EShLanguage s) + : stage(s), lengths(nullptr), stringNames(nullptr), preamble("") +{ + pool = new TPoolAllocator; + infoSink = new TInfoSink; + compiler = new TDeferredCompiler(stage, *infoSink); + intermediate = new TIntermediate(s); + + // clear environment (avoid constructors in them for use in a C interface) + environment.input.languageFamily = EShSourceNone; + environment.input.dialect = EShClientNone; + environment.client.client = EShClientNone; + environment.target.language = EShTargetNone; + environment.target.hlslFunctionality1 = false; +} + +TShader::~TShader() +{ + delete infoSink; + delete compiler; + delete intermediate; + delete pool; +} + +void TShader::setStrings(const char* const* s, int n) +{ + strings = s; + numStrings = n; + lengths = nullptr; +} + +void TShader::setStringsWithLengths(const char* const* s, const int* l, int n) +{ + strings = s; + numStrings = n; + lengths = l; +} + +void TShader::setStringsWithLengthsAndNames( + const char* const* s, const int* l, const char* const* names, int n) +{ + strings = s; + numStrings = n; + lengths = l; + stringNames = names; +} + +void TShader::setEntryPoint(const char* entryPoint) +{ + intermediate->setEntryPointName(entryPoint); +} + +void TShader::setSourceEntryPoint(const char* name) +{ + sourceEntryPointName = name; +} + +// Log initial settings and transforms. +// See comment for class TProcesses. +void TShader::addProcesses(const std::vector& p) +{ + intermediate->addProcesses(p); +} + +void TShader::setInvertY(bool invert) { intermediate->setInvertY(invert); } +void TShader::setNanMinMaxClamp(bool useNonNan) { intermediate->setNanMinMaxClamp(useNonNan); } + +#ifndef GLSLANG_WEB + +// Set binding base for given resource type +void TShader::setShiftBinding(TResourceType res, unsigned int base) { + intermediate->setShiftBinding(res, base); +} + +// Set binding base for given resource type for a given binding set. +void TShader::setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set) { + intermediate->setShiftBindingForSet(res, base, set); +} + +// Set binding base for sampler types +void TShader::setShiftSamplerBinding(unsigned int base) { setShiftBinding(EResSampler, base); } +// Set binding base for texture types (SRV) +void TShader::setShiftTextureBinding(unsigned int base) { setShiftBinding(EResTexture, base); } +// Set binding base for image types +void TShader::setShiftImageBinding(unsigned int base) { setShiftBinding(EResImage, base); } +// Set binding base for uniform buffer objects (CBV) +void TShader::setShiftUboBinding(unsigned int base) { setShiftBinding(EResUbo, base); } +// Synonym for setShiftUboBinding, to match HLSL language. 
+void TShader::setShiftCbufferBinding(unsigned int base) { setShiftBinding(EResUbo, base); } +// Set binding base for UAV (unordered access view) +void TShader::setShiftUavBinding(unsigned int base) { setShiftBinding(EResUav, base); } +// Set binding base for SSBOs +void TShader::setShiftSsboBinding(unsigned int base) { setShiftBinding(EResSsbo, base); } +// Enables binding automapping using TIoMapper +void TShader::setAutoMapBindings(bool map) { intermediate->setAutoMapBindings(map); } +// Enables position.Y output negation in vertex shader + +// Fragile: currently within one stage: simple auto-assignment of location +void TShader::setAutoMapLocations(bool map) { intermediate->setAutoMapLocations(map); } +void TShader::addUniformLocationOverride(const char* name, int loc) +{ + intermediate->addUniformLocationOverride(name, loc); +} +void TShader::setUniformLocationBase(int base) +{ + intermediate->setUniformLocationBase(base); +} +void TShader::setNoStorageFormat(bool useUnknownFormat) { intermediate->setNoStorageFormat(useUnknownFormat); } +void TShader::setResourceSetBinding(const std::vector& base) { intermediate->setResourceSetBinding(base); } +void TShader::setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { intermediate->setTextureSamplerTransformMode(mode); } +#endif + +#ifdef ENABLE_HLSL +// See comment above TDefaultHlslIoMapper in iomapper.cpp: +void TShader::setHlslIoMapping(bool hlslIoMap) { intermediate->setHlslIoMapping(hlslIoMap); } +void TShader::setFlattenUniformArrays(bool flatten) { intermediate->setFlattenUniformArrays(flatten); } +#endif + +// +// Turn the shader strings into a parse tree in the TIntermediate. +// +// Returns true for success. +// +bool TShader::parse(const TBuiltInResource* builtInResources, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile, + bool forwardCompatible, EShMessages messages, Includer& includer) +{ + if (! InitThread()) + return false; + SetThreadPoolAllocator(pool); + + if (! preamble) + preamble = ""; + + return CompileDeferred(compiler, strings, numStrings, lengths, stringNames, + preamble, EShOptNone, builtInResources, defaultVersion, + defaultProfile, forceDefaultVersionAndProfile, + forwardCompatible, messages, *intermediate, includer, sourceEntryPointName, + &environment); +} + +#ifndef GLSLANG_WEB +// Fill in a string with the result of preprocessing ShaderStrings +// Returns true if all extensions, pragmas and version strings were valid. +// +// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string +// is not an officially supported or fully working path. +bool TShader::preprocess(const TBuiltInResource* builtInResources, + int defaultVersion, EProfile defaultProfile, + bool forceDefaultVersionAndProfile, + bool forwardCompatible, EShMessages message, + std::string* output_string, + Includer& includer) +{ + if (! InitThread()) + return false; + SetThreadPoolAllocator(pool); + + if (! 
preamble) + preamble = ""; + + return PreprocessDeferred(compiler, strings, numStrings, lengths, stringNames, preamble, + EShOptNone, builtInResources, defaultVersion, + defaultProfile, forceDefaultVersionAndProfile, + forwardCompatible, message, includer, *intermediate, output_string); +} +#endif + +const char* TShader::getInfoLog() +{ + return infoSink->info.c_str(); +} + +const char* TShader::getInfoDebugLog() +{ + return infoSink->debug.c_str(); +} + +TProgram::TProgram() : +#ifndef GLSLANG_WEB + reflection(0), +#endif + linked(false) +{ + pool = new TPoolAllocator; + infoSink = new TInfoSink; + for (int s = 0; s < EShLangCount; ++s) { + intermediate[s] = 0; + newedIntermediate[s] = false; + } +} + +TProgram::~TProgram() +{ + delete infoSink; +#ifndef GLSLANG_WEB + delete reflection; +#endif + + for (int s = 0; s < EShLangCount; ++s) + if (newedIntermediate[s]) + delete intermediate[s]; + + delete pool; +} + +// +// Merge the compilation units within each stage into a single TIntermediate. +// All starting compilation units need to be the result of calling TShader::parse(). +// +// Return true for success. +// +bool TProgram::link(EShMessages messages) +{ + if (linked) + return false; + linked = true; + + bool error = false; + + SetThreadPoolAllocator(pool); + + for (int s = 0; s < EShLangCount; ++s) { + if (! linkStage((EShLanguage)s, messages)) + error = true; + } + + // TODO: Link: cross-stage error checking + + return ! error; +} + +// +// Merge the compilation units within the given stage into a single TIntermediate. +// +// Return true for success. +// +bool TProgram::linkStage(EShLanguage stage, EShMessages messages) +{ + if (stages[stage].size() == 0) + return true; + +#ifndef GLSLANG_WEB + int numEsShaders = 0, numNonEsShaders = 0; + for (auto it = stages[stage].begin(); it != stages[stage].end(); ++it) { + if ((*it)->intermediate->getProfile() == EEsProfile) { + numEsShaders++; + } else { + numNonEsShaders++; + } + } + + if (numEsShaders > 0 && numNonEsShaders > 0) { + infoSink->info.message(EPrefixError, "Cannot mix ES profile with non-ES profile shaders"); + return false; + } else if (numEsShaders > 1) { + infoSink->info.message(EPrefixError, "Cannot attach multiple ES shaders of the same type to a single program"); + return false; + } + + // + // Be efficient for the common single compilation unit per stage case, + // reusing it's TIntermediate instead of merging into a new one. + // + TIntermediate *firstIntermediate = stages[stage].front()->intermediate; + if (stages[stage].size() == 1) + intermediate[stage] = firstIntermediate; + else { + intermediate[stage] = new TIntermediate(stage, + firstIntermediate->getVersion(), + firstIntermediate->getProfile()); + + + // The new TIntermediate must use the same origin as the original TIntermediates. + // Otherwise linking will fail due to different coordinate systems. 
+
+//
+// Merge the compilation units within the given stage into a single TIntermediate.
+//
+// Return true for success.
+//
+bool TProgram::linkStage(EShLanguage stage, EShMessages messages)
+{
+ if (stages[stage].size() == 0)
+ return true;
+
+#ifndef GLSLANG_WEB
+ int numEsShaders = 0, numNonEsShaders = 0;
+ for (auto it = stages[stage].begin(); it != stages[stage].end(); ++it) {
+ if ((*it)->intermediate->getProfile() == EEsProfile) {
+ numEsShaders++;
+ } else {
+ numNonEsShaders++;
+ }
+ }
+
+ if (numEsShaders > 0 && numNonEsShaders > 0) {
+ infoSink->info.message(EPrefixError, "Cannot mix ES profile with non-ES profile shaders");
+ return false;
+ } else if (numEsShaders > 1) {
+ infoSink->info.message(EPrefixError, "Cannot attach multiple ES shaders of the same type to a single program");
+ return false;
+ }
+
+ //
+ // Be efficient for the common single compilation unit per stage case,
+ // reusing its TIntermediate instead of merging into a new one.
+ //
+ TIntermediate *firstIntermediate = stages[stage].front()->intermediate;
+ if (stages[stage].size() == 1)
+ intermediate[stage] = firstIntermediate;
+ else {
+ intermediate[stage] = new TIntermediate(stage,
+ firstIntermediate->getVersion(),
+ firstIntermediate->getProfile());
+
+ // The new TIntermediate must use the same origin as the original TIntermediates.
+ // Otherwise linking will fail due to different coordinate systems.
+ if (firstIntermediate->getOriginUpperLeft()) {
+ intermediate[stage]->setOriginUpperLeft();
+ }
+ intermediate[stage]->setSpv(firstIntermediate->getSpv());
+
+ newedIntermediate[stage] = true;
+ }
+
+ if (messages & EShMsgAST)
+ infoSink->info << "\nLinked " << StageName(stage) << " stage:\n\n";
+
+ if (stages[stage].size() > 1) {
+ std::list<TShader*>::const_iterator it;
+ for (it = stages[stage].begin(); it != stages[stage].end(); ++it)
+ intermediate[stage]->merge(*infoSink, *(*it)->intermediate);
+ }
+#else
+ intermediate[stage] = stages[stage].front()->intermediate;
+#endif
+ intermediate[stage]->finalCheck(*infoSink, (messages & EShMsgKeepUncalled) != 0);
+
+ if (messages & EShMsgAST)
+ intermediate[stage]->output(*infoSink, true);
+
+ return intermediate[stage]->getNumErrors() == 0;
+}
+
+const char* TProgram::getInfoLog()
+{
+ return infoSink->info.c_str();
+}
+
+const char* TProgram::getInfoDebugLog()
+{
+ return infoSink->debug.c_str();
+}
+
+#ifndef GLSLANG_WEB
+
+//
+// Reflection implementation.
+//
+
+bool TProgram::buildReflection(int opts)
+{
+ if (! linked || reflection != nullptr)
+ return false;
+
+ int firstStage = EShLangVertex, lastStage = EShLangFragment;
+
+ if (opts & EShReflectionIntermediateIO) {
+ // if we're reflecting intermediate I/O, determine the first and last stage linked and use those as the
+ // boundaries for which stages generate pipeline inputs/outputs
+ firstStage = EShLangCount;
+ lastStage = 0;
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (intermediate[s]) {
+ firstStage = std::min(firstStage, s);
+ lastStage = std::max(lastStage, s);
+ }
+ }
+ }
+
+ reflection = new TReflection((EShReflectionOptions)opts, (EShLanguage)firstStage, (EShLanguage)lastStage);
+
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (intermediate[s]) {
+ if (! 
reflection->addStage((EShLanguage)s, *intermediate[s])) + return false; + } + } + + return true; +} + +unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); } +int TProgram::getReflectionIndex(const char* name) const { return reflection->getIndex(name); } +int TProgram::getReflectionPipeIOIndex(const char* name, const bool inOrOut) const + { return reflection->getPipeIOIndex(name, inOrOut); } + +int TProgram::getNumUniformVariables() const { return reflection->getNumUniforms(); } +const TObjectReflection& TProgram::getUniform(int index) const { return reflection->getUniform(index); } +int TProgram::getNumUniformBlocks() const { return reflection->getNumUniformBlocks(); } +const TObjectReflection& TProgram::getUniformBlock(int index) const { return reflection->getUniformBlock(index); } +int TProgram::getNumPipeInputs() const { return reflection->getNumPipeInputs(); } +const TObjectReflection& TProgram::getPipeInput(int index) const { return reflection->getPipeInput(index); } +int TProgram::getNumPipeOutputs() const { return reflection->getNumPipeOutputs(); } +const TObjectReflection& TProgram::getPipeOutput(int index) const { return reflection->getPipeOutput(index); } +int TProgram::getNumBufferVariables() const { return reflection->getNumBufferVariables(); } +const TObjectReflection& TProgram::getBufferVariable(int index) const { return reflection->getBufferVariable(index); } +int TProgram::getNumBufferBlocks() const { return reflection->getNumStorageBuffers(); } +const TObjectReflection& TProgram::getBufferBlock(int index) const { return reflection->getStorageBufferBlock(index); } +int TProgram::getNumAtomicCounters() const { return reflection->getNumAtomicCounters(); } +const TObjectReflection& TProgram::getAtomicCounter(int index) const { return reflection->getAtomicCounter(index); } +void TProgram::dumpReflection() { if (reflection != nullptr) reflection->dump(); } + +// +// I/O mapping implementation. +// +bool TProgram::mapIO(TIoMapResolver* pResolver, TIoMapper* pIoMapper) +{ + if (! linked) + return false; + TIoMapper* ioMapper = nullptr; + TIoMapper defaultIOMapper; + if (pIoMapper == nullptr) + ioMapper = &defaultIOMapper; + else + ioMapper = pIoMapper; + for (int s = 0; s < EShLangCount; ++s) { + if (intermediate[s]) { + if (! ioMapper->addStage((EShLanguage)s, *intermediate[s], *infoSink, pResolver)) + return false; + } + } + + return ioMapper->doMap(pResolver, *infoSink); +} + +#endif // GLSLANG_WEB + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/SymbolTable.cpp b/dep/glslang/glslang/MachineIndependent/SymbolTable.cpp new file mode 100644 index 000000000..06b5a813d --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/SymbolTable.cpp @@ -0,0 +1,446 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Symbol table for parsing. Most functionality and main ideas +// are documented in the header file. +// + +#include "SymbolTable.h" + +namespace glslang { + +// +// TType helper function needs a place to live. +// + +// +// Recursively generate mangled names. +// +void TType::buildMangledName(TString& mangledName) const +{ + if (isMatrix()) + mangledName += 'm'; + else if (isVector()) + mangledName += 'v'; + + switch (basicType) { + case EbtFloat: mangledName += 'f'; break; + case EbtInt: mangledName += 'i'; break; + case EbtUint: mangledName += 'u'; break; + case EbtBool: mangledName += 'b'; break; +#ifndef GLSLANG_WEB + case EbtDouble: mangledName += 'd'; break; + case EbtFloat16: mangledName += "f16"; break; + case EbtInt8: mangledName += "i8"; break; + case EbtUint8: mangledName += "u8"; break; + case EbtInt16: mangledName += "i16"; break; + case EbtUint16: mangledName += "u16"; break; + case EbtInt64: mangledName += "i64"; break; + case EbtUint64: mangledName += "u64"; break; + case EbtAtomicUint: mangledName += "au"; break; + case EbtAccStruct: mangledName += "as"; break; + case EbtRayQuery: mangledName += "rq"; break; +#endif + case EbtSampler: + switch (sampler.type) { +#ifndef GLSLANG_WEB + case EbtFloat16: mangledName += "f16"; break; +#endif + case EbtInt: mangledName += "i"; break; + case EbtUint: mangledName += "u"; break; + default: break; // some compilers want this + } + if (sampler.isImageClass()) + mangledName += "I"; // a normal image or subpass + else if (sampler.isPureSampler()) + mangledName += "p"; // a "pure" sampler + else if (!sampler.isCombined()) + mangledName += "t"; // a "pure" texture + else + mangledName += "s"; // traditional combined sampler + if (sampler.isArrayed()) + mangledName += "A"; + if (sampler.isShadow()) + mangledName += "S"; + if (sampler.isExternal()) + mangledName += "E"; + if (sampler.isYuv()) + mangledName += "Y"; + switch (sampler.dim) { + case Esd2D: mangledName += "2"; break; + case Esd3D: mangledName += "3"; break; + case EsdCube: mangledName += "C"; break; +#ifndef GLSLANG_WEB + case Esd1D: mangledName += "1"; break; + case EsdRect: mangledName += "R2"; break; + case EsdBuffer: mangledName += "B"; break; + case EsdSubpass: mangledName += "P"; break; +#endif + default: break; // some compilers want this + } + 
+#ifdef ENABLE_HLSL
+ if (sampler.hasReturnStruct()) {
+ // Name mangle for sampler return struct uses struct table index.
+ mangledName += "-tx-struct";
+
+ char text[16]; // plenty enough space for the small integers.
+ snprintf(text, sizeof(text), "%u-", sampler.getStructReturnIndex());
+ mangledName += text;
+ } else {
+ switch (sampler.getVectorSize()) {
+ case 1: mangledName += "1"; break;
+ case 2: mangledName += "2"; break;
+ case 3: mangledName += "3"; break;
+ case 4: break; // default to prior name mangle behavior
+ }
+ }
+#endif
+
+ if (sampler.isMultiSample())
+ mangledName += "M";
+ break;
+ case EbtStruct:
+ case EbtBlock:
+ if (basicType == EbtStruct)
+ mangledName += "struct-";
+ else
+ mangledName += "block-";
+ if (typeName)
+ mangledName += *typeName;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ mangledName += '-';
+ (*structure)[i].type->buildMangledName(mangledName);
+ }
+ default:
+ break;
+ }
+
+ if (getVectorSize() > 0)
+ mangledName += static_cast<char>('0' + getVectorSize());
+ else {
+ mangledName += static_cast<char>('0' + getMatrixCols());
+ mangledName += static_cast<char>('0' + getMatrixRows());
+ }
+
+ if (arraySizes) {
+ const int maxSize = 11;
+ char buf[maxSize];
+ for (int i = 0; i < arraySizes->getNumDims(); ++i) {
+ if (arraySizes->getDimNode(i)) {
+ if (arraySizes->getDimNode(i)->getAsSymbolNode())
+ snprintf(buf, maxSize, "s%d", arraySizes->getDimNode(i)->getAsSymbolNode()->getId());
+ else
+ snprintf(buf, maxSize, "s%p", arraySizes->getDimNode(i));
+ } else
+ snprintf(buf, maxSize, "%d", arraySizes->getDimSize(i));
+ mangledName += '[';
+ mangledName += buf;
+ mangledName += ']';
+ }
+ }
+}
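To make the mangling above concrete: TType::appendMangledName() (Types.h) appends each parameter's mangle plus a ';' to the function name, producing the lookup keys used throughout this symbol table. An illustrative decoding, assumed from the rules above rather than quoted from the file:

// Illustrative only:
//   float foo(vec3, int)          -> lookup key "foo(vf3;i1;"
//   vec4 texture(sampler2D, vec2) -> lookup key "texture(s21;vf2;"
// 'v' = vector, 'f'/'i' = element type, trailing digit = size;
// sampler2D: 's' = combined sampler, '2' = Esd2D dimension, '1' = scalar size.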
"," : ""); + } + + infoSink.debug << ")"; + dumpExtensions(infoSink); + } else { + infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " " + << getMangledName().c_str() << "n"; + } + + infoSink.debug << "\n"; +} + +void TAnonMember::dump(TInfoSink& TInfoSink, bool) const +{ + TInfoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str() + << "\n"; +} + +void TSymbolTableLevel::dump(TInfoSink& infoSink, bool complete) const +{ + tLevel::const_iterator it; + for (it = level.begin(); it != level.end(); ++it) + (*it).second->dump(infoSink, complete); +} + +void TSymbolTable::dump(TInfoSink& infoSink, bool complete) const +{ + for (int level = currentLevel(); level >= 0; --level) { + infoSink.debug << "LEVEL " << level << "\n"; + table[level]->dump(infoSink, complete); + } +} + +#endif + +// +// Functions have buried pointers to delete. +// +TFunction::~TFunction() +{ + for (TParamList::iterator i = parameters.begin(); i != parameters.end(); ++i) + delete (*i).type; +} + +// +// Symbol table levels are a map of pointers to symbols that have to be deleted. +// +TSymbolTableLevel::~TSymbolTableLevel() +{ + for (tLevel::iterator it = level.begin(); it != level.end(); ++it) + delete (*it).second; + + delete [] defaultPrecision; +} + +// +// Change all function entries in the table with the non-mangled name +// to be related to the provided built-in operation. +// +void TSymbolTableLevel::relateToOperator(const char* name, TOperator op) +{ + tLevel::const_iterator candidate = level.lower_bound(name); + while (candidate != level.end()) { + const TString& candidateName = (*candidate).first; + TString::size_type parenAt = candidateName.find_first_of('('); + if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) { + TFunction* function = (*candidate).second->getAsFunction(); + function->relateToOperator(op); + } else + break; + ++candidate; + } +} + +// Make all function overloads of the given name require an extension(s). +// Should only be used for a version/profile that actually needs the extension(s). +void TSymbolTableLevel::setFunctionExtensions(const char* name, int num, const char* const extensions[]) +{ + tLevel::const_iterator candidate = level.lower_bound(name); + while (candidate != level.end()) { + const TString& candidateName = (*candidate).first; + TString::size_type parenAt = candidateName.find_first_of('('); + if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) { + TSymbol* symbol = candidate->second; + symbol->setExtensions(num, extensions); + } else + break; + ++candidate; + } +} + +// +// Make all symbols in this table level read only. +// +void TSymbolTableLevel::readOnly() +{ + for (tLevel::iterator it = level.begin(); it != level.end(); ++it) + (*it).second->makeReadOnly(); +} + +// +// Copy a symbol, but the copy is writable; call readOnly() afterward if that's not desired. 
+//
+TSymbol::TSymbol(const TSymbol& copyOf)
+{
+ name = NewPoolTString(copyOf.name->c_str());
+ uniqueId = copyOf.uniqueId;
+ writable = true;
+}
+
+TVariable::TVariable(const TVariable& copyOf) : TSymbol(copyOf)
+{
+ type.deepCopy(copyOf.type);
+ userType = copyOf.userType;
+
+ // we don't support specialization-constant subtrees in cloned tables, only extensions
+ constSubtree = nullptr;
+ extensions = nullptr;
+ memberExtensions = nullptr;
+ if (copyOf.getNumExtensions() > 0)
+ setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions());
+ if (copyOf.hasMemberExtensions()) {
+ for (int m = 0; m < (int)copyOf.type.getStruct()->size(); ++m) {
+ if (copyOf.getNumMemberExtensions(m) > 0)
+ setMemberExtensions(m, copyOf.getNumMemberExtensions(m), copyOf.getMemberExtensions(m));
+ }
+ }
+
+ if (! copyOf.constArray.empty()) {
+ assert(! copyOf.type.isStruct());
+ TConstUnionArray newArray(copyOf.constArray, 0, copyOf.constArray.size());
+ constArray = newArray;
+ }
+}
+
+TVariable* TVariable::clone() const
+{
+ TVariable *variable = new TVariable(*this);
+
+ return variable;
+}
+
+TFunction::TFunction(const TFunction& copyOf) : TSymbol(copyOf)
+{
+ for (unsigned int i = 0; i < copyOf.parameters.size(); ++i) {
+ TParameter param;
+ parameters.push_back(param);
+ parameters.back().copyParam(copyOf.parameters[i]);
+ }
+
+ extensions = nullptr;
+ if (copyOf.getNumExtensions() > 0)
+ setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions());
+ returnType.deepCopy(copyOf.returnType);
+ mangledName = copyOf.mangledName;
+ op = copyOf.op;
+ defined = copyOf.defined;
+ prototyped = copyOf.prototyped;
+ implicitThis = copyOf.implicitThis;
+ illegalImplicitThis = copyOf.illegalImplicitThis;
+ defaultParamCount = copyOf.defaultParamCount;
+}
+
+TFunction* TFunction::clone() const
+{
+ TFunction *function = new TFunction(*this);
+
+ return function;
+}
+
+TAnonMember* TAnonMember::clone() const
+{
+ // Anonymous members of a given block should be cloned at a higher level,
+ // where they can all be assured to still end up pointing to a single
+ // copy of the original container.
+ assert(0);
+
+ return 0;
+}
+
+TSymbolTableLevel* TSymbolTableLevel::clone() const
+{
+ TSymbolTableLevel *symTableLevel = new TSymbolTableLevel();
+ symTableLevel->anonId = anonId;
+ symTableLevel->thisLevel = thisLevel;
+ std::vector<bool> containerCopied(anonId, false);
+ tLevel::const_iterator iter;
+ for (iter = level.begin(); iter != level.end(); ++iter) {
+ const TAnonMember* anon = iter->second->getAsAnonMember();
+ if (anon) {
+ // Insert all the anonymous members of this same container at once,
+ // avoid inserting the remaining members in the future, once this has been done,
+ // allowing them to all be part of the same new container.
+ if (! 
containerCopied[anon->getAnonId()]) { + TVariable* container = anon->getAnonContainer().clone(); + container->changeName(NewPoolTString("")); + // insert the container and all its members + symTableLevel->insert(*container, false); + containerCopied[anon->getAnonId()] = true; + } + } else + symTableLevel->insert(*iter->second->clone(), false); + } + + return symTableLevel; +} + +void TSymbolTable::copyTable(const TSymbolTable& copyOf) +{ + assert(adoptedLevels == copyOf.adoptedLevels); + + uniqueId = copyOf.uniqueId; + noBuiltInRedeclarations = copyOf.noBuiltInRedeclarations; + separateNameSpaces = copyOf.separateNameSpaces; + for (unsigned int i = copyOf.adoptedLevels; i < copyOf.table.size(); ++i) + table.push_back(copyOf.table[i]->clone()); +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/SymbolTable.h b/dep/glslang/glslang/MachineIndependent/SymbolTable.h new file mode 100644 index 000000000..40ca3da53 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/SymbolTable.h @@ -0,0 +1,885 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _SYMBOL_TABLE_INCLUDED_ +#define _SYMBOL_TABLE_INCLUDED_ + +// +// Symbol table for parsing. Has these design characteristics: +// +// * Same symbol table can be used to compile many shaders, to preserve +// effort of creating and loading with the large numbers of built-in +// symbols. +// +// --> This requires a copy mechanism, so initial pools used to create +// the shared information can be popped. Done through "clone" +// methods. +// +// * Name mangling will be used to give each function a unique name +// so that symbol table lookups are never ambiguous. This allows +// a simpler symbol table structure. +// +// * Pushing and popping of scope, so symbol table will really be a stack +// of symbol tables. Searched from the top, with new inserts going into +// the top. 
+//
+// * Constants: Compile time constant symbols will keep their values
+// in the symbol table. The parser can substitute constants at parse
+// time, including doing constant folding and constant propagation.
+//
+// * No temporaries: Temporaries made from operations (+, --, .xy, etc.)
+// are tracked in the intermediate representation, not the symbol table.
+//
+
+#include "../Include/Common.h"
+#include "../Include/intermediate.h"
+#include "../Include/InfoSink.h"
+
+namespace glslang {
+
+//
+// Symbol base class. (Can build functions or variables out of these...)
+//
+
+class TVariable;
+class TFunction;
+class TAnonMember;
+
+typedef TVector<const char*> TExtensionList;
+
+class TSymbol {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ explicit TSymbol(const TString *n) : name(n), extensions(0), writable(true) { }
+ virtual TSymbol* clone() const = 0;
+ virtual ~TSymbol() { } // rely on all symbol owned memory coming from the pool
+
+ virtual const TString& getName() const { return *name; }
+ virtual void changeName(const TString* newName) { name = newName; }
+ virtual void addPrefix(const char* prefix)
+ {
+ TString newName(prefix);
+ newName.append(*name);
+ changeName(NewPoolTString(newName.c_str()));
+ }
+ virtual const TString& getMangledName() const { return getName(); }
+ virtual TFunction* getAsFunction() { return 0; }
+ virtual const TFunction* getAsFunction() const { return 0; }
+ virtual TVariable* getAsVariable() { return 0; }
+ virtual const TVariable* getAsVariable() const { return 0; }
+ virtual const TAnonMember* getAsAnonMember() const { return 0; }
+ virtual const TType& getType() const = 0;
+ virtual TType& getWritableType() = 0;
+ virtual void setUniqueId(int id) { uniqueId = id; }
+ virtual int getUniqueId() const { return uniqueId; }
+ virtual void setExtensions(int numExts, const char* const exts[])
+ {
+ assert(extensions == 0);
+ assert(numExts > 0);
+ extensions = NewPoolObject(extensions);
+ for (int e = 0; e < numExts; ++e)
+ extensions->push_back(exts[e]);
+ }
+ virtual int getNumExtensions() const { return extensions == nullptr ? 0 : (int)extensions->size(); }
+ virtual const char** getExtensions() const { return extensions->data(); }
+
+#ifndef GLSLANG_WEB
+ virtual void dump(TInfoSink& infoSink, bool complete = false) const = 0;
+ void dumpExtensions(TInfoSink& infoSink) const;
+#endif
+
+ virtual bool isReadOnly() const { return ! writable; }
+ virtual void makeReadOnly() { writable = false; }
+
+protected:
+ explicit TSymbol(const TSymbol&);
+ TSymbol& operator=(const TSymbol&);
+
+ const TString *name;
+ unsigned int uniqueId; // For cross-scope comparing during code generation
+
+ // For tracking what extensions must be present
+ // (don't use if correct version/profile is present).
+ TExtensionList* extensions; // an array of pointers to existing constant char strings
+
+ //
+ // N.B.: Non-const functions that will be generally used should assert on this,
+ // to avoid overwriting shared symbol-table information.
+ //
+ bool writable;
+};
+
+//
+// Variable class, meaning a symbol that's not a function.
+//
+// There could be a separate class hierarchy for Constant variables;
+// Only one of int, bool, or float, (or none) is correct for
+// any particular use, but it's easy to do this way, and doesn't
+// seem worth having separate classes, and "getConst" can't simply return
+// different values for different types polymorphically, so this is
+// just simple and pragmatic.
+//
+class TVariable : public TSymbol {
+public:
+ TVariable(const TString *name, const TType& t, bool uT = false )
+ : TSymbol(name),
+ userType(uT),
+ constSubtree(nullptr),
+ memberExtensions(nullptr),
+ anonId(-1)
+ { type.shallowCopy(t); }
+ virtual TVariable* clone() const;
+ virtual ~TVariable() { }
+
+ virtual TVariable* getAsVariable() { return this; }
+ virtual const TVariable* getAsVariable() const { return this; }
+ virtual const TType& getType() const { return type; }
+ virtual TType& getWritableType() { assert(writable); return type; }
+ virtual bool isUserType() const { return userType; }
+ virtual const TConstUnionArray& getConstArray() const { return constArray; }
+ virtual TConstUnionArray& getWritableConstArray() { assert(writable); return constArray; }
+ virtual void setConstArray(const TConstUnionArray& array) { constArray = array; }
+ virtual void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
+ virtual TIntermTyped* getConstSubtree() const { return constSubtree; }
+ virtual void setAnonId(int i) { anonId = i; }
+ virtual int getAnonId() const { return anonId; }
+
+ virtual void setMemberExtensions(int member, int numExts, const char* const exts[])
+ {
+ assert(type.isStruct());
+ assert(numExts > 0);
+ if (memberExtensions == nullptr) {
+ memberExtensions = NewPoolObject(memberExtensions);
+ memberExtensions->resize(type.getStruct()->size());
+ }
+ for (int e = 0; e < numExts; ++e)
+ (*memberExtensions)[member].push_back(exts[e]);
+ }
+ virtual bool hasMemberExtensions() const { return memberExtensions != nullptr; }
+ virtual int getNumMemberExtensions(int member) const
+ {
+ return memberExtensions == nullptr ? 0 : (int)(*memberExtensions)[member].size();
+ }
+ virtual const char** getMemberExtensions(int member) const { return (*memberExtensions)[member].data(); }
+
+#ifndef GLSLANG_WEB
+ virtual void dump(TInfoSink& infoSink, bool complete = false) const;
+#endif
+
+protected:
+ explicit TVariable(const TVariable&);
+ TVariable& operator=(const TVariable&);
+
+ TType type;
+ bool userType;
+
+ // we are assuming that Pool Allocator will free the memory allocated to unionArray
+ // when this object is destroyed
+
+ TConstUnionArray constArray; // for compile-time constant value
+ TIntermTyped* constSubtree; // for specialization constant computation
+ TVector<TExtensionList>* memberExtensions; // per-member extension list, allocated only when needed
+ int anonId; // the ID used for anonymous blocks: TODO: see if uniqueId could serve a dual purpose
+};
+
+//
+// The function sub-class of symbols and the parser will need to
+// share this definition of a function parameter.
+//
+struct TParameter {
+ TString *name;
+ TType* type;
+ TIntermTyped* defaultValue;
+ void copyParam(const TParameter& param)
+ {
+ if (param.name)
+ name = NewPoolTString(param.name->c_str());
+ else
+ name = 0;
+ type = param.type->clone();
+ defaultValue = param.defaultValue;
+ }
+ TBuiltInVariable getDeclaredBuiltIn() const { return type->getQualifier().declaredBuiltIn; }
+};
+
+//
+// The function sub-class of a symbol.
+//
+class TFunction : public TSymbol {
+public:
+ explicit TFunction(TOperator o) :
+ TSymbol(0),
+ op(o),
+ defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0) { }
+ TFunction(const TString *name, const TType& retType, TOperator tOp = EOpNull) :
+ TSymbol(name),
+ mangledName(*name + '('),
+ op(tOp),
+ defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0)
+ {
+ returnType.shallowCopy(retType);
+ declaredBuiltIn = retType.getQualifier().builtIn;
+ }
+ virtual TFunction* clone() const override;
+ virtual ~TFunction();
+
+ virtual TFunction* getAsFunction() override { return this; }
+ virtual const TFunction* getAsFunction() const override { return this; }
+
+ // Install 'p' as the (non-'this') last parameter.
+ // Non-'this' parameters are reflected in both the list of parameters and the
+ // mangled name.
+ virtual void addParameter(TParameter& p)
+ {
+ assert(writable);
+ parameters.push_back(p);
+ p.type->appendMangledName(mangledName);
+
+ if (p.defaultValue != nullptr)
+ defaultParamCount++;
+ }
+
+ // Install 'this' as the first parameter.
+ // 'this' is reflected in the list of parameters, but not the mangled name.
+ virtual void addThisParameter(TType& type, const char* name)
+ {
+ TParameter p = { NewPoolTString(name), new TType, nullptr };
+ p.type->shallowCopy(type);
+ parameters.insert(parameters.begin(), p);
+ }
+
+ virtual void addPrefix(const char* prefix) override
+ {
+ TSymbol::addPrefix(prefix);
+ mangledName.insert(0, prefix);
+ }
+
+ virtual void removePrefix(const TString& prefix)
+ {
+ assert(mangledName.compare(0, prefix.size(), prefix) == 0);
+ mangledName.erase(0, prefix.size());
+ }
+
+ virtual const TString& getMangledName() const override { return mangledName; }
+ virtual const TType& getType() const override { return returnType; }
+ virtual TBuiltInVariable getDeclaredBuiltInType() const { return declaredBuiltIn; }
+ virtual TType& getWritableType() override { return returnType; }
+ virtual void relateToOperator(TOperator o) { assert(writable); op = o; }
+ virtual TOperator getBuiltInOp() const { return op; }
+ virtual void setDefined() { assert(writable); defined = true; }
+ virtual bool isDefined() const { return defined; }
+ virtual void setPrototyped() { assert(writable); prototyped = true; }
+ virtual bool isPrototyped() const { return prototyped; }
+ virtual void setImplicitThis() { assert(writable); implicitThis = true; }
+ virtual bool hasImplicitThis() const { return implicitThis; }
+ virtual void setIllegalImplicitThis() { assert(writable); illegalImplicitThis = true; }
+ virtual bool hasIllegalImplicitThis() const { return illegalImplicitThis; }
+
+ // Return total number of parameters
+ virtual int getParamCount() const { return static_cast<int>(parameters.size()); }
+ // Return number of parameters with default values.
+ virtual int getDefaultParamCount() const { return defaultParamCount; }
+ // Return number of fixed parameters (without default values)
+ virtual int getFixedParamCount() const { return getParamCount() - getDefaultParamCount(); }
+
+ virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; }
+ virtual const TParameter& operator[](int i) const { return parameters[i]; }
+
+#ifndef GLSLANG_WEB
+ virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
+#endif
+
+protected:
+ explicit TFunction(const TFunction&);
+ TFunction& operator=(const TFunction&);
+
+ typedef TVector<TParameter> TParamList;
+ TParamList parameters;
+ TType returnType;
+ TBuiltInVariable declaredBuiltIn;
+
+ TString mangledName;
+ TOperator op;
+ bool defined;
+ bool prototyped;
+ bool implicitThis; // True if this function is allowed to see all members of 'this'
+ bool illegalImplicitThis; // True if this function is not supposed to have access to dynamic members of 'this',
+ // even if it finds member variables in the symbol table.
+ // This is important for a static member function that has member variables in scope,
+ // but is not allowed to use them, or see hidden symbols instead.
+ int defaultParamCount;
+};
+
+//
+// Members of anonymous blocks are a kind of TSymbol. They are not hidden in
+// the symbol table behind a container; rather they are visible and point to
+// their anonymous container. (The anonymous container is found through the
+// member, not the other way around.)
+//
+class TAnonMember : public TSymbol {
+public:
+ TAnonMember(const TString* n, unsigned int m, TVariable& a, int an) : TSymbol(n), anonContainer(a), memberNumber(m), anonId(an) { }
+ virtual TAnonMember* clone() const override;
+ virtual ~TAnonMember() { }
+
+ virtual const TAnonMember* getAsAnonMember() const override { return this; }
+ virtual const TVariable& getAnonContainer() const { return anonContainer; }
+ virtual unsigned int getMemberNumber() const { return memberNumber; }
+
+ virtual const TType& getType() const override
+ {
+ const TTypeList& types = *anonContainer.getType().getStruct();
+ return *types[memberNumber].type;
+ }
+
+ virtual TType& getWritableType() override
+ {
+ assert(writable);
+ const TTypeList& types = *anonContainer.getType().getStruct();
+ return *types[memberNumber].type;
+ }
+
+ virtual void setExtensions(int numExts, const char* const exts[]) override
+ {
+ anonContainer.setMemberExtensions(memberNumber, numExts, exts);
+ }
+ virtual int getNumExtensions() const override { return anonContainer.getNumMemberExtensions(memberNumber); }
+ virtual const char** getExtensions() const override { return anonContainer.getMemberExtensions(memberNumber); }
+
+ virtual int getAnonId() const { return anonId; }
+#ifndef GLSLANG_WEB
+ virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
+#endif
+
+protected:
+ explicit TAnonMember(const TAnonMember&);
+ TAnonMember& operator=(const TAnonMember&);
+
+ TVariable& anonContainer;
+ unsigned int memberNumber;
+ int anonId;
+};
+
+class TSymbolTableLevel {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ TSymbolTableLevel() : defaultPrecision(0), anonId(0), thisLevel(false) { }
+ ~TSymbolTableLevel();
+
+ bool insert(TSymbol& symbol, bool separateNameSpaces)
+ {
+ //
+ // returning true means symbol was added to the table with no semantic errors
+ //
+ const TString& name = symbol.getName();
+ if (name == "") {
+ symbol.getAsVariable()->setAnonId(anonId++);
+ // An empty name means an anonymous container, exposing its members to the external scope.
+ // Give it a name and insert its members in the symbol table, pointing to the container.
+ char buf[20];
+ snprintf(buf, 20, "%s%d", AnonymousPrefix, symbol.getAsVariable()->getAnonId());
+ symbol.changeName(NewPoolTString(buf));
+
+ return insertAnonymousMembers(symbol, 0);
+ } else {
+ // Check for redefinition errors:
+ // - STL itself will tell us if there is a direct name collision, with name mangling, at this level
+ // - additionally, check for function-redefining-variable name collisions
+ const TString& insertName = symbol.getMangledName();
+ if (symbol.getAsFunction()) {
+ // make sure there isn't a variable of this name
+ if (! separateNameSpaces && level.find(name) != level.end())
+ return false;
+
+ // insert, and whatever happens is okay
+ level.insert(tLevelPair(insertName, &symbol));
+
+ return true;
+ } else
+ return level.insert(tLevelPair(insertName, &symbol)).second;
+ }
+ }
+
+ // Add more members to an already inserted aggregate object
+ bool amend(TSymbol& symbol, int firstNewMember)
+ {
+ // See insert() for comments on basic explanation of insert.
+ // This operates similarly, but more simply.
+ // Only supporting amend of anonymous blocks so far.
+ if (IsAnonymous(symbol.getName()))
+ return insertAnonymousMembers(symbol, firstNewMember);
+ else
+ return false;
+ }
+
+ bool insertAnonymousMembers(TSymbol& symbol, int firstMember)
+ {
+ const TTypeList& types = *symbol.getAsVariable()->getType().getStruct();
+ for (unsigned int m = firstMember; m < types.size(); ++m) {
+ TAnonMember* member = new TAnonMember(&types[m].type->getFieldName(), m, *symbol.getAsVariable(), symbol.getAsVariable()->getAnonId());
+ if (! level.insert(tLevelPair(member->getMangledName(), member)).second)
+ return false;
+ }
+
+ return true;
+ }
+
+ TSymbol* find(const TString& name) const
+ {
+ tLevel::const_iterator it = level.find(name);
+ if (it == level.end())
+ return 0;
+ else
+ return (*it).second;
+ }
+
+ void findFunctionNameList(const TString& name, TVector<const TFunction*>& list)
+ {
+ size_t parenAt = name.find_first_of('(');
+ TString base(name, 0, parenAt + 1);
+
+ tLevel::const_iterator begin = level.lower_bound(base);
+ base[parenAt] = ')'; // assume ')' is lexically after '('
+ tLevel::const_iterator end = level.upper_bound(base);
+ for (tLevel::const_iterator it = begin; it != end; ++it)
+ list.push_back(it->second->getAsFunction());
+ }
+
+ // See if there is already a function in the table having the given non-function-style name.
+ bool hasFunctionName(const TString& name) const
+ {
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ if (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0)
+ return true;
+ }
+
+ return false;
+ }
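The lower_bound() scans here, and in relateToOperator()/setFunctionExtensions() above, all rely on the same property of the mangled keys: every overload of a function sorts contiguously in the map. A hedged illustration, assumed from the ASCII ordering of '(' and ')' rather than quoted from the file:

// level keys, sorted: ... "foo(" < "foo(vf3;" < "foo(vf3;i1;" < "foo)" ...
// so lower_bound("foo(") .. upper_bound("foo)") brackets exactly the overload
// set, and a prefix compare against "foo" confirms each hit along the way.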
+
+ // See if there is a variable at this level having the given non-function-style name.
+ // Return true if name is found, and set variable to true if the name was a variable.
+ bool findFunctionVariableName(const TString& name, bool& variable) const
+ {
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ if (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt == candidateName.npos) {
+ // not a mangled name
+ if (candidateName == name) {
+ // found a variable name match
+ variable = true;
+ return true;
+ }
+ } else {
+ // a mangled name
+ if (candidateName.compare(0, parenAt, name) == 0) {
+ // found a function name match
+ variable = false;
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ // Use this to do a lazy 'push' of precision defaults the first time
+ // a precision statement is seen in a new scope. Leave it at 0 for
+ // when no push was needed. Thus, it is not the current defaults,
+ // it is what to restore the defaults to when popping a level.
+ void setPreviousDefaultPrecisions(const TPrecisionQualifier *p)
+ {
+ // can call multiple times at one scope, will only latch on first call,
+ // as we're tracking the previous scope's values, not the current values
+ if (defaultPrecision != 0)
+ return;
+
+ defaultPrecision = new TPrecisionQualifier[EbtNumTypes];
+ for (int t = 0; t < EbtNumTypes; ++t)
+ defaultPrecision[t] = p[t];
+ }
+
+ void getPreviousDefaultPrecisions(TPrecisionQualifier *p)
+ {
+ // can be called for table level pops that didn't set the
+ // defaults
+ if (defaultPrecision == 0 || p == 0)
+ return;
+
+ for (int t = 0; t < EbtNumTypes; ++t)
+ p[t] = defaultPrecision[t];
+ }
+
+ void relateToOperator(const char* name, TOperator op);
+ void setFunctionExtensions(const char* name, int num, const char* const extensions[]);
+#ifndef GLSLANG_WEB
+ void dump(TInfoSink& infoSink, bool complete = false) const;
+#endif
+ TSymbolTableLevel* clone() const;
+ void readOnly();
+
+ void setThisLevel() { thisLevel = true; }
+ bool isThisLevel() const { return thisLevel; }
+
+protected:
+ explicit TSymbolTableLevel(TSymbolTableLevel&);
+ TSymbolTableLevel& operator=(TSymbolTableLevel&);
+
+ typedef std::map<TString, TSymbol*, std::less<TString>, pool_allocator<std::pair<const TString, TSymbol*> > > tLevel;
+ typedef const tLevel::value_type tLevelPair;
+ typedef std::pair<tLevel::iterator, bool> tInsertResult;
+
+ tLevel level; // named mappings
+ TPrecisionQualifier *defaultPrecision;
+ int anonId;
+ bool thisLevel; // True if this level of the symbol table is a structure scope containing member functions
+ // that are supposed to see anonymous access to member variables.
+};
+
+class TSymbolTable {
+public:
+ TSymbolTable() : uniqueId(0), noBuiltInRedeclarations(false), separateNameSpaces(false), adoptedLevels(0)
+ {
+ //
+ // This symbol table cannot be used until push() is called.
+ //
+ }
+ ~TSymbolTable()
+ {
+ // this can be called explicitly; safest to code it so it can be called multiple times
+
+ // don't deallocate levels passed in from elsewhere
+ while (table.size() > adoptedLevels)
+ pop(0);
+ }
+
+ void adoptLevels(TSymbolTable& symTable)
+ {
+ for (unsigned int level = 0; level < symTable.table.size(); ++level) {
+ table.push_back(symTable.table[level]);
+ ++adoptedLevels;
+ }
+ uniqueId = symTable.uniqueId;
+ noBuiltInRedeclarations = symTable.noBuiltInRedeclarations;
+ separateNameSpaces = symTable.separateNameSpaces;
+ }
+
+ //
+ // While level adopting is generic, the methods below enact the following
+ // convention for levels:
+ // 0: common built-ins shared across all stages, all compiles, only one copy for all symbol tables
+ // 1: per-stage built-ins, shared across all compiles, but a different copy per stage
+ // 2: built-ins specific to a compile, like resources that are context-dependent, or redeclared built-ins
+ // 3: user-shader globals
+ //
+protected:
+ static const int globalLevel = 3;
+ bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels
+ bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals
+ bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals
+public:
+ bool isEmpty() { return table.size() == 0; }
+ bool atBuiltInLevel() { return isBuiltInLevel(currentLevel()); }
+ bool atGlobalLevel() { return isGlobalLevel(currentLevel()); }
+
+ void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; }
+ void setSeparateNameSpaces() { separateNameSpaces = true; }
+
+ void push()
+ {
+ table.push_back(new TSymbolTableLevel);
+ }
+
+ // Make a new symbol-table level to represent the scope introduced by a structure
+ // containing member functions, such that the member functions can find anonymous
+ // references to member variables.
+ //
+ // 'thisSymbol' should have a name of "" to trigger anonymous structure-member
+ // symbol finds.
+ void pushThis(TSymbol& thisSymbol)
+ {
+ assert(thisSymbol.getName().size() == 0);
+ table.push_back(new TSymbolTableLevel);
+ table.back()->setThisLevel();
+ insert(thisSymbol);
+ }
+
+ void pop(TPrecisionQualifier *p)
+ {
+ table[currentLevel()]->getPreviousDefaultPrecisions(p);
+ delete table.back();
+ table.pop_back();
+ }
+
+ //
+ // Insert a visible symbol into the symbol table so it can
+ // be found later by name.
+ //
+ // Returns false if there was a name collision.
+ //
+ bool insert(TSymbol& symbol)
+ {
+ symbol.setUniqueId(++uniqueId);
+
+ // make sure there isn't a function of this variable name
+ if (! separateNameSpaces && ! symbol.getAsFunction() && table[currentLevel()]->hasFunctionName(symbol.getName()))
+ return false;
+
+ // check for not overloading or redefining a built-in function
+ if (noBuiltInRedeclarations) {
+ if (atGlobalLevel() && currentLevel() > 0) {
+ if (table[0]->hasFunctionName(symbol.getName()))
+ return false;
+ if (currentLevel() > 1 && table[1]->hasFunctionName(symbol.getName()))
+ return false;
+ }
+ }
+
+ return table[currentLevel()]->insert(symbol, separateNameSpaces);
+ }
+
+ // Add more members to an already inserted aggregate object
+ bool amend(TSymbol& symbol, int firstNewMember)
+ {
+ // See insert() for comments on basic explanation of insert.
+ // This operates similarly, but more simply.
+ return table[currentLevel()]->amend(symbol, firstNewMember);
+ }
+
+ //
+ // To allocate an internal temporary, which will need to be uniquely
+ // identified by the consumer of the AST, but never needs to be
+ // found by doing a symbol table search by name, and hence is allowed an
+ // arbitrary name in the symbol with no worry of collision.
+ //
+ void makeInternalVariable(TSymbol& symbol)
+ {
+ symbol.setUniqueId(++uniqueId);
+ }
+
+ //
+ // Copy a variable or anonymous member's structure from a shared level so that
+ // it can be added (soon after return) to the symbol table where it can be
+ // modified without impacting other users of the shared table.
+ //
+ TSymbol* copyUpDeferredInsert(TSymbol* shared)
+ {
+ if (shared->getAsVariable()) {
+ TSymbol* copy = shared->clone();
+ copy->setUniqueId(shared->getUniqueId());
+ return copy;
+ } else {
+ const TAnonMember* anon = shared->getAsAnonMember();
+ assert(anon);
+ TVariable* container = anon->getAnonContainer().clone();
+ container->changeName(NewPoolTString(""));
+ container->setUniqueId(anon->getAnonContainer().getUniqueId());
+ return container;
+ }
+ }
+
+ TSymbol* copyUp(TSymbol* shared)
+ {
+ TSymbol* copy = copyUpDeferredInsert(shared);
+ table[globalLevel]->insert(*copy, separateNameSpaces);
+ if (shared->getAsVariable())
+ return copy;
+ else {
+ // return the copy of the anonymous member
+ return table[globalLevel]->find(shared->getName());
+ }
+ }
+
+ // Normal find of a symbol, that can optionally say whether the symbol was found
+ // at a built-in level or the current top-scope level.
+ TSymbol* find(const TString& name, bool* builtIn = 0, bool* currentScope = 0, int* thisDepthP = 0)
+ {
+ int level = currentLevel();
+ TSymbol* symbol;
+ int thisDepth = 0;
+ do {
+ if (table[level]->isThisLevel())
+ ++thisDepth;
+ symbol = table[level]->find(name);
+ --level;
+ } while (symbol == nullptr && level >= 0);
+ level++;
+ if (builtIn)
+ *builtIn = isBuiltInLevel(level);
+ if (currentScope)
+ *currentScope = isGlobalLevel(currentLevel()) || level == currentLevel(); // consider shared levels as "current scope" WRT user globals
+ if (thisDepthP != nullptr) {
+ if (! table[level]->isThisLevel())
+ thisDepth = 0;
+ *thisDepthP = thisDepth;
+ }
+
+ return symbol;
+ }
+
+ // Find of a symbol that returns how many layers deep of nested
+ // structures-with-member-functions ('this' scopes) deep the symbol was
+ // found in.
+ TSymbol* find(const TString& name, int& thisDepth)
+ {
+ int level = currentLevel();
+ TSymbol* symbol;
+ thisDepth = 0;
+ do {
+ if (table[level]->isThisLevel())
+ ++thisDepth;
+ symbol = table[level]->find(name);
+ --level;
+ } while (symbol == 0 && level >= 0);
+
+ if (! table[level + 1]->isThisLevel())
+ thisDepth = 0;
+
+ return symbol;
+ }
+
+ bool isFunctionNameVariable(const TString& name) const
+ {
+ if (separateNameSpaces)
+ return false;
+
+ int level = currentLevel();
+ do {
+ bool variable;
+ bool found = table[level]->findFunctionVariableName(name, variable);
+ if (found)
+ return variable;
+ --level;
+ } while (level >= 0);
+
+ return false;
+ }
+
+ void findFunctionNameList(const TString& name, TVector<const TFunction*>& list, bool& builtIn)
+ {
+ // For user levels, return the set found in the first scope with a match
+ builtIn = false;
+ int level = currentLevel();
+ do {
+ table[level]->findFunctionNameList(name, list);
+ --level;
+ } while (list.empty() && level >= globalLevel);
+
+ if (! list.empty())
+ return;
+
+ // Gather across all built-in levels; they don't hide each other
+ builtIn = true;
+ do {
+ table[level]->findFunctionNameList(name, list);
+ --level;
+ } while (level >= 0);
+ }
+
+ void relateToOperator(const char* name, TOperator op)
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->relateToOperator(name, op);
+ }
+
+ void setFunctionExtensions(const char* name, int num, const char* const extensions[])
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->setFunctionExtensions(name, num, extensions);
+ }
+
+ void setVariableExtensions(const char* name, int numExts, const char* const extensions[])
+ {
+ TSymbol* symbol = find(TString(name));
+ if (symbol == nullptr)
+ return;
+
+ symbol->setExtensions(numExts, extensions);
+ }
+
+ void setVariableExtensions(const char* blockName, const char* name, int numExts, const char* const extensions[])
+ {
+ TSymbol* symbol = find(TString(blockName));
+ if (symbol == nullptr)
+ return;
+ TVariable* variable = symbol->getAsVariable();
+ assert(variable != nullptr);
+
+ const TTypeList& structure = *variable->getAsVariable()->getType().getStruct();
+ for (int member = 0; member < (int)structure.size(); ++member) {
+ if (structure[member].type->getFieldName().compare(name) == 0) {
+ variable->setMemberExtensions(member, numExts, extensions);
+ return;
+ }
+ }
+ }
+
+ int getMaxSymbolId() { return uniqueId; }
+#ifndef GLSLANG_WEB
+ void dump(TInfoSink& infoSink, bool complete = false) const;
+#endif
+ void copyTable(const TSymbolTable& copyOf);
+
+ void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); }
+
+ void readOnly()
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->readOnly();
+ }
+
+protected:
+ TSymbolTable(TSymbolTable&);
+ TSymbolTable& operator=(TSymbolTable&);
+
+ int currentLevel() const { return static_cast<int>(table.size()) - 1; }
+
+ std::vector<TSymbolTableLevel*> table;
+ int uniqueId; // for unique identification in code generation
+ bool noBuiltInRedeclarations;
+ bool separateNameSpaces;
+ unsigned int adoptedLevels;
+};
+
+} // end namespace glslang
+
+#endif // _SYMBOL_TABLE_INCLUDED_
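Taken together, typical use of TSymbolTable is push/insert/find/pop. A hedged sketch (hypothetical driver code, not from the library; NewPoolTString, TString, and the TVariable constructor all require an active thread pool allocator, e.g. after glslang::InitializeProcess() and SetThreadPoolAllocator()):

#include <cstdio>

void symbol_table_sketch() // hypothetical, assumes an active pool allocator
{
    glslang::TSymbolTable table;
    table.push();                                   // open a scope
    glslang::TVariable* x = new glslang::TVariable(
        glslang::NewPoolTString("x"),
        glslang::TType(glslang::EbtFloat, glslang::EvqGlobal));
    if (!table.insert(*x))
        fprintf(stderr, "name collision\n");
    bool builtIn = false, currentScope = false;
    glslang::TSymbol* hit = table.find(glslang::TString("x"), &builtIn, &currentScope);
    if (hit != nullptr)
        printf("found %s (builtIn=%d)\n", hit->getName().c_str(), builtIn);
    table.pop(nullptr);                             // close the scope; no precision defaults to restore
}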
diff --git a/dep/glslang/glslang/MachineIndependent/Versions.cpp b/dep/glslang/glslang/MachineIndependent/Versions.cpp
new file mode 100644
index 000000000..143f2516d
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/Versions.cpp
@@ -0,0 +1,1205 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2020 Google, Inc.
+// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Help manage multiple profiles, versions, extensions etc.
+//
+// These don't return error codes, as the presumption is parsing will
+// always continue as if the tested feature were enabled, and thus there
+// is no error recovery needed.
+//
+
+//
+// HOW TO add a feature enabled by an extension.
+//
+// To add a new hypothetical "Feature F" to the front end, where an extension
+// "XXX_extension_X" can be used to enable the feature, do the following.
+//
+// OVERVIEW: Specific features are what are error-checked for, not
+// extensions: A specific Feature F might be enabled by an extension, or a
+// particular version in a particular profile, or a stage, or combinations, etc.
+//
+// The basic mechanism is to use the following to "declare" all the things that
+// enable/disable Feature F, in a code path that implements Feature F:
+//
+// requireProfile()
+// profileRequires()
+// requireStage()
+// checkDeprecated()
+// requireNotRemoved()
+// requireExtensions()
+//
+// Typically, only the first two calls are needed. They go into a code path that
+// implements Feature F, and will log the proper error/warning messages. Parsing
+// will then always continue as if the tested feature was enabled.
+//
+// There is typically no if-testing or conditional parsing, just insertion of the calls above.
+// However, if symbols specific to the extension are added (step 5), they will
+// only be added under tests that the minimum version and profile are present.
+//
+// 1) Add a symbol name for the extension string at the bottom of Versions.h:
+//
+// const char* const XXX_extension_X = "XXX_extension_X";
+//
+// 2) Add extension initialization to TParseVersions::initializeExtensionBehavior(),
+// the first function below:
+//
+// extensionBehavior[XXX_extension_X] = EBhDisable;
+//
+// 3) Add any preprocessor directives etc. in the next function, TParseVersions::getPreamble():
+//
+// "#define XXX_extension_X 1\n"
+//
+// The new-line is important, as that ends preprocess tokens.
+//
+// 4) Insert a profile check in the feature's path (unless all profiles support the feature,
+// for some version level). That is, call requireProfile() to constrain the profiles, e.g.:
+//
+// // ... in a path specific to Feature F...
+// requireProfile(loc,
+// ECoreProfile | ECompatibilityProfile,
+// "Feature F");
+//
+// 5) For each profile that supports the feature, insert version/extension checks:
+//
+// The most likely scenario is that Feature F can only be used with a
+// particular profile if XXX_extension_X is present or the version is
+// high enough that the core specification already incorporated it.
+//
+// // following the requireProfile() call...
+// profileRequires(loc, +// ECoreProfile | ECompatibilityProfile, +// 420, // 0 if no version incorporated the feature into the core spec. +// XXX_extension_X, // can be a list of extensions that all add the feature +// "Feature F Description"); +// +// This allows the feature if either A) one of the extensions is enabled or +// B) the version is high enough. If no version yet incorporates the feature +// into core, pass in 0. +// +// This can be called multiple times, if different profiles support the +// feature starting at different version numbers or with different +// extensions. +// +// This must be called for each profile allowed by the initial call to requireProfile(). +// +// Profiles are all masks, which can be "or"-ed together. +// +// ENoProfile +// ECoreProfile +// ECompatibilityProfile +// EEsProfile +// +// The ENoProfile profile is only for desktop, before profiles showed up in version 150; +// All other #version with no profile default to either es or core, and so have profiles. +// +// You can select all but a particular profile using ~. The following basically means "desktop": +// +// ~EEsProfile +// +// 6) If built-in symbols are added by the extension, add them in Initialize.cpp: Their use +// will be automatically error checked against the extensions enabled at that moment. +// see the comment at the top of Initialize.cpp for where to put them. Establish them at +// the earliest release that supports the extension. Then, tag them with the +// set of extensions that both enable them and are necessary, given the version of the symbol +// table. (There is a different symbol table for each version.) +// + +#include "parseVersions.h" +#include "localintermediate.h" + +namespace glslang { + +#ifndef GLSLANG_WEB + +// +// Initialize all extensions, almost always to 'disable', as once their features +// are incorporated into a core version, their features are supported through allowing that +// core version, not through a pseudo-enablement of the extension. 
+// +void TParseVersions::initializeExtensionBehavior() +{ + extensionBehavior[E_GL_OES_texture_3D] = EBhDisable; + extensionBehavior[E_GL_OES_standard_derivatives] = EBhDisable; + extensionBehavior[E_GL_EXT_frag_depth] = EBhDisable; + extensionBehavior[E_GL_OES_EGL_image_external] = EBhDisable; + extensionBehavior[E_GL_OES_EGL_image_external_essl3] = EBhDisable; + extensionBehavior[E_GL_EXT_YUV_target] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_texture_lod] = EBhDisable; + extensionBehavior[E_GL_EXT_shadow_samplers] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_rectangle] = EBhDisable; + extensionBehavior[E_GL_3DL_array_objects] = EBhDisable; + extensionBehavior[E_GL_ARB_shading_language_420pack] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_gather] = EBhDisable; + extensionBehavior[E_GL_ARB_gpu_shader5] = EBhDisablePartial; + extensionBehavior[E_GL_ARB_separate_shader_objects] = EBhDisable; + extensionBehavior[E_GL_ARB_compute_shader] = EBhDisable; + extensionBehavior[E_GL_ARB_tessellation_shader] = EBhDisable; + extensionBehavior[E_GL_ARB_enhanced_layouts] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_cube_map_array] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_multisample] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_texture_lod] = EBhDisable; + extensionBehavior[E_GL_ARB_explicit_attrib_location] = EBhDisable; + extensionBehavior[E_GL_ARB_explicit_uniform_location] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_image_load_store] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_atomic_counters] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_draw_parameters] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_group_vote] = EBhDisable; + extensionBehavior[E_GL_ARB_derivative_control] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_texture_image_samples] = EBhDisable; + extensionBehavior[E_GL_ARB_viewport_array] = EBhDisable; + extensionBehavior[E_GL_ARB_gpu_shader_int64] = EBhDisable; + extensionBehavior[E_GL_ARB_gpu_shader_fp64] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_ballot] = EBhDisable; + extensionBehavior[E_GL_ARB_sparse_texture2] = EBhDisable; + extensionBehavior[E_GL_ARB_sparse_texture_clamp] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_stencil_export] = EBhDisable; +// extensionBehavior[E_GL_ARB_cull_distance] = EBhDisable; // present for 4.5, but need extension control over block members + extensionBehavior[E_GL_ARB_post_depth_coverage] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_viewport_layer_array] = EBhDisable; + extensionBehavior[E_GL_ARB_fragment_shader_interlock] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_clock] = EBhDisable; + extensionBehavior[E_GL_ARB_uniform_buffer_object] = EBhDisable; + extensionBehavior[E_GL_ARB_sample_shading] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_bit_encoding] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_image_size] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_storage_buffer_object] = EBhDisable; + extensionBehavior[E_GL_ARB_shading_language_packing] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_query_lod] = EBhDisable; + extensionBehavior[E_GL_ARB_vertex_attrib_64bit] = EBhDisable; + + extensionBehavior[E_GL_KHR_shader_subgroup_basic] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_vote] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_arithmetic] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_ballot] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_shuffle] = EBhDisable; + 
extensionBehavior[E_GL_KHR_shader_subgroup_shuffle_relative] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_clustered] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_quad] = EBhDisable; + extensionBehavior[E_GL_KHR_memory_scope_semantics] = EBhDisable; + + extensionBehavior[E_GL_EXT_shader_atomic_int64] = EBhDisable; + + extensionBehavior[E_GL_EXT_shader_non_constant_global_initializers] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_image_load_formatted] = EBhDisable; + extensionBehavior[E_GL_EXT_post_depth_coverage] = EBhDisable; + extensionBehavior[E_GL_EXT_control_flow_attributes] = EBhDisable; + extensionBehavior[E_GL_EXT_nonuniform_qualifier] = EBhDisable; + extensionBehavior[E_GL_EXT_samplerless_texture_functions] = EBhDisable; + extensionBehavior[E_GL_EXT_scalar_block_layout] = EBhDisable; + extensionBehavior[E_GL_EXT_fragment_invocation_density] = EBhDisable; + extensionBehavior[E_GL_EXT_buffer_reference] = EBhDisable; + extensionBehavior[E_GL_EXT_buffer_reference2] = EBhDisable; + extensionBehavior[E_GL_EXT_buffer_reference_uvec2] = EBhDisable; + extensionBehavior[E_GL_EXT_demote_to_helper_invocation] = EBhDisable; + extensionBehavior[E_GL_EXT_debug_printf] = EBhDisable; + + extensionBehavior[E_GL_EXT_shader_16bit_storage] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_8bit_storage] = EBhDisable; + + // #line and #include + extensionBehavior[E_GL_GOOGLE_cpp_style_line_directive] = EBhDisable; + extensionBehavior[E_GL_GOOGLE_include_directive] = EBhDisable; + + extensionBehavior[E_GL_AMD_shader_ballot] = EBhDisable; + extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable; + extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable; + extensionBehavior[E_GL_AMD_gcn_shader] = EBhDisable; + extensionBehavior[E_GL_AMD_gpu_shader_half_float] = EBhDisable; + extensionBehavior[E_GL_AMD_texture_gather_bias_lod] = EBhDisable; + extensionBehavior[E_GL_AMD_gpu_shader_int16] = EBhDisable; + extensionBehavior[E_GL_AMD_shader_image_load_store_lod] = EBhDisable; + extensionBehavior[E_GL_AMD_shader_fragment_mask] = EBhDisable; + extensionBehavior[E_GL_AMD_gpu_shader_half_float_fetch] = EBhDisable; + + extensionBehavior[E_GL_INTEL_shader_integer_functions2] = EBhDisable; + + extensionBehavior[E_GL_NV_sample_mask_override_coverage] = EBhDisable; + extensionBehavior[E_SPV_NV_geometry_shader_passthrough] = EBhDisable; + extensionBehavior[E_GL_NV_viewport_array2] = EBhDisable; + extensionBehavior[E_GL_NV_stereo_view_rendering] = EBhDisable; + extensionBehavior[E_GL_NVX_multiview_per_view_attributes] = EBhDisable; + extensionBehavior[E_GL_NV_shader_atomic_int64] = EBhDisable; + extensionBehavior[E_GL_NV_conservative_raster_underestimation] = EBhDisable; + extensionBehavior[E_GL_NV_shader_noperspective_interpolation] = EBhDisable; + extensionBehavior[E_GL_NV_shader_subgroup_partitioned] = EBhDisable; + extensionBehavior[E_GL_NV_shading_rate_image] = EBhDisable; + extensionBehavior[E_GL_NV_ray_tracing] = EBhDisable; + extensionBehavior[E_GL_NV_fragment_shader_barycentric] = EBhDisable; + extensionBehavior[E_GL_NV_compute_shader_derivatives] = EBhDisable; + extensionBehavior[E_GL_NV_shader_texture_footprint] = EBhDisable; + extensionBehavior[E_GL_NV_mesh_shader] = EBhDisable; + + extensionBehavior[E_GL_NV_cooperative_matrix] = EBhDisable; + extensionBehavior[E_GL_NV_shader_sm_builtins] = EBhDisable; + extensionBehavior[E_GL_NV_integer_cooperative_matrix] = EBhDisable; + + // AEP + extensionBehavior[E_GL_ANDROID_extension_pack_es31a] = EBhDisable; + 
extensionBehavior[E_GL_KHR_blend_equation_advanced] = EBhDisable; + extensionBehavior[E_GL_OES_sample_variables] = EBhDisable; + extensionBehavior[E_GL_OES_shader_image_atomic] = EBhDisable; + extensionBehavior[E_GL_OES_shader_multisample_interpolation] = EBhDisable; + extensionBehavior[E_GL_OES_texture_storage_multisample_2d_array] = EBhDisable; + extensionBehavior[E_GL_EXT_geometry_shader] = EBhDisable; + extensionBehavior[E_GL_EXT_geometry_point_size] = EBhDisable; + extensionBehavior[E_GL_EXT_gpu_shader5] = EBhDisable; + extensionBehavior[E_GL_EXT_primitive_bounding_box] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_io_blocks] = EBhDisable; + extensionBehavior[E_GL_EXT_tessellation_shader] = EBhDisable; + extensionBehavior[E_GL_EXT_tessellation_point_size] = EBhDisable; + extensionBehavior[E_GL_EXT_texture_buffer] = EBhDisable; + extensionBehavior[E_GL_EXT_texture_cube_map_array] = EBhDisable; + + // OES matching AEP + extensionBehavior[E_GL_OES_geometry_shader] = EBhDisable; + extensionBehavior[E_GL_OES_geometry_point_size] = EBhDisable; + extensionBehavior[E_GL_OES_gpu_shader5] = EBhDisable; + extensionBehavior[E_GL_OES_primitive_bounding_box] = EBhDisable; + extensionBehavior[E_GL_OES_shader_io_blocks] = EBhDisable; + extensionBehavior[E_GL_OES_tessellation_shader] = EBhDisable; + extensionBehavior[E_GL_OES_tessellation_point_size] = EBhDisable; + extensionBehavior[E_GL_OES_texture_buffer] = EBhDisable; + extensionBehavior[E_GL_OES_texture_cube_map_array] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_integer_mix] = EBhDisable; + + // EXT extensions + extensionBehavior[E_GL_EXT_device_group] = EBhDisable; + extensionBehavior[E_GL_EXT_multiview] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_realtime_clock] = EBhDisable; + extensionBehavior[E_GL_EXT_ray_tracing] = EBhDisable; + extensionBehavior[E_GL_EXT_ray_query] = EBhDisable; + extensionBehavior[E_GL_EXT_ray_flags_primitive_culling] = EBhDisable; + extensionBehavior[E_GL_EXT_blend_func_extended] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_implicit_conversions] = EBhDisable; + + // OVR extensions + extensionBehavior[E_GL_OVR_multiview] = EBhDisable; + extensionBehavior[E_GL_OVR_multiview2] = EBhDisable; + + // explicit types + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int8] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int16] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int32] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int64] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float16] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float32] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float64] = EBhDisable; + + // subgroup extended types + extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int8] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int16] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int64] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_float16] = EBhDisable; +} +#endif // GLSLANG_WEB + +// Get code that is not part of a shared symbol table, is specific to this shader, +// or needed by the preprocessor (which does not use a shared symbol table). 
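+//
+// Editorial sketch (not upstream text) of the shape of the result: for an ES
+// compile the preamble begins
+//
+//     #define GL_ES 1
+//     #define GL_FRAGMENT_PRECISION_HIGH 1
+//     #define GL_OES_texture_3D 1
+//     ...
+//
+// and, when targeting Vulkan GLSL, ends with "#define VULKAN <version>".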
+void TParseVersions::getPreamble(std::string& preamble) +{ + if (isEsProfile()) { + preamble = + "#define GL_ES 1\n" + "#define GL_FRAGMENT_PRECISION_HIGH 1\n" +#ifdef GLSLANG_WEB + ; +#else + "#define GL_OES_texture_3D 1\n" + "#define GL_OES_standard_derivatives 1\n" + "#define GL_EXT_frag_depth 1\n" + "#define GL_OES_EGL_image_external 1\n" + "#define GL_OES_EGL_image_external_essl3 1\n" + "#define GL_EXT_YUV_target 1\n" + "#define GL_EXT_shader_texture_lod 1\n" + "#define GL_EXT_shadow_samplers 1\n" + + // AEP + "#define GL_ANDROID_extension_pack_es31a 1\n" + "#define GL_OES_sample_variables 1\n" + "#define GL_OES_shader_image_atomic 1\n" + "#define GL_OES_shader_multisample_interpolation 1\n" + "#define GL_OES_texture_storage_multisample_2d_array 1\n" + "#define GL_EXT_geometry_shader 1\n" + "#define GL_EXT_geometry_point_size 1\n" + "#define GL_EXT_gpu_shader5 1\n" + "#define GL_EXT_primitive_bounding_box 1\n" + "#define GL_EXT_shader_io_blocks 1\n" + "#define GL_EXT_tessellation_shader 1\n" + "#define GL_EXT_tessellation_point_size 1\n" + "#define GL_EXT_texture_buffer 1\n" + "#define GL_EXT_texture_cube_map_array 1\n" + "#define GL_EXT_shader_implicit_conversions 1\n" + "#define GL_EXT_shader_integer_mix 1\n" + "#define GL_EXT_blend_func_extended 1\n" + + // OES matching AEP + "#define GL_OES_geometry_shader 1\n" + "#define GL_OES_geometry_point_size 1\n" + "#define GL_OES_gpu_shader5 1\n" + "#define GL_OES_primitive_bounding_box 1\n" + "#define GL_OES_shader_io_blocks 1\n" + "#define GL_OES_tessellation_shader 1\n" + "#define GL_OES_tessellation_point_size 1\n" + "#define GL_OES_texture_buffer 1\n" + "#define GL_OES_texture_cube_map_array 1\n" + "#define GL_EXT_shader_non_constant_global_initializers 1\n" + ; + + if (isEsProfile() && version >= 300) { + preamble += "#define GL_NV_shader_noperspective_interpolation 1\n"; + } + + } else { + preamble = + "#define GL_FRAGMENT_PRECISION_HIGH 1\n" + "#define GL_ARB_texture_rectangle 1\n" + "#define GL_ARB_shading_language_420pack 1\n" + "#define GL_ARB_texture_gather 1\n" + "#define GL_ARB_gpu_shader5 1\n" + "#define GL_ARB_separate_shader_objects 1\n" + "#define GL_ARB_compute_shader 1\n" + "#define GL_ARB_tessellation_shader 1\n" + "#define GL_ARB_enhanced_layouts 1\n" + "#define GL_ARB_texture_cube_map_array 1\n" + "#define GL_ARB_texture_multisample 1\n" + "#define GL_ARB_shader_texture_lod 1\n" + "#define GL_ARB_explicit_attrib_location 1\n" + "#define GL_ARB_explicit_uniform_location 1\n" + "#define GL_ARB_shader_image_load_store 1\n" + "#define GL_ARB_shader_atomic_counters 1\n" + "#define GL_ARB_shader_draw_parameters 1\n" + "#define GL_ARB_shader_group_vote 1\n" + "#define GL_ARB_derivative_control 1\n" + "#define GL_ARB_shader_texture_image_samples 1\n" + "#define GL_ARB_viewport_array 1\n" + "#define GL_ARB_gpu_shader_int64 1\n" + "#define GL_ARB_gpu_shader_fp64 1\n" + "#define GL_ARB_shader_ballot 1\n" + "#define GL_ARB_sparse_texture2 1\n" + "#define GL_ARB_sparse_texture_clamp 1\n" + "#define GL_ARB_shader_stencil_export 1\n" + "#define GL_ARB_sample_shading 1\n" + "#define GL_ARB_shader_image_size 1\n" + "#define GL_ARB_shading_language_packing 1\n" +// "#define GL_ARB_cull_distance 1\n" // present for 4.5, but need extension control over block members + "#define GL_ARB_post_depth_coverage 1\n" + "#define GL_ARB_fragment_shader_interlock 1\n" + "#define GL_ARB_uniform_buffer_object 1\n" + "#define GL_ARB_shader_bit_encoding 1\n" + "#define GL_ARB_shader_storage_buffer_object 1\n" + "#define GL_ARB_texture_query_lod 1\n" + 
"#define GL_ARB_vertex_attrib_64bit 1\n" + "#define GL_EXT_shader_non_constant_global_initializers 1\n" + "#define GL_EXT_shader_image_load_formatted 1\n" + "#define GL_EXT_post_depth_coverage 1\n" + "#define GL_EXT_control_flow_attributes 1\n" + "#define GL_EXT_nonuniform_qualifier 1\n" + "#define GL_EXT_shader_16bit_storage 1\n" + "#define GL_EXT_shader_8bit_storage 1\n" + "#define GL_EXT_samplerless_texture_functions 1\n" + "#define GL_EXT_scalar_block_layout 1\n" + "#define GL_EXT_fragment_invocation_density 1\n" + "#define GL_EXT_buffer_reference 1\n" + "#define GL_EXT_buffer_reference2 1\n" + "#define GL_EXT_buffer_reference_uvec2 1\n" + "#define GL_EXT_demote_to_helper_invocation 1\n" + "#define GL_EXT_debug_printf 1\n" + + // GL_KHR_shader_subgroup + "#define GL_KHR_shader_subgroup_basic 1\n" + "#define GL_KHR_shader_subgroup_vote 1\n" + "#define GL_KHR_shader_subgroup_arithmetic 1\n" + "#define GL_KHR_shader_subgroup_ballot 1\n" + "#define GL_KHR_shader_subgroup_shuffle 1\n" + "#define GL_KHR_shader_subgroup_shuffle_relative 1\n" + "#define GL_KHR_shader_subgroup_clustered 1\n" + "#define GL_KHR_shader_subgroup_quad 1\n" + + "#define E_GL_EXT_shader_atomic_int64 1\n" + "#define E_GL_EXT_shader_realtime_clock 1\n" + "#define E_GL_EXT_ray_tracing 1\n" + "#define E_GL_EXT_ray_query 1\n" + "#define E_GL_EXT_ray_flags_primitive_culling 1\n" + + "#define GL_AMD_shader_ballot 1\n" + "#define GL_AMD_shader_trinary_minmax 1\n" + "#define GL_AMD_shader_explicit_vertex_parameter 1\n" + "#define GL_AMD_gcn_shader 1\n" + "#define GL_AMD_gpu_shader_half_float 1\n" + "#define GL_AMD_texture_gather_bias_lod 1\n" + "#define GL_AMD_gpu_shader_int16 1\n" + "#define GL_AMD_shader_image_load_store_lod 1\n" + "#define GL_AMD_shader_fragment_mask 1\n" + "#define GL_AMD_gpu_shader_half_float_fetch 1\n" + + "#define GL_INTEL_shader_integer_functions2 1\n" + + "#define GL_NV_sample_mask_override_coverage 1\n" + "#define GL_NV_geometry_shader_passthrough 1\n" + "#define GL_NV_viewport_array2 1\n" + "#define GL_NV_shader_atomic_int64 1\n" + "#define GL_NV_conservative_raster_underestimation 1\n" + "#define GL_NV_shader_subgroup_partitioned 1\n" + "#define GL_NV_shading_rate_image 1\n" + "#define GL_NV_ray_tracing 1\n" + "#define GL_NV_fragment_shader_barycentric 1\n" + "#define GL_NV_compute_shader_derivatives 1\n" + "#define GL_NV_shader_texture_footprint 1\n" + "#define GL_NV_mesh_shader 1\n" + "#define GL_NV_cooperative_matrix 1\n" + "#define GL_NV_integer_cooperative_matrix 1\n" + + "#define GL_EXT_shader_explicit_arithmetic_types 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_int8 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_int16 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_int32 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_int64 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_float16 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_float32 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_float64 1\n" + + "#define GL_EXT_shader_subgroup_extended_types_int8 1\n" + "#define GL_EXT_shader_subgroup_extended_types_int16 1\n" + "#define GL_EXT_shader_subgroup_extended_types_int64 1\n" + "#define GL_EXT_shader_subgroup_extended_types_float16 1\n" + ; + + if (version >= 150) { + // define GL_core_profile and GL_compatibility_profile + preamble += "#define GL_core_profile 1\n"; + + if (profile == ECompatibilityProfile) + preamble += "#define GL_compatibility_profile 1\n"; + } +#endif // GLSLANG_WEB + } + +#ifndef GLSLANG_WEB + if ((!isEsProfile() 
+        (isEsProfile() && version >= 310)) {
+        preamble +=
+            "#define GL_EXT_device_group 1\n"
+            "#define GL_EXT_multiview 1\n"
+            "#define GL_NV_shader_sm_builtins 1\n"
+            ;
+    }
+
+    if (version >= 300 /* both ES and non-ES */) {
+        preamble +=
+            "#define GL_OVR_multiview 1\n"
+            "#define GL_OVR_multiview2 1\n"
+            ;
+    }
+
+    // #line and #include
+    preamble +=
+        "#define GL_GOOGLE_cpp_style_line_directive 1\n"
+        "#define GL_GOOGLE_include_directive 1\n"
+        "#define GL_KHR_blend_equation_advanced 1\n"
+        ;
+#endif
+
+    // #define VULKAN XXXX
+    const int numberBufSize = 12;
+    char numberBuf[numberBufSize];
+    if (spvVersion.vulkanGlsl > 0) {
+        preamble += "#define VULKAN ";
+        snprintf(numberBuf, numberBufSize, "%d", spvVersion.vulkanGlsl);
+        preamble += numberBuf;
+        preamble += "\n";
+    }
+
+#ifndef GLSLANG_WEB
+    // #define GL_SPIRV XXXX
+    if (spvVersion.openGl > 0) {
+        preamble += "#define GL_SPIRV ";
+        snprintf(numberBuf, numberBufSize, "%d", spvVersion.openGl);
+        preamble += numberBuf;
+        preamble += "\n";
+    }
+#endif
+}
+
+//
+// Map from stage enum to externally readable text name.
+//
+const char* StageName(EShLanguage stage)
+{
+    switch(stage) {
+    case EShLangVertex:         return "vertex";
+    case EShLangFragment:       return "fragment";
+    case EShLangCompute:        return "compute";
+#ifndef GLSLANG_WEB
+    case EShLangTessControl:    return "tessellation control";
+    case EShLangTessEvaluation: return "tessellation evaluation";
+    case EShLangGeometry:       return "geometry";
+    case EShLangRayGen:         return "ray-generation";
+    case EShLangIntersect:      return "intersection";
+    case EShLangAnyHit:         return "any-hit";
+    case EShLangClosestHit:     return "closest-hit";
+    case EShLangMiss:           return "miss";
+    case EShLangCallable:       return "callable";
+    case EShLangMeshNV:         return "mesh";
+    case EShLangTaskNV:         return "task";
+#endif
+    default:                    return "unknown stage";
+    }
+}
+
+//
+// When to use requireStage()
+//
+// If only some stages support a feature.
+//
+// Operation: If the current stage is not present, give an error message.
+//
+void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguageMask languageMask, const char* featureDesc)
+{
+    if (((1 << language) & languageMask) == 0)
+        error(loc, "not supported in this stage:", featureDesc, StageName(language));
+}
+
+// If only one stage supports a feature, this can be called. But, all supporting stages
+// must be specified with one call.
+void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguage stage, const char* featureDesc)
+{
+    requireStage(loc, static_cast<EShLanguageMask>(1 << stage), featureDesc);
+}
+
+#ifndef GLSLANG_WEB
+//
+// When to use requireProfile():
+//
+// Use if only some profiles support a feature. However, if within a profile the feature
+// is version or extension specific, follow this call with calls to profileRequires().
+//
+// Operation: If the current profile is not one of the profileMask,
+// give an error message.
+//
+void TParseVersions::requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
+{
+    if (! (profile & profileMask))
+        error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
+}
+
+//
+// When to use profileRequires():
+//
+// If a set of profiles have the same requirements for what version or extensions
+// are needed to support a feature.
+//
+// It must be called for each profile that needs protection. Use requireProfile() first
+// to reduce that set of profiles.
+//
+// Operation: Will issue warnings/errors based on the current profile, version, and extension
+// behaviors. It only checks extensions when the current profile is one of the profileMask.
+//
+// A minVersion of 0 means no version of the profileMask supports this in core;
+// the extension must be present.
+//
+
+// entry point that takes multiple extensions
+void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
+    const char* const extensions[], const char* featureDesc)
+{
+    if (profile & profileMask) {
+        bool okay = minVersion > 0 && version >= minVersion;
+#ifndef GLSLANG_WEB
+        for (int i = 0; i < numExtensions; ++i) {
+            switch (getExtensionBehavior(extensions[i])) {
+            case EBhWarn:
+                infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
+                // fall through
+            case EBhRequire:
+            case EBhEnable:
+                okay = true;
+                break;
+            default: break; // some compilers want this
+            }
+        }
+#endif
+        if (! okay)
+            error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
+    }
+}
+
+// entry point for the above that takes a single extension
+void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
+    const char* featureDesc)
+{
+    profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
+}
+
+void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
+{
+    error(loc, "feature not yet implemented", featureDesc, "");
+}
+
+//
+// Within a set of profiles, see if a feature is deprecated and give an error or warning based on whether
+// a future compatibility context is being used.
+//
+void TParseVersions::checkDeprecated(const TSourceLoc& loc, int profileMask, int depVersion, const char* featureDesc)
+{
+    if (profile & profileMask) {
+        if (version >= depVersion) {
+            if (forwardCompatible)
+                error(loc, "deprecated, may be removed in future release", featureDesc, "");
+            else if (! suppressWarnings())
+                infoSink.info.message(EPrefixWarning, (TString(featureDesc) + " deprecated in version " +
+                                                       String(depVersion) + "; may be removed in future release").c_str(), loc);
+        }
+    }
+}
+
+//
+// Within a set of profiles, see if a feature has now been removed and if so, give an error.
+// The version argument is the first version no longer having the feature.
+//
+void TParseVersions::requireNotRemoved(const TSourceLoc& loc, int profileMask, int removedVersion, const char* featureDesc)
+{
+    if (profile & profileMask) {
+        if (version >= removedVersion) {
+            const int maxSize = 60;
+            char buf[maxSize];
+            snprintf(buf, maxSize, "%s profile; removed in version %d", ProfileName(profile), removedVersion);
+            error(loc, "no longer supported in", featureDesc, buf);
+        }
+    }
+}
+
+// Returns true if at least one of the extensions in the extensions parameter is requested. Otherwise, returns false.
+// Warns appropriately if the requested behavior of an extension is "warn".
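+//
+// Editorial example (not upstream text): if the shader has declared
+//     #extension GL_ARB_gpu_shader5 : enable
+// then checkExtensionsRequested(loc, 1, &E_GL_ARB_gpu_shader5, "feature") returns
+// true silently; with ": warn" it returns true after the "is being used" warning.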
+bool TParseVersions::checkExtensionsRequested(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+    // First, see if any of the extensions are enabled
+    for (int i = 0; i < numExtensions; ++i) {
+        TExtensionBehavior behavior = getExtensionBehavior(extensions[i]);
+        if (behavior == EBhEnable || behavior == EBhRequire)
+            return true;
+    }
+
+    // See if any extensions want to give a warning on use; give warnings for all such extensions
+    bool warned = false;
+    for (int i = 0; i < numExtensions; ++i) {
+        TExtensionBehavior behavior = getExtensionBehavior(extensions[i]);
+        if (behavior == EBhDisable && relaxedErrors()) {
+            infoSink.info.message(EPrefixWarning, "The following extension must be enabled to use this feature:", loc);
+            behavior = EBhWarn;
+        }
+        if (behavior == EBhWarn) {
+            infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
+            warned = true;
+        }
+    }
+    if (warned)
+        return true;
+    return false;
+}
+
+//
+// Use when there is no profile/version to check; it's just an error if one of the
+// extensions is not present.
+//
+void TParseVersions::requireExtensions(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+    if (checkExtensionsRequested(loc, numExtensions, extensions, featureDesc))
+        return;
+
+    // If we get this far, give errors explaining what extensions are needed
+    if (numExtensions == 1)
+        error(loc, "required extension not requested:", featureDesc, extensions[0]);
+    else {
+        error(loc, "required extension not requested:", featureDesc, "Possible extensions include:");
+        for (int i = 0; i < numExtensions; ++i)
+            infoSink.info.message(EPrefixNone, extensions[i]);
+    }
+}
+
+//
+// Used by the preprocessor when there is no profile/version to check; it's just an error if one of the
+// extensions is not present.
+//
+void TParseVersions::ppRequireExtensions(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+    if (checkExtensionsRequested(loc, numExtensions, extensions, featureDesc))
+        return;
+
+    // If we get this far, give errors explaining what extensions are needed
+    if (numExtensions == 1)
+        ppError(loc, "required extension not requested:", featureDesc, extensions[0]);
+    else {
+        ppError(loc, "required extension not requested:", featureDesc, "Possible extensions include:");
+        for (int i = 0; i < numExtensions; ++i)
+            infoSink.info.message(EPrefixNone, extensions[i]);
+    }
+}
+
+TExtensionBehavior TParseVersions::getExtensionBehavior(const char* extension)
+{
+    auto iter = extensionBehavior.find(TString(extension));
+    if (iter == extensionBehavior.end())
+        return EBhMissing;
+    else
+        return iter->second;
+}
+
+// Returns true if the given extension is set to enable, require, or warn.
+bool TParseVersions::extensionTurnedOn(const char* const extension)
+{
+    switch (getExtensionBehavior(extension)) {
+    case EBhEnable:
+    case EBhRequire:
+    case EBhWarn:
+        return true;
+    default:
+        break;
+    }
+    return false;
+}
+// See if any of the extensions are set to enable, require, or warn.
+bool TParseVersions::extensionsTurnedOn(int numExtensions, const char* const extensions[])
+{
+    for (int i = 0; i < numExtensions; ++i) {
+        if (extensionTurnedOn(extensions[i]))
+            return true;
+    }
+    return false;
+}
+
+//
+// Change the current state of an extension's behavior.
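+//
+// Editorial example (not upstream text): a directive such as
+//     #extension GL_ANDROID_extension_pack_es31a : require
+// arrives here as updateExtensionBehavior(line, "GL_ANDROID_extension_pack_es31a",
+// "require"), and the propagation below applies "require" to each extension the
+// AEP bundles (geometry, tessellation, gpu_shader5, etc.).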
+// +void TParseVersions::updateExtensionBehavior(int line, const char* extension, const char* behaviorString) +{ + // Translate from text string of extension's behavior to an enum. + TExtensionBehavior behavior = EBhDisable; + if (! strcmp("require", behaviorString)) + behavior = EBhRequire; + else if (! strcmp("enable", behaviorString)) + behavior = EBhEnable; + else if (! strcmp("disable", behaviorString)) + behavior = EBhDisable; + else if (! strcmp("warn", behaviorString)) + behavior = EBhWarn; + else { + error(getCurrentLoc(), "behavior not supported:", "#extension", behaviorString); + return; + } + + // check if extension is used with correct shader stage + checkExtensionStage(getCurrentLoc(), extension); + + // update the requested extension + updateExtensionBehavior(extension, behavior); + + // see if need to propagate to implicitly modified things + if (strcmp(extension, "GL_ANDROID_extension_pack_es31a") == 0) { + // to everything in AEP + updateExtensionBehavior(line, "GL_KHR_blend_equation_advanced", behaviorString); + updateExtensionBehavior(line, "GL_OES_sample_variables", behaviorString); + updateExtensionBehavior(line, "GL_OES_shader_image_atomic", behaviorString); + updateExtensionBehavior(line, "GL_OES_shader_multisample_interpolation", behaviorString); + updateExtensionBehavior(line, "GL_OES_texture_storage_multisample_2d_array", behaviorString); + updateExtensionBehavior(line, "GL_EXT_geometry_shader", behaviorString); + updateExtensionBehavior(line, "GL_EXT_gpu_shader5", behaviorString); + updateExtensionBehavior(line, "GL_EXT_primitive_bounding_box", behaviorString); + updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString); + updateExtensionBehavior(line, "GL_EXT_tessellation_shader", behaviorString); + updateExtensionBehavior(line, "GL_EXT_texture_buffer", behaviorString); + updateExtensionBehavior(line, "GL_EXT_texture_cube_map_array", behaviorString); + } + // geometry to io_blocks + else if (strcmp(extension, "GL_EXT_geometry_shader") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString); + else if (strcmp(extension, "GL_OES_geometry_shader") == 0) + updateExtensionBehavior(line, "GL_OES_shader_io_blocks", behaviorString); + // tessellation to io_blocks + else if (strcmp(extension, "GL_EXT_tessellation_shader") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString); + else if (strcmp(extension, "GL_OES_tessellation_shader") == 0) + updateExtensionBehavior(line, "GL_OES_shader_io_blocks", behaviorString); + else if (strcmp(extension, "GL_GOOGLE_include_directive") == 0) + updateExtensionBehavior(line, "GL_GOOGLE_cpp_style_line_directive", behaviorString); + // subgroup_* to subgroup_basic + else if (strcmp(extension, "GL_KHR_shader_subgroup_vote") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_arithmetic") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_ballot") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_shuffle") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_shuffle_relative") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, 
"GL_KHR_shader_subgroup_clustered") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_quad") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_NV_shader_subgroup_partitioned") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_EXT_buffer_reference2") == 0 || + strcmp(extension, "GL_EXT_buffer_reference_uvec2") == 0) + updateExtensionBehavior(line, "GL_EXT_buffer_reference", behaviorString); + else if (strcmp(extension, "GL_NV_integer_cooperative_matrix") == 0) + updateExtensionBehavior(line, "GL_NV_cooperative_matrix", behaviorString); + // subgroup extended types to explicit types + else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int8") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int8", behaviorString); + else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int16") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int16", behaviorString); + else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int64") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int64", behaviorString); + else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_float16") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_float16", behaviorString); +} + +void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBehavior behavior) +{ + // Update the current behavior + if (strcmp(extension, "all") == 0) { + // special case for the 'all' extension; apply it to every extension present + if (behavior == EBhRequire || behavior == EBhEnable) { + error(getCurrentLoc(), "extension 'all' cannot have 'require' or 'enable' behavior", "#extension", ""); + return; + } else { + for (auto iter = extensionBehavior.begin(); iter != extensionBehavior.end(); ++iter) + iter->second = behavior; + } + } else { + // Do the update for this single extension + auto iter = extensionBehavior.find(TString(extension)); + if (iter == extensionBehavior.end()) { + switch (behavior) { + case EBhRequire: + error(getCurrentLoc(), "extension not supported:", "#extension", extension); + break; + case EBhEnable: + case EBhWarn: + case EBhDisable: + warn(getCurrentLoc(), "extension not supported:", "#extension", extension); + break; + default: + assert(0 && "unexpected behavior"); + } + + return; + } else { + if (iter->second == EBhDisablePartial) + warn(getCurrentLoc(), "extension is only partially supported:", "#extension", extension); + if (behavior == EBhEnable || behavior == EBhRequire || behavior == EBhDisable) + intermediate.updateRequestedExtension(extension, behavior); + iter->second = behavior; + } + } +} + +// Check if extension is used with correct shader stage. 
+void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * const extension) +{ + // GL_NV_mesh_shader extension is only allowed in task/mesh shaders + if (strcmp(extension, "GL_NV_mesh_shader") == 0) { + requireStage(loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask | EShLangFragmentMask), + "#extension GL_NV_mesh_shader"); + profileRequires(loc, ECoreProfile, 450, 0, "#extension GL_NV_mesh_shader"); + profileRequires(loc, EEsProfile, 320, 0, "#extension GL_NV_mesh_shader"); + } +} + +// Call for any operation needing full GLSL integer data-type support. +void TParseVersions::fullIntegerCheck(const TSourceLoc& loc, const char* op) +{ + profileRequires(loc, ENoProfile, 130, nullptr, op); + profileRequires(loc, EEsProfile, 300, nullptr, op); +} + +// Call for any operation needing GLSL double data-type support. +void TParseVersions::doubleCheck(const TSourceLoc& loc, const char* op) +{ + + //requireProfile(loc, ECoreProfile | ECompatibilityProfile, op); + if (language == EShLangVertex) { + const char* const f64_Extensions[] = {E_GL_ARB_gpu_shader_fp64, E_GL_ARB_vertex_attrib_64bit}; + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, 2, f64_Extensions, op); + } else + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader_fp64, op); +} + +// Call for any operation needing GLSL float16 data-type support. +void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[] = { + E_GL_AMD_gpu_shader_half_float, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +bool TParseVersions::float16Arithmetic() +{ + const char* const extensions[] = { + E_GL_AMD_gpu_shader_half_float, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float16}; + return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions); +} + +bool TParseVersions::int16Arithmetic() +{ + const char* const extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16}; + return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions); +} + +bool TParseVersions::int8Arithmetic() +{ + const char* const extensions[] = { + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int8}; + return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions); +} + +void TParseVersions::requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) +{ + TString combined; + combined = op; + combined += ": "; + combined += featureDesc; + + const char* const extensions[] = { + E_GL_AMD_gpu_shader_half_float, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str()); +} + +void TParseVersions::requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) +{ + TString combined; + combined = op; + combined += ": "; + combined += featureDesc; + + const char* const extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, 
combined.c_str()); +} + +void TParseVersions::requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) +{ + TString combined; + combined = op; + combined += ": "; + combined += featureDesc; + + const char* const extensions[] = { + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int8}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str()); +} + +void TParseVersions::float16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[] = { + E_GL_AMD_gpu_shader_half_float, + E_GL_EXT_shader_16bit_storage, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +// Call for any operation needing GLSL float32 data-type support. +void TParseVersions::explicitFloat32Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float32}; + requireExtensions(loc, 2, extensions, op); + } +} + +// Call for any operation needing GLSL float64 data-type support. +void TParseVersions::explicitFloat64Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float64}; + requireExtensions(loc, 2, extensions, op); + requireProfile(loc, ECoreProfile | ECompatibilityProfile, op); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op); + } +} + +// Call for any operation needing GLSL explicit int8 data-type support. +void TParseVersions::explicitInt8Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int8}; + requireExtensions(loc, 2, extensions, op); + } +} + +// Call for any operation needing GLSL float16 opaque-type support +void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + requireExtensions(loc, 1, &E_GL_AMD_gpu_shader_half_float_fetch, op); + requireProfile(loc, ECoreProfile | ECompatibilityProfile, op); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op); + } +} + +// Call for any operation needing GLSL explicit int16 data-type support. +void TParseVersions::explicitInt16Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +void TParseVersions::int16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_16bit_storage, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +void TParseVersions::int8ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! 
builtIn) { + const char* const extensions[] = { + E_GL_EXT_shader_8bit_storage, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int8}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +// Call for any operation needing GLSL explicit int32 data-type support. +void TParseVersions::explicitInt32Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int32}; + requireExtensions(loc, 2, extensions, op); + } +} + +// Call for any operation needing GLSL 64-bit integer data-type support. +void TParseVersions::int64Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[3] = {E_GL_ARB_gpu_shader_int64, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int64}; + requireExtensions(loc, 3, extensions, op); + requireProfile(loc, ECoreProfile | ECompatibilityProfile, op); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op); + } +} + +void TParseVersions::fcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[] = {E_GL_NV_cooperative_matrix}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +void TParseVersions::intcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[] = {E_GL_NV_integer_cooperative_matrix}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} +#endif // GLSLANG_WEB +// Call for any operation removed because SPIR-V is in use. +void TParseVersions::spvRemoved(const TSourceLoc& loc, const char* op) +{ + if (spvVersion.spv != 0) + error(loc, "not allowed when generating SPIR-V", op, ""); +} + +// Call for any operation removed because Vulkan SPIR-V is being generated. +void TParseVersions::vulkanRemoved(const TSourceLoc& loc, const char* op) +{ + if (spvVersion.vulkan > 0) + error(loc, "not allowed when using GLSL for Vulkan", op, ""); +} + +// Call for any operation that requires Vulkan. +void TParseVersions::requireVulkan(const TSourceLoc& loc, const char* op) +{ +#ifndef GLSLANG_WEB + if (spvVersion.vulkan == 0) + error(loc, "only allowed when using GLSL for Vulkan", op, ""); +#endif +} + +// Call for any operation that requires SPIR-V. +void TParseVersions::requireSpv(const TSourceLoc& loc, const char* op) +{ +#ifndef GLSLANG_WEB + if (spvVersion.spv == 0) + error(loc, "only allowed when generating SPIR-V", op, ""); +#endif +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/Versions.h b/dep/glslang/glslang/MachineIndependent/Versions.h new file mode 100644 index 000000000..c128d4642 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/Versions.h @@ -0,0 +1,331 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +#ifndef _VERSIONS_INCLUDED_ +#define _VERSIONS_INCLUDED_ + +#define LAST_ELEMENT_MARKER(x) x + +// +// Help manage multiple profiles, versions, extensions etc. +// + +// +// Profiles are set up for masking operations, so queries can be done on multiple +// profiles at the same time. +// +// Don't maintain an ordinal set of enums (0,1,2,3...) to avoid all possible +// defects from mixing the two different forms. +// +typedef enum : unsigned { + EBadProfile = 0, + ENoProfile = (1 << 0), // only for desktop, before profiles showed up + ECoreProfile = (1 << 1), + ECompatibilityProfile = (1 << 2), + EEsProfile = (1 << 3), + LAST_ELEMENT_MARKER(EProfileCount), +} EProfile; + +namespace glslang { + +// +// Map from profile enum to externally readable text name. +// +inline const char* ProfileName(EProfile profile) +{ + switch (profile) { + case ENoProfile: return "none"; + case ECoreProfile: return "core"; + case ECompatibilityProfile: return "compatibility"; + case EEsProfile: return "es"; + default: return "unknown profile"; + } +} + +// +// What source rules, validation rules, target language, etc. are needed or +// desired for SPIR-V? +// +// 0 means a target or rule set is not enabled (ignore rules from that entity). +// Non-0 means to apply semantic rules arising from that version of its rule set. +// The union of all requested rule sets will be applied. 
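+//
+// Editorial example (not upstream text): compiling Vulkan-dialect GLSL under
+// GL_KHR_vulkan_glsl leaves openGl at 0 and sets vulkanGlsl to the semantics
+// version (100 for the original extension, hence "#define VULKAN 100" in the
+// preamble), with spv and vulkan non-zero for the target execution environment.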
+// +struct SpvVersion { + SpvVersion() : spv(0), vulkanGlsl(0), vulkan(0), openGl(0) {} + unsigned int spv; // the version of SPIR-V to target, as defined by "word 1" of the SPIR-V binary header + int vulkanGlsl; // the version of GLSL semantics for Vulkan, from GL_KHR_vulkan_glsl, for "#define VULKAN XXX" + int vulkan; // the version of Vulkan, for which SPIR-V execution environment rules to use + int openGl; // the version of GLSL semantics for OpenGL, from GL_ARB_gl_spirv, for "#define GL_SPIRV XXX" +}; + +// +// The behaviors from the GLSL "#extension extension_name : behavior" +// +typedef enum { + EBhMissing = 0, + EBhRequire, + EBhEnable, + EBhWarn, + EBhDisable, + EBhDisablePartial // use as initial state of an extension that is only partially implemented +} TExtensionBehavior; + +// +// Symbolic names for extensions. Strings may be directly used when calling the +// functions, but better to have the compiler do spelling checks. +// +const char* const E_GL_OES_texture_3D = "GL_OES_texture_3D"; +const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_derivatives"; +const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth"; +const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external"; +const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3"; +const char* const E_GL_EXT_YUV_target = "GL_EXT_YUV_target"; +const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod"; +const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers"; + +const char* const E_GL_ARB_texture_rectangle = "GL_ARB_texture_rectangle"; +const char* const E_GL_3DL_array_objects = "GL_3DL_array_objects"; +const char* const E_GL_ARB_shading_language_420pack = "GL_ARB_shading_language_420pack"; +const char* const E_GL_ARB_texture_gather = "GL_ARB_texture_gather"; +const char* const E_GL_ARB_gpu_shader5 = "GL_ARB_gpu_shader5"; +const char* const E_GL_ARB_separate_shader_objects = "GL_ARB_separate_shader_objects"; +const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader"; +const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader"; +const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts"; +const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array"; +const char* const E_GL_ARB_texture_multisample = "GL_ARB_texture_multisample"; +const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod"; +const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location"; +const char* const E_GL_ARB_explicit_uniform_location = "GL_ARB_explicit_uniform_location"; +const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store"; +const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters"; +const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters"; +const char* const E_GL_ARB_shader_group_vote = "GL_ARB_shader_group_vote"; +const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_control"; +const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples"; +const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array"; +const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64"; +const char* const E_GL_ARB_gpu_shader_fp64 = "GL_ARB_gpu_shader_fp64"; +const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot"; +const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2"; +const char* const 
E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp"; +const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil_export"; +// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members +const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage"; +const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array"; +const char* const E_GL_ARB_fragment_shader_interlock = "GL_ARB_fragment_shader_interlock"; +const char* const E_GL_ARB_shader_clock = "GL_ARB_shader_clock"; +const char* const E_GL_ARB_uniform_buffer_object = "GL_ARB_uniform_buffer_object"; +const char* const E_GL_ARB_sample_shading = "GL_ARB_sample_shading"; +const char* const E_GL_ARB_shader_bit_encoding = "GL_ARB_shader_bit_encoding"; +const char* const E_GL_ARB_shader_image_size = "GL_ARB_shader_image_size"; +const char* const E_GL_ARB_shader_storage_buffer_object = "GL_ARB_shader_storage_buffer_object"; +const char* const E_GL_ARB_shading_language_packing = "GL_ARB_shading_language_packing"; +const char* const E_GL_ARB_texture_query_lod = "GL_ARB_texture_query_lod"; +const char* const E_GL_ARB_vertex_attrib_64bit = "GL_ARB_vertex_attrib_64bit"; + +const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic"; +const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote"; +const char* const E_GL_KHR_shader_subgroup_arithmetic = "GL_KHR_shader_subgroup_arithmetic"; +const char* const E_GL_KHR_shader_subgroup_ballot = "GL_KHR_shader_subgroup_ballot"; +const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_subgroup_shuffle"; +const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative"; +const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered"; +const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad"; +const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics"; + +const char* const E_GL_EXT_shader_atomic_int64 = "GL_EXT_shader_atomic_int64"; + +const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers"; +const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted"; + +const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage"; +const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage"; + + +// EXT extensions +const char* const E_GL_EXT_device_group = "GL_EXT_device_group"; +const char* const E_GL_EXT_multiview = "GL_EXT_multiview"; +const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage"; +const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes"; +const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier"; +const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions"; +const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout"; +const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density"; +const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference"; +const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2"; +const char* const E_GL_EXT_buffer_reference_uvec2 = "GL_EXT_buffer_reference_uvec2"; +const char* const E_GL_EXT_demote_to_helper_invocation = "GL_EXT_demote_to_helper_invocation"; +const 
char* const E_GL_EXT_shader_realtime_clock = "GL_EXT_shader_realtime_clock";
+const char* const E_GL_EXT_debug_printf = "GL_EXT_debug_printf";
+const char* const E_GL_EXT_ray_tracing = "GL_EXT_ray_tracing";
+const char* const E_GL_EXT_ray_query = "GL_EXT_ray_query";
+const char* const E_GL_EXT_ray_flags_primitive_culling = "GL_EXT_ray_flags_primitive_culling";
+const char* const E_GL_EXT_blend_func_extended = "GL_EXT_blend_func_extended";
+const char* const E_GL_EXT_shader_implicit_conversions = "GL_EXT_shader_implicit_conversions";
+
+// Arrays of extensions for the above post_depth_coverage duplications
+
+const char* const post_depth_coverageEXTs[] = { E_GL_ARB_post_depth_coverage, E_GL_EXT_post_depth_coverage };
+const int Num_post_depth_coverageEXTs = sizeof(post_depth_coverageEXTs) / sizeof(post_depth_coverageEXTs[0]);
+
+// OVR extensions
+const char* const E_GL_OVR_multiview = "GL_OVR_multiview";
+const char* const E_GL_OVR_multiview2 = "GL_OVR_multiview2";
+
+const char* const OVR_multiview_EXTs[] = { E_GL_OVR_multiview, E_GL_OVR_multiview2 };
+const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multiview_EXTs[0]);
+
+// #line and #include
+const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
+const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
+
+const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
+const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
+const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
+const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader";
+const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float";
+const char* const E_GL_AMD_texture_gather_bias_lod = "GL_AMD_texture_gather_bias_lod";
+const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_shader_int16";
+const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
+const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
+const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
+
+const char* const E_GL_INTEL_shader_integer_functions2 = "GL_INTEL_shader_integer_functions2";
+
+const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
+const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
+const char* const E_GL_NV_viewport_array2 = "GL_NV_viewport_array2";
+const char* const E_GL_NV_stereo_view_rendering = "GL_NV_stereo_view_rendering";
+const char* const E_GL_NVX_multiview_per_view_attributes = "GL_NVX_multiview_per_view_attributes";
+const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_atomic_int64";
+const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation";
+const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation";
+const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned";
+const char* const E_GL_NV_shading_rate_image = "GL_NV_shading_rate_image";
+const char* const E_GL_NV_ray_tracing = "GL_NV_ray_tracing";
+const char* const E_GL_NV_fragment_shader_barycentric = "GL_NV_fragment_shader_barycentric";
+const char* const E_GL_NV_compute_shader_derivatives = "GL_NV_compute_shader_derivatives";
+const char* const E_GL_NV_shader_texture_footprint = "GL_NV_shader_texture_footprint";
+const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_shader"; + +// Arrays of extensions for the above viewportEXTs duplications + +const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 }; +const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]); + +const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix"; +const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins"; +const char* const E_GL_NV_integer_cooperative_matrix = "GL_NV_integer_cooperative_matrix"; + +// AEP +const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a"; +const char* const E_GL_KHR_blend_equation_advanced = "GL_KHR_blend_equation_advanced"; +const char* const E_GL_OES_sample_variables = "GL_OES_sample_variables"; +const char* const E_GL_OES_shader_image_atomic = "GL_OES_shader_image_atomic"; +const char* const E_GL_OES_shader_multisample_interpolation = "GL_OES_shader_multisample_interpolation"; +const char* const E_GL_OES_texture_storage_multisample_2d_array = "GL_OES_texture_storage_multisample_2d_array"; +const char* const E_GL_EXT_geometry_shader = "GL_EXT_geometry_shader"; +const char* const E_GL_EXT_geometry_point_size = "GL_EXT_geometry_point_size"; +const char* const E_GL_EXT_gpu_shader5 = "GL_EXT_gpu_shader5"; +const char* const E_GL_EXT_primitive_bounding_box = "GL_EXT_primitive_bounding_box"; +const char* const E_GL_EXT_shader_io_blocks = "GL_EXT_shader_io_blocks"; +const char* const E_GL_EXT_tessellation_shader = "GL_EXT_tessellation_shader"; +const char* const E_GL_EXT_tessellation_point_size = "GL_EXT_tessellation_point_size"; +const char* const E_GL_EXT_texture_buffer = "GL_EXT_texture_buffer"; +const char* const E_GL_EXT_texture_cube_map_array = "GL_EXT_texture_cube_map_array"; +const char* const E_GL_EXT_shader_integer_mix = "GL_EXT_shader_integer_mix"; + +// OES matching AEP +const char* const E_GL_OES_geometry_shader = "GL_OES_geometry_shader"; +const char* const E_GL_OES_geometry_point_size = "GL_OES_geometry_point_size"; +const char* const E_GL_OES_gpu_shader5 = "GL_OES_gpu_shader5"; +const char* const E_GL_OES_primitive_bounding_box = "GL_OES_primitive_bounding_box"; +const char* const E_GL_OES_shader_io_blocks = "GL_OES_shader_io_blocks"; +const char* const E_GL_OES_tessellation_shader = "GL_OES_tessellation_shader"; +const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessellation_point_size"; +const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer"; +const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array"; + +// EXT +const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int32 = "GL_EXT_shader_explicit_arithmetic_types_int32"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int64 = "GL_EXT_shader_explicit_arithmetic_types_int64"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_shader_explicit_arithmetic_types_float16"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = 
"GL_EXT_shader_explicit_arithmetic_types_float64"; + +const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shader_subgroup_extended_types_int8"; +const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16"; +const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64"; +const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16"; + +// Arrays of extensions for the above AEP duplications + +const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader }; +const int Num_AEP_geometry_shader = sizeof(AEP_geometry_shader)/sizeof(AEP_geometry_shader[0]); + +const char* const AEP_geometry_point_size[] = { E_GL_EXT_geometry_point_size, E_GL_OES_geometry_point_size }; +const int Num_AEP_geometry_point_size = sizeof(AEP_geometry_point_size)/sizeof(AEP_geometry_point_size[0]); + +const char* const AEP_gpu_shader5[] = { E_GL_EXT_gpu_shader5, E_GL_OES_gpu_shader5 }; +const int Num_AEP_gpu_shader5 = sizeof(AEP_gpu_shader5)/sizeof(AEP_gpu_shader5[0]); + +const char* const AEP_primitive_bounding_box[] = { E_GL_EXT_primitive_bounding_box, E_GL_OES_primitive_bounding_box }; +const int Num_AEP_primitive_bounding_box = sizeof(AEP_primitive_bounding_box)/sizeof(AEP_primitive_bounding_box[0]); + +const char* const AEP_shader_io_blocks[] = { E_GL_EXT_shader_io_blocks, E_GL_OES_shader_io_blocks }; +const int Num_AEP_shader_io_blocks = sizeof(AEP_shader_io_blocks)/sizeof(AEP_shader_io_blocks[0]); + +const char* const AEP_tessellation_shader[] = { E_GL_EXT_tessellation_shader, E_GL_OES_tessellation_shader }; +const int Num_AEP_tessellation_shader = sizeof(AEP_tessellation_shader)/sizeof(AEP_tessellation_shader[0]); + +const char* const AEP_tessellation_point_size[] = { E_GL_EXT_tessellation_point_size, E_GL_OES_tessellation_point_size }; +const int Num_AEP_tessellation_point_size = sizeof(AEP_tessellation_point_size)/sizeof(AEP_tessellation_point_size[0]); + +const char* const AEP_texture_buffer[] = { E_GL_EXT_texture_buffer, E_GL_OES_texture_buffer }; +const int Num_AEP_texture_buffer = sizeof(AEP_texture_buffer)/sizeof(AEP_texture_buffer[0]); + +const char* const AEP_texture_cube_map_array[] = { E_GL_EXT_texture_cube_map_array, E_GL_OES_texture_cube_map_array }; +const int Num_AEP_texture_cube_map_array = sizeof(AEP_texture_cube_map_array)/sizeof(AEP_texture_cube_map_array[0]); + +} // end namespace glslang + +#endif // _VERSIONS_INCLUDED_ diff --git a/dep/glslang/glslang/MachineIndependent/attribute.cpp b/dep/glslang/glslang/MachineIndependent/attribute.cpp new file mode 100644 index 000000000..958551834 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/attribute.cpp @@ -0,0 +1,346 @@ +// +// Copyright (C) 2017 LunarG, Inc. +// Copyright (C) 2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. 
+//
+// Neither the name of Google, Inc., nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef GLSLANG_WEB
+
+#include "attribute.h"
+#include "../Include/intermediate.h"
+#include "ParseHelper.h"
+
+namespace glslang {
+
+// extract integers out of attribute arguments stored in attribute aggregate
+bool TAttributeArgs::getInt(int& value, int argNum) const
+{
+    const TConstUnion* intConst = getConstUnion(EbtInt, argNum);
+
+    if (intConst == nullptr)
+        return false;
+
+    value = intConst->getIConst();
+    return true;
+}
+
+
+// extract strings out of attribute arguments stored in attribute aggregate.
+// convert to lower case if convertToLower is true (for case-insensitive compare convenience)
+bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const
+{
+    const TConstUnion* stringConst = getConstUnion(EbtString, argNum);
+
+    if (stringConst == nullptr)
+        return false;
+
+    value = *stringConst->getSConst();
+
+    // Convenience.
+    if (convertToLower)
+        std::transform(value.begin(), value.end(), value.begin(), ::tolower);
+
+    return true;
+}
+
+// How many arguments were supplied?
+int TAttributeArgs::size() const
+{
+    return args == nullptr ? 0 : (int)args->getSequence().size();
+}
+
+// Helper to get attribute const union. Returns nullptr on failure.
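+//
+// The checks in the body handle the failure modes in turn: no argument
+// list at all, an argNum past the end of the aggregate, and an argument
+// that is not a compile-time constant of the requested basic type.
+//
+// Illustrative use (a sketch of an assumed caller, not part of the
+// upstream sources):
+//
+//     int n;
+//     if (attrArgs.getInt(n))          // getInt wraps getConstUnion(EbtInt, 0)
+//         loop->setLoopDependency(n);  // e.g. for [[dependency_length(n)]]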
+const TConstUnion* TAttributeArgs::getConstUnion(TBasicType basicType, int argNum) const +{ + if (args == nullptr) + return nullptr; + + if (argNum >= (int)args->getSequence().size()) + return nullptr; + + if (args->getSequence()[argNum]->getAsConstantUnion() == nullptr) + return nullptr; + + const TConstUnion* constVal = &args->getSequence()[argNum]->getAsConstantUnion()->getConstArray()[0]; + if (constVal == nullptr || constVal->getType() != basicType) + return nullptr; + + return constVal; +} + +// Implementation of TParseContext parts of attributes +TAttributeType TParseContext::attributeFromName(const TString& name) const +{ + if (name == "branch" || name == "dont_flatten") + return EatBranch; + else if (name == "flatten") + return EatFlatten; + else if (name == "unroll") + return EatUnroll; + else if (name == "loop" || name == "dont_unroll") + return EatLoop; + else if (name == "dependency_infinite") + return EatDependencyInfinite; + else if (name == "dependency_length") + return EatDependencyLength; + else if (name == "min_iterations") + return EatMinIterations; + else if (name == "max_iterations") + return EatMaxIterations; + else if (name == "iteration_multiple") + return EatIterationMultiple; + else if (name == "peel_count") + return EatPeelCount; + else if (name == "partial_count") + return EatPartialCount; + else + return EatNone; +} + +// Make an initial leaf for the grammar from a no-argument attribute +TAttributes* TParseContext::makeAttributes(const TString& identifier) const +{ + TAttributes *attributes = nullptr; + attributes = NewPoolObject(attributes); + TAttributeArgs args = { attributeFromName(identifier), nullptr }; + attributes->push_back(args); + return attributes; +} + +// Make an initial leaf for the grammar from a one-argument attribute +TAttributes* TParseContext::makeAttributes(const TString& identifier, TIntermNode* node) const +{ + TAttributes *attributes = nullptr; + attributes = NewPoolObject(attributes); + + // for now, node is always a simple single expression, but other code expects + // a list, so make it so + TIntermAggregate* agg = intermediate.makeAggregate(node); + TAttributeArgs args = { attributeFromName(identifier), agg }; + attributes->push_back(args); + return attributes; +} + +// Merge two sets of attributes into a single set. +// The second argument is destructively consumed. 
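+//
+// Illustration (assumed input, not part of the upstream sources): for
+//     [[unroll]] [[dependency_length(4)]] for (...) { ... }
+// each bracketed group arrives as its own TAttributes list; splice moves
+// the nodes of attr2 onto the end of attr1 in O(1), leaving attr2 empty.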
+TAttributes* TParseContext::mergeAttributes(TAttributes* attr1, TAttributes* attr2) const +{ + attr1->splice(attr1->end(), *attr2); + return attr1; +} + +// +// Selection attributes +// +void TParseContext::handleSelectionAttributes(const TAttributes& attributes, TIntermNode* node) +{ + TIntermSelection* selection = node->getAsSelectionNode(); + if (selection == nullptr) + return; + + for (auto it = attributes.begin(); it != attributes.end(); ++it) { + if (it->size() > 0) { + warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", ""); + continue; + } + + switch (it->name) { + case EatFlatten: + selection->setFlatten(); + break; + case EatBranch: + selection->setDontFlatten(); + break; + default: + warn(node->getLoc(), "attribute does not apply to a selection", "", ""); + break; + } + } +} + +// +// Switch attributes +// +void TParseContext::handleSwitchAttributes(const TAttributes& attributes, TIntermNode* node) +{ + TIntermSwitch* selection = node->getAsSwitchNode(); + if (selection == nullptr) + return; + + for (auto it = attributes.begin(); it != attributes.end(); ++it) { + if (it->size() > 0) { + warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", ""); + continue; + } + + switch (it->name) { + case EatFlatten: + selection->setFlatten(); + break; + case EatBranch: + selection->setDontFlatten(); + break; + default: + warn(node->getLoc(), "attribute does not apply to a switch", "", ""); + break; + } + } +} + +// +// Loop attributes +// +void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermNode* node) +{ + TIntermLoop* loop = node->getAsLoopNode(); + if (loop == nullptr) { + // the actual loop might be part of a sequence + TIntermAggregate* agg = node->getAsAggregate(); + if (agg == nullptr) + return; + for (auto it = agg->getSequence().begin(); it != agg->getSequence().end(); ++it) { + loop = (*it)->getAsLoopNode(); + if (loop != nullptr) + break; + } + if (loop == nullptr) + return; + } + + for (auto it = attributes.begin(); it != attributes.end(); ++it) { + + const auto noArgument = [&](const char* feature) { + if (it->size() > 0) { + warn(node->getLoc(), "expected no arguments", feature, ""); + return false; + } + return true; + }; + + const auto positiveSignedArgument = [&](const char* feature, int& value) { + if (it->size() == 1 && it->getInt(value)) { + if (value <= 0) { + error(node->getLoc(), "must be positive", feature, ""); + return false; + } + } else { + warn(node->getLoc(), "expected a single integer argument", feature, ""); + return false; + } + return true; + }; + + const auto unsignedArgument = [&](const char* feature, unsigned int& uiValue) { + int value; + if (!(it->size() == 1 && it->getInt(value))) { + warn(node->getLoc(), "expected a single integer argument", feature, ""); + return false; + } + uiValue = (unsigned int)value; + return true; + }; + + const auto positiveUnsignedArgument = [&](const char* feature, unsigned int& uiValue) { + int value; + if (it->size() == 1 && it->getInt(value)) { + if (value == 0) { + error(node->getLoc(), "must be greater than or equal to 1", feature, ""); + return false; + } + } else { + warn(node->getLoc(), "expected a single integer argument", feature, ""); + return false; + } + uiValue = (unsigned int)value; + return true; + }; + + const auto spirv14 = [&](const char* feature) { + if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_4) + warn(node->getLoc(), "attribute requires a SPIR-V 1.4 target-env", feature, ""); + }; + + int value = 0; + unsigned 
uiValue = 0; + switch (it->name) { + case EatUnroll: + if (noArgument("unroll")) + loop->setUnroll(); + break; + case EatLoop: + if (noArgument("dont_unroll")) + loop->setDontUnroll(); + break; + case EatDependencyInfinite: + if (noArgument("dependency_infinite")) + loop->setLoopDependency(TIntermLoop::dependencyInfinite); + break; + case EatDependencyLength: + if (positiveSignedArgument("dependency_length", value)) + loop->setLoopDependency(value); + break; + case EatMinIterations: + spirv14("min_iterations"); + if (unsignedArgument("min_iterations", uiValue)) + loop->setMinIterations(uiValue); + break; + case EatMaxIterations: + spirv14("max_iterations"); + if (unsignedArgument("max_iterations", uiValue)) + loop->setMaxIterations(uiValue); + break; + case EatIterationMultiple: + spirv14("iteration_multiple"); + if (positiveUnsignedArgument("iteration_multiple", uiValue)) + loop->setIterationMultiple(uiValue); + break; + case EatPeelCount: + spirv14("peel_count"); + if (unsignedArgument("peel_count", uiValue)) + loop->setPeelCount(uiValue); + break; + case EatPartialCount: + spirv14("partial_count"); + if (unsignedArgument("partial_count", uiValue)) + loop->setPartialCount(uiValue); + break; + default: + warn(node->getLoc(), "attribute does not apply to a loop", "", ""); + break; + } + } +} + +} // end namespace glslang + +#endif // GLSLANG_WEB diff --git a/dep/glslang/glslang/MachineIndependent/attribute.h b/dep/glslang/glslang/MachineIndependent/attribute.h new file mode 100644 index 000000000..38a943d28 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/attribute.h @@ -0,0 +1,149 @@ +// +// Copyright (C) 2017 LunarG, Inc. +// Copyright (C) 2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+//
+
+#ifndef _ATTRIBUTE_INCLUDED_
+#define _ATTRIBUTE_INCLUDED_
+
+#include "../Include/Common.h"
+#include "../Include/ConstantUnion.h"
+
+namespace glslang {
+
+    enum TAttributeType {
+        EatNone,
+        EatAllow_uav_condition,
+        EatBranch,
+        EatCall,
+        EatDomain,
+        EatEarlyDepthStencil,
+        EatFastOpt,
+        EatFlatten,
+        EatForceCase,
+        EatInstance,
+        EatMaxTessFactor,
+        EatNumThreads,
+        EatMaxVertexCount,
+        EatOutputControlPoints,
+        EatOutputTopology,
+        EatPartitioning,
+        EatPatchConstantFunc,
+        EatPatchSize,
+        EatUnroll,
+        EatLoop,
+        EatBinding,
+        EatGlobalBinding,
+        EatLocation,
+        EatInputAttachment,
+        EatBuiltIn,
+        EatPushConstant,
+        EatConstantId,
+        EatDependencyInfinite,
+        EatDependencyLength,
+        EatMinIterations,
+        EatMaxIterations,
+        EatIterationMultiple,
+        EatPeelCount,
+        EatPartialCount,
+        EatFormatRgba32f,
+        EatFormatRgba16f,
+        EatFormatR32f,
+        EatFormatRgba8,
+        EatFormatRgba8Snorm,
+        EatFormatRg32f,
+        EatFormatRg16f,
+        EatFormatR11fG11fB10f,
+        EatFormatR16f,
+        EatFormatRgba16,
+        EatFormatRgb10A2,
+        EatFormatRg16,
+        EatFormatRg8,
+        EatFormatR16,
+        EatFormatR8,
+        EatFormatRgba16Snorm,
+        EatFormatRg16Snorm,
+        EatFormatRg8Snorm,
+        EatFormatR16Snorm,
+        EatFormatR8Snorm,
+        EatFormatRgba32i,
+        EatFormatRgba16i,
+        EatFormatRgba8i,
+        EatFormatR32i,
+        EatFormatRg32i,
+        EatFormatRg16i,
+        EatFormatRg8i,
+        EatFormatR16i,
+        EatFormatR8i,
+        EatFormatRgba32ui,
+        EatFormatRgba16ui,
+        EatFormatRgba8ui,
+        EatFormatR32ui,
+        EatFormatRgb10a2ui,
+        EatFormatRg32ui,
+        EatFormatRg16ui,
+        EatFormatRg8ui,
+        EatFormatR16ui,
+        EatFormatR8ui,
+        EatFormatUnknown,
+        EatNonWritable,
+        EatNonReadable
+    };
+
+    class TIntermAggregate;
+
+    struct TAttributeArgs {
+        TAttributeType name;
+        const TIntermAggregate* args;
+
+        // Obtain attribute as integer
+        // Return false if it cannot be obtained
+        bool getInt(int& value, int argNum = 0) const;
+
+        // Obtain attribute as string, with optional to-lower transform
+        // Return false if it cannot be obtained
+        bool getString(TString& value, int argNum = 0, bool convertToLower = true) const;
+
+        // How many arguments were provided to the attribute?
+        int size() const;
+
+    protected:
+        const TConstUnion* getConstUnion(TBasicType basicType, int argNum) const;
+    };
+
+    typedef TList<TAttributeArgs> TAttributes;
+
+} // end namespace glslang
+
+#endif // _ATTRIBUTE_INCLUDED_
diff --git a/dep/glslang/glslang/MachineIndependent/gl_types.h b/dep/glslang/glslang/MachineIndependent/gl_types.h
new file mode 100644
index 000000000..b6f613bce
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/gl_types.h
@@ -0,0 +1,210 @@
+/*
+** Copyright (c) 2013 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ + +#pragma once + +#define GL_FLOAT 0x1406 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 + +#define GL_DOUBLE 0x140A +#define GL_DOUBLE_VEC2 0x8FFC +#define GL_DOUBLE_VEC3 0x8FFD +#define GL_DOUBLE_VEC4 0x8FFE + +#define GL_INT 0x1404 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 + +#define GL_UNSIGNED_INT 0x1405 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 + +#define GL_INT64_ARB 0x140E +#define GL_INT64_VEC2_ARB 0x8FE9 +#define GL_INT64_VEC3_ARB 0x8FEA +#define GL_INT64_VEC4_ARB 0x8FEB + +#define GL_UNSIGNED_INT64_ARB 0x140F +#define GL_UNSIGNED_INT64_VEC2_ARB 0x8FE5 +#define GL_UNSIGNED_INT64_VEC3_ARB 0x8FE6 +#define GL_UNSIGNED_INT64_VEC4_ARB 0x8FE7 + +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 + +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A + +#define GL_DOUBLE_MAT2 0x8F46 +#define GL_DOUBLE_MAT3 0x8F47 +#define GL_DOUBLE_MAT4 0x8F48 +#define GL_DOUBLE_MAT2x3 0x8F49 +#define GL_DOUBLE_MAT2x4 0x8F4A +#define GL_DOUBLE_MAT3x2 0x8F4B +#define GL_DOUBLE_MAT3x4 0x8F4C +#define GL_DOUBLE_MAT4x2 0x8F4D +#define GL_DOUBLE_MAT4x3 0x8F4E + +// Those constants are borrowed from extension NV_gpu_shader5 +#define GL_FLOAT16_NV 0x8FF8 +#define GL_FLOAT16_VEC2_NV 0x8FF9 +#define GL_FLOAT16_VEC3_NV 0x8FFA +#define GL_FLOAT16_VEC4_NV 0x8FFB + +#define GL_FLOAT16_MAT2_AMD 0x91C5 +#define GL_FLOAT16_MAT3_AMD 0x91C6 +#define GL_FLOAT16_MAT4_AMD 0x91C7 +#define GL_FLOAT16_MAT2x3_AMD 0x91C8 +#define GL_FLOAT16_MAT2x4_AMD 0x91C9 +#define GL_FLOAT16_MAT3x2_AMD 0x91CA +#define GL_FLOAT16_MAT3x4_AMD 0x91CB +#define GL_FLOAT16_MAT4x2_AMD 0x91CC +#define GL_FLOAT16_MAT4x3_AMD 0x91CD + +#define GL_SAMPLER_1D 0x8B5D +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_SAMPLER_BUFFER 0x8DC2 +#define GL_SAMPLER_1D_ARRAY 0x8DC0 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_SAMPLER_1D_SHADOW 0x8B61 +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_SAMPLER_2D_RECT 0x8B63 +#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B +#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D +#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D + +#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE +#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF +#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0 +#define GL_FLOAT16_SAMPLER_CUBE_AMD 0x91D1 +#define GL_FLOAT16_SAMPLER_2D_RECT_AMD 0x91D2 +#define GL_FLOAT16_SAMPLER_1D_ARRAY_AMD 0x91D3 +#define GL_FLOAT16_SAMPLER_2D_ARRAY_AMD 0x91D4 +#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD 0x91D5 +#define GL_FLOAT16_SAMPLER_BUFFER_AMD 0x91D6 +#define 
GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD 0x91D7 +#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD 0x91D8 + +#define GL_FLOAT16_SAMPLER_1D_SHADOW_AMD 0x91D9 +#define GL_FLOAT16_SAMPLER_2D_SHADOW_AMD 0x91DA +#define GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD 0x91DB +#define GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD 0x91DC +#define GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD 0x91DD +#define GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD 0x91DE +#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD 0x91DF + +#define GL_FLOAT16_IMAGE_1D_AMD 0x91E0 +#define GL_FLOAT16_IMAGE_2D_AMD 0x91E1 +#define GL_FLOAT16_IMAGE_3D_AMD 0x91E2 +#define GL_FLOAT16_IMAGE_2D_RECT_AMD 0x91E3 +#define GL_FLOAT16_IMAGE_CUBE_AMD 0x91E4 +#define GL_FLOAT16_IMAGE_1D_ARRAY_AMD 0x91E5 +#define GL_FLOAT16_IMAGE_2D_ARRAY_AMD 0x91E6 +#define GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD 0x91E7 +#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8 +#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9 +#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA + +#define GL_INT_SAMPLER_1D 0x8DC9 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_INT_SAMPLER_2D_RECT 0x8DCD +#define GL_INT_SAMPLER_BUFFER 0x8DD0 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E + +#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1 +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A + +#define GL_IMAGE_1D 0x904C +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_2D_RECT 0x904F +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_BUFFER 0x9051 +#define GL_IMAGE_1D_ARRAY 0x9052 +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054 +#define GL_IMAGE_2D_MULTISAMPLE 0x9055 +#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056 +#define GL_INT_IMAGE_1D 0x9057 +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_3D 0x9059 +#define GL_INT_IMAGE_2D_RECT 0x905A +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_BUFFER 0x905C +#define GL_INT_IMAGE_1D_ARRAY 0x905D +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F +#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060 +#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061 +#define GL_UNSIGNED_INT_IMAGE_1D 0x9062 +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067 +#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C + +#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB diff --git a/dep/glslang/glslang/MachineIndependent/glslang.m4 b/dep/glslang/glslang/MachineIndependent/glslang.m4 new 
file mode 100644 index 000000000..1432bf6e8 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/glslang.m4 @@ -0,0 +1,3887 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2019 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do not edit the .y file, only edit the .m4 file. +// The .y bison file is not a source file, it is a derivative of the .m4 file. +// The m4 file needs to be processed by m4 to generate the .y bison file. +// +// Code sandwiched between a pair: +// +// GLSLANG_WEB_EXCLUDE_ON +// ... +// ... +// ... +// GLSLANG_WEB_EXCLUDE_OFF +// +// Will be excluded from the grammar when m4 is executed as: +// +// m4 -P -DGLSLANG_WEB +// +// It will be included when m4 is executed as: +// +// m4 -P +// + +m4_define(`GLSLANG_WEB_EXCLUDE_ON', `m4_ifdef(`GLSLANG_WEB', `m4_divert(`-1')')') +m4_define(`GLSLANG_WEB_EXCLUDE_OFF', `m4_ifdef(`GLSLANG_WEB', `m4_divert')') + +/** + * This is bison grammar and productions for parsing all versions of the + * GLSL shading languages. + */ +%{ + +/* Based on: +ANSI C Yacc grammar + +In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a +matching Lex specification) for the April 30, 1985 draft version of the +ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that +original, as mentioned in the answer to question 17.25 of the comp.lang.c +FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z. + +I intend to keep this version as close to the current C Standard grammar as +possible; please let me know if you discover discrepancies. 
+ +Jutta Degener, 1995 +*/ + +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "../Public/ShaderLang.h" +#include "attribute.h" + +using namespace glslang; + +%} + +%define parse.error verbose + +%union { + struct { + glslang::TSourceLoc loc; + union { + glslang::TString *string; + int i; + unsigned int u; + long long i64; + unsigned long long u64; + bool b; + double d; + }; + glslang::TSymbol* symbol; + } lex; + struct { + glslang::TSourceLoc loc; + glslang::TOperator op; + union { + TIntermNode* intermNode; + glslang::TIntermNodePair nodePair; + glslang::TIntermTyped* intermTypedNode; + glslang::TAttributes* attributes; + }; + union { + glslang::TPublicType type; + glslang::TFunction* function; + glslang::TParameter param; + glslang::TTypeLoc typeLine; + glslang::TTypeList* typeList; + glslang::TArraySizes* arraySizes; + glslang::TIdentifierList* identifierList; + }; + glslang::TArraySizes* typeParameters; + } interm; +} + +%{ + +/* windows only pragma */ +#ifdef _MSC_VER + #pragma warning(disable : 4065) + #pragma warning(disable : 4127) + #pragma warning(disable : 4244) +#endif + +#define parseContext (*pParseContext) +#define yyerror(context, msg) context->parserError(msg) + +extern int yylex(YYSTYPE*, TParseContext&); + +%} + +%parse-param {glslang::TParseContext* pParseContext} +%lex-param {parseContext} +%pure-parser // enable thread safety +%expect 1 // One shift reduce conflict because of if | else + +%token CONST BOOL INT UINT FLOAT +%token BVEC2 BVEC3 BVEC4 +%token IVEC2 IVEC3 IVEC4 +%token UVEC2 UVEC3 UVEC4 +%token VEC2 VEC3 VEC4 +%token MAT2 MAT3 MAT4 +%token MAT2X2 MAT2X3 MAT2X4 +%token MAT3X2 MAT3X3 MAT3X4 +%token MAT4X2 MAT4X3 MAT4X4 + +// combined image/sampler +%token SAMPLER2D SAMPLER3D SAMPLERCUBE SAMPLER2DSHADOW +%token SAMPLERCUBESHADOW SAMPLER2DARRAY +%token SAMPLER2DARRAYSHADOW ISAMPLER2D ISAMPLER3D ISAMPLERCUBE +%token ISAMPLER2DARRAY USAMPLER2D USAMPLER3D +%token USAMPLERCUBE USAMPLER2DARRAY + +// separate image/sampler +%token SAMPLER SAMPLERSHADOW +%token TEXTURE2D TEXTURE3D TEXTURECUBE TEXTURE2DARRAY +%token ITEXTURE2D ITEXTURE3D ITEXTURECUBE ITEXTURE2DARRAY +%token UTEXTURE2D UTEXTURE3D UTEXTURECUBE UTEXTURE2DARRAY + +GLSLANG_WEB_EXCLUDE_ON + +%token ATTRIBUTE VARYING +%token FLOAT16_T FLOAT32_T DOUBLE FLOAT64_T +%token INT64_T UINT64_T INT32_T UINT32_T INT16_T UINT16_T INT8_T UINT8_T +%token I64VEC2 I64VEC3 I64VEC4 +%token U64VEC2 U64VEC3 U64VEC4 +%token I32VEC2 I32VEC3 I32VEC4 +%token U32VEC2 U32VEC3 U32VEC4 +%token I16VEC2 I16VEC3 I16VEC4 +%token U16VEC2 U16VEC3 U16VEC4 +%token I8VEC2 I8VEC3 I8VEC4 +%token U8VEC2 U8VEC3 U8VEC4 +%token DVEC2 DVEC3 DVEC4 DMAT2 DMAT3 DMAT4 +%token F16VEC2 F16VEC3 F16VEC4 F16MAT2 F16MAT3 F16MAT4 +%token F32VEC2 F32VEC3 F32VEC4 F32MAT2 F32MAT3 F32MAT4 +%token F64VEC2 F64VEC3 F64VEC4 F64MAT2 F64MAT3 F64MAT4 +%token DMAT2X2 DMAT2X3 DMAT2X4 +%token DMAT3X2 DMAT3X3 DMAT3X4 +%token DMAT4X2 DMAT4X3 DMAT4X4 +%token F16MAT2X2 F16MAT2X3 F16MAT2X4 +%token F16MAT3X2 F16MAT3X3 F16MAT3X4 +%token F16MAT4X2 F16MAT4X3 F16MAT4X4 +%token F32MAT2X2 F32MAT2X3 F32MAT2X4 +%token F32MAT3X2 F32MAT3X3 F32MAT3X4 +%token F32MAT4X2 F32MAT4X3 F32MAT4X4 +%token F64MAT2X2 F64MAT2X3 F64MAT2X4 +%token F64MAT3X2 F64MAT3X3 F64MAT3X4 +%token F64MAT4X2 F64MAT4X3 F64MAT4X4 +%token ATOMIC_UINT +%token ACCSTRUCTNV +%token ACCSTRUCTEXT +%token RAYQUERYEXT +%token FCOOPMATNV ICOOPMATNV UCOOPMATNV + +// combined image/sampler +%token SAMPLERCUBEARRAY SAMPLERCUBEARRAYSHADOW +%token ISAMPLERCUBEARRAY USAMPLERCUBEARRAY +%token SAMPLER1D SAMPLER1DARRAY 
SAMPLER1DARRAYSHADOW ISAMPLER1D SAMPLER1DSHADOW +%token SAMPLER2DRECT SAMPLER2DRECTSHADOW ISAMPLER2DRECT USAMPLER2DRECT +%token SAMPLERBUFFER ISAMPLERBUFFER USAMPLERBUFFER +%token SAMPLER2DMS ISAMPLER2DMS USAMPLER2DMS +%token SAMPLER2DMSARRAY ISAMPLER2DMSARRAY USAMPLER2DMSARRAY +%token SAMPLEREXTERNALOES +%token SAMPLEREXTERNAL2DY2YEXT +%token ISAMPLER1DARRAY USAMPLER1D USAMPLER1DARRAY +%token F16SAMPLER1D F16SAMPLER2D F16SAMPLER3D F16SAMPLER2DRECT F16SAMPLERCUBE +%token F16SAMPLER1DARRAY F16SAMPLER2DARRAY F16SAMPLERCUBEARRAY +%token F16SAMPLERBUFFER F16SAMPLER2DMS F16SAMPLER2DMSARRAY +%token F16SAMPLER1DSHADOW F16SAMPLER2DSHADOW F16SAMPLER1DARRAYSHADOW F16SAMPLER2DARRAYSHADOW +%token F16SAMPLER2DRECTSHADOW F16SAMPLERCUBESHADOW F16SAMPLERCUBEARRAYSHADOW + +// images +%token IMAGE1D IIMAGE1D UIMAGE1D IMAGE2D IIMAGE2D +%token UIMAGE2D IMAGE3D IIMAGE3D UIMAGE3D +%token IMAGE2DRECT IIMAGE2DRECT UIMAGE2DRECT +%token IMAGECUBE IIMAGECUBE UIMAGECUBE +%token IMAGEBUFFER IIMAGEBUFFER UIMAGEBUFFER +%token IMAGE1DARRAY IIMAGE1DARRAY UIMAGE1DARRAY +%token IMAGE2DARRAY IIMAGE2DARRAY UIMAGE2DARRAY +%token IMAGECUBEARRAY IIMAGECUBEARRAY UIMAGECUBEARRAY +%token IMAGE2DMS IIMAGE2DMS UIMAGE2DMS +%token IMAGE2DMSARRAY IIMAGE2DMSARRAY UIMAGE2DMSARRAY + +%token F16IMAGE1D F16IMAGE2D F16IMAGE3D F16IMAGE2DRECT +%token F16IMAGECUBE F16IMAGE1DARRAY F16IMAGE2DARRAY F16IMAGECUBEARRAY +%token F16IMAGEBUFFER F16IMAGE2DMS F16IMAGE2DMSARRAY + +// texture without sampler +%token TEXTURECUBEARRAY ITEXTURECUBEARRAY UTEXTURECUBEARRAY +%token TEXTURE1D ITEXTURE1D UTEXTURE1D +%token TEXTURE1DARRAY ITEXTURE1DARRAY UTEXTURE1DARRAY +%token TEXTURE2DRECT ITEXTURE2DRECT UTEXTURE2DRECT +%token TEXTUREBUFFER ITEXTUREBUFFER UTEXTUREBUFFER +%token TEXTURE2DMS ITEXTURE2DMS UTEXTURE2DMS +%token TEXTURE2DMSARRAY ITEXTURE2DMSARRAY UTEXTURE2DMSARRAY + +%token F16TEXTURE1D F16TEXTURE2D F16TEXTURE3D F16TEXTURE2DRECT F16TEXTURECUBE +%token F16TEXTURE1DARRAY F16TEXTURE2DARRAY F16TEXTURECUBEARRAY +%token F16TEXTUREBUFFER F16TEXTURE2DMS F16TEXTURE2DMSARRAY + +// input attachments +%token SUBPASSINPUT SUBPASSINPUTMS ISUBPASSINPUT ISUBPASSINPUTMS USUBPASSINPUT USUBPASSINPUTMS +%token F16SUBPASSINPUT F16SUBPASSINPUTMS + +GLSLANG_WEB_EXCLUDE_OFF + +%token LEFT_OP RIGHT_OP +%token INC_OP DEC_OP LE_OP GE_OP EQ_OP NE_OP +%token AND_OP OR_OP XOR_OP MUL_ASSIGN DIV_ASSIGN ADD_ASSIGN +%token MOD_ASSIGN LEFT_ASSIGN RIGHT_ASSIGN AND_ASSIGN XOR_ASSIGN OR_ASSIGN +%token SUB_ASSIGN +%token STRING_LITERAL + +%token LEFT_PAREN RIGHT_PAREN LEFT_BRACKET RIGHT_BRACKET LEFT_BRACE RIGHT_BRACE DOT +%token COMMA COLON EQUAL SEMICOLON BANG DASH TILDE PLUS STAR SLASH PERCENT +%token LEFT_ANGLE RIGHT_ANGLE VERTICAL_BAR CARET AMPERSAND QUESTION + +%token INVARIANT +%token HIGH_PRECISION MEDIUM_PRECISION LOW_PRECISION PRECISION +%token PACKED RESOURCE SUPERP + +%token FLOATCONSTANT INTCONSTANT UINTCONSTANT BOOLCONSTANT +%token IDENTIFIER TYPE_NAME +%token CENTROID IN OUT INOUT +%token STRUCT VOID WHILE +%token BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT +%token UNIFORM SHARED BUFFER +%token FLAT SMOOTH LAYOUT + +GLSLANG_WEB_EXCLUDE_ON +%token DOUBLECONSTANT INT16CONSTANT UINT16CONSTANT FLOAT16CONSTANT INT32CONSTANT UINT32CONSTANT +%token INT64CONSTANT UINT64CONSTANT +%token SUBROUTINE DEMOTE +%token PAYLOADNV PAYLOADINNV HITATTRNV CALLDATANV CALLDATAINNV +%token PAYLOADEXT PAYLOADINEXT HITATTREXT CALLDATAEXT CALLDATAINEXT +%token PATCH SAMPLE NONUNIFORM +%token COHERENT VOLATILE RESTRICT READONLY WRITEONLY DEVICECOHERENT QUEUEFAMILYCOHERENT 
WORKGROUPCOHERENT +%token SUBGROUPCOHERENT NONPRIVATE SHADERCALLCOHERENT +%token NOPERSPECTIVE EXPLICITINTERPAMD PERVERTEXNV PERPRIMITIVENV PERVIEWNV PERTASKNV +%token PRECISE +GLSLANG_WEB_EXCLUDE_OFF + +%type assignment_operator unary_operator +%type variable_identifier primary_expression postfix_expression +%type expression integer_expression assignment_expression +%type unary_expression multiplicative_expression additive_expression +%type relational_expression equality_expression +%type conditional_expression constant_expression +%type logical_or_expression logical_xor_expression logical_and_expression +%type shift_expression and_expression exclusive_or_expression inclusive_or_expression +%type function_call initializer condition conditionopt + +%type translation_unit function_definition +%type statement simple_statement +%type statement_list switch_statement_list compound_statement +%type declaration_statement selection_statement selection_statement_nonattributed expression_statement +%type switch_statement switch_statement_nonattributed case_label +%type declaration external_declaration +%type for_init_statement compound_statement_no_new_scope +%type selection_rest_statement for_rest_statement +%type iteration_statement iteration_statement_nonattributed jump_statement statement_no_new_scope statement_scoped +%type single_declaration init_declarator_list + +%type parameter_declaration parameter_declarator parameter_type_specifier + +%type array_specifier +%type invariant_qualifier interpolation_qualifier storage_qualifier precision_qualifier +%type layout_qualifier layout_qualifier_id_list layout_qualifier_id + +%type type_parameter_specifier +%type type_parameter_specifier_opt +%type type_parameter_specifier_list + +%type type_qualifier fully_specified_type type_specifier +%type single_type_qualifier +%type type_specifier_nonarray +%type struct_specifier +%type struct_declarator +%type struct_declarator_list struct_declaration struct_declaration_list +%type block_structure +%type function_header function_declarator +%type function_header_with_parameters +%type function_call_header_with_parameters function_call_header_no_parameters function_call_generic function_prototype +%type function_call_or_method function_identifier function_call_header + +%type identifier_list + +GLSLANG_WEB_EXCLUDE_ON +%type precise_qualifier non_uniform_qualifier +%type type_name_list +%type attribute attribute_list single_attribute +%type demote_statement +%type initializer_list +GLSLANG_WEB_EXCLUDE_OFF + +%start translation_unit +%% + +variable_identifier + : IDENTIFIER { + $$ = parseContext.handleVariable($1.loc, $1.symbol, $1.string); + } + ; + +primary_expression + : variable_identifier { + $$ = $1; + } + | LEFT_PAREN expression RIGHT_PAREN { + $$ = $2; + if ($$->getAsConstantUnion()) + $$->getAsConstantUnion()->setExpression(); + } + | FLOATCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat, $1.loc, true); + } + | INTCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true); + } + | UINTCONSTANT { + parseContext.fullIntegerCheck($1.loc, "unsigned literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true); + } + | BOOLCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.b, $1.loc, true); + } +GLSLANG_WEB_EXCLUDE_ON + | STRING_LITERAL { + $$ = parseContext.intermediate.addConstantUnion($1.string, $1.loc, true); + } + | INT32CONSTANT { + parseContext.explicitInt32Check($1.loc, "32-bit signed literal"); + $$ = 
parseContext.intermediate.addConstantUnion($1.i, $1.loc, true); + } + | UINT32CONSTANT { + parseContext.explicitInt32Check($1.loc, "32-bit signed literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true); + } + | INT64CONSTANT { + parseContext.int64Check($1.loc, "64-bit integer literal"); + $$ = parseContext.intermediate.addConstantUnion($1.i64, $1.loc, true); + } + | UINT64CONSTANT { + parseContext.int64Check($1.loc, "64-bit unsigned integer literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u64, $1.loc, true); + } + | INT16CONSTANT { + parseContext.explicitInt16Check($1.loc, "16-bit integer literal"); + $$ = parseContext.intermediate.addConstantUnion((short)$1.i, $1.loc, true); + } + | UINT16CONSTANT { + parseContext.explicitInt16Check($1.loc, "16-bit unsigned integer literal"); + $$ = parseContext.intermediate.addConstantUnion((unsigned short)$1.u, $1.loc, true); + } + | DOUBLECONSTANT { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double literal"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double literal"); + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtDouble, $1.loc, true); + } + | FLOAT16CONSTANT { + parseContext.float16Check($1.loc, "half float literal"); + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat16, $1.loc, true); + } +GLSLANG_WEB_EXCLUDE_OFF + ; + +postfix_expression + : primary_expression { + $$ = $1; + } + | postfix_expression LEFT_BRACKET integer_expression RIGHT_BRACKET { + $$ = parseContext.handleBracketDereference($2.loc, $1, $3); + } + | function_call { + $$ = $1; + } + | postfix_expression DOT IDENTIFIER { + $$ = parseContext.handleDotDereference($3.loc, $1, *$3.string); + } + | postfix_expression INC_OP { + parseContext.variableCheck($1); + parseContext.lValueErrorCheck($2.loc, "++", $1); + $$ = parseContext.handleUnaryMath($2.loc, "++", EOpPostIncrement, $1); + } + | postfix_expression DEC_OP { + parseContext.variableCheck($1); + parseContext.lValueErrorCheck($2.loc, "--", $1); + $$ = parseContext.handleUnaryMath($2.loc, "--", EOpPostDecrement, $1); + } + ; + +integer_expression + : expression { + parseContext.integerCheck($1, "[]"); + $$ = $1; + } + ; + +function_call + : function_call_or_method { + $$ = parseContext.handleFunctionCall($1.loc, $1.function, $1.intermNode); + delete $1.function; + } + ; + +function_call_or_method + : function_call_generic { + $$ = $1; + } + ; + +function_call_generic + : function_call_header_with_parameters RIGHT_PAREN { + $$ = $1; + $$.loc = $2.loc; + } + | function_call_header_no_parameters RIGHT_PAREN { + $$ = $1; + $$.loc = $2.loc; + } + ; + +function_call_header_no_parameters + : function_call_header VOID { + $$ = $1; + } + | function_call_header { + $$ = $1; + } + ; + +function_call_header_with_parameters + : function_call_header assignment_expression { + TParameter param = { 0, new TType }; + param.type->shallowCopy($2->getType()); + $1.function->addParameter(param); + $$.function = $1.function; + $$.intermNode = $2; + } + | function_call_header_with_parameters COMMA assignment_expression { + TParameter param = { 0, new TType }; + param.type->shallowCopy($3->getType()); + $1.function->addParameter(param); + $$.function = $1.function; + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, $3, $2.loc); + } + ; + +function_call_header + : function_identifier LEFT_PAREN { + $$ = $1; + } + ; + +// Grammar Note: Constructors look like functions, but are recognized as types. 
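+// For example (illustrative, not part of the upstream grammar):
+//
+//     vec4 v = vec4(1.0, 0.0, 0.0, 1.0);
+//
+// 'vec4(' reduces through the 'function_identifier : type_specifier'
+// production below, so handleConstructorCall builds a constructor, while a
+// user-defined 'foo(' reduces through the postfix_expression alternative
+// and is looked up as a function or method instead.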
+ +function_identifier + : type_specifier { + // Constructor + $$.intermNode = 0; + $$.function = parseContext.handleConstructorCall($1.loc, $1); + } + | postfix_expression { + // + // Should be a method or subroutine call, but we haven't recognized the arguments yet. + // + $$.function = 0; + $$.intermNode = 0; + + TIntermMethod* method = $1->getAsMethodNode(); + if (method) { + $$.function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength); + $$.intermNode = method->getObject(); + } else { + TIntermSymbol* symbol = $1->getAsSymbolNode(); + if (symbol) { + parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName()); + TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid)); + $$.function = function; + } else + parseContext.error($1->getLoc(), "function call, method, or subroutine call expected", "", ""); + } + + if ($$.function == 0) { + // error recover + TString* empty = NewPoolTString(""); + $$.function = new TFunction(empty, TType(EbtVoid), EOpNull); + } + } +GLSLANG_WEB_EXCLUDE_ON + | non_uniform_qualifier { + // Constructor + $$.intermNode = 0; + $$.function = parseContext.handleConstructorCall($1.loc, $1); + } +GLSLANG_WEB_EXCLUDE_OFF + ; + +unary_expression + : postfix_expression { + parseContext.variableCheck($1); + $$ = $1; + if (TIntermMethod* method = $1->getAsMethodNode()) + parseContext.error($1->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), ""); + } + | INC_OP unary_expression { + parseContext.lValueErrorCheck($1.loc, "++", $2); + $$ = parseContext.handleUnaryMath($1.loc, "++", EOpPreIncrement, $2); + } + | DEC_OP unary_expression { + parseContext.lValueErrorCheck($1.loc, "--", $2); + $$ = parseContext.handleUnaryMath($1.loc, "--", EOpPreDecrement, $2); + } + | unary_operator unary_expression { + if ($1.op != EOpNull) { + char errorOp[2] = {0, 0}; + switch($1.op) { + case EOpNegative: errorOp[0] = '-'; break; + case EOpLogicalNot: errorOp[0] = '!'; break; + case EOpBitwiseNot: errorOp[0] = '~'; break; + default: break; // some compilers want this + } + $$ = parseContext.handleUnaryMath($1.loc, errorOp, $1.op, $2); + } else { + $$ = $2; + if ($$->getAsConstantUnion()) + $$->getAsConstantUnion()->setExpression(); + } + } + ; +// Grammar Note: No traditional style type casts. + +unary_operator + : PLUS { $$.loc = $1.loc; $$.op = EOpNull; } + | DASH { $$.loc = $1.loc; $$.op = EOpNegative; } + | BANG { $$.loc = $1.loc; $$.op = EOpLogicalNot; } + | TILDE { $$.loc = $1.loc; $$.op = EOpBitwiseNot; + parseContext.fullIntegerCheck($1.loc, "bitwise not"); } + ; +// Grammar Note: No '*' or '&' unary ops. Pointers are not supported. 
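+// For example, '+x' parses but carries EOpNull, so the production above
+// returns the operand unchanged; '-x', '!x' and '~x' dispatch through
+// handleUnaryMath with EOpNegative, EOpLogicalNot and EOpBitwiseNot, and
+// '*x' or '&x' is rejected outright by the grammar.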
+ +multiplicative_expression + : unary_expression { $$ = $1; } + | multiplicative_expression STAR unary_expression { + $$ = parseContext.handleBinaryMath($2.loc, "*", EOpMul, $1, $3); + if ($$ == 0) + $$ = $1; + } + | multiplicative_expression SLASH unary_expression { + $$ = parseContext.handleBinaryMath($2.loc, "/", EOpDiv, $1, $3); + if ($$ == 0) + $$ = $1; + } + | multiplicative_expression PERCENT unary_expression { + parseContext.fullIntegerCheck($2.loc, "%"); + $$ = parseContext.handleBinaryMath($2.loc, "%", EOpMod, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +additive_expression + : multiplicative_expression { $$ = $1; } + | additive_expression PLUS multiplicative_expression { + $$ = parseContext.handleBinaryMath($2.loc, "+", EOpAdd, $1, $3); + if ($$ == 0) + $$ = $1; + } + | additive_expression DASH multiplicative_expression { + $$ = parseContext.handleBinaryMath($2.loc, "-", EOpSub, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +shift_expression + : additive_expression { $$ = $1; } + | shift_expression LEFT_OP additive_expression { + parseContext.fullIntegerCheck($2.loc, "bit shift left"); + $$ = parseContext.handleBinaryMath($2.loc, "<<", EOpLeftShift, $1, $3); + if ($$ == 0) + $$ = $1; + } + | shift_expression RIGHT_OP additive_expression { + parseContext.fullIntegerCheck($2.loc, "bit shift right"); + $$ = parseContext.handleBinaryMath($2.loc, ">>", EOpRightShift, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +relational_expression + : shift_expression { $$ = $1; } + | relational_expression LEFT_ANGLE shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, "<", EOpLessThan, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression RIGHT_ANGLE shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, ">", EOpGreaterThan, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression LE_OP shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, "<=", EOpLessThanEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression GE_OP shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, ">=", EOpGreaterThanEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +equality_expression + : relational_expression { $$ = $1; } + | equality_expression EQ_OP relational_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison"); + parseContext.opaqueCheck($2.loc, $1->getType(), "=="); + parseContext.specializationCheck($2.loc, $1->getType(), "=="); + parseContext.referenceCheck($2.loc, $1->getType(), "=="); + $$ = parseContext.handleBinaryMath($2.loc, "==", EOpEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | equality_expression NE_OP relational_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison"); + parseContext.opaqueCheck($2.loc, $1->getType(), "!="); + parseContext.specializationCheck($2.loc, $1->getType(), "!="); + parseContext.referenceCheck($2.loc, $1->getType(), "!="); + $$ = parseContext.handleBinaryMath($2.loc, "!=", EOpNotEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +and_expression + : equality_expression { $$ = $1; } + | and_expression AMPERSAND equality_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise and"); + $$ = 
parseContext.handleBinaryMath($2.loc, "&", EOpAnd, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +exclusive_or_expression + : and_expression { $$ = $1; } + | exclusive_or_expression CARET and_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise exclusive or"); + $$ = parseContext.handleBinaryMath($2.loc, "^", EOpExclusiveOr, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +inclusive_or_expression + : exclusive_or_expression { $$ = $1; } + | inclusive_or_expression VERTICAL_BAR exclusive_or_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise inclusive or"); + $$ = parseContext.handleBinaryMath($2.loc, "|", EOpInclusiveOr, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +logical_and_expression + : inclusive_or_expression { $$ = $1; } + | logical_and_expression AND_OP inclusive_or_expression { + $$ = parseContext.handleBinaryMath($2.loc, "&&", EOpLogicalAnd, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +logical_xor_expression + : logical_and_expression { $$ = $1; } + | logical_xor_expression XOR_OP logical_and_expression { + $$ = parseContext.handleBinaryMath($2.loc, "^^", EOpLogicalXor, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +logical_or_expression + : logical_xor_expression { $$ = $1; } + | logical_or_expression OR_OP logical_xor_expression { + $$ = parseContext.handleBinaryMath($2.loc, "||", EOpLogicalOr, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +conditional_expression + : logical_or_expression { $$ = $1; } + | logical_or_expression QUESTION { + ++parseContext.controlFlowNestingLevel; + } + expression COLON assignment_expression { + --parseContext.controlFlowNestingLevel; + parseContext.boolCheck($2.loc, $1); + parseContext.rValueErrorCheck($2.loc, "?", $1); + parseContext.rValueErrorCheck($5.loc, ":", $4); + parseContext.rValueErrorCheck($5.loc, ":", $6); + $$ = parseContext.intermediate.addSelection($1, $4, $6, $2.loc); + if ($$ == 0) { + parseContext.binaryOpError($2.loc, ":", $4->getCompleteString(), $6->getCompleteString()); + $$ = $6; + } + } + ; + +assignment_expression + : conditional_expression { $$ = $1; } + | unary_expression assignment_operator assignment_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array assignment"); + parseContext.opaqueCheck($2.loc, $1->getType(), "="); + parseContext.storage16BitAssignmentCheck($2.loc, $1->getType(), "="); + parseContext.specializationCheck($2.loc, $1->getType(), "="); + parseContext.lValueErrorCheck($2.loc, "assign", $1); + parseContext.rValueErrorCheck($2.loc, "assign", $3); + $$ = parseContext.intermediate.addAssign($2.op, $1, $3, $2.loc); + if ($$ == 0) { + parseContext.assignError($2.loc, "assign", $1->getCompleteString(), $3->getCompleteString()); + $$ = $1; + } + } + ; + +assignment_operator + : EQUAL { + $$.loc = $1.loc; + $$.op = EOpAssign; + } + | MUL_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpMulAssign; + } + | DIV_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpDivAssign; + } + | MOD_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "%="); + $$.loc = $1.loc; + $$.op = EOpModAssign; + } + | ADD_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpAddAssign; + } + | SUB_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpSubAssign; + } + | LEFT_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bit-shift left assign"); + $$.loc = $1.loc; $$.op = EOpLeftShiftAssign; + } + | RIGHT_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bit-shift right assign"); + 
$$.loc = $1.loc; $$.op = EOpRightShiftAssign; + } + | AND_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-and assign"); + $$.loc = $1.loc; $$.op = EOpAndAssign; + } + | XOR_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-xor assign"); + $$.loc = $1.loc; $$.op = EOpExclusiveOrAssign; + } + | OR_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-or assign"); + $$.loc = $1.loc; $$.op = EOpInclusiveOrAssign; + } + ; + +expression + : assignment_expression { + $$ = $1; + } + | expression COMMA assignment_expression { + parseContext.samplerConstructorLocationCheck($2.loc, ",", $3); + $$ = parseContext.intermediate.addComma($1, $3, $2.loc); + if ($$ == 0) { + parseContext.binaryOpError($2.loc, ",", $1->getCompleteString(), $3->getCompleteString()); + $$ = $3; + } + } + ; + +constant_expression + : conditional_expression { + parseContext.constantValueCheck($1, ""); + $$ = $1; + } + ; + +declaration + : function_prototype SEMICOLON { + parseContext.handleFunctionDeclarator($1.loc, *$1.function, true /* prototype */); + $$ = 0; + // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature + } + | init_declarator_list SEMICOLON { + if ($1.intermNode && $1.intermNode->getAsAggregate()) + $1.intermNode->getAsAggregate()->setOperator(EOpSequence); + $$ = $1.intermNode; + } + | PRECISION precision_qualifier type_specifier SEMICOLON { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "precision statement"); + // lazy setting of the previous scope's defaults, has effect only the first time it is called in a particular scope + parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]); + parseContext.setDefaultPrecision($1.loc, $3, $2.qualifier.precision); + $$ = 0; + } + | block_structure SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList); + $$ = 0; + } + | block_structure IDENTIFIER SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList, $2.string); + $$ = 0; + } + | block_structure IDENTIFIER array_specifier SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList, $2.string, $3.arraySizes); + $$ = 0; + } + | type_qualifier SEMICOLON { + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.updateStandaloneQualifierDefaults($1.loc, $1); + $$ = 0; + } + | type_qualifier IDENTIFIER SEMICOLON { + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$2.string); + $$ = 0; + } + | type_qualifier IDENTIFIER identifier_list SEMICOLON { + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + $3->push_back($2.string); + parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$3); + $$ = 0; + } + ; + +block_structure + : type_qualifier IDENTIFIER LEFT_BRACE { parseContext.nestedBlockCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + --parseContext.structNestingLevel; + parseContext.blockName = $2.string; + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.currentBlockQualifier = $1.qualifier; + $$.loc = $1.loc; + $$.typeList = $5; + } + +identifier_list + : COMMA IDENTIFIER { + $$ = new TIdentifierList; + $$->push_back($2.string); + } + | identifier_list COMMA IDENTIFIER { + $$ = $1; + $$->push_back($3.string); + } + ; + +function_prototype + : function_declarator RIGHT_PAREN { + $$.function = $1; + $$.loc = $2.loc; + } + ; + +function_declarator + : function_header { + $$ = $1; + } + | 
function_header_with_parameters { + $$ = $1; + } + ; + + +function_header_with_parameters + : function_header parameter_declaration { + // Add the parameter + $$ = $1; + if ($2.param.type->getBasicType() != EbtVoid) + $1->addParameter($2.param); + else + delete $2.param.type; + } + | function_header_with_parameters COMMA parameter_declaration { + // + // Only first parameter of one-parameter functions can be void + // The check for named parameters not being void is done in parameter_declarator + // + if ($3.param.type->getBasicType() == EbtVoid) { + // + // This parameter > first is void + // + parseContext.error($2.loc, "cannot be an argument type except for '(void)'", "void", ""); + delete $3.param.type; + } else { + // Add the parameter + $$ = $1; + $1->addParameter($3.param); + } + } + ; + +function_header + : fully_specified_type IDENTIFIER LEFT_PAREN { + if ($1.qualifier.storage != EvqGlobal && $1.qualifier.storage != EvqTemporary) { + parseContext.error($2.loc, "no qualifiers allowed for function return", + GetStorageQualifierString($1.qualifier.storage), ""); + } + if ($1.arraySizes) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + + // Add the function as a prototype after parsing it (we do not support recursion) + TFunction *function; + TType type($1); + + // Potentially rename shader entry point function. No-op most of the time. + parseContext.renameShaderFunction($2.string); + + // Make the function + function = new TFunction($2.string, type); + $$ = function; + } + ; + +parameter_declarator + // Type + name + : type_specifier IDENTIFIER { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + if ($1.basicType == EbtVoid) { + parseContext.error($2.loc, "illegal use of type 'void'", $2.string->c_str(), ""); + } + parseContext.reservedErrorCheck($2.loc, *$2.string); + + TParameter param = {$2.string, new TType($1)}; + $$.loc = $2.loc; + $$.param = param; + } + | type_specifier IDENTIFIER array_specifier { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + TType* type = new TType($1); + type->transferArraySizes($3.arraySizes); + type->copyArrayInnerSizes($1.arraySizes); + + parseContext.arrayOfArrayVersionCheck($2.loc, type->getArraySizes()); + parseContext.arraySizeRequiredCheck($3.loc, *$3.arraySizes); + parseContext.reservedErrorCheck($2.loc, *$2.string); + + TParameter param = { $2.string, type }; + + $$.loc = $2.loc; + $$.param = param; + } + ; + +parameter_declaration + // + // With name + // + : type_qualifier parameter_declarator { + $$ = $2; + if ($1.qualifier.precision != EpqNone) + $$.param.type->getQualifier().precision = $1.qualifier.precision; + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type); + parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type); + + } + | parameter_declarator { + $$ = $1; + + parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type); + parseContext.paramCheckFixStorage($1.loc, EvqTemporary, 
*$$.param.type); + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + } + // + // Without name + // + | type_qualifier parameter_type_specifier { + $$ = $2; + if ($1.qualifier.precision != EpqNone) + $$.param.type->getQualifier().precision = $1.qualifier.precision; + parseContext.precisionQualifierCheck($1.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type); + parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type); + } + | parameter_type_specifier { + $$ = $1; + + parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type); + parseContext.paramCheckFixStorage($1.loc, EvqTemporary, *$$.param.type); + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + } + ; + +parameter_type_specifier + : type_specifier { + TParameter param = { 0, new TType($1) }; + $$.param = param; + if ($1.arraySizes) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + ; + +init_declarator_list + : single_declaration { + $$ = $1; + } + | init_declarator_list COMMA IDENTIFIER { + $$ = $1; + parseContext.declareVariable($3.loc, *$3.string, $1.type); + } + | init_declarator_list COMMA IDENTIFIER array_specifier { + $$ = $1; + parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes); + } + | init_declarator_list COMMA IDENTIFIER array_specifier EQUAL initializer { + $$.type = $1.type; + TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes, $6); + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $5.loc); + } + | init_declarator_list COMMA IDENTIFIER EQUAL initializer { + $$.type = $1.type; + TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, 0, $5); + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $4.loc); + } + ; + +single_declaration + : fully_specified_type { + $$.type = $1; + $$.intermNode = 0; +GLSLANG_WEB_EXCLUDE_ON + parseContext.declareTypeDefaults($$.loc, $$.type); +GLSLANG_WEB_EXCLUDE_OFF + } + | fully_specified_type IDENTIFIER { + $$.type = $1; + $$.intermNode = 0; + parseContext.declareVariable($2.loc, *$2.string, $1); + } + | fully_specified_type IDENTIFIER array_specifier { + $$.type = $1; + $$.intermNode = 0; + parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes); + } + | fully_specified_type IDENTIFIER array_specifier EQUAL initializer { + $$.type = $1; + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes, $5); + $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $4.loc); + } + | fully_specified_type IDENTIFIER EQUAL initializer { + $$.type = $1; + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4); + $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $3.loc); + } + +// Grammar Note: No 'enum', or 'typedef'. 
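+ +// Illustrative note (not part of the upstream grammar): the declaration rules above accept chained declarators such as +// vec4 a, b = vec4(0.0), c[2], d[] = vec4[2](b, b); +// single_declaration parses "vec4 a", then each init_declarator_list alternative appends one more declarator to the same aggregate statement.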
+ +fully_specified_type + : type_specifier { + $$ = $1; + + parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $$); + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + } + parseContext.precisionQualifierCheck($$.loc, $$.basicType, $$.qualifier); + } + | type_qualifier type_specifier { + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $2); + + if ($2.arraySizes) { + parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type"); + } + + if ($2.arraySizes && parseContext.arrayQualifierError($2.loc, $1.qualifier)) + $2.arraySizes = nullptr; + + parseContext.checkNoShaderLayouts($2.loc, $1.shaderQualifiers); + $2.shaderQualifiers.merge($1.shaderQualifiers); + parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true); + parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier); + + $$ = $2; + + if (! $$.qualifier.isInterpolation() && + ((parseContext.language == EShLangVertex && $$.qualifier.storage == EvqVaryingOut) || + (parseContext.language == EShLangFragment && $$.qualifier.storage == EvqVaryingIn))) + $$.qualifier.smooth = true; + } + ; + +invariant_qualifier + : INVARIANT { + parseContext.globalCheck($1.loc, "invariant"); + parseContext.profileRequires($$.loc, ENoProfile, 120, 0, "invariant"); + $$.init($1.loc); + $$.qualifier.invariant = true; + } + ; + +interpolation_qualifier + : SMOOTH { + parseContext.globalCheck($1.loc, "smooth"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "smooth"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "smooth"); + $$.init($1.loc); + $$.qualifier.smooth = true; + } + | FLAT { + parseContext.globalCheck($1.loc, "flat"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "flat"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "flat"); + $$.init($1.loc); + $$.qualifier.flat = true; + } +GLSLANG_WEB_EXCLUDE_ON + | NOPERSPECTIVE { + parseContext.globalCheck($1.loc, "noperspective"); + parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "noperspective"); + $$.init($1.loc); + $$.qualifier.nopersp = true; + } + | EXPLICITINTERPAMD { + parseContext.globalCheck($1.loc, "__explicitInterpAMD"); + parseContext.profileRequires($1.loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + parseContext.profileRequires($1.loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + $$.init($1.loc); + $$.qualifier.explicitInterp = true; + } + | PERVERTEXNV { + parseContext.globalCheck($1.loc, "pervertexNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires($1.loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + $$.init($1.loc); + $$.qualifier.pervertexNV = true; + } + | PERPRIMITIVENV { + // No need for profile version or extension check. Shader stage already checks both. 
+ parseContext.globalCheck($1.loc, "perprimitiveNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV"); + // The fragment shader stage doesn't check for the extension itself, so add the extension check explicitly here. + if (parseContext.language == EShLangFragment) + parseContext.requireExtensions($1.loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV"); + $$.init($1.loc); + $$.qualifier.perPrimitiveNV = true; + } + | PERVIEWNV { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck($1.loc, "perviewNV"); + parseContext.requireStage($1.loc, EShLangMeshNV, "perviewNV"); + $$.init($1.loc); + $$.qualifier.perViewNV = true; + } + | PERTASKNV { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck($1.loc, "taskNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV"); + $$.init($1.loc); + $$.qualifier.perTaskNV = true; + } +GLSLANG_WEB_EXCLUDE_OFF + ; + +layout_qualifier + : LAYOUT LEFT_PAREN layout_qualifier_id_list RIGHT_PAREN { + $$ = $3; + } + ; + +layout_qualifier_id_list + : layout_qualifier_id { + $$ = $1; + } + | layout_qualifier_id_list COMMA layout_qualifier_id { + $$ = $1; + $$.shaderQualifiers.merge($3.shaderQualifiers); + parseContext.mergeObjectLayoutQualifiers($$.qualifier, $3.qualifier, false); + } + +layout_qualifier_id + : IDENTIFIER { + $$.init($1.loc); + parseContext.setLayoutQualifier($1.loc, $$, *$1.string); + } + | IDENTIFIER EQUAL constant_expression { + $$.init($1.loc); + parseContext.setLayoutQualifier($1.loc, $$, *$1.string, $3); + } + | SHARED { // because "shared" is both an identifier and a keyword + $$.init($1.loc); + TString strShared("shared"); + parseContext.setLayoutQualifier($1.loc, $$, strShared); + } + ; + +GLSLANG_WEB_EXCLUDE_ON +precise_qualifier + : PRECISE { + parseContext.profileRequires($$.loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise"); + parseContext.profileRequires($1.loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise"); + $$.init($1.loc); + $$.qualifier.noContraction = true; + } + ; +GLSLANG_WEB_EXCLUDE_OFF + +type_qualifier + : single_type_qualifier { + $$ = $1; + } + | type_qualifier single_type_qualifier { + $$ = $1; + if ($$.basicType == EbtVoid) + $$.basicType = $2.basicType; + + $$.shaderQualifiers.merge($2.shaderQualifiers); + parseContext.mergeQualifiers($$.loc, $$.qualifier, $2.qualifier, false); + } + ; + +single_type_qualifier + : storage_qualifier { + $$ = $1; + } + | layout_qualifier { + $$ = $1; + } + | precision_qualifier { + parseContext.checkPrecisionQualifier($1.loc, $1.qualifier.precision); + $$ = $1; + } + | interpolation_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } + | invariant_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } +GLSLANG_WEB_EXCLUDE_ON + | precise_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } + | non_uniform_qualifier { + $$ = $1; + } +GLSLANG_WEB_EXCLUDE_OFF + ; + +storage_qualifier + : CONST { + $$.init($1.loc); + $$.qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant + } + | INOUT { + parseContext.globalCheck($1.loc, "inout"); + $$.init($1.loc); + $$.qualifier.storage = EvqInOut; + } + | IN { + parseContext.globalCheck($1.loc, "in"); + $$.init($1.loc); +
// whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later + $$.qualifier.storage = EvqIn; + } + | OUT { + parseContext.globalCheck($1.loc, "out"); + $$.init($1.loc); + // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later + $$.qualifier.storage = EvqOut; + } + | CENTROID { + parseContext.profileRequires($1.loc, ENoProfile, 120, 0, "centroid"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "centroid"); + parseContext.globalCheck($1.loc, "centroid"); + $$.init($1.loc); + $$.qualifier.centroid = true; + } + | UNIFORM { + parseContext.globalCheck($1.loc, "uniform"); + $$.init($1.loc); + $$.qualifier.storage = EvqUniform; + } + | SHARED { + parseContext.globalCheck($1.loc, "shared"); + parseContext.profileRequires($1.loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared"); + parseContext.profileRequires($1.loc, EEsProfile, 310, 0, "shared"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangComputeMask | EShLangMeshNVMask | EShLangTaskNVMask), "shared"); + $$.init($1.loc); + $$.qualifier.storage = EvqShared; + } + | BUFFER { + parseContext.globalCheck($1.loc, "buffer"); + $$.init($1.loc); + $$.qualifier.storage = EvqBuffer; + } +GLSLANG_WEB_EXCLUDE_ON + | ATTRIBUTE { + parseContext.requireStage($1.loc, EShLangVertex, "attribute"); + parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "attribute"); + parseContext.checkDeprecated($1.loc, ENoProfile, 130, "attribute"); + parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "attribute"); + parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "attribute"); + + parseContext.globalCheck($1.loc, "attribute"); + + $$.init($1.loc); + $$.qualifier.storage = EvqVaryingIn; + } + | VARYING { + parseContext.checkDeprecated($1.loc, ENoProfile, 130, "varying"); + parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "varying"); + parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "varying"); + parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "varying"); + + parseContext.globalCheck($1.loc, "varying"); + + $$.init($1.loc); + if (parseContext.language == EShLangVertex) + $$.qualifier.storage = EvqVaryingOut; + else + $$.qualifier.storage = EvqVaryingIn; + } + | PATCH { + parseContext.globalCheck($1.loc, "patch"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch"); + $$.init($1.loc); + $$.qualifier.patch = true; + } + | SAMPLE { + parseContext.globalCheck($1.loc, "sample"); + $$.init($1.loc); + $$.qualifier.sample = true; + } + | HITATTRNV { + parseContext.globalCheck($1.loc, "hitAttributeNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqHitAttr; + } + | HITATTREXT { + parseContext.globalCheck($1.loc, "hitAttributeEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "hitAttributeEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqHitAttr; + } + | PAYLOADNV { + parseContext.globalCheck($1.loc, "rayPayloadNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask),
"rayPayloadNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayload; + } + | PAYLOADEXT { + parseContext.globalCheck($1.loc, "rayPayloadEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayload; + } + | PAYLOADINNV { + parseContext.globalCheck($1.loc, "rayPayloadInNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayloadIn; + } + | PAYLOADINEXT { + parseContext.globalCheck($1.loc, "rayPayloadInEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadInEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayloadIn; + } + | CALLDATANV { + parseContext.globalCheck($1.loc, "callableDataNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableData; + } + | CALLDATAEXT { + parseContext.globalCheck($1.loc, "callableDataEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableData; + } + | CALLDATAINNV { + parseContext.globalCheck($1.loc, "callableDataInNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableDataIn; + } + | CALLDATAINEXT { + parseContext.globalCheck($1.loc, "callableDataInEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataInEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableDataIn; + } + | COHERENT { + $$.init($1.loc); + $$.qualifier.coherent = true; + } + | DEVICECOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent"); + $$.qualifier.devicecoherent = true; + } + | QUEUEFAMILYCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent"); + $$.qualifier.queuefamilycoherent = true; + } + | WORKGROUPCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent"); + $$.qualifier.workgroupcoherent = true; + } + | SUBGROUPCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent"); + 
$$.qualifier.subgroupcoherent = true; + } + | NONPRIVATE { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate"); + $$.qualifier.nonprivate = true; + } + | SHADERCALLCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_ray_tracing, "shadercallcoherent"); + $$.qualifier.shadercallcoherent = true; + } + | VOLATILE { + $$.init($1.loc); + $$.qualifier.volatil = true; + } + | RESTRICT { + $$.init($1.loc); + $$.qualifier.restrict = true; + } + | READONLY { + $$.init($1.loc); + $$.qualifier.readonly = true; + } + | WRITEONLY { + $$.init($1.loc); + $$.qualifier.writeonly = true; + } + | SUBROUTINE { + parseContext.spvRemoved($1.loc, "subroutine"); + parseContext.globalCheck($1.loc, "subroutine"); + parseContext.unimplemented($1.loc, "subroutine"); + $$.init($1.loc); + } + | SUBROUTINE LEFT_PAREN type_name_list RIGHT_PAREN { + parseContext.spvRemoved($1.loc, "subroutine"); + parseContext.globalCheck($1.loc, "subroutine"); + parseContext.unimplemented($1.loc, "subroutine"); + $$.init($1.loc); + } +GLSLANG_WEB_EXCLUDE_OFF + ; + +GLSLANG_WEB_EXCLUDE_ON +non_uniform_qualifier + : NONUNIFORM { + $$.init($1.loc); + $$.qualifier.nonUniform = true; + } + ; + +type_name_list + : IDENTIFIER { + // TODO + } + | type_name_list COMMA IDENTIFIER { + // TODO: 4.0 semantics: subroutines + // 1) make sure each identifier is a type declared earlier with SUBROUTINE + // 2) save all of the identifiers for future comparison with the declared function + } + ; +GLSLANG_WEB_EXCLUDE_OFF + +type_specifier + : type_specifier_nonarray type_parameter_specifier_opt { + $$ = $1; + $$.qualifier.precision = parseContext.getDefaultPrecision($$); + $$.typeParameters = $2; + } + | type_specifier_nonarray type_parameter_specifier_opt array_specifier { + parseContext.arrayOfArrayVersionCheck($3.loc, $3.arraySizes); + $$ = $1; + $$.qualifier.precision = parseContext.getDefaultPrecision($$); + $$.typeParameters = $2; + $$.arraySizes = $3.arraySizes; + } + ; + +array_specifier + : LEFT_BRACKET RIGHT_BRACKET { + $$.loc = $1.loc; + $$.arraySizes = new TArraySizes; + $$.arraySizes->addInnerSize(); + } + | LEFT_BRACKET conditional_expression RIGHT_BRACKET { + $$.loc = $1.loc; + $$.arraySizes = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck($2->getLoc(), $2, size, "array size"); + $$.arraySizes->addInnerSize(size); + } + | array_specifier LEFT_BRACKET RIGHT_BRACKET { + $$ = $1; + $$.arraySizes->addInnerSize(); + } + | array_specifier LEFT_BRACKET conditional_expression RIGHT_BRACKET { + $$ = $1; + + TArraySize size; + parseContext.arraySizeCheck($3->getLoc(), $3, size, "array size"); + $$.arraySizes->addInnerSize(size); + } + ; + +type_parameter_specifier_opt + : type_parameter_specifier { + $$ = $1; + } + | /* May be null */ { + $$ = 0; + } + ; + +type_parameter_specifier + : LEFT_ANGLE type_parameter_specifier_list RIGHT_ANGLE { + $$ = $2; + } + ; + +type_parameter_specifier_list + : unary_expression { + $$ = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck($1->getLoc(), $1, size, "type parameter"); + $$->addInnerSize(size); + } + | type_parameter_specifier_list COMMA unary_expression { + $$ = $1; + + TArraySize size; + parseContext.arraySizeCheck($3->getLoc(), $3, size, "type parameter"); + $$->addInnerSize(size); + } + ; + +type_specifier_nonarray + : VOID { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtVoid; + } + | FLOAT { + $$.init($1.loc, 
parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + } + | INT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + } + | UINT { + parseContext.fullIntegerCheck($1.loc, "unsigned integer"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + } + | BOOL { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + } + | VEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(2); + } + | VEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(3); + } + | VEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(4); + } + | BVEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(2); + } + | BVEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(3); + } + | BVEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(4); + } + | IVEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(2); + } + | IVEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(3); + } + | IVEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(4); + } + | UVEC2 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(2); + } + | UVEC3 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(3); + } + | UVEC4 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(4); + } + | MAT2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | MAT3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | MAT4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | MAT2X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | MAT2X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 3); + } + | MAT2X4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 4); + } + | MAT3X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 2); + } + | MAT3X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | MAT3X4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 4); + } + | MAT4X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 2); + } + | MAT4X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 3); + } + | MAT4X4 { + $$.init($1.loc, 
parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } +GLSLANG_WEB_EXCLUDE_ON + | DOUBLE { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + } + | FLOAT16_T { + parseContext.float16ScalarVectorCheck($1.loc, "float16_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + } + | FLOAT32_T { + parseContext.explicitFloat32Check($1.loc, "float32_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + } + | FLOAT64_T { + parseContext.explicitFloat64Check($1.loc, "float64_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + } + | INT8_T { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + } + | UINT8_T { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + } + | INT16_T { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + } + | UINT16_T { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + } + | INT32_T { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + } + | UINT32_T { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + } + | INT64_T { + parseContext.int64Check($1.loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + } + | UINT64_T { + parseContext.int64Check($1.loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + } + | DVEC2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(2); + } + | DVEC3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(3); + } + | DVEC4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(4); + } + | F16VEC2 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(2); + } + | F16VEC3 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(3); + } + | F16VEC4 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(4); + } + | F32VEC2 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(2); + } + | F32VEC3 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(3); + } + | F32VEC4 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(4); + } + | F64VEC2 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(2); + } + | F64VEC3 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(3); + } + | F64VEC4 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(4); + } + | I8VEC2 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(2); + } + | I8VEC3 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(3); + } + | I8VEC4 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(4); + } + | I16VEC2 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(2); + } + | I16VEC3 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(3); + } + 
| I16VEC4 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(4); + } + | I32VEC2 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(2); + } + | I32VEC3 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(3); + } + | I32VEC4 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(4); + } + | I64VEC2 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(2); + } + | I64VEC3 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(3); + } + | I64VEC4 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(4); + } + | U8VEC2 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(2); + } + | U8VEC3 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(3); + } + | U8VEC4 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(4); + } + | U16VEC2 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(2); + } + | U16VEC3 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(3); + } + | U16VEC4 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(4); + } + | U32VEC2 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(2); + } + | U32VEC3 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, 
parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(3); + } + | U32VEC4 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(4); + } + | U64VEC2 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(2); + } + | U64VEC3 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(3); + } + | U64VEC4 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(4); + } + | DMAT2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | DMAT3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | DMAT4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | DMAT2X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | DMAT2X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 3); + } + | DMAT2X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 4); + } + | DMAT3X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 2); + } + | DMAT3X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | DMAT3X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 4); + } + | DMAT4X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 2); + } + | DMAT4X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 3); + } + | DMAT4X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | F16MAT2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 2); + } + | F16MAT3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 3); + } + | F16MAT4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 4); + } + | F16MAT2X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 2); + } + | F16MAT2X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 3); + } + | F16MAT2X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 4); + } + | F16MAT3X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 2); + } + | F16MAT3X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 3); + } + | F16MAT3X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); 
+ $$.basicType = EbtFloat16; + $$.setMatrix(3, 4); + } + | F16MAT4X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 2); + } + | F16MAT4X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 3); + } + | F16MAT4X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 4); + } + | F32MAT2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | F32MAT3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | F32MAT4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | F32MAT2X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | F32MAT2X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 3); + } + | F32MAT2X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 4); + } + | F32MAT3X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 2); + } + | F32MAT3X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | F32MAT3X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 4); + } + | F32MAT4X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 2); + } + | F32MAT4X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 3); + } + | F32MAT4X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + 
$$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | F64MAT2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | F64MAT3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | F64MAT4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | F64MAT2X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | F64MAT2X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 3); + } + | F64MAT2X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 4); + } + | F64MAT3X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 2); + } + | F64MAT3X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | F64MAT3X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 4); + } + | F64MAT4X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 2); + } + | F64MAT4X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 3); + } + | F64MAT4X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | ACCSTRUCTNV { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAccStruct; + } + | ACCSTRUCTEXT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAccStruct; + } + | RAYQUERYEXT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtRayQuery; + } + | ATOMIC_UINT { + parseContext.vulkanRemoved($1.loc, "atomic counter types"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAtomicUint; + } + | SAMPLER1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + 
$$.sampler.set(EbtFloat, Esd1D); + } +GLSLANG_WEB_EXCLUDE_OFF + | SAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); + } + | SAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd3D); + } + | SAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube); + } + | SAMPLER2DSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, false, true); + } + | SAMPLERCUBESHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, false, true); + } + | SAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true); + } + | SAMPLER2DARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true, true); + } +GLSLANG_WEB_EXCLUDE_ON + | SAMPLER1DSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, false, true); + } + | SAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, true); + } + | SAMPLER1DARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, true, true); + } + | SAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, true); + } + | SAMPLERCUBEARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, true, true); + } + | F16SAMPLER1D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D); + } + | F16SAMPLER2D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D); + } + | F16SAMPLER3D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd3D); + } + | F16SAMPLERCUBE { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube); + } + | F16SAMPLER1DSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, false, true); + } + | F16SAMPLER2DSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + 
$$.sampler.set(EbtFloat16, Esd2D, false, true); + } + | F16SAMPLERCUBESHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, false, true); + } + | F16SAMPLER1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, true); + } + | F16SAMPLER2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true); + } + | F16SAMPLER1DARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, true, true); + } + | F16SAMPLER2DARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true, true); + } + | F16SAMPLERCUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, true); + } + | F16SAMPLERCUBEARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, true, true); + } + | ISAMPLER1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd1D); + } +GLSLANG_WEB_EXCLUDE_OFF + | ISAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D); + } + | ISAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd3D); + } + | ISAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdCube); + } + | ISAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, true); + } + | USAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D); + } + | USAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd3D); + } + | USAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdCube); + } +GLSLANG_WEB_EXCLUDE_ON + | ISAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd1D, true); + } + | ISAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdCube, true); + } + | USAMPLER1D { + 
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd1D); + } + | USAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd1D, true); + } + | USAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdCube, true); + } + | TEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdCube, true); + } + | ITEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdCube, true); + } + | UTEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdCube, true); + } +GLSLANG_WEB_EXCLUDE_OFF + | USAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, true); + } + | TEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D); + } + | TEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd3D); + } + | TEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, true); + } + | TEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdCube); + } + | ITEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D); + } + | ITEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd3D); + } + | ITEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdCube); + } + | ITEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, true); + } + | UTEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D); + } + | UTEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd3D); + } + | UTEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdCube); + } + | UTEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, true); + } + | SAMPLER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setPureSampler(false); + } + | SAMPLERSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setPureSampler(true); + } +GLSLANG_WEB_EXCLUDE_ON + | SAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdRect); + } + | SAMPLER2DRECTSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + 
$$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdRect, false, true); + } + | F16SAMPLER2DRECT { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdRect); + } + | F16SAMPLER2DRECTSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdRect, false, true); + } + | ISAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdRect); + } + | USAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdRect); + } + | SAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdBuffer); + } + | F16SAMPLERBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdBuffer); + } + | ISAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdBuffer); + } + | USAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdBuffer); + } + | SAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, false, false, true); + } + | F16SAMPLER2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, false, false, true); + } + | ISAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, false, false, true); + } + | USAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, false, false, true); + } + | SAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true, false, true); + } + | F16SAMPLER2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true, false, true); + } + | ISAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, true, false, true); + } + | USAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, true, false, true); + } + | TEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd1D); + } + | F16TEXTURE1D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, 
parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd1D); + } + | F16TEXTURE2D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D); + } + | F16TEXTURE3D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd3D); + } + | F16TEXTURECUBE { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdCube); + } + | TEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd1D, true); + } + | F16TEXTURE1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd1D, true); + } + | F16TEXTURE2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, true); + } + | F16TEXTURECUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdCube, true); + } + | ITEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd1D); + } + | ITEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd1D, true); + } + | UTEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd1D); + } + | UTEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd1D, true); + } + | TEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdRect); + } + | F16TEXTURE2DRECT { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdRect); + } + | ITEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdRect); + } + | UTEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdRect); + } + | TEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdBuffer); + } + | F16TEXTUREBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float texture", 
parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdBuffer); + } + | ITEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdBuffer); + } + | UTEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdBuffer); + } + | TEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, false, false, true); + } + | F16TEXTURE2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, false, false, true); + } + | ITEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, false, false, true); + } + | UTEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, false, false, true); + } + | TEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, true, false, true); + } + | F16TEXTURE2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, true, false, true); + } + | ITEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, true, false, true); + } + | UTEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, true, false, true); + } + | IMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd1D); + } + | F16IMAGE1D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd1D); + } + | IIMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd1D); + } + | UIMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd1D); + } + | IMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D); + } + | F16IMAGE2D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D); + } + | IIMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D); + } + | UIMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D); + } + | IMAGE3D { 
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd3D); + } + | F16IMAGE3D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd3D); + } + | IIMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd3D); + } + | UIMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd3D); + } + | IMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdRect); + } + | F16IMAGE2DRECT { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdRect); + } + | IIMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdRect); + } + | UIMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdRect); + } + | IMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdCube); + } + | F16IMAGECUBE { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdCube); + } + | IIMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdCube); + } + | UIMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdCube); + } + | IMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdBuffer); + } + | F16IMAGEBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdBuffer); + } + | IIMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdBuffer); + } + | UIMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdBuffer); + } + | IMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd1D, true); + } + | F16IMAGE1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd1D, true); + } + | IIMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd1D, true); + } + | UIMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = 
EbtSampler; + $$.sampler.setImage(EbtUint, Esd1D, true); + } + | IMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, true); + } + | F16IMAGE2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, true); + } + | IIMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, true); + } + | UIMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, true); + } + | IMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdCube, true); + } + | F16IMAGECUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdCube, true); + } + | IIMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdCube, true); + } + | UIMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdCube, true); + } + | IMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, false, false, true); + } + | F16IMAGE2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, false, false, true); + } + | IIMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, false, false, true); + } + | UIMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, false, false, true); + } + | IMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, true, false, true); + } + | F16IMAGE2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, true, false, true); + } + | IIMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, true, false, true); + } + | UIMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, true, false, true); + } + | SAMPLEREXTERNALOES { // GL_OES_EGL_image_external + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); + $$.sampler.external = true; + } + | SAMPLEREXTERNAL2DY2YEXT { // GL_EXT_YUV_target + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); 
+ $$.sampler.yuv = true; + } + | SUBPASSINPUT { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat); + } + | SUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat, true); + } + | F16SUBPASSINPUT { + parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat16); + } + | F16SUBPASSINPUTMS { + parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat16, true); + } + | ISUBPASSINPUT { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtInt); + } + | ISUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtInt, true); + } + | USUBPASSINPUT { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtUint); + } + | USUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtUint, true); + } + | FCOOPMATNV { + parseContext.fcoopmatCheck($1.loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.coopmat = true; + } + | ICOOPMATNV { + parseContext.intcoopmatCheck($1.loc, "icoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.coopmat = true; + } + | UCOOPMATNV { + parseContext.intcoopmatCheck($1.loc, "ucoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.coopmat = true; + } +GLSLANG_WEB_EXCLUDE_OFF + | struct_specifier { + $$ = $1; + $$.qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary; + parseContext.structTypeCheck($$.loc, $$); + } + | TYPE_NAME { + // + // This is for user defined type names. The lexical phase looked up the + // type. 
+ // + if (const TVariable* variable = ($1.symbol)->getAsVariable()) { + const TType& structure = variable->getType(); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtStruct; + $$.userDef = &structure; + } else + parseContext.error($1.loc, "expected type name", $1.string->c_str(), ""); + } + ; + +precision_qualifier + : HIGH_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "highp precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqHigh); + } + | MEDIUM_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "mediump precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqMedium); + } + | LOW_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "lowp precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqLow); + } + ; + +struct_specifier + : STRUCT IDENTIFIER LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + TType* structure = new TType($5, *$2.string); + parseContext.structArrayCheck($2.loc, *structure); + TVariable* userTypeDef = new TVariable($2.string, *structure, true); + if (! parseContext.symbolTable.insert(*userTypeDef)) + parseContext.error($2.loc, "redefinition", $2.string->c_str(), "struct"); + $$.init($1.loc); + $$.basicType = EbtStruct; + $$.userDef = structure; + --parseContext.structNestingLevel; + } + | STRUCT LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + TType* structure = new TType($4, TString("")); + $$.init($1.loc); + $$.basicType = EbtStruct; + $$.userDef = structure; + --parseContext.structNestingLevel; + } + ; + +struct_declaration_list + : struct_declaration { + $$ = $1; + } + | struct_declaration_list struct_declaration { + $$ = $1; + for (unsigned int i = 0; i < $2->size(); ++i) { + for (unsigned int j = 0; j < $$->size(); ++j) { + if ((*$$)[j].type->getFieldName() == (*$2)[i].type->getFieldName()) + parseContext.error((*$2)[i].loc, "duplicate member name:", "", (*$2)[i].type->getFieldName().c_str()); + } + $$->push_back((*$2)[i]); + } + } + ; + +struct_declaration + : type_specifier struct_declarator_list SEMICOLON { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + if (parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + + $$ = $2; + + parseContext.voidErrorCheck($1.loc, (*$2)[0].type->getFieldName(), $1.basicType); + parseContext.precisionQualifierCheck($1.loc, $1.basicType, $1.qualifier); + + for (unsigned int i = 0; i < $$->size(); ++i) { + TType type($1); + type.setFieldName((*$$)[i].type->getFieldName()); + type.transferArraySizes((*$$)[i].type->getArraySizes()); + type.copyArrayInnerSizes($1.arraySizes); + parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes()); + (*$$)[i].type->shallowCopy(type); + } + } + | type_qualifier type_specifier struct_declarator_list SEMICOLON { + if ($2.arraySizes) { + parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type"); + if 
(parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck($2.loc, *$2.arraySizes); + } + + $$ = $3; + + parseContext.memberQualifierCheck($1); + parseContext.voidErrorCheck($2.loc, (*$3)[0].type->getFieldName(), $2.basicType); + parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true); + parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier); + + for (unsigned int i = 0; i < $$->size(); ++i) { + TType type($2); + type.setFieldName((*$$)[i].type->getFieldName()); + type.transferArraySizes((*$$)[i].type->getArraySizes()); + type.copyArrayInnerSizes($2.arraySizes); + parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes()); + (*$$)[i].type->shallowCopy(type); + } + } + ; + +struct_declarator_list + : struct_declarator { + $$ = new TTypeList; + $$->push_back($1); + } + | struct_declarator_list COMMA struct_declarator { + $$->push_back($3); + } + ; + +struct_declarator + : IDENTIFIER { + $$.type = new TType(EbtVoid); + $$.loc = $1.loc; + $$.type->setFieldName(*$1.string); + } + | IDENTIFIER array_specifier { + parseContext.arrayOfArrayVersionCheck($1.loc, $2.arraySizes); + + $$.type = new TType(EbtVoid); + $$.loc = $1.loc; + $$.type->setFieldName(*$1.string); + $$.type->transferArraySizes($2.arraySizes); + } + ; + +initializer + : assignment_expression { + $$ = $1; + } +GLSLANG_WEB_EXCLUDE_ON + | LEFT_BRACE initializer_list RIGHT_BRACE { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile($1.loc, ~EEsProfile, initFeature); + parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + $$ = $2; + } + | LEFT_BRACE initializer_list COMMA RIGHT_BRACE { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile($1.loc, ~EEsProfile, initFeature); + parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + $$ = $2; + } +GLSLANG_WEB_EXCLUDE_OFF + ; + +GLSLANG_WEB_EXCLUDE_ON +initializer_list + : initializer { + $$ = parseContext.intermediate.growAggregate(0, $1, $1->getLoc()); + } + | initializer_list COMMA initializer { + $$ = parseContext.intermediate.growAggregate($1, $3); + } + ; +GLSLANG_WEB_EXCLUDE_OFF + +declaration_statement + : declaration { $$ = $1; } + ; + +statement + : compound_statement { $$ = $1; } + | simple_statement { $$ = $1; } + ; + +// Grammar Note: labeled statements for switch statements only; 'goto' is not supported. 
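A minimal GLSL sketch of what the struct and initializer productions above accept; the names Light and l are illustrative only, and the brace form on the last line is the "{ } style initializers" feature whose action requires a non-ES profile of at least version 420 or GL_ARB_shading_language_420pack:

    #version 420
    struct Light {                              // struct_specifier: STRUCT IDENTIFIER LEFT_BRACE ... RIGHT_BRACE
        vec3  position;                         // struct_declaration: type_specifier struct_declarator_list SEMICOLON
        float intensity[2];                     // struct_declarator: IDENTIFIER array_specifier
    };
    void main()
    {
        Light l = { vec3(0.0), { 1.0, 0.5 } };  // initializer: LEFT_BRACE initializer_list RIGHT_BRACE
    }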
+ +simple_statement + : declaration_statement { $$ = $1; } + | expression_statement { $$ = $1; } + | selection_statement { $$ = $1; } + | switch_statement { $$ = $1; } + | case_label { $$ = $1; } + | iteration_statement { $$ = $1; } + | jump_statement { $$ = $1; } +GLSLANG_WEB_EXCLUDE_ON + | demote_statement { $$ = $1; } +GLSLANG_WEB_EXCLUDE_OFF + ; + +GLSLANG_WEB_EXCLUDE_ON +demote_statement + : DEMOTE SEMICOLON { + parseContext.requireStage($1.loc, EShLangFragment, "demote"); + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_demote_to_helper_invocation, "demote"); + $$ = parseContext.intermediate.addBranch(EOpDemote, $1.loc); + } + ; +GLSLANG_WEB_EXCLUDE_OFF + +compound_statement + : LEFT_BRACE RIGHT_BRACE { $$ = 0; } + | LEFT_BRACE { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + } + statement_list { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + } + RIGHT_BRACE { + if ($3 && $3->getAsAggregate()) + $3->getAsAggregate()->setOperator(EOpSequence); + $$ = $3; + } + ; + +statement_no_new_scope + : compound_statement_no_new_scope { $$ = $1; } + | simple_statement { $$ = $1; } + ; + +statement_scoped + : { + ++parseContext.controlFlowNestingLevel; + } + compound_statement { + --parseContext.controlFlowNestingLevel; + $$ = $2; + } + | { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + simple_statement { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + $$ = $2; + } + +compound_statement_no_new_scope + // Statement that doesn't create a new scope, for selection_statement, iteration_statement + : LEFT_BRACE RIGHT_BRACE { + $$ = 0; + } + | LEFT_BRACE statement_list RIGHT_BRACE { + if ($2 && $2->getAsAggregate()) + $2->getAsAggregate()->setOperator(EOpSequence); + $$ = $2; + } + ; + +statement_list + : statement { + $$ = parseContext.intermediate.makeAggregate($1); + if ($1 && $1->getAsBranchNode() && ($1->getAsBranchNode()->getFlowOp() == EOpCase || + $1->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence(0, $1); + $$ = 0; // start a fresh subsequence for what's after this case + } + } + | statement_list statement { + if ($2 && $2->getAsBranchNode() && ($2->getAsBranchNode()->getFlowOp() == EOpCase || + $2->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence($1 ? 
$1->getAsAggregate() : 0, $2); + $$ = 0; // start a fresh subsequence for what's after this case + } else + $$ = parseContext.intermediate.growAggregate($1, $2); + } + ; + +expression_statement + : SEMICOLON { $$ = 0; } + | expression SEMICOLON { $$ = static_cast<TIntermNode*>($1); } + ; + +selection_statement + : selection_statement_nonattributed { + $$ = $1; + } +GLSLANG_WEB_EXCLUDE_ON + | attribute selection_statement_nonattributed { + parseContext.handleSelectionAttributes(*$1, $2); + $$ = $2; + } +GLSLANG_WEB_EXCLUDE_OFF + +selection_statement_nonattributed + : IF LEFT_PAREN expression RIGHT_PAREN selection_rest_statement { + parseContext.boolCheck($1.loc, $3); + $$ = parseContext.intermediate.addSelection($3, $5, $1.loc); + } + ; + +selection_rest_statement + : statement_scoped ELSE statement_scoped { + $$.node1 = $1; + $$.node2 = $3; + } + | statement_scoped { + $$.node1 = $1; + $$.node2 = 0; + } + ; + +condition + // In 1996 c++ draft, conditions can include single declarations + : expression { + $$ = $1; + parseContext.boolCheck($1->getLoc(), $1); + } + | fully_specified_type IDENTIFIER EQUAL initializer { + parseContext.boolCheck($2.loc, $1); + + TType type($1); + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4); + if (initNode) + $$ = initNode->getAsTyped(); + else + $$ = 0; + } + ; + +switch_statement + : switch_statement_nonattributed { + $$ = $1; + } +GLSLANG_WEB_EXCLUDE_ON + | attribute switch_statement_nonattributed { + parseContext.handleSwitchAttributes(*$1, $2); + $$ = $2; + } +GLSLANG_WEB_EXCLUDE_OFF + +switch_statement_nonattributed + : SWITCH LEFT_PAREN expression RIGHT_PAREN { + // start new switch sequence on the switch stack + ++parseContext.controlFlowNestingLevel; + ++parseContext.statementNestingLevel; + parseContext.switchSequenceStack.push_back(new TIntermSequence); + parseContext.switchLevel.push_back(parseContext.statementNestingLevel); + parseContext.symbolTable.push(); + } + LEFT_BRACE switch_statement_list RIGHT_BRACE { + $$ = parseContext.addSwitch($1.loc, $3, $7 ?
$7->getAsAggregate() : 0); + delete parseContext.switchSequenceStack.back(); + parseContext.switchSequenceStack.pop_back(); + parseContext.switchLevel.pop_back(); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + ; + +switch_statement_list + : /* nothing */ { + $$ = 0; + } + | statement_list { + $$ = $1; + } + ; + +case_label + : CASE expression COLON { + $$ = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error($1.loc, "cannot appear outside switch statement", "case", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error($1.loc, "cannot be nested inside control flow", "case", ""); + else { + parseContext.constantValueCheck($2, "case"); + parseContext.integerCheck($2, "case"); + $$ = parseContext.intermediate.addBranch(EOpCase, $2, $1.loc); + } + } + | DEFAULT COLON { + $$ = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error($1.loc, "cannot appear outside switch statement", "default", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error($1.loc, "cannot be nested inside control flow", "default", ""); + else + $$ = parseContext.intermediate.addBranch(EOpDefault, $1.loc); + } + ; + +iteration_statement + : iteration_statement_nonattributed { + $$ = $1; + } +GLSLANG_WEB_EXCLUDE_ON + | attribute iteration_statement_nonattributed { + parseContext.handleLoopAttributes(*$1, $2); + $$ = $2; + } +GLSLANG_WEB_EXCLUDE_OFF + +iteration_statement_nonattributed + : WHILE LEFT_PAREN { + if (! parseContext.limits.whileLoops) + parseContext.error($1.loc, "while loops not available", "limitation", ""); + parseContext.symbolTable.push(); + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + condition RIGHT_PAREN statement_no_new_scope { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.addLoop($6, $4, 0, true, $1.loc); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + | DO { + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + statement WHILE LEFT_PAREN expression RIGHT_PAREN SEMICOLON { + if (! parseContext.limits.whileLoops) + parseContext.error($1.loc, "do-while loops not available", "limitation", ""); + + parseContext.boolCheck($8.loc, $6); + + $$ = parseContext.intermediate.addLoop($3, $6, 0, false, $4.loc); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + | FOR LEFT_PAREN { + parseContext.symbolTable.push(); + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + for_init_statement for_rest_statement RIGHT_PAREN statement_no_new_scope { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.makeAggregate($4, $2.loc); + TIntermLoop* forLoop = parseContext.intermediate.addLoop($7, reinterpret_cast<TIntermTyped*>($5.node1), reinterpret_cast<TIntermTyped*>($5.node2), true, $1.loc); + if (!
parseContext.limits.nonInductiveForLoops) + parseContext.inductiveLoopCheck($1.loc, $4, forLoop); + $$ = parseContext.intermediate.growAggregate($$, forLoop, $1.loc); + $$->getAsAggregate()->setOperator(EOpSequence); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + ; + +for_init_statement + : expression_statement { + $$ = $1; + } + | declaration_statement { + $$ = $1; + } + ; + +conditionopt + : condition { + $$ = $1; + } + | /* May be null */ { + $$ = 0; + } + ; + +for_rest_statement + : conditionopt SEMICOLON { + $$.node1 = $1; + $$.node2 = 0; + } + | conditionopt SEMICOLON expression { + $$.node1 = $1; + $$.node2 = $3; + } + ; + +jump_statement + : CONTINUE SEMICOLON { + if (parseContext.loopNestingLevel <= 0) + parseContext.error($1.loc, "continue statement only allowed in loops", "", ""); + $$ = parseContext.intermediate.addBranch(EOpContinue, $1.loc); + } + | BREAK SEMICOLON { + if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0) + parseContext.error($1.loc, "break statement only allowed in switch and loops", "", ""); + $$ = parseContext.intermediate.addBranch(EOpBreak, $1.loc); + } + | RETURN SEMICOLON { + $$ = parseContext.intermediate.addBranch(EOpReturn, $1.loc); + if (parseContext.currentFunctionType->getBasicType() != EbtVoid) + parseContext.error($1.loc, "non-void function must return a value", "return", ""); + if (parseContext.inMain) + parseContext.postEntryPointReturn = true; + } + | RETURN expression SEMICOLON { + $$ = parseContext.handleReturnValue($1.loc, $2); + } + | DISCARD SEMICOLON { + parseContext.requireStage($1.loc, EShLangFragment, "discard"); + $$ = parseContext.intermediate.addBranch(EOpKill, $1.loc); + } + ; + +// Grammar Note: No 'goto'. Gotos are not supported. + +translation_unit + : external_declaration { + $$ = $1; + parseContext.intermediate.setTreeRoot($$); + } + | translation_unit external_declaration { + if ($2 != nullptr) { + $$ = parseContext.intermediate.growAggregate($1, $2); + parseContext.intermediate.setTreeRoot($$); + } + } + ; + +external_declaration + : function_definition { + $$ = $1; + } + | declaration { + $$ = $1; + } +GLSLANG_WEB_EXCLUDE_ON + | SEMICOLON { + parseContext.requireProfile($1.loc, ~EEsProfile, "extraneous semicolon"); + parseContext.profileRequires($1.loc, ~EEsProfile, 460, nullptr, "extraneous semicolon"); + $$ = nullptr; + } +GLSLANG_WEB_EXCLUDE_OFF + ; + +function_definition + : function_prototype { + $1.function = parseContext.handleFunctionDeclarator($1.loc, *$1.function, false /* not prototype */); + $1.intermNode = parseContext.handleFunctionDefinition($1.loc, *$1.function); + } + compound_statement_no_new_scope { + // May be best done as post process phase on intermediate code + if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! parseContext.functionReturnsValue) + parseContext.error($1.loc, "function does not return a value:", "", $1.function->getName().c_str()); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.growAggregate($1.intermNode, $3); + parseContext.intermediate.setAggregateOperator($$, EOpFunction, $1.function->getType(), $1.loc); + $$->getAsAggregate()->setName($1.function->getMangledName().c_str()); + + // store the pragma information for debug and optimize and other vendor specific + // information. 
This information can be queried from the parse tree + $$->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize); + $$->getAsAggregate()->setDebug(parseContext.contextPragma.debug); + $$->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable); + } + ; + +GLSLANG_WEB_EXCLUDE_ON +attribute + : LEFT_BRACKET LEFT_BRACKET attribute_list RIGHT_BRACKET RIGHT_BRACKET { + $$ = $3; + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_control_flow_attributes, "attribute"); + } + +attribute_list + : single_attribute { + $$ = $1; + } + | attribute_list COMMA single_attribute { + $$ = parseContext.mergeAttributes($1, $3); + } + +single_attribute + : IDENTIFIER { + $$ = parseContext.makeAttributes(*$1.string); + } + | IDENTIFIER LEFT_PAREN constant_expression RIGHT_PAREN { + $$ = parseContext.makeAttributes(*$1.string, $3); + } +GLSLANG_WEB_EXCLUDE_OFF + +%% diff --git a/dep/glslang/glslang/MachineIndependent/glslang.y b/dep/glslang/glslang/MachineIndependent/glslang.y new file mode 100644 index 000000000..e33d7d1ca --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/glslang.y @@ -0,0 +1,3888 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2019 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do not edit the .y file, only edit the .m4 file. +// The .y bison file is not a source file, it is a derivative of the .m4 file. +// The m4 file needs to be processed by m4 to generate the .y bison file. +// +// Code sandwiched between a pair: +// +// GLSLANG_WEB_EXCLUDE_ON +// ... +// ... +// ... 
+// GLSLANG_WEB_EXCLUDE_OFF +// +// Will be excluded from the grammar when m4 is executed as: +// +// m4 -P -DGLSLANG_WEB +// +// It will be included when m4 is executed as: +// +// m4 -P +// + + + + +/** + * This is bison grammar and productions for parsing all versions of the + * GLSL shading languages. + */ +%{ + +/* Based on: +ANSI C Yacc grammar + +In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a +matching Lex specification) for the April 30, 1985 draft version of the +ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that +original, as mentioned in the answer to question 17.25 of the comp.lang.c +FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z. + +I intend to keep this version as close to the current C Standard grammar as +possible; please let me know if you discover discrepancies. + +Jutta Degener, 1995 +*/ + +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "../Public/ShaderLang.h" +#include "attribute.h" + +using namespace glslang; + +%} + +%define parse.error verbose + +%union { + struct { + glslang::TSourceLoc loc; + union { + glslang::TString *string; + int i; + unsigned int u; + long long i64; + unsigned long long u64; + bool b; + double d; + }; + glslang::TSymbol* symbol; + } lex; + struct { + glslang::TSourceLoc loc; + glslang::TOperator op; + union { + TIntermNode* intermNode; + glslang::TIntermNodePair nodePair; + glslang::TIntermTyped* intermTypedNode; + glslang::TAttributes* attributes; + }; + union { + glslang::TPublicType type; + glslang::TFunction* function; + glslang::TParameter param; + glslang::TTypeLoc typeLine; + glslang::TTypeList* typeList; + glslang::TArraySizes* arraySizes; + glslang::TIdentifierList* identifierList; + }; + glslang::TArraySizes* typeParameters; + } interm; +} + +%{ + +/* windows only pragma */ +#ifdef _MSC_VER + #pragma warning(disable : 4065) + #pragma warning(disable : 4127) + #pragma warning(disable : 4244) +#endif + +#define parseContext (*pParseContext) +#define yyerror(context, msg) context->parserError(msg) + +extern int yylex(YYSTYPE*, TParseContext&); + +%} + +%parse-param {glslang::TParseContext* pParseContext} +%lex-param {parseContext} +%pure-parser // enable thread safety +%expect 1 // One shift reduce conflict because of if | else + +%token CONST BOOL INT UINT FLOAT +%token BVEC2 BVEC3 BVEC4 +%token IVEC2 IVEC3 IVEC4 +%token UVEC2 UVEC3 UVEC4 +%token VEC2 VEC3 VEC4 +%token MAT2 MAT3 MAT4 +%token MAT2X2 MAT2X3 MAT2X4 +%token MAT3X2 MAT3X3 MAT3X4 +%token MAT4X2 MAT4X3 MAT4X4 + +// combined image/sampler +%token SAMPLER2D SAMPLER3D SAMPLERCUBE SAMPLER2DSHADOW +%token SAMPLERCUBESHADOW SAMPLER2DARRAY +%token SAMPLER2DARRAYSHADOW ISAMPLER2D ISAMPLER3D ISAMPLERCUBE +%token ISAMPLER2DARRAY USAMPLER2D USAMPLER3D +%token USAMPLERCUBE USAMPLER2DARRAY + +// separate image/sampler +%token SAMPLER SAMPLERSHADOW +%token TEXTURE2D TEXTURE3D TEXTURECUBE TEXTURE2DARRAY +%token ITEXTURE2D ITEXTURE3D ITEXTURECUBE ITEXTURE2DARRAY +%token UTEXTURE2D UTEXTURE3D UTEXTURECUBE UTEXTURE2DARRAY + + + +%token ATTRIBUTE VARYING +%token FLOAT16_T FLOAT32_T DOUBLE FLOAT64_T +%token INT64_T UINT64_T INT32_T UINT32_T INT16_T UINT16_T INT8_T UINT8_T +%token I64VEC2 I64VEC3 I64VEC4 +%token U64VEC2 U64VEC3 U64VEC4 +%token I32VEC2 I32VEC3 I32VEC4 +%token U32VEC2 U32VEC3 U32VEC4 +%token I16VEC2 I16VEC3 I16VEC4 +%token U16VEC2 U16VEC3 U16VEC4 +%token I8VEC2 I8VEC3 I8VEC4 +%token U8VEC2 U8VEC3 U8VEC4 +%token DVEC2 DVEC3 DVEC4 DMAT2 DMAT3 DMAT4 +%token F16VEC2 F16VEC3 F16VEC4 F16MAT2 
F16MAT3 F16MAT4 +%token F32VEC2 F32VEC3 F32VEC4 F32MAT2 F32MAT3 F32MAT4 +%token F64VEC2 F64VEC3 F64VEC4 F64MAT2 F64MAT3 F64MAT4 +%token DMAT2X2 DMAT2X3 DMAT2X4 +%token DMAT3X2 DMAT3X3 DMAT3X4 +%token DMAT4X2 DMAT4X3 DMAT4X4 +%token F16MAT2X2 F16MAT2X3 F16MAT2X4 +%token F16MAT3X2 F16MAT3X3 F16MAT3X4 +%token F16MAT4X2 F16MAT4X3 F16MAT4X4 +%token F32MAT2X2 F32MAT2X3 F32MAT2X4 +%token F32MAT3X2 F32MAT3X3 F32MAT3X4 +%token F32MAT4X2 F32MAT4X3 F32MAT4X4 +%token F64MAT2X2 F64MAT2X3 F64MAT2X4 +%token F64MAT3X2 F64MAT3X3 F64MAT3X4 +%token F64MAT4X2 F64MAT4X3 F64MAT4X4 +%token ATOMIC_UINT +%token ACCSTRUCTNV +%token ACCSTRUCTEXT +%token RAYQUERYEXT +%token FCOOPMATNV ICOOPMATNV UCOOPMATNV + +// combined image/sampler +%token SAMPLERCUBEARRAY SAMPLERCUBEARRAYSHADOW +%token ISAMPLERCUBEARRAY USAMPLERCUBEARRAY +%token SAMPLER1D SAMPLER1DARRAY SAMPLER1DARRAYSHADOW ISAMPLER1D SAMPLER1DSHADOW +%token SAMPLER2DRECT SAMPLER2DRECTSHADOW ISAMPLER2DRECT USAMPLER2DRECT +%token SAMPLERBUFFER ISAMPLERBUFFER USAMPLERBUFFER +%token SAMPLER2DMS ISAMPLER2DMS USAMPLER2DMS +%token SAMPLER2DMSARRAY ISAMPLER2DMSARRAY USAMPLER2DMSARRAY +%token SAMPLEREXTERNALOES +%token SAMPLEREXTERNAL2DY2YEXT +%token ISAMPLER1DARRAY USAMPLER1D USAMPLER1DARRAY +%token F16SAMPLER1D F16SAMPLER2D F16SAMPLER3D F16SAMPLER2DRECT F16SAMPLERCUBE +%token F16SAMPLER1DARRAY F16SAMPLER2DARRAY F16SAMPLERCUBEARRAY +%token F16SAMPLERBUFFER F16SAMPLER2DMS F16SAMPLER2DMSARRAY +%token F16SAMPLER1DSHADOW F16SAMPLER2DSHADOW F16SAMPLER1DARRAYSHADOW F16SAMPLER2DARRAYSHADOW +%token F16SAMPLER2DRECTSHADOW F16SAMPLERCUBESHADOW F16SAMPLERCUBEARRAYSHADOW + +// images +%token IMAGE1D IIMAGE1D UIMAGE1D IMAGE2D IIMAGE2D +%token UIMAGE2D IMAGE3D IIMAGE3D UIMAGE3D +%token IMAGE2DRECT IIMAGE2DRECT UIMAGE2DRECT +%token IMAGECUBE IIMAGECUBE UIMAGECUBE +%token IMAGEBUFFER IIMAGEBUFFER UIMAGEBUFFER +%token IMAGE1DARRAY IIMAGE1DARRAY UIMAGE1DARRAY +%token IMAGE2DARRAY IIMAGE2DARRAY UIMAGE2DARRAY +%token IMAGECUBEARRAY IIMAGECUBEARRAY UIMAGECUBEARRAY +%token IMAGE2DMS IIMAGE2DMS UIMAGE2DMS +%token IMAGE2DMSARRAY IIMAGE2DMSARRAY UIMAGE2DMSARRAY + +%token F16IMAGE1D F16IMAGE2D F16IMAGE3D F16IMAGE2DRECT +%token F16IMAGECUBE F16IMAGE1DARRAY F16IMAGE2DARRAY F16IMAGECUBEARRAY +%token F16IMAGEBUFFER F16IMAGE2DMS F16IMAGE2DMSARRAY + +// texture without sampler +%token TEXTURECUBEARRAY ITEXTURECUBEARRAY UTEXTURECUBEARRAY +%token TEXTURE1D ITEXTURE1D UTEXTURE1D +%token TEXTURE1DARRAY ITEXTURE1DARRAY UTEXTURE1DARRAY +%token TEXTURE2DRECT ITEXTURE2DRECT UTEXTURE2DRECT +%token TEXTUREBUFFER ITEXTUREBUFFER UTEXTUREBUFFER +%token TEXTURE2DMS ITEXTURE2DMS UTEXTURE2DMS +%token TEXTURE2DMSARRAY ITEXTURE2DMSARRAY UTEXTURE2DMSARRAY + +%token F16TEXTURE1D F16TEXTURE2D F16TEXTURE3D F16TEXTURE2DRECT F16TEXTURECUBE +%token F16TEXTURE1DARRAY F16TEXTURE2DARRAY F16TEXTURECUBEARRAY +%token F16TEXTUREBUFFER F16TEXTURE2DMS F16TEXTURE2DMSARRAY + +// input attachments +%token SUBPASSINPUT SUBPASSINPUTMS ISUBPASSINPUT ISUBPASSINPUTMS USUBPASSINPUT USUBPASSINPUTMS +%token F16SUBPASSINPUT F16SUBPASSINPUTMS + + + +%token LEFT_OP RIGHT_OP +%token INC_OP DEC_OP LE_OP GE_OP EQ_OP NE_OP +%token AND_OP OR_OP XOR_OP MUL_ASSIGN DIV_ASSIGN ADD_ASSIGN +%token MOD_ASSIGN LEFT_ASSIGN RIGHT_ASSIGN AND_ASSIGN XOR_ASSIGN OR_ASSIGN +%token SUB_ASSIGN +%token STRING_LITERAL + +%token LEFT_PAREN RIGHT_PAREN LEFT_BRACKET RIGHT_BRACKET LEFT_BRACE RIGHT_BRACE DOT +%token COMMA COLON EQUAL SEMICOLON BANG DASH TILDE PLUS STAR SLASH PERCENT +%token LEFT_ANGLE RIGHT_ANGLE VERTICAL_BAR CARET AMPERSAND QUESTION + +%token INVARIANT 
+%token HIGH_PRECISION MEDIUM_PRECISION LOW_PRECISION PRECISION +%token PACKED RESOURCE SUPERP + +%token FLOATCONSTANT INTCONSTANT UINTCONSTANT BOOLCONSTANT +%token IDENTIFIER TYPE_NAME +%token CENTROID IN OUT INOUT +%token STRUCT VOID WHILE +%token BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT +%token UNIFORM SHARED BUFFER +%token FLAT SMOOTH LAYOUT + + +%token DOUBLECONSTANT INT16CONSTANT UINT16CONSTANT FLOAT16CONSTANT INT32CONSTANT UINT32CONSTANT +%token INT64CONSTANT UINT64CONSTANT +%token SUBROUTINE DEMOTE +%token PAYLOADNV PAYLOADINNV HITATTRNV CALLDATANV CALLDATAINNV +%token PAYLOADEXT PAYLOADINEXT HITATTREXT CALLDATAEXT CALLDATAINEXT +%token PATCH SAMPLE NONUNIFORM +%token COHERENT VOLATILE RESTRICT READONLY WRITEONLY DEVICECOHERENT QUEUEFAMILYCOHERENT WORKGROUPCOHERENT +%token SUBGROUPCOHERENT NONPRIVATE SHADERCALLCOHERENT +%token NOPERSPECTIVE EXPLICITINTERPAMD PERVERTEXNV PERPRIMITIVENV PERVIEWNV PERTASKNV +%token PRECISE + + +%type <interm> assignment_operator unary_operator +%type <interm.intermTypedNode> variable_identifier primary_expression postfix_expression +%type <interm.intermTypedNode> expression integer_expression assignment_expression +%type <interm.intermTypedNode> unary_expression multiplicative_expression additive_expression +%type <interm.intermTypedNode> relational_expression equality_expression +%type <interm.intermTypedNode> conditional_expression constant_expression +%type <interm.intermTypedNode> logical_or_expression logical_xor_expression logical_and_expression +%type <interm.intermTypedNode> shift_expression and_expression exclusive_or_expression inclusive_or_expression +%type <interm.intermTypedNode> function_call initializer condition conditionopt + +%type <interm.intermNode> translation_unit function_definition +%type <interm.intermNode> statement simple_statement +%type <interm.intermNode> statement_list switch_statement_list compound_statement +%type <interm.intermNode> declaration_statement selection_statement selection_statement_nonattributed expression_statement +%type <interm.intermNode> switch_statement switch_statement_nonattributed case_label +%type <interm.intermNode> declaration external_declaration +%type <interm.intermNode> for_init_statement compound_statement_no_new_scope +%type <interm.nodePair> selection_rest_statement for_rest_statement +%type <interm.intermNode> iteration_statement iteration_statement_nonattributed jump_statement statement_no_new_scope statement_scoped +%type <interm> single_declaration init_declarator_list + +%type <interm> parameter_declaration parameter_declarator parameter_type_specifier + +%type <interm> array_specifier +%type <interm.type> invariant_qualifier interpolation_qualifier storage_qualifier precision_qualifier +%type <interm.type> layout_qualifier layout_qualifier_id_list layout_qualifier_id + +%type <interm.typeParameters> type_parameter_specifier +%type <interm.typeParameters> type_parameter_specifier_opt +%type <interm.typeParameters> type_parameter_specifier_list + +%type <interm.type> type_qualifier fully_specified_type type_specifier +%type <interm.type> single_type_qualifier +%type <interm.type> type_specifier_nonarray +%type <interm.type> struct_specifier +%type <interm.typeLine> struct_declarator +%type <interm.typeList> struct_declarator_list struct_declaration struct_declaration_list +%type <interm.type> block_structure +%type <interm.function> function_header function_declarator +%type <interm.function> function_header_with_parameters +%type <interm> function_call_header_with_parameters function_call_header_no_parameters function_call_generic function_prototype +%type <interm> function_call_or_method function_identifier function_call_header + +%type <interm.identifierList> identifier_list + + +%type <interm.type> precise_qualifier non_uniform_qualifier +%type <interm.identifierList> type_name_list +%type <interm.attributes> attribute attribute_list single_attribute +%type <interm.intermNode> demote_statement +%type <interm.intermTypedNode> initializer_list + + +%start translation_unit +%% + +variable_identifier + : IDENTIFIER { + $$ = parseContext.handleVariable($1.loc, $1.symbol, $1.string); + } + ; + +primary_expression + : variable_identifier { + $$ = $1; + } + | LEFT_PAREN expression RIGHT_PAREN { + $$ = $2; + if ($$->getAsConstantUnion()) +
$$->getAsConstantUnion()->setExpression(); + } + | FLOATCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat, $1.loc, true); + } + | INTCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true); + } + | UINTCONSTANT { + parseContext.fullIntegerCheck($1.loc, "unsigned literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true); + } + | BOOLCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.b, $1.loc, true); + } + + | STRING_LITERAL { + $$ = parseContext.intermediate.addConstantUnion($1.string, $1.loc, true); + } + | INT32CONSTANT { + parseContext.explicitInt32Check($1.loc, "32-bit signed literal"); + $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true); + } + | UINT32CONSTANT { + parseContext.explicitInt32Check($1.loc, "32-bit signed literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true); + } + | INT64CONSTANT { + parseContext.int64Check($1.loc, "64-bit integer literal"); + $$ = parseContext.intermediate.addConstantUnion($1.i64, $1.loc, true); + } + | UINT64CONSTANT { + parseContext.int64Check($1.loc, "64-bit unsigned integer literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u64, $1.loc, true); + } + | INT16CONSTANT { + parseContext.explicitInt16Check($1.loc, "16-bit integer literal"); + $$ = parseContext.intermediate.addConstantUnion((short)$1.i, $1.loc, true); + } + | UINT16CONSTANT { + parseContext.explicitInt16Check($1.loc, "16-bit unsigned integer literal"); + $$ = parseContext.intermediate.addConstantUnion((unsigned short)$1.u, $1.loc, true); + } + | DOUBLECONSTANT { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double literal"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double literal"); + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtDouble, $1.loc, true); + } + | FLOAT16CONSTANT { + parseContext.float16Check($1.loc, "half float literal"); + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat16, $1.loc, true); + } + + ; + +postfix_expression + : primary_expression { + $$ = $1; + } + | postfix_expression LEFT_BRACKET integer_expression RIGHT_BRACKET { + $$ = parseContext.handleBracketDereference($2.loc, $1, $3); + } + | function_call { + $$ = $1; + } + | postfix_expression DOT IDENTIFIER { + $$ = parseContext.handleDotDereference($3.loc, $1, *$3.string); + } + | postfix_expression INC_OP { + parseContext.variableCheck($1); + parseContext.lValueErrorCheck($2.loc, "++", $1); + $$ = parseContext.handleUnaryMath($2.loc, "++", EOpPostIncrement, $1); + } + | postfix_expression DEC_OP { + parseContext.variableCheck($1); + parseContext.lValueErrorCheck($2.loc, "--", $1); + $$ = parseContext.handleUnaryMath($2.loc, "--", EOpPostDecrement, $1); + } + ; + +integer_expression + : expression { + parseContext.integerCheck($1, "[]"); + $$ = $1; + } + ; + +function_call + : function_call_or_method { + $$ = parseContext.handleFunctionCall($1.loc, $1.function, $1.intermNode); + delete $1.function; + } + ; + +function_call_or_method + : function_call_generic { + $$ = $1; + } + ; + +function_call_generic + : function_call_header_with_parameters RIGHT_PAREN { + $$ = $1; + $$.loc = $2.loc; + } + | function_call_header_no_parameters RIGHT_PAREN { + $$ = $1; + $$.loc = $2.loc; + } + ; + +function_call_header_no_parameters + : function_call_header VOID { + $$ = $1; + } + | function_call_header { + $$ = $1; + } + ; + +function_call_header_with_parameters + : function_call_header 
assignment_expression { + TParameter param = { 0, new TType }; + param.type->shallowCopy($2->getType()); + $1.function->addParameter(param); + $$.function = $1.function; + $$.intermNode = $2; + } + | function_call_header_with_parameters COMMA assignment_expression { + TParameter param = { 0, new TType }; + param.type->shallowCopy($3->getType()); + $1.function->addParameter(param); + $$.function = $1.function; + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, $3, $2.loc); + } + ; + +function_call_header + : function_identifier LEFT_PAREN { + $$ = $1; + } + ; + +// Grammar Note: Constructors look like functions, but are recognized as types. + +function_identifier + : type_specifier { + // Constructor + $$.intermNode = 0; + $$.function = parseContext.handleConstructorCall($1.loc, $1); + } + | postfix_expression { + // + // Should be a method or subroutine call, but we haven't recognized the arguments yet. + // + $$.function = 0; + $$.intermNode = 0; + + TIntermMethod* method = $1->getAsMethodNode(); + if (method) { + $$.function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength); + $$.intermNode = method->getObject(); + } else { + TIntermSymbol* symbol = $1->getAsSymbolNode(); + if (symbol) { + parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName()); + TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid)); + $$.function = function; + } else + parseContext.error($1->getLoc(), "function call, method, or subroutine call expected", "", ""); + } + + if ($$.function == 0) { + // error recover + TString* empty = NewPoolTString(""); + $$.function = new TFunction(empty, TType(EbtVoid), EOpNull); + } + } + + | non_uniform_qualifier { + // Constructor + $$.intermNode = 0; + $$.function = parseContext.handleConstructorCall($1.loc, $1); + } + + ; + +unary_expression + : postfix_expression { + parseContext.variableCheck($1); + $$ = $1; + if (TIntermMethod* method = $1->getAsMethodNode()) + parseContext.error($1->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), ""); + } + | INC_OP unary_expression { + parseContext.lValueErrorCheck($1.loc, "++", $2); + $$ = parseContext.handleUnaryMath($1.loc, "++", EOpPreIncrement, $2); + } + | DEC_OP unary_expression { + parseContext.lValueErrorCheck($1.loc, "--", $2); + $$ = parseContext.handleUnaryMath($1.loc, "--", EOpPreDecrement, $2); + } + | unary_operator unary_expression { + if ($1.op != EOpNull) { + char errorOp[2] = {0, 0}; + switch($1.op) { + case EOpNegative: errorOp[0] = '-'; break; + case EOpLogicalNot: errorOp[0] = '!'; break; + case EOpBitwiseNot: errorOp[0] = '~'; break; + default: break; // some compilers want this + } + $$ = parseContext.handleUnaryMath($1.loc, errorOp, $1.op, $2); + } else { + $$ = $2; + if ($$->getAsConstantUnion()) + $$->getAsConstantUnion()->setExpression(); + } + } + ; +// Grammar Note: No traditional style type casts. + +unary_operator + : PLUS { $$.loc = $1.loc; $$.op = EOpNull; } + | DASH { $$.loc = $1.loc; $$.op = EOpNegative; } + | BANG { $$.loc = $1.loc; $$.op = EOpLogicalNot; } + | TILDE { $$.loc = $1.loc; $$.op = EOpBitwiseNot; + parseContext.fullIntegerCheck($1.loc, "bitwise not"); } + ; +// Grammar Note: No '*' or '&' unary ops. Pointers are not supported. 
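A minimal GLSL sketch (the identifiers a, n, and v are illustrative) of the paths through function_identifier above: vec4(1.0) enters as a type_specifier and is turned into a constructor call, while a.length() arrives as a postfix_expression whose method node yields an EOpArrayLength TFunction; -v exercises unary_operator DASH (EOpNegative), since per the grammar notes there are no C-style casts or pointer operators:

    #version 450
    void main()
    {
        float a[3];
        int   n = a.length();  // postfix_expression path in function_identifier: method -> EOpArrayLength
        vec4  v = vec4(1.0);   // type_specifier path: parseContext.handleConstructorCall
        v = -v;                // unary_operator DASH -> EOpNegative via handleUnaryMath
    }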
+ +multiplicative_expression + : unary_expression { $$ = $1; } + | multiplicative_expression STAR unary_expression { + $$ = parseContext.handleBinaryMath($2.loc, "*", EOpMul, $1, $3); + if ($$ == 0) + $$ = $1; + } + | multiplicative_expression SLASH unary_expression { + $$ = parseContext.handleBinaryMath($2.loc, "/", EOpDiv, $1, $3); + if ($$ == 0) + $$ = $1; + } + | multiplicative_expression PERCENT unary_expression { + parseContext.fullIntegerCheck($2.loc, "%"); + $$ = parseContext.handleBinaryMath($2.loc, "%", EOpMod, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +additive_expression + : multiplicative_expression { $$ = $1; } + | additive_expression PLUS multiplicative_expression { + $$ = parseContext.handleBinaryMath($2.loc, "+", EOpAdd, $1, $3); + if ($$ == 0) + $$ = $1; + } + | additive_expression DASH multiplicative_expression { + $$ = parseContext.handleBinaryMath($2.loc, "-", EOpSub, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +shift_expression + : additive_expression { $$ = $1; } + | shift_expression LEFT_OP additive_expression { + parseContext.fullIntegerCheck($2.loc, "bit shift left"); + $$ = parseContext.handleBinaryMath($2.loc, "<<", EOpLeftShift, $1, $3); + if ($$ == 0) + $$ = $1; + } + | shift_expression RIGHT_OP additive_expression { + parseContext.fullIntegerCheck($2.loc, "bit shift right"); + $$ = parseContext.handleBinaryMath($2.loc, ">>", EOpRightShift, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +relational_expression + : shift_expression { $$ = $1; } + | relational_expression LEFT_ANGLE shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, "<", EOpLessThan, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression RIGHT_ANGLE shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, ">", EOpGreaterThan, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression LE_OP shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, "<=", EOpLessThanEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression GE_OP shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, ">=", EOpGreaterThanEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +equality_expression + : relational_expression { $$ = $1; } + | equality_expression EQ_OP relational_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison"); + parseContext.opaqueCheck($2.loc, $1->getType(), "=="); + parseContext.specializationCheck($2.loc, $1->getType(), "=="); + parseContext.referenceCheck($2.loc, $1->getType(), "=="); + $$ = parseContext.handleBinaryMath($2.loc, "==", EOpEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | equality_expression NE_OP relational_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison"); + parseContext.opaqueCheck($2.loc, $1->getType(), "!="); + parseContext.specializationCheck($2.loc, $1->getType(), "!="); + parseContext.referenceCheck($2.loc, $1->getType(), "!="); + $$ = parseContext.handleBinaryMath($2.loc, "!=", EOpNotEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +and_expression + : equality_expression { $$ = $1; } + | and_expression AMPERSAND equality_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise and"); + $$ = 
parseContext.handleBinaryMath($2.loc, "&", EOpAnd, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +exclusive_or_expression + : and_expression { $$ = $1; } + | exclusive_or_expression CARET and_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise exclusive or"); + $$ = parseContext.handleBinaryMath($2.loc, "^", EOpExclusiveOr, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +inclusive_or_expression + : exclusive_or_expression { $$ = $1; } + | inclusive_or_expression VERTICAL_BAR exclusive_or_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise inclusive or"); + $$ = parseContext.handleBinaryMath($2.loc, "|", EOpInclusiveOr, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +logical_and_expression + : inclusive_or_expression { $$ = $1; } + | logical_and_expression AND_OP inclusive_or_expression { + $$ = parseContext.handleBinaryMath($2.loc, "&&", EOpLogicalAnd, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +logical_xor_expression + : logical_and_expression { $$ = $1; } + | logical_xor_expression XOR_OP logical_and_expression { + $$ = parseContext.handleBinaryMath($2.loc, "^^", EOpLogicalXor, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +logical_or_expression + : logical_xor_expression { $$ = $1; } + | logical_or_expression OR_OP logical_xor_expression { + $$ = parseContext.handleBinaryMath($2.loc, "||", EOpLogicalOr, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +conditional_expression + : logical_or_expression { $$ = $1; } + | logical_or_expression QUESTION { + ++parseContext.controlFlowNestingLevel; + } + expression COLON assignment_expression { + --parseContext.controlFlowNestingLevel; + parseContext.boolCheck($2.loc, $1); + parseContext.rValueErrorCheck($2.loc, "?", $1); + parseContext.rValueErrorCheck($5.loc, ":", $4); + parseContext.rValueErrorCheck($5.loc, ":", $6); + $$ = parseContext.intermediate.addSelection($1, $4, $6, $2.loc); + if ($$ == 0) { + parseContext.binaryOpError($2.loc, ":", $4->getCompleteString(), $6->getCompleteString()); + $$ = $6; + } + } + ; + +assignment_expression + : conditional_expression { $$ = $1; } + | unary_expression assignment_operator assignment_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array assignment"); + parseContext.opaqueCheck($2.loc, $1->getType(), "="); + parseContext.storage16BitAssignmentCheck($2.loc, $1->getType(), "="); + parseContext.specializationCheck($2.loc, $1->getType(), "="); + parseContext.lValueErrorCheck($2.loc, "assign", $1); + parseContext.rValueErrorCheck($2.loc, "assign", $3); + $$ = parseContext.intermediate.addAssign($2.op, $1, $3, $2.loc); + if ($$ == 0) { + parseContext.assignError($2.loc, "assign", $1->getCompleteString(), $3->getCompleteString()); + $$ = $1; + } + } + ; + +assignment_operator + : EQUAL { + $$.loc = $1.loc; + $$.op = EOpAssign; + } + | MUL_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpMulAssign; + } + | DIV_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpDivAssign; + } + | MOD_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "%="); + $$.loc = $1.loc; + $$.op = EOpModAssign; + } + | ADD_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpAddAssign; + } + | SUB_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpSubAssign; + } + | LEFT_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bit-shift left assign"); + $$.loc = $1.loc; $$.op = EOpLeftShiftAssign; + } + | RIGHT_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bit-shift right assign"); + 
$$.loc = $1.loc; $$.op = EOpRightShiftAssign; + } + | AND_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-and assign"); + $$.loc = $1.loc; $$.op = EOpAndAssign; + } + | XOR_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-xor assign"); + $$.loc = $1.loc; $$.op = EOpExclusiveOrAssign; + } + | OR_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-or assign"); + $$.loc = $1.loc; $$.op = EOpInclusiveOrAssign; + } + ; + +expression + : assignment_expression { + $$ = $1; + } + | expression COMMA assignment_expression { + parseContext.samplerConstructorLocationCheck($2.loc, ",", $3); + $$ = parseContext.intermediate.addComma($1, $3, $2.loc); + if ($$ == 0) { + parseContext.binaryOpError($2.loc, ",", $1->getCompleteString(), $3->getCompleteString()); + $$ = $3; + } + } + ; + +constant_expression + : conditional_expression { + parseContext.constantValueCheck($1, ""); + $$ = $1; + } + ; + +declaration + : function_prototype SEMICOLON { + parseContext.handleFunctionDeclarator($1.loc, *$1.function, true /* prototype */); + $$ = 0; + // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature + } + | init_declarator_list SEMICOLON { + if ($1.intermNode && $1.intermNode->getAsAggregate()) + $1.intermNode->getAsAggregate()->setOperator(EOpSequence); + $$ = $1.intermNode; + } + | PRECISION precision_qualifier type_specifier SEMICOLON { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "precision statement"); + // lazy setting of the previous scope's defaults, has effect only the first time it is called in a particular scope + parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]); + parseContext.setDefaultPrecision($1.loc, $3, $2.qualifier.precision); + $$ = 0; + } + | block_structure SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList); + $$ = 0; + } + | block_structure IDENTIFIER SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList, $2.string); + $$ = 0; + } + | block_structure IDENTIFIER array_specifier SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList, $2.string, $3.arraySizes); + $$ = 0; + } + | type_qualifier SEMICOLON { + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.updateStandaloneQualifierDefaults($1.loc, $1); + $$ = 0; + } + | type_qualifier IDENTIFIER SEMICOLON { + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$2.string); + $$ = 0; + } + | type_qualifier IDENTIFIER identifier_list SEMICOLON { + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + $3->push_back($2.string); + parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$3); + $$ = 0; + } + ; + +block_structure + : type_qualifier IDENTIFIER LEFT_BRACE { parseContext.nestedBlockCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + --parseContext.structNestingLevel; + parseContext.blockName = $2.string; + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.currentBlockQualifier = $1.qualifier; + $$.loc = $1.loc; + $$.typeList = $5; + } + +identifier_list + : COMMA IDENTIFIER { + $$ = new TIdentifierList; + $$->push_back($2.string); + } + | identifier_list COMMA IDENTIFIER { + $$ = $1; + $$->push_back($3.string); + } + ; + +function_prototype + : function_declarator RIGHT_PAREN { + $$.function = $1; + $$.loc = $2.loc; + } + ; + +function_declarator + : function_header { + $$ = $1; + } + | 
function_header_with_parameters { + $$ = $1; + } + ; + + +function_header_with_parameters + : function_header parameter_declaration { + // Add the parameter + $$ = $1; + if ($2.param.type->getBasicType() != EbtVoid) + $1->addParameter($2.param); + else + delete $2.param.type; + } + | function_header_with_parameters COMMA parameter_declaration { + // + // Only first parameter of one-parameter functions can be void + // The check for named parameters not being void is done in parameter_declarator + // + if ($3.param.type->getBasicType() == EbtVoid) { + // + // A parameter past the first one is void + // + parseContext.error($2.loc, "cannot be an argument type except for '(void)'", "void", ""); + delete $3.param.type; + } else { + // Add the parameter + $$ = $1; + $1->addParameter($3.param); + } + } + ; + +function_header + : fully_specified_type IDENTIFIER LEFT_PAREN { + if ($1.qualifier.storage != EvqGlobal && $1.qualifier.storage != EvqTemporary) { + parseContext.error($2.loc, "no qualifiers allowed for function return", + GetStorageQualifierString($1.qualifier.storage), ""); + } + if ($1.arraySizes) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + + // Add the function as a prototype after parsing it (we do not support recursion) + TFunction *function; + TType type($1); + + // Potentially rename shader entry point function. No-op most of the time. + parseContext.renameShaderFunction($2.string); + + // Make the function + function = new TFunction($2.string, type); + $$ = function; + } + ; + +parameter_declarator + // Type + name + : type_specifier IDENTIFIER { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + if ($1.basicType == EbtVoid) { + parseContext.error($2.loc, "illegal use of type 'void'", $2.string->c_str(), ""); + } + parseContext.reservedErrorCheck($2.loc, *$2.string); + + TParameter param = {$2.string, new TType($1)}; + $$.loc = $2.loc; + $$.param = param; + } + | type_specifier IDENTIFIER array_specifier { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + TType* type = new TType($1); + type->transferArraySizes($3.arraySizes); + type->copyArrayInnerSizes($1.arraySizes); + + parseContext.arrayOfArrayVersionCheck($2.loc, type->getArraySizes()); + parseContext.arraySizeRequiredCheck($3.loc, *$3.arraySizes); + parseContext.reservedErrorCheck($2.loc, *$2.string); + + TParameter param = { $2.string, type }; + + $$.loc = $2.loc; + $$.param = param; + } + ; + +parameter_declaration + // + // With name + // + : type_qualifier parameter_declarator { + $$ = $2; + if ($1.qualifier.precision != EpqNone) + $$.param.type->getQualifier().precision = $1.qualifier.precision; + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type); + parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type); + + } + | parameter_declarator { + $$ = $1; + + parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type); + parseContext.paramCheckFixStorage($1.loc, EvqTemporary, 
*$$.param.type); + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + } + // + // Without name + // + | type_qualifier parameter_type_specifier { + $$ = $2; + if ($1.qualifier.precision != EpqNone) + $$.param.type->getQualifier().precision = $1.qualifier.precision; + parseContext.precisionQualifierCheck($1.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type); + parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type); + } + | parameter_type_specifier { + $$ = $1; + + parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type); + parseContext.paramCheckFixStorage($1.loc, EvqTemporary, *$$.param.type); + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + } + ; + +parameter_type_specifier + : type_specifier { + TParameter param = { 0, new TType($1) }; + $$.param = param; + if ($1.arraySizes) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + ; + +init_declarator_list + : single_declaration { + $$ = $1; + } + | init_declarator_list COMMA IDENTIFIER { + $$ = $1; + parseContext.declareVariable($3.loc, *$3.string, $1.type); + } + | init_declarator_list COMMA IDENTIFIER array_specifier { + $$ = $1; + parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes); + } + | init_declarator_list COMMA IDENTIFIER array_specifier EQUAL initializer { + $$.type = $1.type; + TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes, $6); + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $5.loc); + } + | init_declarator_list COMMA IDENTIFIER EQUAL initializer { + $$.type = $1.type; + TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, 0, $5); + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $4.loc); + } + ; + +single_declaration + : fully_specified_type { + $$.type = $1; + $$.intermNode = 0; + + parseContext.declareTypeDefaults($$.loc, $$.type); + + } + | fully_specified_type IDENTIFIER { + $$.type = $1; + $$.intermNode = 0; + parseContext.declareVariable($2.loc, *$2.string, $1); + } + | fully_specified_type IDENTIFIER array_specifier { + $$.type = $1; + $$.intermNode = 0; + parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes); + } + | fully_specified_type IDENTIFIER array_specifier EQUAL initializer { + $$.type = $1; + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes, $5); + $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $4.loc); + } + | fully_specified_type IDENTIFIER EQUAL initializer { + $$.type = $1; + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4); + $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $3.loc); + } + +// Grammar Note: No 'enum', or 'typedef'. 
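+ +// Grammar Note: fully_specified_type folds an optional leading type_qualifier into the type_specifier; the second alternative merges the qualifier and shader-qualifier sets, re-runs the profile and precision checks, and defaults vertex-stage outputs and fragment-stage inputs to smooth interpolation when no interpolation qualifier was written.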
+ +fully_specified_type + : type_specifier { + $$ = $1; + + parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $$); + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + } + parseContext.precisionQualifierCheck($$.loc, $$.basicType, $$.qualifier); + } + | type_qualifier type_specifier { + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $2); + + if ($2.arraySizes) { + parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type"); + } + + if ($2.arraySizes && parseContext.arrayQualifierError($2.loc, $1.qualifier)) + $2.arraySizes = nullptr; + + parseContext.checkNoShaderLayouts($2.loc, $1.shaderQualifiers); + $2.shaderQualifiers.merge($1.shaderQualifiers); + parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true); + parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier); + + $$ = $2; + + if (! $$.qualifier.isInterpolation() && + ((parseContext.language == EShLangVertex && $$.qualifier.storage == EvqVaryingOut) || + (parseContext.language == EShLangFragment && $$.qualifier.storage == EvqVaryingIn))) + $$.qualifier.smooth = true; + } + ; + +invariant_qualifier + : INVARIANT { + parseContext.globalCheck($1.loc, "invariant"); + parseContext.profileRequires($$.loc, ENoProfile, 120, 0, "invariant"); + $$.init($1.loc); + $$.qualifier.invariant = true; + } + ; + +interpolation_qualifier + : SMOOTH { + parseContext.globalCheck($1.loc, "smooth"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "smooth"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "smooth"); + $$.init($1.loc); + $$.qualifier.smooth = true; + } + | FLAT { + parseContext.globalCheck($1.loc, "flat"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "flat"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "flat"); + $$.init($1.loc); + $$.qualifier.flat = true; + } + + | NOPERSPECTIVE { + parseContext.globalCheck($1.loc, "noperspective"); + parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "noperspective"); + $$.init($1.loc); + $$.qualifier.nopersp = true; + } + | EXPLICITINTERPAMD { + parseContext.globalCheck($1.loc, "__explicitInterpAMD"); + parseContext.profileRequires($1.loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + parseContext.profileRequires($1.loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + $$.init($1.loc); + $$.qualifier.explicitInterp = true; + } + | PERVERTEXNV { + parseContext.globalCheck($1.loc, "pervertexNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires($1.loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + $$.init($1.loc); + $$.qualifier.pervertexNV = true; + } + | PERPRIMITIVENV { + // No need for profile version or extension check. Shader stage already checks both. 
+ parseContext.globalCheck($1.loc, "perprimitiveNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV"); + // The fragment shader stage doesn't check for the extension, so we explicitly add the extension check below. + if (parseContext.language == EShLangFragment) + parseContext.requireExtensions($1.loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV"); + $$.init($1.loc); + $$.qualifier.perPrimitiveNV = true; + } + | PERVIEWNV { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck($1.loc, "perviewNV"); + parseContext.requireStage($1.loc, EShLangMeshNV, "perviewNV"); + $$.init($1.loc); + $$.qualifier.perViewNV = true; + } + | PERTASKNV { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck($1.loc, "taskNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV"); + $$.init($1.loc); + $$.qualifier.perTaskNV = true; + } + + ; + +layout_qualifier + : LAYOUT LEFT_PAREN layout_qualifier_id_list RIGHT_PAREN { + $$ = $3; + } + ; + +layout_qualifier_id_list + : layout_qualifier_id { + $$ = $1; + } + | layout_qualifier_id_list COMMA layout_qualifier_id { + $$ = $1; + $$.shaderQualifiers.merge($3.shaderQualifiers); + parseContext.mergeObjectLayoutQualifiers($$.qualifier, $3.qualifier, false); + } + +layout_qualifier_id + : IDENTIFIER { + $$.init($1.loc); + parseContext.setLayoutQualifier($1.loc, $$, *$1.string); + } + | IDENTIFIER EQUAL constant_expression { + $$.init($1.loc); + parseContext.setLayoutQualifier($1.loc, $$, *$1.string, $3); + } + | SHARED { // because "shared" is both an identifier and a keyword + $$.init($1.loc); + TString strShared("shared"); + parseContext.setLayoutQualifier($1.loc, $$, strShared); + } + ; + + +precise_qualifier + : PRECISE { + parseContext.profileRequires($$.loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise"); + parseContext.profileRequires($1.loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise"); + $$.init($1.loc); + $$.qualifier.noContraction = true; + } + ; + + +type_qualifier + : single_type_qualifier { + $$ = $1; + } + | type_qualifier single_type_qualifier { + $$ = $1; + if ($$.basicType == EbtVoid) + $$.basicType = $2.basicType; + + $$.shaderQualifiers.merge($2.shaderQualifiers); + parseContext.mergeQualifiers($$.loc, $$.qualifier, $2.qualifier, false); + } + ; + +single_type_qualifier + : storage_qualifier { + $$ = $1; + } + | layout_qualifier { + $$ = $1; + } + | precision_qualifier { + parseContext.checkPrecisionQualifier($1.loc, $1.qualifier.precision); + $$ = $1; + } + | interpolation_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } + | invariant_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } + + | precise_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } + | non_uniform_qualifier { + $$ = $1; + } + + ; + +storage_qualifier + : CONST { + $$.init($1.loc); + $$.qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant + } + | INOUT { + parseContext.globalCheck($1.loc, "inout"); + $$.init($1.loc); + $$.qualifier.storage = EvqInOut; + } + | IN { + parseContext.globalCheck($1.loc, "in"); + $$.init($1.loc); + // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later + $$.qualifier.storage = 
EvqIn; + } + | OUT { + parseContext.globalCheck($1.loc, "out"); + $$.init($1.loc); + // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later + $$.qualifier.storage = EvqOut; + } + | CENTROID { + parseContext.profileRequires($1.loc, ENoProfile, 120, 0, "centroid"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "centroid"); + parseContext.globalCheck($1.loc, "centroid"); + $$.init($1.loc); + $$.qualifier.centroid = true; + } + | UNIFORM { + parseContext.globalCheck($1.loc, "uniform"); + $$.init($1.loc); + $$.qualifier.storage = EvqUniform; + } + | SHARED { + parseContext.globalCheck($1.loc, "shared"); + parseContext.profileRequires($1.loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared"); + parseContext.profileRequires($1.loc, EEsProfile, 310, 0, "shared"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangComputeMask | EShLangMeshNVMask | EShLangTaskNVMask), "shared"); + $$.init($1.loc); + $$.qualifier.storage = EvqShared; + } + | BUFFER { + parseContext.globalCheck($1.loc, "buffer"); + $$.init($1.loc); + $$.qualifier.storage = EvqBuffer; + } + + | ATTRIBUTE { + parseContext.requireStage($1.loc, EShLangVertex, "attribute"); + parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "attribute"); + parseContext.checkDeprecated($1.loc, ENoProfile, 130, "attribute"); + parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "attribute"); + parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "attribute"); + + parseContext.globalCheck($1.loc, "attribute"); + + $$.init($1.loc); + $$.qualifier.storage = EvqVaryingIn; + } + | VARYING { + parseContext.checkDeprecated($1.loc, ENoProfile, 130, "varying"); + parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "varying"); + parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "varying"); + parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "varying"); + + parseContext.globalCheck($1.loc, "varying"); + + $$.init($1.loc); + if (parseContext.language == EShLangVertex) + $$.qualifier.storage = EvqVaryingOut; + else + $$.qualifier.storage = EvqVaryingIn; + } + | PATCH { + parseContext.globalCheck($1.loc, "patch"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch"); + $$.init($1.loc); + $$.qualifier.patch = true; + } + | SAMPLE { + parseContext.globalCheck($1.loc, "sample"); + $$.init($1.loc); + $$.qualifier.sample = true; + } + | HITATTRNV { + parseContext.globalCheck($1.loc, "hitAttributeNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqHitAttr; + } + | HITATTREXT { + parseContext.globalCheck($1.loc, "hitAttributeEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "hitAttributeEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqHitAttr; + } + | PAYLOADNV { + parseContext.globalCheck($1.loc, "rayPayloadNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV"); + $$.init($1.loc); + 
$$.qualifier.storage = EvqPayload; + } + | PAYLOADEXT { + parseContext.globalCheck($1.loc, "rayPayloadEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayload; + } + | PAYLOADINNV { + parseContext.globalCheck($1.loc, "rayPayloadInNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayloadIn; + } + | PAYLOADINEXT { + parseContext.globalCheck($1.loc, "rayPayloadInEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadInEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayloadIn; + } + | CALLDATANV { + parseContext.globalCheck($1.loc, "callableDataNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableData; + } + | CALLDATAEXT { + parseContext.globalCheck($1.loc, "callableDataEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableData; + } + | CALLDATAINNV { + parseContext.globalCheck($1.loc, "callableDataInNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableDataIn; + } + | CALLDATAINEXT { + parseContext.globalCheck($1.loc, "callableDataInEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataInEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableDataIn; + } + | COHERENT { + $$.init($1.loc); + $$.qualifier.coherent = true; + } + | DEVICECOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent"); + $$.qualifier.devicecoherent = true; + } + | QUEUEFAMILYCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent"); + $$.qualifier.queuefamilycoherent = true; + } + | WORKGROUPCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent"); + $$.qualifier.workgroupcoherent = true; + } + | SUBGROUPCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent"); + $$.qualifier.subgroupcoherent = true; + } + | NONPRIVATE { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, 
&E_GL_KHR_memory_scope_semantics, "nonprivate"); + $$.qualifier.nonprivate = true; + } + | SHADERCALLCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_ray_tracing, "shadercallcoherent"); + $$.qualifier.shadercallcoherent = true; + } + | VOLATILE { + $$.init($1.loc); + $$.qualifier.volatil = true; + } + | RESTRICT { + $$.init($1.loc); + $$.qualifier.restrict = true; + } + | READONLY { + $$.init($1.loc); + $$.qualifier.readonly = true; + } + | WRITEONLY { + $$.init($1.loc); + $$.qualifier.writeonly = true; + } + | SUBROUTINE { + parseContext.spvRemoved($1.loc, "subroutine"); + parseContext.globalCheck($1.loc, "subroutine"); + parseContext.unimplemented($1.loc, "subroutine"); + $$.init($1.loc); + } + | SUBROUTINE LEFT_PAREN type_name_list RIGHT_PAREN { + parseContext.spvRemoved($1.loc, "subroutine"); + parseContext.globalCheck($1.loc, "subroutine"); + parseContext.unimplemented($1.loc, "subroutine"); + $$.init($1.loc); + } + + ; + + +non_uniform_qualifier + : NONUNIFORM { + $$.init($1.loc); + $$.qualifier.nonUniform = true; + } + ; + +type_name_list + : IDENTIFIER { + // TODO + } + | type_name_list COMMA IDENTIFIER { + // TODO: 4.0 semantics: subroutines + // 1) make sure each identifier is a type declared earlier with SUBROUTINE + // 2) save all of the identifiers for future comparison with the declared function + } + ; + + +type_specifier + : type_specifier_nonarray type_parameter_specifier_opt { + $$ = $1; + $$.qualifier.precision = parseContext.getDefaultPrecision($$); + $$.typeParameters = $2; + } + | type_specifier_nonarray type_parameter_specifier_opt array_specifier { + parseContext.arrayOfArrayVersionCheck($3.loc, $3.arraySizes); + $$ = $1; + $$.qualifier.precision = parseContext.getDefaultPrecision($$); + $$.typeParameters = $2; + $$.arraySizes = $3.arraySizes; + } + ; + +array_specifier + : LEFT_BRACKET RIGHT_BRACKET { + $$.loc = $1.loc; + $$.arraySizes = new TArraySizes; + $$.arraySizes->addInnerSize(); + } + | LEFT_BRACKET conditional_expression RIGHT_BRACKET { + $$.loc = $1.loc; + $$.arraySizes = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck($2->getLoc(), $2, size, "array size"); + $$.arraySizes->addInnerSize(size); + } + | array_specifier LEFT_BRACKET RIGHT_BRACKET { + $$ = $1; + $$.arraySizes->addInnerSize(); + } + | array_specifier LEFT_BRACKET conditional_expression RIGHT_BRACKET { + $$ = $1; + + TArraySize size; + parseContext.arraySizeCheck($3->getLoc(), $3, size, "array size"); + $$.arraySizes->addInnerSize(size); + } + ; + +type_parameter_specifier_opt + : type_parameter_specifier { + $$ = $1; + } + | /* May be null */ { + $$ = 0; + } + ; + +type_parameter_specifier + : LEFT_ANGLE type_parameter_specifier_list RIGHT_ANGLE { + $$ = $2; + } + ; + +type_parameter_specifier_list + : unary_expression { + $$ = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck($1->getLoc(), $1, size, "type parameter"); + $$->addInnerSize(size); + } + | type_parameter_specifier_list COMMA unary_expression { + $$ = $1; + + TArraySize size; + parseContext.arraySizeCheck($3->getLoc(), $3, size, "type parameter"); + $$->addInnerSize(size); + } + ; + +type_specifier_nonarray + : VOID { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtVoid; + } + | FLOAT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + } + | INT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + } + | UINT { + parseContext.fullIntegerCheck($1.loc, 
"unsigned integer"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + } + | BOOL { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + } + | VEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(2); + } + | VEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(3); + } + | VEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(4); + } + | BVEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(2); + } + | BVEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(3); + } + | BVEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(4); + } + | IVEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(2); + } + | IVEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(3); + } + | IVEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(4); + } + | UVEC2 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(2); + } + | UVEC3 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(3); + } + | UVEC4 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(4); + } + | MAT2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | MAT3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | MAT4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | MAT2X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | MAT2X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 3); + } + | MAT2X4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 4); + } + | MAT3X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 2); + } + | MAT3X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | MAT3X4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 4); + } + | MAT4X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 2); + } + | MAT4X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 3); + } + | MAT4X4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + + | DOUBLE { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + } + | FLOAT16_T { + parseContext.float16ScalarVectorCheck($1.loc, "float16_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + } + | FLOAT32_T { + parseContext.explicitFloat32Check($1.loc, "float32_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + } + | FLOAT64_T { + parseContext.explicitFloat64Check($1.loc, "float64_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + } + | INT8_T { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + } + | UINT8_T { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + } + | INT16_T { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + } + | UINT16_T { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + } + | INT32_T { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + } + | UINT32_T { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + } + | INT64_T { + parseContext.int64Check($1.loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + } + | UINT64_T { + parseContext.int64Check($1.loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + } + | DVEC2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(2); + } + | DVEC3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(3); + } + | DVEC4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(4); + } + | F16VEC2 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(2); + } + | F16VEC3 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(3); + } + | F16VEC4 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(4); + } + | F32VEC2 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(2); + } + | F32VEC3 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(3); + } + | F32VEC4 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(4); + } + | F64VEC2 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(2); + } + | F64VEC3 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(3); + } + | F64VEC4 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(4); + } + | I8VEC2 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(2); + } + | I8VEC3 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(3); + } + | I8VEC4 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(4); + } + | I16VEC2 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(2); + } + | I16VEC3 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(3); + } + 
| I16VEC4 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(4); + } + | I32VEC2 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(2); + } + | I32VEC3 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(3); + } + | I32VEC4 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(4); + } + | I64VEC2 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(2); + } + | I64VEC3 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(3); + } + | I64VEC4 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(4); + } + | U8VEC2 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(2); + } + | U8VEC3 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(3); + } + | U8VEC4 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(4); + } + | U16VEC2 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(2); + } + | U16VEC3 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(3); + } + | U16VEC4 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(4); + } + | U32VEC2 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(2); + } + | U32VEC3 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, 
parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(3); + } + | U32VEC4 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(4); + } + | U64VEC2 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(2); + } + | U64VEC3 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(3); + } + | U64VEC4 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(4); + } + | DMAT2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | DMAT3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | DMAT4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | DMAT2X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | DMAT2X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 3); + } + | DMAT2X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 4); + } + | DMAT3X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 2); + } + | DMAT3X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | DMAT3X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 4); + } + | DMAT4X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 2); + } + | DMAT4X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 3); + } + | DMAT4X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | F16MAT2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 2); + } + | F16MAT3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 3); + } + | F16MAT4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 4); + } + | F16MAT2X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 2); + } + | F16MAT2X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 3); + } + | F16MAT2X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 4); + } + | F16MAT3X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 2); + } + | F16MAT3X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 3); + } + | F16MAT3X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); 
+ $$.basicType = EbtFloat16; + $$.setMatrix(3, 4); + } + | F16MAT4X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 2); + } + | F16MAT4X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 3); + } + | F16MAT4X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 4); + } + | F32MAT2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | F32MAT3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | F32MAT4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | F32MAT2X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | F32MAT2X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 3); + } + | F32MAT2X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 4); + } + | F32MAT3X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 2); + } + | F32MAT3X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | F32MAT3X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 4); + } + | F32MAT4X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 2); + } + | F32MAT4X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 3); + } + | F32MAT4X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + 
$$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | F64MAT2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | F64MAT3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | F64MAT4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | F64MAT2X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | F64MAT2X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 3); + } + | F64MAT2X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 4); + } + | F64MAT3X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 2); + } + | F64MAT3X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | F64MAT3X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 4); + } + | F64MAT4X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 2); + } + | F64MAT4X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 3); + } + | F64MAT4X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | ACCSTRUCTNV { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAccStruct; + } + | ACCSTRUCTEXT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAccStruct; + } + | RAYQUERYEXT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtRayQuery; + } + | ATOMIC_UINT { + parseContext.vulkanRemoved($1.loc, "atomic counter types"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAtomicUint; + } + | SAMPLER1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + 
$$.sampler.set(EbtFloat, Esd1D); + } + + | SAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); + } + | SAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd3D); + } + | SAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube); + } + | SAMPLER2DSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, false, true); + } + | SAMPLERCUBESHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, false, true); + } + | SAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true); + } + | SAMPLER2DARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true, true); + } + + | SAMPLER1DSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, false, true); + } + | SAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, true); + } + | SAMPLER1DARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, true, true); + } + | SAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, true); + } + | SAMPLERCUBEARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, true, true); + } + | F16SAMPLER1D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D); + } + | F16SAMPLER2D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D); + } + | F16SAMPLER3D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd3D); + } + | F16SAMPLERCUBE { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube); + } + | F16SAMPLER1DSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, false, true); + } + | F16SAMPLER2DSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, false, true); + } + | 
F16SAMPLERCUBESHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, false, true); + } + | F16SAMPLER1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, true); + } + | F16SAMPLER2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true); + } + | F16SAMPLER1DARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, true, true); + } + | F16SAMPLER2DARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true, true); + } + | F16SAMPLERCUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, true); + } + | F16SAMPLERCUBEARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, true, true); + } + | ISAMPLER1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd1D); + } + + | ISAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D); + } + | ISAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd3D); + } + | ISAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdCube); + } + | ISAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, true); + } + | USAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D); + } + | USAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd3D); + } + | USAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdCube); + } + + | ISAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd1D, true); + } + | ISAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdCube, true); + } + | USAMPLER1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + 
$$.sampler.set(EbtUint, Esd1D); + } + | USAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd1D, true); + } + | USAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdCube, true); + } + | TEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdCube, true); + } + | ITEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdCube, true); + } + | UTEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdCube, true); + } + + | USAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, true); + } + | TEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D); + } + | TEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd3D); + } + | TEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, true); + } + | TEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdCube); + } + | ITEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D); + } + | ITEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd3D); + } + | ITEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdCube); + } + | ITEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, true); + } + | UTEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D); + } + | UTEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd3D); + } + | UTEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdCube); + } + | UTEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, true); + } + | SAMPLER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setPureSampler(false); + } + | SAMPLERSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setPureSampler(true); + } + + | SAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdRect); + } + | SAMPLER2DRECTSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdRect, false, true); + } + | F16SAMPLER2DRECT { + 
parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdRect); + } + | F16SAMPLER2DRECTSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdRect, false, true); + } + | ISAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdRect); + } + | USAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdRect); + } + | SAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdBuffer); + } + | F16SAMPLERBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdBuffer); + } + | ISAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdBuffer); + } + | USAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdBuffer); + } + | SAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, false, false, true); + } + | F16SAMPLER2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, false, false, true); + } + | ISAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, false, false, true); + } + | USAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, false, false, true); + } + | SAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true, false, true); + } + | F16SAMPLER2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true, false, true); + } + | ISAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, true, false, true); + } + | USAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, true, false, true); + } + | TEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd1D); + } + | F16TEXTURE1D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd1D); + } + | 
F16TEXTURE2D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D); + } + | F16TEXTURE3D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd3D); + } + | F16TEXTURECUBE { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdCube); + } + | TEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd1D, true); + } + | F16TEXTURE1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd1D, true); + } + | F16TEXTURE2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, true); + } + | F16TEXTURECUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdCube, true); + } + | ITEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd1D); + } + | ITEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd1D, true); + } + | UTEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd1D); + } + | UTEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd1D, true); + } + | TEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdRect); + } + | F16TEXTURE2DRECT { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdRect); + } + | ITEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdRect); + } + | UTEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdRect); + } + | TEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdBuffer); + } + | F16TEXTUREBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + 
$$.sampler.setTexture(EbtFloat16, EsdBuffer); + } + | ITEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdBuffer); + } + | UTEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdBuffer); + } + | TEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, false, false, true); + } + | F16TEXTURE2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, false, false, true); + } + | ITEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, false, false, true); + } + | UTEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, false, false, true); + } + | TEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, true, false, true); + } + | F16TEXTURE2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, true, false, true); + } + | ITEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, true, false, true); + } + | UTEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, true, false, true); + } + | IMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd1D); + } + | F16IMAGE1D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd1D); + } + | IIMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd1D); + } + | UIMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd1D); + } + | IMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D); + } + | F16IMAGE2D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D); + } + | IIMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D); + } + | UIMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D); + } + | IMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd3D); + } + 
| F16IMAGE3D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd3D); + } + | IIMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd3D); + } + | UIMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd3D); + } + | IMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdRect); + } + | F16IMAGE2DRECT { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdRect); + } + | IIMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdRect); + } + | UIMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdRect); + } + | IMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdCube); + } + | F16IMAGECUBE { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdCube); + } + | IIMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdCube); + } + | UIMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdCube); + } + | IMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdBuffer); + } + | F16IMAGEBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdBuffer); + } + | IIMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdBuffer); + } + | UIMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdBuffer); + } + | IMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd1D, true); + } + | F16IMAGE1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd1D, true); + } + | IIMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd1D, true); + } + | UIMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd1D, true); + } + | IMAGE2DARRAY { + $$.init($1.loc, 
parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, true); + } + | F16IMAGE2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, true); + } + | IIMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, true); + } + | UIMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, true); + } + | IMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdCube, true); + } + | F16IMAGECUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdCube, true); + } + | IIMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdCube, true); + } + | UIMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdCube, true); + } + | IMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, false, false, true); + } + | F16IMAGE2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, false, false, true); + } + | IIMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, false, false, true); + } + | UIMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, false, false, true); + } + | IMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, true, false, true); + } + | F16IMAGE2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, true, false, true); + } + | IIMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, true, false, true); + } + | UIMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, true, false, true); + } + | SAMPLEREXTERNALOES { // GL_OES_EGL_image_external + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); + $$.sampler.external = true; + } + | SAMPLEREXTERNAL2DY2YEXT { // GL_EXT_YUV_target + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); + $$.sampler.yuv = true; + } + | SUBPASSINPUT { + parseContext.requireStage($1.loc, 
EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat); + } + | SUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat, true); + } + | F16SUBPASSINPUT { + parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat16); + } + | F16SUBPASSINPUTMS { + parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat16, true); + } + | ISUBPASSINPUT { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtInt); + } + | ISUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtInt, true); + } + | USUBPASSINPUT { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtUint); + } + | USUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtUint, true); + } + | FCOOPMATNV { + parseContext.fcoopmatCheck($1.loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.coopmat = true; + } + | ICOOPMATNV { + parseContext.intcoopmatCheck($1.loc, "icoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.coopmat = true; + } + | UCOOPMATNV { + parseContext.intcoopmatCheck($1.loc, "ucoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.coopmat = true; + } + + | struct_specifier { + $$ = $1; + $$.qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary; + parseContext.structTypeCheck($$.loc, $$); + } + | TYPE_NAME { + // + // This is for user defined type names. The lexical phase looked up the + // type. 
+ // + if (const TVariable* variable = ($1.symbol)->getAsVariable()) { + const TType& structure = variable->getType(); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtStruct; + $$.userDef = &structure; + } else + parseContext.error($1.loc, "expected type name", $1.string->c_str(), ""); + } + ; + +precision_qualifier + : HIGH_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "highp precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqHigh); + } + | MEDIUM_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "mediump precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqMedium); + } + | LOW_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "lowp precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqLow); + } + ; + +struct_specifier + : STRUCT IDENTIFIER LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + TType* structure = new TType($5, *$2.string); + parseContext.structArrayCheck($2.loc, *structure); + TVariable* userTypeDef = new TVariable($2.string, *structure, true); + if (! parseContext.symbolTable.insert(*userTypeDef)) + parseContext.error($2.loc, "redefinition", $2.string->c_str(), "struct"); + $$.init($1.loc); + $$.basicType = EbtStruct; + $$.userDef = structure; + --parseContext.structNestingLevel; + } + | STRUCT LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + TType* structure = new TType($4, TString("")); + $$.init($1.loc); + $$.basicType = EbtStruct; + $$.userDef = structure; + --parseContext.structNestingLevel; + } + ; + +struct_declaration_list + : struct_declaration { + $$ = $1; + } + | struct_declaration_list struct_declaration { + $$ = $1; + for (unsigned int i = 0; i < $2->size(); ++i) { + for (unsigned int j = 0; j < $$->size(); ++j) { + if ((*$$)[j].type->getFieldName() == (*$2)[i].type->getFieldName()) + parseContext.error((*$2)[i].loc, "duplicate member name:", "", (*$2)[i].type->getFieldName().c_str()); + } + $$->push_back((*$2)[i]); + } + } + ; + +struct_declaration + : type_specifier struct_declarator_list SEMICOLON { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + if (parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + + $$ = $2; + + parseContext.voidErrorCheck($1.loc, (*$2)[0].type->getFieldName(), $1.basicType); + parseContext.precisionQualifierCheck($1.loc, $1.basicType, $1.qualifier); + + for (unsigned int i = 0; i < $$->size(); ++i) { + TType type($1); + type.setFieldName((*$$)[i].type->getFieldName()); + type.transferArraySizes((*$$)[i].type->getArraySizes()); + type.copyArrayInnerSizes($1.arraySizes); + parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes()); + (*$$)[i].type->shallowCopy(type); + } + } + | type_qualifier type_specifier struct_declarator_list SEMICOLON { + if ($2.arraySizes) { + parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type"); + if 
(parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck($2.loc, *$2.arraySizes); + } + + $$ = $3; + + parseContext.memberQualifierCheck($1); + parseContext.voidErrorCheck($2.loc, (*$3)[0].type->getFieldName(), $2.basicType); + parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true); + parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier); + + for (unsigned int i = 0; i < $$->size(); ++i) { + TType type($2); + type.setFieldName((*$$)[i].type->getFieldName()); + type.transferArraySizes((*$$)[i].type->getArraySizes()); + type.copyArrayInnerSizes($2.arraySizes); + parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes()); + (*$$)[i].type->shallowCopy(type); + } + } + ; + +struct_declarator_list + : struct_declarator { + $$ = new TTypeList; + $$->push_back($1); + } + | struct_declarator_list COMMA struct_declarator { + $$->push_back($3); + } + ; + +struct_declarator + : IDENTIFIER { + $$.type = new TType(EbtVoid); + $$.loc = $1.loc; + $$.type->setFieldName(*$1.string); + } + | IDENTIFIER array_specifier { + parseContext.arrayOfArrayVersionCheck($1.loc, $2.arraySizes); + + $$.type = new TType(EbtVoid); + $$.loc = $1.loc; + $$.type->setFieldName(*$1.string); + $$.type->transferArraySizes($2.arraySizes); + } + ; + +initializer + : assignment_expression { + $$ = $1; + } + + | LEFT_BRACE initializer_list RIGHT_BRACE { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile($1.loc, ~EEsProfile, initFeature); + parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + $$ = $2; + } + | LEFT_BRACE initializer_list COMMA RIGHT_BRACE { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile($1.loc, ~EEsProfile, initFeature); + parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + $$ = $2; + } + + ; + + +initializer_list + : initializer { + $$ = parseContext.intermediate.growAggregate(0, $1, $1->getLoc()); + } + | initializer_list COMMA initializer { + $$ = parseContext.intermediate.growAggregate($1, $3); + } + ; + + +declaration_statement + : declaration { $$ = $1; } + ; + +statement + : compound_statement { $$ = $1; } + | simple_statement { $$ = $1; } + ; + +// Grammar Note: labeled statements for switch statements only; 'goto' is not supported. 
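The initializer and statement rules above are the part of this grammar that host code exercises when it feeds glslang a shader. Below is a minimal sketch (not part of the diff) of driving this front end through the public C++ API vendored here: glslang::TShader, glslang::TProgram, and GlslangToSpv from the sources listed in dep/glslang/CMakeLists.txt, plus the DefaultTBuiltInResource limits from StandAlone/ResourceLimits.cpp. The shader string, the CompileFragmentToSpv helper, and the error handling are illustrative assumptions, not code from this PR.

#include <cstdio>
#include <vector>

#include "glslang/Public/ShaderLang.h"
#include "SPIRV/GlslangToSpv.h"
#include "StandAlone/ResourceLimits.h"

// Fragment shader that exercises the rules above: a { }-style initializer
// (initializer / initializer_list) and a switch with case/default labels
// (switch_statement, case_label).
static const char* s_frag_source = R"(#version 450
layout(location = 0) out vec4 o_col;
void main()
{
    float weights[2] = { 0.25, 0.75 };
    switch (int(gl_FragCoord.x) & 1) {
    case 0:  o_col = vec4(weights[0]); break;
    default: o_col = vec4(weights[1]); break;
    }
})";

// Hypothetical helper: parse, link, and translate the shader to SPIR-V.
static bool CompileFragmentToSpv(std::vector<unsigned int>& out_spv)
{
    glslang::TShader shader(EShLangFragment);
    shader.setStrings(&s_frag_source, 1);

    // parse() runs the Bison-generated parser in this file; syntax and
    // semantic errors land in the info log.
    if (!shader.parse(&glslang::DefaultTBuiltInResource, 450, false, EShMsgDefault)) {
        std::fprintf(stderr, "parse failed:\n%s\n", shader.getInfoLog());
        return false;
    }

    glslang::TProgram program;
    program.addShader(&shader);
    if (!program.link(EShMsgDefault)) {
        std::fprintf(stderr, "link failed:\n%s\n", program.getInfoLog());
        return false;
    }

    glslang::GlslangToSpv(*program.getIntermediate(EShLangFragment), out_spv);
    return true;
}

int main()
{
    glslang::InitializeProcess();
    std::vector<unsigned int> spv;
    const bool ok = CompileFragmentToSpv(spv);
    std::printf("compiled: %s (%zu words)\n", ok ? "yes" : "no", spv.size());
    glslang::FinalizeProcess();
    return ok ? 0 : 1;
}

The GLSL body is deliberately chosen to walk the productions above: the brace initializer passes the { }-style initializer check (legal because #version 450 satisfies the GL_ARB_shading_language_420pack requirement), and the switch body goes through the case_label rule, which rejects case/default outside a switch or nested in other control flow.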
+ +simple_statement + : declaration_statement { $$ = $1; } + | expression_statement { $$ = $1; } + | selection_statement { $$ = $1; } + | switch_statement { $$ = $1; } + | case_label { $$ = $1; } + | iteration_statement { $$ = $1; } + | jump_statement { $$ = $1; } + + | demote_statement { $$ = $1; } + + ; + + +demote_statement + : DEMOTE SEMICOLON { + parseContext.requireStage($1.loc, EShLangFragment, "demote"); + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_demote_to_helper_invocation, "demote"); + $$ = parseContext.intermediate.addBranch(EOpDemote, $1.loc); + } + ; + + +compound_statement + : LEFT_BRACE RIGHT_BRACE { $$ = 0; } + | LEFT_BRACE { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + } + statement_list { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + } + RIGHT_BRACE { + if ($3 && $3->getAsAggregate()) + $3->getAsAggregate()->setOperator(EOpSequence); + $$ = $3; + } + ; + +statement_no_new_scope + : compound_statement_no_new_scope { $$ = $1; } + | simple_statement { $$ = $1; } + ; + +statement_scoped + : { + ++parseContext.controlFlowNestingLevel; + } + compound_statement { + --parseContext.controlFlowNestingLevel; + $$ = $2; + } + | { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + simple_statement { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + $$ = $2; + } + +compound_statement_no_new_scope + // Statement that doesn't create a new scope, for selection_statement, iteration_statement + : LEFT_BRACE RIGHT_BRACE { + $$ = 0; + } + | LEFT_BRACE statement_list RIGHT_BRACE { + if ($2 && $2->getAsAggregate()) + $2->getAsAggregate()->setOperator(EOpSequence); + $$ = $2; + } + ; + +statement_list + : statement { + $$ = parseContext.intermediate.makeAggregate($1); + if ($1 && $1->getAsBranchNode() && ($1->getAsBranchNode()->getFlowOp() == EOpCase || + $1->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence(0, $1); + $$ = 0; // start a fresh subsequence for what's after this case + } + } + | statement_list statement { + if ($2 && $2->getAsBranchNode() && ($2->getAsBranchNode()->getFlowOp() == EOpCase || + $2->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence($1 ? 
$1->getAsAggregate() : 0, $2); + $$ = 0; // start a fresh subsequence for what's after this case + } else + $$ = parseContext.intermediate.growAggregate($1, $2); + } + ; + +expression_statement + : SEMICOLON { $$ = 0; } + | expression SEMICOLON { $$ = static_cast<TIntermNode*>($1); } + ; + +selection_statement + : selection_statement_nonattributed { + $$ = $1; + } + + | attribute selection_statement_nonattributed { + parseContext.handleSelectionAttributes(*$1, $2); + $$ = $2; + } + + +selection_statement_nonattributed + : IF LEFT_PAREN expression RIGHT_PAREN selection_rest_statement { + parseContext.boolCheck($1.loc, $3); + $$ = parseContext.intermediate.addSelection($3, $5, $1.loc); + } + ; + +selection_rest_statement + : statement_scoped ELSE statement_scoped { + $$.node1 = $1; + $$.node2 = $3; + } + | statement_scoped { + $$.node1 = $1; + $$.node2 = 0; + } + ; + +condition + // In 1996 c++ draft, conditions can include single declarations + : expression { + $$ = $1; + parseContext.boolCheck($1->getLoc(), $1); + } + | fully_specified_type IDENTIFIER EQUAL initializer { + parseContext.boolCheck($2.loc, $1); + + TType type($1); + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4); + if (initNode) + $$ = initNode->getAsTyped(); + else + $$ = 0; + } + ; + +switch_statement + : switch_statement_nonattributed { + $$ = $1; + } + + | attribute switch_statement_nonattributed { + parseContext.handleSwitchAttributes(*$1, $2); + $$ = $2; + } + + +switch_statement_nonattributed + : SWITCH LEFT_PAREN expression RIGHT_PAREN { + // start new switch sequence on the switch stack + ++parseContext.controlFlowNestingLevel; + ++parseContext.statementNestingLevel; + parseContext.switchSequenceStack.push_back(new TIntermSequence); + parseContext.switchLevel.push_back(parseContext.statementNestingLevel); + parseContext.symbolTable.push(); + } + LEFT_BRACE switch_statement_list RIGHT_BRACE { + $$ = parseContext.addSwitch($1.loc, $3, $7 ?
$7->getAsAggregate() : 0); + delete parseContext.switchSequenceStack.back(); + parseContext.switchSequenceStack.pop_back(); + parseContext.switchLevel.pop_back(); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + ; + +switch_statement_list + : /* nothing */ { + $$ = 0; + } + | statement_list { + $$ = $1; + } + ; + +case_label + : CASE expression COLON { + $$ = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error($1.loc, "cannot appear outside switch statement", "case", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error($1.loc, "cannot be nested inside control flow", "case", ""); + else { + parseContext.constantValueCheck($2, "case"); + parseContext.integerCheck($2, "case"); + $$ = parseContext.intermediate.addBranch(EOpCase, $2, $1.loc); + } + } + | DEFAULT COLON { + $$ = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error($1.loc, "cannot appear outside switch statement", "default", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error($1.loc, "cannot be nested inside control flow", "default", ""); + else + $$ = parseContext.intermediate.addBranch(EOpDefault, $1.loc); + } + ; + +iteration_statement + : iteration_statement_nonattributed { + $$ = $1; + } + + | attribute iteration_statement_nonattributed { + parseContext.handleLoopAttributes(*$1, $2); + $$ = $2; + } + + +iteration_statement_nonattributed + : WHILE LEFT_PAREN { + if (! parseContext.limits.whileLoops) + parseContext.error($1.loc, "while loops not available", "limitation", ""); + parseContext.symbolTable.push(); + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + condition RIGHT_PAREN statement_no_new_scope { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.addLoop($6, $4, 0, true, $1.loc); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + | DO { + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + statement WHILE LEFT_PAREN expression RIGHT_PAREN SEMICOLON { + if (! parseContext.limits.whileLoops) + parseContext.error($1.loc, "do-while loops not available", "limitation", ""); + + parseContext.boolCheck($8.loc, $6); + + $$ = parseContext.intermediate.addLoop($3, $6, 0, false, $4.loc); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + | FOR LEFT_PAREN { + parseContext.symbolTable.push(); + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + for_init_statement for_rest_statement RIGHT_PAREN statement_no_new_scope { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.makeAggregate($4, $2.loc); + TIntermLoop* forLoop = parseContext.intermediate.addLoop($7, reinterpret_cast<TIntermTyped*>($5.node1), reinterpret_cast<TIntermTyped*>($5.node2), true, $1.loc); + if (!
parseContext.limits.nonInductiveForLoops) + parseContext.inductiveLoopCheck($1.loc, $4, forLoop); + $$ = parseContext.intermediate.growAggregate($$, forLoop, $1.loc); + $$->getAsAggregate()->setOperator(EOpSequence); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + ; + +for_init_statement + : expression_statement { + $$ = $1; + } + | declaration_statement { + $$ = $1; + } + ; + +conditionopt + : condition { + $$ = $1; + } + | /* May be null */ { + $$ = 0; + } + ; + +for_rest_statement + : conditionopt SEMICOLON { + $$.node1 = $1; + $$.node2 = 0; + } + | conditionopt SEMICOLON expression { + $$.node1 = $1; + $$.node2 = $3; + } + ; + +jump_statement + : CONTINUE SEMICOLON { + if (parseContext.loopNestingLevel <= 0) + parseContext.error($1.loc, "continue statement only allowed in loops", "", ""); + $$ = parseContext.intermediate.addBranch(EOpContinue, $1.loc); + } + | BREAK SEMICOLON { + if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0) + parseContext.error($1.loc, "break statement only allowed in switch and loops", "", ""); + $$ = parseContext.intermediate.addBranch(EOpBreak, $1.loc); + } + | RETURN SEMICOLON { + $$ = parseContext.intermediate.addBranch(EOpReturn, $1.loc); + if (parseContext.currentFunctionType->getBasicType() != EbtVoid) + parseContext.error($1.loc, "non-void function must return a value", "return", ""); + if (parseContext.inMain) + parseContext.postEntryPointReturn = true; + } + | RETURN expression SEMICOLON { + $$ = parseContext.handleReturnValue($1.loc, $2); + } + | DISCARD SEMICOLON { + parseContext.requireStage($1.loc, EShLangFragment, "discard"); + $$ = parseContext.intermediate.addBranch(EOpKill, $1.loc); + } + ; + +// Grammar Note: No 'goto'. Gotos are not supported. + +translation_unit + : external_declaration { + $$ = $1; + parseContext.intermediate.setTreeRoot($$); + } + | translation_unit external_declaration { + if ($2 != nullptr) { + $$ = parseContext.intermediate.growAggregate($1, $2); + parseContext.intermediate.setTreeRoot($$); + } + } + ; + +external_declaration + : function_definition { + $$ = $1; + } + | declaration { + $$ = $1; + } + + | SEMICOLON { + parseContext.requireProfile($1.loc, ~EEsProfile, "extraneous semicolon"); + parseContext.profileRequires($1.loc, ~EEsProfile, 460, nullptr, "extraneous semicolon"); + $$ = nullptr; + } + + ; + +function_definition + : function_prototype { + $1.function = parseContext.handleFunctionDeclarator($1.loc, *$1.function, false /* not prototype */); + $1.intermNode = parseContext.handleFunctionDefinition($1.loc, *$1.function); + } + compound_statement_no_new_scope { + // May be best done as post process phase on intermediate code + if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! parseContext.functionReturnsValue) + parseContext.error($1.loc, "function does not return a value:", "", $1.function->getName().c_str()); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.growAggregate($1.intermNode, $3); + parseContext.intermediate.setAggregateOperator($$, EOpFunction, $1.function->getType(), $1.loc); + $$->getAsAggregate()->setName($1.function->getMangledName().c_str()); + + // store the pragma information for debug and optimize and other vendor specific + // information. 
This information can be queried from the parse tree + $$->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize); + $$->getAsAggregate()->setDebug(parseContext.contextPragma.debug); + $$->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable); + } + ; + + +attribute + : LEFT_BRACKET LEFT_BRACKET attribute_list RIGHT_BRACKET RIGHT_BRACKET { + $$ = $3; + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_control_flow_attributes, "attribute"); + } + +attribute_list + : single_attribute { + $$ = $1; + } + | attribute_list COMMA single_attribute { + $$ = parseContext.mergeAttributes($1, $3); + } + +single_attribute + : IDENTIFIER { + $$ = parseContext.makeAttributes(*$1.string); + } + | IDENTIFIER LEFT_PAREN constant_expression RIGHT_PAREN { + $$ = parseContext.makeAttributes(*$1.string, $3); + } + + +%% + diff --git a/dep/glslang/glslang/MachineIndependent/glslang_tab.cpp b/dep/glslang/glslang/MachineIndependent/glslang_tab.cpp new file mode 100644 index 000000000..69f7f8e0b --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/glslang_tab.cpp @@ -0,0 +1,10597 @@ +/* A Bison parser, made by GNU Bison 3.0.4. */ + +/* Bison implementation for Yacc-like parsers in C + + Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. */ + +/* As a special exception, you may create a larger work that contains + part or all of the Bison parser skeleton and distribute that work + under terms of your choice, so long as that work isn't itself a + parser generator using the skeleton or a modified version thereof + as a parser skeleton. Alternatively, if you modify or redistribute + the parser skeleton itself, you may (at your option) remove this + special exception, which will cause the skeleton and the resulting + Bison output files to be licensed under the GNU General Public + License without this special exception. + + This special exception was added by the Free Software Foundation in + version 2.2 of Bison. */ + +/* C LALR(1) parser skeleton written by Richard Stallman, by + simplifying the original so-called "semantic" parser. */ + +/* All symbols defined below should begin with yy or YY, to avoid + infringing on user name space. This should be done even for local + variables, as they might otherwise be expanded by user macros. + There are some unavoidable exceptions within include files to + define necessary library symbols; they are noted "INFRINGES ON + USER NAME SPACE" below. */ + +/* Identify Bison output. */ +#define YYBISON 1 + +/* Bison version. */ +#define YYBISON_VERSION "3.0.4" + +/* Skeleton name. */ +#define YYSKELETON_NAME "yacc.c" + +/* Pure parsers. */ +#define YYPURE 1 + +/* Push parsers. */ +#define YYPUSH 0 + +/* Pull parsers. */ +#define YYPULL 1 + + + + +/* Copy the first part of user declarations.
*/ +#line 69 "glslang.y" /* yacc.c:339 */ + + +/* Based on: +ANSI C Yacc grammar + +In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a +matching Lex specification) for the April 30, 1985 draft version of the +ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that +original, as mentioned in the answer to question 17.25 of the comp.lang.c +FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z. + +I intend to keep this version as close to the current C Standard grammar as +possible; please let me know if you discover discrepancies. + +Jutta Degener, 1995 +*/ + +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "../Public/ShaderLang.h" +#include "attribute.h" + +using namespace glslang; + + +#line 92 "glslang_tab.cpp" /* yacc.c:339 */ + +# ifndef YY_NULLPTR +# if defined __cplusplus && 201103L <= __cplusplus +# define YY_NULLPTR nullptr +# else +# define YY_NULLPTR 0 +# endif +# endif + +/* Enabling verbose error messages. */ +#ifdef YYERROR_VERBOSE +# undef YYERROR_VERBOSE +# define YYERROR_VERBOSE 1 +#else +# define YYERROR_VERBOSE 1 +#endif + +/* In a future release of Bison, this section will be replaced + by #include "glslang_tab.cpp.h". */ +#ifndef YY_YY_GLSLANG_TAB_CPP_H_INCLUDED +# define YY_YY_GLSLANG_TAB_CPP_H_INCLUDED +/* Debug traces. */ +#ifndef YYDEBUG +# define YYDEBUG 1 +#endif +#if YYDEBUG +extern int yydebug; +#endif + +/* Token type. */ +#ifndef YYTOKENTYPE +# define YYTOKENTYPE + enum yytokentype + { + CONST = 258, + BOOL = 259, + INT = 260, + UINT = 261, + FLOAT = 262, + BVEC2 = 263, + BVEC3 = 264, + BVEC4 = 265, + IVEC2 = 266, + IVEC3 = 267, + IVEC4 = 268, + UVEC2 = 269, + UVEC3 = 270, + UVEC4 = 271, + VEC2 = 272, + VEC3 = 273, + VEC4 = 274, + MAT2 = 275, + MAT3 = 276, + MAT4 = 277, + MAT2X2 = 278, + MAT2X3 = 279, + MAT2X4 = 280, + MAT3X2 = 281, + MAT3X3 = 282, + MAT3X4 = 283, + MAT4X2 = 284, + MAT4X3 = 285, + MAT4X4 = 286, + SAMPLER2D = 287, + SAMPLER3D = 288, + SAMPLERCUBE = 289, + SAMPLER2DSHADOW = 290, + SAMPLERCUBESHADOW = 291, + SAMPLER2DARRAY = 292, + SAMPLER2DARRAYSHADOW = 293, + ISAMPLER2D = 294, + ISAMPLER3D = 295, + ISAMPLERCUBE = 296, + ISAMPLER2DARRAY = 297, + USAMPLER2D = 298, + USAMPLER3D = 299, + USAMPLERCUBE = 300, + USAMPLER2DARRAY = 301, + SAMPLER = 302, + SAMPLERSHADOW = 303, + TEXTURE2D = 304, + TEXTURE3D = 305, + TEXTURECUBE = 306, + TEXTURE2DARRAY = 307, + ITEXTURE2D = 308, + ITEXTURE3D = 309, + ITEXTURECUBE = 310, + ITEXTURE2DARRAY = 311, + UTEXTURE2D = 312, + UTEXTURE3D = 313, + UTEXTURECUBE = 314, + UTEXTURE2DARRAY = 315, + ATTRIBUTE = 316, + VARYING = 317, + FLOAT16_T = 318, + FLOAT32_T = 319, + DOUBLE = 320, + FLOAT64_T = 321, + INT64_T = 322, + UINT64_T = 323, + INT32_T = 324, + UINT32_T = 325, + INT16_T = 326, + UINT16_T = 327, + INT8_T = 328, + UINT8_T = 329, + I64VEC2 = 330, + I64VEC3 = 331, + I64VEC4 = 332, + U64VEC2 = 333, + U64VEC3 = 334, + U64VEC4 = 335, + I32VEC2 = 336, + I32VEC3 = 337, + I32VEC4 = 338, + U32VEC2 = 339, + U32VEC3 = 340, + U32VEC4 = 341, + I16VEC2 = 342, + I16VEC3 = 343, + I16VEC4 = 344, + U16VEC2 = 345, + U16VEC3 = 346, + U16VEC4 = 347, + I8VEC2 = 348, + I8VEC3 = 349, + I8VEC4 = 350, + U8VEC2 = 351, + U8VEC3 = 352, + U8VEC4 = 353, + DVEC2 = 354, + DVEC3 = 355, + DVEC4 = 356, + DMAT2 = 357, + DMAT3 = 358, + DMAT4 = 359, + F16VEC2 = 360, + F16VEC3 = 361, + F16VEC4 = 362, + F16MAT2 = 363, + F16MAT3 = 364, + F16MAT4 = 365, + F32VEC2 = 366, + F32VEC3 = 367, + F32VEC4 = 368, + F32MAT2 = 369, + F32MAT3 = 370, + F32MAT4 = 371, + F64VEC2 = 372, + F64VEC3 = 
373, + F64VEC4 = 374, + F64MAT2 = 375, + F64MAT3 = 376, + F64MAT4 = 377, + DMAT2X2 = 378, + DMAT2X3 = 379, + DMAT2X4 = 380, + DMAT3X2 = 381, + DMAT3X3 = 382, + DMAT3X4 = 383, + DMAT4X2 = 384, + DMAT4X3 = 385, + DMAT4X4 = 386, + F16MAT2X2 = 387, + F16MAT2X3 = 388, + F16MAT2X4 = 389, + F16MAT3X2 = 390, + F16MAT3X3 = 391, + F16MAT3X4 = 392, + F16MAT4X2 = 393, + F16MAT4X3 = 394, + F16MAT4X4 = 395, + F32MAT2X2 = 396, + F32MAT2X3 = 397, + F32MAT2X4 = 398, + F32MAT3X2 = 399, + F32MAT3X3 = 400, + F32MAT3X4 = 401, + F32MAT4X2 = 402, + F32MAT4X3 = 403, + F32MAT4X4 = 404, + F64MAT2X2 = 405, + F64MAT2X3 = 406, + F64MAT2X4 = 407, + F64MAT3X2 = 408, + F64MAT3X3 = 409, + F64MAT3X4 = 410, + F64MAT4X2 = 411, + F64MAT4X3 = 412, + F64MAT4X4 = 413, + ATOMIC_UINT = 414, + ACCSTRUCTNV = 415, + ACCSTRUCTEXT = 416, + RAYQUERYEXT = 417, + FCOOPMATNV = 418, + ICOOPMATNV = 419, + UCOOPMATNV = 420, + SAMPLERCUBEARRAY = 421, + SAMPLERCUBEARRAYSHADOW = 422, + ISAMPLERCUBEARRAY = 423, + USAMPLERCUBEARRAY = 424, + SAMPLER1D = 425, + SAMPLER1DARRAY = 426, + SAMPLER1DARRAYSHADOW = 427, + ISAMPLER1D = 428, + SAMPLER1DSHADOW = 429, + SAMPLER2DRECT = 430, + SAMPLER2DRECTSHADOW = 431, + ISAMPLER2DRECT = 432, + USAMPLER2DRECT = 433, + SAMPLERBUFFER = 434, + ISAMPLERBUFFER = 435, + USAMPLERBUFFER = 436, + SAMPLER2DMS = 437, + ISAMPLER2DMS = 438, + USAMPLER2DMS = 439, + SAMPLER2DMSARRAY = 440, + ISAMPLER2DMSARRAY = 441, + USAMPLER2DMSARRAY = 442, + SAMPLEREXTERNALOES = 443, + SAMPLEREXTERNAL2DY2YEXT = 444, + ISAMPLER1DARRAY = 445, + USAMPLER1D = 446, + USAMPLER1DARRAY = 447, + F16SAMPLER1D = 448, + F16SAMPLER2D = 449, + F16SAMPLER3D = 450, + F16SAMPLER2DRECT = 451, + F16SAMPLERCUBE = 452, + F16SAMPLER1DARRAY = 453, + F16SAMPLER2DARRAY = 454, + F16SAMPLERCUBEARRAY = 455, + F16SAMPLERBUFFER = 456, + F16SAMPLER2DMS = 457, + F16SAMPLER2DMSARRAY = 458, + F16SAMPLER1DSHADOW = 459, + F16SAMPLER2DSHADOW = 460, + F16SAMPLER1DARRAYSHADOW = 461, + F16SAMPLER2DARRAYSHADOW = 462, + F16SAMPLER2DRECTSHADOW = 463, + F16SAMPLERCUBESHADOW = 464, + F16SAMPLERCUBEARRAYSHADOW = 465, + IMAGE1D = 466, + IIMAGE1D = 467, + UIMAGE1D = 468, + IMAGE2D = 469, + IIMAGE2D = 470, + UIMAGE2D = 471, + IMAGE3D = 472, + IIMAGE3D = 473, + UIMAGE3D = 474, + IMAGE2DRECT = 475, + IIMAGE2DRECT = 476, + UIMAGE2DRECT = 477, + IMAGECUBE = 478, + IIMAGECUBE = 479, + UIMAGECUBE = 480, + IMAGEBUFFER = 481, + IIMAGEBUFFER = 482, + UIMAGEBUFFER = 483, + IMAGE1DARRAY = 484, + IIMAGE1DARRAY = 485, + UIMAGE1DARRAY = 486, + IMAGE2DARRAY = 487, + IIMAGE2DARRAY = 488, + UIMAGE2DARRAY = 489, + IMAGECUBEARRAY = 490, + IIMAGECUBEARRAY = 491, + UIMAGECUBEARRAY = 492, + IMAGE2DMS = 493, + IIMAGE2DMS = 494, + UIMAGE2DMS = 495, + IMAGE2DMSARRAY = 496, + IIMAGE2DMSARRAY = 497, + UIMAGE2DMSARRAY = 498, + F16IMAGE1D = 499, + F16IMAGE2D = 500, + F16IMAGE3D = 501, + F16IMAGE2DRECT = 502, + F16IMAGECUBE = 503, + F16IMAGE1DARRAY = 504, + F16IMAGE2DARRAY = 505, + F16IMAGECUBEARRAY = 506, + F16IMAGEBUFFER = 507, + F16IMAGE2DMS = 508, + F16IMAGE2DMSARRAY = 509, + TEXTURECUBEARRAY = 510, + ITEXTURECUBEARRAY = 511, + UTEXTURECUBEARRAY = 512, + TEXTURE1D = 513, + ITEXTURE1D = 514, + UTEXTURE1D = 515, + TEXTURE1DARRAY = 516, + ITEXTURE1DARRAY = 517, + UTEXTURE1DARRAY = 518, + TEXTURE2DRECT = 519, + ITEXTURE2DRECT = 520, + UTEXTURE2DRECT = 521, + TEXTUREBUFFER = 522, + ITEXTUREBUFFER = 523, + UTEXTUREBUFFER = 524, + TEXTURE2DMS = 525, + ITEXTURE2DMS = 526, + UTEXTURE2DMS = 527, + TEXTURE2DMSARRAY = 528, + ITEXTURE2DMSARRAY = 529, + UTEXTURE2DMSARRAY = 530, + F16TEXTURE1D = 531, + F16TEXTURE2D = 532, + 
F16TEXTURE3D = 533, + F16TEXTURE2DRECT = 534, + F16TEXTURECUBE = 535, + F16TEXTURE1DARRAY = 536, + F16TEXTURE2DARRAY = 537, + F16TEXTURECUBEARRAY = 538, + F16TEXTUREBUFFER = 539, + F16TEXTURE2DMS = 540, + F16TEXTURE2DMSARRAY = 541, + SUBPASSINPUT = 542, + SUBPASSINPUTMS = 543, + ISUBPASSINPUT = 544, + ISUBPASSINPUTMS = 545, + USUBPASSINPUT = 546, + USUBPASSINPUTMS = 547, + F16SUBPASSINPUT = 548, + F16SUBPASSINPUTMS = 549, + LEFT_OP = 550, + RIGHT_OP = 551, + INC_OP = 552, + DEC_OP = 553, + LE_OP = 554, + GE_OP = 555, + EQ_OP = 556, + NE_OP = 557, + AND_OP = 558, + OR_OP = 559, + XOR_OP = 560, + MUL_ASSIGN = 561, + DIV_ASSIGN = 562, + ADD_ASSIGN = 563, + MOD_ASSIGN = 564, + LEFT_ASSIGN = 565, + RIGHT_ASSIGN = 566, + AND_ASSIGN = 567, + XOR_ASSIGN = 568, + OR_ASSIGN = 569, + SUB_ASSIGN = 570, + STRING_LITERAL = 571, + LEFT_PAREN = 572, + RIGHT_PAREN = 573, + LEFT_BRACKET = 574, + RIGHT_BRACKET = 575, + LEFT_BRACE = 576, + RIGHT_BRACE = 577, + DOT = 578, + COMMA = 579, + COLON = 580, + EQUAL = 581, + SEMICOLON = 582, + BANG = 583, + DASH = 584, + TILDE = 585, + PLUS = 586, + STAR = 587, + SLASH = 588, + PERCENT = 589, + LEFT_ANGLE = 590, + RIGHT_ANGLE = 591, + VERTICAL_BAR = 592, + CARET = 593, + AMPERSAND = 594, + QUESTION = 595, + INVARIANT = 596, + HIGH_PRECISION = 597, + MEDIUM_PRECISION = 598, + LOW_PRECISION = 599, + PRECISION = 600, + PACKED = 601, + RESOURCE = 602, + SUPERP = 603, + FLOATCONSTANT = 604, + INTCONSTANT = 605, + UINTCONSTANT = 606, + BOOLCONSTANT = 607, + IDENTIFIER = 608, + TYPE_NAME = 609, + CENTROID = 610, + IN = 611, + OUT = 612, + INOUT = 613, + STRUCT = 614, + VOID = 615, + WHILE = 616, + BREAK = 617, + CONTINUE = 618, + DO = 619, + ELSE = 620, + FOR = 621, + IF = 622, + DISCARD = 623, + RETURN = 624, + SWITCH = 625, + CASE = 626, + DEFAULT = 627, + UNIFORM = 628, + SHARED = 629, + BUFFER = 630, + FLAT = 631, + SMOOTH = 632, + LAYOUT = 633, + DOUBLECONSTANT = 634, + INT16CONSTANT = 635, + UINT16CONSTANT = 636, + FLOAT16CONSTANT = 637, + INT32CONSTANT = 638, + UINT32CONSTANT = 639, + INT64CONSTANT = 640, + UINT64CONSTANT = 641, + SUBROUTINE = 642, + DEMOTE = 643, + PAYLOADNV = 644, + PAYLOADINNV = 645, + HITATTRNV = 646, + CALLDATANV = 647, + CALLDATAINNV = 648, + PAYLOADEXT = 649, + PAYLOADINEXT = 650, + HITATTREXT = 651, + CALLDATAEXT = 652, + CALLDATAINEXT = 653, + PATCH = 654, + SAMPLE = 655, + NONUNIFORM = 656, + COHERENT = 657, + VOLATILE = 658, + RESTRICT = 659, + READONLY = 660, + WRITEONLY = 661, + DEVICECOHERENT = 662, + QUEUEFAMILYCOHERENT = 663, + WORKGROUPCOHERENT = 664, + SUBGROUPCOHERENT = 665, + NONPRIVATE = 666, + SHADERCALLCOHERENT = 667, + NOPERSPECTIVE = 668, + EXPLICITINTERPAMD = 669, + PERVERTEXNV = 670, + PERPRIMITIVENV = 671, + PERVIEWNV = 672, + PERTASKNV = 673, + PRECISE = 674 + }; +#endif + +/* Value type. */ +#if ! defined YYSTYPE && ! 
defined YYSTYPE_IS_DECLARED + +union YYSTYPE +{ +#line 97 "glslang.y" /* yacc.c:355 */ + + struct { + glslang::TSourceLoc loc; + union { + glslang::TString *string; + int i; + unsigned int u; + long long i64; + unsigned long long u64; + bool b; + double d; + }; + glslang::TSymbol* symbol; + } lex; + struct { + glslang::TSourceLoc loc; + glslang::TOperator op; + union { + TIntermNode* intermNode; + glslang::TIntermNodePair nodePair; + glslang::TIntermTyped* intermTypedNode; + glslang::TAttributes* attributes; + }; + union { + glslang::TPublicType type; + glslang::TFunction* function; + glslang::TParameter param; + glslang::TTypeLoc typeLine; + glslang::TTypeList* typeList; + glslang::TArraySizes* arraySizes; + glslang::TIdentifierList* identifierList; + }; + glslang::TArraySizes* typeParameters; + } interm; + +#line 588 "glslang_tab.cpp" /* yacc.c:355 */ +}; + +typedef union YYSTYPE YYSTYPE; +# define YYSTYPE_IS_TRIVIAL 1 +# define YYSTYPE_IS_DECLARED 1 +#endif + + + +int yyparse (glslang::TParseContext* pParseContext); + +#endif /* !YY_YY_GLSLANG_TAB_CPP_H_INCLUDED */ + +/* Copy the second part of user declarations. */ +#line 133 "glslang.y" /* yacc.c:358 */ + + +/* windows only pragma */ +#ifdef _MSC_VER + #pragma warning(disable : 4065) + #pragma warning(disable : 4127) + #pragma warning(disable : 4244) +#endif + +#define parseContext (*pParseContext) +#define yyerror(context, msg) context->parserError(msg) + +extern int yylex(YYSTYPE*, TParseContext&); + + +#line 619 "glslang_tab.cpp" /* yacc.c:358 */ + +#ifdef short +# undef short +#endif + +#ifdef YYTYPE_UINT8 +typedef YYTYPE_UINT8 yytype_uint8; +#else +typedef unsigned char yytype_uint8; +#endif + +#ifdef YYTYPE_INT8 +typedef YYTYPE_INT8 yytype_int8; +#else +typedef signed char yytype_int8; +#endif + +#ifdef YYTYPE_UINT16 +typedef YYTYPE_UINT16 yytype_uint16; +#else +typedef unsigned short int yytype_uint16; +#endif + +#ifdef YYTYPE_INT16 +typedef YYTYPE_INT16 yytype_int16; +#else +typedef short int yytype_int16; +#endif + +#ifndef YYSIZE_T +# ifdef __SIZE_TYPE__ +# define YYSIZE_T __SIZE_TYPE__ +# elif defined size_t +# define YYSIZE_T size_t +# elif ! defined YYSIZE_T +# include <stddef.h> /* INFRINGES ON USER NAME SPACE */ +# define YYSIZE_T size_t +# else +# define YYSIZE_T unsigned int +# endif +#endif + +#define YYSIZE_MAXIMUM ((YYSIZE_T) -1) + +#ifndef YY_ +# if defined YYENABLE_NLS && YYENABLE_NLS +# if ENABLE_NLS +# include <libintl.h> /* INFRINGES ON USER NAME SPACE */ +# define YY_(Msgid) dgettext ("bison-runtime", Msgid) +# endif +# endif +# ifndef YY_ +# define YY_(Msgid) Msgid +# endif +#endif + +#ifndef YY_ATTRIBUTE +# if (defined __GNUC__ \ + && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ + || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C +# define YY_ATTRIBUTE(Spec) __attribute__(Spec) +# else +# define YY_ATTRIBUTE(Spec) /* empty */ +# endif +#endif + +#ifndef YY_ATTRIBUTE_PURE +# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) +#endif + +#ifndef YY_ATTRIBUTE_UNUSED +# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) +#endif + +#if !defined _Noreturn \ + && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) +# if defined _MSC_VER && 1200 <= _MSC_VER +# define _Noreturn __declspec (noreturn) +# else +# define _Noreturn YY_ATTRIBUTE ((__noreturn__)) +# endif +#endif + +/* Suppress unused-variable warnings by "using" E. */ +#if !
defined lint || defined __GNUC__ +# define YYUSE(E) ((void) (E)) +#else +# define YYUSE(E) /* empty */ +#endif + +#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ +/* Suppress an incorrect diagnostic about yylval being uninitialized. */ +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ + _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") +# define YY_IGNORE_MAYBE_UNINITIALIZED_END \ + _Pragma ("GCC diagnostic pop") +#else +# define YY_INITIAL_VALUE(Value) Value +#endif +#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN +# define YY_IGNORE_MAYBE_UNINITIALIZED_END +#endif +#ifndef YY_INITIAL_VALUE +# define YY_INITIAL_VALUE(Value) /* Nothing. */ +#endif + + +#if ! defined yyoverflow || YYERROR_VERBOSE + +/* The parser invokes alloca or malloc; define the necessary symbols. */ + +# ifdef YYSTACK_USE_ALLOCA +# if YYSTACK_USE_ALLOCA +# ifdef __GNUC__ +# define YYSTACK_ALLOC __builtin_alloca +# elif defined __BUILTIN_VA_ARG_INCR +# include <alloca.h> /* INFRINGES ON USER NAME SPACE */ +# elif defined _AIX +# define YYSTACK_ALLOC __alloca +# elif defined _MSC_VER +# include <malloc.h> /* INFRINGES ON USER NAME SPACE */ +# define alloca _alloca +# else +# define YYSTACK_ALLOC alloca +# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS +# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ + /* Use EXIT_SUCCESS as a witness for stdlib.h. */ +# ifndef EXIT_SUCCESS +# define EXIT_SUCCESS 0 +# endif +# endif +# endif +# endif +# endif + +# ifdef YYSTACK_ALLOC + /* Pacify GCC's 'empty if-body' warning. */ +# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) +# ifndef YYSTACK_ALLOC_MAXIMUM + /* The OS might guarantee only one guard page at the bottom of the stack, + and a page size can be as small as 4096 bytes. So we cannot safely + invoke alloca (N) if N exceeds 4096. Use a slightly smaller number + to allow for a few compiler-allocated temporary stack slots. */ +# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ +# endif +# else +# define YYSTACK_ALLOC YYMALLOC +# define YYSTACK_FREE YYFREE +# ifndef YYSTACK_ALLOC_MAXIMUM +# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM +# endif +# if (defined __cplusplus && ! defined EXIT_SUCCESS \ + && ! ((defined YYMALLOC || defined malloc) \ + && (defined YYFREE || defined free))) +# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ +# ifndef EXIT_SUCCESS +# define EXIT_SUCCESS 0 +# endif +# endif +# ifndef YYMALLOC +# define YYMALLOC malloc +# if ! defined malloc && ! defined EXIT_SUCCESS +void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ +# endif +# endif +# ifndef YYFREE +# define YYFREE free +# if ! defined free && ! defined EXIT_SUCCESS +void free (void *); /* INFRINGES ON USER NAME SPACE */ +# endif +# endif +# endif +#endif /* ! defined yyoverflow || YYERROR_VERBOSE */ + + +#if (! defined yyoverflow \ + && (! defined __cplusplus \ + || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) + +/* A type that is properly aligned for any stack member. */ +union yyalloc +{ + yytype_int16 yyss_alloc; + YYSTYPE yyvs_alloc; +}; + +/* The size of the maximum gap between one aligned stack and the next. */ +# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) + +/* The size of an array large to enough to hold all stacks, each with + N elements.
*/ +# define YYSTACK_BYTES(N) \ + ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + + YYSTACK_GAP_MAXIMUM) + +# define YYCOPY_NEEDED 1 + +/* Relocate STACK from its old location to the new one. The + local variables YYSIZE and YYSTACKSIZE give the old and new number of + elements in the stack, and YYPTR gives the new location of the + stack. Advance YYPTR to a properly aligned location for the next + stack. */ +# define YYSTACK_RELOCATE(Stack_alloc, Stack) \ + do \ + { \ + YYSIZE_T yynewbytes; \ + YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ + Stack = &yyptr->Stack_alloc; \ + yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ + yyptr += yynewbytes / sizeof (*yyptr); \ + } \ + while (0) + +#endif + +#if defined YYCOPY_NEEDED && YYCOPY_NEEDED +/* Copy COUNT objects from SRC to DST. The source and destination do + not overlap. */ +# ifndef YYCOPY +# if defined __GNUC__ && 1 < __GNUC__ +# define YYCOPY(Dst, Src, Count) \ + __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) +# else +# define YYCOPY(Dst, Src, Count) \ + do \ + { \ + YYSIZE_T yyi; \ + for (yyi = 0; yyi < (Count); yyi++) \ + (Dst)[yyi] = (Src)[yyi]; \ + } \ + while (0) +# endif +# endif +#endif /* !YYCOPY_NEEDED */ + +/* YYFINAL -- State number of the termination state. */ +#define YYFINAL 394 +/* YYLAST -- Last index in YYTABLE. */ +#define YYLAST 9550 + +/* YYNTOKENS -- Number of terminals. */ +#define YYNTOKENS 420 +/* YYNNTS -- Number of nonterminals. */ +#define YYNNTS 111 +/* YYNRULES -- Number of rules. */ +#define YYNRULES 591 +/* YYNSTATES -- Number of states. */ +#define YYNSTATES 736 + +/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned + by yylex, with out-of-bounds checking. */ +#define YYUNDEFTOK 2 +#define YYMAXUTOK 674 + +#define YYTRANSLATE(YYX) \ + ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) + +/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM + as returned by yylex, without out-of-bounds checking. 
*/ +static const yytype_uint16 yytranslate[] = +{ + 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, + 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, + 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, + 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, + 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, + 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, + 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, + 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419 +}; + +#if YYDEBUG + /* YYRLINE[YYN] -- Source line where rule number YYN was defined. 
*/ +static const yytype_uint16 yyrline[] = +{ + 0, 357, 357, 363, 366, 371, 374, 377, 381, 385, + 388, 392, 396, 400, 404, 408, 412, 418, 426, 429, + 432, 435, 438, 443, 451, 458, 465, 471, 475, 482, + 485, 491, 498, 508, 516, 521, 549, 558, 564, 568, + 572, 592, 593, 594, 595, 601, 602, 607, 612, 621, + 622, 627, 635, 636, 642, 651, 652, 657, 662, 667, + 675, 676, 685, 697, 698, 707, 708, 717, 718, 727, + 728, 736, 737, 745, 746, 754, 755, 755, 773, 774, + 790, 794, 798, 802, 807, 811, 815, 819, 823, 827, + 831, 838, 841, 852, 859, 864, 869, 876, 880, 884, + 888, 893, 898, 907, 907, 918, 922, 929, 936, 939, + 946, 954, 974, 997, 1012, 1037, 1048, 1058, 1068, 1078, + 1087, 1090, 1094, 1098, 1103, 1111, 1118, 1123, 1128, 1133, + 1142, 1152, 1179, 1188, 1195, 1203, 1210, 1217, 1225, 1235, + 1242, 1253, 1259, 1262, 1269, 1273, 1277, 1286, 1296, 1299, + 1310, 1313, 1316, 1320, 1324, 1329, 1333, 1340, 1344, 1349, + 1355, 1361, 1368, 1373, 1381, 1387, 1399, 1413, 1419, 1424, + 1432, 1440, 1448, 1456, 1464, 1472, 1480, 1488, 1495, 1502, + 1506, 1511, 1516, 1521, 1526, 1531, 1536, 1540, 1544, 1548, + 1552, 1558, 1569, 1576, 1579, 1588, 1593, 1603, 1608, 1616, + 1620, 1630, 1633, 1639, 1645, 1652, 1662, 1666, 1670, 1674, + 1679, 1683, 1688, 1693, 1698, 1703, 1708, 1713, 1718, 1723, + 1728, 1734, 1740, 1746, 1751, 1756, 1761, 1766, 1771, 1776, + 1781, 1786, 1791, 1796, 1801, 1807, 1814, 1819, 1824, 1829, + 1834, 1839, 1844, 1849, 1854, 1859, 1864, 1869, 1877, 1885, + 1893, 1899, 1905, 1911, 1917, 1923, 1929, 1935, 1941, 1947, + 1953, 1959, 1965, 1971, 1977, 1983, 1989, 1995, 2001, 2007, + 2013, 2019, 2025, 2031, 2037, 2043, 2049, 2055, 2061, 2067, + 2073, 2079, 2085, 2091, 2099, 2107, 2115, 2123, 2131, 2139, + 2147, 2155, 2163, 2171, 2179, 2187, 2193, 2199, 2205, 2211, + 2217, 2223, 2229, 2235, 2241, 2247, 2253, 2259, 2265, 2271, + 2277, 2283, 2289, 2295, 2301, 2307, 2313, 2319, 2325, 2331, + 2337, 2343, 2349, 2355, 2361, 2367, 2373, 2379, 2385, 2391, + 2397, 2403, 2407, 2411, 2415, 2420, 2426, 2431, 2436, 2441, + 2446, 2451, 2456, 2462, 2467, 2472, 2477, 2482, 2487, 2493, + 2499, 2505, 2511, 2517, 2523, 2529, 2535, 2541, 2547, 2553, + 2559, 2565, 2571, 2576, 2581, 2586, 2591, 2596, 2601, 2607, + 2612, 2617, 2622, 2627, 2632, 2637, 2642, 2648, 2653, 2658, + 2663, 2668, 2673, 2678, 2683, 2688, 2693, 2698, 2703, 2708, + 2713, 2718, 2724, 2729, 2734, 2740, 2746, 2751, 2756, 2761, + 2767, 2772, 2777, 2782, 2788, 2793, 2798, 2803, 2809, 2814, + 2819, 2824, 2830, 2836, 2842, 2848, 2853, 2859, 2865, 2871, + 2876, 2881, 2886, 2891, 2896, 2902, 2907, 2912, 2917, 2923, + 2928, 2933, 2938, 2944, 2949, 2954, 2959, 2965, 2970, 2975, + 2980, 2986, 2991, 2996, 3001, 3007, 3012, 3017, 3022, 3028, + 3033, 3038, 3043, 3049, 3054, 3059, 3064, 3070, 3075, 3080, + 3085, 3091, 3096, 3101, 3106, 3112, 3117, 3122, 3127, 3133, + 3138, 3143, 3148, 3154, 3159, 3164, 3169, 3175, 3180, 3185, + 3190, 3196, 3201, 3206, 3212, 3218, 3224, 3230, 3237, 3244, + 3250, 3256, 3262, 3268, 3274, 3280, 3287, 3292, 3308, 3313, + 3318, 3326, 3326, 3337, 3337, 3347, 3350, 3363, 3385, 3412, + 3416, 3422, 3427, 3438, 3442, 3448, 3459, 3462, 3469, 3473, + 3474, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3488, 3494, + 3503, 3504, 3508, 3504, 3520, 3521, 3525, 3525, 3532, 3532, + 3546, 3549, 3557, 3565, 3576, 3577, 3581, 3585, 3592, 3599, + 3603, 3611, 3615, 3628, 3632, 3639, 3639, 3659, 3662, 3668, + 3680, 3692, 3696, 3703, 3703, 3718, 3718, 3734, 3734, 3755, + 3758, 3764, 3767, 3773, 3777, 3784, 3789, 3794, 3801, 3804, + 3813, 3817, 3826, 3829, 
3833, 3842, 3842, 3865, 3871, 3874, + 3879, 3882 +}; +#endif + +#if YYDEBUG || YYERROR_VERBOSE || 1 +/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. + First, the terminals, then, starting at YYNTOKENS, nonterminals. */ +static const char *const yytname[] = +{ + "$end", "error", "$undefined", "CONST", "BOOL", "INT", "UINT", "FLOAT", + "BVEC2", "BVEC3", "BVEC4", "IVEC2", "IVEC3", "IVEC4", "UVEC2", "UVEC3", + "UVEC4", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3", "MAT4", "MAT2X2", + "MAT2X3", "MAT2X4", "MAT3X2", "MAT3X3", "MAT3X4", "MAT4X2", "MAT4X3", + "MAT4X4", "SAMPLER2D", "SAMPLER3D", "SAMPLERCUBE", "SAMPLER2DSHADOW", + "SAMPLERCUBESHADOW", "SAMPLER2DARRAY", "SAMPLER2DARRAYSHADOW", + "ISAMPLER2D", "ISAMPLER3D", "ISAMPLERCUBE", "ISAMPLER2DARRAY", + "USAMPLER2D", "USAMPLER3D", "USAMPLERCUBE", "USAMPLER2DARRAY", "SAMPLER", + "SAMPLERSHADOW", "TEXTURE2D", "TEXTURE3D", "TEXTURECUBE", + "TEXTURE2DARRAY", "ITEXTURE2D", "ITEXTURE3D", "ITEXTURECUBE", + "ITEXTURE2DARRAY", "UTEXTURE2D", "UTEXTURE3D", "UTEXTURECUBE", + "UTEXTURE2DARRAY", "ATTRIBUTE", "VARYING", "FLOAT16_T", "FLOAT32_T", + "DOUBLE", "FLOAT64_T", "INT64_T", "UINT64_T", "INT32_T", "UINT32_T", + "INT16_T", "UINT16_T", "INT8_T", "UINT8_T", "I64VEC2", "I64VEC3", + "I64VEC4", "U64VEC2", "U64VEC3", "U64VEC4", "I32VEC2", "I32VEC3", + "I32VEC4", "U32VEC2", "U32VEC3", "U32VEC4", "I16VEC2", "I16VEC3", + "I16VEC4", "U16VEC2", "U16VEC3", "U16VEC4", "I8VEC2", "I8VEC3", "I8VEC4", + "U8VEC2", "U8VEC3", "U8VEC4", "DVEC2", "DVEC3", "DVEC4", "DMAT2", + "DMAT3", "DMAT4", "F16VEC2", "F16VEC3", "F16VEC4", "F16MAT2", "F16MAT3", + "F16MAT4", "F32VEC2", "F32VEC3", "F32VEC4", "F32MAT2", "F32MAT3", + "F32MAT4", "F64VEC2", "F64VEC3", "F64VEC4", "F64MAT2", "F64MAT3", + "F64MAT4", "DMAT2X2", "DMAT2X3", "DMAT2X4", "DMAT3X2", "DMAT3X3", + "DMAT3X4", "DMAT4X2", "DMAT4X3", "DMAT4X4", "F16MAT2X2", "F16MAT2X3", + "F16MAT2X4", "F16MAT3X2", "F16MAT3X3", "F16MAT3X4", "F16MAT4X2", + "F16MAT4X3", "F16MAT4X4", "F32MAT2X2", "F32MAT2X3", "F32MAT2X4", + "F32MAT3X2", "F32MAT3X3", "F32MAT3X4", "F32MAT4X2", "F32MAT4X3", + "F32MAT4X4", "F64MAT2X2", "F64MAT2X3", "F64MAT2X4", "F64MAT3X2", + "F64MAT3X3", "F64MAT3X4", "F64MAT4X2", "F64MAT4X3", "F64MAT4X4", + "ATOMIC_UINT", "ACCSTRUCTNV", "ACCSTRUCTEXT", "RAYQUERYEXT", + "FCOOPMATNV", "ICOOPMATNV", "UCOOPMATNV", "SAMPLERCUBEARRAY", + "SAMPLERCUBEARRAYSHADOW", "ISAMPLERCUBEARRAY", "USAMPLERCUBEARRAY", + "SAMPLER1D", "SAMPLER1DARRAY", "SAMPLER1DARRAYSHADOW", "ISAMPLER1D", + "SAMPLER1DSHADOW", "SAMPLER2DRECT", "SAMPLER2DRECTSHADOW", + "ISAMPLER2DRECT", "USAMPLER2DRECT", "SAMPLERBUFFER", "ISAMPLERBUFFER", + "USAMPLERBUFFER", "SAMPLER2DMS", "ISAMPLER2DMS", "USAMPLER2DMS", + "SAMPLER2DMSARRAY", "ISAMPLER2DMSARRAY", "USAMPLER2DMSARRAY", + "SAMPLEREXTERNALOES", "SAMPLEREXTERNAL2DY2YEXT", "ISAMPLER1DARRAY", + "USAMPLER1D", "USAMPLER1DARRAY", "F16SAMPLER1D", "F16SAMPLER2D", + "F16SAMPLER3D", "F16SAMPLER2DRECT", "F16SAMPLERCUBE", + "F16SAMPLER1DARRAY", "F16SAMPLER2DARRAY", "F16SAMPLERCUBEARRAY", + "F16SAMPLERBUFFER", "F16SAMPLER2DMS", "F16SAMPLER2DMSARRAY", + "F16SAMPLER1DSHADOW", "F16SAMPLER2DSHADOW", "F16SAMPLER1DARRAYSHADOW", + "F16SAMPLER2DARRAYSHADOW", "F16SAMPLER2DRECTSHADOW", + "F16SAMPLERCUBESHADOW", "F16SAMPLERCUBEARRAYSHADOW", "IMAGE1D", + "IIMAGE1D", "UIMAGE1D", "IMAGE2D", "IIMAGE2D", "UIMAGE2D", "IMAGE3D", + "IIMAGE3D", "UIMAGE3D", "IMAGE2DRECT", "IIMAGE2DRECT", "UIMAGE2DRECT", + "IMAGECUBE", "IIMAGECUBE", "UIMAGECUBE", "IMAGEBUFFER", "IIMAGEBUFFER", + "UIMAGEBUFFER", "IMAGE1DARRAY", "IIMAGE1DARRAY", "UIMAGE1DARRAY", + 
"IMAGE2DARRAY", "IIMAGE2DARRAY", "UIMAGE2DARRAY", "IMAGECUBEARRAY", + "IIMAGECUBEARRAY", "UIMAGECUBEARRAY", "IMAGE2DMS", "IIMAGE2DMS", + "UIMAGE2DMS", "IMAGE2DMSARRAY", "IIMAGE2DMSARRAY", "UIMAGE2DMSARRAY", + "F16IMAGE1D", "F16IMAGE2D", "F16IMAGE3D", "F16IMAGE2DRECT", + "F16IMAGECUBE", "F16IMAGE1DARRAY", "F16IMAGE2DARRAY", + "F16IMAGECUBEARRAY", "F16IMAGEBUFFER", "F16IMAGE2DMS", + "F16IMAGE2DMSARRAY", "TEXTURECUBEARRAY", "ITEXTURECUBEARRAY", + "UTEXTURECUBEARRAY", "TEXTURE1D", "ITEXTURE1D", "UTEXTURE1D", + "TEXTURE1DARRAY", "ITEXTURE1DARRAY", "UTEXTURE1DARRAY", "TEXTURE2DRECT", + "ITEXTURE2DRECT", "UTEXTURE2DRECT", "TEXTUREBUFFER", "ITEXTUREBUFFER", + "UTEXTUREBUFFER", "TEXTURE2DMS", "ITEXTURE2DMS", "UTEXTURE2DMS", + "TEXTURE2DMSARRAY", "ITEXTURE2DMSARRAY", "UTEXTURE2DMSARRAY", + "F16TEXTURE1D", "F16TEXTURE2D", "F16TEXTURE3D", "F16TEXTURE2DRECT", + "F16TEXTURECUBE", "F16TEXTURE1DARRAY", "F16TEXTURE2DARRAY", + "F16TEXTURECUBEARRAY", "F16TEXTUREBUFFER", "F16TEXTURE2DMS", + "F16TEXTURE2DMSARRAY", "SUBPASSINPUT", "SUBPASSINPUTMS", "ISUBPASSINPUT", + "ISUBPASSINPUTMS", "USUBPASSINPUT", "USUBPASSINPUTMS", "F16SUBPASSINPUT", + "F16SUBPASSINPUTMS", "LEFT_OP", "RIGHT_OP", "INC_OP", "DEC_OP", "LE_OP", + "GE_OP", "EQ_OP", "NE_OP", "AND_OP", "OR_OP", "XOR_OP", "MUL_ASSIGN", + "DIV_ASSIGN", "ADD_ASSIGN", "MOD_ASSIGN", "LEFT_ASSIGN", "RIGHT_ASSIGN", + "AND_ASSIGN", "XOR_ASSIGN", "OR_ASSIGN", "SUB_ASSIGN", "STRING_LITERAL", + "LEFT_PAREN", "RIGHT_PAREN", "LEFT_BRACKET", "RIGHT_BRACKET", + "LEFT_BRACE", "RIGHT_BRACE", "DOT", "COMMA", "COLON", "EQUAL", + "SEMICOLON", "BANG", "DASH", "TILDE", "PLUS", "STAR", "SLASH", "PERCENT", + "LEFT_ANGLE", "RIGHT_ANGLE", "VERTICAL_BAR", "CARET", "AMPERSAND", + "QUESTION", "INVARIANT", "HIGH_PRECISION", "MEDIUM_PRECISION", + "LOW_PRECISION", "PRECISION", "PACKED", "RESOURCE", "SUPERP", + "FLOATCONSTANT", "INTCONSTANT", "UINTCONSTANT", "BOOLCONSTANT", + "IDENTIFIER", "TYPE_NAME", "CENTROID", "IN", "OUT", "INOUT", "STRUCT", + "VOID", "WHILE", "BREAK", "CONTINUE", "DO", "ELSE", "FOR", "IF", + "DISCARD", "RETURN", "SWITCH", "CASE", "DEFAULT", "UNIFORM", "SHARED", + "BUFFER", "FLAT", "SMOOTH", "LAYOUT", "DOUBLECONSTANT", "INT16CONSTANT", + "UINT16CONSTANT", "FLOAT16CONSTANT", "INT32CONSTANT", "UINT32CONSTANT", + "INT64CONSTANT", "UINT64CONSTANT", "SUBROUTINE", "DEMOTE", "PAYLOADNV", + "PAYLOADINNV", "HITATTRNV", "CALLDATANV", "CALLDATAINNV", "PAYLOADEXT", + "PAYLOADINEXT", "HITATTREXT", "CALLDATAEXT", "CALLDATAINEXT", "PATCH", + "SAMPLE", "NONUNIFORM", "COHERENT", "VOLATILE", "RESTRICT", "READONLY", + "WRITEONLY", "DEVICECOHERENT", "QUEUEFAMILYCOHERENT", + "WORKGROUPCOHERENT", "SUBGROUPCOHERENT", "NONPRIVATE", + "SHADERCALLCOHERENT", "NOPERSPECTIVE", "EXPLICITINTERPAMD", + "PERVERTEXNV", "PERPRIMITIVENV", "PERVIEWNV", "PERTASKNV", "PRECISE", + "$accept", "variable_identifier", "primary_expression", + "postfix_expression", "integer_expression", "function_call", + "function_call_or_method", "function_call_generic", + "function_call_header_no_parameters", + "function_call_header_with_parameters", "function_call_header", + "function_identifier", "unary_expression", "unary_operator", + "multiplicative_expression", "additive_expression", "shift_expression", + "relational_expression", "equality_expression", "and_expression", + "exclusive_or_expression", "inclusive_or_expression", + "logical_and_expression", "logical_xor_expression", + "logical_or_expression", "conditional_expression", "$@1", + "assignment_expression", "assignment_operator", "expression", + "constant_expression", 
"declaration", "block_structure", "$@2", + "identifier_list", "function_prototype", "function_declarator", + "function_header_with_parameters", "function_header", + "parameter_declarator", "parameter_declaration", + "parameter_type_specifier", "init_declarator_list", "single_declaration", + "fully_specified_type", "invariant_qualifier", "interpolation_qualifier", + "layout_qualifier", "layout_qualifier_id_list", "layout_qualifier_id", + "precise_qualifier", "type_qualifier", "single_type_qualifier", + "storage_qualifier", "non_uniform_qualifier", "type_name_list", + "type_specifier", "array_specifier", "type_parameter_specifier_opt", + "type_parameter_specifier", "type_parameter_specifier_list", + "type_specifier_nonarray", "precision_qualifier", "struct_specifier", + "$@3", "$@4", "struct_declaration_list", "struct_declaration", + "struct_declarator_list", "struct_declarator", "initializer", + "initializer_list", "declaration_statement", "statement", + "simple_statement", "demote_statement", "compound_statement", "$@5", + "$@6", "statement_no_new_scope", "statement_scoped", "$@7", "$@8", + "compound_statement_no_new_scope", "statement_list", + "expression_statement", "selection_statement", + "selection_statement_nonattributed", "selection_rest_statement", + "condition", "switch_statement", "switch_statement_nonattributed", "$@9", + "switch_statement_list", "case_label", "iteration_statement", + "iteration_statement_nonattributed", "$@10", "$@11", "$@12", + "for_init_statement", "conditionopt", "for_rest_statement", + "jump_statement", "translation_unit", "external_declaration", + "function_definition", "$@13", "attribute", "attribute_list", + "single_attribute", YY_NULLPTR +}; +#endif + +# ifdef YYPRINT +/* YYTOKNUM[NUM] -- (External) token number corresponding to the + (internal) symbol number NUM (which must be that of a token). 
*/ +static const yytype_uint16 yytoknum[] = +{ + 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, + 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, + 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, + 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, + 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, + 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, + 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, + 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, + 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, + 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, + 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, + 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, + 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, + 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, + 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, + 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, + 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, + 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, + 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, + 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, + 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, + 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, + 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, + 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, + 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, + 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, + 665, 666, 667, 668, 669, 670, 671, 672, 673, 674 +}; +# endif + +#define YYPACT_NINF -457 + +#define yypact_value_is_default(Yystate) \ + (!!((Yystate) == (-457))) + +#define YYTABLE_NINF -537 + +#define yytable_value_is_error(Yytable_value) \ + 0 + + /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing + STATE-NUM. 
*/ +static const yytype_int16 yypact[] = +{ + 4075, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, 132, -457, + -457, -457, -457, -457, -1, -457, -457, -457, -457, -457, + -457, -301, -298, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, 11, -249, 17, 30, 6160, + 20, -457, 28, -457, -457, -457, -457, 4492, -457, -457, + -457, -457, 50, -457, -457, 739, -457, -457, 16, -457, + 81, -29, 69, -457, -313, -457, 111, -457, 6160, -457, + -457, -457, 6160, 103, 106, -457, -314, -457, 72, -457, + -457, 8566, 142, -457, -457, -457, 136, 6160, -457, 144, + -457, 53, -457, -457, 76, 6974, -457, -312, 1156, -457, + -457, -457, -457, 142, -309, -457, 7372, -308, -457, 119, + -457, 65, 8566, 8566, -457, 8566, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, 36, -457, -457, -457, 171, + 85, 8964, 173, -457, 8566, -457, -457, -323, 174, -457, + 6160, 139, 4909, -457, 6160, 8566, -457, -29, -457, 141, + -457, -457, 145, 99, 35, 26, 71, 156, 159, 161, + 196, 195, 23, 181, 7770, -457, 183, 182, -457, -457, + 186, 179, 180, -457, 191, 192, 187, 8168, 193, 8566, + 188, 189, 127, -457, -457, 96, -457, -249, 200, 201, + -457, -457, -457, -457, -457, 1573, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -24, 174, 7372, 13, 7372, + -457, -457, 7372, 6160, -457, 166, -457, -457, -457, 86, + -457, -457, 8566, 168, -457, -457, 8566, 205, -457, -457, + -457, 8566, -457, 139, 142, 124, -457, -457, -457, 5326, + -457, -457, -457, -457, 8566, 8566, 8566, 8566, 8566, 8566, + 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, + 8566, 
8566, 8566, -457, -457, -457, 206, 172, -457, 1990, + -457, -457, -457, 1990, -457, 8566, -457, -457, 130, 8566, + 125, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, 8566, 8566, -457, -457, -457, -457, + -457, -457, -457, 7372, -457, 94, -457, 5743, -457, -457, + 207, 204, -457, -457, -457, 131, 174, 139, -457, -457, + -457, -457, -457, 145, 145, 99, 99, 35, 35, 35, + 35, 26, 26, 71, 156, 159, 161, 196, 195, 8566, + -457, 212, 60, -457, 1990, 3658, 169, 3241, 87, -457, + 89, -457, -457, -457, -457, -457, 6576, -457, -457, -457, + -457, 143, 8566, 211, 172, 210, 204, 184, 6160, 217, + 219, -457, -457, 3658, 218, -457, -457, -457, 8566, 220, + -457, -457, -457, 214, 2407, 8566, -457, 216, 223, 185, + 224, 2824, -457, 225, -457, -457, 7372, -457, -457, -457, + 97, 8566, 2407, 218, -457, -457, 1990, -457, 222, 204, + -457, -457, 1990, 229, -457, -457 +}; + + /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. + Performed when YYTABLE does not specify something else to do. Zero + means the default is an error. */ +static const yytype_uint16 yydefact[] = +{ + 0, 157, 210, 208, 209, 207, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 211, 212, 213, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 336, 337, 338, 339, 340, 341, 342, 362, 363, 364, + 365, 366, 367, 368, 377, 390, 391, 378, 379, 381, + 380, 382, 383, 384, 385, 386, 387, 388, 389, 165, + 166, 236, 237, 235, 238, 245, 246, 243, 244, 241, + 242, 239, 240, 268, 269, 270, 280, 281, 282, 265, + 266, 267, 277, 278, 279, 262, 263, 264, 274, 275, + 276, 259, 260, 261, 271, 272, 273, 247, 248, 249, + 283, 284, 285, 250, 251, 252, 295, 296, 297, 253, + 254, 255, 307, 308, 309, 256, 257, 258, 319, 320, + 321, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 298, 299, 300, 301, 302, 303, 304, 305, 306, 310, + 311, 312, 313, 314, 315, 316, 317, 318, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 334, 331, 332, + 333, 493, 494, 495, 346, 347, 370, 373, 335, 344, + 345, 361, 343, 392, 393, 396, 397, 398, 400, 401, + 402, 404, 405, 406, 408, 409, 483, 484, 369, 371, + 372, 348, 349, 350, 394, 351, 355, 356, 359, 399, + 403, 407, 352, 353, 357, 358, 395, 354, 360, 439, + 441, 442, 443, 445, 446, 447, 449, 450, 451, 453, + 454, 455, 457, 458, 459, 461, 462, 463, 465, 466, + 467, 469, 470, 471, 473, 474, 475, 477, 478, 479, + 481, 482, 440, 444, 448, 452, 456, 464, 468, 472, + 460, 476, 480, 374, 375, 376, 410, 419, 421, 415, + 420, 422, 423, 425, 426, 427, 429, 430, 431, 433, + 434, 435, 437, 438, 411, 412, 413, 424, 414, 416, + 417, 418, 428, 432, 436, 485, 486, 489, 490, 491, + 492, 487, 488, 584, 132, 498, 499, 500, 0, 497, + 161, 159, 160, 158, 0, 206, 162, 163, 164, 134, + 133, 0, 190, 171, 173, 169, 175, 177, 172, 174, + 170, 176, 178, 167, 168, 192, 179, 186, 187, 188, + 189, 180, 181, 182, 183, 184, 185, 135, 136, 137, + 138, 139, 140, 147, 583, 0, 585, 0, 109, 108, + 0, 120, 125, 154, 153, 151, 155, 0, 148, 150, + 156, 130, 202, 152, 496, 0, 580, 582, 0, 503, + 0, 0, 0, 97, 0, 94, 0, 107, 0, 116, + 110, 118, 0, 119, 0, 95, 126, 100, 0, 149, + 131, 0, 195, 201, 1, 581, 0, 0, 501, 144, + 146, 0, 142, 193, 0, 0, 98, 0, 0, 586, + 111, 115, 117, 113, 121, 112, 0, 127, 103, 0, + 101, 0, 0, 0, 9, 0, 43, 42, 44, 41, + 5, 6, 7, 8, 2, 16, 14, 15, 17, 10, + 11, 12, 13, 3, 18, 37, 20, 25, 26, 0, + 0, 30, 0, 204, 0, 36, 34, 0, 196, 96, + 0, 0, 0, 505, 0, 0, 141, 0, 191, 0, + 197, 45, 49, 52, 55, 60, 63, 65, 67, 69, + 71, 73, 75, 0, 0, 99, 0, 531, 540, 544, + 0, 0, 0, 565, 0, 0, 0, 0, 0, 0, + 
0, 0, 45, 78, 91, 0, 518, 0, 156, 130, + 521, 542, 520, 528, 519, 0, 522, 523, 546, 524, + 553, 525, 526, 561, 527, 0, 114, 0, 122, 0, + 513, 129, 0, 0, 105, 0, 102, 38, 39, 0, + 22, 23, 0, 0, 28, 27, 0, 206, 31, 33, + 40, 0, 203, 0, 511, 0, 509, 504, 506, 0, + 93, 145, 143, 194, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 76, 198, 199, 0, 0, 530, 0, + 563, 576, 575, 0, 567, 0, 579, 577, 0, 0, + 0, 560, 529, 81, 82, 84, 83, 86, 87, 88, + 89, 90, 85, 80, 0, 0, 545, 541, 543, 547, + 554, 562, 124, 0, 516, 0, 128, 0, 106, 4, + 0, 24, 21, 32, 205, 0, 512, 0, 507, 502, + 46, 47, 48, 51, 50, 53, 54, 58, 59, 56, + 57, 61, 62, 64, 66, 68, 70, 72, 74, 0, + 200, 590, 0, 588, 532, 0, 0, 0, 0, 578, + 0, 559, 79, 92, 123, 514, 0, 104, 19, 508, + 510, 0, 0, 0, 0, 0, 551, 0, 0, 0, + 0, 570, 569, 572, 538, 555, 515, 517, 0, 0, + 587, 589, 533, 0, 0, 0, 571, 0, 0, 550, + 0, 0, 548, 0, 77, 591, 0, 535, 564, 534, + 0, 573, 0, 538, 537, 539, 557, 552, 0, 574, + 568, 549, 558, 0, 566, 556 +}; + + /* YYPGOTO[NTERM-NUM]. */ +static const yytype_int16 yypgoto[] = +{ + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, 8868, -457, -87, -84, -127, -93, -33, -31, + -27, -25, -28, -26, -457, -86, -457, -103, -457, -111, + -125, 2, -457, -457, -457, 4, -457, -457, -457, 176, + 194, 178, -457, -457, -337, -457, -457, -457, -457, 95, + -457, -37, -46, -457, 9, -457, 0, -63, -457, -457, + -457, -457, 263, -457, -457, -457, -456, -140, 10, -73, + -211, -457, -102, -198, -321, -457, -144, -457, -457, -155, + -154, -457, -457, 198, -274, -97, -457, 46, -457, -118, + -457, 51, -457, -457, -457, -457, 52, -457, -457, -457, + -457, -457, -457, -457, -457, 213, -457, -457, -457, -457, + -105 +}; + + /* YYDEFGOTO[NTERM-NUM]. */ +static const yytype_int16 yydefgoto[] = +{ + -1, 443, 444, 445, 630, 446, 447, 448, 449, 450, + 451, 452, 502, 454, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 503, 659, 504, 614, 505, + 561, 506, 345, 533, 421, 507, 347, 348, 349, 379, + 380, 381, 350, 351, 352, 353, 354, 355, 401, 402, + 356, 357, 358, 359, 455, 404, 456, 407, 392, 393, + 457, 362, 363, 364, 464, 397, 462, 463, 555, 556, + 531, 625, 510, 511, 512, 513, 514, 589, 685, 718, + 709, 710, 711, 719, 515, 516, 517, 518, 712, 689, + 519, 520, 713, 733, 521, 522, 523, 665, 593, 667, + 693, 707, 708, 524, 365, 366, 367, 376, 525, 662, + 663 +}; + + /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If + positive, shift that token. If negative, reduce the rule whose + number is the opposite. If YYTABLE_NINF, syntax error. 
*/ +static const yytype_int16 yytable[] = +{ + 361, 551, 344, 415, 346, 405, 405, 484, 559, 360, + 405, 484, 416, 552, 406, 485, 371, 527, 532, 372, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 627, 375, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 389, 382, 530, 539, 664, 622, 618, 624, 483, + 369, 626, 558, 417, 399, 571, 572, 582, 687, 458, + 569, 570, 484, 540, 541, 377, 389, 490, 373, 623, + 493, 382, 494, 495, 384, 400, 498, 385, 548, 383, + 526, 528, 370, -35, 378, 542, 687, 390, 360, 543, + 460, 573, 574, 583, 374, 361, 360, 344, 396, 346, + 299, 466, 575, 576, 360, 304, 305, 467, 383, 560, + 683, 386, 383, 717, 684, 391, 598, 360, 600, 535, + 725, 360, 536, 418, 468, 666, 419, 461, 586, 420, + 469, 717, 398, 545, 629, 694, 360, 695, 509, 546, + 615, 615, 674, 615, 389, 728, 675, 508, 676, 558, + 615, 615, 403, 616, 530, 460, 530, 460, 567, 530, + 568, 631, 408, 603, 604, 605, 606, 607, 608, 609, + 610, 611, 612, 633, 647, 648, 649, 650, 637, 615, + 671, 638, 732, 613, 615, 637, 413, 669, 679, 414, + 553, 405, 461, 459, 461, 697, 618, 615, 698, 360, + 465, 360, 534, 360, 295, 296, 297, 564, 565, 566, + 643, 644, 651, 652, 668, 645, 646, 558, 670, 544, + 549, 636, 554, 484, 563, 577, 460, 578, 579, 580, + 581, 584, 587, 590, 588, 727, 591, 592, 594, 595, + 599, 672, 673, 601, 596, 509, 602, -36, -34, 628, + 530, 632, 460, -29, 508, 661, 660, 678, 615, 682, + 690, 700, 702, 461, 618, 704, 705, 703, 715, -536, + 716, 722, 360, 721, 653, 487, 726, 654, 681, 734, + 723, 735, 655, 657, 686, 656, 658, 699, 411, 461, + 412, 368, 562, 635, 680, 691, 724, 730, 360, 731, + 692, 619, 410, 530, 409, 706, 620, 621, 395, 701, + 0, 0, 686, 0, 0, 0, 0, 0, 0, 509, + 460, 0, 0, 509, 720, 714, 560, 0, 508, 0, + 0, 0, 508, 0, 0, 0, 0, 0, 0, 0, + 729, 0, 0, 530, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 461, 688, 0, + 0, 0, 0, 0, 0, 0, 360, 0, 0, 0, + 0, 0, 389, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 688, 0, 0, 0, + 0, 0, 0, 0, 509, 509, 0, 509, 0, 0, + 0, 0, 0, 508, 508, 0, 508, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 390, 0, + 0, 0, 0, 509, 0, 0, 0, 360, 0, 0, + 0, 0, 508, 0, 509, 0, 0, 0, 0, 0, + 0, 509, 0, 508, 0, 0, 0, 0, 0, 0, + 508, 0, 509, 0, 0, 0, 509, 0, 0, 0, + 0, 508, 509, 0, 0, 508, 0, 0, 0, 394, + 0, 508, 1, 
2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 293, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 294, 295, 296, 297, 298, 0, 0, 0, 0, 0, + 0, 0, 0, 299, 300, 301, 302, 303, 304, 305, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 306, 307, 308, 309, 310, 311, 0, 0, + 0, 0, 0, 0, 0, 0, 312, 0, 313, 314, + 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 1, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 424, 425, 0, 486, 0, 487, 488, 0, + 0, 0, 0, 489, 426, 427, 428, 429, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 294, 295, 296, + 297, 298, 0, 0, 0, 430, 431, 432, 433, 434, + 
299, 300, 301, 302, 303, 304, 305, 490, 491, 492, + 493, 0, 494, 495, 496, 497, 498, 499, 500, 306, + 307, 308, 309, 310, 311, 435, 436, 437, 438, 439, + 440, 441, 442, 312, 501, 313, 314, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, + 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, + 338, 339, 340, 341, 342, 343, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 0, 0, + 422, 423, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 424, + 425, 0, 486, 0, 487, 617, 0, 0, 0, 0, + 489, 426, 427, 428, 429, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 294, 295, 296, 297, 298, 0, + 0, 0, 430, 431, 432, 433, 434, 299, 300, 301, + 302, 303, 304, 305, 490, 491, 492, 493, 0, 494, + 495, 496, 497, 498, 499, 500, 306, 307, 308, 309, + 310, 311, 435, 436, 437, 438, 439, 440, 441, 442, + 312, 501, 313, 314, 315, 316, 317, 318, 319, 320, + 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, + 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, + 341, 342, 343, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 
258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 0, 0, 422, 423, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 424, 425, 0, 486, + 0, 487, 0, 0, 0, 0, 0, 489, 426, 427, + 428, 429, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 294, 295, 296, 297, 298, 0, 0, 0, 430, + 431, 432, 433, 434, 299, 300, 301, 302, 303, 304, + 305, 490, 491, 492, 493, 0, 494, 495, 496, 497, + 498, 499, 500, 306, 307, 308, 309, 310, 311, 435, + 436, 437, 438, 439, 440, 441, 442, 312, 501, 313, + 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 0, 0, 422, 423, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 424, 425, 0, 486, 0, 408, 0, + 0, 0, 0, 0, 489, 426, 427, 428, 429, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 294, 295, + 296, 297, 298, 0, 0, 0, 430, 431, 432, 433, + 434, 299, 300, 301, 302, 303, 304, 305, 490, 491, + 492, 493, 0, 494, 495, 496, 497, 498, 499, 500, + 306, 307, 308, 309, 310, 311, 435, 436, 437, 438, + 439, 440, 441, 442, 312, 501, 313, 314, 315, 316, + 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, + 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, + 337, 338, 339, 340, 341, 342, 343, 1, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 
179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 0, + 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 424, 425, 0, 486, 0, 0, 0, 0, 0, 0, + 0, 489, 426, 427, 428, 429, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 294, 295, 296, 297, 298, + 0, 0, 0, 430, 431, 432, 433, 434, 299, 300, + 301, 302, 303, 304, 305, 490, 491, 492, 493, 0, + 494, 495, 496, 497, 498, 499, 500, 306, 307, 308, + 309, 310, 311, 435, 436, 437, 438, 439, 440, 441, + 442, 312, 501, 313, 314, 315, 316, 317, 318, 319, + 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, + 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, + 340, 341, 342, 343, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 0, 0, 422, 423, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 424, 425, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 489, 426, + 427, 428, 429, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 294, 295, 296, 297, 298, 0, 0, 0, + 430, 431, 432, 433, 434, 299, 300, 301, 302, 303, + 304, 305, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 306, 307, 308, 309, 310, 311, + 435, 436, 437, 438, 439, 440, 441, 442, 312, 0, + 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, + 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, + 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, + 343, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 0, 0, 422, 423, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 424, 425, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 426, 427, 428, 429, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 294, + 295, 296, 297, 0, 0, 0, 0, 430, 431, 432, + 433, 434, 299, 300, 301, 302, 303, 304, 305, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 306, 307, 308, 309, 310, 311, 435, 436, 437, + 438, 439, 440, 441, 442, 312, 0, 313, 314, 315, + 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, + 336, 337, 338, 339, 340, 341, 342, 343, 1, 2, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, + 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, + 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 293, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 294, 295, 296, 297, + 298, 0, 0, 0, 0, 0, 0, 0, 0, 299, + 300, 301, 302, 303, 304, 305, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 306, 307, + 308, 309, 310, 311, 0, 0, 0, 0, 0, 0, + 0, 0, 312, 0, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, + 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, + 339, 340, 341, 342, 343, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 
34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 387, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 294, 295, 296, 297, 0, 0, 0, + 0, 0, 0, 0, 0, 388, 299, 300, 301, 302, + 303, 304, 305, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 306, 307, 308, 309, 310, + 311, 0, 0, 0, 0, 0, 0, 0, 0, 312, + 0, 313, 314, 315, 316, 317, 318, 319, 320, 321, + 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, + 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, + 342, 343, 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 557, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 294, 295, 296, 297, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 299, 300, 301, 302, 303, 304, 305, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 306, 307, 308, 309, 310, 311, 0, 0, + 0, 0, 0, 0, 0, 0, 312, 0, 313, 314, + 315, 316, 
317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 1, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 639, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 294, 295, 296, + 297, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 299, 300, 301, 302, 303, 304, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 306, + 307, 308, 309, 310, 311, 0, 0, 0, 0, 0, + 0, 0, 0, 312, 0, 313, 314, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, + 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, + 338, 339, 340, 341, 342, 343, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 677, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 294, 295, 296, 297, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 299, 300, 301, + 302, 303, 304, 305, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 306, 307, 308, 309, + 310, 311, 0, 0, 0, 0, 0, 0, 0, 0, + 312, 0, 313, 314, 315, 316, 317, 318, 319, 320, + 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, + 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, + 341, 342, 343, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 294, 295, 296, 297, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 299, 300, 301, 302, 303, 304, + 305, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 306, 307, 308, 309, 310, 311, 0, + 0, 0, 0, 0, 0, 0, 0, 312, 0, 313, + 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 0, 0, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 
261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 424, 425, 0, 0, 0, 529, 696, 0, + 0, 0, 0, 0, 426, 427, 428, 429, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 430, 431, 432, 433, 434, + 299, 0, 0, 0, 0, 304, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 435, 436, 437, 438, 439, + 440, 441, 442, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 325, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 0, 0, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 0, + 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 424, 425, 0, 0, 470, 0, 0, 0, 0, 0, + 0, 0, 426, 427, 428, 429, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 430, 431, 432, 433, 434, 299, 0, + 0, 0, 0, 304, 305, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 435, 436, 437, 438, 439, 440, 441, + 442, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 325, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 0, 0, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 
252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 0, 0, 422, + 423, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 424, 425, + 0, 0, 0, 529, 0, 0, 0, 0, 0, 0, + 426, 427, 428, 429, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 430, 431, 432, 433, 434, 299, 0, 0, 0, + 0, 304, 305, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 435, 436, 437, 438, 439, 440, 441, 442, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 325, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 0, 0, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 0, 0, 422, 423, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 424, 425, 0, 0, + 585, 0, 0, 0, 0, 0, 0, 0, 426, 427, + 428, 429, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 430, + 431, 432, 433, 434, 299, 0, 0, 0, 0, 304, + 305, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 435, + 436, 437, 438, 439, 440, 441, 442, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 325, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 0, + 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 
243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 0, 0, 422, 423, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 424, 425, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 597, 426, 427, 428, 429, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 430, 431, 432, + 433, 434, 299, 0, 0, 0, 0, 304, 305, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 435, 436, 437, + 438, 439, 440, 441, 442, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 325, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 0, 0, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 424, 425, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 426, 427, 428, 429, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 430, 431, 432, 433, 434, + 299, 0, 0, 0, 0, 304, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 435, 436, 437, 438, 439, + 440, 441, 442, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 325, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 0, 0, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 453, + 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 471, 0, 0, 0, 0, 0, 0, + 424, 425, 0, 0, 0, 0, 0, 0, 0, 0, + 537, 538, 426, 427, 428, 429, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 430, 431, 432, 433, 434, 299, 0, + 0, 0, 550, 304, 547, 0, 0, 0, 0, 0, + 0, 0, 0, 471, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 435, 436, 437, 438, 439, 440, 441, + 442, 0, 471, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 325, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 634, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 640, 641, 642, 471, 471, 471, 471, 471, + 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, + 471, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 471 +}; + +static const yytype_int16 yycheck[] = +{ + 0, 324, 0, 317, 0, 319, 319, 319, 464, 0, + 319, 319, 326, 336, 327, 327, 317, 326, 326, 317, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 533, 327, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, 357, 349, 416, 425, 589, 527, 515, 529, 405, + 321, 532, 462, 386, 353, 299, 300, 304, 665, 392, + 295, 296, 319, 297, 298, 318, 382, 361, 327, 326, + 364, 378, 366, 367, 324, 374, 370, 327, 451, 349, + 413, 414, 353, 317, 324, 319, 693, 357, 349, 323, + 397, 335, 336, 340, 353, 365, 357, 365, 368, 365, + 354, 318, 301, 302, 365, 359, 360, 324, 378, 465, + 320, 353, 382, 704, 324, 335, 497, 378, 499, 324, + 711, 382, 327, 321, 318, 593, 324, 397, 484, 327, + 324, 722, 321, 318, 318, 318, 397, 318, 408, 324, + 324, 324, 623, 324, 460, 318, 322, 408, 324, 559, + 324, 324, 353, 327, 527, 462, 529, 464, 329, 532, + 331, 542, 321, 306, 307, 308, 
309, 310, 311, 312, + 313, 314, 315, 546, 571, 572, 573, 574, 324, 324, + 325, 327, 726, 326, 324, 324, 353, 327, 327, 353, + 460, 319, 462, 327, 464, 676, 664, 324, 325, 460, + 326, 462, 353, 464, 342, 343, 344, 332, 333, 334, + 567, 568, 575, 576, 595, 569, 570, 627, 599, 318, + 317, 554, 353, 319, 353, 339, 533, 338, 337, 303, + 305, 320, 319, 317, 322, 716, 327, 327, 317, 317, + 317, 614, 615, 325, 327, 515, 327, 317, 317, 353, + 623, 353, 559, 318, 515, 353, 320, 320, 324, 317, + 361, 320, 322, 533, 732, 318, 317, 353, 318, 321, + 326, 318, 533, 327, 577, 321, 321, 578, 659, 327, + 365, 322, 579, 581, 665, 580, 582, 682, 382, 559, + 382, 298, 467, 553, 637, 667, 710, 722, 559, 723, + 667, 525, 378, 676, 376, 693, 525, 525, 365, 684, + -1, -1, 693, -1, -1, -1, -1, -1, -1, 589, + 627, -1, -1, 593, 705, 698, 682, -1, 589, -1, + -1, -1, 593, -1, -1, -1, -1, -1, -1, -1, + 721, -1, -1, 716, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 627, 665, -1, + -1, -1, -1, -1, -1, -1, 627, -1, -1, -1, + -1, -1, 688, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 693, -1, -1, -1, + -1, -1, -1, -1, 664, 665, -1, 667, -1, -1, + -1, -1, -1, 664, 665, -1, 667, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 688, -1, + -1, -1, -1, 693, -1, -1, -1, 688, -1, -1, + -1, -1, 693, -1, 704, -1, -1, -1, -1, -1, + -1, 711, -1, 704, -1, -1, -1, -1, -1, -1, + 711, -1, 722, -1, -1, -1, 726, -1, -1, -1, + -1, 722, 732, -1, -1, 726, -1, -1, -1, 0, + -1, 732, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 327, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 341, 342, 343, 344, 345, -1, -1, -1, -1, -1, + -1, -1, -1, 354, 355, 356, 357, 358, 359, 360, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 373, 374, 375, 376, 377, 378, -1, -1, + -1, -1, -1, -1, -1, -1, 387, -1, 389, 390, + 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 416, 417, 418, 419, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 
28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 316, 317, -1, 319, -1, 321, 322, -1, + -1, -1, -1, 327, 328, 329, 330, 331, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 341, 342, 343, + 344, 345, -1, -1, -1, 349, 350, 351, 352, 353, + 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, + 364, -1, 366, 367, 368, 369, 370, 371, 372, 373, + 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, + 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, + 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, + 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, + 414, 415, 416, 417, 418, 419, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, -1, -1, + 297, 298, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 316, + 317, -1, 319, -1, 321, 322, -1, -1, -1, -1, + 327, 328, 329, 330, 331, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 341, 
342, 343, 344, 345, -1, + -1, -1, 349, 350, 351, 352, 353, 354, 355, 356, + 357, 358, 359, 360, 361, 362, 363, 364, -1, 366, + 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, + 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, + 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, + 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, + 417, 418, 419, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 293, 294, -1, -1, 297, 298, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 316, 317, -1, 319, + -1, 321, -1, -1, -1, -1, -1, 327, 328, 329, + 330, 331, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 341, 342, 343, 344, 345, -1, -1, -1, 349, + 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 362, 363, 364, -1, 366, 367, 368, 369, + 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, + 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, + 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, + 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, + 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 
240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, + 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, + 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 293, 294, -1, -1, 297, 298, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 316, 317, -1, 319, -1, 321, -1, + -1, -1, -1, -1, 327, 328, 329, 330, 331, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 341, 342, + 343, 344, 345, -1, -1, -1, 349, 350, 351, 352, + 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, + 363, 364, -1, 366, 367, 368, 369, 370, 371, 372, + 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, + 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, + 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, + 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, + 413, 414, 415, 416, 417, 418, 419, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 294, -1, + -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 316, 317, -1, 319, -1, -1, -1, -1, -1, -1, + -1, 327, 328, 329, 330, 331, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 341, 342, 343, 344, 345, + -1, -1, -1, 349, 350, 351, 352, 353, 354, 355, + 356, 357, 358, 359, 360, 361, 362, 363, 364, -1, + 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, + 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, + 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, + 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, + 416, 417, 418, 419, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 
144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 293, 294, -1, -1, 297, 298, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 316, 317, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 327, 328, + 329, 330, 331, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 341, 342, 343, 344, 345, -1, -1, -1, + 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 373, 374, 375, 376, 377, 378, + 379, 380, 381, 382, 383, 384, 385, 386, 387, -1, + 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, + 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, + 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, + 419, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, -1, -1, 297, 298, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 316, 317, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 328, 329, 330, 331, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 341, + 342, 343, 344, -1, -1, -1, -1, 349, 350, 351, + 352, 353, 354, 355, 356, 357, 358, 359, 360, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 373, 374, 375, 376, 377, 378, 379, 380, 381, + 382, 383, 384, 385, 386, 387, -1, 389, 390, 391, + 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, + 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, + 412, 413, 414, 415, 416, 417, 418, 419, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 
43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 327, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 341, 342, 343, 344, + 345, -1, -1, -1, -1, -1, -1, -1, -1, 354, + 355, 356, 357, 358, 359, 360, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 373, 374, + 375, 376, 377, 378, -1, -1, -1, -1, -1, -1, + -1, -1, 387, -1, 389, 390, 391, 392, 393, 394, + 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, + 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 293, 294, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 327, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 341, 342, 343, 344, -1, -1, -1, + -1, -1, -1, -1, -1, 353, 354, 355, 356, 357, + 358, 359, 360, -1, -1, -1, -1, -1, 
-1, -1, + -1, -1, -1, -1, -1, 373, 374, 375, 376, 377, + 378, -1, -1, -1, -1, -1, -1, -1, -1, 387, + -1, 389, 390, 391, 392, 393, 394, 395, 396, 397, + 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, + 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, + 418, 419, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 322, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 341, 342, 343, 344, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 354, 355, 356, 357, 358, 359, 360, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 373, 374, 375, 376, 377, 378, -1, -1, + -1, -1, -1, -1, -1, -1, 387, -1, 389, 390, + 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 416, 417, 418, 419, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 
273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 322, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 341, 342, 343, + 344, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 354, 355, 356, 357, 358, 359, 360, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 373, + 374, 375, 376, 377, 378, -1, -1, -1, -1, -1, + -1, -1, -1, 387, -1, 389, 390, 391, 392, 393, + 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, + 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, + 414, 415, 416, 417, 418, 419, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 322, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 341, 342, 343, 344, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 354, 355, 356, + 357, 358, 359, 360, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 373, 374, 375, 376, + 377, 378, -1, -1, -1, -1, -1, -1, -1, -1, + 387, -1, 389, 390, 391, 392, 393, 394, 395, 396, + 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, + 417, 418, 419, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 
191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 293, 294, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 341, 342, 343, 344, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 354, 355, 356, 357, 358, 359, + 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 373, 374, 375, 376, 377, 378, -1, + -1, -1, -1, -1, -1, -1, -1, 387, -1, 389, + 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, + 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, + 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, -1, -1, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 316, 317, -1, -1, -1, 321, 322, -1, + -1, -1, -1, -1, 328, 329, 330, 331, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 349, 350, 351, 352, 353, + 354, -1, -1, -1, -1, 359, 360, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 379, 380, 381, 382, 383, + 384, 385, 386, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 401, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, -1, -1, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 
130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 294, -1, + -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 316, 317, -1, -1, 320, -1, -1, -1, -1, -1, + -1, -1, 328, 329, 330, 331, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 349, 350, 351, 352, 353, 354, -1, + -1, -1, -1, 359, 360, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 379, 380, 381, 382, 383, 384, 385, + 386, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 401, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, -1, -1, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 293, 294, -1, -1, 297, + 298, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 316, 317, + -1, -1, -1, 321, -1, -1, -1, -1, -1, -1, + 328, 329, 330, 331, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 349, 350, 351, 352, 353, 354, -1, -1, -1, + -1, 359, 360, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 379, 380, 381, 382, 383, 384, 385, 386, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 401, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, -1, -1, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 
85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 293, 294, -1, -1, 297, 298, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 316, 317, -1, -1, + 320, -1, -1, -1, -1, -1, -1, -1, 328, 329, + 330, 331, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 349, + 350, 351, 352, 353, 354, -1, -1, -1, -1, 359, + 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 379, + 380, 381, 382, 383, 384, 385, 386, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 401, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, -1, + -1, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, -1, -1, 297, 298, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 316, 317, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 327, 328, 329, 330, 331, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 349, 350, 351, + 352, 353, 354, -1, -1, -1, -1, 359, 360, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 379, 380, 381, + 382, 383, 384, 385, 386, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 401, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 
33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, -1, -1, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 316, 317, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 328, 329, 330, 331, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 349, 350, 351, 352, 353, + 354, -1, -1, -1, -1, 359, 360, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 379, 380, 381, 382, 383, + 384, 385, 386, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 401, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, -1, -1, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 294, 391, + -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 405, -1, -1, -1, -1, -1, -1, + 316, 317, -1, -1, -1, -1, -1, -1, -1, -1, + 422, 423, 328, 329, 330, 331, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 349, 350, 351, 352, 353, 354, -1, + -1, -1, 454, 359, 360, -1, -1, -1, -1, -1, + -1, -1, -1, 465, -1, -1, -1, -1, -1, -1, + -1, -1, 
-1, 379, 380, 381, 382, 383, 384, 385, + 386, -1, 484, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 401, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 551, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 564, 565, 566, 567, 568, 569, 570, 571, + 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, + 582, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 682 +}; + + /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing + symbol of state STATE-NUM. */ +static const yytype_uint16 yystos[] = +{ + 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, 327, 341, 342, 343, 344, 345, 354, + 355, 356, 357, 358, 359, 360, 373, 374, 375, 376, + 377, 378, 387, 389, 390, 391, 392, 393, 394, 395, + 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, + 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, + 416, 417, 418, 419, 451, 452, 455, 456, 457, 458, + 462, 463, 464, 465, 466, 467, 470, 471, 472, 473, + 474, 476, 481, 482, 483, 524, 525, 526, 482, 321, + 353, 317, 317, 327, 353, 327, 527, 318, 324, 459, + 460, 461, 471, 476, 324, 327, 353, 327, 353, 472, + 476, 335, 478, 479, 0, 525, 476, 485, 321, 353, + 374, 468, 469, 353, 475, 319, 327, 477, 321, 503, + 460, 459, 461, 353, 353, 317, 326, 477, 321, 324, + 327, 454, 297, 298, 316, 317, 328, 329, 330, 331, + 349, 350, 351, 352, 353, 379, 380, 381, 382, 383, + 384, 385, 386, 421, 422, 423, 425, 426, 427, 428, + 429, 430, 431, 432, 433, 474, 476, 480, 477, 327, + 471, 476, 486, 487, 484, 326, 318, 324, 318, 324, + 320, 432, 434, 435, 436, 437, 438, 439, 440, 441, + 442, 443, 444, 445, 319, 327, 319, 321, 322, 327, + 361, 362, 363, 364, 366, 367, 368, 369, 370, 371, + 372, 
388, 432, 445, 447, 449, 451, 455, 474, 476, + 492, 493, 494, 495, 496, 504, 505, 506, 507, 510, + 511, 514, 515, 516, 523, 528, 477, 326, 477, 321, + 447, 490, 326, 453, 353, 324, 327, 432, 432, 449, + 297, 298, 319, 323, 318, 318, 324, 360, 447, 317, + 432, 324, 336, 476, 353, 488, 489, 322, 487, 486, + 445, 450, 469, 353, 332, 333, 334, 329, 331, 295, + 296, 299, 300, 335, 336, 301, 302, 339, 338, 337, + 303, 305, 304, 340, 320, 320, 445, 319, 322, 497, + 317, 327, 327, 518, 317, 317, 327, 327, 449, 317, + 449, 325, 327, 306, 307, 308, 309, 310, 311, 312, + 313, 314, 315, 326, 448, 324, 327, 322, 493, 507, + 511, 516, 490, 326, 490, 491, 490, 486, 353, 318, + 424, 449, 353, 447, 432, 488, 477, 324, 327, 322, + 432, 432, 432, 434, 434, 435, 435, 436, 436, 436, + 436, 437, 437, 438, 439, 440, 441, 442, 443, 446, + 320, 353, 529, 530, 504, 517, 493, 519, 449, 327, + 449, 325, 447, 447, 490, 322, 324, 322, 320, 327, + 489, 449, 317, 320, 324, 498, 449, 464, 471, 509, + 361, 492, 505, 520, 318, 318, 322, 490, 325, 450, + 320, 530, 322, 353, 318, 317, 509, 521, 522, 500, + 501, 502, 508, 512, 447, 318, 326, 494, 499, 503, + 449, 327, 318, 365, 496, 494, 321, 490, 318, 449, + 499, 500, 504, 513, 327, 322 +}; + + /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ +static const yytype_uint16 yyr1[] = +{ + 0, 420, 421, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, 423, 423, + 423, 423, 423, 423, 424, 425, 426, 427, 427, 428, + 428, 429, 429, 430, 431, 431, 431, 432, 432, 432, + 432, 433, 433, 433, 433, 434, 434, 434, 434, 435, + 435, 435, 436, 436, 436, 437, 437, 437, 437, 437, + 438, 438, 438, 439, 439, 440, 440, 441, 441, 442, + 442, 443, 443, 444, 444, 445, 446, 445, 447, 447, + 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, + 448, 449, 449, 450, 451, 451, 451, 451, 451, 451, + 451, 451, 451, 453, 452, 454, 454, 455, 456, 456, + 457, 457, 458, 459, 459, 460, 460, 460, 460, 461, + 462, 462, 462, 462, 462, 463, 463, 463, 463, 463, + 464, 464, 465, 466, 466, 466, 466, 466, 466, 466, + 466, 467, 468, 468, 469, 469, 469, 470, 471, 471, + 472, 472, 472, 472, 472, 472, 472, 473, 473, 473, + 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, + 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, + 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, + 473, 473, 474, 475, 475, 476, 476, 477, 477, 477, + 477, 478, 478, 479, 480, 480, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 
481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 482, 482, + 482, 484, 483, 485, 483, 486, 486, 487, 487, 488, + 488, 489, 489, 490, 490, 490, 491, 491, 492, 493, + 493, 494, 494, 494, 494, 494, 494, 494, 494, 495, + 496, 497, 498, 496, 499, 499, 501, 500, 502, 500, + 503, 503, 504, 504, 505, 505, 506, 506, 507, 508, + 508, 509, 509, 510, 510, 512, 511, 513, 513, 514, + 514, 515, 515, 517, 516, 518, 516, 519, 516, 520, + 520, 521, 521, 522, 522, 523, 523, 523, 523, 523, + 524, 524, 525, 525, 525, 527, 526, 528, 529, 529, + 530, 530 +}; + + /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */ +static const yytype_uint8 yyr2[] = +{ + 0, 2, 1, 1, 3, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, + 1, 3, 2, 2, 1, 1, 1, 2, 2, 2, + 1, 2, 3, 2, 1, 1, 1, 1, 2, 2, + 2, 1, 1, 1, 1, 1, 3, 3, 3, 1, + 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, + 1, 3, 3, 1, 3, 1, 3, 1, 3, 1, + 3, 1, 3, 1, 3, 1, 0, 6, 1, 3, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 3, 1, 2, 2, 4, 2, 3, 4, + 2, 3, 4, 0, 6, 2, 3, 2, 1, 1, + 2, 3, 3, 2, 3, 2, 1, 2, 1, 1, + 1, 3, 4, 6, 5, 1, 2, 3, 5, 4, + 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 4, 1, 3, 1, 3, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 4, 1, 1, 3, 2, 3, 2, 3, 3, + 4, 1, 0, 3, 1, 3, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 6, 0, 5, 1, 2, 3, 4, 1, + 3, 1, 2, 1, 3, 4, 1, 3, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, + 2, 0, 0, 5, 1, 1, 0, 2, 0, 2, + 2, 3, 1, 2, 1, 2, 1, 2, 5, 3, + 1, 1, 4, 1, 2, 0, 8, 0, 1, 3, + 2, 1, 2, 0, 6, 0, 8, 0, 7, 1, + 1, 1, 0, 2, 3, 2, 2, 2, 3, 2, + 1, 2, 1, 1, 1, 0, 3, 5, 1, 3, + 1, 4 +}; + + +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = YYEMPTY) +#define YYEMPTY (-2) +#define YYEOF 0 + +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrorlab + + +#define YYRECOVERING() (!!yyerrstatus) + +#define YYBACKUP(Token, Value) \ +do \ + if (yychar == YYEMPTY) \ + { \ + yychar = (Token); \ + yylval = (Value); \ + YYPOPSTACK (yylen); \ + yystate = *yyssp; \ + goto yybackup; \ + } \ + else \ + { \ + yyerror (pParseContext, YY_("syntax error: cannot back up")); \ + YYERROR; \ + } \ +while (0) + +/* Error token number */ +#define YYTERROR 1 +#define YYERRCODE 256 + + + +/* Enable debugging if requested. 
*/ +#if YYDEBUG + +# ifndef YYFPRINTF +# include <stdio.h> /* INFRINGES ON USER NAME SPACE */ +# define YYFPRINTF fprintf +# endif + +# define YYDPRINTF(Args) \ +do { \ + if (yydebug) \ + YYFPRINTF Args; \ +} while (0) + +/* This macro is provided for backward compatibility. */ +#ifndef YY_LOCATION_PRINT +# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +#endif + + +# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ +do { \ + if (yydebug) \ + { \ + YYFPRINTF (stderr, "%s ", Title); \ + yy_symbol_print (stderr, \ + Type, Value, pParseContext); \ + YYFPRINTF (stderr, "\n"); \ + } \ +} while (0) + + +/*----------------------------------------. +| Print this symbol's value on YYOUTPUT. | +`----------------------------------------*/ + +static void +yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext) +{ + FILE *yyo = yyoutput; + YYUSE (yyo); + YYUSE (pParseContext); + if (!yyvaluep) + return; +# ifdef YYPRINT + if (yytype < YYNTOKENS) + YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); +# endif + YYUSE (yytype); +} + + +/*--------------------------------. +| Print this symbol on YYOUTPUT. | +`--------------------------------*/ + +static void +yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext) +{ + YYFPRINTF (yyoutput, "%s %s (", + yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); + + yy_symbol_value_print (yyoutput, yytype, yyvaluep, pParseContext); + YYFPRINTF (yyoutput, ")"); +} + +/*------------------------------------------------------------------. +| yy_stack_print -- Print the state stack from its BOTTOM up to its | +| TOP (included). | +`------------------------------------------------------------------*/ + +static void +yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) +{ + YYFPRINTF (stderr, "Stack now"); + for (; yybottom <= yytop; yybottom++) + { + int yybot = *yybottom; + YYFPRINTF (stderr, " %d", yybot); + } + YYFPRINTF (stderr, "\n"); +} + +# define YY_STACK_PRINT(Bottom, Top) \ +do { \ + if (yydebug) \ + yy_stack_print ((Bottom), (Top)); \ +} while (0) + + +/*------------------------------------------------. +| Report that the YYRULE is going to be reduced. | +`------------------------------------------------*/ + +static void +yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule, glslang::TParseContext* pParseContext) +{ + unsigned long int yylno = yyrline[yyrule]; + int yynrhs = yyr2[yyrule]; + int yyi; + YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", + yyrule - 1, yylno); + /* The symbols being reduced. */ + for (yyi = 0; yyi < yynrhs; yyi++) + { + YYFPRINTF (stderr, " $%d = ", yyi + 1); + yy_symbol_print (stderr, + yystos[yyssp[yyi + 1 - yynrhs]], + &(yyvsp[(yyi + 1) - (yynrhs)]) + , pParseContext); + YYFPRINTF (stderr, "\n"); + } +} + +# define YY_REDUCE_PRINT(Rule) \ +do { \ + if (yydebug) \ + yy_reduce_print (yyssp, yyvsp, Rule, pParseContext); \ +} while (0) + +/* Nonzero means print parse trace. It is left uninitialized so that + multiple parsers can coexist. */ +int yydebug; +#else /* !YYDEBUG */ +# define YYDPRINTF(Args) +# define YY_SYMBOL_PRINT(Title, Type, Value, Location) +# define YY_STACK_PRINT(Bottom, Top) +# define YY_REDUCE_PRINT(Rule) +#endif /* !YYDEBUG */ + + +/* YYINITDEPTH -- initial size of the parser's stacks.
*/ +#ifndef YYINITDEPTH +# define YYINITDEPTH 200 +#endif + +/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only + if the built-in stack extension method is used). + + Do not make this value too large; the results are undefined if + YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) + evaluated with infinite-precision integer arithmetic. */ + +#ifndef YYMAXDEPTH +# define YYMAXDEPTH 10000 +#endif + + +#if YYERROR_VERBOSE + +# ifndef yystrlen +# if defined __GLIBC__ && defined _STRING_H +# define yystrlen strlen +# else +/* Return the length of YYSTR. */ +static YYSIZE_T +yystrlen (const char *yystr) +{ + YYSIZE_T yylen; + for (yylen = 0; yystr[yylen]; yylen++) + continue; + return yylen; +} +# endif +# endif + +# ifndef yystpcpy +# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE +# define yystpcpy stpcpy +# else +/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in + YYDEST. */ +static char * +yystpcpy (char *yydest, const char *yysrc) +{ + char *yyd = yydest; + const char *yys = yysrc; + + while ((*yyd++ = *yys++) != '\0') + continue; + + return yyd - 1; +} +# endif +# endif + +# ifndef yytnamerr +/* Copy to YYRES the contents of YYSTR after stripping away unnecessary + quotes and backslashes, so that it's suitable for yyerror. The + heuristic is that double-quoting is unnecessary unless the string + contains an apostrophe, a comma, or backslash (other than + backslash-backslash). YYSTR is taken from yytname. If YYRES is + null, do not copy; instead, return the length of what the result + would have been. */ +static YYSIZE_T +yytnamerr (char *yyres, const char *yystr) +{ + if (*yystr == '"') + { + YYSIZE_T yyn = 0; + char const *yyp = yystr; + + for (;;) + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + /* Fall through. */ + default: + if (yyres) + yyres[yyn] = *yyp; + yyn++; + break; + + case '"': + if (yyres) + yyres[yyn] = '\0'; + return yyn; + } + do_not_strip_quotes: ; + } + + if (! yyres) + return yystrlen (yystr); + + return yystpcpy (yyres, yystr) - yyres; +} +# endif + +/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message + about the unexpected token YYTOKEN for the state stack whose top is + YYSSP. + + Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is + not large enough to hold the message. In that case, also set + *YYMSG_ALLOC to the required number of bytes. Return 2 if the + required number of bytes is too large to store. */ +static int +yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, + yytype_int16 *yyssp, int yytoken) +{ + YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); + YYSIZE_T yysize = yysize0; + enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; + /* Internationalized format string. */ + const char *yyformat = YY_NULLPTR; + /* Arguments of yyformat. */ + char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; + /* Number of reported tokens (one for the "unexpected", one per + "expected"). */ + int yycount = 0; + + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, then + the only way this function was invoked is if the default action + is an error action. In that case, don't check for expected + tokens because there are none. + - The only way there can be no lookahead present (in yychar) is if + this state is a consistent state with a default action. 
Thus, + detecting the absence of a lookahead is sufficient to determine + that there is no unexpected or expected token to report. In that + case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this state is a + consistent state with a default action. There might have been a + previous inconsistent state, consistent state with a non-default + action, or user semantic action that manipulated yychar. + - Of course, the expected token list depends on states to have + correct lookahead information, and it depends on the parser not + to perform extra reductions after fetching a lookahead from the + scanner and before detecting a syntax error. Thus, state merging + (from LALR or IELR) and default reductions corrupt the expected + token list. However, the list is correct for canonical LR with + one exception: it will still contain any token that will not be + accepted due to an error action in a later state. + */ + if (yytoken != YYEMPTY) + { + int yyn = yypact[*yyssp]; + yyarg[yycount++] = yytname[yytoken]; + if (!yypact_value_is_default (yyn)) + { + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. In other words, skip the first -YYN actions for + this state because they are default actions. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + int yyx; + + for (yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR + && !yytable_value_is_error (yytable[yyx + yyn])) + { + if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) + { + yycount = 1; + yysize = yysize0; + break; + } + yyarg[yycount++] = yytname[yyx]; + { + YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); + if (! (yysize <= yysize1 + && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) + return 2; + yysize = yysize1; + } + } + } + } + + switch (yycount) + { +# define YYCASE_(N, S) \ + case N: \ + yyformat = S; \ + break + YYCASE_(0, YY_("syntax error")); + YYCASE_(1, YY_("syntax error, unexpected %s")); + YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); + YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); + YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); + YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); +# undef YYCASE_ + } + + { + YYSIZE_T yysize1 = yysize + yystrlen (yyformat); + if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) + return 2; + yysize = yysize1; + } + + if (*yymsg_alloc < yysize) + { + *yymsg_alloc = 2 * yysize; + if (! (yysize <= *yymsg_alloc + && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) + *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; + return 1; + } + + /* Avoid sprintf, as that infringes on the user's name space. + Don't have undefined behavior even if the translation + produced a string with the wrong number of "%s"s. */ + { + char *yyp = *yymsg; + int yyi = 0; + while ((*yyp = *yyformat) != '\0') + if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) + { + yyp += yytnamerr (yyp, yyarg[yyi++]); + yyformat += 2; + } + else + { + yyp++; + yyformat++; + } + } + return 0; +} +#endif /* YYERROR_VERBOSE */ + +/*-----------------------------------------------. +| Release the memory associated to this symbol. 
| +`-----------------------------------------------*/ + +static void +yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, glslang::TParseContext* pParseContext) +{ + YYUSE (yyvaluep); + YYUSE (pParseContext); + if (!yymsg) + yymsg = "Deleting"; + YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); + + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + YYUSE (yytype); + YY_IGNORE_MAYBE_UNINITIALIZED_END +} + + + + +/*----------. +| yyparse. | +`----------*/ + +int +yyparse (glslang::TParseContext* pParseContext) +{ +/* The lookahead symbol. */ +int yychar; + + +/* The semantic value of the lookahead symbol. */ +/* Default value used for initialization, for pacifying older GCCs + or non-GCC compilers. */ +YY_INITIAL_VALUE (static YYSTYPE yyval_default;) +YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default); + + /* Number of syntax errors so far. */ + int yynerrs; + + int yystate; + /* Number of tokens to shift before error messages enabled. */ + int yyerrstatus; + + /* The stacks and their tools: + 'yyss': related to states. + 'yyvs': related to semantic values. + + Refer to the stacks through separate pointers, to allow yyoverflow + to reallocate them elsewhere. */ + + /* The state stack. */ + yytype_int16 yyssa[YYINITDEPTH]; + yytype_int16 *yyss; + yytype_int16 *yyssp; + + /* The semantic value stack. */ + YYSTYPE yyvsa[YYINITDEPTH]; + YYSTYPE *yyvs; + YYSTYPE *yyvsp; + + YYSIZE_T yystacksize; + + int yyn; + int yyresult; + /* Lookahead token as an internal (translated) token number. */ + int yytoken = 0; + /* The variables used to return semantic value and location from the + action routines. */ + YYSTYPE yyval; + +#if YYERROR_VERBOSE + /* Buffer for error messages, and its allocated size. */ + char yymsgbuf[128]; + char *yymsg = yymsgbuf; + YYSIZE_T yymsg_alloc = sizeof yymsgbuf; +#endif + +#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) + + /* The number of symbols on the RHS of the reduced rule. + Keep to zero when no symbol should be popped. */ + int yylen = 0; + + yyssp = yyss = yyssa; + yyvsp = yyvs = yyvsa; + yystacksize = YYINITDEPTH; + + YYDPRINTF ((stderr, "Starting parse\n")); + + yystate = 0; + yyerrstatus = 0; + yynerrs = 0; + yychar = YYEMPTY; /* Cause a token to be read. */ + goto yysetstate; + +/*------------------------------------------------------------. +| yynewstate -- Push a new state, which is found in yystate. | +`------------------------------------------------------------*/ + yynewstate: + /* In all cases, when you get here, the value and location stacks + have just been pushed. So pushing a state here evens the stacks. */ + yyssp++; + + yysetstate: + *yyssp = yystate; + + if (yyss + yystacksize - 1 <= yyssp) + { + /* Get the current used size of the three stacks, in elements. */ + YYSIZE_T yysize = yyssp - yyss + 1; + +#ifdef yyoverflow + { + /* Give user a chance to reallocate the stack. Use copies of + these so that the &'s don't force the real ones into + memory. */ + YYSTYPE *yyvs1 = yyvs; + yytype_int16 *yyss1 = yyss; + + /* Each stack pointer address is followed by the size of the + data in use in that stack, in bytes. This used to be a + conditional around just the two extra args, but that might + be undefined if yyoverflow is a macro. */ + yyoverflow (YY_("memory exhausted"), + &yyss1, yysize * sizeof (*yyssp), + &yyvs1, yysize * sizeof (*yyvsp), + &yystacksize); + + yyss = yyss1; + yyvs = yyvs1; + } +#else /* no yyoverflow */ +# ifndef YYSTACK_RELOCATE + goto yyexhaustedlab; +# else + /* Extend the stack our own way. 
*/ + if (YYMAXDEPTH <= yystacksize) + goto yyexhaustedlab; + yystacksize *= 2; + if (YYMAXDEPTH < yystacksize) + yystacksize = YYMAXDEPTH; + + { + yytype_int16 *yyss1 = yyss; + union yyalloc *yyptr = + (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); + if (! yyptr) + goto yyexhaustedlab; + YYSTACK_RELOCATE (yyss_alloc, yyss); + YYSTACK_RELOCATE (yyvs_alloc, yyvs); +# undef YYSTACK_RELOCATE + if (yyss1 != yyssa) + YYSTACK_FREE (yyss1); + } +# endif +#endif /* no yyoverflow */ + + yyssp = yyss + yysize - 1; + yyvsp = yyvs + yysize - 1; + + YYDPRINTF ((stderr, "Stack size increased to %lu\n", + (unsigned long int) yystacksize)); + + if (yyss + yystacksize - 1 <= yyssp) + YYABORT; + } + + YYDPRINTF ((stderr, "Entering state %d\n", yystate)); + + if (yystate == YYFINAL) + YYACCEPT; + + goto yybackup; + +/*-----------. +| yybackup. | +`-----------*/ +yybackup: + + /* Do appropriate processing given the current state. Read a + lookahead token if we need one and don't already have one. */ + + /* First try to decide what to do without reference to lookahead token. */ + yyn = yypact[yystate]; + if (yypact_value_is_default (yyn)) + goto yydefault; + + /* Not known => get a lookahead token if don't already have one. */ + + /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ + if (yychar == YYEMPTY) + { + YYDPRINTF ((stderr, "Reading a token: ")); + yychar = yylex (&yylval, parseContext); + } + + if (yychar <= YYEOF) + { + yychar = yytoken = YYEOF; + YYDPRINTF ((stderr, "Now at end of input.\n")); + } + else + { + yytoken = YYTRANSLATE (yychar); + YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); + } + + /* If the proper action on seeing token YYTOKEN is to reduce or to + detect an error, take that action. */ + yyn += yytoken; + if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) + goto yydefault; + yyn = yytable[yyn]; + if (yyn <= 0) + { + if (yytable_value_is_error (yyn)) + goto yyerrlab; + yyn = -yyn; + goto yyreduce; + } + + /* Count tokens shifted since error; after three, turn off error + status. */ + if (yyerrstatus) + yyerrstatus--; + + /* Shift the lookahead token. */ + YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); + + /* Discard the shifted token. */ + yychar = YYEMPTY; + + yystate = yyn; + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + *++yyvsp = yylval; + YY_IGNORE_MAYBE_UNINITIALIZED_END + + goto yynewstate; + + +/*-----------------------------------------------------------. +| yydefault -- do the default action for the current state. | +`-----------------------------------------------------------*/ +yydefault: + yyn = yydefact[yystate]; + if (yyn == 0) + goto yyerrlab; + goto yyreduce; + + +/*-----------------------------. +| yyreduce -- Do a reduction. | +`-----------------------------*/ +yyreduce: + /* yyn is the number of a rule to reduce with. */ + yylen = yyr2[yyn]; + + /* If YYLEN is nonzero, implement the default value of the action: + '$$ = $1'. + + Otherwise, the following line sets YYVAL to garbage. + This behavior is undocumented and Bison + users should not rely upon it. Assigning to YYVAL + unconditionally makes the parser a bit smaller, and it avoids a + GCC warning that YYVAL may be used uninitialized. 
*/ + yyval = yyvsp[1-yylen]; + + + YY_REDUCE_PRINT (yyn); + switch (yyn) + { + case 2: +#line 357 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleVariable((yyvsp[0].lex).loc, (yyvsp[0].lex).symbol, (yyvsp[0].lex).string); + } +#line 4229 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 3: +#line 363 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 4237 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 4: +#line 366 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode); + if ((yyval.interm.intermTypedNode)->getAsConstantUnion()) + (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression(); + } +#line 4247 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 5: +#line 371 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat, (yyvsp[0].lex).loc, true); + } +#line 4255 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 6: +#line 374 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); + } +#line 4263 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 7: +#line 377 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); + } +#line 4272 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 8: +#line 381 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).b, (yyvsp[0].lex).loc, true); + } +#line 4280 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 9: +#line 385 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).string, (yyvsp[0].lex).loc, true); + } +#line 4288 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 10: +#line 388 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); + } +#line 4297 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 11: +#line 392 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); + } +#line 4306 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 12: +#line 396 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i64, (yyvsp[0].lex).loc, true); + } +#line 4315 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 13: +#line 400 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u64, (yyvsp[0].lex).loc, true); + } +#line 4324 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 14: +#line 404 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit integer literal"); + 
(yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((short)(yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); + } +#line 4333 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 15: +#line 408 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit unsigned integer literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((unsigned short)(yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); + } +#line 4342 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 16: +#line 412 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double literal"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtDouble, (yyvsp[0].lex).loc, true); + } +#line 4353 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 17: +#line 418 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat16, (yyvsp[0].lex).loc, true); + } +#line 4362 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 18: +#line 426 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 4370 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 19: +#line 429 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBracketDereference((yyvsp[-2].lex).loc, (yyvsp[-3].interm.intermTypedNode), (yyvsp[-1].interm.intermTypedNode)); + } +#line 4378 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 20: +#line 432 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 4386 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 21: +#line 435 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleDotDereference((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode), *(yyvsp[0].lex).string); + } +#line 4394 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 22: +#line 438 "glslang.y" /* yacc.c:1646 */ + { + parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode)); + parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "++", (yyvsp[-1].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "++", EOpPostIncrement, (yyvsp[-1].interm.intermTypedNode)); + } +#line 4404 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 23: +#line 443 "glslang.y" /* yacc.c:1646 */ + { + parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode)); + parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "--", (yyvsp[-1].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "--", EOpPostDecrement, (yyvsp[-1].interm.intermTypedNode)); + } +#line 4414 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 24: +#line 451 "glslang.y" /* yacc.c:1646 */ + { + parseContext.integerCheck((yyvsp[0].interm.intermTypedNode), "[]"); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 4423 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 25: +#line 458 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleFunctionCall((yyvsp[0].interm).loc, 
(yyvsp[0].interm).function, (yyvsp[0].interm).intermNode); + delete (yyvsp[0].interm).function; + } +#line 4432 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 26: +#line 465 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + } +#line 4440 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 27: +#line 471 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-1].interm); + (yyval.interm).loc = (yyvsp[0].lex).loc; + } +#line 4449 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 28: +#line 475 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-1].interm); + (yyval.interm).loc = (yyvsp[0].lex).loc; + } +#line 4458 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 29: +#line 482 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-1].interm); + } +#line 4466 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 30: +#line 485 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + } +#line 4474 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 31: +#line 491 "glslang.y" /* yacc.c:1646 */ + { + TParameter param = { 0, new TType }; + param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType()); + (yyvsp[-1].interm).function->addParameter(param); + (yyval.interm).function = (yyvsp[-1].interm).function; + (yyval.interm).intermNode = (yyvsp[0].interm.intermTypedNode); + } +#line 4486 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 32: +#line 498 "glslang.y" /* yacc.c:1646 */ + { + TParameter param = { 0, new TType }; + param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType()); + (yyvsp[-2].interm).function->addParameter(param); + (yyval.interm).function = (yyvsp[-2].interm).function; + (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc); + } +#line 4498 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 33: +#line 508 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-1].interm); + } +#line 4506 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 34: +#line 516 "glslang.y" /* yacc.c:1646 */ + { + // Constructor + (yyval.interm).intermNode = 0; + (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type)); + } +#line 4516 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 35: +#line 521 "glslang.y" /* yacc.c:1646 */ + { + // + // Should be a method or subroutine call, but we haven't recognized the arguments yet. 
+ // + (yyval.interm).function = 0; + (yyval.interm).intermNode = 0; + + TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode(); + if (method) { + (yyval.interm).function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength); + (yyval.interm).intermNode = method->getObject(); + } else { + TIntermSymbol* symbol = (yyvsp[0].interm.intermTypedNode)->getAsSymbolNode(); + if (symbol) { + parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName()); + TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid)); + (yyval.interm).function = function; + } else + parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "function call, method, or subroutine call expected", "", ""); + } + + if ((yyval.interm).function == 0) { + // error recover + TString* empty = NewPoolTString(""); + (yyval.interm).function = new TFunction(empty, TType(EbtVoid), EOpNull); + } + } +#line 4548 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 36: +#line 549 "glslang.y" /* yacc.c:1646 */ + { + // Constructor + (yyval.interm).intermNode = 0; + (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type)); + } +#line 4558 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 37: +#line 558 "glslang.y" /* yacc.c:1646 */ + { + parseContext.variableCheck((yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + if (TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode()) + parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), ""); + } +#line 4569 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 38: +#line 564 "glslang.y" /* yacc.c:1646 */ + { + parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "++", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "++", EOpPreIncrement, (yyvsp[0].interm.intermTypedNode)); + } +#line 4578 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 39: +#line 568 "glslang.y" /* yacc.c:1646 */ + { + parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "--", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "--", EOpPreDecrement, (yyvsp[0].interm.intermTypedNode)); + } +#line 4587 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 40: +#line 572 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-1].interm).op != EOpNull) { + char errorOp[2] = {0, 0}; + switch((yyvsp[-1].interm).op) { + case EOpNegative: errorOp[0] = '-'; break; + case EOpLogicalNot: errorOp[0] = '!'; break; + case EOpBitwiseNot: errorOp[0] = '~'; break; + default: break; // some compilers want this + } + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].interm).loc, errorOp, (yyvsp[-1].interm).op, (yyvsp[0].interm.intermTypedNode)); + } else { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + if ((yyval.interm.intermTypedNode)->getAsConstantUnion()) + (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression(); + } + } +#line 4608 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 41: +#line 592 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNull; } +#line 4614 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 42: +#line 593 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = 
EOpNegative; } +#line 4620 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 43: +#line 594 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLogicalNot; } +#line 4626 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 44: +#line 595 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpBitwiseNot; + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise not"); } +#line 4633 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 45: +#line 601 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4639 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 46: +#line 602 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "*", EOpMul, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4649 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 47: +#line 607 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "/", EOpDiv, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4659 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 48: +#line 612 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "%"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "%", EOpMod, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4670 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 49: +#line 621 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4676 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 50: +#line 622 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "+", EOpAdd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4686 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 51: +#line 627 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "-", EOpSub, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4696 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 52: +#line 635 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4702 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 53: +#line 636 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift left"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<<", EOpLeftShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = 
(yyvsp[-2].interm.intermTypedNode); + } +#line 4713 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 54: +#line 642 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift right"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">>", EOpRightShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4724 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 55: +#line 651 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4730 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 56: +#line 652 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<", EOpLessThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4740 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 57: +#line 657 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">", EOpGreaterThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4750 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 58: +#line 662 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<=", EOpLessThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4760 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 59: +#line 667 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">=", EOpGreaterThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4770 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 60: +#line 675 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4776 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 61: +#line 676 "glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison"); + parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=="); + parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=="); + parseContext.referenceCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=="); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "==", EOpEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, 
(yyvsp[-1].lex).loc); + } +#line 4790 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 62: +#line 685 "glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison"); + parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!="); + parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!="); + parseContext.referenceCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!="); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "!=", EOpNotEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4804 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 63: +#line 697 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4810 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 64: +#line 698 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise and"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&", EOpAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4821 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 65: +#line 707 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4827 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 66: +#line 708 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise exclusive or"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^", EOpExclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4838 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 67: +#line 717 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4844 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 68: +#line 718 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise inclusive or"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "|", EOpInclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4855 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 69: +#line 727 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4861 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 70: +#line 728 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&&", EOpLogicalAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } 
+#line 4871 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 71: +#line 736 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4877 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 72: +#line 737 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^^", EOpLogicalXor, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4887 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 73: +#line 745 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4893 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 74: +#line 746 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "||", EOpLogicalOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4903 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 75: +#line 754 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4909 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 76: +#line 755 "glslang.y" /* yacc.c:1646 */ + { + ++parseContext.controlFlowNestingLevel; + } +#line 4917 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 77: +#line 758 "glslang.y" /* yacc.c:1646 */ + { + --parseContext.controlFlowNestingLevel; + parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-5].interm.intermTypedNode)); + parseContext.rValueErrorCheck((yyvsp[-4].lex).loc, "?", (yyvsp[-5].interm.intermTypedNode)); + parseContext.rValueErrorCheck((yyvsp[-1].lex).loc, ":", (yyvsp[-2].interm.intermTypedNode)); + parseContext.rValueErrorCheck((yyvsp[-1].lex).loc, ":", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addSelection((yyvsp[-5].interm.intermTypedNode), (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-4].lex).loc); + if ((yyval.interm.intermTypedNode) == 0) { + parseContext.binaryOpError((yyvsp[-4].lex).loc, ":", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString()); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } + } +#line 4934 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 78: +#line 773 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4940 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 79: +#line 774 "glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayObjectCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array assignment"); + parseContext.opaqueCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "="); + parseContext.storage16BitAssignmentCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "="); + parseContext.specializationCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "="); + parseContext.lValueErrorCheck((yyvsp[-1].interm).loc, "assign", (yyvsp[-2].interm.intermTypedNode)); + parseContext.rValueErrorCheck((yyvsp[-1].interm).loc, 
"assign", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addAssign((yyvsp[-1].interm).op, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].interm).loc); + if ((yyval.interm.intermTypedNode) == 0) { + parseContext.assignError((yyvsp[-1].interm).loc, "assign", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString()); + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } + } +#line 4958 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 80: +#line 790 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpAssign; + } +#line 4967 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 81: +#line 794 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpMulAssign; + } +#line 4976 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 82: +#line 798 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpDivAssign; + } +#line 4985 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 83: +#line 802 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "%="); + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpModAssign; + } +#line 4995 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 84: +#line 807 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpAddAssign; + } +#line 5004 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 85: +#line 811 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpSubAssign; + } +#line 5013 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 86: +#line 815 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift left assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLeftShiftAssign; + } +#line 5022 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 87: +#line 819 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift right assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpRightShiftAssign; + } +#line 5031 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 88: +#line 823 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-and assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpAndAssign; + } +#line 5040 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 89: +#line 827 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-xor assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpExclusiveOrAssign; + } +#line 5049 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 90: +#line 831 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-or assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpInclusiveOrAssign; + } +#line 5058 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 91: +#line 838 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 5066 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 92: +#line 841 "glslang.y" /* yacc.c:1646 */ + { + parseContext.samplerConstructorLocationCheck((yyvsp[-1].lex).loc, ",", 
(yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addComma((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc); + if ((yyval.interm.intermTypedNode) == 0) { + parseContext.binaryOpError((yyvsp[-1].lex).loc, ",", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString()); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } + } +#line 5079 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 93: +#line 852 "glslang.y" /* yacc.c:1646 */ + { + parseContext.constantValueCheck((yyvsp[0].interm.intermTypedNode), ""); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 5088 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 94: +#line 859 "glslang.y" /* yacc.c:1646 */ + { + parseContext.handleFunctionDeclarator((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).function, true /* prototype */); + (yyval.interm.intermNode) = 0; + // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature + } +#line 5098 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 95: +#line 864 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-1].interm).intermNode && (yyvsp[-1].interm).intermNode->getAsAggregate()) + (yyvsp[-1].interm).intermNode->getAsAggregate()->setOperator(EOpSequence); + (yyval.interm.intermNode) = (yyvsp[-1].interm).intermNode; + } +#line 5108 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 96: +#line 869 "glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[-3].lex).loc, ENoProfile, 130, 0, "precision statement"); + // lazy setting of the previous scope's defaults, has effect only the first time it is called in a particular scope + parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]); + parseContext.setDefaultPrecision((yyvsp[-3].lex).loc, (yyvsp[-1].interm.type), (yyvsp[-2].interm.type).qualifier.precision); + (yyval.interm.intermNode) = 0; + } +#line 5120 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 97: +#line 876 "glslang.y" /* yacc.c:1646 */ + { + parseContext.declareBlock((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).typeList); + (yyval.interm.intermNode) = 0; + } +#line 5129 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 98: +#line 880 "glslang.y" /* yacc.c:1646 */ + { + parseContext.declareBlock((yyvsp[-2].interm).loc, *(yyvsp[-2].interm).typeList, (yyvsp[-1].lex).string); + (yyval.interm.intermNode) = 0; + } +#line 5138 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 99: +#line 884 "glslang.y" /* yacc.c:1646 */ + { + parseContext.declareBlock((yyvsp[-3].interm).loc, *(yyvsp[-3].interm).typeList, (yyvsp[-2].lex).string, (yyvsp[-1].interm).arraySizes); + (yyval.interm.intermNode) = 0; + } +#line 5147 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 100: +#line 888 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier); + parseContext.updateStandaloneQualifierDefaults((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type)); + (yyval.interm.intermNode) = 0; + } +#line 5157 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 101: +#line 893 "glslang.y" /* yacc.c:1646 */ + { + parseContext.checkNoShaderLayouts((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).shaderQualifiers); + parseContext.addQualifierToExisting((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, *(yyvsp[-1].lex).string); + 
(yyval.interm.intermNode) = 0; + } +#line 5167 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 102: +#line 898 "glslang.y" /* yacc.c:1646 */ + { + parseContext.checkNoShaderLayouts((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).shaderQualifiers); + (yyvsp[-1].interm.identifierList)->push_back((yyvsp[-2].lex).string); + parseContext.addQualifierToExisting((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).qualifier, *(yyvsp[-1].interm.identifierList)); + (yyval.interm.intermNode) = 0; + } +#line 5178 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 103: +#line 907 "glslang.y" /* yacc.c:1646 */ + { parseContext.nestedBlockCheck((yyvsp[-2].interm.type).loc); } +#line 5184 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 104: +#line 907 "glslang.y" /* yacc.c:1646 */ + { + --parseContext.structNestingLevel; + parseContext.blockName = (yyvsp[-4].lex).string; + parseContext.globalQualifierFixCheck((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).qualifier); + parseContext.checkNoShaderLayouts((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).shaderQualifiers); + parseContext.currentBlockQualifier = (yyvsp[-5].interm.type).qualifier; + (yyval.interm).loc = (yyvsp[-5].interm.type).loc; + (yyval.interm).typeList = (yyvsp[-1].interm.typeList); + } +#line 5198 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 105: +#line 918 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.identifierList) = new TIdentifierList; + (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string); + } +#line 5207 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 106: +#line 922 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.identifierList) = (yyvsp[-2].interm.identifierList); + (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string); + } +#line 5216 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 107: +#line 929 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).function = (yyvsp[-1].interm.function); + (yyval.interm).loc = (yyvsp[0].lex).loc; + } +#line 5225 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 108: +#line 936 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.function) = (yyvsp[0].interm.function); + } +#line 5233 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 109: +#line 939 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.function) = (yyvsp[0].interm.function); + } +#line 5241 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 110: +#line 946 "glslang.y" /* yacc.c:1646 */ + { + // Add the parameter + (yyval.interm.function) = (yyvsp[-1].interm.function); + if ((yyvsp[0].interm).param.type->getBasicType() != EbtVoid) + (yyvsp[-1].interm.function)->addParameter((yyvsp[0].interm).param); + else + delete (yyvsp[0].interm).param.type; + } +#line 5254 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 111: +#line 954 "glslang.y" /* yacc.c:1646 */ + { + // + // Only first parameter of one-parameter functions can be void + // The check for named parameters not being void is done in parameter_declarator + // + if ((yyvsp[0].interm).param.type->getBasicType() == EbtVoid) { + // + // This parameter > first is void + // + parseContext.error((yyvsp[-1].lex).loc, "cannot be an argument type except for '(void)'", "void", ""); + delete (yyvsp[0].interm).param.type; + } else { + // Add the parameter + (yyval.interm.function) = (yyvsp[-2].interm.function); + (yyvsp[-2].interm.function)->addParameter((yyvsp[0].interm).param); + } + } +#line 5276 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 112: +#line 974 "glslang.y" /* yacc.c:1646 */ + { + 
if ((yyvsp[-2].interm.type).qualifier.storage != EvqGlobal && (yyvsp[-2].interm.type).qualifier.storage != EvqTemporary) { + parseContext.error((yyvsp[-1].lex).loc, "no qualifiers allowed for function return", + GetStorageQualifierString((yyvsp[-2].interm.type).qualifier.storage), ""); + } + if ((yyvsp[-2].interm.type).arraySizes) + parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes); + + // Add the function as a prototype after parsing it (we do not support recursion) + TFunction *function; + TType type((yyvsp[-2].interm.type)); + + // Potentially rename shader entry point function. No-op most of the time. + parseContext.renameShaderFunction((yyvsp[-1].lex).string); + + // Make the function + function = new TFunction((yyvsp[-1].lex).string, type); + (yyval.interm.function) = function; + } +#line 5300 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 113: +#line 997 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-1].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[-1].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[-1].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck((yyvsp[-1].interm.type).loc, *(yyvsp[-1].interm.type).arraySizes); + } + if ((yyvsp[-1].interm.type).basicType == EbtVoid) { + parseContext.error((yyvsp[0].lex).loc, "illegal use of type 'void'", (yyvsp[0].lex).string->c_str(), ""); + } + parseContext.reservedErrorCheck((yyvsp[0].lex).loc, *(yyvsp[0].lex).string); + + TParameter param = {(yyvsp[0].lex).string, new TType((yyvsp[-1].interm.type))}; + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).param = param; + } +#line 5320 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 114: +#line 1012 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes); + } + TType* type = new TType((yyvsp[-2].interm.type)); + type->transferArraySizes((yyvsp[0].interm).arraySizes); + type->copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes); + + parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, type->getArraySizes()); + parseContext.arraySizeRequiredCheck((yyvsp[0].interm).loc, *(yyvsp[0].interm).arraySizes); + parseContext.reservedErrorCheck((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string); + + TParameter param = { (yyvsp[-1].lex).string, type }; + + (yyval.interm).loc = (yyvsp[-1].lex).loc; + (yyval.interm).param = param; + } +#line 5344 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 115: +#line 1037 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone) + (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision; + parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); + + parseContext.checkNoShaderLayouts((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers); + parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type); + 
parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type); + + } +#line 5360 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 116: +#line 1048 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + + parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type); + parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type); + parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); + } +#line 5372 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 117: +#line 1058 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone) + (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision; + parseContext.precisionQualifierCheck((yyvsp[-1].interm.type).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); + + parseContext.checkNoShaderLayouts((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers); + parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type); + parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type); + } +#line 5387 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 118: +#line 1068 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + + parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type); + parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type); + parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); + } +#line 5399 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 119: +#line 1078 "glslang.y" /* yacc.c:1646 */ + { + TParameter param = { 0, new TType((yyvsp[0].interm.type)) }; + (yyval.interm).param = param; + if ((yyvsp[0].interm.type).arraySizes) + parseContext.arraySizeRequiredCheck((yyvsp[0].interm.type).loc, *(yyvsp[0].interm.type).arraySizes); + } +#line 5410 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 120: +#line 1087 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + } +#line 5418 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 121: +#line 1090 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-2].interm); + parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-2].interm).type); + } +#line 5427 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 122: +#line 1094 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-3].interm); + parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-3].interm).type, (yyvsp[0].interm).arraySizes); + } +#line 5436 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 123: +#line 1098 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-5].interm).type; + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-5].interm).type, (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode)); + (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-5].interm).intermNode, initNode, (yyvsp[-1].lex).loc); + } +#line 5446 
"glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 124: +#line 1103 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-4].interm).type; + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-4].interm).type, 0, (yyvsp[0].interm.intermTypedNode)); + (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-4].interm).intermNode, initNode, (yyvsp[-1].lex).loc); + } +#line 5456 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 125: +#line 1111 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[0].interm.type); + (yyval.interm).intermNode = 0; + + parseContext.declareTypeDefaults((yyval.interm).loc, (yyval.interm).type); + + } +#line 5468 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 126: +#line 1118 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-1].interm.type); + (yyval.interm).intermNode = 0; + parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-1].interm.type)); + } +#line 5478 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 127: +#line 1123 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-2].interm.type); + (yyval.interm).intermNode = 0; + parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-2].interm.type), (yyvsp[0].interm).arraySizes); + } +#line 5488 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 128: +#line 1128 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-4].interm.type); + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-4].interm.type), (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode)); + (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc); + } +#line 5498 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 129: +#line 1133 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-3].interm.type); + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode)); + (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc); + } +#line 5508 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 130: +#line 1142 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + + parseContext.globalQualifierTypeCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyval.interm.type)); + if ((yyvsp[0].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[0].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[0].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + } + parseContext.precisionQualifierCheck((yyval.interm.type).loc, (yyval.interm.type).basicType, (yyval.interm.type).qualifier); + } +#line 5523 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 131: +#line 1152 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier); + parseContext.globalQualifierTypeCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, (yyvsp[0].interm.type)); + + if ((yyvsp[0].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[0].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[0].interm.type).loc, EEsProfile, 
300, 0, "arrayed type"); + } + + if ((yyvsp[0].interm.type).arraySizes && parseContext.arrayQualifierError((yyvsp[0].interm.type).loc, (yyvsp[-1].interm.type).qualifier)) + (yyvsp[0].interm.type).arraySizes = nullptr; + + parseContext.checkNoShaderLayouts((yyvsp[0].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers); + (yyvsp[0].interm.type).shaderQualifiers.merge((yyvsp[-1].interm.type).shaderQualifiers); + parseContext.mergeQualifiers((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyvsp[-1].interm.type).qualifier, true); + parseContext.precisionQualifierCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).basicType, (yyvsp[0].interm.type).qualifier); + + (yyval.interm.type) = (yyvsp[0].interm.type); + + if (! (yyval.interm.type).qualifier.isInterpolation() && + ((parseContext.language == EShLangVertex && (yyval.interm.type).qualifier.storage == EvqVaryingOut) || + (parseContext.language == EShLangFragment && (yyval.interm.type).qualifier.storage == EvqVaryingIn))) + (yyval.interm.type).qualifier.smooth = true; + } +#line 5552 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 132: +#line 1179 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "invariant"); + parseContext.profileRequires((yyval.interm.type).loc, ENoProfile, 120, 0, "invariant"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.invariant = true; + } +#line 5563 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 133: +#line 1188 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "smooth"); + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "smooth"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "smooth"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.smooth = true; + } +#line 5575 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 134: +#line 1195 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "flat"); + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "flat"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "flat"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.flat = true; + } +#line 5587 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 135: +#line 1203 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "noperspective"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective"); + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "noperspective"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.nopersp = true; + } +#line 5599 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 136: +#line 1210 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "__explicitInterpAMD"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.explicitInterp = true; + } +#line 5611 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 137: +#line 1217 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, 
"pervertexNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.pervertexNV = true; + } +#line 5624 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 138: +#line 1225 "glslang.y" /* yacc.c:1646 */ + { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck((yyvsp[0].lex).loc, "perprimitiveNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV"); + // Fragment shader stage doesn't check for extension. So we explicitly add below extension check. + if (parseContext.language == EShLangFragment) + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.perPrimitiveNV = true; + } +#line 5639 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 139: +#line 1235 "glslang.y" /* yacc.c:1646 */ + { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck((yyvsp[0].lex).loc, "perviewNV"); + parseContext.requireStage((yyvsp[0].lex).loc, EShLangMeshNV, "perviewNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.perViewNV = true; + } +#line 5651 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 140: +#line 1242 "glslang.y" /* yacc.c:1646 */ + { + // No need for profile version or extension check. Shader stage already checks both. 
+ parseContext.globalCheck((yyvsp[0].lex).loc, "taskNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.perTaskNV = true; + } +#line 5663 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 141: +#line 1253 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[-1].interm.type); + } +#line 5671 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 142: +#line 1259 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5679 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 143: +#line 1262 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[-2].interm.type); + (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers); + parseContext.mergeObjectLayoutQualifiers((yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false); + } +#line 5689 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 144: +#line 1269 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), *(yyvsp[0].lex).string); + } +#line 5698 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 145: +#line 1273 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[-2].lex).loc); + parseContext.setLayoutQualifier((yyvsp[-2].lex).loc, (yyval.interm.type), *(yyvsp[-2].lex).string, (yyvsp[0].interm.intermTypedNode)); + } +#line 5707 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 146: +#line 1277 "glslang.y" /* yacc.c:1646 */ + { // because "shared" is both an identifier and a keyword + (yyval.interm.type).init((yyvsp[0].lex).loc); + TString strShared("shared"); + parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), strShared); + } +#line 5717 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 147: +#line 1286 "glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyval.interm.type).loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.noContraction = true; + } +#line 5728 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 148: +#line 1296 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5736 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 149: +#line 1299 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[-1].interm.type); + if ((yyval.interm.type).basicType == EbtVoid) + (yyval.interm.type).basicType = (yyvsp[0].interm.type).basicType; + + (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers); + parseContext.mergeQualifiers((yyval.interm.type).loc, (yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false); + } +#line 5749 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 150: +#line 1310 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5757 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 151: +#line 1313 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5765 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 152: +#line 1316 "glslang.y" /* yacc.c:1646 */ + { + 
parseContext.checkPrecisionQualifier((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier.precision); + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5774 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 153: +#line 1320 "glslang.y" /* yacc.c:1646 */ + { + // allow inheritance of storage qualifier from block declaration + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5783 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 154: +#line 1324 "glslang.y" /* yacc.c:1646 */ + { + // allow inheritance of storage qualifier from block declaration + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5792 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 155: +#line 1329 "glslang.y" /* yacc.c:1646 */ + { + // allow inheritance of storage qualifier from block declaration + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5801 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 156: +#line 1333 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5809 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 157: +#line 1340 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant + } +#line 5818 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 158: +#line 1344 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "inout"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqInOut; + } +#line 5828 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 159: +#line 1349 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "in"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later + (yyval.interm.type).qualifier.storage = EvqIn; + } +#line 5839 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 160: +#line 1355 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "out"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later + (yyval.interm.type).qualifier.storage = EvqOut; + } +#line 5850 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 161: +#line 1361 "glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 120, 0, "centroid"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "centroid"); + parseContext.globalCheck((yyvsp[0].lex).loc, "centroid"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.centroid = true; + } +#line 5862 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 162: +#line 1368 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "uniform"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqUniform; + } +#line 5872 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 163: +#line 1373 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "shared"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 310, 0, "shared"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangComputeMask | 
EShLangMeshNVMask | EShLangTaskNVMask), "shared"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqShared; + } +#line 5885 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 164: +#line 1381 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "buffer"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqBuffer; + } +#line 5895 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 165: +#line 1387 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangVertex, "attribute"); + parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "attribute"); + parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "attribute"); + parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "attribute"); + parseContext.requireNotRemoved((yyvsp[0].lex).loc, EEsProfile, 300, "attribute"); + + parseContext.globalCheck((yyvsp[0].lex).loc, "attribute"); + + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqVaryingIn; + } +#line 5912 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 166: +#line 1399 "glslang.y" /* yacc.c:1646 */ + { + parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "varying"); + parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "varying"); + parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "varying"); + parseContext.requireNotRemoved((yyvsp[0].lex).loc, EEsProfile, 300, "varying"); + + parseContext.globalCheck((yyvsp[0].lex).loc, "varying"); + + (yyval.interm.type).init((yyvsp[0].lex).loc); + if (parseContext.language == EShLangVertex) + (yyval.interm.type).qualifier.storage = EvqVaryingOut; + else + (yyval.interm.type).qualifier.storage = EvqVaryingIn; + } +#line 5931 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 167: +#line 1413 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "patch"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.patch = true; + } +#line 5942 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 168: +#line 1419 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "sample"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.sample = true; + } +#line 5952 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 169: +#line 1424 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "hitAttributeNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqHitAttr; + } +#line 5965 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 170: +#line 1432 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "hitAttributeEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "hitAttributeNV"); + 
(yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqHitAttr; + } +#line 5978 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 171: +#line 1440 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqPayload; + } +#line 5991 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 172: +#line 1448 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadEXT"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqPayload; + } +#line 6004 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 173: +#line 1456 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadInNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqPayloadIn; + } +#line 6017 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 174: +#line 1464 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadInEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadInEXT"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqPayloadIn; + } +#line 6030 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 175: +#line 1472 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqCallableData; + } +#line 6043 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 176: +#line 1480 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataEXT"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqCallableData; + } +#line 6056 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + 
case 177: +#line 1488 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataInNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqCallableDataIn; + } +#line 6068 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 178: +#line 1495 "glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataInEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataInEXT"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqCallableDataIn; + } +#line 6080 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 179: +#line 1502 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.coherent = true; + } +#line 6089 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 180: +#line 1506 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent"); + (yyval.interm.type).qualifier.devicecoherent = true; + } +#line 6099 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 181: +#line 1511 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent"); + (yyval.interm.type).qualifier.queuefamilycoherent = true; + } +#line 6109 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 182: +#line 1516 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent"); + (yyval.interm.type).qualifier.workgroupcoherent = true; + } +#line 6119 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 183: +#line 1521 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent"); + (yyval.interm.type).qualifier.subgroupcoherent = true; + } +#line 6129 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 184: +#line 1526 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate"); + (yyval.interm.type).qualifier.nonprivate = true; + } +#line 6139 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 185: +#line 1531 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_EXT_ray_tracing, "shadercallcoherent"); + (yyval.interm.type).qualifier.shadercallcoherent = true; + } +#line 6149 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 186: +#line 1536 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.volatil = true; + } +#line 6158 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 187: +#line 1540 "glslang.y" /* yacc.c:1646 */ + { + 
(yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.restrict = true; + } +#line 6167 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 188: +#line 1544 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.readonly = true; + } +#line 6176 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 189: +#line 1548 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.writeonly = true; + } +#line 6185 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 190: +#line 1552 "glslang.y" /* yacc.c:1646 */ + { + parseContext.spvRemoved((yyvsp[0].lex).loc, "subroutine"); + parseContext.globalCheck((yyvsp[0].lex).loc, "subroutine"); + parseContext.unimplemented((yyvsp[0].lex).loc, "subroutine"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + } +#line 6196 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 191: +#line 1558 "glslang.y" /* yacc.c:1646 */ + { + parseContext.spvRemoved((yyvsp[-3].lex).loc, "subroutine"); + parseContext.globalCheck((yyvsp[-3].lex).loc, "subroutine"); + parseContext.unimplemented((yyvsp[-3].lex).loc, "subroutine"); + (yyval.interm.type).init((yyvsp[-3].lex).loc); + } +#line 6207 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 192: +#line 1569 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.nonUniform = true; + } +#line 6216 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 193: +#line 1576 "glslang.y" /* yacc.c:1646 */ + { + // TODO + } +#line 6224 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 194: +#line 1579 "glslang.y" /* yacc.c:1646 */ + { + // TODO: 4.0 semantics: subroutines + // 1) make sure each identifier is a type declared earlier with SUBROUTINE + // 2) save all of the identifiers for future comparison with the declared function + } +#line 6234 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 195: +#line 1588 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[-1].interm.type); + (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type)); + (yyval.interm.type).typeParameters = (yyvsp[0].interm.typeParameters); + } +#line 6244 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 196: +#line 1593 "glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayOfArrayVersionCheck((yyvsp[0].interm).loc, (yyvsp[0].interm).arraySizes); + (yyval.interm.type) = (yyvsp[-2].interm.type); + (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type)); + (yyval.interm.type).typeParameters = (yyvsp[-1].interm.typeParameters); + (yyval.interm.type).arraySizes = (yyvsp[0].interm).arraySizes; + } +#line 6256 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 197: +#line 1603 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[-1].lex).loc; + (yyval.interm).arraySizes = new TArraySizes; + (yyval.interm).arraySizes->addInnerSize(); + } +#line 6266 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 198: +#line 1608 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[-2].lex).loc; + (yyval.interm).arraySizes = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size"); + (yyval.interm).arraySizes->addInnerSize(size); + } +#line 6279 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 199: +#line 1616 "glslang.y" /* yacc.c:1646 */ + 
{ + (yyval.interm) = (yyvsp[-2].interm); + (yyval.interm).arraySizes->addInnerSize(); + } +#line 6288 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 200: +#line 1620 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-3].interm); + + TArraySize size; + parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size"); + (yyval.interm).arraySizes->addInnerSize(size); + } +#line 6300 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 201: +#line 1630 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = (yyvsp[0].interm.typeParameters); + } +#line 6308 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 202: +#line 1633 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = 0; + } +#line 6316 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 203: +#line 1639 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = (yyvsp[-1].interm.typeParameters); + } +#line 6324 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 204: +#line 1645 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter"); + (yyval.interm.typeParameters)->addInnerSize(size); + } +#line 6336 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 205: +#line 1652 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = (yyvsp[-2].interm.typeParameters); + + TArraySize size; + parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter"); + (yyval.interm.typeParameters)->addInnerSize(size); + } +#line 6348 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 206: +#line 1662 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtVoid; + } +#line 6357 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 207: +#line 1666 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + } +#line 6366 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 208: +#line 1670 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + } +#line 6375 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 209: +#line 1674 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + } +#line 6385 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 210: +#line 1679 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtBool; + } +#line 6394 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 211: +#line 1683 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(2); + } +#line 6404 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 212: +#line 1688 "glslang.y" /* yacc.c:1646 */ + { + 
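+ // Recurring pattern for the type-keyword rules: init() records the token
+ // location and whether the symbol table is at global scope, then the action
+ // sets basicType and, for vector types, the component count via setVector().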
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(3); + } +#line 6414 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 213: +#line 1693 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(4); + } +#line 6424 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 214: +#line 1698 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtBool; + (yyval.interm.type).setVector(2); + } +#line 6434 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 215: +#line 1703 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtBool; + (yyval.interm.type).setVector(3); + } +#line 6444 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 216: +#line 1708 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtBool; + (yyval.interm.type).setVector(4); + } +#line 6454 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 217: +#line 1713 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(2); + } +#line 6464 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 218: +#line 1718 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(3); + } +#line 6474 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 219: +#line 1723 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(4); + } +#line 6484 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 220: +#line 1728 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(2); + } +#line 6495 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 221: +#line 1734 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(3); + } +#line 6506 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 222: +#line 1740 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(4); + } +#line 6517 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 223: +#line 1746 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = 
EbtFloat; + (yyval.interm.type).setMatrix(2, 2); + } +#line 6527 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 224: +#line 1751 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 3); + } +#line 6537 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 225: +#line 1756 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 4); + } +#line 6547 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 226: +#line 1761 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 2); + } +#line 6557 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 227: +#line 1766 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 3); + } +#line 6567 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 228: +#line 1771 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 4); + } +#line 6577 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 229: +#line 1776 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 2); + } +#line 6587 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 230: +#line 1781 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 3); + } +#line 6597 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 231: +#line 1786 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 4); + } +#line 6607 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 232: +#line 1791 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 2); + } +#line 6617 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 233: +#line 1796 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 3); + } +#line 6627 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 234: +#line 1801 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 4); + } +#line 6637 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 235: +#line 1807 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + } +#line 6649 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 236: +#line 1814 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "float16_t", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + } +#line 6659 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 237: +#line 1819 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + } +#line 6669 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 238: +#line 1824 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + } +#line 6679 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 239: +#line 1829 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt8; + } +#line 6689 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 240: +#line 1834 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint8; + } +#line 6699 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 241: +#line 1839 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt16; + } +#line 6709 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 242: +#line 1844 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint16; + } +#line 6719 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 243: +#line 1849 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + } +#line 6729 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 244: +#line 1854 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + } +#line 6739 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 245: +#line 1859 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt64; + } +#line 6749 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 246: +#line 1864 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint64; + } +#line 6759 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 247: +#line 1869 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(2); + } +#line 6772 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 248: +#line 1877 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(3); + } +#line 6785 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 249: +#line 1885 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(4); + } +#line 6798 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 250: +#line 1893 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setVector(2); + } +#line 6809 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 251: +#line 1899 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setVector(3); + } +#line 6820 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 252: +#line 1905 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setVector(4); + } +#line 6831 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 253: +#line 1911 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(2); + } +#line 6842 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 254: +#line 1917 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(3); + } +#line 6853 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 255: +#line 1923 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(4); + } +#line 6864 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 256: +#line 1929 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(2); + } +#line 6875 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 257: +#line 1935 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; 
+ (yyval.interm.type).setVector(3); + } +#line 6886 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 258: +#line 1941 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(4); + } +#line 6897 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 259: +#line 1947 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt8; + (yyval.interm.type).setVector(2); + } +#line 6908 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 260: +#line 1953 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt8; + (yyval.interm.type).setVector(3); + } +#line 6919 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 261: +#line 1959 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt8; + (yyval.interm.type).setVector(4); + } +#line 6930 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 262: +#line 1965 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt16; + (yyval.interm.type).setVector(2); + } +#line 6941 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 263: +#line 1971 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt16; + (yyval.interm.type).setVector(3); + } +#line 6952 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 264: +#line 1977 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt16; + (yyval.interm.type).setVector(4); + } +#line 6963 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 265: +#line 1983 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(2); + } +#line 6974 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 266: +#line 1989 "glslang.y" /* yacc.c:1646 */ + { + 
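+ // The *Check helpers (int64Check, explicitInt32Check, float16ScalarVectorCheck,
+ // ...) enforce the profile/extension requirements for extended types; the
+ // atBuiltInLevel() flag relaxes them while built-in declarations are parsed.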
parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(3); + } +#line 6985 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 267: +#line 1995 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(4); + } +#line 6996 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 268: +#line 2001 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt64; + (yyval.interm.type).setVector(2); + } +#line 7007 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 269: +#line 2007 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt64; + (yyval.interm.type).setVector(3); + } +#line 7018 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 270: +#line 2013 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt64; + (yyval.interm.type).setVector(4); + } +#line 7029 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 271: +#line 2019 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint8; + (yyval.interm.type).setVector(2); + } +#line 7040 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 272: +#line 2025 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint8; + (yyval.interm.type).setVector(3); + } +#line 7051 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 273: +#line 2031 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint8; + (yyval.interm.type).setVector(4); + } +#line 7062 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 274: +#line 2037 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); 
+ (yyval.interm.type).basicType = EbtUint16; + (yyval.interm.type).setVector(2); + } +#line 7073 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 275: +#line 2043 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint16; + (yyval.interm.type).setVector(3); + } +#line 7084 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 276: +#line 2049 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint16; + (yyval.interm.type).setVector(4); + } +#line 7095 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 277: +#line 2055 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(2); + } +#line 7106 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 278: +#line 2061 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(3); + } +#line 7117 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 279: +#line 2067 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(4); + } +#line 7128 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 280: +#line 2073 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint64; + (yyval.interm.type).setVector(2); + } +#line 7139 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 281: +#line 2079 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint64; + (yyval.interm.type).setVector(3); + } +#line 7150 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 282: +#line 2085 "glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint64; + (yyval.interm.type).setVector(4); + } +#line 7161 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 283: +#line 2091 "glslang.y" /* 
yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7174 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 284: +#line 2099 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7187 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 285: +#line 2107 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7200 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 286: +#line 2115 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7213 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 287: +#line 2123 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 3); + } +#line 7226 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 288: +#line 2131 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 4); + } +#line 7239 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 289: +#line 2139 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 2); + } +#line 7252 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 290: +#line 2147 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7265 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 291: +#line 2155 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 4); + } +#line 7278 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 292: +#line 2163 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 2); + } +#line 7291 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 293: +#line 2171 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 3); + } +#line 7304 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 294: +#line 2179 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7317 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 295: +#line 2187 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7328 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 296: +#line 2193 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7339 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 297: +#line 2199 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7350 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 298: +#line 2205 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7361 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 299: +#line 2211 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(2, 3); + } +#line 7372 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 300: +#line 2217 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(2, 4); + } +#line 7383 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 301: +#line 2223 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(3, 2); + } +#line 7394 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 302: +#line 2229 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(3, 3); 
+ } +#line 7405 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 303: +#line 2235 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(3, 4); + } +#line 7416 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 304: +#line 2241 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(4, 2); + } +#line 7427 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 305: +#line 2247 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(4, 3); + } +#line 7438 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 306: +#line 2253 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7449 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 307: +#line 2259 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7460 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 308: +#line 2265 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7471 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 309: +#line 2271 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7482 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 310: +#line 2277 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7493 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 311: +#line 2283 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + 
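+ // setMatrix(cols, rows): the NxM type keywords map N to columns and M to
+ // rows, per GLSL's matNxM convention.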
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 3); + } +#line 7504 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 312: +#line 2289 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 4); + } +#line 7515 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 313: +#line 2295 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 2); + } +#line 7526 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 314: +#line 2301 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7537 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 315: +#line 2307 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 4); + } +#line 7548 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 316: +#line 2313 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 2); + } +#line 7559 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 317: +#line 2319 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 3); + } +#line 7570 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 318: +#line 2325 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7581 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 319: +#line 2331 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7592 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 
320: +#line 2337 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7603 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 321: +#line 2343 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7614 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 322: +#line 2349 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7625 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 323: +#line 2355 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 3); + } +#line 7636 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 324: +#line 2361 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 4); + } +#line 7647 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 325: +#line 2367 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 2); + } +#line 7658 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 326: +#line 2373 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7669 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 327: +#line 2379 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 4); + } +#line 7680 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 328: +#line 2385 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 2); + } +#line 7691 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 329: +#line 2391 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 3); + } +#line 7702 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 330: +#line 2397 "glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7713 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 331: +#line 2403 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtAccStruct; + } +#line 7722 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 332: +#line 2407 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtAccStruct; + } +#line 7731 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 333: +#line 2411 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtRayQuery; + } +#line 7740 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 334: +#line 2415 "glslang.y" /* yacc.c:1646 */ + { + parseContext.vulkanRemoved((yyvsp[0].lex).loc, "atomic counter types"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtAtomicUint; + } +#line 7750 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 335: +#line 2420 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd1D); + } +#line 7760 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 336: +#line 2426 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D); + } +#line 7770 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 337: +#line 2431 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd3D); + } +#line 7780 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 338: +#line 2436 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdCube); + } +#line 7790 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 339: +#line 2441 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = 
EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, true); + } +#line 7800 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 340: +#line 2446 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdCube, false, true); + } +#line 7810 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 341: +#line 2451 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true); + } +#line 7820 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 342: +#line 2456 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, true); + } +#line 7830 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 343: +#line 2462 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd1D, false, true); + } +#line 7840 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 344: +#line 2467 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true); + } +#line 7850 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 345: +#line 2472 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true, true); + } +#line 7860 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 346: +#line 2477 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true); + } +#line 7870 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 347: +#line 2482 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true, true); + } +#line 7880 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 348: +#line 2487 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd1D); + } +#line 7891 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 349: +#line 2493 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D); + } +#line 7902 "glslang_tab.cpp" /* yacc.c:1646 */ + 
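+ // sampler.set(type, dim, arrayed, shadow) encodes the sampler variants:
+ // the trailing bool arguments distinguish array and shadow samplers.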
break; + + case 350: +#line 2499 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd3D); + } +#line 7913 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 351: +#line 2505 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdCube); + } +#line 7924 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 352: +#line 2511 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, false, true); + } +#line 7935 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 353: +#line 2517 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, true); + } +#line 7946 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 354: +#line 2523 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, false, true); + } +#line 7957 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 355: +#line 2529 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true); + } +#line 7968 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 356: +#line 2535 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true); + } +#line 7979 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 357: +#line 2541 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true, true); + } +#line 7990 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 358: +#line 2547 
"glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, true); + } +#line 8001 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 359: +#line 2553 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true); + } +#line 8012 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 360: +#line 2559 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true, true); + } +#line 8023 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 361: +#line 2565 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd1D); + } +#line 8033 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 362: +#line 2571 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd2D); + } +#line 8043 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 363: +#line 2576 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd3D); + } +#line 8053 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 364: +#line 2581 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, EsdCube); + } +#line 8063 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 365: +#line 2586 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd2D, true); + } +#line 8073 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 366: +#line 2591 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd2D); + } +#line 8083 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 367: +#line 2596 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd3D); + } +#line 8093 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 368: +#line 2601 "glslang.y" /* yacc.c:1646 */ + { + 
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, EsdCube); + } +#line 8103 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 369: +#line 2607 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd1D, true); + } +#line 8113 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 370: +#line 2612 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, EsdCube, true); + } +#line 8123 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 371: +#line 2617 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd1D); + } +#line 8133 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 372: +#line 2622 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd1D, true); + } +#line 8143 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 373: +#line 2627 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, EsdCube, true); + } +#line 8153 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 374: +#line 2632 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube, true); + } +#line 8163 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 375: +#line 2637 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube, true); + } +#line 8173 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 376: +#line 2642 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube, true); + } +#line 8183 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 377: +#line 2648 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd2D, true); + } +#line 8193 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 378: +#line 2653 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D); + } +#line 8203 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 379: +#line 2658 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd3D); + } +#line 8213 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 380: +#line 2663 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true); + } +#line 8223 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 381: +#line 2668 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube); + } +#line 8233 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 382: +#line 2673 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D); + } +#line 8243 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 383: +#line 2678 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd3D); + } +#line 8253 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 384: +#line 2683 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube); + } +#line 8263 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 385: +#line 2688 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true); + } +#line 8273 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 386: +#line 2693 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D); + } +#line 8283 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 387: +#line 2698 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd3D); + } +#line 8293 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 388: +#line 2703 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube); + } +#line 8303 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 389: +#line 2708 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true); + } +#line 8313 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 390: +#line 2713 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + 
(yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setPureSampler(false); + } +#line 8323 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 391: +#line 2718 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setPureSampler(true); + } +#line 8333 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 392: +#line 2724 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdRect); + } +#line 8343 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 393: +#line 2729 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdRect, false, true); + } +#line 8353 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 394: +#line 2734 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdRect); + } +#line 8364 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 395: +#line 2740 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdRect, false, true); + } +#line 8375 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 396: +#line 2746 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, EsdRect); + } +#line 8385 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 397: +#line 2751 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, EsdRect); + } +#line 8395 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 398: +#line 2756 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdBuffer); + } +#line 8405 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 399: +#line 2761 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdBuffer); + } +#line 8416 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 400: +#line 2767 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + 
(yyval.interm.type).sampler.set(EbtInt, EsdBuffer); + } +#line 8426 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 401: +#line 2772 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, EsdBuffer); + } +#line 8436 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 402: +#line 2777 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, false, true); + } +#line 8446 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 403: +#line 2782 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, false, true); + } +#line 8457 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 404: +#line 2788 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd2D, false, false, true); + } +#line 8467 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 405: +#line 2793 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd2D, false, false, true); + } +#line 8477 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 406: +#line 2798 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, false, true); + } +#line 8487 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 407: +#line 2803 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, false, true); + } +#line 8498 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 408: +#line 2809 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd2D, true, false, true); + } +#line 8508 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 409: +#line 2814 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd2D, true, false, true); + } +#line 8518 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 410: +#line 2819 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D); + } 
+#line 8528 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 411: +#line 2824 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D); + } +#line 8539 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 412: +#line 2830 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D); + } +#line 8550 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 413: +#line 2836 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd3D); + } +#line 8561 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 414: +#line 2842 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube); + } +#line 8572 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 415: +#line 2848 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D, true); + } +#line 8582 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 416: +#line 2853 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D, true); + } +#line 8593 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 417: +#line 2859 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true); + } +#line 8604 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 418: +#line 2865 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube, true); + } +#line 8615 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 419: +#line 2871 "glslang.y" /* yacc.c:1646 */ + { + 
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D); + } +#line 8625 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 420: +#line 2876 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D, true); + } +#line 8635 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 421: +#line 2881 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D); + } +#line 8645 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 422: +#line 2886 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D, true); + } +#line 8655 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 423: +#line 2891 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, EsdRect); + } +#line 8665 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 424: +#line 2896 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdRect); + } +#line 8676 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 425: +#line 2902 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, EsdRect); + } +#line 8686 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 426: +#line 2907 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, EsdRect); + } +#line 8696 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 427: +#line 2912 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, EsdBuffer); + } +#line 8706 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 428: +#line 2917 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdBuffer); + } +#line 8717 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 429: +#line 2923 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = 
EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, EsdBuffer); + } +#line 8727 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 430: +#line 2928 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, EsdBuffer); + } +#line 8737 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 431: +#line 2933 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, false, false, true); + } +#line 8747 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 432: +#line 2938 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, false, false, true); + } +#line 8758 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 433: +#line 2944 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, false, false, true); + } +#line 8768 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 434: +#line 2949 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, false, false, true); + } +#line 8778 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 435: +#line 2954 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true, false, true); + } +#line 8788 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 436: +#line 2959 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true, false, true); + } +#line 8799 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 437: +#line 2965 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true, false, true); + } +#line 8809 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 438: +#line 2970 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true, false, true); + } +#line 8819 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 439: +#line 2975 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + 
(yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D); + } +#line 8829 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 440: +#line 2980 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D); + } +#line 8840 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 441: +#line 2986 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd1D); + } +#line 8850 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 442: +#line 2991 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd1D); + } +#line 8860 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 443: +#line 2996 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D); + } +#line 8870 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 444: +#line 3001 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D); + } +#line 8881 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 445: +#line 3007 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd2D); + } +#line 8891 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 446: +#line 3012 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd2D); + } +#line 8901 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 447: +#line 3017 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd3D); + } +#line 8911 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 448: +#line 3022 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd3D); + } +#line 8922 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 449: +#line 3028 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + 
(yyval.interm.type).sampler.setImage(EbtInt, Esd3D); + } +#line 8932 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 450: +#line 3033 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd3D); + } +#line 8942 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 451: +#line 3038 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, EsdRect); + } +#line 8952 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 452: +#line 3043 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, EsdRect); + } +#line 8963 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 453: +#line 3049 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, EsdRect); + } +#line 8973 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 454: +#line 3054 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, EsdRect); + } +#line 8983 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 455: +#line 3059 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube); + } +#line 8993 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 456: +#line 3064 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube); + } +#line 9004 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 457: +#line 3070 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, EsdCube); + } +#line 9014 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 458: +#line 3075 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, EsdCube); + } +#line 9024 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 459: +#line 3080 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, EsdBuffer); + } +#line 9034 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 460: +#line 3085 "glslang.y" /* 
yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, EsdBuffer); + } +#line 9045 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 461: +#line 3091 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, EsdBuffer); + } +#line 9055 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 462: +#line 3096 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, EsdBuffer); + } +#line 9065 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 463: +#line 3101 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D, true); + } +#line 9075 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 464: +#line 3106 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D, true); + } +#line 9086 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 465: +#line 3112 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd1D, true); + } +#line 9096 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 466: +#line 3117 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd1D, true); + } +#line 9106 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 467: +#line 3122 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true); + } +#line 9116 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 468: +#line 3127 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true); + } +#line 9127 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 469: +#line 3133 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true); + } +#line 9137 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 470: +#line 3138 "glslang.y" /* 
yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true); + } +#line 9147 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 471: +#line 3143 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube, true); + } +#line 9157 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 472: +#line 3148 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube, true); + } +#line 9168 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 473: +#line 3154 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, EsdCube, true); + } +#line 9178 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 474: +#line 3159 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, EsdCube, true); + } +#line 9188 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 475: +#line 3164 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, false, false, true); + } +#line 9198 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 476: +#line 3169 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, false, false, true); + } +#line 9209 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 477: +#line 3175 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, false, false, true); + } +#line 9219 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 478: +#line 3180 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, false, false, true); + } +#line 9229 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 479: +#line 3185 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true, false, true); + } +#line 9239 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 480: +#line 3190 "glslang.y" /* yacc.c:1646 */ + { + 
parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true, false, true); + } +#line 9250 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 481: +#line 3196 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true, false, true); + } +#line 9260 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 482: +#line 3201 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true, false, true); + } +#line 9270 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 483: +#line 3206 "glslang.y" /* yacc.c:1646 */ + { // GL_OES_EGL_image_external + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D); + (yyval.interm.type).sampler.external = true; + } +#line 9281 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 484: +#line 3212 "glslang.y" /* yacc.c:1646 */ + { // GL_EXT_YUV_target + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D); + (yyval.interm.type).sampler.yuv = true; + } +#line 9292 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 485: +#line 3218 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtFloat); + } +#line 9303 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 486: +#line 3224 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtFloat, true); + } +#line 9314 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 487: +#line 3230 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtFloat16); + } +#line 9326 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 488: +#line 3237 "glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + 
(yyval.interm.type).sampler.setSubpass(EbtFloat16, true); + } +#line 9338 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 489: +#line 3244 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtInt); + } +#line 9349 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 490: +#line 3250 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtInt, true); + } +#line 9360 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 491: +#line 3256 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtUint); + } +#line 9371 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 492: +#line 3262 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtUint, true); + } +#line 9382 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 493: +#line 3268 "glslang.y" /* yacc.c:1646 */ + { + parseContext.fcoopmatCheck((yyvsp[0].lex).loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).coopmat = true; + } +#line 9393 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 494: +#line 3274 "glslang.y" /* yacc.c:1646 */ + { + parseContext.intcoopmatCheck((yyvsp[0].lex).loc, "icoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).coopmat = true; + } +#line 9404 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 495: +#line 3280 "glslang.y" /* yacc.c:1646 */ + { + parseContext.intcoopmatCheck((yyvsp[0].lex).loc, "ucoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).coopmat = true; + } +#line 9415 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 496: +#line 3287 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + (yyval.interm.type).qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary; + parseContext.structTypeCheck((yyval.interm.type).loc, (yyval.interm.type)); + } +#line 9425 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 497: +#line 3292 "glslang.y" /* yacc.c:1646 */ + { + // + // This is for user defined type names. The lexical phase looked up the + // type. 
+ // + if (const TVariable* variable = ((yyvsp[0].lex).symbol)->getAsVariable()) { + const TType& structure = variable->getType(); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtStruct; + (yyval.interm.type).userDef = &structure; + } else + parseContext.error((yyvsp[0].lex).loc, "expected type name", (yyvsp[0].lex).string->c_str(), ""); + } +#line 9443 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 498: +#line 3308 "glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "highp precision qualifier"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqHigh); + } +#line 9453 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 499: +#line 3313 "glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "mediump precision qualifier"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqMedium); + } +#line 9463 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 500: +#line 3318 "glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "lowp precision qualifier"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqLow); + } +#line 9473 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 501: +#line 3326 "glslang.y" /* yacc.c:1646 */ + { parseContext.nestedStructCheck((yyvsp[-2].lex).loc); } +#line 9479 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 502: +#line 3326 "glslang.y" /* yacc.c:1646 */ + { + TType* structure = new TType((yyvsp[-1].interm.typeList), *(yyvsp[-4].lex).string); + parseContext.structArrayCheck((yyvsp[-4].lex).loc, *structure); + TVariable* userTypeDef = new TVariable((yyvsp[-4].lex).string, *structure, true); + if (! 
parseContext.symbolTable.insert(*userTypeDef)) + parseContext.error((yyvsp[-4].lex).loc, "redefinition", (yyvsp[-4].lex).string->c_str(), "struct"); + (yyval.interm.type).init((yyvsp[-5].lex).loc); + (yyval.interm.type).basicType = EbtStruct; + (yyval.interm.type).userDef = structure; + --parseContext.structNestingLevel; + } +#line 9495 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 503: +#line 3337 "glslang.y" /* yacc.c:1646 */ + { parseContext.nestedStructCheck((yyvsp[-1].lex).loc); } +#line 9501 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 504: +#line 3337 "glslang.y" /* yacc.c:1646 */ + { + TType* structure = new TType((yyvsp[-1].interm.typeList), TString("")); + (yyval.interm.type).init((yyvsp[-4].lex).loc); + (yyval.interm.type).basicType = EbtStruct; + (yyval.interm.type).userDef = structure; + --parseContext.structNestingLevel; + } +#line 9513 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 505: +#line 3347 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeList) = (yyvsp[0].interm.typeList); + } +#line 9521 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 506: +#line 3350 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeList) = (yyvsp[-1].interm.typeList); + for (unsigned int i = 0; i < (yyvsp[0].interm.typeList)->size(); ++i) { + for (unsigned int j = 0; j < (yyval.interm.typeList)->size(); ++j) { + if ((*(yyval.interm.typeList))[j].type->getFieldName() == (*(yyvsp[0].interm.typeList))[i].type->getFieldName()) + parseContext.error((*(yyvsp[0].interm.typeList))[i].loc, "duplicate member name:", "", (*(yyvsp[0].interm.typeList))[i].type->getFieldName().c_str()); + } + (yyval.interm.typeList)->push_back((*(yyvsp[0].interm.typeList))[i]); + } + } +#line 9536 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 507: +#line 3363 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + if (parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes); + } + + (yyval.interm.typeList) = (yyvsp[-1].interm.typeList); + + parseContext.voidErrorCheck((yyvsp[-2].interm.type).loc, (*(yyvsp[-1].interm.typeList))[0].type->getFieldName(), (yyvsp[-2].interm.type).basicType); + parseContext.precisionQualifierCheck((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).basicType, (yyvsp[-2].interm.type).qualifier); + + for (unsigned int i = 0; i < (yyval.interm.typeList)->size(); ++i) { + TType type((yyvsp[-2].interm.type)); + type.setFieldName((*(yyval.interm.typeList))[i].type->getFieldName()); + type.transferArraySizes((*(yyval.interm.typeList))[i].type->getArraySizes()); + type.copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes); + parseContext.arrayOfArrayVersionCheck((*(yyval.interm.typeList))[i].loc, type.getArraySizes()); + (*(yyval.interm.typeList))[i].type->shallowCopy(type); + } + } +#line 9563 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 508: +#line 3385 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + if (parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, 
*(yyvsp[-2].interm.type).arraySizes); + } + + (yyval.interm.typeList) = (yyvsp[-1].interm.typeList); + + parseContext.memberQualifierCheck((yyvsp[-3].interm.type)); + parseContext.voidErrorCheck((yyvsp[-2].interm.type).loc, (*(yyvsp[-1].interm.typeList))[0].type->getFieldName(), (yyvsp[-2].interm.type).basicType); + parseContext.mergeQualifiers((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, (yyvsp[-3].interm.type).qualifier, true); + parseContext.precisionQualifierCheck((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).basicType, (yyvsp[-2].interm.type).qualifier); + + for (unsigned int i = 0; i < (yyval.interm.typeList)->size(); ++i) { + TType type((yyvsp[-2].interm.type)); + type.setFieldName((*(yyval.interm.typeList))[i].type->getFieldName()); + type.transferArraySizes((*(yyval.interm.typeList))[i].type->getArraySizes()); + type.copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes); + parseContext.arrayOfArrayVersionCheck((*(yyval.interm.typeList))[i].loc, type.getArraySizes()); + (*(yyval.interm.typeList))[i].type->shallowCopy(type); + } + } +#line 9592 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 509: +#line 3412 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeList) = new TTypeList; + (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine)); + } +#line 9601 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 510: +#line 3416 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine)); + } +#line 9609 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 511: +#line 3422 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeLine).type = new TType(EbtVoid); + (yyval.interm.typeLine).loc = (yyvsp[0].lex).loc; + (yyval.interm.typeLine).type->setFieldName(*(yyvsp[0].lex).string); + } +#line 9619 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 512: +#line 3427 "glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, (yyvsp[0].interm).arraySizes); + + (yyval.interm.typeLine).type = new TType(EbtVoid); + (yyval.interm.typeLine).loc = (yyvsp[-1].lex).loc; + (yyval.interm.typeLine).type->setFieldName(*(yyvsp[-1].lex).string); + (yyval.interm.typeLine).type->transferArraySizes((yyvsp[0].interm).arraySizes); + } +#line 9632 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 513: +#line 3438 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 9640 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 514: +#line 3442 "glslang.y" /* yacc.c:1646 */ + { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile((yyvsp[-2].lex).loc, ~EEsProfile, initFeature); + parseContext.profileRequires((yyvsp[-2].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode); + } +#line 9651 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 515: +#line 3448 "glslang.y" /* yacc.c:1646 */ + { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile((yyvsp[-3].lex).loc, ~EEsProfile, initFeature); + parseContext.profileRequires((yyvsp[-3].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 9662 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 516: +#line 3459 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate(0, 
(yyvsp[0].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)->getLoc()); + } +#line 9670 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 517: +#line 3462 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + } +#line 9678 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 518: +#line 3469 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9684 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 519: +#line 3473 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9690 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 520: +#line 3474 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9696 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 521: +#line 3480 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9702 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 522: +#line 3481 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9708 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 523: +#line 3482 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9714 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 524: +#line 3483 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9720 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 525: +#line 3484 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9726 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 526: +#line 3485 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9732 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 527: +#line 3486 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9738 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 528: +#line 3488 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9744 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 529: +#line 3494 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "demote"); + parseContext.requireExtensions((yyvsp[-1].lex).loc, 1, &E_GL_EXT_demote_to_helper_invocation, "demote"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpDemote, (yyvsp[-1].lex).loc); + } +#line 9754 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 530: +#line 3503 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = 0; } +#line 9760 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 531: +#line 3504 "glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + } +#line 9769 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 532: +#line 3508 "glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + } +#line 9778 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 533: +#line 3512 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.intermNode) && (yyvsp[-2].interm.intermNode)->getAsAggregate()) + 
(yyvsp[-2].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence); + (yyval.interm.intermNode) = (yyvsp[-2].interm.intermNode); + } +#line 9788 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 534: +#line 3520 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9794 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 535: +#line 3521 "glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9800 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 536: +#line 3525 "glslang.y" /* yacc.c:1646 */ + { + ++parseContext.controlFlowNestingLevel; + } +#line 9808 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 537: +#line 3528 "glslang.y" /* yacc.c:1646 */ + { + --parseContext.controlFlowNestingLevel; + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 9817 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 538: +#line 3532 "glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } +#line 9827 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 539: +#line 3537 "glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 9838 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 540: +#line 3546 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = 0; + } +#line 9846 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 541: +#line 3549 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-1].interm.intermNode) && (yyvsp[-1].interm.intermNode)->getAsAggregate()) + (yyvsp[-1].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence); + (yyval.interm.intermNode) = (yyvsp[-1].interm.intermNode); + } +#line 9856 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 542: +#line 3557 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[0].interm.intermNode)); + if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase || + (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence(0, (yyvsp[0].interm.intermNode)); + (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case + } + } +#line 9869 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 543: +#line 3565 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase || + (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence((yyvsp[-1].interm.intermNode) ? 
(yyvsp[-1].interm.intermNode)->getAsAggregate() : 0, (yyvsp[0].interm.intermNode));
+            (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case
+        } else
+            (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode));
+    }
+#line 9882 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 544:
+#line 3576 "glslang.y" /* yacc.c:1646 */
+    { (yyval.interm.intermNode) = 0; }
+#line 9888 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 545:
+#line 3577 "glslang.y" /* yacc.c:1646 */
+    { (yyval.interm.intermNode) = static_cast<TIntermNode*>((yyvsp[-1].interm.intermTypedNode)); }
+#line 9894 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 546:
+#line 3581 "glslang.y" /* yacc.c:1646 */
+    {
+        (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+    }
+#line 9902 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 547:
+#line 3585 "glslang.y" /* yacc.c:1646 */
+    {
+        parseContext.handleSelectionAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode));
+        (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+    }
+#line 9911 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 548:
+#line 3592 "glslang.y" /* yacc.c:1646 */
+    {
+        parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-2].interm.intermTypedNode));
+        (yyval.interm.intermNode) = parseContext.intermediate.addSelection((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.nodePair), (yyvsp[-4].lex).loc);
+    }
+#line 9920 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 549:
+#line 3599 "glslang.y" /* yacc.c:1646 */
+    {
+        (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermNode);
+        (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermNode);
+    }
+#line 9929 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 550:
+#line 3603 "glslang.y" /* yacc.c:1646 */
+    {
+        (yyval.interm.nodePair).node1 = (yyvsp[0].interm.intermNode);
+        (yyval.interm.nodePair).node2 = 0;
+    }
+#line 9938 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 551:
+#line 3611 "glslang.y" /* yacc.c:1646 */
+    {
+        (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+        parseContext.boolCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode));
+    }
+#line 9947 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 552:
+#line 3615 "glslang.y" /* yacc.c:1646 */
+    {
+        parseContext.boolCheck((yyvsp[-2].lex).loc, (yyvsp[-3].interm.type));
+
+        TType type((yyvsp[-3].interm.type));
+        TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode));
+        if (initNode)
+            (yyval.interm.intermTypedNode) = initNode->getAsTyped();
+        else
+            (yyval.interm.intermTypedNode) = 0;
+    }
+#line 9962 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 553:
+#line 3628 "glslang.y" /* yacc.c:1646 */
+    {
+        (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+    }
+#line 9970 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 554:
+#line 3632 "glslang.y" /* yacc.c:1646 */
+    {
+        parseContext.handleSwitchAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode));
+        (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+    }
+#line 9979 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 555:
+#line 3639 "glslang.y" /* yacc.c:1646 */
+    {
+        // start new switch sequence on the switch stack
+        ++parseContext.controlFlowNestingLevel;
+        ++parseContext.statementNestingLevel;
+        parseContext.switchSequenceStack.push_back(new 
TIntermSequence); + parseContext.switchLevel.push_back(parseContext.statementNestingLevel); + parseContext.symbolTable.push(); + } +#line 9992 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 556: +#line 3647 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = parseContext.addSwitch((yyvsp[-7].lex).loc, (yyvsp[-5].interm.intermTypedNode), (yyvsp[-1].interm.intermNode) ? (yyvsp[-1].interm.intermNode)->getAsAggregate() : 0); + delete parseContext.switchSequenceStack.back(); + parseContext.switchSequenceStack.pop_back(); + parseContext.switchLevel.pop_back(); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } +#line 10006 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 557: +#line 3659 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = 0; + } +#line 10014 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 558: +#line 3662 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10022 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 559: +#line 3668 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error((yyvsp[-2].lex).loc, "cannot appear outside switch statement", "case", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error((yyvsp[-2].lex).loc, "cannot be nested inside control flow", "case", ""); + else { + parseContext.constantValueCheck((yyvsp[-1].interm.intermTypedNode), "case"); + parseContext.integerCheck((yyvsp[-1].interm.intermTypedNode), "case"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpCase, (yyvsp[-1].interm.intermTypedNode), (yyvsp[-2].lex).loc); + } + } +#line 10039 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 560: +#line 3680 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error((yyvsp[-1].lex).loc, "cannot appear outside switch statement", "default", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error((yyvsp[-1].lex).loc, "cannot be nested inside control flow", "default", ""); + else + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpDefault, (yyvsp[-1].lex).loc); + } +#line 10053 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 561: +#line 3692 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10061 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 562: +#line 3696 "glslang.y" /* yacc.c:1646 */ + { + parseContext.handleLoopAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode)); + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10070 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 563: +#line 3703 "glslang.y" /* yacc.c:1646 */ + { + if (! 
parseContext.limits.whileLoops)
+            parseContext.error((yyvsp[-1].lex).loc, "while loops not available", "limitation", "");
+        parseContext.symbolTable.push();
+        ++parseContext.loopNestingLevel;
+        ++parseContext.statementNestingLevel;
+        ++parseContext.controlFlowNestingLevel;
+    }
+#line 10083 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 564:
+#line 3711 "glslang.y" /* yacc.c:1646 */
+    {
+        parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+        (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, true, (yyvsp[-5].lex).loc);
+        --parseContext.loopNestingLevel;
+        --parseContext.statementNestingLevel;
+        --parseContext.controlFlowNestingLevel;
+    }
+#line 10095 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 565:
+#line 3718 "glslang.y" /* yacc.c:1646 */
+    {
+        ++parseContext.loopNestingLevel;
+        ++parseContext.statementNestingLevel;
+        ++parseContext.controlFlowNestingLevel;
+    }
+#line 10105 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 566:
+#line 3723 "glslang.y" /* yacc.c:1646 */
+    {
+        if (! parseContext.limits.whileLoops)
+            parseContext.error((yyvsp[-7].lex).loc, "do-while loops not available", "limitation", "");
+
+        parseContext.boolCheck((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode));
+
+        (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[-5].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, false, (yyvsp[-4].lex).loc);
+        --parseContext.loopNestingLevel;
+        --parseContext.statementNestingLevel;
+        --parseContext.controlFlowNestingLevel;
+    }
+#line 10121 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 567:
+#line 3734 "glslang.y" /* yacc.c:1646 */
+    {
+        parseContext.symbolTable.push();
+        ++parseContext.loopNestingLevel;
+        ++parseContext.statementNestingLevel;
+        ++parseContext.controlFlowNestingLevel;
+    }
+#line 10132 "glslang_tab.cpp" /* yacc.c:1646 */
+    break;
+
+  case 568:
+#line 3740 "glslang.y" /* yacc.c:1646 */
+    {
+        parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+        (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[-3].interm.intermNode), (yyvsp[-5].lex).loc);
+        TIntermLoop* forLoop = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node1), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node2), true, (yyvsp[-6].lex).loc);
+        if (! 
parseContext.limits.nonInductiveForLoops) + parseContext.inductiveLoopCheck((yyvsp[-6].lex).loc, (yyvsp[-3].interm.intermNode), forLoop); + (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyval.interm.intermNode), forLoop, (yyvsp[-6].lex).loc); + (yyval.interm.intermNode)->getAsAggregate()->setOperator(EOpSequence); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } +#line 10149 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 569: +#line 3755 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10157 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 570: +#line 3758 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10165 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 571: +#line 3764 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 10173 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 572: +#line 3767 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = 0; + } +#line 10181 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 573: +#line 3773 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.nodePair).node1 = (yyvsp[-1].interm.intermTypedNode); + (yyval.interm.nodePair).node2 = 0; + } +#line 10190 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 574: +#line 3777 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermTypedNode); + (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermTypedNode); + } +#line 10199 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 575: +#line 3784 "glslang.y" /* yacc.c:1646 */ + { + if (parseContext.loopNestingLevel <= 0) + parseContext.error((yyvsp[-1].lex).loc, "continue statement only allowed in loops", "", ""); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpContinue, (yyvsp[-1].lex).loc); + } +#line 10209 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 576: +#line 3789 "glslang.y" /* yacc.c:1646 */ + { + if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0) + parseContext.error((yyvsp[-1].lex).loc, "break statement only allowed in switch and loops", "", ""); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpBreak, (yyvsp[-1].lex).loc); + } +#line 10219 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 577: +#line 3794 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpReturn, (yyvsp[-1].lex).loc); + if (parseContext.currentFunctionType->getBasicType() != EbtVoid) + parseContext.error((yyvsp[-1].lex).loc, "non-void function must return a value", "return", ""); + if (parseContext.inMain) + parseContext.postEntryPointReturn = true; + } +#line 10231 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 578: +#line 3801 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = parseContext.handleReturnValue((yyvsp[-2].lex).loc, (yyvsp[-1].interm.intermTypedNode)); + } +#line 10239 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 579: +#line 3804 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "discard"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpKill, (yyvsp[-1].lex).loc); + } +#line 10248 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 580: +#line 3813 "glslang.y" /* yacc.c:1646 */ + { + 
(yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + parseContext.intermediate.setTreeRoot((yyval.interm.intermNode)); + } +#line 10257 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 581: +#line 3817 "glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[0].interm.intermNode) != nullptr) { + (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode)); + parseContext.intermediate.setTreeRoot((yyval.interm.intermNode)); + } + } +#line 10268 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 582: +#line 3826 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10276 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 583: +#line 3829 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10284 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 584: +#line 3833 "glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ~EEsProfile, "extraneous semicolon"); + parseContext.profileRequires((yyvsp[0].lex).loc, ~EEsProfile, 460, nullptr, "extraneous semicolon"); + (yyval.interm.intermNode) = nullptr; + } +#line 10294 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 585: +#line 3842 "glslang.y" /* yacc.c:1646 */ + { + (yyvsp[0].interm).function = parseContext.handleFunctionDeclarator((yyvsp[0].interm).loc, *(yyvsp[0].interm).function, false /* not prototype */); + (yyvsp[0].interm).intermNode = parseContext.handleFunctionDefinition((yyvsp[0].interm).loc, *(yyvsp[0].interm).function); + } +#line 10303 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 586: +#line 3846 "glslang.y" /* yacc.c:1646 */ + { + // May be best done as post process phase on intermediate code + if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! parseContext.functionReturnsValue) + parseContext.error((yyvsp[-2].interm).loc, "function does not return a value:", "", (yyvsp[-2].interm).function->getName().c_str()); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermNode)); + parseContext.intermediate.setAggregateOperator((yyval.interm.intermNode), EOpFunction, (yyvsp[-2].interm).function->getType(), (yyvsp[-2].interm).loc); + (yyval.interm.intermNode)->getAsAggregate()->setName((yyvsp[-2].interm).function->getMangledName().c_str()); + + // store the pragma information for debug and optimize and other vendor specific + // information. 
This information can be queried from the parse tree + (yyval.interm.intermNode)->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize); + (yyval.interm.intermNode)->getAsAggregate()->setDebug(parseContext.contextPragma.debug); + (yyval.interm.intermNode)->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable); + } +#line 10323 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 587: +#line 3865 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = (yyvsp[-2].interm.attributes); + parseContext.requireExtensions((yyvsp[-4].lex).loc, 1, &E_GL_EXT_control_flow_attributes, "attribute"); + } +#line 10332 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 588: +#line 3871 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = (yyvsp[0].interm.attributes); + } +#line 10340 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 589: +#line 3874 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = parseContext.mergeAttributes((yyvsp[-2].interm.attributes), (yyvsp[0].interm.attributes)); + } +#line 10348 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 590: +#line 3879 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[0].lex).string); + } +#line 10356 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 591: +#line 3882 "glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[-3].lex).string, (yyvsp[-1].interm.intermTypedNode)); + } +#line 10364 "glslang_tab.cpp" /* yacc.c:1646 */ + break; + + +#line 10368 "glslang_tab.cpp" /* yacc.c:1646 */ + default: break; + } + /* User semantic actions sometimes alter yychar, and that requires + that yytoken be updated with the new translation. We take the + approach of translating immediately before every use of yytoken. + One alternative is translating here after every semantic action, + but that translation would be missed if the semantic action invokes + YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or + if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an + incorrect destructor might then be invoked immediately. In the + case of YYERROR or YYBACKUP, subsequent parser actions might lead + to an incorrect destructor call or verbose syntax error message + before the lookahead is translated. */ + YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); + + YYPOPSTACK (yylen); + yylen = 0; + YY_STACK_PRINT (yyss, yyssp); + + *++yyvsp = yyval; + + /* Now 'shift' the result of the reduction. Determine what state + that goes to, based on the state we popped back to and the rule + number reduced by. */ + + yyn = yyr1[yyn]; + + yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; + if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) + yystate = yytable[yystate]; + else + yystate = yydefgoto[yyn - YYNTOKENS]; + + goto yynewstate; + + +/*--------------------------------------. +| yyerrlab -- here on detecting error. | +`--------------------------------------*/ +yyerrlab: + /* Make sure we have latest lookahead translation. See comments at + user semantic actions for why this is necessary. */ + yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); + + /* If not already recovering from an error, report this error. */ + if (!yyerrstatus) + { + ++yynerrs; +#if ! 
YYERROR_VERBOSE + yyerror (pParseContext, YY_("syntax error")); +#else +# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ + yyssp, yytoken) + { + char const *yymsgp = YY_("syntax error"); + int yysyntax_error_status; + yysyntax_error_status = YYSYNTAX_ERROR; + if (yysyntax_error_status == 0) + yymsgp = yymsg; + else if (yysyntax_error_status == 1) + { + if (yymsg != yymsgbuf) + YYSTACK_FREE (yymsg); + yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); + if (!yymsg) + { + yymsg = yymsgbuf; + yymsg_alloc = sizeof yymsgbuf; + yysyntax_error_status = 2; + } + else + { + yysyntax_error_status = YYSYNTAX_ERROR; + yymsgp = yymsg; + } + } + yyerror (pParseContext, yymsgp); + if (yysyntax_error_status == 2) + goto yyexhaustedlab; + } +# undef YYSYNTAX_ERROR +#endif + } + + + + if (yyerrstatus == 3) + { + /* If just tried and failed to reuse lookahead token after an + error, discard it. */ + + if (yychar <= YYEOF) + { + /* Return failure if at end of input. */ + if (yychar == YYEOF) + YYABORT; + } + else + { + yydestruct ("Error: discarding", + yytoken, &yylval, pParseContext); + yychar = YYEMPTY; + } + } + + /* Else will try to reuse lookahead token after shifting the error + token. */ + goto yyerrlab1; + + +/*---------------------------------------------------. +| yyerrorlab -- error raised explicitly by YYERROR. | +`---------------------------------------------------*/ +yyerrorlab: + + /* Pacify compilers like GCC when the user code never invokes + YYERROR and the label yyerrorlab therefore never appears in user + code. */ + if (/*CONSTCOND*/ 0) + goto yyerrorlab; + + /* Do not reclaim the symbols of the rule whose action triggered + this YYERROR. */ + YYPOPSTACK (yylen); + yylen = 0; + YY_STACK_PRINT (yyss, yyssp); + yystate = *yyssp; + goto yyerrlab1; + + +/*-------------------------------------------------------------. +| yyerrlab1 -- common code for both syntax error and YYERROR. | +`-------------------------------------------------------------*/ +yyerrlab1: + yyerrstatus = 3; /* Each real token shifted decrements this. */ + + for (;;) + { + yyn = yypact[yystate]; + if (!yypact_value_is_default (yyn)) + { + yyn += YYTERROR; + if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) + { + yyn = yytable[yyn]; + if (0 < yyn) + break; + } + } + + /* Pop the current state because it cannot handle the error token. */ + if (yyssp == yyss) + YYABORT; + + + yydestruct ("Error: popping", + yystos[yystate], yyvsp, pParseContext); + YYPOPSTACK (1); + yystate = *yyssp; + YY_STACK_PRINT (yyss, yyssp); + } + + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + *++yyvsp = yylval; + YY_IGNORE_MAYBE_UNINITIALIZED_END + + + /* Shift the error token. */ + YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); + + yystate = yyn; + goto yynewstate; + + +/*-------------------------------------. +| yyacceptlab -- YYACCEPT comes here. | +`-------------------------------------*/ +yyacceptlab: + yyresult = 0; + goto yyreturn; + +/*-----------------------------------. +| yyabortlab -- YYABORT comes here. | +`-----------------------------------*/ +yyabortlab: + yyresult = 1; + goto yyreturn; + +#if !defined yyoverflow || YYERROR_VERBOSE +/*-------------------------------------------------. +| yyexhaustedlab -- memory exhaustion comes here. | +`-------------------------------------------------*/ +yyexhaustedlab: + yyerror (pParseContext, YY_("memory exhausted")); + yyresult = 2; + /* Fall through. */ +#endif + +yyreturn: + if (yychar != YYEMPTY) + { + /* Make sure we have latest lookahead translation. 
See comments at
+         user semantic actions for why this is necessary.  */
+      yytoken = YYTRANSLATE (yychar);
+      yydestruct ("Cleanup: discarding lookahead",
+                  yytoken, &yylval, pParseContext);
+    }
+  /* Do not reclaim the symbols of the rule whose action triggered
+     this YYABORT or YYACCEPT.  */
+  YYPOPSTACK (yylen);
+  YY_STACK_PRINT (yyss, yyssp);
+  while (yyssp != yyss)
+    {
+      yydestruct ("Cleanup: popping",
+                  yystos[*yyssp], yyvsp, pParseContext);
+      YYPOPSTACK (1);
+    }
+#ifndef yyoverflow
+  if (yyss != yyssa)
+    YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+  if (yymsg != yymsgbuf)
+    YYSTACK_FREE (yymsg);
+#endif
+  return yyresult;
+}
+#line 3887 "glslang.y" /* yacc.c:1906 */
+
+
diff --git a/dep/glslang/glslang/MachineIndependent/glslang_tab.cpp.h b/dep/glslang/glslang/MachineIndependent/glslang_tab.cpp.h
new file mode 100644
index 000000000..31c8f9024
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/glslang_tab.cpp.h
@@ -0,0 +1,521 @@
+/* A Bison parser, made by GNU Bison 3.0.4.  */
+
+/* Bison interface for Yacc-like parsers in C
+
+   Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
+
+   This program is free software: you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+/* As a special exception, you may create a larger work that contains
+   part or all of the Bison parser skeleton and distribute that work
+   under terms of your choice, so long as that work isn't itself a
+   parser generator using the skeleton or a modified version thereof
+   as a parser skeleton.  Alternatively, if you modify or redistribute
+   the parser skeleton itself, you may (at your option) remove this
+   special exception, which will cause the skeleton and the resulting
+   Bison output files to be licensed under the GNU General Public
+   License without this special exception.
+
+   This special exception was added by the Free Software Foundation in
+   version 2.2 of Bison.  */
+
+#ifndef YY_YY_GLSLANG_TAB_CPP_H_INCLUDED
+# define YY_YY_GLSLANG_TAB_CPP_H_INCLUDED
+/* Debug traces.  */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int yydebug;
+#endif
+
+/* Token type. 
*/ +#ifndef YYTOKENTYPE +# define YYTOKENTYPE + enum yytokentype + { + CONST = 258, + BOOL = 259, + INT = 260, + UINT = 261, + FLOAT = 262, + BVEC2 = 263, + BVEC3 = 264, + BVEC4 = 265, + IVEC2 = 266, + IVEC3 = 267, + IVEC4 = 268, + UVEC2 = 269, + UVEC3 = 270, + UVEC4 = 271, + VEC2 = 272, + VEC3 = 273, + VEC4 = 274, + MAT2 = 275, + MAT3 = 276, + MAT4 = 277, + MAT2X2 = 278, + MAT2X3 = 279, + MAT2X4 = 280, + MAT3X2 = 281, + MAT3X3 = 282, + MAT3X4 = 283, + MAT4X2 = 284, + MAT4X3 = 285, + MAT4X4 = 286, + SAMPLER2D = 287, + SAMPLER3D = 288, + SAMPLERCUBE = 289, + SAMPLER2DSHADOW = 290, + SAMPLERCUBESHADOW = 291, + SAMPLER2DARRAY = 292, + SAMPLER2DARRAYSHADOW = 293, + ISAMPLER2D = 294, + ISAMPLER3D = 295, + ISAMPLERCUBE = 296, + ISAMPLER2DARRAY = 297, + USAMPLER2D = 298, + USAMPLER3D = 299, + USAMPLERCUBE = 300, + USAMPLER2DARRAY = 301, + SAMPLER = 302, + SAMPLERSHADOW = 303, + TEXTURE2D = 304, + TEXTURE3D = 305, + TEXTURECUBE = 306, + TEXTURE2DARRAY = 307, + ITEXTURE2D = 308, + ITEXTURE3D = 309, + ITEXTURECUBE = 310, + ITEXTURE2DARRAY = 311, + UTEXTURE2D = 312, + UTEXTURE3D = 313, + UTEXTURECUBE = 314, + UTEXTURE2DARRAY = 315, + ATTRIBUTE = 316, + VARYING = 317, + FLOAT16_T = 318, + FLOAT32_T = 319, + DOUBLE = 320, + FLOAT64_T = 321, + INT64_T = 322, + UINT64_T = 323, + INT32_T = 324, + UINT32_T = 325, + INT16_T = 326, + UINT16_T = 327, + INT8_T = 328, + UINT8_T = 329, + I64VEC2 = 330, + I64VEC3 = 331, + I64VEC4 = 332, + U64VEC2 = 333, + U64VEC3 = 334, + U64VEC4 = 335, + I32VEC2 = 336, + I32VEC3 = 337, + I32VEC4 = 338, + U32VEC2 = 339, + U32VEC3 = 340, + U32VEC4 = 341, + I16VEC2 = 342, + I16VEC3 = 343, + I16VEC4 = 344, + U16VEC2 = 345, + U16VEC3 = 346, + U16VEC4 = 347, + I8VEC2 = 348, + I8VEC3 = 349, + I8VEC4 = 350, + U8VEC2 = 351, + U8VEC3 = 352, + U8VEC4 = 353, + DVEC2 = 354, + DVEC3 = 355, + DVEC4 = 356, + DMAT2 = 357, + DMAT3 = 358, + DMAT4 = 359, + F16VEC2 = 360, + F16VEC3 = 361, + F16VEC4 = 362, + F16MAT2 = 363, + F16MAT3 = 364, + F16MAT4 = 365, + F32VEC2 = 366, + F32VEC3 = 367, + F32VEC4 = 368, + F32MAT2 = 369, + F32MAT3 = 370, + F32MAT4 = 371, + F64VEC2 = 372, + F64VEC3 = 373, + F64VEC4 = 374, + F64MAT2 = 375, + F64MAT3 = 376, + F64MAT4 = 377, + DMAT2X2 = 378, + DMAT2X3 = 379, + DMAT2X4 = 380, + DMAT3X2 = 381, + DMAT3X3 = 382, + DMAT3X4 = 383, + DMAT4X2 = 384, + DMAT4X3 = 385, + DMAT4X4 = 386, + F16MAT2X2 = 387, + F16MAT2X3 = 388, + F16MAT2X4 = 389, + F16MAT3X2 = 390, + F16MAT3X3 = 391, + F16MAT3X4 = 392, + F16MAT4X2 = 393, + F16MAT4X3 = 394, + F16MAT4X4 = 395, + F32MAT2X2 = 396, + F32MAT2X3 = 397, + F32MAT2X4 = 398, + F32MAT3X2 = 399, + F32MAT3X3 = 400, + F32MAT3X4 = 401, + F32MAT4X2 = 402, + F32MAT4X3 = 403, + F32MAT4X4 = 404, + F64MAT2X2 = 405, + F64MAT2X3 = 406, + F64MAT2X4 = 407, + F64MAT3X2 = 408, + F64MAT3X3 = 409, + F64MAT3X4 = 410, + F64MAT4X2 = 411, + F64MAT4X3 = 412, + F64MAT4X4 = 413, + ATOMIC_UINT = 414, + ACCSTRUCTNV = 415, + ACCSTRUCTEXT = 416, + RAYQUERYEXT = 417, + FCOOPMATNV = 418, + ICOOPMATNV = 419, + UCOOPMATNV = 420, + SAMPLERCUBEARRAY = 421, + SAMPLERCUBEARRAYSHADOW = 422, + ISAMPLERCUBEARRAY = 423, + USAMPLERCUBEARRAY = 424, + SAMPLER1D = 425, + SAMPLER1DARRAY = 426, + SAMPLER1DARRAYSHADOW = 427, + ISAMPLER1D = 428, + SAMPLER1DSHADOW = 429, + SAMPLER2DRECT = 430, + SAMPLER2DRECTSHADOW = 431, + ISAMPLER2DRECT = 432, + USAMPLER2DRECT = 433, + SAMPLERBUFFER = 434, + ISAMPLERBUFFER = 435, + USAMPLERBUFFER = 436, + SAMPLER2DMS = 437, + ISAMPLER2DMS = 438, + USAMPLER2DMS = 439, + SAMPLER2DMSARRAY = 440, + ISAMPLER2DMSARRAY = 441, + USAMPLER2DMSARRAY = 442, + 
SAMPLEREXTERNALOES = 443, + SAMPLEREXTERNAL2DY2YEXT = 444, + ISAMPLER1DARRAY = 445, + USAMPLER1D = 446, + USAMPLER1DARRAY = 447, + F16SAMPLER1D = 448, + F16SAMPLER2D = 449, + F16SAMPLER3D = 450, + F16SAMPLER2DRECT = 451, + F16SAMPLERCUBE = 452, + F16SAMPLER1DARRAY = 453, + F16SAMPLER2DARRAY = 454, + F16SAMPLERCUBEARRAY = 455, + F16SAMPLERBUFFER = 456, + F16SAMPLER2DMS = 457, + F16SAMPLER2DMSARRAY = 458, + F16SAMPLER1DSHADOW = 459, + F16SAMPLER2DSHADOW = 460, + F16SAMPLER1DARRAYSHADOW = 461, + F16SAMPLER2DARRAYSHADOW = 462, + F16SAMPLER2DRECTSHADOW = 463, + F16SAMPLERCUBESHADOW = 464, + F16SAMPLERCUBEARRAYSHADOW = 465, + IMAGE1D = 466, + IIMAGE1D = 467, + UIMAGE1D = 468, + IMAGE2D = 469, + IIMAGE2D = 470, + UIMAGE2D = 471, + IMAGE3D = 472, + IIMAGE3D = 473, + UIMAGE3D = 474, + IMAGE2DRECT = 475, + IIMAGE2DRECT = 476, + UIMAGE2DRECT = 477, + IMAGECUBE = 478, + IIMAGECUBE = 479, + UIMAGECUBE = 480, + IMAGEBUFFER = 481, + IIMAGEBUFFER = 482, + UIMAGEBUFFER = 483, + IMAGE1DARRAY = 484, + IIMAGE1DARRAY = 485, + UIMAGE1DARRAY = 486, + IMAGE2DARRAY = 487, + IIMAGE2DARRAY = 488, + UIMAGE2DARRAY = 489, + IMAGECUBEARRAY = 490, + IIMAGECUBEARRAY = 491, + UIMAGECUBEARRAY = 492, + IMAGE2DMS = 493, + IIMAGE2DMS = 494, + UIMAGE2DMS = 495, + IMAGE2DMSARRAY = 496, + IIMAGE2DMSARRAY = 497, + UIMAGE2DMSARRAY = 498, + F16IMAGE1D = 499, + F16IMAGE2D = 500, + F16IMAGE3D = 501, + F16IMAGE2DRECT = 502, + F16IMAGECUBE = 503, + F16IMAGE1DARRAY = 504, + F16IMAGE2DARRAY = 505, + F16IMAGECUBEARRAY = 506, + F16IMAGEBUFFER = 507, + F16IMAGE2DMS = 508, + F16IMAGE2DMSARRAY = 509, + TEXTURECUBEARRAY = 510, + ITEXTURECUBEARRAY = 511, + UTEXTURECUBEARRAY = 512, + TEXTURE1D = 513, + ITEXTURE1D = 514, + UTEXTURE1D = 515, + TEXTURE1DARRAY = 516, + ITEXTURE1DARRAY = 517, + UTEXTURE1DARRAY = 518, + TEXTURE2DRECT = 519, + ITEXTURE2DRECT = 520, + UTEXTURE2DRECT = 521, + TEXTUREBUFFER = 522, + ITEXTUREBUFFER = 523, + UTEXTUREBUFFER = 524, + TEXTURE2DMS = 525, + ITEXTURE2DMS = 526, + UTEXTURE2DMS = 527, + TEXTURE2DMSARRAY = 528, + ITEXTURE2DMSARRAY = 529, + UTEXTURE2DMSARRAY = 530, + F16TEXTURE1D = 531, + F16TEXTURE2D = 532, + F16TEXTURE3D = 533, + F16TEXTURE2DRECT = 534, + F16TEXTURECUBE = 535, + F16TEXTURE1DARRAY = 536, + F16TEXTURE2DARRAY = 537, + F16TEXTURECUBEARRAY = 538, + F16TEXTUREBUFFER = 539, + F16TEXTURE2DMS = 540, + F16TEXTURE2DMSARRAY = 541, + SUBPASSINPUT = 542, + SUBPASSINPUTMS = 543, + ISUBPASSINPUT = 544, + ISUBPASSINPUTMS = 545, + USUBPASSINPUT = 546, + USUBPASSINPUTMS = 547, + F16SUBPASSINPUT = 548, + F16SUBPASSINPUTMS = 549, + LEFT_OP = 550, + RIGHT_OP = 551, + INC_OP = 552, + DEC_OP = 553, + LE_OP = 554, + GE_OP = 555, + EQ_OP = 556, + NE_OP = 557, + AND_OP = 558, + OR_OP = 559, + XOR_OP = 560, + MUL_ASSIGN = 561, + DIV_ASSIGN = 562, + ADD_ASSIGN = 563, + MOD_ASSIGN = 564, + LEFT_ASSIGN = 565, + RIGHT_ASSIGN = 566, + AND_ASSIGN = 567, + XOR_ASSIGN = 568, + OR_ASSIGN = 569, + SUB_ASSIGN = 570, + STRING_LITERAL = 571, + LEFT_PAREN = 572, + RIGHT_PAREN = 573, + LEFT_BRACKET = 574, + RIGHT_BRACKET = 575, + LEFT_BRACE = 576, + RIGHT_BRACE = 577, + DOT = 578, + COMMA = 579, + COLON = 580, + EQUAL = 581, + SEMICOLON = 582, + BANG = 583, + DASH = 584, + TILDE = 585, + PLUS = 586, + STAR = 587, + SLASH = 588, + PERCENT = 589, + LEFT_ANGLE = 590, + RIGHT_ANGLE = 591, + VERTICAL_BAR = 592, + CARET = 593, + AMPERSAND = 594, + QUESTION = 595, + INVARIANT = 596, + HIGH_PRECISION = 597, + MEDIUM_PRECISION = 598, + LOW_PRECISION = 599, + PRECISION = 600, + PACKED = 601, + RESOURCE = 602, + SUPERP = 603, + FLOATCONSTANT = 604, + 
INTCONSTANT = 605, + UINTCONSTANT = 606, + BOOLCONSTANT = 607, + IDENTIFIER = 608, + TYPE_NAME = 609, + CENTROID = 610, + IN = 611, + OUT = 612, + INOUT = 613, + STRUCT = 614, + VOID = 615, + WHILE = 616, + BREAK = 617, + CONTINUE = 618, + DO = 619, + ELSE = 620, + FOR = 621, + IF = 622, + DISCARD = 623, + RETURN = 624, + SWITCH = 625, + CASE = 626, + DEFAULT = 627, + UNIFORM = 628, + SHARED = 629, + BUFFER = 630, + FLAT = 631, + SMOOTH = 632, + LAYOUT = 633, + DOUBLECONSTANT = 634, + INT16CONSTANT = 635, + UINT16CONSTANT = 636, + FLOAT16CONSTANT = 637, + INT32CONSTANT = 638, + UINT32CONSTANT = 639, + INT64CONSTANT = 640, + UINT64CONSTANT = 641, + SUBROUTINE = 642, + DEMOTE = 643, + PAYLOADNV = 644, + PAYLOADINNV = 645, + HITATTRNV = 646, + CALLDATANV = 647, + CALLDATAINNV = 648, + PAYLOADEXT = 649, + PAYLOADINEXT = 650, + HITATTREXT = 651, + CALLDATAEXT = 652, + CALLDATAINEXT = 653, + PATCH = 654, + SAMPLE = 655, + NONUNIFORM = 656, + COHERENT = 657, + VOLATILE = 658, + RESTRICT = 659, + READONLY = 660, + WRITEONLY = 661, + DEVICECOHERENT = 662, + QUEUEFAMILYCOHERENT = 663, + WORKGROUPCOHERENT = 664, + SUBGROUPCOHERENT = 665, + NONPRIVATE = 666, + SHADERCALLCOHERENT = 667, + NOPERSPECTIVE = 668, + EXPLICITINTERPAMD = 669, + PERVERTEXNV = 670, + PERPRIMITIVENV = 671, + PERVIEWNV = 672, + PERTASKNV = 673, + PRECISE = 674 + }; +#endif + +/* Value type. */ +#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED + +union YYSTYPE +{ +#line 97 "glslang.y" /* yacc.c:1909 */ + + struct { + glslang::TSourceLoc loc; + union { + glslang::TString *string; + int i; + unsigned int u; + long long i64; + unsigned long long u64; + bool b; + double d; + }; + glslang::TSymbol* symbol; + } lex; + struct { + glslang::TSourceLoc loc; + glslang::TOperator op; + union { + TIntermNode* intermNode; + glslang::TIntermNodePair nodePair; + glslang::TIntermTyped* intermTypedNode; + glslang::TAttributes* attributes; + }; + union { + glslang::TPublicType type; + glslang::TFunction* function; + glslang::TParameter param; + glslang::TTypeLoc typeLine; + glslang::TTypeList* typeList; + glslang::TArraySizes* arraySizes; + glslang::TIdentifierList* identifierList; + }; + glslang::TArraySizes* typeParameters; + } interm; + +#line 510 "glslang_tab.cpp.h" /* yacc.c:1909 */ +}; + +typedef union YYSTYPE YYSTYPE; +# define YYSTYPE_IS_TRIVIAL 1 +# define YYSTYPE_IS_DECLARED 1 +#endif + + + +int yyparse (glslang::TParseContext* pParseContext); + +#endif /* !YY_YY_GLSLANG_TAB_CPP_H_INCLUDED */ diff --git a/dep/glslang/glslang/MachineIndependent/intermOut.cpp b/dep/glslang/glslang/MachineIndependent/intermOut.cpp new file mode 100644 index 000000000..86edcfe4d --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/intermOut.cpp @@ -0,0 +1,1565 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2016 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. 
Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef GLSLANG_WEB
+
+#include "localintermediate.h"
+#include "../Include/InfoSink.h"
+
+#ifdef _MSC_VER
+#include <cfloat>
+#else
+#include <cmath>
+#endif
+#include <cstdint>
+
+namespace {
+
+bool IsInfinity(double x) {
+#ifdef _MSC_VER
+    switch (_fpclass(x)) {
+    case _FPCLASS_NINF:
+    case _FPCLASS_PINF:
+        return true;
+    default:
+        return false;
+    }
+#else
+    return std::isinf(x);
+#endif
+}
+
+bool IsNan(double x) {
+#ifdef _MSC_VER
+    switch (_fpclass(x)) {
+    case _FPCLASS_SNAN:
+    case _FPCLASS_QNAN:
+        return true;
+    default:
+        return false;
+    }
+#else
+    return std::isnan(x);
+#endif
+}
+
+}
+
+namespace glslang {
+
+//
+// Two purposes:
+// 1.  Show an example of how to iterate tree.  Functions can
+//     also directly call Traverse() on children themselves to
+//     have finer grained control over the process than shown here.
+//     See the last function for how to get started.
+// 2.  Print out a text based description of the tree.
+//
+
+//
+// Use this class to carry along data from node to node in
+// the traversal
+//
+class TOutputTraverser : public TIntermTraverser {
+public:
+    TOutputTraverser(TInfoSink& i) : infoSink(i), extraOutput(NoExtraOutput) { }
+
+    enum EExtraOutput {
+        NoExtraOutput,
+        BinaryDoubleOutput
+    };
+    void setDoubleOutput(EExtraOutput extra) { extraOutput = extra; }
+
+    virtual bool visitBinary(TVisit, TIntermBinary* node);
+    virtual bool visitUnary(TVisit, TIntermUnary* node);
+    virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+    virtual bool visitSelection(TVisit, TIntermSelection* node);
+    virtual void visitConstantUnion(TIntermConstantUnion* node);
+    virtual void visitSymbol(TIntermSymbol* node);
+    virtual bool visitLoop(TVisit, TIntermLoop* node);
+    virtual bool visitBranch(TVisit, TIntermBranch* node);
+    virtual bool visitSwitch(TVisit, TIntermSwitch* node);
+
+    TInfoSink& infoSink;
+protected:
+    TOutputTraverser(TOutputTraverser&);
+    TOutputTraverser& operator=(TOutputTraverser&);
+
+    EExtraOutput extraOutput;
+};
+
+//
+// Helper functions for printing, not part of traversing.
+//
+
+static void OutputTreeText(TInfoSink& infoSink, const TIntermNode* node, const int depth)
+{
+    int i;
+
+    infoSink.debug << node->getLoc().string << ":";
+    if (node->getLoc().line)
+        infoSink.debug << node->getLoc().line;
+    else
+        infoSink.debug << "? ";
+
+    for (i = 0; i < depth; ++i)
+        infoSink.debug << "  ";
+}
+
+//
+// The rest of the file are the traversal functions.  The last one
+// is the one that starts the traversal.
+//
+// Return true from interior nodes to have the external traversal
+// continue on to children. 
If you process children yourself, +// return false. +// + +bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + switch (node->getOp()) { + case EOpAssign: out.debug << "move second child to first child"; break; + case EOpAddAssign: out.debug << "add second child into first child"; break; + case EOpSubAssign: out.debug << "subtract second child into first child"; break; + case EOpMulAssign: out.debug << "multiply second child into first child"; break; + case EOpVectorTimesMatrixAssign: out.debug << "matrix mult second child into first child"; break; + case EOpVectorTimesScalarAssign: out.debug << "vector scale second child into first child"; break; + case EOpMatrixTimesScalarAssign: out.debug << "matrix scale second child into first child"; break; + case EOpMatrixTimesMatrixAssign: out.debug << "matrix mult second child into first child"; break; + case EOpDivAssign: out.debug << "divide second child into first child"; break; + case EOpModAssign: out.debug << "mod second child into first child"; break; + case EOpAndAssign: out.debug << "and second child into first child"; break; + case EOpInclusiveOrAssign: out.debug << "or second child into first child"; break; + case EOpExclusiveOrAssign: out.debug << "exclusive or second child into first child"; break; + case EOpLeftShiftAssign: out.debug << "left shift second child into first child"; break; + case EOpRightShiftAssign: out.debug << "right shift second child into first child"; break; + + case EOpIndexDirect: out.debug << "direct index"; break; + case EOpIndexIndirect: out.debug << "indirect index"; break; + case EOpIndexDirectStruct: + { + bool reference = node->getLeft()->getType().isReference(); + const TTypeList *members = reference ? 
node->getLeft()->getType().getReferentType()->getStruct() : node->getLeft()->getType().getStruct();
+            out.debug << (*members)[node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()].type->getFieldName();
+            out.debug << ": direct index for structure"; break;
+        }
+    case EOpVectorSwizzle: out.debug << "vector swizzle"; break;
+    case EOpMatrixSwizzle: out.debug << "matrix swizzle"; break;
+
+    case EOpAdd: out.debug << "add"; break;
+    case EOpSub: out.debug << "subtract"; break;
+    case EOpMul: out.debug << "component-wise multiply"; break;
+    case EOpDiv: out.debug << "divide"; break;
+    case EOpMod: out.debug << "mod"; break;
+    case EOpRightShift: out.debug << "right-shift"; break;
+    case EOpLeftShift: out.debug << "left-shift"; break;
+    case EOpAnd: out.debug << "bitwise and"; break;
+    case EOpInclusiveOr: out.debug << "inclusive-or"; break;
+    case EOpExclusiveOr: out.debug << "exclusive-or"; break;
+    case EOpEqual: out.debug << "Compare Equal"; break;
+    case EOpNotEqual: out.debug << "Compare Not Equal"; break;
+    case EOpLessThan: out.debug << "Compare Less Than"; break;
+    case EOpGreaterThan: out.debug << "Compare Greater Than"; break;
+    case EOpLessThanEqual: out.debug << "Compare Less Than or Equal"; break;
+    case EOpGreaterThanEqual: out.debug << "Compare Greater Than or Equal"; break;
+    case EOpVectorEqual: out.debug << "Equal"; break;
+    case EOpVectorNotEqual: out.debug << "NotEqual"; break;
+
+    case EOpVectorTimesScalar: out.debug << "vector-scale"; break;
+    case EOpVectorTimesMatrix: out.debug << "vector-times-matrix"; break;
+    case EOpMatrixTimesVector: out.debug << "matrix-times-vector"; break;
+    case EOpMatrixTimesScalar: out.debug << "matrix-scale"; break;
+    case EOpMatrixTimesMatrix: out.debug << "matrix-multiply"; break;
+
+    case EOpLogicalOr: out.debug << "logical-or"; break;
+    case EOpLogicalXor: out.debug << "logical-xor"; break;
+    case EOpLogicalAnd: out.debug << "logical-and"; break;
+
+    case EOpAbsDifference: out.debug << "absoluteDifference"; break;
+    case EOpAddSaturate: out.debug << "addSaturate"; break;
+    case EOpSubSaturate: out.debug << "subtractSaturate"; break;
+    case EOpAverage: out.debug << "average"; break;
+    case EOpAverageRounded: out.debug << "averageRounded"; break;
+    case EOpMul32x16: out.debug << "multiply32x16"; break;
+
+    default: out.debug << "<unknown op>";
+    }
+
+    out.debug << " (" << node->getCompleteString() << ")";
+
+    out.debug << "\n";
+
+    return true;
+}
+
+bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
+{
+    TInfoSink& out = infoSink;
+
+    OutputTreeText(out, node, depth);
+
+    switch (node->getOp()) {
+    case EOpNegative: out.debug << "Negate value"; break;
+    case EOpVectorLogicalNot:
+    case EOpLogicalNot: out.debug << "Negate conditional"; break;
+    case EOpBitwiseNot: out.debug << "Bitwise not"; break;
+
+    case EOpPostIncrement: out.debug << "Post-Increment"; break;
+    case EOpPostDecrement: out.debug << "Post-Decrement"; break;
+    case EOpPreIncrement: out.debug << "Pre-Increment"; break;
+    case EOpPreDecrement: out.debug << "Pre-Decrement"; break;
+    case EOpCopyObject: out.debug << "copy object"; break;
+
+    // * -> bool
+    case EOpConvInt8ToBool: out.debug << "Convert int8_t to bool"; break;
+    case EOpConvUint8ToBool: out.debug << "Convert uint8_t to bool"; break;
+    case EOpConvInt16ToBool: out.debug << "Convert int16_t to bool"; break;
+    case EOpConvUint16ToBool: out.debug << "Convert uint16_t to bool";break;
+    case EOpConvIntToBool: out.debug << "Convert int to bool"; break;
+    case EOpConvUintToBool: out.debug << 
"Convert uint to bool"; break; + case EOpConvInt64ToBool: out.debug << "Convert int64 to bool"; break; + case EOpConvUint64ToBool: out.debug << "Convert uint64 to bool"; break; + case EOpConvFloat16ToBool: out.debug << "Convert float16_t to bool"; break; + case EOpConvFloatToBool: out.debug << "Convert float to bool"; break; + case EOpConvDoubleToBool: out.debug << "Convert double to bool"; break; + + // bool -> * + case EOpConvBoolToInt8: out.debug << "Convert bool to int8_t"; break; + case EOpConvBoolToUint8: out.debug << "Convert bool to uint8_t"; break; + case EOpConvBoolToInt16: out.debug << "Convert bool to in16t_t"; break; + case EOpConvBoolToUint16: out.debug << "Convert bool to uint16_t";break; + case EOpConvBoolToInt: out.debug << "Convert bool to int" ; break; + case EOpConvBoolToUint: out.debug << "Convert bool to uint"; break; + case EOpConvBoolToInt64: out.debug << "Convert bool to int64"; break; + case EOpConvBoolToUint64: out.debug << "Convert bool to uint64";break; + case EOpConvBoolToFloat16: out.debug << "Convert bool to float16_t"; break; + case EOpConvBoolToFloat: out.debug << "Convert bool to float"; break; + case EOpConvBoolToDouble: out.debug << "Convert bool to double"; break; + + // int8_t -> (u)int* + case EOpConvInt8ToInt16: out.debug << "Convert int8_t to int16_t";break; + case EOpConvInt8ToInt: out.debug << "Convert int8_t to int"; break; + case EOpConvInt8ToInt64: out.debug << "Convert int8_t to int64"; break; + case EOpConvInt8ToUint8: out.debug << "Convert int8_t to uint8_t";break; + case EOpConvInt8ToUint16: out.debug << "Convert int8_t to uint16_t";break; + case EOpConvInt8ToUint: out.debug << "Convert int8_t to uint"; break; + case EOpConvInt8ToUint64: out.debug << "Convert int8_t to uint64"; break; + + // uint8_t -> (u)int* + case EOpConvUint8ToInt8: out.debug << "Convert uint8_t to int8_t";break; + case EOpConvUint8ToInt16: out.debug << "Convert uint8_t to int16_t";break; + case EOpConvUint8ToInt: out.debug << "Convert uint8_t to int"; break; + case EOpConvUint8ToInt64: out.debug << "Convert uint8_t to int64"; break; + case EOpConvUint8ToUint16: out.debug << "Convert uint8_t to uint16_t";break; + case EOpConvUint8ToUint: out.debug << "Convert uint8_t to uint"; break; + case EOpConvUint8ToUint64: out.debug << "Convert uint8_t to uint64"; break; + + // int8_t -> float* + case EOpConvInt8ToFloat16: out.debug << "Convert int8_t to float16_t";break; + case EOpConvInt8ToFloat: out.debug << "Convert int8_t to float"; break; + case EOpConvInt8ToDouble: out.debug << "Convert int8_t to double"; break; + + // uint8_t -> float* + case EOpConvUint8ToFloat16: out.debug << "Convert uint8_t to float16_t";break; + case EOpConvUint8ToFloat: out.debug << "Convert uint8_t to float"; break; + case EOpConvUint8ToDouble: out.debug << "Convert uint8_t to double"; break; + + // int16_t -> (u)int* + case EOpConvInt16ToInt8: out.debug << "Convert int16_t to int8_t";break; + case EOpConvInt16ToInt: out.debug << "Convert int16_t to int"; break; + case EOpConvInt16ToInt64: out.debug << "Convert int16_t to int64"; break; + case EOpConvInt16ToUint8: out.debug << "Convert int16_t to uint8_t";break; + case EOpConvInt16ToUint16: out.debug << "Convert int16_t to uint16_t";break; + case EOpConvInt16ToUint: out.debug << "Convert int16_t to uint"; break; + case EOpConvInt16ToUint64: out.debug << "Convert int16_t to uint64"; break; + + // int16_t -> float* + case EOpConvInt16ToFloat16: out.debug << "Convert int16_t to float16_t";break; + case EOpConvInt16ToFloat: out.debug << "Convert 
int16_t to float"; break; + case EOpConvInt16ToDouble: out.debug << "Convert int16_t to double"; break; + + // uint16_t -> (u)int* + case EOpConvUint16ToInt8: out.debug << "Convert uint16_t to int8_t";break; + case EOpConvUint16ToInt16: out.debug << "Convert uint16_t to int16_t";break; + case EOpConvUint16ToInt: out.debug << "Convert uint16_t to int"; break; + case EOpConvUint16ToInt64: out.debug << "Convert uint16_t to int64"; break; + case EOpConvUint16ToUint8: out.debug << "Convert uint16_t to uint8_t";break; + case EOpConvUint16ToUint: out.debug << "Convert uint16_t to uint"; break; + case EOpConvUint16ToUint64: out.debug << "Convert uint16_t to uint64"; break; + + // uint16_t -> float* + case EOpConvUint16ToFloat16: out.debug << "Convert uint16_t to float16_t";break; + case EOpConvUint16ToFloat: out.debug << "Convert uint16_t to float"; break; + case EOpConvUint16ToDouble: out.debug << "Convert uint16_t to double"; break; + + // int32_t -> (u)int* + case EOpConvIntToInt8: out.debug << "Convert int to int8_t";break; + case EOpConvIntToInt16: out.debug << "Convert int to int16_t";break; + case EOpConvIntToInt64: out.debug << "Convert int to int64"; break; + case EOpConvIntToUint8: out.debug << "Convert int to uint8_t";break; + case EOpConvIntToUint16: out.debug << "Convert int to uint16_t";break; + case EOpConvIntToUint: out.debug << "Convert int to uint"; break; + case EOpConvIntToUint64: out.debug << "Convert int to uint64"; break; + + // int32_t -> float* + case EOpConvIntToFloat16: out.debug << "Convert int to float16_t";break; + case EOpConvIntToFloat: out.debug << "Convert int to float"; break; + case EOpConvIntToDouble: out.debug << "Convert int to double"; break; + + // uint32_t -> (u)int* + case EOpConvUintToInt8: out.debug << "Convert uint to int8_t";break; + case EOpConvUintToInt16: out.debug << "Convert uint to int16_t";break; + case EOpConvUintToInt: out.debug << "Convert uint to int";break; + case EOpConvUintToInt64: out.debug << "Convert uint to int64"; break; + case EOpConvUintToUint8: out.debug << "Convert uint to uint8_t";break; + case EOpConvUintToUint16: out.debug << "Convert uint to uint16_t";break; + case EOpConvUintToUint64: out.debug << "Convert uint to uint64"; break; + + // uint32_t -> float* + case EOpConvUintToFloat16: out.debug << "Convert uint to float16_t";break; + case EOpConvUintToFloat: out.debug << "Convert uint to float"; break; + case EOpConvUintToDouble: out.debug << "Convert uint to double"; break; + + // int64 -> (u)int* + case EOpConvInt64ToInt8: out.debug << "Convert int64 to int8_t"; break; + case EOpConvInt64ToInt16: out.debug << "Convert int64 to int16_t"; break; + case EOpConvInt64ToInt: out.debug << "Convert int64 to int"; break; + case EOpConvInt64ToUint8: out.debug << "Convert int64 to uint8_t";break; + case EOpConvInt64ToUint16: out.debug << "Convert int64 to uint16_t";break; + case EOpConvInt64ToUint: out.debug << "Convert int64 to uint"; break; + case EOpConvInt64ToUint64: out.debug << "Convert int64 to uint64"; break; + + // int64 -> float* + case EOpConvInt64ToFloat16: out.debug << "Convert int64 to float16_t";break; + case EOpConvInt64ToFloat: out.debug << "Convert int64 to float"; break; + case EOpConvInt64ToDouble: out.debug << "Convert int64 to double"; break; + + // uint64 -> (u)int* + case EOpConvUint64ToInt8: out.debug << "Convert uint64 to int8_t";break; + case EOpConvUint64ToInt16: out.debug << "Convert uint64 to int16_t";break; + case EOpConvUint64ToInt: out.debug << "Convert uint64 to int"; break; + case 
EOpConvUint64ToInt64: out.debug << "Convert uint64 to int64"; break; + case EOpConvUint64ToUint8: out.debug << "Convert uint64 to uint8_t";break; + case EOpConvUint64ToUint16: out.debug << "Convert uint64 to uint16"; break; + case EOpConvUint64ToUint: out.debug << "Convert uint64 to uint"; break; + + // uint64 -> float* + case EOpConvUint64ToFloat16: out.debug << "Convert uint64 to float16_t";break; + case EOpConvUint64ToFloat: out.debug << "Convert uint64 to float"; break; + case EOpConvUint64ToDouble: out.debug << "Convert uint64 to double"; break; + + // float16_t -> int* + case EOpConvFloat16ToInt8: out.debug << "Convert float16_t to int8_t"; break; + case EOpConvFloat16ToInt16: out.debug << "Convert float16_t to int16_t"; break; + case EOpConvFloat16ToInt: out.debug << "Convert float16_t to int"; break; + case EOpConvFloat16ToInt64: out.debug << "Convert float16_t to int64"; break; + + // float16_t -> uint* + case EOpConvFloat16ToUint8: out.debug << "Convert float16_t to uint8_t"; break; + case EOpConvFloat16ToUint16: out.debug << "Convert float16_t to uint16_t"; break; + case EOpConvFloat16ToUint: out.debug << "Convert float16_t to uint"; break; + case EOpConvFloat16ToUint64: out.debug << "Convert float16_t to uint64"; break; + + // float16_t -> float* + case EOpConvFloat16ToFloat: out.debug << "Convert float16_t to float"; break; + case EOpConvFloat16ToDouble: out.debug << "Convert float16_t to double"; break; + + // float32 -> float* + case EOpConvFloatToFloat16: out.debug << "Convert float to float16_t"; break; + case EOpConvFloatToDouble: out.debug << "Convert float to double"; break; + + // float32_t -> int* + case EOpConvFloatToInt8: out.debug << "Convert float to int8_t"; break; + case EOpConvFloatToInt16: out.debug << "Convert float to int16_t"; break; + case EOpConvFloatToInt: out.debug << "Convert float to int"; break; + case EOpConvFloatToInt64: out.debug << "Convert float to int64"; break; + + // float32_t -> uint* + case EOpConvFloatToUint8: out.debug << "Convert float to uint8_t"; break; + case EOpConvFloatToUint16: out.debug << "Convert float to uint16_t"; break; + case EOpConvFloatToUint: out.debug << "Convert float to uint"; break; + case EOpConvFloatToUint64: out.debug << "Convert float to uint64"; break; + + // double -> float* + case EOpConvDoubleToFloat16: out.debug << "Convert double to float16_t"; break; + case EOpConvDoubleToFloat: out.debug << "Convert double to float"; break; + + // double -> int* + case EOpConvDoubleToInt8: out.debug << "Convert double to int8_t"; break; + case EOpConvDoubleToInt16: out.debug << "Convert double to int16_t"; break; + case EOpConvDoubleToInt: out.debug << "Convert double to int"; break; + case EOpConvDoubleToInt64: out.debug << "Convert double to int64"; break; + + // float32_t -> uint* + case EOpConvDoubleToUint8: out.debug << "Convert double to uint8_t"; break; + case EOpConvDoubleToUint16: out.debug << "Convert double to uint16_t"; break; + case EOpConvDoubleToUint: out.debug << "Convert double to uint"; break; + case EOpConvDoubleToUint64: out.debug << "Convert double to uint64"; break; + + case EOpConvUint64ToPtr: out.debug << "Convert uint64_t to pointer"; break; + case EOpConvPtrToUint64: out.debug << "Convert pointer to uint64_t"; break; + + case EOpRadians: out.debug << "radians"; break; + case EOpDegrees: out.debug << "degrees"; break; + case EOpSin: out.debug << "sine"; break; + case EOpCos: out.debug << "cosine"; break; + case EOpTan: out.debug << "tangent"; break; + case EOpAsin: out.debug << "arc sine"; break; + 
case EOpAcos: out.debug << "arc cosine"; break; + case EOpAtan: out.debug << "arc tangent"; break; + case EOpSinh: out.debug << "hyp. sine"; break; + case EOpCosh: out.debug << "hyp. cosine"; break; + case EOpTanh: out.debug << "hyp. tangent"; break; + case EOpAsinh: out.debug << "arc hyp. sine"; break; + case EOpAcosh: out.debug << "arc hyp. cosine"; break; + case EOpAtanh: out.debug << "arc hyp. tangent"; break; + + case EOpExp: out.debug << "exp"; break; + case EOpLog: out.debug << "log"; break; + case EOpExp2: out.debug << "exp2"; break; + case EOpLog2: out.debug << "log2"; break; + case EOpSqrt: out.debug << "sqrt"; break; + case EOpInverseSqrt: out.debug << "inverse sqrt"; break; + + case EOpAbs: out.debug << "Absolute value"; break; + case EOpSign: out.debug << "Sign"; break; + case EOpFloor: out.debug << "Floor"; break; + case EOpTrunc: out.debug << "trunc"; break; + case EOpRound: out.debug << "round"; break; + case EOpRoundEven: out.debug << "roundEven"; break; + case EOpCeil: out.debug << "Ceiling"; break; + case EOpFract: out.debug << "Fraction"; break; + + case EOpIsNan: out.debug << "isnan"; break; + case EOpIsInf: out.debug << "isinf"; break; + + case EOpFloatBitsToInt: out.debug << "floatBitsToInt"; break; + case EOpFloatBitsToUint:out.debug << "floatBitsToUint"; break; + case EOpIntBitsToFloat: out.debug << "intBitsToFloat"; break; + case EOpUintBitsToFloat:out.debug << "uintBitsToFloat"; break; + case EOpDoubleBitsToInt64: out.debug << "doubleBitsToInt64"; break; + case EOpDoubleBitsToUint64: out.debug << "doubleBitsToUint64"; break; + case EOpInt64BitsToDouble: out.debug << "int64BitsToDouble"; break; + case EOpUint64BitsToDouble: out.debug << "uint64BitsToDouble"; break; + case EOpFloat16BitsToInt16: out.debug << "float16BitsToInt16"; break; + case EOpFloat16BitsToUint16: out.debug << "float16BitsToUint16"; break; + case EOpInt16BitsToFloat16: out.debug << "int16BitsToFloat16"; break; + case EOpUint16BitsToFloat16: out.debug << "uint16BitsToFloat16"; break; + + case EOpPackSnorm2x16: out.debug << "packSnorm2x16"; break; + case EOpUnpackSnorm2x16:out.debug << "unpackSnorm2x16"; break; + case EOpPackUnorm2x16: out.debug << "packUnorm2x16"; break; + case EOpUnpackUnorm2x16:out.debug << "unpackUnorm2x16"; break; + case EOpPackHalf2x16: out.debug << "packHalf2x16"; break; + case EOpUnpackHalf2x16: out.debug << "unpackHalf2x16"; break; + case EOpPack16: out.debug << "pack16"; break; + case EOpPack32: out.debug << "pack32"; break; + case EOpPack64: out.debug << "pack64"; break; + case EOpUnpack32: out.debug << "unpack32"; break; + case EOpUnpack16: out.debug << "unpack16"; break; + case EOpUnpack8: out.debug << "unpack8"; break; + + case EOpPackSnorm4x8: out.debug << "PackSnorm4x8"; break; + case EOpUnpackSnorm4x8: out.debug << "UnpackSnorm4x8"; break; + case EOpPackUnorm4x8: out.debug << "PackUnorm4x8"; break; + case EOpUnpackUnorm4x8: out.debug << "UnpackUnorm4x8"; break; + case EOpPackDouble2x32: out.debug << "PackDouble2x32"; break; + case EOpUnpackDouble2x32: out.debug << "UnpackDouble2x32"; break; + + case EOpPackInt2x32: out.debug << "packInt2x32"; break; + case EOpUnpackInt2x32: out.debug << "unpackInt2x32"; break; + case EOpPackUint2x32: out.debug << "packUint2x32"; break; + case EOpUnpackUint2x32: out.debug << "unpackUint2x32"; break; + + case EOpPackInt2x16: out.debug << "packInt2x16"; break; + case EOpUnpackInt2x16: out.debug << "unpackInt2x16"; break; + case EOpPackUint2x16: out.debug << "packUint2x16"; break; + case EOpUnpackUint2x16: out.debug << 
"unpackUint2x16"; break; + + case EOpPackInt4x16: out.debug << "packInt4x16"; break; + case EOpUnpackInt4x16: out.debug << "unpackInt4x16"; break; + case EOpPackUint4x16: out.debug << "packUint4x16"; break; + case EOpUnpackUint4x16: out.debug << "unpackUint4x16"; break; + case EOpPackFloat2x16: out.debug << "packFloat2x16"; break; + case EOpUnpackFloat2x16: out.debug << "unpackFloat2x16"; break; + + case EOpLength: out.debug << "length"; break; + case EOpNormalize: out.debug << "normalize"; break; + case EOpDPdx: out.debug << "dPdx"; break; + case EOpDPdy: out.debug << "dPdy"; break; + case EOpFwidth: out.debug << "fwidth"; break; + case EOpDPdxFine: out.debug << "dPdxFine"; break; + case EOpDPdyFine: out.debug << "dPdyFine"; break; + case EOpFwidthFine: out.debug << "fwidthFine"; break; + case EOpDPdxCoarse: out.debug << "dPdxCoarse"; break; + case EOpDPdyCoarse: out.debug << "dPdyCoarse"; break; + case EOpFwidthCoarse: out.debug << "fwidthCoarse"; break; + + case EOpInterpolateAtCentroid: out.debug << "interpolateAtCentroid"; break; + + case EOpDeterminant: out.debug << "determinant"; break; + case EOpMatrixInverse: out.debug << "inverse"; break; + case EOpTranspose: out.debug << "transpose"; break; + + case EOpAny: out.debug << "any"; break; + case EOpAll: out.debug << "all"; break; + + case EOpArrayLength: out.debug << "array length"; break; + + case EOpEmitStreamVertex: out.debug << "EmitStreamVertex"; break; + case EOpEndStreamPrimitive: out.debug << "EndStreamPrimitive"; break; + + case EOpAtomicCounterIncrement: out.debug << "AtomicCounterIncrement";break; + case EOpAtomicCounterDecrement: out.debug << "AtomicCounterDecrement";break; + case EOpAtomicCounter: out.debug << "AtomicCounter"; break; + + case EOpTextureQuerySize: out.debug << "textureSize"; break; + case EOpTextureQueryLod: out.debug << "textureQueryLod"; break; + case EOpTextureQueryLevels: out.debug << "textureQueryLevels"; break; + case EOpTextureQuerySamples: out.debug << "textureSamples"; break; + case EOpImageQuerySize: out.debug << "imageQuerySize"; break; + case EOpImageQuerySamples: out.debug << "imageQuerySamples"; break; + case EOpImageLoad: out.debug << "imageLoad"; break; + + case EOpBitFieldReverse: out.debug << "bitFieldReverse"; break; + case EOpBitCount: out.debug << "bitCount"; break; + case EOpFindLSB: out.debug << "findLSB"; break; + case EOpFindMSB: out.debug << "findMSB"; break; + + case EOpCountLeadingZeros: out.debug << "countLeadingZeros"; break; + case EOpCountTrailingZeros: out.debug << "countTrailingZeros"; break; + + case EOpNoise: out.debug << "noise"; break; + + case EOpBallot: out.debug << "ballot"; break; + case EOpReadFirstInvocation: out.debug << "readFirstInvocation"; break; + + case EOpAnyInvocation: out.debug << "anyInvocation"; break; + case EOpAllInvocations: out.debug << "allInvocations"; break; + case EOpAllInvocationsEqual: out.debug << "allInvocationsEqual"; break; + + case EOpSubgroupElect: out.debug << "subgroupElect"; break; + case EOpSubgroupAll: out.debug << "subgroupAll"; break; + case EOpSubgroupAny: out.debug << "subgroupAny"; break; + case EOpSubgroupAllEqual: out.debug << "subgroupAllEqual"; break; + case EOpSubgroupBroadcast: out.debug << "subgroupBroadcast"; break; + case EOpSubgroupBroadcastFirst: out.debug << "subgroupBroadcastFirst"; break; + case EOpSubgroupBallot: out.debug << "subgroupBallot"; break; + case EOpSubgroupInverseBallot: out.debug << "subgroupInverseBallot"; break; + case EOpSubgroupBallotBitExtract: out.debug << "subgroupBallotBitExtract"; break; 
+ case EOpSubgroupBallotBitCount: out.debug << "subgroupBallotBitCount"; break; + case EOpSubgroupBallotInclusiveBitCount: out.debug << "subgroupBallotInclusiveBitCount"; break; + case EOpSubgroupBallotExclusiveBitCount: out.debug << "subgroupBallotExclusiveBitCount"; break; + case EOpSubgroupBallotFindLSB: out.debug << "subgroupBallotFindLSB"; break; + case EOpSubgroupBallotFindMSB: out.debug << "subgroupBallotFindMSB"; break; + case EOpSubgroupShuffle: out.debug << "subgroupShuffle"; break; + case EOpSubgroupShuffleXor: out.debug << "subgroupShuffleXor"; break; + case EOpSubgroupShuffleUp: out.debug << "subgroupShuffleUp"; break; + case EOpSubgroupShuffleDown: out.debug << "subgroupShuffleDown"; break; + case EOpSubgroupAdd: out.debug << "subgroupAdd"; break; + case EOpSubgroupMul: out.debug << "subgroupMul"; break; + case EOpSubgroupMin: out.debug << "subgroupMin"; break; + case EOpSubgroupMax: out.debug << "subgroupMax"; break; + case EOpSubgroupAnd: out.debug << "subgroupAnd"; break; + case EOpSubgroupOr: out.debug << "subgroupOr"; break; + case EOpSubgroupXor: out.debug << "subgroupXor"; break; + case EOpSubgroupInclusiveAdd: out.debug << "subgroupInclusiveAdd"; break; + case EOpSubgroupInclusiveMul: out.debug << "subgroupInclusiveMul"; break; + case EOpSubgroupInclusiveMin: out.debug << "subgroupInclusiveMin"; break; + case EOpSubgroupInclusiveMax: out.debug << "subgroupInclusiveMax"; break; + case EOpSubgroupInclusiveAnd: out.debug << "subgroupInclusiveAnd"; break; + case EOpSubgroupInclusiveOr: out.debug << "subgroupInclusiveOr"; break; + case EOpSubgroupInclusiveXor: out.debug << "subgroupInclusiveXor"; break; + case EOpSubgroupExclusiveAdd: out.debug << "subgroupExclusiveAdd"; break; + case EOpSubgroupExclusiveMul: out.debug << "subgroupExclusiveMul"; break; + case EOpSubgroupExclusiveMin: out.debug << "subgroupExclusiveMin"; break; + case EOpSubgroupExclusiveMax: out.debug << "subgroupExclusiveMax"; break; + case EOpSubgroupExclusiveAnd: out.debug << "subgroupExclusiveAnd"; break; + case EOpSubgroupExclusiveOr: out.debug << "subgroupExclusiveOr"; break; + case EOpSubgroupExclusiveXor: out.debug << "subgroupExclusiveXor"; break; + case EOpSubgroupClusteredAdd: out.debug << "subgroupClusteredAdd"; break; + case EOpSubgroupClusteredMul: out.debug << "subgroupClusteredMul"; break; + case EOpSubgroupClusteredMin: out.debug << "subgroupClusteredMin"; break; + case EOpSubgroupClusteredMax: out.debug << "subgroupClusteredMax"; break; + case EOpSubgroupClusteredAnd: out.debug << "subgroupClusteredAnd"; break; + case EOpSubgroupClusteredOr: out.debug << "subgroupClusteredOr"; break; + case EOpSubgroupClusteredXor: out.debug << "subgroupClusteredXor"; break; + case EOpSubgroupQuadBroadcast: out.debug << "subgroupQuadBroadcast"; break; + case EOpSubgroupQuadSwapHorizontal: out.debug << "subgroupQuadSwapHorizontal"; break; + case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break; + case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break; + + case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break; + case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break; + case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break; + case EOpSubgroupPartitionedMin: out.debug << "subgroupPartitionedMinNV"; break; + case EOpSubgroupPartitionedMax: out.debug << "subgroupPartitionedMaxNV"; break; + case EOpSubgroupPartitionedAnd: out.debug << "subgroupPartitionedAndNV"; break; + case 
EOpSubgroupPartitionedOr: out.debug << "subgroupPartitionedOrNV"; break; + case EOpSubgroupPartitionedXor: out.debug << "subgroupPartitionedXorNV"; break; + case EOpSubgroupPartitionedInclusiveAdd: out.debug << "subgroupPartitionedInclusiveAddNV"; break; + case EOpSubgroupPartitionedInclusiveMul: out.debug << "subgroupPartitionedInclusiveMulNV"; break; + case EOpSubgroupPartitionedInclusiveMin: out.debug << "subgroupPartitionedInclusiveMinNV"; break; + case EOpSubgroupPartitionedInclusiveMax: out.debug << "subgroupPartitionedInclusiveMaxNV"; break; + case EOpSubgroupPartitionedInclusiveAnd: out.debug << "subgroupPartitionedInclusiveAndNV"; break; + case EOpSubgroupPartitionedInclusiveOr: out.debug << "subgroupPartitionedInclusiveOrNV"; break; + case EOpSubgroupPartitionedInclusiveXor: out.debug << "subgroupPartitionedInclusiveXorNV"; break; + case EOpSubgroupPartitionedExclusiveAdd: out.debug << "subgroupPartitionedExclusiveAddNV"; break; + case EOpSubgroupPartitionedExclusiveMul: out.debug << "subgroupPartitionedExclusiveMulNV"; break; + case EOpSubgroupPartitionedExclusiveMin: out.debug << "subgroupPartitionedExclusiveMinNV"; break; + case EOpSubgroupPartitionedExclusiveMax: out.debug << "subgroupPartitionedExclusiveMaxNV"; break; + case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break; + case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break; + case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break; + + case EOpClip: out.debug << "clip"; break; + case EOpIsFinite: out.debug << "isfinite"; break; + case EOpLog10: out.debug << "log10"; break; + case EOpRcp: out.debug << "rcp"; break; + case EOpSaturate: out.debug << "saturate"; break; + + case EOpSparseTexelsResident: out.debug << "sparseTexelsResident"; break; + + case EOpMinInvocations: out.debug << "minInvocations"; break; + case EOpMaxInvocations: out.debug << "maxInvocations"; break; + case EOpAddInvocations: out.debug << "addInvocations"; break; + case EOpMinInvocationsNonUniform: out.debug << "minInvocationsNonUniform"; break; + case EOpMaxInvocationsNonUniform: out.debug << "maxInvocationsNonUniform"; break; + case EOpAddInvocationsNonUniform: out.debug << "addInvocationsNonUniform"; break; + + case EOpMinInvocationsInclusiveScan: out.debug << "minInvocationsInclusiveScan"; break; + case EOpMaxInvocationsInclusiveScan: out.debug << "maxInvocationsInclusiveScan"; break; + case EOpAddInvocationsInclusiveScan: out.debug << "addInvocationsInclusiveScan"; break; + case EOpMinInvocationsInclusiveScanNonUniform: out.debug << "minInvocationsInclusiveScanNonUniform"; break; + case EOpMaxInvocationsInclusiveScanNonUniform: out.debug << "maxInvocationsInclusiveScanNonUniform"; break; + case EOpAddInvocationsInclusiveScanNonUniform: out.debug << "addInvocationsInclusiveScanNonUniform"; break; + + case EOpMinInvocationsExclusiveScan: out.debug << "minInvocationsExclusiveScan"; break; + case EOpMaxInvocationsExclusiveScan: out.debug << "maxInvocationsExclusiveScan"; break; + case EOpAddInvocationsExclusiveScan: out.debug << "addInvocationsExclusiveScan"; break; + case EOpMinInvocationsExclusiveScanNonUniform: out.debug << "minInvocationsExclusiveScanNonUniform"; break; + case EOpMaxInvocationsExclusiveScanNonUniform: out.debug << "maxInvocationsExclusiveScanNonUniform"; break; + case EOpAddInvocationsExclusiveScanNonUniform: out.debug << "addInvocationsExclusiveScanNonUniform"; break; + + case EOpMbcnt: out.debug << 
"mbcnt"; break; + + case EOpFragmentMaskFetch: out.debug << "fragmentMaskFetchAMD"; break; + case EOpFragmentFetch: out.debug << "fragmentFetchAMD"; break; + + case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break; + case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break; + + case EOpSubpassLoad: out.debug << "subpassLoad"; break; + case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break; + + case EOpConstructReference: out.debug << "Construct reference type"; break; + + default: out.debug.message(EPrefixError, "Bad unary op"); + } + + out.debug << " (" << node->getCompleteString() << ")"; + + out.debug << "\n"; + + return true; +} + +bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + TInfoSink& out = infoSink; + + if (node->getOp() == EOpNull) { + out.debug.message(EPrefixError, "node is still EOpNull!"); + return true; + } + + OutputTreeText(out, node, depth); + + switch (node->getOp()) { + case EOpSequence: out.debug << "Sequence\n"; return true; + case EOpLinkerObjects: out.debug << "Linker Objects\n"; return true; + case EOpComma: out.debug << "Comma"; break; + case EOpFunction: out.debug << "Function Definition: " << node->getName(); break; + case EOpFunctionCall: out.debug << "Function Call: " << node->getName(); break; + case EOpParameters: out.debug << "Function Parameters: "; break; + + case EOpConstructFloat: out.debug << "Construct float"; break; + case EOpConstructDouble:out.debug << "Construct double"; break; + + case EOpConstructVec2: out.debug << "Construct vec2"; break; + case EOpConstructVec3: out.debug << "Construct vec3"; break; + case EOpConstructVec4: out.debug << "Construct vec4"; break; + case EOpConstructDVec2: out.debug << "Construct dvec2"; break; + case EOpConstructDVec3: out.debug << "Construct dvec3"; break; + case EOpConstructDVec4: out.debug << "Construct dvec4"; break; + case EOpConstructBool: out.debug << "Construct bool"; break; + case EOpConstructBVec2: out.debug << "Construct bvec2"; break; + case EOpConstructBVec3: out.debug << "Construct bvec3"; break; + case EOpConstructBVec4: out.debug << "Construct bvec4"; break; + case EOpConstructInt8: out.debug << "Construct int8_t"; break; + case EOpConstructI8Vec2: out.debug << "Construct i8vec2"; break; + case EOpConstructI8Vec3: out.debug << "Construct i8vec3"; break; + case EOpConstructI8Vec4: out.debug << "Construct i8vec4"; break; + case EOpConstructInt: out.debug << "Construct int"; break; + case EOpConstructIVec2: out.debug << "Construct ivec2"; break; + case EOpConstructIVec3: out.debug << "Construct ivec3"; break; + case EOpConstructIVec4: out.debug << "Construct ivec4"; break; + case EOpConstructUint8: out.debug << "Construct uint8_t"; break; + case EOpConstructU8Vec2: out.debug << "Construct u8vec2"; break; + case EOpConstructU8Vec3: out.debug << "Construct u8vec3"; break; + case EOpConstructU8Vec4: out.debug << "Construct u8vec4"; break; + case EOpConstructUint: out.debug << "Construct uint"; break; + case EOpConstructUVec2: out.debug << "Construct uvec2"; break; + case EOpConstructUVec3: out.debug << "Construct uvec3"; break; + case EOpConstructUVec4: out.debug << "Construct uvec4"; break; + case EOpConstructInt64: out.debug << "Construct int64"; break; + case EOpConstructI64Vec2: out.debug << "Construct i64vec2"; break; + case EOpConstructI64Vec3: out.debug << "Construct i64vec3"; break; + case EOpConstructI64Vec4: out.debug << "Construct i64vec4"; break; + case EOpConstructUint64: out.debug << "Construct uint64"; break; + case EOpConstructU64Vec2: 
out.debug << "Construct u64vec2"; break; + case EOpConstructU64Vec3: out.debug << "Construct u64vec3"; break; + case EOpConstructU64Vec4: out.debug << "Construct u64vec4"; break; + case EOpConstructInt16: out.debug << "Construct int16_t"; break; + case EOpConstructI16Vec2: out.debug << "Construct i16vec2"; break; + case EOpConstructI16Vec3: out.debug << "Construct i16vec3"; break; + case EOpConstructI16Vec4: out.debug << "Construct i16vec4"; break; + case EOpConstructUint16: out.debug << "Construct uint16_t"; break; + case EOpConstructU16Vec2: out.debug << "Construct u16vec2"; break; + case EOpConstructU16Vec3: out.debug << "Construct u16vec3"; break; + case EOpConstructU16Vec4: out.debug << "Construct u16vec4"; break; + case EOpConstructMat2x2: out.debug << "Construct mat2"; break; + case EOpConstructMat2x3: out.debug << "Construct mat2x3"; break; + case EOpConstructMat2x4: out.debug << "Construct mat2x4"; break; + case EOpConstructMat3x2: out.debug << "Construct mat3x2"; break; + case EOpConstructMat3x3: out.debug << "Construct mat3"; break; + case EOpConstructMat3x4: out.debug << "Construct mat3x4"; break; + case EOpConstructMat4x2: out.debug << "Construct mat4x2"; break; + case EOpConstructMat4x3: out.debug << "Construct mat4x3"; break; + case EOpConstructMat4x4: out.debug << "Construct mat4"; break; + case EOpConstructDMat2x2: out.debug << "Construct dmat2"; break; + case EOpConstructDMat2x3: out.debug << "Construct dmat2x3"; break; + case EOpConstructDMat2x4: out.debug << "Construct dmat2x4"; break; + case EOpConstructDMat3x2: out.debug << "Construct dmat3x2"; break; + case EOpConstructDMat3x3: out.debug << "Construct dmat3"; break; + case EOpConstructDMat3x4: out.debug << "Construct dmat3x4"; break; + case EOpConstructDMat4x2: out.debug << "Construct dmat4x2"; break; + case EOpConstructDMat4x3: out.debug << "Construct dmat4x3"; break; + case EOpConstructDMat4x4: out.debug << "Construct dmat4"; break; + case EOpConstructIMat2x2: out.debug << "Construct imat2"; break; + case EOpConstructIMat2x3: out.debug << "Construct imat2x3"; break; + case EOpConstructIMat2x4: out.debug << "Construct imat2x4"; break; + case EOpConstructIMat3x2: out.debug << "Construct imat3x2"; break; + case EOpConstructIMat3x3: out.debug << "Construct imat3"; break; + case EOpConstructIMat3x4: out.debug << "Construct imat3x4"; break; + case EOpConstructIMat4x2: out.debug << "Construct imat4x2"; break; + case EOpConstructIMat4x3: out.debug << "Construct imat4x3"; break; + case EOpConstructIMat4x4: out.debug << "Construct imat4"; break; + case EOpConstructUMat2x2: out.debug << "Construct umat2"; break; + case EOpConstructUMat2x3: out.debug << "Construct umat2x3"; break; + case EOpConstructUMat2x4: out.debug << "Construct umat2x4"; break; + case EOpConstructUMat3x2: out.debug << "Construct umat3x2"; break; + case EOpConstructUMat3x3: out.debug << "Construct umat3"; break; + case EOpConstructUMat3x4: out.debug << "Construct umat3x4"; break; + case EOpConstructUMat4x2: out.debug << "Construct umat4x2"; break; + case EOpConstructUMat4x3: out.debug << "Construct umat4x3"; break; + case EOpConstructUMat4x4: out.debug << "Construct umat4"; break; + case EOpConstructBMat2x2: out.debug << "Construct bmat2"; break; + case EOpConstructBMat2x3: out.debug << "Construct bmat2x3"; break; + case EOpConstructBMat2x4: out.debug << "Construct bmat2x4"; break; + case EOpConstructBMat3x2: out.debug << "Construct bmat3x2"; break; + case EOpConstructBMat3x3: out.debug << "Construct bmat3"; break; + case EOpConstructBMat3x4: out.debug << 
"Construct bmat3x4"; break; + case EOpConstructBMat4x2: out.debug << "Construct bmat4x2"; break; + case EOpConstructBMat4x3: out.debug << "Construct bmat4x3"; break; + case EOpConstructBMat4x4: out.debug << "Construct bmat4"; break; + case EOpConstructFloat16: out.debug << "Construct float16_t"; break; + case EOpConstructF16Vec2: out.debug << "Construct f16vec2"; break; + case EOpConstructF16Vec3: out.debug << "Construct f16vec3"; break; + case EOpConstructF16Vec4: out.debug << "Construct f16vec4"; break; + case EOpConstructF16Mat2x2: out.debug << "Construct f16mat2"; break; + case EOpConstructF16Mat2x3: out.debug << "Construct f16mat2x3"; break; + case EOpConstructF16Mat2x4: out.debug << "Construct f16mat2x4"; break; + case EOpConstructF16Mat3x2: out.debug << "Construct f16mat3x2"; break; + case EOpConstructF16Mat3x3: out.debug << "Construct f16mat3"; break; + case EOpConstructF16Mat3x4: out.debug << "Construct f16mat3x4"; break; + case EOpConstructF16Mat4x2: out.debug << "Construct f16mat4x2"; break; + case EOpConstructF16Mat4x3: out.debug << "Construct f16mat4x3"; break; + case EOpConstructF16Mat4x4: out.debug << "Construct f16mat4"; break; + case EOpConstructStruct: out.debug << "Construct structure"; break; + case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break; + case EOpConstructReference: out.debug << "Construct reference"; break; + case EOpConstructCooperativeMatrix: out.debug << "Construct cooperative matrix"; break; + + case EOpLessThan: out.debug << "Compare Less Than"; break; + case EOpGreaterThan: out.debug << "Compare Greater Than"; break; + case EOpLessThanEqual: out.debug << "Compare Less Than or Equal"; break; + case EOpGreaterThanEqual: out.debug << "Compare Greater Than or Equal"; break; + case EOpVectorEqual: out.debug << "Equal"; break; + case EOpVectorNotEqual: out.debug << "NotEqual"; break; + + case EOpMod: out.debug << "mod"; break; + case EOpModf: out.debug << "modf"; break; + case EOpPow: out.debug << "pow"; break; + + case EOpAtan: out.debug << "arc tangent"; break; + + case EOpMin: out.debug << "min"; break; + case EOpMax: out.debug << "max"; break; + case EOpClamp: out.debug << "clamp"; break; + case EOpMix: out.debug << "mix"; break; + case EOpStep: out.debug << "step"; break; + case EOpSmoothStep: out.debug << "smoothstep"; break; + + case EOpDistance: out.debug << "distance"; break; + case EOpDot: out.debug << "dot-product"; break; + case EOpCross: out.debug << "cross-product"; break; + case EOpFaceForward: out.debug << "face-forward"; break; + case EOpReflect: out.debug << "reflect"; break; + case EOpRefract: out.debug << "refract"; break; + case EOpMul: out.debug << "component-wise multiply"; break; + case EOpOuterProduct: out.debug << "outer product"; break; + + case EOpEmitVertex: out.debug << "EmitVertex"; break; + case EOpEndPrimitive: out.debug << "EndPrimitive"; break; + + case EOpBarrier: out.debug << "Barrier"; break; + case EOpMemoryBarrier: out.debug << "MemoryBarrier"; break; + case EOpMemoryBarrierAtomicCounter: out.debug << "MemoryBarrierAtomicCounter"; break; + case EOpMemoryBarrierBuffer: out.debug << "MemoryBarrierBuffer"; break; + case EOpMemoryBarrierImage: out.debug << "MemoryBarrierImage"; break; + case EOpMemoryBarrierShared: out.debug << "MemoryBarrierShared"; break; + case EOpGroupMemoryBarrier: out.debug << "GroupMemoryBarrier"; break; + + case EOpReadInvocation: out.debug << "readInvocation"; break; + + case EOpSwizzleInvocations: out.debug << "swizzleInvocations"; break; + case 
EOpSwizzleInvocationsMasked: out.debug << "swizzleInvocationsMasked"; break; + case EOpWriteInvocation: out.debug << "writeInvocation"; break; + + case EOpMin3: out.debug << "min3"; break; + case EOpMax3: out.debug << "max3"; break; + case EOpMid3: out.debug << "mid3"; break; + case EOpTime: out.debug << "time"; break; + + case EOpAtomicAdd: out.debug << "AtomicAdd"; break; + case EOpAtomicMin: out.debug << "AtomicMin"; break; + case EOpAtomicMax: out.debug << "AtomicMax"; break; + case EOpAtomicAnd: out.debug << "AtomicAnd"; break; + case EOpAtomicOr: out.debug << "AtomicOr"; break; + case EOpAtomicXor: out.debug << "AtomicXor"; break; + case EOpAtomicExchange: out.debug << "AtomicExchange"; break; + case EOpAtomicCompSwap: out.debug << "AtomicCompSwap"; break; + case EOpAtomicLoad: out.debug << "AtomicLoad"; break; + case EOpAtomicStore: out.debug << "AtomicStore"; break; + + case EOpAtomicCounterAdd: out.debug << "AtomicCounterAdd"; break; + case EOpAtomicCounterSubtract: out.debug << "AtomicCounterSubtract"; break; + case EOpAtomicCounterMin: out.debug << "AtomicCounterMin"; break; + case EOpAtomicCounterMax: out.debug << "AtomicCounterMax"; break; + case EOpAtomicCounterAnd: out.debug << "AtomicCounterAnd"; break; + case EOpAtomicCounterOr: out.debug << "AtomicCounterOr"; break; + case EOpAtomicCounterXor: out.debug << "AtomicCounterXor"; break; + case EOpAtomicCounterExchange: out.debug << "AtomicCounterExchange"; break; + case EOpAtomicCounterCompSwap: out.debug << "AtomicCounterCompSwap"; break; + + case EOpImageQuerySize: out.debug << "imageQuerySize"; break; + case EOpImageQuerySamples: out.debug << "imageQuerySamples"; break; + case EOpImageLoad: out.debug << "imageLoad"; break; + case EOpImageStore: out.debug << "imageStore"; break; + case EOpImageAtomicAdd: out.debug << "imageAtomicAdd"; break; + case EOpImageAtomicMin: out.debug << "imageAtomicMin"; break; + case EOpImageAtomicMax: out.debug << "imageAtomicMax"; break; + case EOpImageAtomicAnd: out.debug << "imageAtomicAnd"; break; + case EOpImageAtomicOr: out.debug << "imageAtomicOr"; break; + case EOpImageAtomicXor: out.debug << "imageAtomicXor"; break; + case EOpImageAtomicExchange: out.debug << "imageAtomicExchange"; break; + case EOpImageAtomicCompSwap: out.debug << "imageAtomicCompSwap"; break; + case EOpImageAtomicLoad: out.debug << "imageAtomicLoad"; break; + case EOpImageAtomicStore: out.debug << "imageAtomicStore"; break; + case EOpImageLoadLod: out.debug << "imageLoadLod"; break; + case EOpImageStoreLod: out.debug << "imageStoreLod"; break; + + case EOpTextureQuerySize: out.debug << "textureSize"; break; + case EOpTextureQueryLod: out.debug << "textureQueryLod"; break; + case EOpTextureQueryLevels: out.debug << "textureQueryLevels"; break; + case EOpTextureQuerySamples: out.debug << "textureSamples"; break; + case EOpTexture: out.debug << "texture"; break; + case EOpTextureProj: out.debug << "textureProj"; break; + case EOpTextureLod: out.debug << "textureLod"; break; + case EOpTextureOffset: out.debug << "textureOffset"; break; + case EOpTextureFetch: out.debug << "textureFetch"; break; + case EOpTextureFetchOffset: out.debug << "textureFetchOffset"; break; + case EOpTextureProjOffset: out.debug << "textureProjOffset"; break; + case EOpTextureLodOffset: out.debug << "textureLodOffset"; break; + case EOpTextureProjLod: out.debug << "textureProjLod"; break; + case EOpTextureProjLodOffset: out.debug << "textureProjLodOffset"; break; + case EOpTextureGrad: out.debug << "textureGrad"; break; + case 
EOpTextureGradOffset: out.debug << "textureGradOffset"; break; + case EOpTextureProjGrad: out.debug << "textureProjGrad"; break; + case EOpTextureProjGradOffset: out.debug << "textureProjGradOffset"; break; + case EOpTextureGather: out.debug << "textureGather"; break; + case EOpTextureGatherOffset: out.debug << "textureGatherOffset"; break; + case EOpTextureGatherOffsets: out.debug << "textureGatherOffsets"; break; + case EOpTextureClamp: out.debug << "textureClamp"; break; + case EOpTextureOffsetClamp: out.debug << "textureOffsetClamp"; break; + case EOpTextureGradClamp: out.debug << "textureGradClamp"; break; + case EOpTextureGradOffsetClamp: out.debug << "textureGradOffsetClamp"; break; + case EOpTextureGatherLod: out.debug << "textureGatherLod"; break; + case EOpTextureGatherLodOffset: out.debug << "textureGatherLodOffset"; break; + case EOpTextureGatherLodOffsets: out.debug << "textureGatherLodOffsets"; break; + + case EOpSparseTexture: out.debug << "sparseTexture"; break; + case EOpSparseTextureOffset: out.debug << "sparseTextureOffset"; break; + case EOpSparseTextureLod: out.debug << "sparseTextureLod"; break; + case EOpSparseTextureLodOffset: out.debug << "sparseTextureLodOffset"; break; + case EOpSparseTextureFetch: out.debug << "sparseTexelFetch"; break; + case EOpSparseTextureFetchOffset: out.debug << "sparseTexelFetchOffset"; break; + case EOpSparseTextureGrad: out.debug << "sparseTextureGrad"; break; + case EOpSparseTextureGradOffset: out.debug << "sparseTextureGradOffset"; break; + case EOpSparseTextureGather: out.debug << "sparseTextureGather"; break; + case EOpSparseTextureGatherOffset: out.debug << "sparseTextureGatherOffset"; break; + case EOpSparseTextureGatherOffsets: out.debug << "sparseTextureGatherOffsets"; break; + case EOpSparseImageLoad: out.debug << "sparseImageLoad"; break; + case EOpSparseTextureClamp: out.debug << "sparseTextureClamp"; break; + case EOpSparseTextureOffsetClamp: out.debug << "sparseTextureOffsetClamp"; break; + case EOpSparseTextureGradClamp: out.debug << "sparseTextureGradClamp"; break; + case EOpSparseTextureGradOffsetClamp: out.debug << "sparseTextureGradOffsetClamp"; break; + case EOpSparseTextureGatherLod: out.debug << "sparseTextureGatherLod"; break; + case EOpSparseTextureGatherLodOffset: out.debug << "sparseTextureGatherLodOffset"; break; + case EOpSparseTextureGatherLodOffsets: out.debug << "sparseTextureGatherLodOffsets"; break; + case EOpSparseImageLoadLod: out.debug << "sparseImageLoadLod"; break; + case EOpImageSampleFootprintNV: out.debug << "imageSampleFootprintNV"; break; + case EOpImageSampleFootprintClampNV: out.debug << "imageSampleFootprintClampNV"; break; + case EOpImageSampleFootprintLodNV: out.debug << "imageSampleFootprintLodNV"; break; + case EOpImageSampleFootprintGradNV: out.debug << "imageSampleFootprintGradNV"; break; + case EOpImageSampleFootprintGradClampNV: out.debug << "imageSampleFootprintGradClampNV"; break; + case EOpAddCarry: out.debug << "addCarry"; break; + case EOpSubBorrow: out.debug << "subBorrow"; break; + case EOpUMulExtended: out.debug << "uMulExtended"; break; + case EOpIMulExtended: out.debug << "iMulExtended"; break; + case EOpBitfieldExtract: out.debug << "bitfieldExtract"; break; + case EOpBitfieldInsert: out.debug << "bitfieldInsert"; break; + + case EOpFma: out.debug << "fma"; break; + case EOpFrexp: out.debug << "frexp"; break; + case EOpLdexp: out.debug << "ldexp"; break; + + case EOpInterpolateAtSample: out.debug << "interpolateAtSample"; break; + case EOpInterpolateAtOffset: out.debug << 
"interpolateAtOffset"; break; + case EOpInterpolateAtVertex: out.debug << "interpolateAtVertex"; break; + + case EOpSinCos: out.debug << "sincos"; break; + case EOpGenMul: out.debug << "mul"; break; + + case EOpAllMemoryBarrierWithGroupSync: out.debug << "AllMemoryBarrierWithGroupSync"; break; + case EOpDeviceMemoryBarrier: out.debug << "DeviceMemoryBarrier"; break; + case EOpDeviceMemoryBarrierWithGroupSync: out.debug << "DeviceMemoryBarrierWithGroupSync"; break; + case EOpWorkgroupMemoryBarrier: out.debug << "WorkgroupMemoryBarrier"; break; + case EOpWorkgroupMemoryBarrierWithGroupSync: out.debug << "WorkgroupMemoryBarrierWithGroupSync"; break; + + case EOpSubgroupBarrier: out.debug << "subgroupBarrier"; break; + case EOpSubgroupMemoryBarrier: out.debug << "subgroupMemoryBarrier"; break; + case EOpSubgroupMemoryBarrierBuffer: out.debug << "subgroupMemoryBarrierBuffer"; break; + case EOpSubgroupMemoryBarrierImage: out.debug << "subgroupMemoryBarrierImage"; break; + case EOpSubgroupMemoryBarrierShared: out.debug << "subgroupMemoryBarrierShared"; break; + case EOpSubgroupElect: out.debug << "subgroupElect"; break; + case EOpSubgroupAll: out.debug << "subgroupAll"; break; + case EOpSubgroupAny: out.debug << "subgroupAny"; break; + case EOpSubgroupAllEqual: out.debug << "subgroupAllEqual"; break; + case EOpSubgroupBroadcast: out.debug << "subgroupBroadcast"; break; + case EOpSubgroupBroadcastFirst: out.debug << "subgroupBroadcastFirst"; break; + case EOpSubgroupBallot: out.debug << "subgroupBallot"; break; + case EOpSubgroupInverseBallot: out.debug << "subgroupInverseBallot"; break; + case EOpSubgroupBallotBitExtract: out.debug << "subgroupBallotBitExtract"; break; + case EOpSubgroupBallotBitCount: out.debug << "subgroupBallotBitCount"; break; + case EOpSubgroupBallotInclusiveBitCount: out.debug << "subgroupBallotInclusiveBitCount"; break; + case EOpSubgroupBallotExclusiveBitCount: out.debug << "subgroupBallotExclusiveBitCount"; break; + case EOpSubgroupBallotFindLSB: out.debug << "subgroupBallotFindLSB"; break; + case EOpSubgroupBallotFindMSB: out.debug << "subgroupBallotFindMSB"; break; + case EOpSubgroupShuffle: out.debug << "subgroupShuffle"; break; + case EOpSubgroupShuffleXor: out.debug << "subgroupShuffleXor"; break; + case EOpSubgroupShuffleUp: out.debug << "subgroupShuffleUp"; break; + case EOpSubgroupShuffleDown: out.debug << "subgroupShuffleDown"; break; + case EOpSubgroupAdd: out.debug << "subgroupAdd"; break; + case EOpSubgroupMul: out.debug << "subgroupMul"; break; + case EOpSubgroupMin: out.debug << "subgroupMin"; break; + case EOpSubgroupMax: out.debug << "subgroupMax"; break; + case EOpSubgroupAnd: out.debug << "subgroupAnd"; break; + case EOpSubgroupOr: out.debug << "subgroupOr"; break; + case EOpSubgroupXor: out.debug << "subgroupXor"; break; + case EOpSubgroupInclusiveAdd: out.debug << "subgroupInclusiveAdd"; break; + case EOpSubgroupInclusiveMul: out.debug << "subgroupInclusiveMul"; break; + case EOpSubgroupInclusiveMin: out.debug << "subgroupInclusiveMin"; break; + case EOpSubgroupInclusiveMax: out.debug << "subgroupInclusiveMax"; break; + case EOpSubgroupInclusiveAnd: out.debug << "subgroupInclusiveAnd"; break; + case EOpSubgroupInclusiveOr: out.debug << "subgroupInclusiveOr"; break; + case EOpSubgroupInclusiveXor: out.debug << "subgroupInclusiveXor"; break; + case EOpSubgroupExclusiveAdd: out.debug << "subgroupExclusiveAdd"; break; + case EOpSubgroupExclusiveMul: out.debug << "subgroupExclusiveMul"; break; + case EOpSubgroupExclusiveMin: out.debug << 
"subgroupExclusiveMin"; break; + case EOpSubgroupExclusiveMax: out.debug << "subgroupExclusiveMax"; break; + case EOpSubgroupExclusiveAnd: out.debug << "subgroupExclusiveAnd"; break; + case EOpSubgroupExclusiveOr: out.debug << "subgroupExclusiveOr"; break; + case EOpSubgroupExclusiveXor: out.debug << "subgroupExclusiveXor"; break; + case EOpSubgroupClusteredAdd: out.debug << "subgroupClusteredAdd"; break; + case EOpSubgroupClusteredMul: out.debug << "subgroupClusteredMul"; break; + case EOpSubgroupClusteredMin: out.debug << "subgroupClusteredMin"; break; + case EOpSubgroupClusteredMax: out.debug << "subgroupClusteredMax"; break; + case EOpSubgroupClusteredAnd: out.debug << "subgroupClusteredAnd"; break; + case EOpSubgroupClusteredOr: out.debug << "subgroupClusteredOr"; break; + case EOpSubgroupClusteredXor: out.debug << "subgroupClusteredXor"; break; + case EOpSubgroupQuadBroadcast: out.debug << "subgroupQuadBroadcast"; break; + case EOpSubgroupQuadSwapHorizontal: out.debug << "subgroupQuadSwapHorizontal"; break; + case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break; + case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break; + + case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break; + case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break; + case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break; + case EOpSubgroupPartitionedMin: out.debug << "subgroupPartitionedMinNV"; break; + case EOpSubgroupPartitionedMax: out.debug << "subgroupPartitionedMaxNV"; break; + case EOpSubgroupPartitionedAnd: out.debug << "subgroupPartitionedAndNV"; break; + case EOpSubgroupPartitionedOr: out.debug << "subgroupPartitionedOrNV"; break; + case EOpSubgroupPartitionedXor: out.debug << "subgroupPartitionedXorNV"; break; + case EOpSubgroupPartitionedInclusiveAdd: out.debug << "subgroupPartitionedInclusiveAddNV"; break; + case EOpSubgroupPartitionedInclusiveMul: out.debug << "subgroupPartitionedInclusiveMulNV"; break; + case EOpSubgroupPartitionedInclusiveMin: out.debug << "subgroupPartitionedInclusiveMinNV"; break; + case EOpSubgroupPartitionedInclusiveMax: out.debug << "subgroupPartitionedInclusiveMaxNV"; break; + case EOpSubgroupPartitionedInclusiveAnd: out.debug << "subgroupPartitionedInclusiveAndNV"; break; + case EOpSubgroupPartitionedInclusiveOr: out.debug << "subgroupPartitionedInclusiveOrNV"; break; + case EOpSubgroupPartitionedInclusiveXor: out.debug << "subgroupPartitionedInclusiveXorNV"; break; + case EOpSubgroupPartitionedExclusiveAdd: out.debug << "subgroupPartitionedExclusiveAddNV"; break; + case EOpSubgroupPartitionedExclusiveMul: out.debug << "subgroupPartitionedExclusiveMulNV"; break; + case EOpSubgroupPartitionedExclusiveMin: out.debug << "subgroupPartitionedExclusiveMinNV"; break; + case EOpSubgroupPartitionedExclusiveMax: out.debug << "subgroupPartitionedExclusiveMaxNV"; break; + case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break; + case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break; + case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break; + + case EOpSubpassLoad: out.debug << "subpassLoad"; break; + case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break; + + case EOpTrace: out.debug << "traceNV"; break; + case EOpReportIntersection: out.debug << "reportIntersectionNV"; break; + case EOpIgnoreIntersection: out.debug << "ignoreIntersectionNV"; 
break; + case EOpTerminateRay: out.debug << "terminateRayNV"; break; + case EOpExecuteCallable: out.debug << "executeCallableNV"; break; + case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break; + + case EOpRayQueryInitialize: out.debug << "rayQueryInitializeEXT"; break; + case EOpRayQueryTerminate: out.debug << "rayQueryTerminateEXT"; break; + case EOpRayQueryGenerateIntersection: out.debug << "rayQueryGenerateIntersectionEXT"; break; + case EOpRayQueryConfirmIntersection: out.debug << "rayQueryConfirmIntersectionEXT"; break; + case EOpRayQueryProceed: out.debug << "rayQueryProceedEXT"; break; + case EOpRayQueryGetIntersectionType: out.debug << "rayQueryGetIntersectionTypeEXT"; break; + case EOpRayQueryGetRayTMin: out.debug << "rayQueryGetRayTMinEXT"; break; + case EOpRayQueryGetRayFlags: out.debug << "rayQueryGetRayFlagsEXT"; break; + case EOpRayQueryGetIntersectionT: out.debug << "rayQueryGetIntersectionTEXT"; break; + case EOpRayQueryGetIntersectionInstanceCustomIndex: out.debug << "rayQueryGetIntersectionInstanceCustomIndexEXT"; break; + case EOpRayQueryGetIntersectionInstanceId: out.debug << "rayQueryGetIntersectionInstanceIdEXT"; break; + case EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: out.debug << "rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT"; break; + case EOpRayQueryGetIntersectionGeometryIndex: out.debug << "rayQueryGetIntersectionGeometryIndexEXT"; break; + case EOpRayQueryGetIntersectionPrimitiveIndex: out.debug << "rayQueryGetIntersectionPrimitiveIndexEXT"; break; + case EOpRayQueryGetIntersectionBarycentrics: out.debug << "rayQueryGetIntersectionBarycentricsEXT"; break; + case EOpRayQueryGetIntersectionFrontFace: out.debug << "rayQueryGetIntersectionFrontFaceEXT"; break; + case EOpRayQueryGetIntersectionCandidateAABBOpaque: out.debug << "rayQueryGetIntersectionCandidateAABBOpaqueEXT"; break; + case EOpRayQueryGetIntersectionObjectRayDirection: out.debug << "rayQueryGetIntersectionObjectRayDirectionEXT"; break; + case EOpRayQueryGetIntersectionObjectRayOrigin: out.debug << "rayQueryGetIntersectionObjectRayOriginEXT"; break; + case EOpRayQueryGetWorldRayDirection: out.debug << "rayQueryGetWorldRayDirectionEXT"; break; + case EOpRayQueryGetWorldRayOrigin: out.debug << "rayQueryGetWorldRayOriginEXT"; break; + case EOpRayQueryGetIntersectionObjectToWorld: out.debug << "rayQueryGetIntersectionObjectToWorldEXT"; break; + case EOpRayQueryGetIntersectionWorldToObject: out.debug << "rayQueryGetIntersectionWorldToObjectEXT"; break; + + case EOpCooperativeMatrixLoad: out.debug << "Load cooperative matrix"; break; + case EOpCooperativeMatrixStore: out.debug << "Store cooperative matrix"; break; + case EOpCooperativeMatrixMulAdd: out.debug << "MulAdd cooperative matrices"; break; + + case EOpIsHelperInvocation: out.debug << "IsHelperInvocation"; break; + case EOpDebugPrintf: out.debug << "Debug printf"; break; + + default: out.debug.message(EPrefixError, "Bad aggregation op"); + } + + if (node->getOp() != EOpSequence && node->getOp() != EOpParameters) + out.debug << " (" << node->getCompleteString() << ")"; + + out.debug << "\n"; + + return true; +} + +bool TOutputTraverser::visitSelection(TVisit /* visit */, TIntermSelection* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + out.debug << "Test condition and select"; + out.debug << " (" << node->getCompleteString() << ")"; + + if (node->getShortCircuit() == false) + out.debug << ": no shortcircuit"; + if (node->getFlatten()) 
+ out.debug << ": Flatten"; + if (node->getDontFlatten()) + out.debug << ": DontFlatten"; + out.debug << "\n"; + + ++depth; + + OutputTreeText(out, node, depth); + out.debug << "Condition\n"; + node->getCondition()->traverse(this); + + OutputTreeText(out, node, depth); + if (node->getTrueBlock()) { + out.debug << "true case\n"; + node->getTrueBlock()->traverse(this); + } else + out.debug << "true case is null\n"; + + if (node->getFalseBlock()) { + OutputTreeText(out, node, depth); + out.debug << "false case\n"; + node->getFalseBlock()->traverse(this); + } + + --depth; + + return false; +} + +// Print infinities and NaNs, and numbers in a portable way. +// Goals: +// - portable (across IEEE 754 platforms) +// - shows all possible IEEE values +// - shows simple numbers in a simple way, e.g., no leading/trailing 0s +// - shows all digits, no premature rounding +static void OutputDouble(TInfoSink& out, double value, TOutputTraverser::EExtraOutput extra) +{ + if (IsInfinity(value)) { + if (value < 0) + out.debug << "-1.#INF"; + else + out.debug << "+1.#INF"; + } else if (IsNan(value)) + out.debug << "1.#IND"; + else { + const int maxSize = 340; + char buf[maxSize]; + const char* format = "%f"; + if (fabs(value) > 0.0 && (fabs(value) < 1e-5 || fabs(value) > 1e12)) + format = "%-.13e"; + int len = snprintf(buf, maxSize, format, value); + assert(len < maxSize); + + // remove a leading zero in the 100s slot in exponent; it is not portable + // pattern: XX...XXXe+0XX or XX...XXXe-0XX + if (len > 5) { + if (buf[len-5] == 'e' && (buf[len-4] == '+' || buf[len-4] == '-') && buf[len-3] == '0') { + buf[len-3] = buf[len-2]; + buf[len-2] = buf[len-1]; + buf[len-1] = '\0'; + } + } + + out.debug << buf; + + switch (extra) { + case TOutputTraverser::BinaryDoubleOutput: + { + uint64_t b; + static_assert(sizeof(b) == sizeof(value), "sizeof(uint64_t) != sizeof(double)"); + memcpy(&b, &value, sizeof(b)); + + out.debug << " : "; + for (size_t i = 0; i < 8 * sizeof(value); ++i, ++b) { + out.debug << ((b & 0x8000000000000000) != 0 ? 
"1" : "0"); + b <<= 1; + } + break; + } + default: + break; + } + } +} + +static void OutputConstantUnion(TInfoSink& out, const TIntermTyped* node, const TConstUnionArray& constUnion, + TOutputTraverser::EExtraOutput extra, int depth) +{ + int size = node->getType().computeNumComponents(); + + for (int i = 0; i < size; i++) { + OutputTreeText(out, node, depth); + switch (constUnion[i].getType()) { + case EbtBool: + if (constUnion[i].getBConst()) + out.debug << "true"; + else + out.debug << "false"; + + out.debug << " (" << "const bool" << ")"; + + out.debug << "\n"; + break; + case EbtFloat: + case EbtDouble: + case EbtFloat16: + OutputDouble(out, constUnion[i].getDConst(), extra); + out.debug << "\n"; + break; + case EbtInt8: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%d (%s)", constUnion[i].getI8Const(), "const int8_t"); + + out.debug << buf << "\n"; + } + break; + case EbtUint8: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%u (%s)", constUnion[i].getU8Const(), "const uint8_t"); + + out.debug << buf << "\n"; + } + break; + case EbtInt16: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%d (%s)", constUnion[i].getI16Const(), "const int16_t"); + + out.debug << buf << "\n"; + } + break; + case EbtUint16: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%u (%s)", constUnion[i].getU16Const(), "const uint16_t"); + + out.debug << buf << "\n"; + } + break; + case EbtInt: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%d (%s)", constUnion[i].getIConst(), "const int"); + + out.debug << buf << "\n"; + } + break; + case EbtUint: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%u (%s)", constUnion[i].getUConst(), "const uint"); + + out.debug << buf << "\n"; + } + break; + case EbtInt64: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%lld (%s)", constUnion[i].getI64Const(), "const int64_t"); + + out.debug << buf << "\n"; + } + break; + case EbtUint64: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%llu (%s)", constUnion[i].getU64Const(), "const uint64_t"); + + out.debug << buf << "\n"; + } + break; + default: + out.info.message(EPrefixInternalError, "Unknown constant", node->getLoc()); + break; + } + } +} + +void TOutputTraverser::visitConstantUnion(TIntermConstantUnion* node) +{ + OutputTreeText(infoSink, node, depth); + infoSink.debug << "Constant:\n"; + + OutputConstantUnion(infoSink, node, node->getConstArray(), extraOutput, depth + 1); +} + +void TOutputTraverser::visitSymbol(TIntermSymbol* node) +{ + OutputTreeText(infoSink, node, depth); + + infoSink.debug << "'" << node->getName() << "' (" << node->getCompleteString() << ")\n"; + + if (! node->getConstArray().empty()) + OutputConstantUnion(infoSink, node, node->getConstArray(), extraOutput, depth + 1); + else if (node->getConstSubtree()) { + incrementDepth(node); + node->getConstSubtree()->traverse(this); + decrementDepth(); + } +} + +bool TOutputTraverser::visitLoop(TVisit /* visit */, TIntermLoop* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + out.debug << "Loop with condition "; + if (! 
node->testFirst()) + out.debug << "not "; + out.debug << "tested first"; + + if (node->getUnroll()) + out.debug << ": Unroll"; + if (node->getDontUnroll()) + out.debug << ": DontUnroll"; + if (node->getLoopDependency()) { + out.debug << ": Dependency "; + out.debug << node->getLoopDependency(); + } + out.debug << "\n"; + + ++depth; + + OutputTreeText(infoSink, node, depth); + if (node->getTest()) { + out.debug << "Loop Condition\n"; + node->getTest()->traverse(this); + } else + out.debug << "No loop condition\n"; + + OutputTreeText(infoSink, node, depth); + if (node->getBody()) { + out.debug << "Loop Body\n"; + node->getBody()->traverse(this); + } else + out.debug << "No loop body\n"; + + if (node->getTerminal()) { + OutputTreeText(infoSink, node, depth); + out.debug << "Loop Terminal Expression\n"; + node->getTerminal()->traverse(this); + } + + --depth; + + return false; +} + +bool TOutputTraverser::visitBranch(TVisit /* visit */, TIntermBranch* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + switch (node->getFlowOp()) { + case EOpKill: out.debug << "Branch: Kill"; break; + case EOpBreak: out.debug << "Branch: Break"; break; + case EOpContinue: out.debug << "Branch: Continue"; break; + case EOpReturn: out.debug << "Branch: Return"; break; + case EOpCase: out.debug << "case: "; break; + case EOpDemote: out.debug << "Demote"; break; + case EOpDefault: out.debug << "default: "; break; + default: out.debug << "Branch: Unknown Branch"; break; + } + + if (node->getExpression()) { + out.debug << " with expression\n"; + ++depth; + node->getExpression()->traverse(this); + --depth; + } else + out.debug << "\n"; + + return false; +} + +bool TOutputTraverser::visitSwitch(TVisit /* visit */, TIntermSwitch* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + out.debug << "switch"; + + if (node->getFlatten()) + out.debug << ": Flatten"; + if (node->getDontFlatten()) + out.debug << ": DontFlatten"; + out.debug << "\n"; + + OutputTreeText(out, node, depth); + out.debug << "condition\n"; + ++depth; + node->getCondition()->traverse(this); + + --depth; + OutputTreeText(out, node, depth); + out.debug << "body\n"; + ++depth; + node->getBody()->traverse(this); + + --depth; + + return false; +} + +// +// This function is the one to call externally to start the traversal. +// Individual functions can be initialized to 0 to skip processing of that +// type of node. Its children will still be processed. 
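+//
+// A minimal usage sketch (illustrative only, not part of this file's logic;
+// the setup, and the use of TProgram::getIntermediate and TInfoSink from the
+// public glslang C++ interface, are assumptions here):
+//
+//     glslang::TProgram program;            // ...shaders added and linked...
+//     glslang::TIntermediate* ir = program.getIntermediate(EShLangFragment);
+//     TInfoSink sink;
+//     if (ir != nullptr)
+//         ir->output(sink, true);           // 'true' also dumps the full tree
+//     printf("%s", sink.debug.c_str());     // the dump goes to the debug sink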
+// +void TIntermediate::output(TInfoSink& infoSink, bool tree) +{ + infoSink.debug << "Shader version: " << version << "\n"; + if (requestedExtensions.size() > 0) { + for (auto extIt = requestedExtensions.begin(); extIt != requestedExtensions.end(); ++extIt) + infoSink.debug << "Requested " << extIt->first << "\n"; + } + + if (xfbMode) + infoSink.debug << "in xfb mode\n"; + + switch (language) { + case EShLangVertex: + break; + + case EShLangTessControl: + infoSink.debug << "vertices = " << vertices << "\n"; + + if (inputPrimitive != ElgNone) + infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n"; + if (vertexSpacing != EvsNone) + infoSink.debug << "vertex spacing = " << TQualifier::getVertexSpacingString(vertexSpacing) << "\n"; + if (vertexOrder != EvoNone) + infoSink.debug << "triangle order = " << TQualifier::getVertexOrderString(vertexOrder) << "\n"; + break; + + case EShLangTessEvaluation: + infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n"; + infoSink.debug << "vertex spacing = " << TQualifier::getVertexSpacingString(vertexSpacing) << "\n"; + infoSink.debug << "triangle order = " << TQualifier::getVertexOrderString(vertexOrder) << "\n"; + if (pointMode) + infoSink.debug << "using point mode\n"; + break; + + case EShLangGeometry: + infoSink.debug << "invocations = " << invocations << "\n"; + infoSink.debug << "max_vertices = " << vertices << "\n"; + infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n"; + infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n"; + break; + + case EShLangFragment: + if (pixelCenterInteger) + infoSink.debug << "gl_FragCoord pixel center is integer\n"; + if (originUpperLeft) + infoSink.debug << "gl_FragCoord origin is upper left\n"; + if (earlyFragmentTests) + infoSink.debug << "using early_fragment_tests\n"; + if (postDepthCoverage) + infoSink.debug << "using post_depth_coverage\n"; + if (depthLayout != EldNone) + infoSink.debug << "using " << TQualifier::getLayoutDepthString(depthLayout) << "\n"; + if (blendEquations != 0) { + infoSink.debug << "using"; + // blendEquations is a mask, decode it + for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) { + if (blendEquations & (1 << be)) + infoSink.debug << " " << TQualifier::getBlendEquationString(be); + } + infoSink.debug << "\n"; + } + if (interlockOrdering != EioNone) + infoSink.debug << "interlock ordering = " << TQualifier::getInterlockOrderingString(interlockOrdering) << "\n"; + break; + + case EShLangMeshNV: + infoSink.debug << "max_vertices = " << vertices << "\n"; + infoSink.debug << "max_primitives = " << primitives << "\n"; + infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n"; + // Fall through + case EShLangTaskNV: + // Fall through + case EShLangCompute: + infoSink.debug << "local_size = (" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << ")\n"; + { + if (localSizeSpecId[0] != TQualifier::layoutNotSet || + localSizeSpecId[1] != TQualifier::layoutNotSet || + localSizeSpecId[2] != TQualifier::layoutNotSet) { + infoSink.debug << "local_size ids = (" << + localSizeSpecId[0] << ", " << + localSizeSpecId[1] << ", " << + localSizeSpecId[2] << ")\n"; + } + } + break; + + default: + break; + } + + if (treeRoot == 0 || ! 
tree) + return; + + TOutputTraverser it(infoSink); + if (getBinaryDoubleOutput()) + it.setDoubleOutput(TOutputTraverser::BinaryDoubleOutput); + treeRoot->traverse(&it); +} + +} // end namespace glslang + +#endif // not GLSLANG_WEB diff --git a/dep/glslang/glslang/MachineIndependent/iomapper.cpp b/dep/glslang/glslang/MachineIndependent/iomapper.cpp new file mode 100644 index 000000000..4e409e06a --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/iomapper.cpp @@ -0,0 +1,1259 @@ +// +// Copyright (C) 2016-2017 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef GLSLANG_WEB + +#include "../Include/Common.h" +#include "../Include/InfoSink.h" + +#include "gl_types.h" +#include "iomapper.h" + +// +// Map IO bindings. +// +// High-level algorithm for one stage: +// +// 1. Traverse all code (live+dead) to find the explicitly provided bindings. +// +// 2. Traverse (just) the live code to determine which non-provided bindings +// require auto-numbering. We do not auto-number dead ones. +// +// 3. Traverse all the code to apply the bindings: +// a. explicitly given bindings are offset according to their type +// b. implicit live bindings are auto-numbered into the holes, using +// any open binding slot. +// c. implicit dead bindings are left un-bound. 
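+//
+// Illustrative example (not part of the original comment): given
+//
+//     layout(binding = 1) uniform sampler2D explicitTex; // (a) keeps slot 1, plus any shift
+//     uniform sampler2D liveTex;                         // referenced by the entry point
+//     uniform sampler2D deadTex;                         // never referenced
+//
+// step (b) auto-numbers liveTex into the first open slot (binding 0 here),
+// and deadTex is left unbound per (c).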
+// + +namespace glslang { + +class TVarGatherTraverser : public TLiveTraverser { +public: + TVarGatherTraverser(const TIntermediate& i, bool traverseDeadCode, TVarLiveMap& inList, TVarLiveMap& outList, TVarLiveMap& uniformList) + : TLiveTraverser(i, traverseDeadCode, true, true, false) + , inputList(inList) + , outputList(outList) + , uniformList(uniformList) + { + } + + virtual void visitSymbol(TIntermSymbol* base) + { + TVarLiveMap* target = nullptr; + if (base->getQualifier().storage == EvqVaryingIn) + target = &inputList; + else if (base->getQualifier().storage == EvqVaryingOut) + target = &outputList; + else if (base->getQualifier().isUniformOrBuffer() && !base->getQualifier().isPushConstant()) + target = &uniformList; + if (target) { + TVarEntryInfo ent = {base->getId(), base, ! traverseAll}; + ent.stage = intermediate.getStage(); + TVarLiveMap::iterator at = target->find( + ent.symbol->getName()); // std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById()); + if (at != target->end() && at->second.id == ent.id) + at->second.live = at->second.live || ! traverseAll; // update live state + else + (*target)[ent.symbol->getName()] = ent; + } + } + +private: + TVarLiveMap& inputList; + TVarLiveMap& outputList; + TVarLiveMap& uniformList; +}; + +class TVarSetTraverser : public TLiveTraverser +{ +public: + TVarSetTraverser(const TIntermediate& i, const TVarLiveMap& inList, const TVarLiveMap& outList, const TVarLiveMap& uniformList) + : TLiveTraverser(i, true, true, true, false) + , inputList(inList) + , outputList(outList) + , uniformList(uniformList) + { + } + + virtual void visitSymbol(TIntermSymbol* base) { + const TVarLiveMap* source; + if (base->getQualifier().storage == EvqVaryingIn) + source = &inputList; + else if (base->getQualifier().storage == EvqVaryingOut) + source = &outputList; + else if (base->getQualifier().isUniformOrBuffer()) + source = &uniformList; + else + return; + + TVarEntryInfo ent = { base->getId() }; + TVarLiveMap::const_iterator at = source->find(base->getName()); + if (at == source->end()) + return; + + if (at->second.id != ent.id) + return; + + if (at->second.newBinding != -1) + base->getWritableType().getQualifier().layoutBinding = at->second.newBinding; + if (at->second.newSet != -1) + base->getWritableType().getQualifier().layoutSet = at->second.newSet; + if (at->second.newLocation != -1) + base->getWritableType().getQualifier().layoutLocation = at->second.newLocation; + if (at->second.newComponent != -1) + base->getWritableType().getQualifier().layoutComponent = at->second.newComponent; + if (at->second.newIndex != -1) + base->getWritableType().getQualifier().layoutIndex = at->second.newIndex; + } + + private: + const TVarLiveMap& inputList; + const TVarLiveMap& outputList; + const TVarLiveMap& uniformList; +}; + +struct TNotifyUniformAdaptor +{ + EShLanguage stage; + TIoMapResolver& resolver; + inline TNotifyUniformAdaptor(EShLanguage s, TIoMapResolver& r) + : stage(s) + , resolver(r) + { + } + + inline void operator()(std::pair& entKey) + { + resolver.notifyBinding(stage, entKey.second); + } + +private: + TNotifyUniformAdaptor& operator=(TNotifyUniformAdaptor&) = delete; +}; + +struct TNotifyInOutAdaptor +{ + EShLanguage stage; + TIoMapResolver& resolver; + inline TNotifyInOutAdaptor(EShLanguage s, TIoMapResolver& r) + : stage(s) + , resolver(r) + { + } + + inline void operator()(std::pair& entKey) + { + resolver.notifyInOut(stage, entKey.second); + } + +private: + TNotifyInOutAdaptor& operator=(TNotifyInOutAdaptor&) = delete; 
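+    // (Note: copy-assignment is deleted because the adaptor holds a TIoMapResolver
+    // reference member, which could not be reseated by an assignment operator.)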
+}; + +struct TResolverUniformAdaptor { + TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e) + : stage(s) + , resolver(r) + , infoSink(i) + , error(e) + { + } + + inline void operator()(std::pair& entKey) { + TVarEntryInfo& ent = entKey.second; + ent.newLocation = -1; + ent.newComponent = -1; + ent.newBinding = -1; + ent.newSet = -1; + ent.newIndex = -1; + const bool isValid = resolver.validateBinding(stage, ent); + if (isValid) { + resolver.resolveBinding(stage, ent); + resolver.resolveSet(stage, ent); + resolver.resolveUniformLocation(stage, ent); + + if (ent.newBinding != -1) { + if (ent.newBinding >= int(TQualifier::layoutBindingEnd)) { + TString err = "mapped binding out of range: " + entKey.first; + + infoSink.info.message(EPrefixInternalError, err.c_str()); + error = true; + } + } + if (ent.newSet != -1) { + if (ent.newSet >= int(TQualifier::layoutSetEnd)) { + TString err = "mapped set out of range: " + entKey.first; + + infoSink.info.message(EPrefixInternalError, err.c_str()); + error = true; + } + } + } else { + TString errorMsg = "Invalid binding: " + entKey.first; + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + error = true; + } + } + + inline void setStage(EShLanguage s) { stage = s; } + + EShLanguage stage; + TIoMapResolver& resolver; + TInfoSink& infoSink; + bool& error; + +private: + TResolverUniformAdaptor& operator=(TResolverUniformAdaptor&) = delete; +}; + +struct TResolverInOutAdaptor { + TResolverInOutAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e) + : stage(s) + , resolver(r) + , infoSink(i) + , error(e) + { + } + + inline void operator()(std::pair& entKey) + { + TVarEntryInfo& ent = entKey.second; + ent.newLocation = -1; + ent.newComponent = -1; + ent.newBinding = -1; + ent.newSet = -1; + ent.newIndex = -1; + const bool isValid = resolver.validateInOut(stage, ent); + if (isValid) { + resolver.resolveInOutLocation(stage, ent); + resolver.resolveInOutComponent(stage, ent); + resolver.resolveInOutIndex(stage, ent); + } else { + TString errorMsg; + if (ent.symbol->getType().getQualifier().semanticName != nullptr) { + errorMsg = "Invalid shader In/Out variable semantic: "; + errorMsg += ent.symbol->getType().getQualifier().semanticName; + } else { + errorMsg = "Invalid shader In/Out variable: "; + errorMsg += ent.symbol->getName(); + } + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + error = true; + } + } + + inline void setStage(EShLanguage s) { stage = s; } + + EShLanguage stage; + TIoMapResolver& resolver; + TInfoSink& infoSink; + bool& error; + +private: + TResolverInOutAdaptor& operator=(TResolverInOutAdaptor&) = delete; +}; + +// The class is used for reserving explicit uniform locations and ubo/ssbo/opaque bindings + +struct TSymbolValidater +{ + TSymbolValidater(TIoMapResolver& r, TInfoSink& i, TVarLiveMap* in[EShLangCount], TVarLiveMap* out[EShLangCount], + TVarLiveMap* uniform[EShLangCount], bool& hadError) + : preStage(EShLangCount) + , currentStage(EShLangCount) + , nextStage(EShLangCount) + , resolver(r) + , infoSink(i) + , hadError(hadError) + { + memcpy(inVarMaps, in, EShLangCount * (sizeof(TVarLiveMap*))); + memcpy(outVarMaps, out, EShLangCount * (sizeof(TVarLiveMap*))); + memcpy(uniformVarMap, uniform, EShLangCount * (sizeof(TVarLiveMap*))); + } + + inline void operator()(std::pair& entKey) { + TVarEntryInfo& ent1 = entKey.second; + TIntermSymbol* base = ent1.symbol; + const TType& type = ent1.symbol->getType(); + const TString& name = entKey.first; + TString mangleName1, 
mangleName2; + type.appendMangledName(mangleName1); + EShLanguage stage = ent1.stage; + if (currentStage != stage) { + preStage = currentStage; + currentStage = stage; + nextStage = EShLangCount; + for (int i = currentStage + 1; i < EShLangCount; i++) { + if (inVarMaps[i] != nullptr) + nextStage = static_cast(i); + } + } + if (base->getQualifier().storage == EvqVaryingIn) { + // validate stage in; + if (preStage == EShLangCount) + return; + if (outVarMaps[preStage] != nullptr) { + auto ent2 = outVarMaps[preStage]->find(name); + if (ent2 != outVarMaps[preStage]->end()) { + ent2->second.symbol->getType().appendMangledName(mangleName2); + if (mangleName1 == mangleName2) + return; + else { + TString err = "Invalid In/Out variable type : " + entKey.first; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + } + } + return; + } + } else if (base->getQualifier().storage == EvqVaryingOut) { + // validate stage out; + if (nextStage == EShLangCount) + return; + if (outVarMaps[nextStage] != nullptr) { + auto ent2 = inVarMaps[nextStage]->find(name); + if (ent2 != inVarMaps[nextStage]->end()) { + ent2->second.symbol->getType().appendMangledName(mangleName2); + if (mangleName1 == mangleName2) + return; + else { + TString err = "Invalid In/Out variable type : " + entKey.first; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + } + } + return; + } + } else if (base->getQualifier().isUniformOrBuffer() && ! base->getQualifier().isPushConstant()) { + // validate uniform type; + for (int i = 0; i < EShLangCount; i++) { + if (i != currentStage && outVarMaps[i] != nullptr) { + auto ent2 = uniformVarMap[i]->find(name); + if (ent2 != uniformVarMap[i]->end()) { + ent2->second.symbol->getType().appendMangledName(mangleName2); + if (mangleName1 != mangleName2) { + TString err = "Invalid Uniform variable type : " + entKey.first; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + } + mangleName2.clear(); + } + } + } + } + } + TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount], *uniformVarMap[EShLangCount]; + // Use for mark pre stage, to get more interface symbol information. 
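+    // (preStage/currentStage/nextStage track the neighbouring pipeline stages so an
+    // 'in' symbol can be matched against the previous stage's 'out' symbols, and an
+    // 'out' symbol against the next stage's 'in' symbols, as done above.)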
+ EShLanguage preStage, currentStage, nextStage; + // Use for mark current shader stage for resolver + TIoMapResolver& resolver; + TInfoSink& infoSink; + bool& hadError; + +private: + TSymbolValidater& operator=(TSymbolValidater&) = delete; +}; + +struct TSlotCollector { + TSlotCollector(TIoMapResolver& r, TInfoSink& i) : resolver(r), infoSink(i) { } + + inline void operator()(std::pair& entKey) { + resolver.reserverStorageSlot(entKey.second, infoSink); + resolver.reserverResourceSlot(entKey.second, infoSink); + } + TIoMapResolver& resolver; + TInfoSink& infoSink; + +private: + TSlotCollector& operator=(TSlotCollector&) = delete; +}; + +TDefaultIoResolverBase::TDefaultIoResolverBase(const TIntermediate& intermediate) + : intermediate(intermediate) + , nextUniformLocation(intermediate.getUniformLocationBase()) + , nextInputLocation(0) + , nextOutputLocation(0) +{ + memset(stageMask, false, sizeof(bool) * (EShLangCount + 1)); +} + +int TDefaultIoResolverBase::getBaseBinding(TResourceType res, unsigned int set) const { + return selectBaseBinding(intermediate.getShiftBinding(res), intermediate.getShiftBindingForSet(res, set)); +} + +const std::vector& TDefaultIoResolverBase::getResourceSetBinding() const { + return intermediate.getResourceSetBinding(); +} + +bool TDefaultIoResolverBase::doAutoBindingMapping() const { return intermediate.getAutoMapBindings(); } + +bool TDefaultIoResolverBase::doAutoLocationMapping() const { return intermediate.getAutoMapLocations(); } + +TDefaultIoResolverBase::TSlotSet::iterator TDefaultIoResolverBase::findSlot(int set, int slot) { + return std::lower_bound(slots[set].begin(), slots[set].end(), slot); +} + +bool TDefaultIoResolverBase::checkEmpty(int set, int slot) { + TSlotSet::iterator at = findSlot(set, slot); + return ! (at != slots[set].end() && *at == slot); +} + +int TDefaultIoResolverBase::reserveSlot(int set, int slot, int size) { + TSlotSet::iterator at = findSlot(set, slot); + // tolerate aliasing, by not double-recording aliases + // (policy about appropriateness of the alias is higher up) + for (int i = 0; i < size; i++) { + if (at == slots[set].end() || *at != slot + i) + at = slots[set].insert(at, slot + i); + ++at; + } + return slot; +} + +int TDefaultIoResolverBase::getFreeSlot(int set, int base, int size) { + TSlotSet::iterator at = findSlot(set, base); + if (at == slots[set].end()) + return reserveSlot(set, base, size); + // look for a big enough gap + for (; at != slots[set].end(); ++at) { + if (*at - base >= size) + break; + base = *at + 1; + } + return reserveSlot(set, base, size); +} + +int TDefaultIoResolverBase::resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + if (type.getQualifier().hasSet()) { + return ent.newSet = type.getQualifier().layoutSet; + } + // If a command line or API option requested a single descriptor set, use that (if not overrided by spaceN) + if (getResourceSetBinding().size() == 1) { + return ent.newSet = atoi(getResourceSetBinding()[0].c_str()); + } + return ent.newSet = 0; +} + +int TDefaultIoResolverBase::resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + const char* name = ent.symbol->getName().c_str(); + // kick out of not doing this + if (! 
doAutoLocationMapping()) { + return ent.newLocation = -1; + } + // no locations added if already present, a built-in variable, a block, or an opaque + if (type.getQualifier().hasLocation() || type.isBuiltIn() || type.getBasicType() == EbtBlock || + type.isAtomic() || (type.containsOpaque() && intermediate.getSpv().openGl == 0)) { + return ent.newLocation = -1; + } + // no locations on blocks of built-in variables + if (type.isStruct()) { + if (type.getStruct()->size() < 1) { + return ent.newLocation = -1; + } + if ((*type.getStruct())[0].type->isBuiltIn()) { + return ent.newLocation = -1; + } + } + int location = intermediate.getUniformLocationOverride(name); + if (location != -1) { + return ent.newLocation = location; + } + location = nextUniformLocation; + nextUniformLocation += TIntermediate::computeTypeUniformLocationSize(type); + return ent.newLocation = location; +} + +int TDefaultIoResolverBase::resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + // kick out of not doing this + if (! doAutoLocationMapping()) { + return ent.newLocation = -1; + } + + // no locations added if already present, or a built-in variable + if (type.getQualifier().hasLocation() || type.isBuiltIn()) { + return ent.newLocation = -1; + } + + // no locations on blocks of built-in variables + if (type.isStruct()) { + if (type.getStruct()->size() < 1) { + return ent.newLocation = -1; + } + if ((*type.getStruct())[0].type->isBuiltIn()) { + return ent.newLocation = -1; + } + } + // point to the right input or output location counter + int& nextLocation = type.getQualifier().isPipeInput() ? nextInputLocation : nextOutputLocation; + // Placeholder. This does not do proper cross-stage lining up, nor + // work with mixed location/no-location declarations. + int location = nextLocation; + int typeLocationSize; + // Don’t take into account the outer-most array if the stage’s + // interface is automatically an array. + typeLocationSize = computeTypeLocationSize(type, stage); + nextLocation += typeLocationSize; + return ent.newLocation = location; +} + +int TDefaultIoResolverBase::resolveInOutComponent(EShLanguage /*stage*/, TVarEntryInfo& ent) { + return ent.newComponent = -1; +} + +int TDefaultIoResolverBase::resolveInOutIndex(EShLanguage /*stage*/, TVarEntryInfo& ent) { return ent.newIndex = -1; } + +uint32_t TDefaultIoResolverBase::computeTypeLocationSize(const TType& type, EShLanguage stage) { + int typeLocationSize; + // Don’t take into account the outer-most array if the stage’s + // interface is automatically an array. 
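+    // (For example, a tessellation-control output 'out vec4 v[]' is arrayed by the
+    // stage itself, so only the element type vec4 counts toward the location size.)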
+    if (type.getQualifier().isArrayedIo(stage)) {
+        TType elementType(type, 0);
+        typeLocationSize = TIntermediate::computeTypeLocationSize(elementType, stage);
+    } else {
+        typeLocationSize = TIntermediate::computeTypeLocationSize(type, stage);
+    }
+    return typeLocationSize;
+}
+
+// TDefaultGlslIoResolver
+TResourceType TDefaultGlslIoResolver::getResourceType(const glslang::TType& type) {
+    if (isImageType(type)) {
+        return EResImage;
+    }
+    if (isTextureType(type)) {
+        return EResTexture;
+    }
+    if (isSsboType(type)) {
+        return EResSsbo;
+    }
+    if (isSamplerType(type)) {
+        return EResSampler;
+    }
+    if (isUboType(type)) {
+        return EResUbo;
+    }
+    return EResCount;
+}
+
+TDefaultGlslIoResolver::TDefaultGlslIoResolver(const TIntermediate& intermediate)
+    : TDefaultIoResolverBase(intermediate)
+    , preStage(EShLangCount)
+    , currentStage(EShLangCount)
+{ }
+
+int TDefaultGlslIoResolver::resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) {
+    const TType& type = ent.symbol->getType();
+    const TString& name = getAccessName(ent.symbol);
+    if (currentStage != stage) {
+        preStage = currentStage;
+        currentStage = stage;
+    }
+    // kick out if not doing this
+    if (! doAutoLocationMapping()) {
+        return ent.newLocation = -1;
+    }
+    // expand the location to each element if the symbol is a struct or array
+    if (type.getQualifier().hasLocation()) {
+        return ent.newLocation = type.getQualifier().layoutLocation;
+    }
+    // no locations added if already present, or a built-in variable
+    if (type.isBuiltIn()) {
+        return ent.newLocation = -1;
+    }
+    // no locations on blocks of built-in variables
+    if (type.isStruct()) {
+        if (type.getStruct()->size() < 1) {
+            return ent.newLocation = -1;
+        }
+        if ((*type.getStruct())[0].type->isBuiltIn()) {
+            return ent.newLocation = -1;
+        }
+    }
+    int typeLocationSize = computeTypeLocationSize(type, stage);
+    int location = type.getQualifier().layoutLocation;
+    bool hasLocation = false;
+    EShLanguage keyStage(EShLangCount);
+    TStorageQualifier storage;
+    storage = EvqInOut;
+    if (type.getQualifier().isPipeInput()) {
+        // If this symbol is an input, search the previous stage's outputs
+        keyStage = preStage;
+    }
+    if (type.getQualifier().isPipeOutput()) {
+        // If this symbol is an output, search the next stage's inputs
+        keyStage = currentStage;
+    }
+    // The in/out in the current stage is not declared with a location, but it may be declared
+    // with an explicit location in another stage; look in the storageSlotMap first to check
+    // whether the in/out already has a location.
+    int resourceKey = buildStorageKey(keyStage, storage);
+    if (! storageSlotMap[resourceKey].empty()) {
+        TVarSlotMap::iterator iter = storageSlotMap[resourceKey].find(name);
+        if (iter != storageSlotMap[resourceKey].end()) {
+            // If the interface resource is found, it already has a location; use the explicit
+            // location declared in the previous or next stage as this symbol's new location.
+            //
+            // vs: out vec4 a;
+            // fs: layout(..., location = 3,...) in vec4 a;
+            hasLocation = true;
+            location = iter->second;
+            // To handle a case like the following, the map would need to be re-traversed:
+            // vs: layout(location=4) out vec4 a;
+            //     out vec4 b;
+            //
+            // fs: in vec4 a;
+            //     layout(location = 4) in vec4 b;
+        }
+        if (! hasLocation) {
+            // If the interface resource is not found, the location is implicit in both stages,
+            // so find a new slot for this interface.
+            //
+            // vs: out vec4 a;
+            // fs: in vec4 a;
+            location = getFreeSlot(resourceKey, 0, typeLocationSize);
+            storageSlotMap[resourceKey][name] = location;
+        }
+    } else {
+        // the first interface declared in the program.
+        TVarSlotMap varSlotMap;
+        location = getFreeSlot(resourceKey, 0, typeLocationSize);
+        varSlotMap[name] = location;
+        storageSlotMap[resourceKey] = varSlotMap;
+    }
+    // Update location
+    return ent.newLocation = location;
+}
+
+int TDefaultGlslIoResolver::resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) {
+    const TType& type = ent.symbol->getType();
+    const TString& name = getAccessName(ent.symbol);
+    // kick out if not doing this
+    if (! doAutoLocationMapping()) {
+        return ent.newLocation = -1;
+    }
+    // expand the location to each element if the symbol is a struct or array
+    if (type.getQualifier().hasLocation() && (type.isStruct() || type.isArray())) {
+        return ent.newLocation = type.getQualifier().layoutLocation;
+    } else {
+        // no locations added if already present, a built-in variable, a block, or an opaque
+        if (type.getQualifier().hasLocation() || type.isBuiltIn() || type.getBasicType() == EbtBlock ||
+            type.isAtomic() || (type.containsOpaque() && intermediate.getSpv().openGl == 0)) {
+            return ent.newLocation = -1;
+        }
+        // no locations on blocks of built-in variables
+        if (type.isStruct()) {
+            if (type.getStruct()->size() < 1) {
+                return ent.newLocation = -1;
+            }
+            if ((*type.getStruct())[0].type->isBuiltIn()) {
+                return ent.newLocation = -1;
+            }
+        }
+    }
+    int location = intermediate.getUniformLocationOverride(name.c_str());
+    if (location != -1) {
+        return ent.newLocation = location;
+    }
+
+    int size = TIntermediate::computeTypeUniformLocationSize(type);
+
+    // The uniform in the current stage is not declared with a location, but it may be declared
+    // with an explicit location in another stage; look in the storageSlotMap first to check
+    // whether the uniform already has a location.
+    bool hasLocation = false;
+    int resourceKey = buildStorageKey(EShLangCount, EvqUniform);
+    TVarSlotMap& slotMap = storageSlotMap[resourceKey];
+    // Check whether the shader program already has uniform resources
+    if (! slotMap.empty()) {
+        // If not empty, look for a uniform with the same name
+        TVarSlotMap::iterator iter = slotMap.find(name);
+        if (iter != slotMap.end()) {
+            // If the uniform is found, it already has a location; use the explicit location
+            // declared in the other stage as this symbol's new location.
+            //
+            // vs: uniform vec4 a;
+            // fs: layout(..., location = 3,...) uniform vec4 a;
+            hasLocation = true;
+            location = iter->second;
+        }
+        if (! hasLocation) {
+            // No explicit location declaration in any other stage,
+            // so find a new slot for this uniform.
+            //
+            // vs: uniform vec4 a;
+            // fs: uniform vec4 a;
+            location = getFreeSlot(resourceKey, 0, computeTypeLocationSize(type, currentStage));
+            storageSlotMap[resourceKey][name] = location;
+        }
+    } else {
+        // the first uniform declared in the program.
+        TVarSlotMap varSlotMap;
+        location = getFreeSlot(resourceKey, 0, size);
+        varSlotMap[name] = location;
+        storageSlotMap[resourceKey] = varSlotMap;
+    }
+    return ent.newLocation = location;
+}
+
+int TDefaultGlslIoResolver::resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) {
+    const TType& type = ent.symbol->getType();
+    const TString& name = getAccessName(ent.symbol);
+    // On OpenGL arrays of opaque types take a separate binding for each element
+    int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ?
type.getCumulativeArraySize() : 1; + TResourceType resource = getResourceType(type); + // don't need to handle uniform symbol, it will be handled in resolveUniformLocation + if (resource == EResUbo && type.getBasicType() != EbtBlock) { + return ent.newBinding = -1; + } + // There is no 'set' qualifier in OpenGL shading language, each resource has its own + // binding name space, so remap the 'set' to resource type which make each resource + // binding is valid from 0 to MAX_XXRESOURCE_BINDINGS + int set = resource; + if (resource < EResCount) { + if (type.getQualifier().hasBinding()) { + ent.newBinding = reserveSlot(set, getBaseBinding(resource, set) + type.getQualifier().layoutBinding, numBindings); + return ent.newBinding; + } else if (ent.live && doAutoBindingMapping()) { + // The resource in current stage is not declared with binding, but it is possible declared + // with explicit binding in other stages, find the resourceSlotMap firstly to check whether + // the resource has binding, don't need to allocate if it already has a binding + bool hasBinding = false; + if (! resourceSlotMap[resource].empty()) { + TVarSlotMap::iterator iter = resourceSlotMap[resource].find(name); + if (iter != resourceSlotMap[resource].end()) { + hasBinding = true; + ent.newBinding = iter->second; + } + } + if (! hasBinding) { + TVarSlotMap varSlotMap; + // find free slot, the caller did make sure it passes all vars with binding + // first and now all are passed that do not have a binding and needs one + int binding = getFreeSlot(resource, getBaseBinding(resource, set), numBindings); + varSlotMap[name] = binding; + resourceSlotMap[resource] = varSlotMap; + ent.newBinding = binding; + } + return ent.newBinding; + } + } + return ent.newBinding = -1; +} + +void TDefaultGlslIoResolver::beginResolve(EShLanguage stage) { + // reset stage state + if (stage == EShLangCount) + preStage = currentStage = stage; + // update stage state + else if (currentStage != stage) { + preStage = currentStage; + currentStage = stage; + } +} + +void TDefaultGlslIoResolver::endResolve(EShLanguage /*stage*/) { + // TODO nothing +} + +void TDefaultGlslIoResolver::beginCollect(EShLanguage stage) { + // reset stage state + if (stage == EShLangCount) + preStage = currentStage = stage; + // update stage state + else if (currentStage != stage) { + preStage = currentStage; + currentStage = stage; + } +} + +void TDefaultGlslIoResolver::endCollect(EShLanguage /*stage*/) { + // TODO nothing +} + +void TDefaultGlslIoResolver::reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) { + const TType& type = ent.symbol->getType(); + const TString& name = getAccessName(ent.symbol); + TStorageQualifier storage = type.getQualifier().storage; + EShLanguage stage(EShLangCount); + switch (storage) { + case EvqUniform: + if (type.getBasicType() != EbtBlock && type.getQualifier().hasLocation()) { + // + // Reserve the slots for the uniforms who has explicit location + int storageKey = buildStorageKey(EShLangCount, EvqUniform); + int location = type.getQualifier().layoutLocation; + TVarSlotMap& varSlotMap = storageSlotMap[storageKey]; + TVarSlotMap::iterator iter = varSlotMap.find(name); + if (iter == varSlotMap.end()) { + int numLocations = TIntermediate::computeTypeUniformLocationSize(type); + reserveSlot(storageKey, location, numLocations); + varSlotMap[name] = location; + } else { + // Allocate location by name for OpenGL driver, so the uniform in different + // stages should be declared with the same location + if (iter->second != location) { + 
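+                        // (A name-based OpenGL mapping cannot honour two different explicit
+                        // locations for the same uniform name, so this is reported as an
+                        // error rather than silently picking one of them.)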
TString errorMsg = "Invalid location: " + name; + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + hasError = true; + } + } + } + break; + case EvqVaryingIn: + case EvqVaryingOut: + // + // Reserve the slots for the inout who has explicit location + if (type.getQualifier().hasLocation()) { + stage = storage == EvqVaryingIn ? preStage : stage; + stage = storage == EvqVaryingOut ? currentStage : stage; + int storageKey = buildStorageKey(stage, EvqInOut); + int location = type.getQualifier().layoutLocation; + TVarSlotMap& varSlotMap = storageSlotMap[storageKey]; + TVarSlotMap::iterator iter = varSlotMap.find(name); + if (iter == varSlotMap.end()) { + int numLocations = TIntermediate::computeTypeUniformLocationSize(type); + reserveSlot(storageKey, location, numLocations); + varSlotMap[name] = location; + } else { + // Allocate location by name for OpenGL driver, so the uniform in different + // stages should be declared with the same location + if (iter->second != location) { + TString errorMsg = "Invalid location: " + name; + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + hasError = true; + } + } + } + break; + default: + break; + } +} + +void TDefaultGlslIoResolver::reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) { + const TType& type = ent.symbol->getType(); + const TString& name = getAccessName(ent.symbol); + int resource = getResourceType(type); + if (type.getQualifier().hasBinding()) { + TVarSlotMap& varSlotMap = resourceSlotMap[resource]; + TVarSlotMap::iterator iter = varSlotMap.find(name); + int binding = type.getQualifier().layoutBinding; + if (iter == varSlotMap.end()) { + // Reserve the slots for the ubo, ssbo and opaques who has explicit binding + int numBindings = type.isSizedArray() ? type.getCumulativeArraySize() : 1; + varSlotMap[name] = binding; + reserveSlot(resource, binding, numBindings); + } else { + // Allocate binding by name for OpenGL driver, so the resource in different + // stages should be declared with the same binding + if (iter->second != binding) { + TString errorMsg = "Invalid binding: " + name; + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + hasError = true; + } + } + } +} + +const TString& TDefaultGlslIoResolver::getAccessName(const TIntermSymbol* symbol) +{ + return symbol->getBasicType() == EbtBlock ? + symbol->getType().getTypeName() : + symbol->getName(); +} + +//TDefaultGlslIoResolver end + +/* + * Basic implementation of glslang::TIoMapResolver that replaces the + * previous offset behavior. + * It does the same, uses the offsets for the corresponding uniform + * types. Also respects the EOptionAutoMapBindings flag and binds + * them if needed. 
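+ * (Note: EOptionAutoMapBindings is the standalone compiler's flag; through the
+ * library API the equivalent is TShader::setAutoMapBindings(), which is what
+ * doAutoBindingMapping() ultimately queries.)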
+ */ +/* + * Default resolver + */ +struct TDefaultIoResolver : public TDefaultIoResolverBase { + TDefaultIoResolver(const TIntermediate& intermediate) : TDefaultIoResolverBase(intermediate) { } + + bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; } + + TResourceType getResourceType(const glslang::TType& type) override { + if (isImageType(type)) { + return EResImage; + } + if (isTextureType(type)) { + return EResTexture; + } + if (isSsboType(type)) { + return EResSsbo; + } + if (isSamplerType(type)) { + return EResSampler; + } + if (isUboType(type)) { + return EResUbo; + } + return EResCount; + } + + int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override { + const TType& type = ent.symbol->getType(); + const int set = getLayoutSet(type); + // On OpenGL arrays of opaque types take a seperate binding for each element + int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1; + TResourceType resource = getResourceType(type); + if (resource < EResCount) { + if (type.getQualifier().hasBinding()) { + return ent.newBinding = reserveSlot( + set, getBaseBinding(resource, set) + type.getQualifier().layoutBinding, numBindings); + } else if (ent.live && doAutoBindingMapping()) { + // find free slot, the caller did make sure it passes all vars with binding + // first and now all are passed that do not have a binding and needs one + return ent.newBinding = getFreeSlot(set, getBaseBinding(resource, set), numBindings); + } + } + return ent.newBinding = -1; + } +}; + +#ifdef ENABLE_HLSL +/******************************************************************************** +The following IO resolver maps types in HLSL register space, as follows: + +t - for shader resource views (SRV) + TEXTURE1D + TEXTURE1DARRAY + TEXTURE2D + TEXTURE2DARRAY + TEXTURE3D + TEXTURECUBE + TEXTURECUBEARRAY + TEXTURE2DMS + TEXTURE2DMSARRAY + STRUCTUREDBUFFER + BYTEADDRESSBUFFER + BUFFER + TBUFFER + +s - for samplers + SAMPLER + SAMPLER1D + SAMPLER2D + SAMPLER3D + SAMPLERCUBE + SAMPLERSTATE + SAMPLERCOMPARISONSTATE + +u - for unordered access views (UAV) + RWBYTEADDRESSBUFFER + RWSTRUCTUREDBUFFER + APPENDSTRUCTUREDBUFFER + CONSUMESTRUCTUREDBUFFER + RWBUFFER + RWTEXTURE1D + RWTEXTURE1DARRAY + RWTEXTURE2D + RWTEXTURE2DARRAY + RWTEXTURE3D + +b - for constant buffer views (CBV) + CBUFFER + CONSTANTBUFFER + ********************************************************************************/ +struct TDefaultHlslIoResolver : public TDefaultIoResolverBase { + TDefaultHlslIoResolver(const TIntermediate& intermediate) : TDefaultIoResolverBase(intermediate) { } + + bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; } + + TResourceType getResourceType(const glslang::TType& type) override { + if (isUavType(type)) { + return EResUav; + } + if (isSrvType(type)) { + return EResTexture; + } + if (isSamplerType(type)) { + return EResSampler; + } + if (isUboType(type)) { + return EResUbo; + } + return EResCount; + } + + int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override { + const TType& type = ent.symbol->getType(); + const int set = getLayoutSet(type); + TResourceType resource = getResourceType(type); + if (resource < EResCount) { + if (type.getQualifier().hasBinding()) { + return ent.newBinding = reserveSlot(set, getBaseBinding(resource, set) + type.getQualifier().layoutBinding); + } else if (ent.live && doAutoBindingMapping()) { + // find free slot, the caller did make sure it 
passes all vars with binding + // first and now all are passed that do not have a binding and needs one + return ent.newBinding = getFreeSlot(set, getBaseBinding(resource, set)); + } + } + return ent.newBinding = -1; + } +}; +#endif + +// Map I/O variables to provided offsets, and make bindings for +// unbound but live variables. +// +// Returns false if the input is too malformed to do this. +bool TIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSink& infoSink, TIoMapResolver* resolver) { + bool somethingToDo = ! intermediate.getResourceSetBinding().empty() || intermediate.getAutoMapBindings() || + intermediate.getAutoMapLocations(); + // Restrict the stricter condition to further check 'somethingToDo' only if 'somethingToDo' has not been set, reduce + // unnecessary or insignificant for-loop operation after 'somethingToDo' have been true. + for (int res = 0; (res < EResCount && !somethingToDo); ++res) { + somethingToDo = somethingToDo || (intermediate.getShiftBinding(TResourceType(res)) != 0) || + intermediate.hasShiftBindingForSet(TResourceType(res)); + } + if (! somethingToDo && resolver == nullptr) + return true; + if (intermediate.getNumEntryPoints() != 1 || intermediate.isRecursive()) + return false; + TIntermNode* root = intermediate.getTreeRoot(); + if (root == nullptr) + return false; + // if no resolver is provided, use the default resolver with the given shifts and auto map settings + TDefaultIoResolver defaultResolver(intermediate); +#ifdef ENABLE_HLSL + TDefaultHlslIoResolver defaultHlslResolver(intermediate); + if (resolver == nullptr) { + // TODO: use a passed in IO mapper for this + if (intermediate.usingHlslIoMapping()) + resolver = &defaultHlslResolver; + else + resolver = &defaultResolver; + } + resolver->addStage(stage); +#else + resolver = &defaultResolver; +#endif + + TVarLiveMap inVarMap, outVarMap, uniformVarMap; + TVarLiveVector inVector, outVector, uniformVector; + TVarGatherTraverser iter_binding_all(intermediate, true, inVarMap, outVarMap, uniformVarMap); + TVarGatherTraverser iter_binding_live(intermediate, false, inVarMap, outVarMap, uniformVarMap); + root->traverse(&iter_binding_all); + iter_binding_live.pushFunction(intermediate.getEntryPointMangledName().c_str()); + while (! iter_binding_live.functions.empty()) { + TIntermNode* function = iter_binding_live.functions.back(); + iter_binding_live.functions.pop_back(); + function->traverse(&iter_binding_live); + } + // sort entries by priority. see TVarEntryInfo::TOrderByPriority for info. 
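+    // (The name-keyed maps are first copied into vectors so that sorting by
+    // binding/set priority can order resolution without disturbing name lookup;
+    // entries with explicit binding and set are resolved before auto-numbered ones.)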
+ std::for_each(inVarMap.begin(), inVarMap.end(), + [&inVector](TVarLivePair p) { inVector.push_back(p); }); + std::sort(inVector.begin(), inVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + std::for_each(outVarMap.begin(), outVarMap.end(), + [&outVector](TVarLivePair p) { outVector.push_back(p); }); + std::sort(outVector.begin(), outVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + std::for_each(uniformVarMap.begin(), uniformVarMap.end(), + [&uniformVector](TVarLivePair p) { uniformVector.push_back(p); }); + std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + bool hadError = false; + TNotifyInOutAdaptor inOutNotify(stage, *resolver); + TNotifyUniformAdaptor uniformNotify(stage, *resolver); + TResolverUniformAdaptor uniformResolve(stage, *resolver, infoSink, hadError); + TResolverInOutAdaptor inOutResolve(stage, *resolver, infoSink, hadError); + resolver->beginNotifications(stage); + std::for_each(inVector.begin(), inVector.end(), inOutNotify); + std::for_each(outVector.begin(), outVector.end(), inOutNotify); + std::for_each(uniformVector.begin(), uniformVector.end(), uniformNotify); + resolver->endNotifications(stage); + resolver->beginResolve(stage); + std::for_each(inVector.begin(), inVector.end(), inOutResolve); + std::for_each(inVector.begin(), inVector.end(), [&inVarMap](TVarLivePair p) { + auto at = inVarMap.find(p.second.symbol->getName()); + if (at != inVarMap.end()) + at->second = p.second; + }); + std::for_each(outVector.begin(), outVector.end(), inOutResolve); + std::for_each(outVector.begin(), outVector.end(), [&outVarMap](TVarLivePair p) { + auto at = outVarMap.find(p.second.symbol->getName()); + if (at != outVarMap.end()) + at->second = p.second; + }); + std::for_each(uniformVector.begin(), uniformVector.end(), uniformResolve); + std::for_each(uniformVector.begin(), uniformVector.end(), [&uniformVarMap](TVarLivePair p) { + auto at = uniformVarMap.find(p.second.symbol->getName()); + if (at != uniformVarMap.end()) + at->second = p.second; + }); + resolver->endResolve(stage); + if (!hadError) { + TVarSetTraverser iter_iomap(intermediate, inVarMap, outVarMap, uniformVarMap); + root->traverse(&iter_iomap); + } + return !hadError; +} + +// Map I/O variables to provided offsets, and make bindings for +// unbound but live variables. +// +// Returns false if the input is too malformed to do this. +bool TGlslIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSink& infoSink, TIoMapResolver* resolver) { + + bool somethingToDo = ! intermediate.getResourceSetBinding().empty() || intermediate.getAutoMapBindings() || + intermediate.getAutoMapLocations(); + // Restrict the stricter condition to further check 'somethingToDo' only if 'somethingToDo' has not been set, reduce + // unnecessary or insignificant for-loop operation after 'somethingToDo' have been true. + for (int res = 0; (res < EResCount && !somethingToDo); ++res) { + somethingToDo = somethingToDo || (intermediate.getShiftBinding(TResourceType(res)) != 0) || + intermediate.hasShiftBindingForSet(TResourceType(res)); + } + if (! 
somethingToDo && resolver == nullptr) { + return true; + } + if (intermediate.getNumEntryPoints() != 1 || intermediate.isRecursive()) { + return false; + } + TIntermNode* root = intermediate.getTreeRoot(); + if (root == nullptr) { + return false; + } + // if no resolver is provided, use the default resolver with the given shifts and auto map settings + TDefaultGlslIoResolver defaultResolver(intermediate); + if (resolver == nullptr) { + resolver = &defaultResolver; + } + resolver->addStage(stage); + inVarMaps[stage] = new TVarLiveMap(); outVarMaps[stage] = new TVarLiveMap(); uniformVarMap[stage] = new TVarLiveMap(); + TVarGatherTraverser iter_binding_all(intermediate, true, *inVarMaps[stage], *outVarMaps[stage], + *uniformVarMap[stage]); + TVarGatherTraverser iter_binding_live(intermediate, false, *inVarMaps[stage], *outVarMaps[stage], + *uniformVarMap[stage]); + root->traverse(&iter_binding_all); + iter_binding_live.pushFunction(intermediate.getEntryPointMangledName().c_str()); + while (! iter_binding_live.functions.empty()) { + TIntermNode* function = iter_binding_live.functions.back(); + iter_binding_live.functions.pop_back(); + function->traverse(&iter_binding_live); + } + TNotifyInOutAdaptor inOutNotify(stage, *resolver); + TNotifyUniformAdaptor uniformNotify(stage, *resolver); + // Resolve current stage input symbol location with previous stage output here, + // uniform symbol, ubo, ssbo and opaque symbols are per-program resource, + // will resolve uniform symbol location and ubo/ssbo/opaque binding in doMap() + resolver->beginNotifications(stage); + std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), inOutNotify); + std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), inOutNotify); + std::for_each(uniformVarMap[stage]->begin(), uniformVarMap[stage]->end(), uniformNotify); + resolver->endNotifications(stage); + TSlotCollector slotCollector(*resolver, infoSink); + resolver->beginCollect(stage); + std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), slotCollector); + std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), slotCollector); + std::for_each(uniformVarMap[stage]->begin(), uniformVarMap[stage]->end(), slotCollector); + resolver->endCollect(stage); + intermediates[stage] = &intermediate; + return !hadError; +} + +bool TGlslIoMapper::doMap(TIoMapResolver* resolver, TInfoSink& infoSink) { + resolver->endResolve(EShLangCount); + if (!hadError) { + //Resolve uniform location, ubo/ssbo/opaque bindings across stages + TResolverUniformAdaptor uniformResolve(EShLangCount, *resolver, infoSink, hadError); + TResolverInOutAdaptor inOutResolve(EShLangCount, *resolver, infoSink, hadError); + TSymbolValidater symbolValidater(*resolver, infoSink, inVarMaps, outVarMaps, uniformVarMap, hadError); + TVarLiveVector uniformVector; + resolver->beginResolve(EShLangCount); + for (int stage = EShLangVertex; stage < EShLangCount; stage++) { + if (inVarMaps[stage] != nullptr) { + inOutResolve.setStage(EShLanguage(stage)); + std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), symbolValidater); + std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), inOutResolve); + std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), symbolValidater); + std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), inOutResolve); + } + if (uniformVarMap[stage] != nullptr) { + uniformResolve.setStage(EShLanguage(stage)); + // sort entries by priority. see TVarEntryInfo::TOrderByPriority for info. 
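+                // (Uniform locations and ubo/ssbo/opaque bindings are per-program
+                // resources, so the uniforms of every stage are merged into a single
+                // vector before being validated and resolved together below.)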
+ std::for_each(uniformVarMap[stage]->begin(), uniformVarMap[stage]->end(), + [&uniformVector](TVarLivePair p) { uniformVector.push_back(p); }); + } + } + std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + std::for_each(uniformVector.begin(), uniformVector.end(), symbolValidater); + std::for_each(uniformVector.begin(), uniformVector.end(), uniformResolve); + std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + resolver->endResolve(EShLangCount); + for (size_t stage = 0; stage < EShLangCount; stage++) { + if (intermediates[stage] != nullptr) { + // traverse each stage, set new location to each input/output and unifom symbol, set new binding to + // ubo, ssbo and opaque symbols + TVarLiveMap** pUniformVarMap = uniformVarMap; + std::for_each(uniformVector.begin(), uniformVector.end(), [pUniformVarMap, stage](TVarLivePair p) { + auto at = pUniformVarMap[stage]->find(p.second.symbol->getName()); + if (at != pUniformVarMap[stage]->end()) + at->second = p.second; + }); + TVarSetTraverser iter_iomap(*intermediates[stage], *inVarMaps[stage], *outVarMaps[stage], + *uniformVarMap[stage]); + intermediates[stage]->getTreeRoot()->traverse(&iter_iomap); + } + } + return !hadError; + } else { + return false; + } +} + +} // end namespace glslang + +#endif // GLSLANG_WEB diff --git a/dep/glslang/glslang/MachineIndependent/iomapper.h b/dep/glslang/glslang/MachineIndependent/iomapper.h new file mode 100644 index 000000000..7ca18b8a5 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/iomapper.h @@ -0,0 +1,302 @@ +// +// Copyright (C) 2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#ifndef GLSLANG_WEB + +#ifndef _IOMAPPER_INCLUDED +#define _IOMAPPER_INCLUDED + +#include +#include "LiveTraverser.h" +#include +#include +// +// A reflection database and its interface, consistent with the OpenGL API reflection queries. +// + +class TInfoSink; + +namespace glslang { + +class TIntermediate; +struct TVarEntryInfo { + int id; + TIntermSymbol* symbol; + bool live; + int newBinding; + int newSet; + int newLocation; + int newComponent; + int newIndex; + EShLanguage stage; + struct TOrderById { + inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) { return l.id < r.id; } + }; + + struct TOrderByPriority { + // ordering: + // 1) has both binding and set + // 2) has binding but no set + // 3) has no binding but set + // 4) has no binding and no set + inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) { + const TQualifier& lq = l.symbol->getQualifier(); + const TQualifier& rq = r.symbol->getQualifier(); + + // simple rules: + // has binding gives 2 points + // has set gives 1 point + // who has the most points is more important. + int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0); + int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0); + + if (lPoints == rPoints) + return l.id < r.id; + return lPoints > rPoints; + } + }; +}; + +// Base class for shared TIoMapResolver services, used by several derivations. +struct TDefaultIoResolverBase : public glslang::TIoMapResolver { +public: + TDefaultIoResolverBase(const TIntermediate& intermediate); + typedef std::vector TSlotSet; + typedef std::unordered_map TSlotSetMap; + + // grow the reflection stage by stage + void notifyBinding(EShLanguage, TVarEntryInfo& /*ent*/) override {} + void notifyInOut(EShLanguage, TVarEntryInfo& /*ent*/) override {} + void beginNotifications(EShLanguage) override {} + void endNotifications(EShLanguage) override {} + void beginResolve(EShLanguage) override {} + void endResolve(EShLanguage) override {} + void beginCollect(EShLanguage) override {} + void endCollect(EShLanguage) override {} + void reserverResourceSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {} + void reserverStorageSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {} + int getBaseBinding(TResourceType res, unsigned int set) const; + const std::vector& getResourceSetBinding() const; + virtual TResourceType getResourceType(const glslang::TType& type) = 0; + bool doAutoBindingMapping() const; + bool doAutoLocationMapping() const; + TSlotSet::iterator findSlot(int set, int slot); + bool checkEmpty(int set, int slot); + bool validateInOut(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; } + int reserveSlot(int set, int slot, int size = 1); + int getFreeSlot(int set, int base, int size = 1); + int resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent) override; + int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override; + int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override; + int resolveInOutComponent(EShLanguage /*stage*/, TVarEntryInfo& ent) override; + int resolveInOutIndex(EShLanguage /*stage*/, TVarEntryInfo& ent) override; + void addStage(EShLanguage stage) override { + if (stage < EShLangCount) + stageMask[stage] = true; + } + uint32_t computeTypeLocationSize(const TType& type, EShLanguage stage); + + TSlotSetMap slots; + bool hasError = false; + +protected: + TDefaultIoResolverBase(TDefaultIoResolverBase&); + TDefaultIoResolverBase& operator=(TDefaultIoResolverBase&); + const 
TIntermediate& intermediate; + int nextUniformLocation; + int nextInputLocation; + int nextOutputLocation; + bool stageMask[EShLangCount + 1]; + // Return descriptor set specific base if there is one, and the generic base otherwise. + int selectBaseBinding(int base, int descriptorSetBase) const { + return descriptorSetBase != -1 ? descriptorSetBase : base; + } + + static int getLayoutSet(const glslang::TType& type) { + if (type.getQualifier().hasSet()) + return type.getQualifier().layoutSet; + else + return 0; + } + + static bool isSamplerType(const glslang::TType& type) { + return type.getBasicType() == glslang::EbtSampler && type.getSampler().isPureSampler(); + } + + static bool isTextureType(const glslang::TType& type) { + return (type.getBasicType() == glslang::EbtSampler && + (type.getSampler().isTexture() || type.getSampler().isSubpass())); + } + + static bool isUboType(const glslang::TType& type) { + return type.getQualifier().storage == EvqUniform; + } + + static bool isImageType(const glslang::TType& type) { + return type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage(); + } + + static bool isSsboType(const glslang::TType& type) { + return type.getQualifier().storage == EvqBuffer; + } + + // Return true if this is a SRV (shader resource view) type: + static bool isSrvType(const glslang::TType& type) { + return isTextureType(type) || type.getQualifier().storage == EvqBuffer; + } + + // Return true if this is a UAV (unordered access view) type: + static bool isUavType(const glslang::TType& type) { + if (type.getQualifier().isReadOnly()) + return false; + return (type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage()) || + (type.getQualifier().storage == EvqBuffer); + } +}; + +// Defaulf I/O resolver for OpenGL +struct TDefaultGlslIoResolver : public TDefaultIoResolverBase { +public: + typedef std::map TVarSlotMap; // + typedef std::map TSlotMap; // + TDefaultGlslIoResolver(const TIntermediate& intermediate); + bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; } + TResourceType getResourceType(const glslang::TType& type) override; + int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override; + int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override; + int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override; + void beginResolve(EShLanguage /*stage*/) override; + void endResolve(EShLanguage stage) override; + void beginCollect(EShLanguage) override; + void endCollect(EShLanguage) override; + void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override; + void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override; + const TString& getAccessName(const TIntermSymbol*); + // in/out symbol and uniform symbol are stored in the same resourceSlotMap, the storage key is used to identify each type of symbol. + // We use stage and storage qualifier to construct a storage key. it can help us identify the same storage resource used in different stage. + // if a resource is a program resource and we don't need know it usage stage, we can use same stage to build storage key. + // Note: both stage and type must less then 0xffff. + int buildStorageKey(EShLanguage stage, TStorageQualifier type) { + assert(static_cast(stage) <= 0x0000ffff && static_cast(type) <= 0x0000ffff); + return (stage << 16) | type; + } + +protected: + // Use for mark pre stage, to get more interface symbol information. 
+ EShLanguage preStage; + // Marks the current shader stage for the resolver. + EShLanguage currentStage; + // Slot map for storage resources (locations of uniforms and interface symbols); a program-wide shared slot map. + TSlotMap resourceSlotMap; + // Slot map for other resources (images, UBOs, SSBOs); a program-wide shared slot map. + TSlotMap storageSlotMap; +}; + +typedef std::map<const TString, TVarEntryInfo> TVarLiveMap; + +// Override "operator=": when a vector of these pairs is sorted under VC++, +// the sort calls: +// pair& operator=(const pair<_Other1, _Other2>& _Right) +// { +// first = _Right.first; +// second = _Right.second; +// return (*this); +// } +// which assigns to the const-qualified 'first' member on the left-hand side. +// Overriding the assignment operator here avoids that compiler error. +// In the future, if the VC++ compiler can handle such a situation, +// this part of the code can be removed. +struct TVarLivePair : std::pair<const TString, TVarEntryInfo> { + TVarLivePair(const std::pair<const TString, TVarEntryInfo>& _Right) : pair(_Right.first, _Right.second) {} + TVarLivePair& operator=(const TVarLivePair& _Right) { + const_cast<TString&>(first) = _Right.first; + second = _Right.second; + return (*this); + } + TVarLivePair(const TVarLivePair& src) : pair(src) { } +}; +typedef std::vector<TVarLivePair> TVarLiveVector; + +// I/O mapper +class TIoMapper { +public: + TIoMapper() {} + virtual ~TIoMapper() {} + // grow the reflection stage by stage + bool virtual addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*); + bool virtual doMap(TIoMapResolver*, TInfoSink&) { return true; } +}; + +// I/O mapper for OpenGL +class TGlslIoMapper : public TIoMapper { +public: + TGlslIoMapper() { + memset(inVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount); + memset(outVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount); + memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * EShLangCount); + memset(intermediates, 0, sizeof(TIntermediate*) * EShLangCount); + } + virtual ~TGlslIoMapper() { + for (size_t stage = 0; stage < EShLangCount; stage++) { + if (inVarMaps[stage] != nullptr) { + delete inVarMaps[stage]; + inVarMaps[stage] = nullptr; + } + if (outVarMaps[stage] != nullptr) { + delete outVarMaps[stage]; + outVarMaps[stage] = nullptr; + } + if (uniformVarMap[stage] != nullptr) { + delete uniformVarMap[stage]; + uniformVarMap[stage] = nullptr; + } + if (intermediates[stage] != nullptr) + intermediates[stage] = nullptr; + } + } + // grow the reflection stage by stage + bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*) override; + bool doMap(TIoMapResolver*, TInfoSink&) override; + TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount], + *uniformVarMap[EShLangCount]; + TIntermediate* intermediates[EShLangCount]; + bool hadError = false; +}; + +} // end namespace glslang + +#endif // _IOMAPPER_INCLUDED + +#endif // GLSLANG_WEB diff --git a/dep/glslang/glslang/MachineIndependent/limits.cpp b/dep/glslang/glslang/MachineIndependent/limits.cpp new file mode 100644 index 000000000..51d930034 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/limits.cpp @@ -0,0 +1,200 @@ +// +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do sub tree walks for +// 1) inductive loop bodies to see if the inductive variable is modified +// 2) array-index expressions to see if they are "constant-index-expression" +// +// These are per Appendix A of ES 2.0: +// +// "Within the body of the loop, the loop index is not statically assigned to nor is it used as the +// argument to a function out or inout parameter." +// +// "The following are constant-index-expressions: +// - Constant expressions +// - Loop indices as defined in section 4 +// - Expressions composed of both of the above" +// +// N.B.: assuming the last rule excludes function calls +// + +#include "ParseHelper.h" + +namespace glslang { + +// +// The inductive loop-body traverser. +// +// Just look at things that might modify the loop index. 
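For intuition, the kind of ES 2.0 loop body the traverser below rejects looks like the following (GLSL held in a C++ string purely for illustration; modify() is a hypothetical function taking an inout parameter):

    // Both statements violate the Appendix A restriction quoted above:
    // the inductive variable 'i' is written, and it is bound to an
    // out/inout argument.
    static const char* kBadLoopBody =
        "for (int i = 0; i < 4; ++i) {\n"
        "    i += 2;       // direct write to the loop index\n"
        "    modify(i);    // 'i' passed to an inout parameter\n"
        "}\n";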
+// + +class TInductiveTraverser : public TIntermTraverser { +public: + TInductiveTraverser(int id, TSymbolTable& st) + : loopId(id), symbolTable(st), bad(false) { } + + virtual bool visitBinary(TVisit, TIntermBinary* node); + virtual bool visitUnary(TVisit, TIntermUnary* node); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + + int loopId; // unique ID of the symbol that's the loop inductive variable + TSymbolTable& symbolTable; + bool bad; + TSourceLoc badLoc; + +protected: + TInductiveTraverser(TInductiveTraverser&); + TInductiveTraverser& operator=(TInductiveTraverser&); +}; + +// check binary operations for those modifying the loop index +bool TInductiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node) +{ + if (node->modifiesState() && node->getLeft()->getAsSymbolNode() && + node->getLeft()->getAsSymbolNode()->getId() == loopId) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// check unary operations for those modifying the loop index +bool TInductiveTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) +{ + if (node->modifiesState() && node->getOperand()->getAsSymbolNode() && + node->getOperand()->getAsSymbolNode()->getId() == loopId) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// check function calls for arguments modifying the loop index +bool TInductiveTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + if (node->getOp() == EOpFunctionCall) { + // see if an out or inout argument is the loop index + const TIntermSequence& args = node->getSequence(); + for (int i = 0; i < (int)args.size(); ++i) { + if (args[i]->getAsSymbolNode() && args[i]->getAsSymbolNode()->getId() == loopId) { + TSymbol* function = symbolTable.find(node->getName()); + const TType* type = (*function->getAsFunction())[i].type; + if (type->getQualifier().storage == EvqOut || + type->getQualifier().storage == EvqInOut) { + bad = true; + badLoc = node->getLoc(); + } + } + } + } + + return true; +} + +// +// External function to call for loop check. +// +void TParseContext::inductiveLoopBodyCheck(TIntermNode* body, int loopId, TSymbolTable& symbolTable) +{ + TInductiveTraverser it(loopId, symbolTable); + + if (body == nullptr) + return; + + body->traverse(&it); + + if (it.bad) + error(it.badLoc, "inductive loop index modified", "limitations", ""); +} + +// +// The "constant-index-expression" traverser. +// +// Just look at things that can form an index. +// + +class TIndexTraverser : public TIntermTraverser { +public: + TIndexTraverser(const TIdSetType& ids) : inductiveLoopIds(ids), bad(false) { } + virtual void visitSymbol(TIntermSymbol* symbol); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + const TIdSetType& inductiveLoopIds; + bool bad; + TSourceLoc badLoc; + +protected: + TIndexTraverser(TIndexTraverser&); + TIndexTraverser& operator=(TIndexTraverser&); +}; + +// make sure symbols are inductive-loop indexes +void TIndexTraverser::visitSymbol(TIntermSymbol* symbol) +{ + if (inductiveLoopIds.find(symbol->getId()) == inductiveLoopIds.end()) { + bad = true; + badLoc = symbol->getLoc(); + } +} + +// check for function calls, assuming they are bad; spec. doesn't really say +bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + if (node->getOp() == EOpFunctionCall) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// +// External function to call for loop check.
+// +void TParseContext::constantIndexExpressionCheck(TIntermNode* index) +{ +#ifndef GLSLANG_WEB + TIndexTraverser it(inductiveLoopIds); + + index->traverse(&it); + + if (it.bad) + error(it.badLoc, "Non-constant-index-expression", "limitations", ""); +#endif +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/linkValidate.cpp b/dep/glslang/glslang/MachineIndependent/linkValidate.cpp new file mode 100644 index 000000000..045d45e3c --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/linkValidate.cpp @@ -0,0 +1,1779 @@ +// +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do link-time merging and validation of intermediate representations. +// +// Basic model is that during compilation, each compilation unit (shader) is +// compiled into one TIntermediate instance. Then, at link time, multiple +// units for the same stage can be merged together, which can generate errors. +// Then, after all merging, a single instance of TIntermediate represents +// the whole stage. A final error check can be done on the resulting stage, +// even if no merging was done (i.e., the stage was only one compilation unit). +// + +#include "localintermediate.h" +#include "../Include/InfoSink.h" + +namespace glslang { + +// +// Link-time error emitter. +// +void TIntermediate::error(TInfoSink& infoSink, const char* message) +{ +#ifndef GLSLANG_WEB + infoSink.info.prefix(EPrefixError); + infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n"; +#endif + + ++numErrors; +} + +// Link-time warning. 
+void TIntermediate::warn(TInfoSink& infoSink, const char* message) +{ +#ifndef GLSLANG_WEB + infoSink.info.prefix(EPrefixWarning); + infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n"; +#endif +} + +// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block +// name must have the exact same set of members qualified with offset and their integral-constant +// expression values must be the same, or a link-time error results." + +// +// Merge the information from 'unit' into 'this' +// +void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit) +{ +#ifndef GLSLANG_WEB + mergeCallGraphs(infoSink, unit); + mergeModes(infoSink, unit); + mergeTrees(infoSink, unit); +#endif +} + +void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit) +{ + if (unit.getNumEntryPoints() > 0) { + if (getNumEntryPoints() > 0) + error(infoSink, "can't handle multiple entry points per stage"); + else { + entryPointName = unit.getEntryPointName(); + entryPointMangledName = unit.getEntryPointMangledName(); + } + } + numEntryPoints += unit.getNumEntryPoints(); + + callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end()); +} + +#ifndef GLSLANG_WEB + +#define MERGE_MAX(member) member = std::max(member, unit.member) +#define MERGE_TRUE(member) if (unit.member) member = unit.member; + +void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit) +{ + if (language != unit.language) + error(infoSink, "stages must match when linking into a single stage"); + + if (getSource() == EShSourceNone) + setSource(unit.getSource()); + if (getSource() != unit.getSource()) + error(infoSink, "can't link compilation units from different source languages"); + + if (treeRoot == nullptr) { + profile = unit.profile; + version = unit.version; + requestedExtensions = unit.requestedExtensions; + } else { + if ((isEsProfile()) != (unit.isEsProfile())) + error(infoSink, "Cannot cross link ES and desktop profiles"); + else if (unit.profile == ECompatibilityProfile) + profile = ECompatibilityProfile; + version = std::max(version, unit.version); + requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end()); + } + + MERGE_MAX(spvVersion.spv); + MERGE_MAX(spvVersion.vulkanGlsl); + MERGE_MAX(spvVersion.vulkan); + MERGE_MAX(spvVersion.openGl); + + numErrors += unit.getNumErrors(); + // Only one push_constant is allowed, mergeLinkerObjects() will ensure the push_constant + // is the same for all units. 
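For reference, here is what the two helper macros above expand to, shown on a pair of stand-in structs (a sketch; the Unit struct and its members are illustrative, not glslang's types):

    #include <algorithm>

    struct Unit { int spv = 0; bool xfbMode = false; };

    int main() {
        Unit merged{130, false}, unit{140, true};
        merged.spv = std::max(merged.spv, unit.spv);     // MERGE_MAX(spv)
        if (unit.xfbMode) merged.xfbMode = unit.xfbMode; // MERGE_TRUE(xfbMode)
        return (merged.spv == 140 && merged.xfbMode) ? 0 : 1;
    }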
+ if (numPushConstants > 1 || unit.numPushConstants > 1) + error(infoSink, "Only one push_constant block is allowed per stage"); + numPushConstants = std::min(numPushConstants + unit.numPushConstants, 1); + + if (unit.invocations != TQualifier::layoutNotSet) { + if (invocations == TQualifier::layoutNotSet) + invocations = unit.invocations; + else if (invocations != unit.invocations) + error(infoSink, "number of invocations must match between compilation units"); + } + + if (vertices == TQualifier::layoutNotSet) + vertices = unit.vertices; + else if (unit.vertices != TQualifier::layoutNotSet && vertices != unit.vertices) { + if (language == EShLangGeometry || language == EShLangMeshNV) + error(infoSink, "Contradictory layout max_vertices values"); + else if (language == EShLangTessControl) + error(infoSink, "Contradictory layout vertices values"); + else + assert(0); + } + if (primitives == TQualifier::layoutNotSet) + primitives = unit.primitives; + else if (primitives != unit.primitives) { + if (language == EShLangMeshNV) + error(infoSink, "Contradictory layout max_primitives values"); + else + assert(0); + } + + if (inputPrimitive == ElgNone) + inputPrimitive = unit.inputPrimitive; + else if (unit.inputPrimitive != ElgNone && inputPrimitive != unit.inputPrimitive) + error(infoSink, "Contradictory input layout primitives"); + + if (outputPrimitive == ElgNone) + outputPrimitive = unit.outputPrimitive; + else if (unit.outputPrimitive != ElgNone && outputPrimitive != unit.outputPrimitive) + error(infoSink, "Contradictory output layout primitives"); + + if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger) + error(infoSink, "gl_FragCoord redeclarations must match across shaders"); + + if (vertexSpacing == EvsNone) + vertexSpacing = unit.vertexSpacing; + else if (vertexSpacing != unit.vertexSpacing) + error(infoSink, "Contradictory input vertex spacing"); + + if (vertexOrder == EvoNone) + vertexOrder = unit.vertexOrder; + else if (vertexOrder != unit.vertexOrder) + error(infoSink, "Contradictory triangle ordering"); + + MERGE_TRUE(pointMode); + + for (int i = 0; i < 3; ++i) { + if (!localSizeNotDefault[i] && unit.localSizeNotDefault[i]) { + localSize[i] = unit.localSize[i]; + localSizeNotDefault[i] = true; + } + else if (localSize[i] != unit.localSize[i]) + error(infoSink, "Contradictory local size"); + + if (localSizeSpecId[i] == TQualifier::layoutNotSet) + localSizeSpecId[i] = unit.localSizeSpecId[i]; + else if (localSizeSpecId[i] != unit.localSizeSpecId[i]) + error(infoSink, "Contradictory local size specialization ids"); + } + + MERGE_TRUE(earlyFragmentTests); + MERGE_TRUE(postDepthCoverage); + + if (depthLayout == EldNone) + depthLayout = unit.depthLayout; + else if (depthLayout != unit.depthLayout) + error(infoSink, "Contradictory depth layouts"); + + MERGE_TRUE(depthReplacing); + MERGE_TRUE(hlslFunctionality1); + + blendEquations |= unit.blendEquations; + + MERGE_TRUE(xfbMode); + + for (size_t b = 0; b < xfbBuffers.size(); ++b) { + if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd) + xfbBuffers[b].stride = unit.xfbBuffers[b].stride; + else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride) + error(infoSink, "Contradictory xfb_stride"); + xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride); + if (unit.xfbBuffers[b].contains64BitType) + xfbBuffers[b].contains64BitType = true; + if (unit.xfbBuffers[b].contains32BitType) + xfbBuffers[b].contains32BitType = true; + if 
(unit.xfbBuffers[b].contains16BitType) + xfbBuffers[b].contains16BitType = true; + // TODO: 4.4 link: enhanced layouts: compare ranges + } + + MERGE_TRUE(multiStream); + MERGE_TRUE(layoutOverrideCoverage); + MERGE_TRUE(geoPassthroughEXT); + + for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) { + if (unit.shiftBinding[i] > 0) + setShiftBinding((TResourceType)i, unit.shiftBinding[i]); + } + + for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) { + for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it) + setShiftBindingForSet((TResourceType)i, it->second, it->first); + } + + resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end()); + + MERGE_TRUE(autoMapBindings); + MERGE_TRUE(autoMapLocations); + MERGE_TRUE(invertY); + MERGE_TRUE(flattenUniformArrays); + MERGE_TRUE(useUnknownFormat); + MERGE_TRUE(hlslOffsets); + MERGE_TRUE(useStorageBuffer); + MERGE_TRUE(hlslIoMapping); + + // TODO: sourceFile + // TODO: sourceText + // TODO: processes + + MERGE_TRUE(needToLegalize); + MERGE_TRUE(binaryDoubleOutput); + MERGE_TRUE(usePhysicalStorageBuffer); +} + +// +// Merge the 'unit' AST into 'this' AST. +// That includes rationalizing the unique IDs, which were set up independently, +// and might have overlaps that are not the same symbol, or might have different +// IDs for what should be the same shared symbol. +// +void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit) +{ + if (unit.treeRoot == nullptr) + return; + + if (treeRoot == nullptr) { + treeRoot = unit.treeRoot; + return; + } + + // Getting this far means we have two existing trees to merge... + numShaderRecordBlocks += unit.numShaderRecordBlocks; + numTaskNVBlocks += unit.numTaskNVBlocks; + + // Get the top-level globals of each unit + TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence(); + TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence(); + + // Get the linker-object lists + TIntermSequence& linkerObjects = findLinkerObjects()->getSequence(); + const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence(); + + // Map by global name to unique ID to rationalize the same object having + // differing IDs in different trees. + TIdMaps idMaps; + int maxId; + seedIdMap(idMaps, maxId); + remapIds(idMaps, maxId + 1, unit); + + mergeBodies(infoSink, globals, unitGlobals); + mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects); + ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end()); +} + +#endif + +static const TString& getNameForIdMap(TIntermSymbol* symbol) +{ + TShaderInterface si = symbol->getType().getShaderInterface(); + if (si == EsiNone) + return symbol->getName(); + else + return symbol->getType().getTypeName(); +} + + + +// Traverser that seeds an ID map with all built-ins, and tracks the +// maximum ID used. +// (It would be nice to put this in a function, but that causes warnings +// on having no bodies for the copy-constructor/operator=.) +class TBuiltInIdTraverser : public TIntermTraverser { +public: + TBuiltInIdTraverser(TIdMaps& idMaps) : idMaps(idMaps), maxId(0) { } + // If it's a built in, add it to the map. + // Track the max ID. 
+ virtual void visitSymbol(TIntermSymbol* symbol) + { + const TQualifier& qualifier = symbol->getType().getQualifier(); + if (qualifier.builtIn != EbvNone) { + TShaderInterface si = symbol->getType().getShaderInterface(); + idMaps[si][getNameForIdMap(symbol)] = symbol->getId(); + } + maxId = std::max(maxId, symbol->getId()); + } + int getMaxId() const { return maxId; } +protected: + TBuiltInIdTraverser(TBuiltInIdTraverser&); + TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&); + TIdMaps& idMaps; + int maxId; +}; + +// Traverser that seeds an ID map with non-builtins. +// (It would be nice to put this in a function, but that causes warnings +// on having no bodies for the copy-constructor/operator=.) +class TUserIdTraverser : public TIntermTraverser { +public: + TUserIdTraverser(TIdMaps& idMaps) : idMaps(idMaps) { } + // If it's a non-built-in global, add it to the map. + virtual void visitSymbol(TIntermSymbol* symbol) + { + const TQualifier& qualifier = symbol->getType().getQualifier(); + if (qualifier.builtIn == EbvNone) { + TShaderInterface si = symbol->getType().getShaderInterface(); + idMaps[si][getNameForIdMap(symbol)] = symbol->getId(); + } + } + +protected: + TUserIdTraverser(TUserIdTraverser&); + TUserIdTraverser& operator=(TUserIdTraverser&); + TIdMaps& idMaps; // over biggest id +}; + +// Initialize the ID map with what we know of 'this' AST. +void TIntermediate::seedIdMap(TIdMaps& idMaps, int& maxId) +{ + // all built-ins everywhere need to align on IDs and contribute to the max ID + TBuiltInIdTraverser builtInIdTraverser(idMaps); + treeRoot->traverse(&builtInIdTraverser); + maxId = builtInIdTraverser.getMaxId(); + + // user variables in the linker object list need to align on ids + TUserIdTraverser userIdTraverser(idMaps); + findLinkerObjects()->traverse(&userIdTraverser); +} + +// Traverser to map an AST ID to what was known from the seeding AST. +// (It would be nice to put this in a function, but that causes warnings +// on having no bodies for the copy-constructor/operator=.) +class TRemapIdTraverser : public TIntermTraverser { +public: + TRemapIdTraverser(const TIdMaps& idMaps, int idShift) : idMaps(idMaps), idShift(idShift) { } + // Do the mapping: + // - if the same symbol, adopt the 'this' ID + // - otherwise, ensure a unique ID by shifting to a new space + virtual void visitSymbol(TIntermSymbol* symbol) + { + const TQualifier& qualifier = symbol->getType().getQualifier(); + bool remapped = false; + if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) { + TShaderInterface si = symbol->getType().getShaderInterface(); + auto it = idMaps[si].find(getNameForIdMap(symbol)); + if (it != idMaps[si].end()) { + symbol->changeId(it->second); + remapped = true; + } + } + if (!remapped) + symbol->changeId(symbol->getId() + idShift); + } +protected: + TRemapIdTraverser(TRemapIdTraverser&); + TRemapIdTraverser& operator=(TRemapIdTraverser&); + const TIdMaps& idMaps; + int idShift; +}; + +void TIntermediate::remapIds(const TIdMaps& idMaps, int idShift, TIntermediate& unit) +{ + // Remap all IDs to either share or be unique, as dictated by the idMap and idShift. + TRemapIdTraverser idTraverser(idMaps, idShift); + unit.getTreeRoot()->traverse(&idTraverser); +} + +// +// Merge the function bodies and global-level initializers from unitGlobals into globals. +// Will error check duplication of function bodies for the same signature.
+// +void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals) +{ + // TODO: link-time performance: Processing in alphabetical order will be faster + + // Error check the global objects, not including the linker objects + for (unsigned int child = 0; child < globals.size() - 1; ++child) { + for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) { + TIntermAggregate* body = globals[child]->getAsAggregate(); + TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate(); + if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) { + error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:"); + infoSink.info << " " << globals[child]->getAsAggregate()->getName() << "\n"; + } + } + } + + // Merge the global objects, just in front of the linker objects + globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1); +} + +// +// Merge the linker objects from unitLinkerObjects into linkerObjects. +// Duplication is expected and filtered out, but contradictions are an error. +// +void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects) +{ + // Error check and merge the linker objects (duplicates should not be created) + std::size_t initialNumLinkerObjects = linkerObjects.size(); + for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) { + bool merge = true; + for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) { + TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode(); + TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode(); + assert(symbol && unitSymbol); + + bool isSameSymbol = false; + // If they are both blocks in the same shader interface, + // match by the block-name, not the identifier name. + if (symbol->getType().getBasicType() == EbtBlock && unitSymbol->getType().getBasicType() == EbtBlock) { + if (symbol->getType().getShaderInterface() == unitSymbol->getType().getShaderInterface()) { + isSameSymbol = symbol->getType().getTypeName() == unitSymbol->getType().getTypeName(); + } + } + else if (symbol->getName() == unitSymbol->getName()) + isSameSymbol = true; + + if (isSameSymbol) { + // filter out copy + merge = false; + + // but if one has an initializer and the other does not, update + // the initializer + if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty()) + symbol->setConstArray(unitSymbol->getConstArray()); + + // Similarly for binding + if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding()) + symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding; + + // Update implicit array sizes + mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType()); + + // Check for consistent types/qualification/initializers etc. 
+ mergeErrorCheck(infoSink, *symbol, *unitSymbol, false); + } + // If different symbols, verify they aren't push_constant since there can only be one per stage + else if (symbol->getQualifier().isPushConstant() && unitSymbol->getQualifier().isPushConstant()) + error(infoSink, "Only one push_constant block is allowed per stage"); + } + if (merge) + linkerObjects.push_back(unitLinkerObjects[unitLinkObj]); + } +} + +// TODO 4.5 link functionality: cull distance array size checking + +// Recursively merge the implicit array sizes through the objects' respective type trees. +void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType) +{ + if (type.isUnsizedArray()) { + if (unitType.isUnsizedArray()) { + type.updateImplicitArraySize(unitType.getImplicitArraySize()); + if (unitType.isArrayVariablyIndexed()) + type.setArrayVariablyIndexed(); + } else if (unitType.isSizedArray()) + type.changeOuterArraySize(unitType.getOuterArraySize()); + } + + // Type mismatches are caught and reported after this, just be careful for now. + if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size()) + return; + + for (int i = 0; i < (int)type.getStruct()->size(); ++i) + mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type); +} + +// +// Compare two global objects from two compilation units and see if they match +// well enough. Rules can be different for intra- vs. cross-stage matching. +// +// This function only does one of intra- or cross-stage matching per call. +// +void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage) +{ +#ifndef GLSLANG_WEB + bool writeTypeComparison = false; + + // Types have to match + if (symbol.getType() != unitSymbol.getType()) { + // but, we make an exception if one is an implicit array and the other is sized + if (! (symbol.getType().isArray() && unitSymbol.getType().isArray() && + symbol.getType().sameElementType(unitSymbol.getType()) && + (symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()))) { + error(infoSink, "Types must match:"); + writeTypeComparison = true; + } + } + + // Qualifiers have to (almost) match + + // Storage... + if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) { + error(infoSink, "Storage qualifiers must match:"); + writeTypeComparison = true; + } + + // Uniform and buffer blocks must either both have an instance name, or + // must both be anonymous. The names don't need to match though. + if (symbol.getQualifier().isUniformOrBuffer() && + (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()))) { + error(infoSink, "Matched Uniform or Storage blocks must all be anonymous," + " or all be named:"); + writeTypeComparison = true; + } + + if (symbol.getQualifier().storage == unitSymbol.getQualifier().storage && + (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()) || + (!IsAnonymous(symbol.getName()) && symbol.getName() != unitSymbol.getName()))) { + warn(infoSink, "Matched shader interfaces are using different instance names."); + writeTypeComparison = true; + } + + // Precision... + if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) { + error(infoSink, "Precision qualifiers must match:"); + writeTypeComparison = true; + } + + // Invariance... + if (!
crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) { + error(infoSink, "Presence of invariant qualifier must match:"); + writeTypeComparison = true; + } + + // Precise... + if (! crossStage && symbol.getQualifier().isNoContraction() != unitSymbol.getQualifier().isNoContraction()) { + error(infoSink, "Presence of precise qualifier must match:"); + writeTypeComparison = true; + } + + // Auxiliary and interpolation... + if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid || + symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth || + symbol.getQualifier().flat != unitSymbol.getQualifier().flat || + symbol.getQualifier().isSample()!= unitSymbol.getQualifier().isSample() || + symbol.getQualifier().isPatch() != unitSymbol.getQualifier().isPatch() || + symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective()) { + error(infoSink, "Interpolation and auxiliary storage qualifiers must match:"); + writeTypeComparison = true; + } + + // Memory... + if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent || + symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent || + symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent || + symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent || + symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent || + symbol.getQualifier().shadercallcoherent!= unitSymbol.getQualifier().shadercallcoherent || + symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate || + symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil || + symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict || + symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly || + symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) { + error(infoSink, "Memory qualifiers must match:"); + writeTypeComparison = true; + } + + // Layouts... + // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec + // requires separate user-supplied offset from actual computed offset, but + // current implementation only has one offset. + if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix || + symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking || + symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation || + symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent || + symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex || + symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding || + (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) { + error(infoSink, "Layout qualification must match:"); + writeTypeComparison = true; + } + + // Initializers have to match, if both are present, and if we don't already know the types don't match + if (! writeTypeComparison) { + if (! symbol.getConstArray().empty() && ! 
unitSymbol.getConstArray().empty()) { + if (symbol.getConstArray() != unitSymbol.getConstArray()) { + error(infoSink, "Initializers must match:"); + infoSink.info << " " << symbol.getName() << "\n"; + } + } + } + + if (writeTypeComparison) { + infoSink.info << " " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus "; + if (symbol.getName() != unitSymbol.getName()) + infoSink.info << unitSymbol.getName() << ": "; + + infoSink.info << "\"" << unitSymbol.getType().getCompleteString() << "\"\n"; + } +#endif +} + +// +// Do final link-time error checking of a complete (merged) intermediate representation. +// (Much error checking was done during merging). +// +// Also, lock in defaults of things not set, including array sizes. +// +void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled) +{ + if (getTreeRoot() == nullptr) + return; + + if (numEntryPoints < 1) { + if (getSource() == EShSourceGlsl) + error(infoSink, "Missing entry point: Each stage requires one entry point"); + else + warn(infoSink, "Entry point not found"); + } + + // recursion and missing body checking + checkCallGraphCycles(infoSink); + checkCallGraphBodies(infoSink, keepUncalled); + + // overlap/alias/missing I/O, etc. + inOutLocationCheck(infoSink); + +#ifndef GLSLANG_WEB + if (getNumPushConstants() > 1) + error(infoSink, "Only one push_constant block is allowed per stage"); + + // invocations + if (invocations == TQualifier::layoutNotSet) + invocations = 1; + + if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex")) + error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)"); + if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex")) + error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_ClipDistance is preferred)"); + + if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData"))) + error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs"); + if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData")) + error(infoSink, "Cannot use both gl_FragColor and gl_FragData"); + + for (size_t b = 0; b < xfbBuffers.size(); ++b) { + if (xfbBuffers[b].contains64BitType) + RoundToPow2(xfbBuffers[b].implicitStride, 8); + else if (xfbBuffers[b].contains32BitType) + RoundToPow2(xfbBuffers[b].implicitStride, 4); + else if (xfbBuffers[b].contains16BitType) + RoundToPow2(xfbBuffers[b].implicitStride, 2); + + // "It is a compile-time or link-time error to have + // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or + // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a + // compile-time or link-time error to have different values specified for the stride for the same buffer." 
+ if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) { + error(infoSink, "xfb_stride is too small to hold all buffer entries:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n"; + } + if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd) + xfbBuffers[b].stride = xfbBuffers[b].implicitStride; + + // "If the buffer is capturing any + // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a + // multiple of 4, or a compile-time or link-time error results." + if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) { + error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; + } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) { + error(infoSink, "xfb_stride must be multiple of 4:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; + } + // "If the buffer is capturing any + // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2" + else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) { + error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; + } + + // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the + // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents." + if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) { + error(infoSink, "xfb_stride is too large:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n"; + } + } + + switch (language) { + case EShLangVertex: + break; + case EShLangTessControl: + if (vertices == TQualifier::layoutNotSet) + error(infoSink, "At least one shader must specify an output layout(vertices=...)"); + break; + case EShLangTessEvaluation: + if (getSource() == EShSourceGlsl) { + if (inputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an input layout primitive"); + if (vertexSpacing == EvsNone) + vertexSpacing = EvsEqual; + if (vertexOrder == EvoNone) + vertexOrder = EvoCcw; + } + break; + case EShLangGeometry: + if (inputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an input layout primitive"); + if (outputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an output layout primitive"); + if (vertices == TQualifier::layoutNotSet) + error(infoSink, "At least one shader must specify a layout(max_vertices = value)"); + break; + case EShLangFragment: + // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in + // ParseHelper.cpp. 
So if we reach here, this must be GL_EXT_post_depth_coverage + // requiring explicit early_fragment_tests + if (getPostDepthCoverage() && !getEarlyFragmentTests()) + error(infoSink, "post_depth_coverage requires early_fragment_tests"); + break; + case EShLangCompute: + break; + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + if (numShaderRecordBlocks > 1) + error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage"); + break; + case EShLangMeshNV: + // NV_mesh_shader doesn't allow use of both single-view and per-view builtins. + if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV")) + error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV"); + if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV")) + error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV"); + if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV")) + error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV"); + if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV")) + error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV"); + if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV")) + error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV"); + if (outputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an output layout primitive"); + if (vertices == TQualifier::layoutNotSet) + error(infoSink, "At least one shader must specify a layout(max_vertices = value)"); + if (primitives == TQualifier::layoutNotSet) + error(infoSink, "At least one shader must specify a layout(max_primitives = value)"); + // fall through + case EShLangTaskNV: + if (numTaskNVBlocks > 1) + error(infoSink, "Only one taskNV interface block is allowed per shader"); + break; + default: + error(infoSink, "Unknown Stage."); + break; + } + + // Process the tree for any node-specific work. + class TFinalLinkTraverser : public TIntermTraverser { + public: + TFinalLinkTraverser() { } + virtual ~TFinalLinkTraverser() { } + + virtual void visitSymbol(TIntermSymbol* symbol) + { + // Implicitly size arrays. + // If an unsized array is left as unsized, it effectively + // becomes run-time sized. + symbol->getWritableType().adoptImplicitArraySizes(false); + } + } finalLinkTraverser; + + treeRoot->traverse(&finalLinkTraverser); +#endif +} + +// +// See if the call graph contains any static recursion, which is disallowed +// by the specification. +// +void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink) +{ + // Clear fields we'll use for this. + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + call->visited = false; + call->currentPath = false; + call->errorGiven = false; + } + + // + // Loop, looking for a new connected subgraph. One subgraph is handled per loop iteration. + // + + TCall* newRoot; + do { + // See if we have unvisited parts of the graph. + newRoot = 0; + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (! call->visited) { + newRoot = &(*call); + break; + } + } + + // If not, we are done. + if (! newRoot) + break; + + // Otherwise, we found a new subgraph, process it: + // See what all can be reached by this new root, and if any of + // that is recursive. 
This is done by depth-first traversals, seeing + if a new call is found that was already in the currentPath (a back edge), + thereby detecting recursion. + std::list<TCall*> stack; + newRoot->currentPath = true; // currentPath will be true iff it is on the stack + stack.push_back(newRoot); + while (! stack.empty()) { + // get a caller + TCall* call = stack.back(); + + // Add to the stack just one callee. + // This algorithm always terminates, because only !visited and !currentPath causes a push + // and all pushes change currentPath to true, and all pops change visited to true. + TGraph::iterator child = callGraph.begin(); + for (; child != callGraph.end(); ++child) { + + // If we already visited this node, its whole subgraph has already been processed, so skip it. + if (child->visited) + continue; + + if (call->callee == child->caller) { + if (child->currentPath) { + // Then, we found a back edge + if (! child->errorGiven) { + error(infoSink, "Recursion detected:"); + infoSink.info << " " << call->callee << " calling " << child->callee << "\n"; + child->errorGiven = true; + recursive = true; + } + } else { + child->currentPath = true; + stack.push_back(&(*child)); + break; + } + } + } + if (child == callGraph.end()) { + // no more callees, we bottomed out, never look at this node again + stack.back()->currentPath = false; + stack.back()->visited = true; + stack.pop_back(); + } + } // end while, meaning nothing left to process in this subtree + + } while (newRoot); // redundant loop check; should always exit via the 'break' above +} + +// +// See which functions are reachable from the entry point and which have bodies. +// Reachable ones with missing bodies are errors. +// Unreachable bodies are dead code. +// +void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled) +{ + // Clear fields we'll use for this. + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + call->visited = false; + call->calleeBodyPosition = -1; + } + + // The top level of the AST includes function definitions (bodies). + // Compare these to function calls in the call graph. + // We'll end up knowing which have bodies, and if so, + // how to map the call-graph node to the location in the AST. + TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence(); + std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable + for (int f = 0; f < (int)functionSequence.size(); ++f) { + glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate(); + if (node && (node->getOp() == glslang::EOpFunction)) { + if (node->getName().compare(getEntryPointMangledName().c_str()) != 0) + reachable[f] = false; // so that function bodies are unreachable, until proven otherwise + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (call->callee == node->getName()) + call->calleeBodyPosition = f; + } + } + } + + // Start call-graph traversal by visiting the entry point nodes. + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (call->caller.compare(getEntryPointMangledName().c_str()) == 0) + call->visited = true; + } + + // Propagate 'visited' through the call-graph to every part of the graph it + // can reach (seeded with the entry-point setting above); a distilled sketch of this sweep follows.
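Stripped down to a bare edge list, the sweep is a plain fixed-point iteration; a minimal sketch (Edge and propagate() are hypothetical stand-ins for TCall and the loop below):

    #include <string>
    #include <vector>

    struct Edge { std::string caller, callee; bool visited = false; };

    // Seed the edges leaving the entry point, then keep sweeping until no
    // edge can extend the visited set, exactly as the do/while below does.
    static void propagate(std::vector<Edge>& graph, const std::string& entry) {
        for (auto& e : graph)
            if (e.caller == entry)
                e.visited = true;
        bool changed;
        do {
            changed = false;
            for (const auto& from : graph)
                if (from.visited)
                    for (auto& to : graph)
                        if (!to.visited && from.callee == to.caller) {
                            to.visited = true;
                            changed = true;
                        }
        } while (changed);
    }

    int main() {
        std::vector<Edge> g = {{"main", "f"}, {"f", "g"}, {"h", "i"}};
        propagate(g, "main");
        return (g[0].visited && g[1].visited && !g[2].visited) ? 0 : 1;
    }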
+ bool changed; + do { + changed = false; + for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) { + if (call1->visited) { + for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) { + if (! call2->visited) { + if (call1->callee == call2->caller) { + changed = true; + call2->visited = true; + } + } + } + } + } + } while (changed); + + // Any call-graph node set to visited but without a callee body is an error. + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (call->visited) { + if (call->calleeBodyPosition == -1) { + error(infoSink, "No function definition (body) found: "); + infoSink.info << " " << call->callee << "\n"; + } else + reachable[call->calleeBodyPosition] = true; + } + } + + // Bodies in the AST not reached by the call graph are dead; + // clear them out, since they can't be reached and also can't + // be translated further due to possibility of being ill defined. + if (! keepUncalled) { + for (int f = 0; f < (int)functionSequence.size(); ++f) { + if (! reachable[f]) + functionSequence[f] = nullptr; + } + functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end()); + } +} + +// +// Satisfy rules for location qualifiers on inputs and outputs +// +void TIntermediate::inOutLocationCheck(TInfoSink& infoSink) +{ + // ES 3.0 requires all outputs to have location qualifiers if there is more than one output + bool fragOutWithNoLocation = false; + int numFragOut = 0; + + // TODO: linker functionality: location collision checking + + TIntermSequence& linkObjects = findLinkerObjects()->getSequence(); + for (size_t i = 0; i < linkObjects.size(); ++i) { + const TType& type = linkObjects[i]->getAsTyped()->getType(); + const TQualifier& qualifier = type.getQualifier(); + if (language == EShLangFragment) { + if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) { + ++numFragOut; + if (!qualifier.hasAnyLocation()) + fragOutWithNoLocation = true; + } + } + } + + if (isEsProfile()) { + if (numFragOut > 1 && fragOutWithNoLocation) + error(infoSink, "when more than one fragment shader output, all must have location qualifiers"); + } +} + +TIntermAggregate* TIntermediate::findLinkerObjects() const +{ + // Get the top-level globals + TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence(); + + // Get the last member of the sequences, expected to be the linker-object lists + assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects); + + return globals.back()->getAsAggregate(); +} + +// See if a variable was both a user-declared output and used. +// Note: the spec discusses writing to one, but this looks at read or write, which +// is more useful, and perhaps the spec should be changed to reflect that. +bool TIntermediate::userOutputUsed() const +{ + const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence(); + + bool found = false; + for (size_t i = 0; i < linkerObjects.size(); ++i) { + const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode(); + if (symbolNode.getQualifier().storage == EvqVaryingOut && + symbolNode.getName().compare(0, 3, "gl_") != 0 && + inIoAccessed(symbolNode.getName())) { + found = true; + break; + } + } + + return found; +} + +// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions +// as the accumulation is done. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. 
+// +// typeCollision is set to true if there is no direct collision, but the types in the same location +// are different. +// +int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision) +{ + typeCollision = false; + + int set; + if (qualifier.isPipeInput()) + set = 0; + else if (qualifier.isPipeOutput()) + set = 1; + else if (qualifier.storage == EvqUniform) + set = 2; + else if (qualifier.storage == EvqBuffer) + set = 3; + else + return -1; + + int size; + if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) { + if (type.isSizedArray()) + size = type.getCumulativeArraySize(); + else + size = 1; + } else { + // Strip off the outer array dimension for those having an extra one. + if (type.isArray() && qualifier.isArrayedIo(language)) { + TType elementType(type, 0); + size = computeTypeLocationSize(elementType, language); + } else + size = computeTypeLocationSize(type, language); + } + + // Locations, and components within locations. + // + // Almost always, dealing with components means a single location is involved. + // The exception is a dvec3. From the spec: + // + // "A dvec3 will consume all four components of the first location and components 0 and 1 of + // the second location. This leaves components 2 and 3 available for other component-qualified + // declarations." + // + // That means, without ever mentioning a component, a component range + // for a different location gets specified, if it's not a vertex shader input. (!) + // (A vertex shader input will show using only one location, even for a dvec3/4.) + // + // So, for the case of dvec3, we need two independent ioRanges. + + int collision = -1; // no collision +#ifndef GLSLANG_WEB + if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 && + (qualifier.isPipeInput() || qualifier.isPipeOutput())) { + // Dealing with dvec3 in/out split across two locations. + // Need two io-ranges. + // The case where the dvec3 doesn't start at component 0 was previously caught as overflow. + + // First range: + TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation); + TRange componentRange(0, 3); + TIoRange range(locationRange, componentRange, type.getBasicType(), 0); + + // check for collisions + collision = checkLocationRange(set, range, type, typeCollision); + if (collision < 0) { + usedIo[set].push_back(range); + + // Second range: + TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1); + TRange componentRange2(0, 1); + TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0); + + // check for collisions + collision = checkLocationRange(set, range2, type, typeCollision); + if (collision < 0) + usedIo[set].push_back(range2); + } + } else +#endif + { + // Not a dvec3 in/out split across two locations, generic path. + // Need a single IO-range block. + + TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1); + TRange componentRange(0, 3); + if (qualifier.hasComponent() || type.getVectorSize() > 0) { + int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1); + if (qualifier.hasComponent()) + componentRange.start = qualifier.layoutComponent; + componentRange.last = componentRange.start + consumedComponents - 1; + } + + // combine location and component ranges + TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? 
qualifier.getIndex() : 0); + + // check for collisions, except for vertex inputs on desktop targeting OpenGL + if (! (!isEsProfile() && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0) + collision = checkLocationRange(set, range, type, typeCollision); + + if (collision < 0) + usedIo[set].push_back(range); + } + + return collision; +} + +// Compare a new (the passed in) 'range' against the existing set, and see +// if there are any collisions. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. +// +int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision) +{ + for (size_t r = 0; r < usedIo[set].size(); ++r) { + if (range.overlap(usedIo[set][r])) { + // there is a collision; pick one + return std::max(range.location.start, usedIo[set][r].location.start); + } else if (range.location.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) { + // aliased-type mismatch + typeCollision = true; + return std::max(range.location.start, usedIo[set][r].location.start); + } + } + + return -1; // no collision +} + +// Accumulate bindings and offsets, and check for collisions +// as the accumulation is done. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. +// +int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets) +{ + TRange bindingRange(binding, binding); + TRange offsetRange(offset, offset + numOffsets - 1); + TOffsetRange range(bindingRange, offsetRange); + + // check for collisions, except for vertex inputs on desktop + for (size_t r = 0; r < usedAtomics.size(); ++r) { + if (range.overlap(usedAtomics[r])) { + // there is a collision; pick one + return std::max(offset, usedAtomics[r].offset.start); + } + } + + usedAtomics.push_back(range); + + return -1; // no collision +} + +// Accumulate used constant_id values. +// +// Return false is one was already used. +bool TIntermediate::addUsedConstantId(int id) +{ + if (usedConstantId.find(id) != usedConstantId.end()) + return false; + + usedConstantId.insert(id); + + return true; +} + +// Recursively figure out how many locations are used up by an input or output type. +// Return the size of type, as measured by "locations". +int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage) +{ + // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n + // consecutive locations..." + if (type.isArray()) { + // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness + // TODO: are there valid cases of having an unsized array with a location? If so, running this code too early. + TType elementType(type, 0); + if (type.isSizedArray() && !type.getQualifier().isPerView()) + return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage); + else { +#ifndef GLSLANG_WEB + // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];" + elementType.getQualifier().perViewNV = false; +#endif + return computeTypeLocationSize(elementType, stage); + } + } + + // "The locations consumed by block and structure members are determined by applying the rules above + // recursively..." 
+ if (type.isStruct()) {
+ int size = 0;
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ size += computeTypeLocationSize(memberType, stage);
+ }
+ return size;
+ }
+
+ // ES: "If a shader input is any scalar or vector type, it will consume a single location."
+
+ // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
+ // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
+ // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
+ // consume only a single location, in all stages."
+ if (type.isScalar())
+ return 1;
+ if (type.isVector()) {
+ if (stage == EShLangVertex && type.getQualifier().isPipeInput())
+ return 1;
+ if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
+ return 2;
+ else
+ return 1;
+ }
+
+ // "If the declared input is an n x m single- or double-precision matrix, ...
+ // The number of locations assigned for each matrix will be the same as
+ // for an n-element array of m-component vectors..."
+ if (type.isMatrix()) {
+ TType columnType(type, 0);
+ return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
+ }
+
+ assert(0);
+ return 1;
+}
+
+// Same as computeTypeLocationSize but for uniforms
+int TIntermediate::computeTypeUniformLocationSize(const TType& type)
+{
+ // "Individual elements of a uniform array are assigned
+ // consecutive locations with the first element taking location
+ // location."
+ if (type.isArray()) {
+ // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ TType elementType(type, 0);
+ if (type.isSizedArray()) {
+ return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
+ } else {
+ // TODO: are there valid cases of having an implicitly-sized array with a location? If so, this code is running too early.
+ return computeTypeUniformLocationSize(elementType);
+ }
+ }
+
+ // "Each subsequent inner-most member or element gets incremental
+ // locations for the entire structure or array."
+ if (type.isStruct()) {
+ int size = 0;
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ size += computeTypeUniformLocationSize(memberType);
+ }
+ return size;
+ }
+
+ return 1;
+}
+
+#ifndef GLSLANG_WEB
+
+// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+int TIntermediate::addXfbBufferOffset(const TType& type)
+{
+ const TQualifier& qualifier = type.getQualifier();
+
+ assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
+ TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
+
+ // compute the range
+ unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
+ buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
+ TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
+
+ // check for collisions
+ for (size_t r = 0; r < buffer.ranges.size(); ++r) {
+ if (range.overlap(buffer.ranges[r])) {
+ // there is a collision; pick an example to return
+ return std::max(range.start, buffer.ranges[r].start);
+ }
+ }
+
+ buffer.ranges.push_back(range);
+
+ return -1; // no collision
+}
+
+// Recursively figure out how many bytes of xfb buffer are used by the given type.
+// Return the size of type, in bytes.
+// Sets contains64BitType to true if the type contains a 64-bit data type.
+// Sets contains32BitType to true if the type contains a 32-bit data type.
+// Sets contains16BitType to true if the type contains a 16-bit data type.
+// N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
+unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
+{
+ // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
+ // and the space taken in the buffer will be a multiple of 8.
+ // ...within the qualified entity, subsequent components are each
+ // assigned, in order, to the next available offset aligned to a multiple of
+ // that component's size. Aggregate types are flattened down to the component
+ // level to get this sequence of components."
+
+ if (type.isArray()) {
+ // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ assert(type.isSizedArray());
+ TType elementType(type, 0);
+ return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains32BitType, contains16BitType);
+ }
+
+ if (type.isStruct()) {
+ unsigned int size = 0;
+ bool structContains64BitType = false;
+ bool structContains32BitType = false;
+ bool structContains16BitType = false;
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ // "... if applied to
+ // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
+ // and the space taken in the buffer will be a multiple of 8."
+ bool memberContains64BitType = false;
+ bool memberContains32BitType = false;
+ bool memberContains16BitType = false;
+ int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
+ if (memberContains64BitType) {
+ structContains64BitType = true;
+ RoundToPow2(size, 8);
+ } else if (memberContains32BitType) {
+ structContains32BitType = true;
+ RoundToPow2(size, 4);
+ } else if (memberContains16BitType) {
+ structContains16BitType = true;
+ RoundToPow2(size, 2);
+ }
+ size += memberSize;
+ }
+
+ if (structContains64BitType) {
+ contains64BitType = true;
+ RoundToPow2(size, 8);
+ } else if (structContains32BitType) {
+ contains32BitType = true;
+ RoundToPow2(size, 4);
+ } else if (structContains16BitType) {
+ contains16BitType = true;
+ RoundToPow2(size, 2);
+ }
+ return size;
+ }
+
+ int numComponents;
+ if (type.isScalar())
+ numComponents = 1;
+ else if (type.isVector())
+ numComponents = type.getVectorSize();
+ else if (type.isMatrix())
+ numComponents = type.getMatrixCols() * type.getMatrixRows();
+ else {
+ assert(0);
+ numComponents = 1;
+ }
+
+ if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
+ contains64BitType = true;
+ return 8 * numComponents;
+ } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
+ contains16BitType = true;
+ return 2 * numComponents;
+ } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
+ return numComponents;
+ else {
+ contains32BitType = true;
+ return 4 * numComponents;
+ }
+}
+
+#endif
+
+const int baseAlignmentVec4Std140 = 16;
+
+// Return the size and alignment of a component of the given type.
+// The size is returned in the 'size' parameter
+// Return value is the alignment.
+int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
+{
+#ifdef GLSLANG_WEB
+ size = 4; return 4;
+#endif
+
+ switch (type.getBasicType()) {
+ case EbtInt64:
+ case EbtUint64:
+ case EbtDouble: size = 8; return 8;
+ case EbtFloat16: size = 2; return 2;
+ case EbtInt8:
+ case EbtUint8: size = 1; return 1;
+ case EbtInt16:
+ case EbtUint16: size = 2; return 2;
+ case EbtReference: size = 8; return 8;
+ default: size = 4; return 4;
+ }
+}
+
+// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
+// Operates recursively.
+//
+// If std140 is true, it does the rounding up to vec4 size required by std140,
+// otherwise it does not, yielding std430 rules.
+//
+// The size is returned in the 'size' parameter
+//
+// The stride is only non-0 for arrays or matrices, and is the stride of the
+// top-level object nested within the type. E.g., for an array of matrices,
+// it is the distance needed between matrices, despite the rules saying the
+// stride comes from the flattening down to vectors.
+//
+// Return value is the alignment of the type.
+int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
+{
+ int alignment;
+
+ bool std140 = layoutPacking == glslang::ElpStd140;
+ // When using the std140 storage layout, structures will be laid out in buffer
+ // storage with its members stored in monotonically increasing order based on their
+ // location in the declaration.
+ // A structure and each structure member have a base
+ // offset and a base alignment, from which an aligned offset is computed by rounding
+ // the base offset up to a multiple of the base alignment. The base offset of the first
+ // member of a structure is taken from the aligned offset of the structure itself. The
+ // base offset of all other structure members is derived by taking the offset of the
+ // last basic machine unit consumed by the previous member and adding one. Each
+ // structure member is stored in memory at its aligned offset. The members of a top-
+ // level uniform block are laid out in buffer storage by treating the uniform block as
+ // a structure with a base offset of zero.
+ //
+ // 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
+ //
+ // 2. If the member is a two- or four-component vector with components consuming N basic
+ // machine units, the base alignment is 2N or 4N, respectively.
+ //
+ // 3. If the member is a three-component vector with components consuming N
+ // basic machine units, the base alignment is 4N.
+ //
+ // 4. If the member is an array of scalars or vectors, the base alignment and array
+ // stride are set to match the base alignment of a single array element, according
+ // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
+ // array may have padding at the end; the base offset of the member following
+ // the array is rounded up to the next multiple of the base alignment.
+ //
+ // 5. If the member is a column-major matrix with C columns and R rows, the
+ // matrix is stored identically to an array of C column vectors with R
+ // components each, according to rule (4).
+ //
+ // 6. If the member is an array of S column-major matrices with C columns and
+ // R rows, the matrix is stored identically to a row of S X C column vectors
+ // with R components each, according to rule (4).
+ //
+ // 7. If the member is a row-major matrix with C columns and R rows, the matrix
+ // is stored identically to an array of R row vectors with C components each,
+ // according to rule (4).
+ //
+ // 8. If the member is an array of S row-major matrices with C columns and R
+ // rows, the matrix is stored identically to a row of S X R row vectors with C
+ // components each, according to rule (4).
+ //
+ // 9. If the member is a structure, the base alignment of the structure is N, where
+ // N is the largest base alignment value of any of its members, and rounded
+ // up to the base alignment of a vec4. The individual members of this substructure
+ // are then assigned offsets by applying this set of rules recursively,
+ // where the base offset of the first member of the sub-structure is equal to the
+ // aligned offset of the structure. The structure may have padding at the end;
+ // the base offset of the member following the sub-structure is rounded up to
+ // the next multiple of the base alignment of the structure.
+ //
+ // 10. If the member is an array of S structures, the S elements of the array are laid
+ // out in order, according to rule (9).
+ //
+ // Assuming, for rule 10: The stride is the same as the size of an element.
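+ //
+ // A worked illustration (editorial, not spec text, assuming std140): for
+ // struct { float f; vec3 v; mat2 m; }
+ // rule 1 puts f at offset 0 (alignment 4), rule 3 gives v alignment 4N = 16
+ // so it lands at offset 16, and rules 4/5 treat m as an array of two vec2
+ // columns whose stride rounds up to vec4 size, placing m at offset 32 with
+ // size 32; rule 9 then rounds the struct's total size up to 64.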
+
+ stride = 0;
+ int dummyStride;
+
+ // rules 4, 6, 8, and 10
+ if (type.isArray()) {
+ // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ TType derefType(type, 0);
+ alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
+ if (std140)
+ alignment = std::max(baseAlignmentVec4Std140, alignment);
+ RoundToPow2(size, alignment);
+ stride = size; // uses full matrix size for stride of an array of matrices (not quite what rules 6/8 say, but what's expected)
+ // uses the assumption for rule 10 in the comment above
+ size = stride * type.getOuterArraySize();
+ return alignment;
+ }
+
+ // rule 9
+ if (type.getBasicType() == EbtStruct) {
+ const TTypeList& memberList = *type.getStruct();
+
+ size = 0;
+ int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
+ for (size_t m = 0; m < memberList.size(); ++m) {
+ int memberSize;
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
+ int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
+ (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
+ maxAlignment = std::max(maxAlignment, memberAlignment);
+ RoundToPow2(size, memberAlignment);
+ size += memberSize;
+ }
+
+ // The structure may have padding at the end; the base offset of
+ // the member following the sub-structure is rounded up to the next
+ // multiple of the base alignment of the structure.
+ RoundToPow2(size, maxAlignment);
+
+ return maxAlignment;
+ }
+
+ // rule 1
+ if (type.isScalar())
+ return getBaseAlignmentScalar(type, size);
+
+ // rules 2 and 3
+ if (type.isVector()) {
+ int scalarAlign = getBaseAlignmentScalar(type, size);
+ switch (type.getVectorSize()) {
+ case 1: // HLSL has this, GLSL does not
+ return scalarAlign;
+ case 2:
+ size *= 2;
+ return 2 * scalarAlign;
+ default:
+ size *= type.getVectorSize();
+ return 4 * scalarAlign;
+ }
+ }
+
+ // rules 5 and 7
+ if (type.isMatrix()) {
+ // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
+ TType derefType(type, 0, rowMajor);
+
+ alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
+ if (std140)
+ alignment = std::max(baseAlignmentVec4Std140, alignment);
+ RoundToPow2(size, alignment);
+ stride = size; // use intra-matrix stride for the stride of just a matrix
+ if (rowMajor)
+ size = stride * type.getMatrixRows();
+ else
+ size = stride * type.getMatrixCols();
+
+ return alignment;
+ }
+
+ assert(0); // all cases should be covered above
+ size = baseAlignmentVec4Std140;
+ return baseAlignmentVec4Std140;
+}
+
+// To aid the basic HLSL rule about crossing vec4 boundaries.
+bool TIntermediate::improperStraddle(const TType& type, int size, int offset)
+{
+ if (! type.isVector() || type.isArray())
+ return false;
+
+ return size <= 16 ?
offset / 16 != (offset + size - 1) / 16
+ : offset % 16 != 0;
+}
+
+int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
+{
+ int alignment;
+
+ stride = 0;
+ int dummyStride;
+
+ if (type.isArray()) {
+ TType derefType(type, 0);
+ alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
+
+ stride = size;
+ RoundToPow2(stride, alignment);
+
+ size = stride * (type.getOuterArraySize() - 1) + size;
+ return alignment;
+ }
+
+ if (type.getBasicType() == EbtStruct) {
+ const TTypeList& memberList = *type.getStruct();
+
+ size = 0;
+ int maxAlignment = 0;
+ for (size_t m = 0; m < memberList.size(); ++m) {
+ int memberSize;
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
+ int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
+ (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
+ maxAlignment = std::max(maxAlignment, memberAlignment);
+ RoundToPow2(size, memberAlignment);
+ size += memberSize;
+ }
+
+ return maxAlignment;
+ }
+
+ if (type.isScalar())
+ return getBaseAlignmentScalar(type, size);
+
+ if (type.isVector()) {
+ int scalarAlign = getBaseAlignmentScalar(type, size);
+
+ size *= type.getVectorSize();
+ return scalarAlign;
+ }
+
+ if (type.isMatrix()) {
+ TType derefType(type, 0, rowMajor);
+
+ alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
+
+ stride = size; // use intra-matrix stride for the stride of just a matrix
+ if (rowMajor)
+ size = stride * type.getMatrixRows();
+ else
+ size = stride * type.getMatrixCols();
+
+ return alignment;
+ }
+
+ assert(0); // all cases should be covered above
+ size = 1;
+ return 1;
+}
+
+int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
+{
+ if (layoutPacking == glslang::ElpScalar) {
+ return getScalarAlignment(type, size, stride, rowMajor);
+ } else {
+ return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
+ }
+}
+
+// shared calculation by getOffset and getOffsets
+void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
+{
+ int dummyStride;
+
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
+ int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
+ parentType.getQualifier().layoutPacking,
+ subMatrixLayout != ElmNone
+ ? subMatrixLayout == ElmRowMajor
+ : parentType.getQualifier().layoutMatrix == ElmRowMajor);
+ RoundToPow2(offset, memberAlignment);
+}
+
+// Lookup or calculate the offset of a block member, using the recursively
+// defined block offset rules.
+int TIntermediate::getOffset(const TType& type, int index)
+{
+ const TTypeList& memberList = *type.getStruct();
+
+ // Don't calculate the offset if one is already present; it could be user supplied
+ // and differ from what would be calculated. That makes this faster, but it is
+ // not just an optimization: it is required for correctness.
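+ //
+ // (Illustrative example, not upstream text: for a std140 block
+ // { float a; vec3 b; }, getOffset(block, 1) accumulates a's 4 bytes and
+ // then rounds the running offset up to b's base alignment of 16,
+ // returning offset 16.)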
+ if (memberList[index].type->getQualifier().hasOffset()) + return memberList[index].type->getQualifier().layoutOffset; + + int memberSize = 0; + int offset = 0; + for (int m = 0; m <= index; ++m) { + updateOffset(type, *memberList[m].type, offset, memberSize); + + if (m < index) + offset += memberSize; + } + + return offset; +} + +// Calculate the block data size. +// Block arrayness is not taken into account, each element is backed by a separate buffer. +int TIntermediate::getBlockSize(const TType& blockType) +{ + const TTypeList& memberList = *blockType.getStruct(); + int lastIndex = (int)memberList.size() - 1; + int lastOffset = getOffset(blockType, lastIndex); + + int lastMemberSize; + int dummyStride; + getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride, + blockType.getQualifier().layoutPacking, + blockType.getQualifier().layoutMatrix == ElmRowMajor); + + return lastOffset + lastMemberSize; +} + +int TIntermediate::computeBufferReferenceTypeSize(const TType& type) +{ + assert(type.isReference()); + int size = getBlockSize(*type.getReferentType()); + + int align = type.getBufferReferenceAlignment(); + + if (align) { + size = (size + align - 1) & ~(align-1); + } + + return size; +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/localintermediate.h b/dep/glslang/glslang/MachineIndependent/localintermediate.h new file mode 100644 index 000000000..b6a445fd5 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/localintermediate.h @@ -0,0 +1,1030 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2016 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+//
+
+#ifndef _LOCAL_INTERMEDIATE_INCLUDED_
+#define _LOCAL_INTERMEDIATE_INCLUDED_
+
+#include "../Include/intermediate.h"
+#include "../Public/ShaderLang.h"
+#include "Versions.h"
+
+#include <string>
+#include <vector>
+#include <algorithm>
+#include <set>
+#include <array>
+
+class TInfoSink;
+
+namespace glslang {
+
+struct TMatrixSelector {
+ int coord1; // stay agnostic about column/row; this is parse order
+ int coord2;
+};
+
+typedef int TVectorSelector;
+
+const int MaxSwizzleSelectors = 4;
+
+template <typename selectorType>
+class TSwizzleSelectors {
+public:
+ TSwizzleSelectors() : size_(0) { }
+
+ void push_back(selectorType comp)
+ {
+ if (size_ < MaxSwizzleSelectors)
+ components[size_++] = comp;
+ }
+ void resize(int s)
+ {
+ assert(s <= size_);
+ size_ = s;
+ }
+ int size() const { return size_; }
+ selectorType operator[](int i) const
+ {
+ assert(i < MaxSwizzleSelectors);
+ return components[i];
+ }
+
+private:
+ int size_;
+ selectorType components[MaxSwizzleSelectors];
+};
+
+//
+// Some helper structures for TIntermediate. Their contents are encapsulated
+// by TIntermediate.
+//
+
+// Used for call-graph algorithms for detecting recursion, missing bodies, and dead bodies.
+// A "call" is a pair: <caller, callee>.
+// There can be duplicates. General assumption is the list is small.
+struct TCall {
+ TCall(const TString& pCaller, const TString& pCallee) : caller(pCaller), callee(pCallee) { }
+ TString caller;
+ TString callee;
+ bool visited;
+ bool currentPath;
+ bool errorGiven;
+ int calleeBodyPosition;
+};
+
+// A generic 1-D range.
+struct TRange {
+ TRange(int start, int last) : start(start), last(last) { }
+ bool overlap(const TRange& rhs) const
+ {
+ return last >= rhs.start && start <= rhs.last;
+ }
+ int start;
+ int last;
+};
+
+// An IO range is a 3-D rectangle; the set of (location, component, index) triples all lying
+// within the same location range, component range, and index value. Locations don't alias unless
+// all other dimensions of their range overlap.
+struct TIoRange {
+ TIoRange(TRange location, TRange component, TBasicType basicType, int index)
+ : location(location), component(component), basicType(basicType), index(index) { }
+ bool overlap(const TIoRange& rhs) const
+ {
+ return location.overlap(rhs.location) && component.overlap(rhs.component) && index == rhs.index;
+ }
+ TRange location;
+ TRange component;
+ TBasicType basicType;
+ int index;
+};
+
+// An offset range is a 2-D rectangle; the set of (binding, offset) pairs all lying
+// within the same binding and offset range.
+struct TOffsetRange {
+ TOffsetRange(TRange binding, TRange offset)
+ : binding(binding), offset(offset) { }
+ bool overlap(const TOffsetRange& rhs) const
+ {
+ return binding.overlap(rhs.binding) && offset.overlap(rhs.offset);
+ }
+ TRange binding;
+ TRange offset;
+};
+
+#ifndef GLSLANG_WEB
+// Things that need to be tracked per xfb buffer.
+struct TXfbBuffer {
+ TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false),
+ contains32BitType(false), contains16BitType(false) { }
+ std::vector<TRange> ranges; // byte offsets that have already been assigned
+ unsigned int stride;
+ unsigned int implicitStride;
+ bool contains64BitType;
+ bool contains32BitType;
+ bool contains16BitType;
+};
+#endif
+
+// Track a set of strings describing how the module was processed.
+// This includes command line options, transforms, etc., ideally inclusive enough
+// to reproduce the steps used to transform the input source to the output.
+// E.g., see SPIR-V OpModuleProcessed.
+// Each "process" or "transform" is expressed in the form:
+// process arg0 arg1 arg2 ...
+// process arg0 arg1 arg2 ...
+// where everything is textual, and there can be zero or more arguments
+class TProcesses {
+public:
+ TProcesses() {}
+ ~TProcesses() {}
+
+ void addProcess(const char* process)
+ {
+ processes.push_back(process);
+ }
+ void addProcess(const std::string& process)
+ {
+ processes.push_back(process);
+ }
+ void addArgument(int arg)
+ {
+ processes.back().append(" ");
+ std::string argString = std::to_string(arg);
+ processes.back().append(argString);
+ }
+ void addArgument(const char* arg)
+ {
+ processes.back().append(" ");
+ processes.back().append(arg);
+ }
+ void addArgument(const std::string& arg)
+ {
+ processes.back().append(" ");
+ processes.back().append(arg);
+ }
+ void addIfNonZero(const char* process, int value)
+ {
+ if (value != 0) {
+ addProcess(process);
+ addArgument(value);
+ }
+ }
+
+ const std::vector<std::string>& getProcesses() const { return processes; }
+
+private:
+ std::vector<std::string> processes;
+};
+
+class TSymbolTable;
+class TSymbol;
+class TVariable;
+
+//
+// Texture and Sampler transformation mode.
+//
+enum ComputeDerivativeMode {
+ LayoutDerivativeNone, // default layout as SPV_NV_compute_shader_derivatives not enabled
+ LayoutDerivativeGroupQuads, // derivative_group_quadsNV
+ LayoutDerivativeGroupLinear, // derivative_group_linearNV
+};
+
+class TIdMaps {
+public:
+ TMap<TString, int>& operator[](int i) { return maps[i]; }
+ const TMap<TString, int>& operator[](int i) const { return maps[i]; }
+private:
+ TMap<TString, int> maps[EsiCount];
+};
+
+
+//
+// Set of helper functions to help parse and build the tree.
+//
+class TIntermediate {
+public:
+ explicit TIntermediate(EShLanguage l, int v = 0, EProfile p = ENoProfile) :
+ language(l),
+ profile(p), version(v), treeRoot(0),
+ numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false),
+ invertY(false),
+ useStorageBuffer(false),
+ nanMinMaxClamp(false),
+ depthReplacing(false)
+#ifndef GLSLANG_WEB
+ ,
+ implicitThisName("@this"), implicitCounterName("@count"),
+ source(EShSourceNone),
+ useVulkanMemoryModel(false),
+ invocations(TQualifier::layoutNotSet), vertices(TQualifier::layoutNotSet),
+ inputPrimitive(ElgNone), outputPrimitive(ElgNone),
+ pixelCenterInteger(false), originUpperLeft(false),
+ vertexSpacing(EvsNone), vertexOrder(EvoNone), interlockOrdering(EioNone), pointMode(false), earlyFragmentTests(false),
+ postDepthCoverage(false), depthLayout(EldNone),
+ hlslFunctionality1(false),
+ blendEquations(0), xfbMode(false), multiStream(false),
+ layoutOverrideCoverage(false),
+ geoPassthroughEXT(false),
+ numShaderRecordBlocks(0),
+ computeDerivativeMode(LayoutDerivativeNone),
+ primitives(TQualifier::layoutNotSet),
+ numTaskNVBlocks(0),
+ layoutPrimitiveCulling(false),
+ autoMapBindings(false),
+ autoMapLocations(false),
+ flattenUniformArrays(false),
+ useUnknownFormat(false),
+ hlslOffsets(false),
+ hlslIoMapping(false),
+ useVariablePointers(false),
+ textureSamplerTransformMode(EShTexSampTransKeep),
+ needToLegalize(false),
+ binaryDoubleOutput(false),
+ usePhysicalStorageBuffer(false),
+ uniformLocationBase(0)
+#endif
+ {
+ localSize[0] = 1;
+ localSize[1] = 1;
+ localSize[2] = 1;
+ localSizeNotDefault[0] = false;
+ localSizeNotDefault[1] = false;
+ localSizeNotDefault[2] = false;
+ localSizeSpecId[0] = TQualifier::layoutNotSet;
+ localSizeSpecId[1] = TQualifier::layoutNotSet;
+ localSizeSpecId[2] = TQualifier::layoutNotSet;
+#ifndef GLSLANG_WEB
+ xfbBuffers.resize(TQualifier::layoutXfbBufferEnd);
+ shiftBinding.fill(0);
+#endif
+ }
+
+ void setVersion(int v) { version = v; }
+ int getVersion() const { return version; }
+ void setProfile(EProfile p) { profile = p; }
+ EProfile getProfile() const { return profile; }
+ void setSpv(const SpvVersion& s)
+ {
+ spvVersion = s;
+
+ // client processes
+ if (spvVersion.vulkan > 0)
+ processes.addProcess("client vulkan100");
+ if (spvVersion.openGl > 0)
+ processes.addProcess("client opengl100");
+
+ // target SPV
+ switch (spvVersion.spv) {
+ case 0:
+ break;
+ case EShTargetSpv_1_0:
+ break;
+ case EShTargetSpv_1_1:
+ processes.addProcess("target-env spirv1.1");
+ break;
+ case EShTargetSpv_1_2:
+ processes.addProcess("target-env spirv1.2");
+ break;
+ case EShTargetSpv_1_3:
+ processes.addProcess("target-env spirv1.3");
+ break;
+ case EShTargetSpv_1_4:
+ processes.addProcess("target-env spirv1.4");
+ break;
+ case EShTargetSpv_1_5:
+ processes.addProcess("target-env spirv1.5");
+ break;
+ default:
+ processes.addProcess("target-env spirvUnknown");
+ break;
+ }
+
+ // target-environment processes
+ switch (spvVersion.vulkan) {
+ case 0:
+ break;
+ case EShTargetVulkan_1_0:
+ processes.addProcess("target-env vulkan1.0");
+ break;
+ case EShTargetVulkan_1_1:
+ processes.addProcess("target-env vulkan1.1");
+ break;
+ case EShTargetVulkan_1_2:
+ processes.addProcess("target-env vulkan1.2");
+ break;
+ default:
+ processes.addProcess("target-env vulkanUnknown");
+ break;
+ }
+ if (spvVersion.openGl > 0)
+ processes.addProcess("target-env opengl");
+ }
+ const SpvVersion& getSpv() const { return spvVersion; }
+ EShLanguage getStage() const { return language; }
+ void updateRequestedExtension(const char* extension, TExtensionBehavior behavior) {
+ if(requestedExtensions.find(extension) != requestedExtensions.end()) {
+ requestedExtensions[extension] = behavior;
+ } else {
+ requestedExtensions.insert(std::make_pair(extension, behavior));
+ }
+ }
+
+ const std::map<std::string, TExtensionBehavior>& getRequestedExtensions() const { return requestedExtensions; }
+
+ void setTreeRoot(TIntermNode* r) { treeRoot = r; }
+ TIntermNode* getTreeRoot() const { return treeRoot; }
+ void incrementEntryPointCount() { ++numEntryPoints; }
+ int getNumEntryPoints() const { return numEntryPoints; }
+ int getNumErrors() const { return numErrors; }
+ void addPushConstantCount() { ++numPushConstants; }
+ void setLimits(const TBuiltInResource& r) { resources = r; }
+
+ bool postProcess(TIntermNode*, EShLanguage);
+ void removeTree();
+
+ void setEntryPointName(const char* ep)
+ {
+ entryPointName = ep;
+ processes.addProcess("entry-point");
+ processes.addArgument(entryPointName);
+ }
+ void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; }
+ const std::string& getEntryPointName() const { return entryPointName; }
+ const std::string& getEntryPointMangledName() const { return entryPointMangledName; }
+
+ void setInvertY(bool invert)
+ {
+ invertY = invert;
+ if (invertY)
+ processes.addProcess("invert-y");
+ }
+ bool getInvertY() const { return invertY; }
+
+#ifdef ENABLE_HLSL
+ void setSource(EShSource s) { source = s; }
+ EShSource getSource() const { return source; }
+#else
+ void setSource(EShSource s) { assert(s == EShSourceGlsl); }
+ EShSource getSource() const { return EShSourceGlsl; }
+#endif
+
+ bool isRecursive() const { return recursive; }
+
+ TIntermSymbol* addSymbol(const TVariable&);
+ TIntermSymbol* addSymbol(const TVariable&, const TSourceLoc&);
+ TIntermSymbol* addSymbol(const TType&, const TSourceLoc&);
+ TIntermSymbol* addSymbol(const TIntermSymbol&);
+ TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*);
+ std::tuple<TIntermTyped*, TIntermTyped*> addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1);
+ TIntermTyped* addUniShapeConversion(TOperator, const TType&, TIntermTyped*);
+ TIntermTyped* addConversion(TBasicType convertTo, TIntermTyped* node) const;
+ void addBiShapeConversion(TOperator, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode);
+ TIntermTyped* addShapeConversion(const TType&, TIntermTyped*);
+ TIntermTyped* addBinaryMath(TOperator, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
+ TIntermTyped* addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
+ TIntermTyped* addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc);
+ TIntermTyped* addUnaryMath(TOperator, TIntermTyped* child, TSourceLoc);
+ TIntermTyped* addBuiltInFunctionCall(const TSourceLoc& line, TOperator, bool unary, TIntermNode*, const TType& returnType);
+ bool canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op = EOpNull) const;
+ bool isIntegralPromotion(TBasicType from, TBasicType to) const;
+ bool isFPPromotion(TBasicType from, TBasicType to) const;
+ bool isIntegralConversion(TBasicType from, TBasicType to) const;
+ bool isFPConversion(TBasicType from, TBasicType to) const;
+ bool isFPIntegralConversion(TBasicType from, TBasicType to) const;
+ TOperator mapTypeToConstructorOp(const TType&) const;
+ TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right);
+ TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc&);
+ TIntermAggregate* makeAggregate(TIntermNode* node);
+ TIntermAggregate* makeAggregate(TIntermNode* node, const TSourceLoc&);
+ TIntermAggregate* makeAggregate(const TSourceLoc&);
+ TIntermTyped* setAggregateOperator(TIntermNode*, TOperator, const TType& type, TSourceLoc);
+ bool areAllChildConst(TIntermAggregate* aggrNode);
+ TIntermSelection* addSelection(TIntermTyped* cond, TIntermNodePair code, const TSourceLoc&);
+ TIntermTyped* addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, const TSourceLoc&);
+ TIntermTyped* addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc&);
+ TIntermTyped* addMethod(TIntermTyped*, const TType&, const TString*, const TSourceLoc&);
+ TIntermConstantUnion* addConstantUnion(const TConstUnionArray&, const TType&, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(signed char, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned char, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(signed short, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned short, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(int, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned int, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(long long, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned long long, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(bool, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(double, TBasicType, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(const TString*, const TSourceLoc&, bool literal = false) const;
+ TIntermTyped* promoteConstantUnion(TBasicType, TIntermConstantUnion*) const;
+ bool parseConstTree(TIntermNode*, TConstUnionArray, TOperator, const TType&, bool singleConstantParam = false);
+ TIntermLoop* addLoop(TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst, const TSourceLoc&);
+ TIntermAggregate* addForLoop(TIntermNode*, TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst,
+ const TSourceLoc&, TIntermLoop*&);
+ TIntermBranch* addBranch(TOperator, const TSourceLoc&);
+ TIntermBranch* addBranch(TOperator, TIntermTyped*, const TSourceLoc&);
+ template<typename selectorType> TIntermTyped* addSwizzle(TSwizzleSelectors<selectorType>&, const TSourceLoc&);
+
+ // Low level functions to add nodes (no conversions or other higher level transformations)
+ // If a type is provided, the node's type will be set to it.
+ TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc) const;
+ TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc, const TType&) const;
+ TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc) const;
+ TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc, const TType&) const;
+
+ // Constant folding (in Constant.cpp)
+ TIntermTyped* fold(TIntermAggregate* aggrNode);
+ TIntermTyped* foldConstructor(TIntermAggregate* aggrNode);
+ TIntermTyped* foldDereference(TIntermTyped* node, int index, const TSourceLoc&);
+ TIntermTyped* foldSwizzle(TIntermTyped* node, TSwizzleSelectors<TVectorSelector>& fields, const TSourceLoc&);
+
+ // Tree ops
+ static const TIntermTyped* findLValueBase(const TIntermTyped*, bool swizzleOkay);
+
+ // Linkage related
+ void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&);
+ void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&);
+
+ void setUseStorageBuffer()
+ {
+ useStorageBuffer = true;
+ processes.addProcess("use-storage-buffer");
+ }
+ bool usingStorageBuffer() const { return useStorageBuffer; }
+ void setDepthReplacing() { depthReplacing = true; }
+ bool isDepthReplacing() const { return depthReplacing; }
+ bool setLocalSize(int dim, int size)
+ {
+ if (localSizeNotDefault[dim])
+ return size == localSize[dim];
+ localSizeNotDefault[dim] = true;
+ localSize[dim] = size;
+ return true;
+ }
+ unsigned int getLocalSize(int dim) const { return localSize[dim]; }
+ bool setLocalSizeSpecId(int dim, int id)
+ {
+ if (localSizeSpecId[dim] != TQualifier::layoutNotSet)
+ return id == localSizeSpecId[dim];
+ localSizeSpecId[dim] = id;
+ return true;
+ }
+ int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; }
+#ifdef GLSLANG_WEB
+ void output(TInfoSink&, bool tree) { }
+
+ bool isEsProfile() const { return false; }
+ bool getXfbMode() const { return false; }
+ bool isMultiStream() const { return false; }
+ TLayoutGeometry getOutputPrimitive() const { return ElgNone; }
+ bool getPostDepthCoverage() const { return false; }
+ bool getEarlyFragmentTests() const { return false; }
+ TLayoutDepth getDepth() const { return EldNone; }
+ bool getPixelCenterInteger() const { return false; }
+ void setOriginUpperLeft() { }
+ bool getOriginUpperLeft() const { return true; }
+ TInterlockOrdering getInterlockOrdering() const { return EioNone; }
+
+ bool getAutoMapBindings() const { return false; }
+ bool getAutoMapLocations() const { return false; }
+ int getNumPushConstants() const { return 0; }
+ void addShaderRecordCount() { }
+ void addTaskNVCount() { }
+ void setUseVulkanMemoryModel() { }
+ bool usingVulkanMemoryModel() const { return false; }
+ bool usingPhysicalStorageBuffer() const { return false; }
+ bool usingVariablePointers() const { return false; }
+ unsigned getXfbStride(int buffer) const { return 0; }
+ bool hasLayoutDerivativeModeNone() const { return false; }
+ ComputeDerivativeMode getLayoutDerivativeModeNone() const { return LayoutDerivativeNone; }
+#else
+ void output(TInfoSink&, bool tree);
+
+ bool isEsProfile() const { return profile == EEsProfile; }
+
+ void setShiftBinding(TResourceType res, unsigned int shift)
+ {
+ shiftBinding[res] = shift;
+
+ const char* name = getResourceName(res);
+ if (name != nullptr)
+ processes.addIfNonZero(name, shift);
+ }
+
+ unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; }
+
+ void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned int set)
+ {
+ if (shift == 0) // ignore if there's no shift: it's a no-op.
+ return;
+
+ shiftBindingForSet[res][set] = shift;
+
+ const char* name = getResourceName(res);
+ if (name != nullptr) {
+ processes.addProcess(name);
+ processes.addArgument(shift);
+ processes.addArgument(set);
+ }
+ }
+
+ int getShiftBindingForSet(TResourceType res, unsigned int set) const
+ {
+ const auto shift = shiftBindingForSet[res].find(set);
+ return shift == shiftBindingForSet[res].end() ? -1 : shift->second;
+ }
+ bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); }
+
+ void setResourceSetBinding(const std::vector<std::string>& shift)
+ {
+ resourceSetBinding = shift;
+ if (shift.size() > 0) {
+ processes.addProcess("resource-set-binding");
+ for (int s = 0; s < (int)shift.size(); ++s)
+ processes.addArgument(shift[s]);
+ }
+ }
+ const std::vector<std::string>& getResourceSetBinding() const { return resourceSetBinding; }
+ void setAutoMapBindings(bool map)
+ {
+ autoMapBindings = map;
+ if (autoMapBindings)
+ processes.addProcess("auto-map-bindings");
+ }
+ bool getAutoMapBindings() const { return autoMapBindings; }
+ void setAutoMapLocations(bool map)
+ {
+ autoMapLocations = map;
+ if (autoMapLocations)
+ processes.addProcess("auto-map-locations");
+ }
+ bool getAutoMapLocations() const { return autoMapLocations; }
+
+#ifdef ENABLE_HLSL
+ void setFlattenUniformArrays(bool flatten)
+ {
+ flattenUniformArrays = flatten;
+ if (flattenUniformArrays)
+ processes.addProcess("flatten-uniform-arrays");
+ }
+ bool getFlattenUniformArrays() const { return flattenUniformArrays; }
+#endif
+ void setNoStorageFormat(bool b)
+ {
+ useUnknownFormat = b;
+ if (useUnknownFormat)
+ processes.addProcess("no-storage-format");
+ }
+ bool getNoStorageFormat() const { return useUnknownFormat; }
+ void setUseVulkanMemoryModel()
+ {
+ useVulkanMemoryModel = true;
+ processes.addProcess("use-vulkan-memory-model");
+ }
+ bool usingVulkanMemoryModel() const { return useVulkanMemoryModel; }
+ void setUsePhysicalStorageBuffer()
+ {
+ usePhysicalStorageBuffer = true;
+ }
+ bool usingPhysicalStorageBuffer() const { return usePhysicalStorageBuffer; }
+ void setUseVariablePointers()
+ {
+ useVariablePointers = true;
+ processes.addProcess("use-variable-pointers");
+ }
+ bool usingVariablePointers() const { return useVariablePointers; }
+
+#ifdef ENABLE_HLSL
+ template <class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
+ bool hasCounterBufferName(const TString& name) const {
+ size_t len = strlen(implicitCounterName);
+ return name.size() > len &&
+ name.compare(name.size() - len, len, implicitCounterName) == 0;
+ }
+#endif
+
+ void
setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; } + int getNumPushConstants() const { return numPushConstants; } + void addShaderRecordCount() { ++numShaderRecordBlocks; } + void addTaskNVCount() { ++numTaskNVBlocks; } + + bool setInvocations(int i) + { + if (invocations != TQualifier::layoutNotSet) + return invocations == i; + invocations = i; + return true; + } + int getInvocations() const { return invocations; } + bool setVertices(int m) + { + if (vertices != TQualifier::layoutNotSet) + return vertices == m; + vertices = m; + return true; + } + int getVertices() const { return vertices; } + bool setInputPrimitive(TLayoutGeometry p) + { + if (inputPrimitive != ElgNone) + return inputPrimitive == p; + inputPrimitive = p; + return true; + } + TLayoutGeometry getInputPrimitive() const { return inputPrimitive; } + bool setVertexSpacing(TVertexSpacing s) + { + if (vertexSpacing != EvsNone) + return vertexSpacing == s; + vertexSpacing = s; + return true; + } + TVertexSpacing getVertexSpacing() const { return vertexSpacing; } + bool setVertexOrder(TVertexOrder o) + { + if (vertexOrder != EvoNone) + return vertexOrder == o; + vertexOrder = o; + return true; + } + TVertexOrder getVertexOrder() const { return vertexOrder; } + void setPointMode() { pointMode = true; } + bool getPointMode() const { return pointMode; } + + bool setInterlockOrdering(TInterlockOrdering o) + { + if (interlockOrdering != EioNone) + return interlockOrdering == o; + interlockOrdering = o; + return true; + } + TInterlockOrdering getInterlockOrdering() const { return interlockOrdering; } + + void setXfbMode() { xfbMode = true; } + bool getXfbMode() const { return xfbMode; } + void setMultiStream() { multiStream = true; } + bool isMultiStream() const { return multiStream; } + bool setOutputPrimitive(TLayoutGeometry p) + { + if (outputPrimitive != ElgNone) + return outputPrimitive == p; + outputPrimitive = p; + return true; + } + TLayoutGeometry getOutputPrimitive() const { return outputPrimitive; } + void setPostDepthCoverage() { postDepthCoverage = true; } + bool getPostDepthCoverage() const { return postDepthCoverage; } + void setEarlyFragmentTests() { earlyFragmentTests = true; } + bool getEarlyFragmentTests() const { return earlyFragmentTests; } + bool setDepth(TLayoutDepth d) + { + if (depthLayout != EldNone) + return depthLayout == d; + depthLayout = d; + return true; + } + TLayoutDepth getDepth() const { return depthLayout; } + void setOriginUpperLeft() { originUpperLeft = true; } + bool getOriginUpperLeft() const { return originUpperLeft; } + void setPixelCenterInteger() { pixelCenterInteger = true; } + bool getPixelCenterInteger() const { return pixelCenterInteger; } + void addBlendEquation(TBlendEquationShift b) { blendEquations |= (1 << b); } + unsigned int getBlendEquations() const { return blendEquations; } + bool setXfbBufferStride(int buffer, unsigned stride) + { + if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd) + return xfbBuffers[buffer].stride == stride; + xfbBuffers[buffer].stride = stride; + return true; + } + unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; } + int addXfbBufferOffset(const TType&); + unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const; + unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType) const; + void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; } + bool 
getLayoutOverrideCoverage() const { return layoutOverrideCoverage; } + void setGeoPassthroughEXT() { geoPassthroughEXT = true; } + bool getGeoPassthroughEXT() const { return geoPassthroughEXT; } + void setLayoutDerivativeMode(ComputeDerivativeMode mode) { computeDerivativeMode = mode; } + bool hasLayoutDerivativeModeNone() const { return computeDerivativeMode != LayoutDerivativeNone; } + ComputeDerivativeMode getLayoutDerivativeModeNone() const { return computeDerivativeMode; } + void setLayoutPrimitiveCulling() { layoutPrimitiveCulling = true; } + bool getLayoutPrimitiveCulling() const { return layoutPrimitiveCulling; } + bool setPrimitives(int m) + { + if (primitives != TQualifier::layoutNotSet) + return primitives == m; + primitives = m; + return true; + } + int getPrimitives() const { return primitives; } + const char* addSemanticName(const TString& name) + { + return semanticNameSet.insert(name).first->c_str(); + } + void addUniformLocationOverride(const char* nameStr, int location) + { + std::string name = nameStr; + uniformLocationOverrides[name] = location; + } + + int getUniformLocationOverride(const char* nameStr) const + { + std::string name = nameStr; + auto pos = uniformLocationOverrides.find(name); + if (pos == uniformLocationOverrides.end()) + return -1; + else + return pos->second; + } + + void setUniformLocationBase(int base) { uniformLocationBase = base; } + int getUniformLocationBase() const { return uniformLocationBase; } + + void setNeedsLegalization() { needToLegalize = true; } + bool needsLegalization() const { return needToLegalize; } + + void setBinaryDoubleOutput() { binaryDoubleOutput = true; } + bool getBinaryDoubleOutput() { return binaryDoubleOutput; } +#endif // GLSLANG_WEB + +#ifdef ENABLE_HLSL + void setHlslFunctionality1() { hlslFunctionality1 = true; } + bool getHlslFunctionality1() const { return hlslFunctionality1; } + void setHlslOffsets() + { + hlslOffsets = true; + if (hlslOffsets) + processes.addProcess("hlsl-offsets"); + } + bool usingHlslOffsets() const { return hlslOffsets; } + void setHlslIoMapping(bool b) + { + hlslIoMapping = b; + if (hlslIoMapping) + processes.addProcess("hlsl-iomap"); + } + bool usingHlslIoMapping() { return hlslIoMapping; } +#else + bool getHlslFunctionality1() const { return false; } + bool usingHlslOffsets() const { return false; } + bool usingHlslIoMapping() { return false; } +#endif + + void addToCallGraph(TInfoSink&, const TString& caller, const TString& callee); + void merge(TInfoSink&, TIntermediate&); + void finalCheck(TInfoSink&, bool keepUncalled); + + bool buildConvertOp(TBasicType dst, TBasicType src, TOperator& convertOp) const; + TIntermTyped* createConversion(TBasicType convertTo, TIntermTyped* node) const; + + void addIoAccessed(const TString& name) { ioAccessed.insert(name); } + bool inIoAccessed(const TString& name) const { return ioAccessed.find(name) != ioAccessed.end(); } + + int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision); + int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision); + int addUsedOffsets(int binding, int offset, int numOffsets); + bool addUsedConstantId(int id); + static int computeTypeLocationSize(const TType&, EShLanguage); + static int computeTypeUniformLocationSize(const TType&); + + static int getBaseAlignmentScalar(const TType&, int& size); + static int getBaseAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor); + static int getScalarAlignment(const TType&, int& size, int& stride, 
bool rowMajor);
+ static int getMemberAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
+ static bool improperStraddle(const TType& type, int size, int offset);
+ static void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize);
+ static int getOffset(const TType& type, int index);
+ static int getBlockSize(const TType& blockType);
+ static int computeBufferReferenceTypeSize(const TType&);
+ bool promote(TIntermOperator*);
+ void setNanMinMaxClamp(bool setting) { nanMinMaxClamp = setting; }
+ bool getNanMinMaxClamp() const { return nanMinMaxClamp; }
+
+ void setSourceFile(const char* file) { if (file != nullptr) sourceFile = file; }
+ const std::string& getSourceFile() const { return sourceFile; }
+ void addSourceText(const char* text, size_t len) { sourceText.append(text, len); }
+ const std::string& getSourceText() const { return sourceText; }
+ const std::map<std::string, std::string>& getIncludeText() const { return includeText; }
+ void addIncludeText(const char* name, const char* text, size_t len) { includeText[name].assign(text,len); }
+ void addProcesses(const std::vector<std::string>& p)
+ {
+ for (int i = 0; i < (int)p.size(); ++i)
+ processes.addProcess(p[i]);
+ }
+ void addProcess(const std::string& process) { processes.addProcess(process); }
+ void addProcessArgument(const std::string& arg) { processes.addArgument(arg); }
+ const std::vector<std::string>& getProcesses() const { return processes.getProcesses(); }
+
+ // Certain explicit conversions are allowed conditionally
+#ifdef GLSLANG_WEB
+ bool getArithemeticInt8Enabled() const { return false; }
+ bool getArithemeticInt16Enabled() const { return false; }
+ bool getArithemeticFloat16Enabled() const { return false; }
+#else
+ bool getArithemeticInt8Enabled() const {
+ return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
+ }
+ bool getArithemeticInt16Enabled() const {
+ return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_AMD_gpu_shader_int16) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
+ }
+
+ bool getArithemeticFloat16Enabled() const {
+ return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
+ }
+#endif
+
+protected:
+ TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TConstUnionArray&, TIntermTyped* subtree, const TSourceLoc&);
+ void error(TInfoSink& infoSink, const char*);
+ void warn(TInfoSink& infoSink, const char*);
+ void mergeCallGraphs(TInfoSink&, TIntermediate&);
+ void mergeModes(TInfoSink&, TIntermediate&);
+ void mergeTrees(TInfoSink&, TIntermediate&);
+ void seedIdMap(TIdMaps& idMaps, int& maxId);
+ void remapIds(const TIdMaps& idMaps, int idShift, TIntermediate&);
+ void mergeBodies(TInfoSink&, TIntermSequence& globals, const TIntermSequence& unitGlobals);
+ void mergeLinkerObjects(TInfoSink&, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects);
+ void mergeImplicitArraySizes(TType&, const TType&);
+ void mergeErrorCheck(TInfoSink&, const TIntermSymbol&, const TIntermSymbol&, bool crossStage);
+ void checkCallGraphCycles(TInfoSink&);
+ void checkCallGraphBodies(TInfoSink&, bool keepUncalled);
+ void inOutLocationCheck(TInfoSink&);
+ TIntermAggregate* findLinkerObjects() const;
+ bool userOutputUsed() const;
+ bool isSpecializationOperation(const TIntermOperator&) const;
+ bool isNonuniformPropagating(TOperator) const;
+ bool promoteUnary(TIntermUnary&);
+ bool promoteBinary(TIntermBinary&);
+ void addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable&, const TString&);
+ bool promoteAggregate(TIntermAggregate&);
+ void pushSelector(TIntermSequence&, const TVectorSelector&, const TSourceLoc&);
+ void pushSelector(TIntermSequence&, const TMatrixSelector&, const TSourceLoc&);
+ bool specConstantPropagates(const TIntermTyped&, const TIntermTyped&);
+ void performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root);
+ bool isConversionAllowed(TOperator op, TIntermTyped* node) const;
+ std::tuple<TBasicType, TBasicType> getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const;
+
+ // JohnK: I think this function should go away.
+ // This data structure is just a log to pass on to back ends.
+ // Versioning and extensions are handled in Version.cpp, with a rich
+ // set of functions for querying stages, versions, extension enable/disabled, etc.
+#ifdef GLSLANG_WEB
+ bool extensionRequested(const char *extension) const { return false; }
+#else
+ bool extensionRequested(const char *extension) const {
+ auto it = requestedExtensions.find(extension);
+ if (it != requestedExtensions.end()) {
+ return (it->second == EBhDisable) ? false : true;
+ }
+ return false;
+ }
+#endif
+
+ static const char* getResourceName(TResourceType);
+
+ const EShLanguage language; // stage, known at construction time
+ std::string entryPointName;
+ std::string entryPointMangledName;
+ typedef std::list<TCall> TGraph;
+ TGraph callGraph;
+
+ EProfile profile; // source profile
+ int version; // source version
+ SpvVersion spvVersion;
+ TIntermNode* treeRoot;
+ std::map<std::string, TExtensionBehavior> requestedExtensions; // cumulation of all enabled or required extensions; not connected to what subset of the shader used them
+ TBuiltInResource resources;
+ int numEntryPoints;
+ int numErrors;
+ int numPushConstants;
+ bool recursive;
+ bool invertY;
+ bool useStorageBuffer;
+ bool nanMinMaxClamp; // true if desiring min/max/clamp to favor non-NaN over NaN
+ bool depthReplacing;
+ int localSize[3];
+ bool localSizeNotDefault[3];
+ int localSizeSpecId[3];
+#ifndef GLSLANG_WEB
+public:
+ const char* const implicitThisName;
+ const char* const implicitCounterName;
+protected:
+ EShSource source; // source language, known a bit later
+ bool useVulkanMemoryModel;
+ int invocations;
+ int vertices;
+ TLayoutGeometry inputPrimitive;
+ TLayoutGeometry outputPrimitive;
+ bool pixelCenterInteger;
+ bool originUpperLeft;
+ TVertexSpacing vertexSpacing;
+ TVertexOrder vertexOrder;
+ TInterlockOrdering interlockOrdering;
+ bool pointMode;
+ bool earlyFragmentTests;
+ bool postDepthCoverage;
+ TLayoutDepth depthLayout;
+ bool hlslFunctionality1;
+ int blendEquations; // an 'or'ing of masks of shifts of TBlendEquationShift
+ bool xfbMode;
+ std::vector<TXfbBuffer> xfbBuffers; // all the data we need to track per xfb buffer
+ bool multiStream;
+ bool layoutOverrideCoverage;
+ bool geoPassthroughEXT;
+ int numShaderRecordBlocks;
+ ComputeDerivativeMode computeDerivativeMode;
+ int primitives;
+ int numTaskNVBlocks;
+ bool layoutPrimitiveCulling;
+
+ // Base shift values
+ std::array<unsigned int, EResCount> shiftBinding;
+
+ // Per-descriptor-set shift values
+ std::array<std::map<int, int>, EResCount> shiftBindingForSet;
+
+ std::vector<std::string> resourceSetBinding;
+ bool autoMapBindings;
+ bool autoMapLocations;
+ bool flattenUniformArrays;
+ bool useUnknownFormat;
+ bool hlslOffsets;
+ bool hlslIoMapping;
+ bool useVariablePointers;
+
+ std::set<TString> semanticNameSet;
+
+ EShTextureSamplerTransformMode textureSamplerTransformMode;
+
+ bool needToLegalize;
+ bool binaryDoubleOutput;
+ bool usePhysicalStorageBuffer;
+
+ std::unordered_map<std::string, int> uniformLocationOverrides;
+ int uniformLocationBase;
+#endif
+
+ std::unordered_set<int> usedConstantId; // specialization constant ids used
+ std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
+ std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
+ // set of names of statically read/written I/O that might need extra checking
+ std::set<TString> ioAccessed;
+ // source code of shader, useful as part of debug information
+ std::string sourceFile;
+ std::string sourceText;
+
+ // Included text. First string is a name, second is the included text
+ std::map<std::string, std::string> includeText;
+
+ // for OpModuleProcessed, or equivalent
+ TProcesses processes;
+
+private:
+ void operator=(TIntermediate&); // prevent assignments
+};
+
+} // end namespace glslang
+
+#endif // _LOCAL_INTERMEDIATE_INCLUDED_
diff --git a/dep/glslang/glslang/MachineIndependent/parseConst.cpp b/dep/glslang/glslang/MachineIndependent/parseConst.cpp
new file mode 100644
index 000000000..7c04743ba
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/parseConst.cpp
@@ -0,0 +1,214 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Traverse a tree of constants to create a single folded constant.
+// It should only be used when the whole tree is known to be constant.
+// + +#include "ParseHelper.h" + +namespace glslang { + +class TConstTraverser : public TIntermTraverser { +public: + TConstTraverser(const TConstUnionArray& cUnion, bool singleConstParam, TOperator constructType, const TType& t) + : unionArray(cUnion), type(t), + constructorType(constructType), singleConstantParam(singleConstParam), error(false), isMatrix(false), + matrixCols(0), matrixRows(0) { index = 0; tOp = EOpNull; } + + virtual void visitConstantUnion(TIntermConstantUnion* node); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + + int index; + TConstUnionArray unionArray; + TOperator tOp; + const TType& type; + TOperator constructorType; + bool singleConstantParam; + bool error; + int size; // size of the constructor ( 4 for vec4) + bool isMatrix; + int matrixCols; + int matrixRows; + +protected: + TConstTraverser(TConstTraverser&); + TConstTraverser& operator=(TConstTraverser&); +}; + +bool TConstTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + if (! node->isConstructor() && node->getOp() != EOpComma) { + error = true; + + return false; + } + + bool flag = node->getSequence().size() == 1 && node->getSequence()[0]->getAsTyped()->getAsConstantUnion(); + if (flag) { + singleConstantParam = true; + constructorType = node->getOp(); + size = node->getType().computeNumComponents(); + + if (node->getType().isMatrix()) { + isMatrix = true; + matrixCols = node->getType().getMatrixCols(); + matrixRows = node->getType().getMatrixRows(); + } + } + + for (TIntermSequence::iterator p = node->getSequence().begin(); + p != node->getSequence().end(); p++) { + + if (node->getOp() == EOpComma) + index = 0; + + (*p)->traverse(this); + } + if (flag) + { + singleConstantParam = false; + constructorType = EOpNull; + size = 0; + isMatrix = false; + matrixCols = 0; + matrixRows = 0; + } + + return false; +} + +void TConstTraverser::visitConstantUnion(TIntermConstantUnion* node) +{ + TConstUnionArray leftUnionArray(unionArray); + int instanceSize = type.computeNumComponents(); + + if (index >= instanceSize) + return; + + if (! singleConstantParam) { + int rightUnionSize = node->getType().computeNumComponents(); + + const TConstUnionArray& rightUnionArray = node->getConstArray(); + for (int i = 0; i < rightUnionSize; i++) { + if (index >= instanceSize) + return; + leftUnionArray[index] = rightUnionArray[i]; + + index++; + } + } else { + int endIndex = index + size; + const TConstUnionArray& rightUnionArray = node->getConstArray(); + if (! isMatrix) { + int count = 0; + int nodeComps = node->getType().computeNumComponents(); + for (int i = index; i < endIndex; i++) { + if (i >= instanceSize) + return; + + leftUnionArray[i] = rightUnionArray[count]; + + (index)++; + + if (nodeComps > 1) + count++; + } + } else { + // constructing a matrix, but from what? + if (node->isMatrix()) { + // Matrix from a matrix; this has the outer matrix, node is the argument matrix. + // Traverse the outer, potentially bigger matrix, fill in missing pieces with the + // identity matrix. 
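+                // For example, mat4(mat3 m) keeps m in the upper-left 3x3
+                // corner; the new fourth row and column come from the
+                // identity, so the new diagonal entry (r == c) becomes 1.0
+                // and every other new entry becomes 0.0, as coded below.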
+ for (int c = 0; c < matrixCols; ++c) { + for (int r = 0; r < matrixRows; ++r) { + int targetOffset = index + c * matrixRows + r; + if (r < node->getType().getMatrixRows() && c < node->getType().getMatrixCols()) { + int srcOffset = c * node->getType().getMatrixRows() + r; + leftUnionArray[targetOffset] = rightUnionArray[srcOffset]; + } else if (r == c) + leftUnionArray[targetOffset].setDConst(1.0); + else + leftUnionArray[targetOffset].setDConst(0.0); + } + } + } else { + // matrix from vector or scalar + int count = 0; + const int startIndex = index; + int nodeComps = node->getType().computeNumComponents(); + for (int i = startIndex; i < endIndex; i++) { + if (i >= instanceSize) + return; + if (nodeComps == 1) { + // If there is a single scalar parameter to a matrix + // constructor, it is used to initialize all the + // components on the matrix's diagonal, with the + // remaining components initialized to 0.0. + if (i == startIndex || (i - startIndex) % (matrixRows + 1) == 0 ) + leftUnionArray[i] = rightUnionArray[count]; + else + leftUnionArray[i].setDConst(0.0); + } else { + // construct the matrix in column-major order, from + // the components provided, in order + leftUnionArray[i] = rightUnionArray[count]; + } + + index++; + + if (nodeComps > 1) + count++; + } + } + } + } +} + +bool TIntermediate::parseConstTree(TIntermNode* root, TConstUnionArray unionArray, TOperator constructorType, const TType& t, bool singleConstantParam) +{ + if (root == 0) + return false; + + TConstTraverser it(unionArray, singleConstantParam, constructorType, t); + + root->traverse(&it); + if (it.error) + return true; + else + return false; +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/parseVersions.h b/dep/glslang/glslang/MachineIndependent/parseVersions.h new file mode 100644 index 000000000..aa1964fc2 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/parseVersions.h @@ -0,0 +1,236 @@ +// +// Copyright (C) 2015-2018 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// This is implemented in Versions.cpp
+
+#ifndef _PARSE_VERSIONS_INCLUDED_
+#define _PARSE_VERSIONS_INCLUDED_
+
+#include "../Public/ShaderLang.h"
+#include "../Include/InfoSink.h"
+#include "Scan.h"
+
+#include <map>
+
+namespace glslang {
+
+//
+// Base class for parse helpers.
+// This just has version-related information and checking.
+// This class should be sufficient for preprocessing.
+//
+class TParseVersions {
+public:
+    TParseVersions(TIntermediate& interm, int version, EProfile profile,
+                   const SpvVersion& spvVersion, EShLanguage language, TInfoSink& infoSink,
+                   bool forwardCompatible, EShMessages messages)
+        :
+#ifndef GLSLANG_WEB
+        forwardCompatible(forwardCompatible),
+        profile(profile),
+#endif
+        infoSink(infoSink), version(version),
+        language(language),
+        spvVersion(spvVersion),
+        intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
+    virtual ~TParseVersions() { }
+    void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
+    void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
+#ifdef GLSLANG_WEB
+    const EProfile profile = EEsProfile;
+    bool isEsProfile() const { return true; }
+    void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
+    {
+        if (! (EEsProfile & profileMask))
+            error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
+    }
+    void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
+        const char* const extensions[], const char* featureDesc)
+    {
+        if ((EEsProfile & profileMask) && (minVersion == 0 || version < minVersion))
+            error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
+    }
+    void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
+        const char* featureDesc)
+    {
+        profileRequires(loc, profileMask, minVersion, extension ?
1 : 0, &extension, featureDesc); + } + void initializeExtensionBehavior() { } + void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc) { } + void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc) { } + void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc) { } + void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc) { } + TExtensionBehavior getExtensionBehavior(const char*) { return EBhMissing; } + bool extensionTurnedOn(const char* const extension) { return false; } + bool extensionsTurnedOn(int numExtensions, const char* const extensions[]) { return false; } + void updateExtensionBehavior(int line, const char* const extension, const char* behavior) { } + void updateExtensionBehavior(const char* const extension, TExtensionBehavior) { } + void checkExtensionStage(const TSourceLoc&, const char* const extension) { } + void fullIntegerCheck(const TSourceLoc&, const char* op) { } + void doubleCheck(const TSourceLoc&, const char* op) { } + bool float16Arithmetic() { return false; } + void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { } + bool int16Arithmetic() { return false; } + void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { } + bool int8Arithmetic() { return false; } + void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { } + void int64Check(const TSourceLoc&, const char* op, bool builtIn = false) { } + void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false) { } + void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false) { } + bool relaxedErrors() const { return false; } + bool suppressWarnings() const { return true; } + bool isForwardCompatible() const { return false; } +#else + bool forwardCompatible; // true if errors are to be given for use of deprecated features + EProfile profile; // the declared profile in the shader (core by default) + bool isEsProfile() const { return profile == EEsProfile; } + void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc); + void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions, + const char* const extensions[], const char* featureDesc); + void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension, + const char* featureDesc); + virtual void initializeExtensionBehavior(); + virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc); + virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc); + virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc); + virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc); + virtual TExtensionBehavior getExtensionBehavior(const char*); + virtual bool extensionTurnedOn(const char* const extension); + virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]); + virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior); + virtual void updateExtensionBehavior(const char* const extension, 
TExtensionBehavior); + virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc); + virtual void checkExtensionStage(const TSourceLoc&, const char* const extension); + virtual void fullIntegerCheck(const TSourceLoc&, const char* op); + + virtual void unimplemented(const TSourceLoc&, const char* featureDesc); + virtual void doubleCheck(const TSourceLoc&, const char* op); + virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual bool float16Arithmetic(); + virtual void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc); + virtual void int16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual bool int16Arithmetic(); + virtual void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc); + virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual bool int8Arithmetic(); + virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc); + virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void intcoopmatCheck(const TSourceLoc&, const char *op, bool builtIn = false); + bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; } + bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; } + bool isForwardCompatible() const { return forwardCompatible; } +#endif // GLSLANG_WEB + virtual void spvRemoved(const TSourceLoc&, const char* op); + virtual void vulkanRemoved(const TSourceLoc&, const char* op); + virtual void requireVulkan(const TSourceLoc&, const char* op); + virtual void requireSpv(const TSourceLoc&, const char* op); + + +#if defined(GLSLANG_WEB) && !defined(GLSLANG_WEB_DEVEL) + void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) { addError(); } + void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) { } + void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) { addError(); } + void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) { } +#else + virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) = 0; + virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) = 0; + virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) 
= 0;
+    virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
+        const char* szExtraInfoFormat, ...) = 0;
+#endif
+
+    void addError() { ++numErrors; }
+    int getNumErrors() const { return numErrors; }
+
+    void setScanner(TInputScanner* scanner) { currentScanner = scanner; }
+    TInputScanner* getScanner() const { return currentScanner; }
+    const TSourceLoc& getCurrentLoc() const { return currentScanner->getSourceLoc(); }
+    void setCurrentLine(int line) { currentScanner->setLine(line); }
+    void setCurrentColumn(int col) { currentScanner->setColumn(col); }
+    void setCurrentSourceName(const char* name) { currentScanner->setFile(name); }
+    void setCurrentString(int string) { currentScanner->setString(string); }
+
+    void getPreamble(std::string&);
+#ifdef ENABLE_HLSL
+    bool isReadingHLSL() const { return (messages & EShMsgReadHlsl) == EShMsgReadHlsl; }
+    bool hlslEnable16BitTypes() const { return (messages & EShMsgHlslEnable16BitTypes) != 0; }
+    bool hlslDX9Compatible() const { return (messages & EShMsgHlslDX9Compatible) != 0; }
+#else
+    bool isReadingHLSL() const { return false; }
+#endif
+
+    TInfoSink& infoSink;
+
+    // compilation mode
+    int version;                 // version, updated by #version in the shader
+    EShLanguage language;        // really the stage
+    SpvVersion spvVersion;
+    TIntermediate& intermediate; // helper for making and hooking up pieces of the parse tree
+
+protected:
+    TMap<TString, TExtensionBehavior> extensionBehavior;  // for each extension string, what its current behavior is set to
+    EShMessages messages;        // errors/warnings/rule-sets
+    int numErrors;               // number of compile-time errors encountered
+    TInputScanner* currentScanner;
+
+private:
+    explicit TParseVersions(const TParseVersions&);
+    TParseVersions& operator=(const TParseVersions&);
+};
+
+} // end namespace glslang
+
+#endif // _PARSE_VERSIONS_INCLUDED_
diff --git a/dep/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp b/dep/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp
new file mode 100644
index 000000000..ec3935614
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp
@@ -0,0 +1,1338 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <sstream>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+#include <climits>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace glslang {
+
+// Handle #define
+int TPpContext::CPPdefine(TPpToken* ppToken)
+{
+    MacroSymbol mac;
+
+    // get the macro name
+    int token = scanToken(ppToken);
+    if (token != PpAtomIdentifier) {
+        parseContext.ppError(ppToken->loc, "must be followed by macro name", "#define", "");
+        return token;
+    }
+    if (ppToken->loc.string >= 0) {
+        // We are in user code; check for reserved name use:
+        parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#define");
+    }
+
+    // save the macro name
+    const int defAtom = atomStrings.getAddAtom(ppToken->name);
+    TSourceLoc defineLoc = ppToken->loc; // because ppToken might go to the next line before we report errors
+
+    // gather parameters to the macro, between (...)
+    token = scanToken(ppToken);
+    if (token == '(' && !ppToken->space) {
+        mac.functionLike = 1;
+        do {
+            token = scanToken(ppToken);
+            if (mac.args.size() == 0 && token == ')')
+                break;
+            if (token != PpAtomIdentifier) {
+                parseContext.ppError(ppToken->loc, "bad argument", "#define", "");
+
+                return token;
+            }
+            const int argAtom = atomStrings.getAddAtom(ppToken->name);
+
+            // check for duplication of parameter name
+            bool duplicate = false;
+            for (size_t a = 0; a < mac.args.size(); ++a) {
+                if (mac.args[a] == argAtom) {
+                    parseContext.ppError(ppToken->loc, "duplicate macro parameter", "#define", "");
+                    duplicate = true;
+                    break;
+                }
+            }
+            if (! duplicate)
+                mac.args.push_back(argAtom);
+            token = scanToken(ppToken);
+        } while (token == ',');
+        if (token != ')') {
+            parseContext.ppError(ppToken->loc, "missing parenthesis", "#define", "");
+
+            return token;
+        }
+
+        token = scanToken(ppToken);
+    } else if (token != '\n' && token != EndOfInput && !ppToken->space) {
+        parseContext.ppWarn(ppToken->loc, "missing space after macro name", "#define", "");
+
+        return token;
+    }
+
+    // record the definition of the macro
+    while (token != '\n' && token != EndOfInput) {
+        mac.body.putToken(token, ppToken);
+        token = scanToken(ppToken);
+        if (token != '\n' && ppToken->space)
+            mac.body.putToken(' ', ppToken);
+    }
+
+    // check for duplicate definition
+    MacroSymbol* existing = lookupMacroDef(defAtom);
+    if (existing != nullptr) {
+        if (! existing->undef) {
+            // Already defined -- need to make sure they are identical:
+            // "Two replacement lists are identical if and only if the
+            // preprocessing tokens in both have the same number,
+            // ordering, spelling, and white-space separation, where all
+            // white-space separations are considered identical."
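+            // For example, repeating an identical "#define N 1" is accepted,
+            // while "#define N 2" after "#define N 1" must be diagnosed. The
+            // checks below compare the shape first (function-like? same
+            // argument names?), then replay both bodies token by token.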
+ if (existing->functionLike != mac.functionLike) { + parseContext.ppError(defineLoc, "Macro redefined; function-like versus object-like:", "#define", + atomStrings.getString(defAtom)); + } else if (existing->args.size() != mac.args.size()) { + parseContext.ppError(defineLoc, "Macro redefined; different number of arguments:", "#define", + atomStrings.getString(defAtom)); + } else { + if (existing->args != mac.args) { + parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define", + atomStrings.getString(defAtom)); + } + // set up to compare the two + existing->body.reset(); + mac.body.reset(); + int newToken; + bool firstToken = true; + do { + int oldToken; + TPpToken oldPpToken; + TPpToken newPpToken; + oldToken = existing->body.getToken(parseContext, &oldPpToken); + newToken = mac.body.getToken(parseContext, &newPpToken); + // for the first token, preceding spaces don't matter + if (firstToken) { + newPpToken.space = oldPpToken.space; + firstToken = false; + } + if (oldToken != newToken || oldPpToken != newPpToken) { + parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define", + atomStrings.getString(defAtom)); + break; + } + } while (newToken != EndOfInput); + } + } + *existing = mac; + } else + addMacroDef(defAtom, mac); + + return '\n'; +} + +// Handle #undef +int TPpContext::CPPundef(TPpToken* ppToken) +{ + int token = scanToken(ppToken); + if (token != PpAtomIdentifier) { + parseContext.ppError(ppToken->loc, "must be followed by macro name", "#undef", ""); + + return token; + } + + parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#undef"); + + MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name)); + if (macro != nullptr) + macro->undef = 1; + token = scanToken(ppToken); + if (token != '\n') + parseContext.ppError(ppToken->loc, "can only be followed by a single macro name", "#undef", ""); + + return token; +} + +// Handle #else +/* Skip forward to appropriate spot. This is used both +** to skip to a #endif after seeing an #else, AND to skip to a #else, +** #elif, or #endif after a #if/#ifdef/#ifndef/#elif test was false. 
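+** For example, after "#if 0" fails, CPPelse(1, ...) consumes tokens until the
+** matching #else, #elif, or #endif, counting nested #if/#ifdef/#ifndef blocks
+** so that conditionals inside the skipped region are passed over intact.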
+*/ +int TPpContext::CPPelse(int matchelse, TPpToken* ppToken) +{ + int depth = 0; + int token = scanToken(ppToken); + + while (token != EndOfInput) { + if (token != '#') { + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + + if (token == EndOfInput) + return token; + + token = scanToken(ppToken); + continue; + } + + if ((token = scanToken(ppToken)) != PpAtomIdentifier) + continue; + + int nextAtom = atomStrings.getAtom(ppToken->name); + if (nextAtom == PpAtomIf || nextAtom == PpAtomIfdef || nextAtom == PpAtomIfndef) { + depth++; + if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) { + parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#if/#ifdef/#ifndef", ""); + return EndOfInput; + } else { + ifdepth++; + elsetracker++; + } + } else if (nextAtom == PpAtomEndif) { + token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken)); + elseSeen[elsetracker] = false; + --elsetracker; + if (depth == 0) { + // found the #endif we are looking for + if (ifdepth > 0) + --ifdepth; + break; + } + --depth; + --ifdepth; + } else if (matchelse && depth == 0) { + if (nextAtom == PpAtomElse) { + elseSeen[elsetracker] = true; + token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken)); + // found the #else we are looking for + break; + } else if (nextAtom == PpAtomElif) { + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", ""); + /* we decrement ifdepth here, because CPPif will increment + * it and we really want to leave it alone */ + if (ifdepth > 0) { + --ifdepth; + elseSeen[elsetracker] = false; + --elsetracker; + } + + return CPPif(ppToken); + } + } else if (nextAtom == PpAtomElse) { + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#else after #else", "#else", ""); + else + elseSeen[elsetracker] = true; + token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken)); + } else if (nextAtom == PpAtomElif) { + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", ""); + } + } + + return token; +} + +// Call when there should be no more tokens left on a line. 
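+// For example, "#endif FOO" leaves a stray FOO on the line: it is reported
+// (as a warning under relaxed errors, otherwise as an error) and the rest of
+// the line is discarded.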
+int TPpContext::extraTokenCheck(int contextAtom, TPpToken* ppToken, int token) +{ + if (token != '\n' && token != EndOfInput) { + static const char* message = "unexpected tokens following directive"; + + const char* label; + if (contextAtom == PpAtomElse) + label = "#else"; + else if (contextAtom == PpAtomElif) + label = "#elif"; + else if (contextAtom == PpAtomEndif) + label = "#endif"; + else if (contextAtom == PpAtomIf) + label = "#if"; + else if (contextAtom == PpAtomLine) + label = "#line"; + else + label = ""; + + if (parseContext.relaxedErrors()) + parseContext.ppWarn(ppToken->loc, message, label, ""); + else + parseContext.ppError(ppToken->loc, message, label, ""); + + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + } + + return token; +} + +enum eval_prec { + MIN_PRECEDENCE, + COND, LOGOR, LOGAND, OR, XOR, AND, EQUAL, RELATION, SHIFT, ADD, MUL, UNARY, + MAX_PRECEDENCE +}; + +namespace { + + int op_logor(int a, int b) { return a || b; } + int op_logand(int a, int b) { return a && b; } + int op_or(int a, int b) { return a | b; } + int op_xor(int a, int b) { return a ^ b; } + int op_and(int a, int b) { return a & b; } + int op_eq(int a, int b) { return a == b; } + int op_ne(int a, int b) { return a != b; } + int op_ge(int a, int b) { return a >= b; } + int op_le(int a, int b) { return a <= b; } + int op_gt(int a, int b) { return a > b; } + int op_lt(int a, int b) { return a < b; } + int op_shl(int a, int b) { return a << b; } + int op_shr(int a, int b) { return a >> b; } + int op_add(int a, int b) { return a + b; } + int op_sub(int a, int b) { return a - b; } + int op_mul(int a, int b) { return a * b; } + int op_div(int a, int b) { return a == INT_MIN && b == -1 ? 0 : a / b; } + int op_mod(int a, int b) { return a == INT_MIN && b == -1 ? 0 : a % b; } + int op_pos(int a) { return a; } + int op_neg(int a) { return -a; } + int op_cmpl(int a) { return ~a; } + int op_not(int a) { return !a; } + +}; + +struct TBinop { + int token, precedence, (*op)(int, int); +} binop[] = { + { PpAtomOr, LOGOR, op_logor }, + { PpAtomAnd, LOGAND, op_logand }, + { '|', OR, op_or }, + { '^', XOR, op_xor }, + { '&', AND, op_and }, + { PpAtomEQ, EQUAL, op_eq }, + { PpAtomNE, EQUAL, op_ne }, + { '>', RELATION, op_gt }, + { PpAtomGE, RELATION, op_ge }, + { '<', RELATION, op_lt }, + { PpAtomLE, RELATION, op_le }, + { PpAtomLeft, SHIFT, op_shl }, + { PpAtomRight, SHIFT, op_shr }, + { '+', ADD, op_add }, + { '-', ADD, op_sub }, + { '*', MUL, op_mul }, + { '/', MUL, op_div }, + { '%', MUL, op_mod }, +}; + +struct TUnop { + int token, (*op)(int); +} unop[] = { + { '+', op_pos }, + { '-', op_neg }, + { '~', op_cmpl }, + { '!', op_not }, +}; + +#define NUM_ELEMENTS(A) (sizeof(A) / sizeof(A[0])) + +int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken* ppToken) +{ + TSourceLoc loc = ppToken->loc; // because we sometimes read the newline before reporting the error + if (token == PpAtomIdentifier) { + if (strcmp("defined", ppToken->name) == 0) { + if (! 
parseContext.isReadingHLSL() && isMacroInput()) { + if (parseContext.relaxedErrors()) + parseContext.ppWarn(ppToken->loc, "nonportable when expanded from macros for preprocessor expression", + "defined", ""); + else + parseContext.ppError(ppToken->loc, "cannot use in preprocessor expression when expanded from macros", + "defined", ""); + } + bool needclose = 0; + token = scanToken(ppToken); + if (token == '(') { + needclose = true; + token = scanToken(ppToken); + } + if (token != PpAtomIdentifier) { + parseContext.ppError(loc, "incorrect directive, expected identifier", "preprocessor evaluation", ""); + err = true; + res = 0; + + return token; + } + + MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name)); + res = macro != nullptr ? !macro->undef : 0; + token = scanToken(ppToken); + if (needclose) { + if (token != ')') { + parseContext.ppError(loc, "expected ')'", "preprocessor evaluation", ""); + err = true; + res = 0; + + return token; + } + token = scanToken(ppToken); + } + } else { + token = evalToToken(token, shortCircuit, res, err, ppToken); + return eval(token, precedence, shortCircuit, res, err, ppToken); + } + } else if (token == PpAtomConstInt) { + res = ppToken->ival; + token = scanToken(ppToken); + } else if (token == '(') { + token = scanToken(ppToken); + token = eval(token, MIN_PRECEDENCE, shortCircuit, res, err, ppToken); + if (! err) { + if (token != ')') { + parseContext.ppError(loc, "expected ')'", "preprocessor evaluation", ""); + err = true; + res = 0; + + return token; + } + token = scanToken(ppToken); + } + } else { + int op = NUM_ELEMENTS(unop) - 1; + for (; op >= 0; op--) { + if (unop[op].token == token) + break; + } + if (op >= 0) { + token = scanToken(ppToken); + token = eval(token, UNARY, shortCircuit, res, err, ppToken); + res = unop[op].op(res); + } else { + parseContext.ppError(loc, "bad expression", "preprocessor evaluation", ""); + err = true; + res = 0; + + return token; + } + } + + token = evalToToken(token, shortCircuit, res, err, ppToken); + + // Perform evaluation of binary operation, if there is one, otherwise we are done. + while (! err) { + if (token == ')' || token == '\n') + break; + int op; + for (op = NUM_ELEMENTS(binop) - 1; op >= 0; op--) { + if (binop[op].token == token) + break; + } + if (op < 0 || binop[op].precedence <= precedence) + break; + int leftSide = res; + + // Setup short-circuiting, needed for ES, unless already in a short circuit. + // (Once in a short-circuit, can't turn off again, until that whole subexpression is done. + if (! shortCircuit) { + if ((token == PpAtomOr && leftSide == 1) || + (token == PpAtomAnd && leftSide == 0)) + shortCircuit = true; + } + + token = scanToken(ppToken); + token = eval(token, binop[op].precedence, shortCircuit, res, err, ppToken); + + if (binop[op].op == op_div || binop[op].op == op_mod) { + if (res == 0) { + parseContext.ppError(loc, "division by 0", "preprocessor evaluation", ""); + res = 1; + } + } + res = binop[op].op(leftSide, res); + } + + return token; +} + +// Expand macros, skipping empty expansions, to get to the first real token in those expansions. 
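+// For example, given "#define EMPTY" and "#define ONE 1", evaluating
+// "#if EMPTY ONE" expands EMPTY to nothing and stops at the constant 1.
+// An undefined identifier expands to a literal 0 (an error in an ES profile
+// unless the subexpression is being short-circuited).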
+int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken* ppToken) +{ + while (token == PpAtomIdentifier && strcmp("defined", ppToken->name) != 0) { + switch (MacroExpand(ppToken, true, false)) { + case MacroExpandNotStarted: + case MacroExpandError: + parseContext.ppError(ppToken->loc, "can't evaluate expression", "preprocessor evaluation", ""); + err = true; + res = 0; + break; + case MacroExpandStarted: + break; + case MacroExpandUndef: + if (! shortCircuit && parseContext.isEsProfile()) { + const char* message = "undefined macro in expression not allowed in es profile"; + if (parseContext.relaxedErrors()) + parseContext.ppWarn(ppToken->loc, message, "preprocessor evaluation", ppToken->name); + else + parseContext.ppError(ppToken->loc, message, "preprocessor evaluation", ppToken->name); + } + break; + } + token = scanToken(ppToken); + if (err) + break; + } + + return token; +} + +// Handle #if +int TPpContext::CPPif(TPpToken* ppToken) +{ + int token = scanToken(ppToken); + if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) { + parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#if", ""); + return EndOfInput; + } else { + elsetracker++; + ifdepth++; + } + int res = 0; + bool err = false; + token = eval(token, MIN_PRECEDENCE, false, res, err, ppToken); + token = extraTokenCheck(PpAtomIf, ppToken, token); + if (!res && !err) + token = CPPelse(1, ppToken); + + return token; +} + +// Handle #ifdef +int TPpContext::CPPifdef(int defined, TPpToken* ppToken) +{ + int token = scanToken(ppToken); + if (ifdepth > maxIfNesting || elsetracker > maxIfNesting) { + parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#ifdef", ""); + return EndOfInput; + } else { + elsetracker++; + ifdepth++; + } + + if (token != PpAtomIdentifier) { + if (defined) + parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifdef", ""); + else + parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifndef", ""); + } else { + MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name)); + token = scanToken(ppToken); + if (token != '\n') { + parseContext.ppError(ppToken->loc, "unexpected tokens following #ifdef directive - expected a newline", "#ifdef", ""); + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + } + if (((macro != nullptr && !macro->undef) ? 1 : 0) != defined) + token = CPPelse(1, ppToken); + } + + return token; +} + +// Handle #include ... +// TODO: Handle macro expansions for the header name +int TPpContext::CPPinclude(TPpToken* ppToken) +{ + const TSourceLoc directiveLoc = ppToken->loc; + bool startWithLocalSearch = true; // to additionally include the extra "" paths + int token; + + // Find the first non-whitespace char after #include + int ch = getChar(); + while (ch == ' ' || ch == '\t') { + ch = getChar(); + } + if (ch == '<') { + // style + startWithLocalSearch = false; + token = scanHeaderName(ppToken, '>'); + } else if (ch == '"') { + // "header-name" style + token = scanHeaderName(ppToken, '"'); + } else { + // unexpected, get the full token to generate the error + ungetChar(); + token = scanToken(ppToken); + } + + if (token != PpAtomConstString) { + parseContext.ppError(directiveLoc, "must be followed by a header name", "#include", ""); + return token; + } + + // Make a copy of the name because it will be overwritten by the next token scan. 
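+    // The copied name is then resolved through the client-supplied Includer:
+    // "header" spellings try the local ("") search first and fall back to the
+    // system search, while <header> spellings go straight to the system (<>)
+    // search (see startWithLocalSearch above).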
+    const std::string filename = ppToken->name;
+
+    // See if the directive was well formed
+    token = scanToken(ppToken);
+    if (token != '\n') {
+        if (token == EndOfInput)
+            parseContext.ppError(ppToken->loc, "expected newline after header name:", "#include", "%s", filename.c_str());
+        else
+            parseContext.ppError(ppToken->loc, "extra content after header name:", "#include", "%s", filename.c_str());
+        return token;
+    }
+
+    // Process well-formed directive
+
+    // Find the inclusion, first look in "Local" ("") paths, if requested,
+    // otherwise, only search the "System" (<>) paths.
+    TShader::Includer::IncludeResult* res = nullptr;
+    if (startWithLocalSearch)
+        res = includer.includeLocal(filename.c_str(), currentSourceFile.c_str(), includeStack.size() + 1);
+    if (res == nullptr || res->headerName.empty()) {
+        includer.releaseInclude(res);
+        res = includer.includeSystem(filename.c_str(), currentSourceFile.c_str(), includeStack.size() + 1);
+    }
+
+    // Process the results
+    if (res != nullptr && !res->headerName.empty()) {
+        if (res->headerData != nullptr && res->headerLength > 0) {
+            // path for processing one or more tokens from an included header, hand off 'res'
+            const bool forNextLine = parseContext.lineDirectiveShouldSetNextLine();
+            std::ostringstream prologue;
+            std::ostringstream epilogue;
+            prologue << "#line " << forNextLine << " " << "\"" << res->headerName << "\"\n";
+            epilogue << (res->headerData[res->headerLength - 1] == '\n'? "" : "\n") <<
+                "#line " << directiveLoc.line + forNextLine << " " << directiveLoc.getStringNameOrNum() << "\n";
+            pushInput(new TokenizableIncludeFile(directiveLoc, prologue.str(), res, epilogue.str(), this));
+            parseContext.intermediate.addIncludeText(res->headerName.c_str(), res->headerData, res->headerLength);
+            // There's no "current" location anymore.
+            parseContext.setCurrentColumn(0);
+        } else {
+            // things are okay, but there is nothing to process
+            includer.releaseInclude(res);
+        }
+    } else {
+        // error path, clean up
+        std::string message =
+            res != nullptr ? std::string(res->headerData, res->headerLength)
+                           : std::string("Could not process include directive");
+        parseContext.ppError(directiveLoc, message.c_str(), "#include", "for header name: %s", filename.c_str());
+        includer.releaseInclude(res);
+    }
+
+    return token;
+}
+
+// Handle #line
+int TPpContext::CPPline(TPpToken* ppToken)
+{
+    // "#line must have, after macro substitution, one of the following forms:
+    // "#line line
+    // "#line line source-string-number"
+
+    int token = scanToken(ppToken);
+    const TSourceLoc directiveLoc = ppToken->loc;
+    if (token == '\n') {
+        parseContext.ppError(ppToken->loc, "must be followed by an integral literal", "#line", "");
+        return token;
+    }
+
+    int lineRes = 0; // Line number after macro expansion.
+    int lineToken = 0;
+    bool hasFile = false;
+    int fileRes = 0; // Source file number after macro expansion.
+    const char* sourceName = nullptr; // Optional source file name.
+    bool lineErr = false;
+    bool fileErr = false;
+    disableEscapeSequences = true;
+    token = eval(token, MIN_PRECEDENCE, false, lineRes, lineErr, ppToken);
+    disableEscapeSequences = false;
+    if (!
lineErr) { + lineToken = lineRes; + if (token == '\n') + ++lineRes; + + if (parseContext.lineDirectiveShouldSetNextLine()) + --lineRes; + parseContext.setCurrentLine(lineRes); + + if (token != '\n') { +#ifndef GLSLANG_WEB + if (token == PpAtomConstString) { + parseContext.ppRequireExtensions(directiveLoc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based #line"); + // We need to save a copy of the string instead of pointing + // to the name field of the token since the name field + // will likely be overwritten by the next token scan. + sourceName = atomStrings.getString(atomStrings.getAddAtom(ppToken->name)); + parseContext.setCurrentSourceName(sourceName); + hasFile = true; + token = scanToken(ppToken); + } else +#endif + { + token = eval(token, MIN_PRECEDENCE, false, fileRes, fileErr, ppToken); + if (! fileErr) { + parseContext.setCurrentString(fileRes); + hasFile = true; + } + } + } + } + if (!fileErr && !lineErr) { + parseContext.notifyLineDirective(directiveLoc.line, lineToken, hasFile, fileRes, sourceName); + } + token = extraTokenCheck(PpAtomLine, ppToken, token); + + return token; +} + +// Handle #error +int TPpContext::CPPerror(TPpToken* ppToken) +{ + disableEscapeSequences = true; + int token = scanToken(ppToken); + disableEscapeSequences = false; + std::string message; + TSourceLoc loc = ppToken->loc; + + while (token != '\n' && token != EndOfInput) { + if (token == PpAtomConstInt16 || token == PpAtomConstUint16 || + token == PpAtomConstInt || token == PpAtomConstUint || + token == PpAtomConstInt64 || token == PpAtomConstUint64 || + token == PpAtomConstFloat16 || + token == PpAtomConstFloat || token == PpAtomConstDouble) { + message.append(ppToken->name); + } else if (token == PpAtomIdentifier || token == PpAtomConstString) { + message.append(ppToken->name); + } else { + message.append(atomStrings.getString(token)); + } + message.append(" "); + token = scanToken(ppToken); + } + parseContext.notifyErrorDirective(loc.line, message.c_str()); + // store this msg into the shader's information log..set the Compile Error flag!!!! 
+    parseContext.ppError(loc, message.c_str(), "#error", "");
+
+    return '\n';
+}
+
+// Handle #pragma
+int TPpContext::CPPpragma(TPpToken* ppToken)
+{
+    char SrcStrName[2];
+    TVector<TString> tokens;
+
+    TSourceLoc loc = ppToken->loc;  // because we go to the next line before processing
+    int token = scanToken(ppToken);
+    while (token != '\n' && token != EndOfInput) {
+        switch (token) {
+        case PpAtomIdentifier:
+        case PpAtomConstInt:
+        case PpAtomConstUint:
+        case PpAtomConstInt64:
+        case PpAtomConstUint64:
+        case PpAtomConstInt16:
+        case PpAtomConstUint16:
+        case PpAtomConstFloat:
+        case PpAtomConstDouble:
+        case PpAtomConstFloat16:
+            tokens.push_back(ppToken->name);
+            break;
+        default:
+            SrcStrName[0] = (char)token;
+            SrcStrName[1] = '\0';
+            tokens.push_back(SrcStrName);
+        }
+        token = scanToken(ppToken);
+    }
+
+    if (token == EndOfInput)
+        parseContext.ppError(loc, "directive must end with a newline", "#pragma", "");
+    else
+        parseContext.handlePragma(loc, tokens);
+
+    return token;
+}
+
+// #version: This is just for error checking: the version and profile are decided before preprocessing starts
+int TPpContext::CPPversion(TPpToken* ppToken)
+{
+    int token = scanToken(ppToken);
+
+    if (errorOnVersion || versionSeen) {
+        if (parseContext.isReadingHLSL())
+            parseContext.ppError(ppToken->loc, "invalid preprocessor command", "#version", "");
+        else
+            parseContext.ppError(ppToken->loc, "must occur first in shader", "#version", "");
+    }
+    versionSeen = true;
+
+    if (token == '\n') {
+        parseContext.ppError(ppToken->loc, "must be followed by version number", "#version", "");
+
+        return token;
+    }
+
+    if (token != PpAtomConstInt)
+        parseContext.ppError(ppToken->loc, "must be followed by version number", "#version", "");
+
+    ppToken->ival = atoi(ppToken->name);
+    int versionNumber = ppToken->ival;
+    int line = ppToken->loc.line;
+    token = scanToken(ppToken);
+
+    if (token == '\n') {
+        parseContext.notifyVersion(line, versionNumber, nullptr);
+        return token;
+    } else {
+        int profileAtom = atomStrings.getAtom(ppToken->name);
+        if (profileAtom != PpAtomCore &&
+            profileAtom != PpAtomCompatibility &&
+            profileAtom != PpAtomEs)
+            parseContext.ppError(ppToken->loc, "bad profile name; use es, core, or compatibility", "#version", "");
+        parseContext.notifyVersion(line, versionNumber, ppToken->name);
+        token = scanToken(ppToken);
+
+        if (token == '\n')
+            return token;
+        else
+            parseContext.ppError(ppToken->loc, "bad tokens following profile -- expected newline", "#version", "");
+    }
+
+    return token;
+}
+
+// Handle #extension
+int TPpContext::CPPextension(TPpToken* ppToken)
+{
+    int line = ppToken->loc.line;
+    int token = scanToken(ppToken);
+    char extensionName[MaxTokenLength + 1];
+
+    if (token=='\n') {
+        parseContext.ppError(ppToken->loc, "extension name not specified", "#extension", "");
+        return token;
+    }
+
+    if (token != PpAtomIdentifier)
+        parseContext.ppError(ppToken->loc, "extension name expected", "#extension", "");
+
+    snprintf(extensionName, sizeof(extensionName), "%s", ppToken->name);
+
+    token = scanToken(ppToken);
+    if (token != ':') {
+        parseContext.ppError(ppToken->loc, "':' missing after extension name", "#extension", "");
+        return token;
+    }
+
+    token = scanToken(ppToken);
+    if (token != PpAtomIdentifier) {
+        parseContext.ppError(ppToken->loc, "behavior for extension not specified", "#extension", "");
+        return token;
+    }
+
+    parseContext.updateExtensionBehavior(line, extensionName, ppToken->name);
+    parseContext.notifyExtensionDirective(line, extensionName, ppToken->name);
+
+    token =
scanToken(ppToken); + if (token == '\n') + return token; + else + parseContext.ppError(ppToken->loc, "extra tokens -- expected newline", "#extension",""); + + return token; +} + +int TPpContext::readCPPline(TPpToken* ppToken) +{ + int token = scanToken(ppToken); + + if (token == PpAtomIdentifier) { + switch (atomStrings.getAtom(ppToken->name)) { + case PpAtomDefine: + token = CPPdefine(ppToken); + break; + case PpAtomElse: + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#else after #else", "#else", ""); + elseSeen[elsetracker] = true; + if (ifdepth == 0) + parseContext.ppError(ppToken->loc, "mismatched statements", "#else", ""); + token = extraTokenCheck(PpAtomElse, ppToken, scanToken(ppToken)); + token = CPPelse(0, ppToken); + break; + case PpAtomElif: + if (ifdepth == 0) + parseContext.ppError(ppToken->loc, "mismatched statements", "#elif", ""); + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", ""); + // this token is really a dont care, but we still need to eat the tokens + token = scanToken(ppToken); + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + token = CPPelse(0, ppToken); + break; + case PpAtomEndif: + if (ifdepth == 0) + parseContext.ppError(ppToken->loc, "mismatched statements", "#endif", ""); + else { + elseSeen[elsetracker] = false; + --elsetracker; + --ifdepth; + } + token = extraTokenCheck(PpAtomEndif, ppToken, scanToken(ppToken)); + break; + case PpAtomIf: + token = CPPif(ppToken); + break; + case PpAtomIfdef: + token = CPPifdef(1, ppToken); + break; + case PpAtomIfndef: + token = CPPifdef(0, ppToken); + break; + case PpAtomLine: + token = CPPline(ppToken); + break; +#ifndef GLSLANG_WEB + case PpAtomInclude: + if(!parseContext.isReadingHLSL()) { + parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_include_directive, "#include"); + } + token = CPPinclude(ppToken); + break; + case PpAtomPragma: + token = CPPpragma(ppToken); + break; +#endif + case PpAtomUndef: + token = CPPundef(ppToken); + break; + case PpAtomError: + token = CPPerror(ppToken); + break; + case PpAtomVersion: + token = CPPversion(ppToken); + break; + case PpAtomExtension: + token = CPPextension(ppToken); + break; + default: + parseContext.ppError(ppToken->loc, "invalid directive:", "#", ppToken->name); + break; + } + } else if (token != '\n' && token != EndOfInput) + parseContext.ppError(ppToken->loc, "invalid directive", "#", ""); + + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + + return token; +} + +// Context-dependent parsing of a #include . +// Assumes no macro expansions etc. are being done; the name is just on the current input. +// Always creates a name and returns PpAtomicConstString, unless we run out of input. +int TPpContext::scanHeaderName(TPpToken* ppToken, char delimit) +{ + bool tooLong = false; + + if (inputStack.empty()) + return EndOfInput; + + int len = 0; + ppToken->name[0] = '\0'; + do { + int ch = inputStack.back()->getch(); + + // done yet? + if (ch == delimit) { + ppToken->name[len] = '\0'; + if (tooLong) + parseContext.ppError(ppToken->loc, "header name too long", "", ""); + return PpAtomConstString; + } else if (ch == EndOfInput) + return EndOfInput; + + // found a character to expand the name with + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + else + tooLong = true; + } while (true); +} + +// Macro-expand a macro argument 'arg' to create 'expandedArg'. +// Does not replace 'arg'. 
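+// This is the usual prescan step: an argument is fully macro-expanded once,
+// on its own, before being substituted into the macro body, so that with
+//     #define A B
+//     #define ID(x) x
+// the call ID(A) ultimately yields B (unless the parameter abuts ##,
+// in which case the unexpanded form is used for pasting).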
+// Returns nullptr if no expanded argument is created. +TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken* ppToken, bool newLineOkay) +{ + // expand the argument + TokenStream* expandedArg = new TokenStream; + pushInput(new tMarkerInput(this)); + pushTokenStreamInput(arg); + int token; + while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput) { + token = tokenPaste(token, *ppToken); + if (token == PpAtomIdentifier) { + switch (MacroExpand(ppToken, false, newLineOkay)) { + case MacroExpandNotStarted: + break; + case MacroExpandError: + // toss the rest of the pushed-input argument by scanning until tMarkerInput + while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput) + ; + break; + case MacroExpandStarted: + case MacroExpandUndef: + continue; + } + } + if (token == tMarkerInput::marker || token == EndOfInput) + break; + expandedArg->putToken(token, ppToken); + } + + if (token != tMarkerInput::marker) { + // Error, or MacroExpand ate the marker, so had bad input, recover + delete expandedArg; + expandedArg = nullptr; + } + + return expandedArg; +} + +// +// Return the next token for a macro expansion, handling macro arguments, +// whose semantics are dependent on being adjacent to ##. +// +int TPpContext::tMacroInput::scan(TPpToken* ppToken) +{ + int token; + do { + token = mac->body.getToken(pp->parseContext, ppToken); + } while (token == ' '); // handle white space in macro + + // Hash operators basically turn off a round of macro substitution + // (the round done on the argument before the round done on the RHS of the + // macro definition): + // + // "A parameter in the replacement list, unless preceded by a # or ## + // preprocessing token or followed by a ## preprocessing token (see below), + // is replaced by the corresponding argument after all macros contained + // therein have been expanded." + // + // "If, in the replacement list, a parameter is immediately preceded or + // followed by a ## preprocessing token, the parameter is replaced by the + // corresponding argument's preprocessing token sequence." 
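+    //
+    // For example, with "#define CAT(a,b) a##b" and "#define X 1", the call
+    // CAT(X, 2) pastes the unexpanded spellings to produce X2, because the
+    // parameters touch the ## operator; a plain "#define ID(a) a" would
+    // expand X to 1 first and substitute that.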
+ + bool pasting = false; + if (postpaste) { + // don't expand next token + pasting = true; + postpaste = false; + } + + if (prepaste) { + // already know we should be on a ##, verify + assert(token == PpAtomPaste); + prepaste = false; + postpaste = true; + } + + // see if are preceding a ## + if (mac->body.peekUntokenizedPasting()) { + prepaste = true; + pasting = true; + } + + // HLSL does expand macros before concatenation + if (pasting && pp->parseContext.isReadingHLSL()) + pasting = false; + + // TODO: preprocessor: properly handle whitespace (or lack of it) between tokens when expanding + if (token == PpAtomIdentifier) { + int i; + for (i = (int)mac->args.size() - 1; i >= 0; i--) + if (strcmp(pp->atomStrings.getString(mac->args[i]), ppToken->name) == 0) + break; + if (i >= 0) { + TokenStream* arg = expandedArgs[i]; + if (arg == nullptr || pasting) + arg = args[i]; + pp->pushTokenStreamInput(*arg, prepaste); + + return pp->scanToken(ppToken); + } + } + + if (token == EndOfInput) + mac->busy = 0; + + return token; +} + +// return a textual zero, for scanning a macro that was never defined +int TPpContext::tZeroInput::scan(TPpToken* ppToken) +{ + if (done) + return EndOfInput; + + ppToken->name[0] = '0'; + ppToken->name[1] = 0; + ppToken->ival = 0; + ppToken->space = false; + done = true; + + return PpAtomConstInt; +} + +// +// Check a token to see if it is a macro that should be expanded: +// - If it is, and defined, push a tInput that will produce the appropriate +// expansion and return MacroExpandStarted. +// - If it is, but undefined, and expandUndef is requested, push a tInput +// that will expand to 0 and return MacroExpandUndef. +// - Otherwise, there is no expansion, and there are two cases: +// * It might be okay there is no expansion, and no specific error was +// detected. Returns MacroExpandNotStarted. +// * The expansion was started, but could not be completed, due to an error +// that cannot be recovered from. Returns MacroExpandError. +// +MacroExpandResult TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay) +{ + ppToken->space = false; + int macroAtom = atomStrings.getAtom(ppToken->name); + switch (macroAtom) { + case PpAtomLineMacro: + ppToken->ival = parseContext.getCurrentLoc().line; + snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival); + UngetToken(PpAtomConstInt, ppToken); + return MacroExpandStarted; + + case PpAtomFileMacro: { + if (parseContext.getCurrentLoc().name) + parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based __FILE__"); + ppToken->ival = parseContext.getCurrentLoc().string; + snprintf(ppToken->name, sizeof(ppToken->name), "%s", ppToken->loc.getStringNameOrNum().c_str()); + UngetToken(PpAtomConstInt, ppToken); + return MacroExpandStarted; + } + + case PpAtomVersionMacro: + ppToken->ival = parseContext.version; + snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival); + UngetToken(PpAtomConstInt, ppToken); + return MacroExpandStarted; + + default: + break; + } + + MacroSymbol* macro = macroAtom == 0 ? nullptr : lookupMacroDef(macroAtom); + + // no recursive expansions + if (macro != nullptr && macro->busy) + return MacroExpandNotStarted; + + // not expanding undefined macros + if ((macro == nullptr || macro->undef) && ! 
expandUndef)
+        return MacroExpandNotStarted;
+
+    // 0 is the value of an undefined macro
+    if ((macro == nullptr || macro->undef) && expandUndef) {
+        pushInput(new tZeroInput(this));
+        return MacroExpandUndef;
+    }
+
+    tMacroInput *in = new tMacroInput(this);
+
+    TSourceLoc loc = ppToken->loc;  // in case we go to the next line before discovering the error
+    in->mac = macro;
+    if (macro->functionLike) {
+        // We don't know yet if this will be a successful call of a
+        // function-like macro; need to look for a '(', but without trashing
+        // the passed in ppToken, until we know we are no longer speculative.
+        TPpToken parenToken;
+        int token = scanToken(&parenToken);
+        if (newLineOkay) {
+            while (token == '\n')
+                token = scanToken(&parenToken);
+        }
+        if (token != '(') {
+            // Function-like macro called with object-like syntax: okay, don't expand.
+            // (We ate exactly one token that might not be white space; put it back.)
+            UngetToken(token, &parenToken);
+            delete in;
+            return MacroExpandNotStarted;
+        }
+        in->args.resize(in->mac->args.size());
+        for (size_t i = 0; i < in->mac->args.size(); i++)
+            in->args[i] = new TokenStream;
+        in->expandedArgs.resize(in->mac->args.size());
+        for (size_t i = 0; i < in->mac->args.size(); i++)
+            in->expandedArgs[i] = nullptr;
+        size_t arg = 0;
+        bool tokenRecorded = false;
+        do {
+            TVector<char> nestStack;
+            while (true) {
+                token = scanToken(ppToken);
+                if (token == EndOfInput || token == tMarkerInput::marker) {
+                    parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom));
+                    delete in;
+                    return MacroExpandError;
+                }
+                if (token == '\n') {
+                    if (! newLineOkay) {
+                        parseContext.ppError(loc, "End of line in macro substitution:", "macro expansion", atomStrings.getString(macroAtom));
+                        delete in;
+                        return MacroExpandError;
+                    }
+                    continue;
+                }
+                if (token == '#') {
+                    parseContext.ppError(ppToken->loc, "unexpected '#'", "macro expansion", atomStrings.getString(macroAtom));
+                    delete in;
+                    return MacroExpandError;
+                }
+                if (in->mac->args.size() == 0 && token != ')')
+                    break;
+                if (nestStack.size() == 0 && (token == ',' || token == ')'))
+                    break;
+                if (token == '(')
+                    nestStack.push_back(')');
+                else if (token == '{' && parseContext.isReadingHLSL())
+                    nestStack.push_back('}');
+                else if (nestStack.size() > 0 && token == nestStack.back())
+                    nestStack.pop_back();
+                in->args[arg]->putToken(token, ppToken);
+                tokenRecorded = true;
+            }
+            // end of single argument scan
+
+            if (token == ')') {
+                // closing paren of call
+                if (in->mac->args.size() == 1 && !tokenRecorded)
+                    break;
+                arg++;
+                break;
+            }
+            arg++;
+        } while (arg < in->mac->args.size());
+        // end of all arguments scan
+
+        if (arg < in->mac->args.size())
+            parseContext.ppError(loc, "Too few args in Macro", "macro expansion", atomStrings.getString(macroAtom));
+        else if (token != ')') {
+            // Error recover code; find end of call, if possible
+            int depth = 0;
+            while (token != EndOfInput && (depth > 0 || token != ')')) {
+                if (token == ')' || token == '}')
+                    depth--;
+                token = scanToken(ppToken);
+                if (token == '(' || token == '{')
+                    depth++;
+            }
+
+            if (token == EndOfInput) {
+                parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom));
+                delete in;
+                return MacroExpandError;
+            }
+            parseContext.ppError(loc, "Too many args in macro", "macro expansion", atomStrings.getString(macroAtom));
+        }
+
+        // We need both expanded and non-expanded forms of the argument, for whether or
+        // not token pasting will be applied later when the argument is
consumed next to ##. + for (size_t i = 0; i < in->mac->args.size(); i++) + in->expandedArgs[i] = PrescanMacroArg(*in->args[i], ppToken, newLineOkay); + } + + pushInput(in); + macro->busy = 1; + macro->body.reset(); + + return MacroExpandStarted; +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp b/dep/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp new file mode 100644 index 000000000..06c2333ef --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp @@ -0,0 +1,181 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. 
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace {
+
+using namespace glslang;
+
+const struct {
+    int val;
+    const char* str;
+} tokens[] = {
+
+    { PPAtomAddAssign, "+=" },
+    { PPAtomSubAssign, "-=" },
+    { PPAtomMulAssign, "*=" },
+    { PPAtomDivAssign, "/=" },
+    { PPAtomModAssign, "%=" },
+
+    { PpAtomRight, ">>" },
+    { PpAtomLeft, "<<" },
+    { PpAtomAnd, "&&" },
+    { PpAtomOr, "||" },
+    { PpAtomXor, "^^" },
+
+    { PpAtomRightAssign, ">>=" },
+    { PpAtomLeftAssign, "<<=" },
+    { PpAtomAndAssign, "&=" },
+    { PpAtomOrAssign, "|=" },
+    { PpAtomXorAssign, "^=" },
+
+    { PpAtomEQ, "==" },
+    { PpAtomNE, "!=" },
+    { PpAtomGE, ">=" },
+    { PpAtomLE, "<=" },
+
+    { PpAtomDecrement, "--" },
+    { PpAtomIncrement, "++" },
+
+    { PpAtomColonColon, "::" },
+
+    { PpAtomDefine, "define" },
+    { PpAtomUndef, "undef" },
+    { PpAtomIf, "if" },
+    { PpAtomElif, "elif" },
+    { PpAtomElse, "else" },
+    { PpAtomEndif, "endif" },
+    { PpAtomIfdef, "ifdef" },
+    { PpAtomIfndef, "ifndef" },
+    { PpAtomLine, "line" },
+    { PpAtomPragma, "pragma" },
+    { PpAtomError, "error" },
+
+    { PpAtomVersion, "version" },
+    { PpAtomCore, "core" },
+    { PpAtomCompatibility, "compatibility" },
+    { PpAtomEs, "es" },
+    { PpAtomExtension, "extension" },
+
+    { PpAtomLineMacro, "__LINE__" },
+    { PpAtomFileMacro, "__FILE__" },
+    { PpAtomVersionMacro, "__VERSION__" },
+
+    { PpAtomInclude, "include" },
+};
+
+} // end anonymous namespace
+
+namespace glslang {
+
+//
+// Initialize the atom table.
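+// For example (illustrative only), after construction the fixed mappings
+// above give:
+//     getAtom("define")        -> PpAtomDefine
+//     getAtom("++")            -> PpAtomIncrement
+//     getString(PpAtomDefine)  -> "define"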
+//
+TStringAtomMap::TStringAtomMap()
+{
+    badToken.assign("<bad token>");
+
+    // Add single character tokens to the atom table:
+    const char* s = "~!%^&*()-+=|,.<>/?;:[]{}#\\";
+    char t[2];
+
+    t[1] = '\0';
+    while (*s) {
+        t[0] = *s;
+        addAtomFixed(t, s[0]);
+        s++;
+    }
+
+    // Add multiple character scanner tokens:
+    for (size_t ii = 0; ii < sizeof(tokens)/sizeof(tokens[0]); ii++)
+        addAtomFixed(tokens[ii].str, tokens[ii].val);
+
+    nextAtom = PpAtomLast;
+}
+
+} // end namespace glslang
diff --git a/dep/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp b/dep/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp
new file mode 100644
index 000000000..1363ce2be
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#include <cstdlib>
+#include <locale>
+
+#include "PpContext.h"
+
+namespace glslang {
+
+TPpContext::TPpContext(TParseContextBase& pc, const std::string& rootFileName, TShader::Includer& inclr) :
+    preamble(0), strings(0), previous_token('\n'), parseContext(pc), includer(inclr), inComment(false),
+    rootFileName(rootFileName),
+    currentSourceFile(rootFileName),
+    disableEscapeSequences(false)
+{
+    ifdepth = 0;
+    for (elsetracker = 0; elsetracker < maxIfNesting; elsetracker++)
+        elseSeen[elsetracker] = false;
+    elsetracker = 0;
+
+    strtodStream.imbue(std::locale::classic());
+}
+
+TPpContext::~TPpContext()
+{
+    delete [] preamble;
+
+    // free up the inputStack
+    while (! inputStack.empty())
+        popInput();
+}
+
+void TPpContext::setInput(TInputScanner& input, bool versionWillBeError)
+{
+    assert(inputStack.size() == 0);
+
+    pushInput(new tStringInput(this, input));
+
+    errorOnVersion = versionWillBeError;
+    versionSeen = false;
+}
+
+} // end namespace glslang
diff --git a/dep/glslang/glslang/MachineIndependent/preprocessor/PpContext.h b/dep/glslang/glslang/MachineIndependent/preprocessor/PpContext.h
new file mode 100644
index 000000000..714b5eadb
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/preprocessor/PpContext.h
@@ -0,0 +1,703 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd.
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+\****************************************************************************/
+
+#ifndef PPCONTEXT_H
+#define PPCONTEXT_H
+
+#include <sstream>
+#include <stack>
+#include <unordered_map>
+
+#include "../ParseHelper.h"
+#include "PpTokens.h"
+
+/* windows only pragma */
+#ifdef _MSC_VER
+    #pragma warning(disable : 4127)
+#endif
+
+namespace glslang {
+
+class TPpToken {
+public:
+    TPpToken() { clear(); }
+    void clear()
+    {
+        space = false;
+        i64val = 0;
+        loc.init();
+        name[0] = 0;
+    }
+
+    // Used for comparing macro definitions, so checks what is relevant for that.
+    bool operator==(const TPpToken& right) const
+    {
+        return space == right.space &&
+               ival == right.ival && dval == right.dval && i64val == right.i64val &&
+               strncmp(name, right.name, MaxTokenLength) == 0;
+    }
+    bool operator!=(const TPpToken& right) const { return ! operator==(right); }
+
+    TSourceLoc loc;
+    // True if a space (for white space or a removed comment) should also be
+    // recognized, in front of the token returned:
+    bool space;
+    // Numeric value of the token:
+    union {
+        int ival;
+        double dval;
+        long long i64val;
+    };
+    // Text string of the token:
+    char name[MaxTokenLength + 1];
+};
+
+class TStringAtomMap {
+//
+// Implementation is in PpAtom.cpp
+//
+// Maintain a bi-directional mapping between relevant preprocessor strings and
+// "atoms", which are unique integers (small, contiguous, not hash-like) per string.
+//
+public:
+    TStringAtomMap();
+
+    // Map string -> atom.
+    // Return 0 if no existing string.
+    int getAtom(const char* s) const
+    {
+        auto it = atomMap.find(s);
+        return it == atomMap.end() ? 0 : it->second;
+    }
+
+    // Map a new or existing string -> atom, inventing a new atom if necessary.
+    int getAddAtom(const char* s)
+    {
+        int atom = getAtom(s);
+        if (atom == 0) {
+            atom = nextAtom++;
+            addAtomFixed(s, atom);
+        }
+        return atom;
+    }
+
+    // Map atom -> string.
+    const char* getString(int atom) const { return stringMap[atom]->c_str(); }
+
+protected:
+    TStringAtomMap(TStringAtomMap&);
+    TStringAtomMap& operator=(TStringAtomMap&);
+
+    TUnorderedMap<TString, int> atomMap;
+    TVector<const TString*> stringMap;    // these point into the TString in atomMap
+    int nextAtom;
+
+    // Bad source characters can lead to bad atoms, so gracefully handle those by
+    // pre-filling the table with them (to avoid if tests later).
+    TString badToken;
+
+    // Add bi-directional mappings:
+    //  - string -> atom
+    //  - atom -> string
+    void addAtomFixed(const char* s, int atom)
+    {
+        auto it = atomMap.insert(std::pair<TString, int>(s, atom)).first;
+        if (stringMap.size() < (size_t)atom + 1)
+            stringMap.resize(atom + 100, &badToken);
+        stringMap[atom] = &it->first;
+    }
+};
+
+class TInputScanner;
+
+enum MacroExpandResult {
+    MacroExpandNotStarted, // macro not expanded, which might not be an error
+    MacroExpandError,      // a clear error occurred while expanding, no expansion
+    MacroExpandStarted,    // macro expansion process has started
+    MacroExpandUndef       // macro is undefined and will be expanded
+};
+
+// This class is the result of turning a huge pile of C code communicating through globals
+// into a class. This was done to allow instancing to attain thread safety.
+// Don't expect too much in terms of OO design.
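+//
+// Rough usage sketch (illustrative; everything beyond the declared API below
+// is an assumption): one TPpContext is constructed per compile, fed a
+// scanner, then drained token by token, roughly
+//
+//     TPpContext pp(parseContext, "shader.vert", includer);
+//     pp.setInput(inputScanner, versionWillBeError);
+//     TPpToken ppToken;
+//     while (pp.tokenize(ppToken) != EndOfInput) { /* consume token */ }
+//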
+class TPpContext {
+public:
+    TPpContext(TParseContextBase&, const std::string& rootFileName, TShader::Includer&);
+    virtual ~TPpContext();
+
+    void setPreamble(const char* preamble, size_t length);
+
+    int tokenize(TPpToken& ppToken);
+    int tokenPaste(int token, TPpToken&);
+
+    class tInput {
+    public:
+        tInput(TPpContext* p) : done(false), pp(p) { }
+        virtual ~tInput() { }
+
+        virtual int scan(TPpToken*) = 0;
+        virtual int getch() = 0;
+        virtual void ungetch() = 0;
+        virtual bool peekPasting() { return false; }             // true when about to see ##
+        virtual bool peekContinuedPasting(int) { return false; } // true when non-spaced tokens can paste
+        virtual bool endOfReplacementList() { return false; }    // true when at the end of a macro replacement list (RHS of #define)
+        virtual bool isMacroInput() { return false; }
+
+        // Will be called when we start reading tokens from this instance
+        virtual void notifyActivated() {}
+        // Will be called when we do not read tokens from this instance anymore
+        virtual void notifyDeleted() {}
+    protected:
+        bool done;
+        TPpContext* pp;
+    };
+
+    void setInput(TInputScanner& input, bool versionWillBeError);
+
+    void pushInput(tInput* in)
+    {
+        inputStack.push_back(in);
+        in->notifyActivated();
+    }
+    void popInput()
+    {
+        inputStack.back()->notifyDeleted();
+        delete inputStack.back();
+        inputStack.pop_back();
+    }
+
+    //
+    // From PpTokens.cpp
+    //
+
+    // Capture the needed parts of a token stream for macro recording/playback.
+    class TokenStream {
+    public:
+        // Manage a stream of these 'Token', which capture the relevant parts
+        // of a TPpToken, plus its atom.
+        class Token {
+        public:
+            Token(int atom, const TPpToken& ppToken) :
+                atom(atom),
+                space(ppToken.space),
+                i64val(ppToken.i64val),
+                name(ppToken.name) { }
+            int get(TPpToken& ppToken)
+            {
+                ppToken.clear();
+                ppToken.space = space;
+                ppToken.i64val = i64val;
+                snprintf(ppToken.name, sizeof(ppToken.name), "%s", name.c_str());
+                return atom;
+            }
+            bool isAtom(int a) const { return atom == a; }
+            int getAtom() const { return atom; }
+            bool nonSpaced() const { return !space; }
+        protected:
+            Token() {}
+            int atom;
+            bool space;         // did a space precede the token?
+            long long i64val;
+            TString name;
+        };
+
+        TokenStream() : currentPos(0) { }
+
+        void putToken(int token, TPpToken* ppToken);
+        bool peekToken(int atom) { return !atEnd() && stream[currentPos].isAtom(atom); }
+        bool peekContinuedPasting(int atom)
+        {
+            // This is basically necessary because, for example, the PP
+            // tokenizer only accepts valid numeric-literals plus suffixes, so
+            // it separates a numeric-literal plus a bad suffix into two tokens,
+            // which should both get pasted back together as one token when
+            // token pasting.
+            //
+            // The following code is a bit more generalized than the above example.
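+            //
+            // Illustrative case (an assumption drawn from the comment above):
+            // "123abc" is scanned as two adjacent, non-spaced tokens "123"
+            // and "abc"; this check lets pasting continue across that token
+            // boundary so the pieces merge back into a single token.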
+            if (!atEnd() && atom == PpAtomIdentifier && stream[currentPos].nonSpaced()) {
+                switch(stream[currentPos].getAtom()) {
+                    case PpAtomConstInt:
+                    case PpAtomConstUint:
+                    case PpAtomConstInt64:
+                    case PpAtomConstUint64:
+                    case PpAtomConstInt16:
+                    case PpAtomConstUint16:
+                    case PpAtomConstFloat:
+                    case PpAtomConstDouble:
+                    case PpAtomConstFloat16:
+                    case PpAtomConstString:
+                    case PpAtomIdentifier:
+                        return true;
+                    default:
+                        break;
+                }
+            }
+
+            return false;
+        }
+        int getToken(TParseContextBase&, TPpToken*);
+        bool atEnd() { return currentPos >= stream.size(); }
+        bool peekTokenizedPasting(bool lastTokenPastes);
+        bool peekUntokenizedPasting();
+        void reset() { currentPos = 0; }
+
+    protected:
+        TVector<Token> stream;
+        size_t currentPos;
+    };
+
+    //
+    // From Pp.cpp
+    //
+
+    struct MacroSymbol {
+        MacroSymbol() : functionLike(0), busy(0), undef(0) { }
+        TVector<int> args;
+        TokenStream body;
+        unsigned functionLike : 1;  // 0 means object-like, 1 means function-like
+        unsigned busy         : 1;
+        unsigned undef        : 1;
+    };
+
+    typedef TMap<int, MacroSymbol> TSymbolMap;
+    TSymbolMap macroDefs;  // map atoms to macro definitions
+    MacroSymbol* lookupMacroDef(int atom)
+    {
+        auto existingMacroIt = macroDefs.find(atom);
+        return (existingMacroIt == macroDefs.end()) ? nullptr : &(existingMacroIt->second);
+    }
+    void addMacroDef(int atom, MacroSymbol& macroDef) { macroDefs[atom] = macroDef; }
+
+protected:
+    TPpContext(TPpContext&);
+    TPpContext& operator=(TPpContext&);
+
+    TStringAtomMap atomStrings;
+    char*   preamble;       // string to parse, all before line 1 of string 0; it is 0 if no preamble
+    int     preambleLength;
+    char**  strings;        // official strings of shader, starting at string 0 line 1
+    size_t* lengths;
+    int     numStrings;     // how many official strings there are
+    int     currentString;  // which string we're currently parsing (-1 for preamble)
+
+    // Scanner data:
+    int previous_token;
+    TParseContextBase& parseContext;
+
+    // Get the next token from *stack* of input sources, popping input sources
+    // that are out of tokens, down until an input source is found that has a token.
+    // Return EndOfInput when there are no more tokens to be found by doing this.
+    int scanToken(TPpToken* ppToken)
+    {
+        int token = EndOfInput;
+
+        while (! inputStack.empty()) {
+            token = inputStack.back()->scan(ppToken);
+            if (token != EndOfInput || inputStack.empty())
+                break;
+            popInput();
+        }
+
+        return token;
+    }
+    int  getChar() { return inputStack.back()->getch(); }
+    void ungetChar() { inputStack.back()->ungetch(); }
+    bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); }
+    bool peekContinuedPasting(int a)
+    {
+        return !inputStack.empty() && inputStack.back()->peekContinuedPasting(a);
+    }
+    bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); }
+    bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); }
+
+    static const int maxIfNesting = 65;
+
+    int ifdepth;                  // current #if-#else-#endif nesting in the cpp.c file (pre-processor)
+    bool elseSeen[maxIfNesting];  // keep track of whether an else has been seen at a particular depth
+    int elsetracker;              // counter for #if-#else-#endif constructs
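+
+    // Informal illustration of the input stack during macro expansion: given
+    //     #define A B
+    //     #define B 1
+    // scanning "A" grows the stack roughly as
+    //     [string input]                    -> sees "A"
+    //     [string input, macro A]           -> sees "B"
+    //     [string input, macro A, macro B]  -> yields "1"
+    // and scanToken() above pops each tMacroInput once it is exhausted.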
+
+    class tMacroInput : public tInput {
+    public:
+        tMacroInput(TPpContext* pp) : tInput(pp), prepaste(false), postpaste(false) { }
+        virtual ~tMacroInput()
+        {
+            for (size_t i = 0; i < args.size(); ++i)
+                delete args[i];
+            for (size_t i = 0; i < expandedArgs.size(); ++i)
+                delete expandedArgs[i];
+        }
+
+        virtual int scan(TPpToken*) override;
+        virtual int getch() override { assert(0); return EndOfInput; }
+        virtual void ungetch() override { assert(0); }
+        bool peekPasting() override { return prepaste; }
+        bool peekContinuedPasting(int a) override { return mac->body.peekContinuedPasting(a); }
+        bool endOfReplacementList() override { return mac->body.atEnd(); }
+        bool isMacroInput() override { return true; }
+
+        MacroSymbol *mac;
+        TVector<TokenStream*> args;
+        TVector<TokenStream*> expandedArgs;
+
+    protected:
+        bool prepaste;   // true if we are just before ##
+        bool postpaste;  // true if we are right after ##
+    };
+
+    class tMarkerInput : public tInput {
+    public:
+        tMarkerInput(TPpContext* pp) : tInput(pp) { }
+        virtual int scan(TPpToken*) override
+        {
+            if (done)
+                return EndOfInput;
+            done = true;
+
+            return marker;
+        }
+        virtual int getch() override { assert(0); return EndOfInput; }
+        virtual void ungetch() override { assert(0); }
+        static const int marker = -3;
+    };
+
+    class tZeroInput : public tInput {
+    public:
+        tZeroInput(TPpContext* pp) : tInput(pp) { }
+        virtual int scan(TPpToken*) override;
+        virtual int getch() override { assert(0); return EndOfInput; }
+        virtual void ungetch() override { assert(0); }
+    };
+
+    std::vector<tInput*> inputStack;
+    bool errorOnVersion;
+    bool versionSeen;
+
+    //
+    // from Pp.cpp
+    //
+
+    // Used to obtain #include content.
+    TShader::Includer& includer;
+
+    int CPPdefine(TPpToken * ppToken);
+    int CPPundef(TPpToken * ppToken);
+    int CPPelse(int matchelse, TPpToken * ppToken);
+    int extraTokenCheck(int atom, TPpToken* ppToken, int token);
+    int eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
+    int evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
+    int CPPif(TPpToken * ppToken);
+    int CPPifdef(int defined, TPpToken * ppToken);
+    int CPPinclude(TPpToken * ppToken);
+    int CPPline(TPpToken * ppToken);
+    int CPPerror(TPpToken * ppToken);
+    int CPPpragma(TPpToken * ppToken);
+    int CPPversion(TPpToken * ppToken);
+    int CPPextension(TPpToken * ppToken);
+    int readCPPline(TPpToken * ppToken);
+    int scanHeaderName(TPpToken* ppToken, char delimit);
+    TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay);
+    MacroExpandResult MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
+
+    //
+    // From PpTokens.cpp
+    //
+    void pushTokenStreamInput(TokenStream&, bool pasting = false);
+    void UngetToken(int token, TPpToken*);
+
+    class tTokenInput : public tInput {
+    public:
+        tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) :
+            tInput(pp),
+            tokens(t),
+            lastTokenPastes(prepasting) { }
+        virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
+        virtual int getch() override { assert(0); return EndOfInput; }
+        virtual void ungetch() override { assert(0); }
+        virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
+        bool peekContinuedPasting(int a) override { return tokens->peekContinuedPasting(a); }
+    protected:
+        TokenStream* tokens;
+        bool lastTokenPastes;  // true if the last token in the input is to be pasted, rather than consumed as a token
+    };
+
+    class tUngotTokenInput : public tInput {
+    public:
+        tUngotTokenInput(TPpContext* pp, int t, TPpToken* p) : tInput(pp), token(t), lval(*p) { }
+        virtual int scan(TPpToken *) override;
+        virtual int getch() override { assert(0); return EndOfInput; }
+        virtual void ungetch() override { assert(0); }
+    protected:
+        int token;
+        TPpToken lval;
+    };
+
+    //
+    // From PpScanner.cpp
+    //
+    class tStringInput : public tInput {
+    public:
+        tStringInput(TPpContext* pp, TInputScanner& i) : tInput(pp), input(&i) { }
+        virtual int scan(TPpToken*) override;
+
+        // Scanner used to get source stream characters.
+        //  - Escaped newlines are handled here, invisibly to the caller.
+        //  - All forms of newline are handled, and turned into just a '\n'.
+        int getch() override
+        {
+            int ch = input->get();
+
+            if (ch == '\\') {
+                // Move past escaped newlines, as many as sequentially exist
+                do {
+                    if (input->peek() == '\r' || input->peek() == '\n') {
+                        bool allowed = pp->parseContext.lineContinuationCheck(input->getSourceLoc(), pp->inComment);
+                        if (! allowed && pp->inComment)
+                            return '\\';
+
+                        // escape one newline now
+                        ch = input->get();
+                        int nextch = input->get();
+                        if (ch == '\r' && nextch == '\n')
+                            ch = input->get();
+                        else
+                            ch = nextch;
+                    } else
+                        return '\\';
+                } while (ch == '\\');
+            }
+
+            // handle any non-escaped newline
+            if (ch == '\r' || ch == '\n') {
+                if (ch == '\r' && input->peek() == '\n')
+                    input->get();
+                return '\n';
+            }
+
+            return ch;
+        }
+
+        // Scanner used to backup the source stream characters. Newlines are
+        // handled here, invisibly to the caller, meaning we have to undo exactly
+        // what getch() above does (e.g., don't leave things in the middle of a
+        // sequence of escaped newlines).
+        void ungetch() override
+        {
+            input->unget();
+
+            do {
+                int ch = input->peek();
+                if (ch == '\r' || ch == '\n') {
+                    if (ch == '\n') {
+                        // correct for two-character newline
+                        input->unget();
+                        if (input->peek() != '\r')
+                            input->get();
+                    }
+                    // now in front of a complete newline, move past an escape character
+                    input->unget();
+                    if (input->peek() == '\\')
+                        input->unget();
+                    else {
+                        input->get();
+                        break;
+                    }
+                } else
+                    break;
+            } while (true);
+        }
+
+    protected:
+        TInputScanner* input;
+    };
+
+    // Holds a reference to included file data, as well as a
+    // prologue and an epilogue string. This can be scanned using the tInput
+    // interface and acts as a single source string.
+    class TokenizableIncludeFile : public tInput {
+    public:
+        // Copies prologue and epilogue. The includedFile must remain valid
+        // until this TokenizableIncludeFile is no longer used.
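+        //
+        // Informal illustration (the exact glue strings are built by
+        // CPPinclude()): for `#include "foo.h"` the three scanned pieces are
+        //     strings[0] = prologue   (e.g. line-directive glue)
+        //     strings[1] = contents of foo.h
+        //     strings[2] = epilogue   (glue restoring the includer's position)
+        // which together read as one logical source string.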
+        TokenizableIncludeFile(const TSourceLoc& startLoc,
+                               const std::string& prologue,
+                               TShader::Includer::IncludeResult* includedFile,
+                               const std::string& epilogue,
+                               TPpContext* pp)
+            : tInput(pp),
+              prologue_(prologue),
+              epilogue_(epilogue),
+              includedFile_(includedFile),
+              scanner(3, strings, lengths, nullptr, 0, 0, true),
+              prevScanner(nullptr),
+              stringInput(pp, scanner)
+        {
+            strings[0] = prologue_.data();
+            strings[1] = includedFile_->headerData;
+            strings[2] = epilogue_.data();
+
+            lengths[0] = prologue_.size();
+            lengths[1] = includedFile_->headerLength;
+            lengths[2] = epilogue_.size();
+
+            scanner.setLine(startLoc.line);
+            scanner.setString(startLoc.string);
+
+            scanner.setFile(startLoc.getFilenameStr(), 0);
+            scanner.setFile(startLoc.getFilenameStr(), 1);
+            scanner.setFile(startLoc.getFilenameStr(), 2);
+        }
+
+        // tInput methods:
+        int scan(TPpToken* t) override { return stringInput.scan(t); }
+        int getch() override { return stringInput.getch(); }
+        void ungetch() override { stringInput.ungetch(); }
+
+        void notifyActivated() override
+        {
+            prevScanner = pp->parseContext.getScanner();
+            pp->parseContext.setScanner(&scanner);
+            pp->push_include(includedFile_);
+        }
+
+        void notifyDeleted() override
+        {
+            pp->parseContext.setScanner(prevScanner);
+            pp->pop_include();
+        }
+
+    private:
+        TokenizableIncludeFile& operator=(const TokenizableIncludeFile&);
+
+        // Stores the prologue for this string.
+        const std::string prologue_;
+
+        // Stores the epilogue for this string.
+        const std::string epilogue_;
+
+        // Points to the IncludeResult that this TokenizableIncludeFile represents.
+        TShader::Includer::IncludeResult* includedFile_;
+
+        // Will point to prologue_, includedFile_->headerData and epilogue_.
+        // This is passed to the scanner constructor.
+        // These do not own the storage and it must remain valid until this
+        // object has been destroyed.
+        const char* strings[3];
+        // Lengths of the entries in strings, passed to the scanner constructor.
+        size_t lengths[3];
+        // Scans over strings[].
+        TInputScanner scanner;
+        // The previous effective scanner before the scanner in this instance
+        // has been activated.
+        TInputScanner* prevScanner;
+        // Delegate object implementing the tInput interface.
+        tStringInput stringInput;
+    };
+
+    int ScanFromString(char* s);
+    void missingEndifCheck();
+    int lFloatConst(int len, int ch, TPpToken* ppToken);
+    int characterLiteral(TPpToken* ppToken);
+
+    void push_include(TShader::Includer::IncludeResult* result)
+    {
+        currentSourceFile = result->headerName;
+        includeStack.push(result);
+    }
+
+    void pop_include()
+    {
+        TShader::Includer::IncludeResult* include = includeStack.top();
+        includeStack.pop();
+        includer.releaseInclude(include);
+        if (includeStack.empty()) {
+            currentSourceFile = rootFileName;
+        } else {
+            currentSourceFile = includeStack.top()->headerName;
+        }
+    }
+
+    bool inComment;
+    std::string rootFileName;
+    std::stack<TShader::Includer::IncludeResult*> includeStack;
+    std::string currentSourceFile;
+
+    std::istringstream strtodStream;
+    bool disableEscapeSequences;
+};
+
+} // end namespace glslang
+
+#endif // PPCONTEXT_H
diff --git a/dep/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp b/dep/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp
new file mode 100644
index 000000000..e0f44f8b4
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp
@@ -0,0 +1,1315 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. 
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <cstdlib>
+#include <cstring>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+#include "../Scan.h"
+
+namespace glslang {
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////// Floating point constants: /////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+//
+// Scan a single- or double-precision floating point constant.
+// Assumes that the scanner has seen at least one digit,
+// followed by either a decimal '.' or the letter 'e', or a
+// precision ending (e.g., F or LF).
+//
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This affects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+//
+// See peekContinuedPasting().
+int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
+{
+    const auto saveName = [&](int ch) {
+        if (len <= MaxTokenLength)
+            ppToken->name[len++] = static_cast<char>(ch);
+    };
+
+    // find the range of non-zero digits before the decimal point
+    int startNonZero = 0;
+    while (startNonZero < len && ppToken->name[startNonZero] == '0')
+        ++startNonZero;
+    int endNonZero = len;
+    while (endNonZero > startNonZero && ppToken->name[endNonZero-1] == '0')
+        --endNonZero;
+    int numWholeNumberDigits = endNonZero - startNonZero;
+
+    // accumulate the range's value
+    bool fastPath = numWholeNumberDigits <= 15;  // when the number gets too complex, set to false
+    unsigned long long wholeNumber = 0;
+    if (fastPath) {
+        for (int i = startNonZero; i < endNonZero; ++i)
+            wholeNumber = wholeNumber * 10 + (ppToken->name[i] - '0');
+    }
+    int decimalShift = len - endNonZero;
+
+    // Decimal point:
+    bool hasDecimalOrExponent = false;
+    if (ch == '.') {
+        hasDecimalOrExponent = true;
+        saveName(ch);
+        ch = getChar();
+        int firstDecimal = len;
+
+#ifdef ENABLE_HLSL
+        // 1.#INF or -1.#INF
+        if (ch == '#' && (ifdepth > 0 || parseContext.intermediate.getSource() == EShSourceHlsl)) {
+            if ((len < 2) ||
+                (len == 2 && ppToken->name[0] != '1') ||
+                (len == 3 && ppToken->name[1] != '1' && !(ppToken->name[0] == '-' || ppToken->name[0] == '+')) ||
+                (len > 3))
+                parseContext.ppError(ppToken->loc, "unexpected use of", "#", "");
+            else {
+                // we have 1.# or -1.# or +1.#, check for 'INF'
+                if ((ch = getChar()) != 'I' ||
+                    (ch = getChar()) != 'N' ||
+                    (ch = getChar()) != 'F')
+                    parseContext.ppError(ppToken->loc, "expected 'INF'", "#", "");
+                else {
+                    // we have [+-].#INF, and we are targeting IEEE 754, so wrap it up:
+                    saveName('I');
+                    saveName('N');
+                    saveName('F');
+                    ppToken->name[len] = '\0';
+                    if (ppToken->name[0] == '-')
ppToken->i64val = 0xfff0000000000000; // -Infinity + else + ppToken->i64val = 0x7ff0000000000000; // +Infinity + return PpAtomConstFloat; + } + } + } +#endif + + // Consume leading-zero digits after the decimal point + while (ch == '0') { + saveName(ch); + ch = getChar(); + } + int startNonZeroDecimal = len; + int endNonZeroDecimal = len; + + // Consume remaining digits, up to the exponent + while (ch >= '0' && ch <= '9') { + saveName(ch); + if (ch != '0') + endNonZeroDecimal = len; + ch = getChar(); + } + + // Compute accumulation up to the last non-zero digit + if (endNonZeroDecimal > startNonZeroDecimal) { + numWholeNumberDigits += endNonZeroDecimal - endNonZero - 1; // don't include the "." + if (numWholeNumberDigits > 15) + fastPath = false; + if (fastPath) { + for (int i = endNonZero; i < endNonZeroDecimal; ++i) { + if (ppToken->name[i] != '.') + wholeNumber = wholeNumber * 10 + (ppToken->name[i] - '0'); + } + } + decimalShift = firstDecimal - endNonZeroDecimal; + } + } + + // Exponent: + bool negativeExponent = false; + double exponentValue = 0.0; + int exponent = 0; + { + if (ch == 'e' || ch == 'E') { + hasDecimalOrExponent = true; + saveName(ch); + ch = getChar(); + if (ch == '+' || ch == '-') { + negativeExponent = ch == '-'; + saveName(ch); + ch = getChar(); + } + if (ch >= '0' && ch <= '9') { + while (ch >= '0' && ch <= '9') { + exponent = exponent * 10 + (ch - '0'); + saveName(ch); + ch = getChar(); + } + } else { + parseContext.ppError(ppToken->loc, "bad character in float exponent", "", ""); + } + } + + // Compensate for location of decimal + if (negativeExponent) + exponent -= decimalShift; + else { + exponent += decimalShift; + if (exponent < 0) { + negativeExponent = true; + exponent = -exponent; + } + } + if (exponent > 22) + fastPath = false; + + if (fastPath) { + // Compute the floating-point value of the exponent + exponentValue = 1.0; + if (exponent > 0) { + double expFactor = 10; + while (exponent > 0) { + if (exponent & 0x1) + exponentValue *= expFactor; + expFactor *= expFactor; + exponent >>= 1; + } + } + } + } + + // Suffix: + bool isDouble = false; + bool isFloat16 = false; +#ifndef GLSLANG_WEB + if (ch == 'l' || ch == 'L') { + if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl) + parseContext.doubleCheck(ppToken->loc, "double floating-point suffix"); + if (ifdepth == 0 && !hasDecimalOrExponent) + parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", ""); + if (parseContext.intermediate.getSource() == EShSourceGlsl) { + int ch2 = getChar(); + if (ch2 != 'f' && ch2 != 'F') { + ungetChar(); + ungetChar(); + } else { + saveName(ch); + saveName(ch2); + isDouble = true; + } + } else if (parseContext.intermediate.getSource() == EShSourceHlsl) { + saveName(ch); + isDouble = true; + } + } else if (ch == 'h' || ch == 'H') { + if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl) + parseContext.float16Check(ppToken->loc, "half floating-point suffix"); + if (ifdepth == 0 && !hasDecimalOrExponent) + parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", ""); + if (parseContext.intermediate.getSource() == EShSourceGlsl) { + int ch2 = getChar(); + if (ch2 != 'f' && ch2 != 'F') { + ungetChar(); + ungetChar(); + } else { + saveName(ch); + saveName(ch2); + isFloat16 = true; + } + } else if (parseContext.intermediate.getSource() == EShSourceHlsl) { + saveName(ch); + isFloat16 = true; + } + } else +#endif + if (ch == 'f' || ch == 'F') { +#ifndef GLSLANG_WEB + if 
(ifdepth == 0)
+            parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix");
+        if (ifdepth == 0 && !parseContext.relaxedErrors())
+            parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix");
+#endif
+        if (ifdepth == 0 && !hasDecimalOrExponent)
+            parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
+        saveName(ch);
+    } else
+        ungetChar();
+
+    // Patch up the name and length for overflow
+
+    if (len > MaxTokenLength) {
+        len = MaxTokenLength;
+        parseContext.ppError(ppToken->loc, "float literal too long", "", "");
+    }
+    ppToken->name[len] = '\0';
+
+    // Compute the numerical value
+    if (fastPath) {
+        // compute the floating-point value of the exponent
+        if (exponentValue == 0.0)
+            ppToken->dval = (double)wholeNumber;
+        else if (negativeExponent)
+            ppToken->dval = (double)wholeNumber / exponentValue;
+        else
+            ppToken->dval = (double)wholeNumber * exponentValue;
+    } else {
+        // slow path
+        ppToken->dval = 0.0;
+
+        // remove suffix
+        TString numstr(ppToken->name);
+        if (numstr.back() == 'f' || numstr.back() == 'F')
+            numstr.pop_back();
+        if (numstr.back() == 'h' || numstr.back() == 'H')
+            numstr.pop_back();
+        if (numstr.back() == 'l' || numstr.back() == 'L')
+            numstr.pop_back();
+
+        // use platform library
+        strtodStream.clear();
+        strtodStream.str(numstr.c_str());
+        strtodStream >> ppToken->dval;
+        if (strtodStream.fail()) {
+            // Assume failure combined with a large exponent was overflow, in
+            // an attempt to set INF.
+            if (!negativeExponent && exponent + numWholeNumberDigits > 300)
+                ppToken->i64val = 0x7ff0000000000000; // +Infinity
+            // Assume failure combined with a small exponent was underflow.
+            if (negativeExponent && exponent + numWholeNumberDigits > 300)
+                ppToken->dval = 0.0;
+            // Unknown reason for failure. Theory is that either
+            //  - the 0.0 is still there, or
+            //  - something reasonable was written that is better than 0.0
+        }
+    }
+
+    // Return the right token type
+    if (isDouble)
+        return PpAtomConstDouble;
+    else if (isFloat16)
+        return PpAtomConstFloat16;
+    else
+        return PpAtomConstFloat;
+}
+
+// Recognize a character literal.
+//
+// The first ' has already been accepted, read the rest, through the closing '.
+//
+// Always returns PpAtomConstInt.
+//
+int TPpContext::characterLiteral(TPpToken* ppToken)
+{
+    ppToken->name[0] = 0;
+    ppToken->ival = 0;
+
+    if (parseContext.intermediate.getSource() != EShSourceHlsl) {
+        // illegal, except in macro definition, for which case we report the character
+        return '\'';
+    }
+
+    int ch = getChar();
+    switch (ch) {
+    case '\'':
+        // As empty sequence: ''
+        parseContext.ppError(ppToken->loc, "unexpected", "\'", "");
+        return PpAtomConstInt;
+    case '\\':
+        // As escape sequence: '\XXX'
+        switch (ch = getChar()) {
+        case 'a':
+            ppToken->ival = 7;
+            break;
+        case 'b':
+            ppToken->ival = 8;
+            break;
+        case 't':
+            ppToken->ival = 9;
+            break;
+        case 'n':
+            ppToken->ival = 10;
+            break;
+        case 'v':
+            ppToken->ival = 11;
+            break;
+        case 'f':
+            ppToken->ival = 12;
+            break;
+        case 'r':
+            ppToken->ival = 13;
+            break;
+        case 'x':
+        case '0':
+            parseContext.ppError(ppToken->loc, "octal and hex sequences not supported", "\\", "");
+            break;
+        default:
+            // This catches '\'', '\"', '\?', etc.
+            // Also, things like '\C' mean the same thing as 'C'
+            // (after the above cases are filtered out).
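+            // For example (illustrative): '\q' simply falls through to this
+            // default case and yields ival == 'q'.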
+            ppToken->ival = ch;
+            break;
+        }
+        break;
+    default:
+        ppToken->ival = ch;
+        break;
+    }
+    ppToken->name[0] = (char)ppToken->ival;
+    ppToken->name[1] = '\0';
+    ch = getChar();
+    if (ch != '\'') {
+        parseContext.ppError(ppToken->loc, "expected", "\'", "");
+        // Look ahead for a closing '
+        do {
+            ch = getChar();
+        } while (ch != '\'' && ch != EndOfInput && ch != '\n');
+    }
+
+    return PpAtomConstInt;
+}
+
+//
+// Scanner used to tokenize source stream.
+//
+// N.B. Invalid numeric suffixes are not consumed.
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This affects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+// See peekContinuedPasting().
+//
+int TPpContext::tStringInput::scan(TPpToken* ppToken)
+{
+    int AlreadyComplained = 0;
+    int len = 0;
+    int ch = 0;
+    int ii = 0;
+    unsigned long long ival = 0;
+    const auto floatingPointChar = [&](int ch) { return ch == '.' || ch == 'e' || ch == 'E' ||
+                                                        ch == 'f' || ch == 'F' ||
+                                                        ch == 'h' || ch == 'H'; };
+
+    static const char* const Int64_Extensions[] = {
+        E_GL_ARB_gpu_shader_int64,
+        E_GL_EXT_shader_explicit_arithmetic_types,
+        E_GL_EXT_shader_explicit_arithmetic_types_int64 };
+    static const int Num_Int64_Extensions = sizeof(Int64_Extensions) / sizeof(Int64_Extensions[0]);
+
+    static const char* const Int16_Extensions[] = {
+        E_GL_AMD_gpu_shader_int16,
+        E_GL_EXT_shader_explicit_arithmetic_types,
+        E_GL_EXT_shader_explicit_arithmetic_types_int16 };
+    static const int Num_Int16_Extensions = sizeof(Int16_Extensions) / sizeof(Int16_Extensions[0]);
+
+    ppToken->ival = 0;
+    ppToken->i64val = 0;
+    ppToken->space = false;
+    ch = getch();
+    for (;;) {
+        while (ch == ' ' || ch == '\t') {
+            ppToken->space = true;
+            ch = getch();
+        }
+
+        ppToken->loc = pp->parseContext.getCurrentLoc();
+        len = 0;
+        switch (ch) {
+        default:
+            // Single character token, including EndOfInput, '#' and '\' (escaped newlines are handled at a lower level, so this is just a '\' token)
+            if (ch > PpAtomMaxSingle)
+                ch = PpAtomBadToken;
+            return ch;
+
+        case 'A': case 'B': case 'C': case 'D': case 'E':
+        case 'F': case 'G': case 'H': case 'I': case 'J':
+        case 'K': case 'L': case 'M': case 'N': case 'O':
+        case 'P': case 'Q': case 'R': case 'S': case 'T':
+        case 'U': case 'V': case 'W': case 'X': case 'Y':
+        case 'Z': case '_':
+        case 'a': case 'b': case 'c': case 'd': case 'e':
+        case 'f': case 'g': case 'h': case 'i': case 'j':
+        case 'k': case 'l': case 'm': case 'n': case 'o':
+        case 'p': case 'q': case 'r': case 's': case 't':
+        case 'u': case 'v': case 'w': case 'x': case 'y':
+        case 'z':
+            do {
+                if (len < MaxTokenLength) {
+                    ppToken->name[len++] = (char)ch;
+                    ch = getch();
+                } else {
+                    if (! AlreadyComplained) {
+                        pp->parseContext.ppError(ppToken->loc, "name too long", "", "");
+                        AlreadyComplained = 1;
+                    }
+                    ch = getch();
+                }
+            } while ((ch >= 'a' && ch <= 'z') ||
+                     (ch >= 'A' && ch <= 'Z') ||
+                     (ch >= '0' && ch <= '9') ||
+                     ch == '_');
+
+            // line continuation with no token before or after makes len == 0, and we need to start over skipping white space, etc.
+ if (len == 0) + continue; + + ppToken->name[len] = '\0'; + ungetch(); + return PpAtomIdentifier; + case '0': + ppToken->name[len++] = (char)ch; + ch = getch(); + if (ch == 'x' || ch == 'X') { + // must be hexadecimal + + bool isUnsigned = false; + bool isInt64 = false; + bool isInt16 = false; + ppToken->name[len++] = (char)ch; + ch = getch(); + if ((ch >= '0' && ch <= '9') || + (ch >= 'A' && ch <= 'F') || + (ch >= 'a' && ch <= 'f')) { + + ival = 0; + do { + if (len < MaxTokenLength && ival <= 0x0fffffffffffffffull) { + ppToken->name[len++] = (char)ch; + if (ch >= '0' && ch <= '9') { + ii = ch - '0'; + } else if (ch >= 'A' && ch <= 'F') { + ii = ch - 'A' + 10; + } else if (ch >= 'a' && ch <= 'f') { + ii = ch - 'a' + 10; + } else + pp->parseContext.ppError(ppToken->loc, "bad digit in hexadecimal literal", "", ""); + ival = (ival << 4) | ii; + } else { + if (! AlreadyComplained) { + if(len < MaxTokenLength) + pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", ""); + else + pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too long", "", ""); + AlreadyComplained = 1; + } + ival = 0xffffffffffffffffull; + } + ch = getch(); + } while ((ch >= '0' && ch <= '9') || + (ch >= 'A' && ch <= 'F') || + (ch >= 'a' && ch <= 'f')); + } else { + pp->parseContext.ppError(ppToken->loc, "bad digit in hexadecimal literal", "", ""); + } + if (ch == 'u' || ch == 'U') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isUnsigned = true; + +#ifndef GLSLANG_WEB + int nextCh = getch(); + if (nextCh == 'l' || nextCh == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt64 = true; + } else + ungetch(); + + nextCh = getch(); + if ((nextCh == 's' || nextCh == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt16 = true; + } else + ungetch(); + } else if (ch == 'l' || ch == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt64 = true; + } else if ((ch == 's' || ch == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt16 = true; +#endif + } else + ungetch(); + ppToken->name[len] = '\0'; + + if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (pp->ifdepth == 0) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "64-bit hexadecimal literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int64_Extensions, Int64_Extensions, "64-bit hexadecimal literal"); + } + ppToken->i64val = ival; + return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64; + } else if (isInt16) { + if (pp->ifdepth == 0) { + if (pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "16-bit hexadecimal literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int16_Extensions, Int16_Extensions, "16-bit hexadecimal literal"); + } + } + ppToken->ival = (int)ival; + return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16; + } else { + if (ival > 0xffffffffu && !AlreadyComplained) + pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", ""); + ppToken->ival = (int)ival; + return isUnsigned ? 
PpAtomConstUint : PpAtomConstInt;
+                }
+            } else {
+                // could be an octal integer or floating point; speculatively pursue octal until it must be floating point
+
+                bool isUnsigned = false;
+                bool isInt64 = false;
+                bool isInt16 = false;
+                bool octalOverflow = false;
+                bool nonOctal = false;
+                ival = 0;
+
+                // see how much octal-like stuff we can read
+                while (ch >= '0' && ch <= '7') {
+                    if (len < MaxTokenLength)
+                        ppToken->name[len++] = (char)ch;
+                    else if (! AlreadyComplained) {
+                        pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", "");
+                        AlreadyComplained = 1;
+                    }
+                    if (ival <= 0x1fffffffffffffffull) {
+                        ii = ch - '0';
+                        ival = (ival << 3) | ii;
+                    } else
+                        octalOverflow = true;
+                    ch = getch();
+                }
+
+                // could be part of a float...
+                if (ch == '8' || ch == '9') {
+                    nonOctal = true;
+                    do {
+                        if (len < MaxTokenLength)
+                            ppToken->name[len++] = (char)ch;
+                        else if (! AlreadyComplained) {
+                            pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", "");
+                            AlreadyComplained = 1;
+                        }
+                        ch = getch();
+                    } while (ch >= '0' && ch <= '9');
+                }
+                if (floatingPointChar(ch))
+                    return pp->lFloatConst(len, ch, ppToken);
+
+                // wasn't a float, so must be octal...
+                if (nonOctal)
+                    pp->parseContext.ppError(ppToken->loc, "octal literal digit too large", "", "");
+
+                if (ch == 'u' || ch == 'U') {
+                    if (len < MaxTokenLength)
+                        ppToken->name[len++] = (char)ch;
+                    isUnsigned = true;
+
+#ifndef GLSLANG_WEB
+                    int nextCh = getch();
+                    if (nextCh == 'l' || nextCh == 'L') {
+                        if (len < MaxTokenLength)
+                            ppToken->name[len++] = (char)nextCh;
+                        isInt64 = true;
+                    } else
+                        ungetch();
+
+                    nextCh = getch();
+                    if ((nextCh == 's' || nextCh == 'S') &&
+                        pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+                        if (len < MaxTokenLength)
+                            ppToken->name[len++] = (char)nextCh;
+                        isInt16 = true;
+                    } else
+                        ungetch();
+                } else if (ch == 'l' || ch == 'L') {
+                    if (len < MaxTokenLength)
+                        ppToken->name[len++] = (char)ch;
+                    isInt64 = true;
+                } else if ((ch == 's' || ch == 'S') &&
+                           pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+                    if (len < MaxTokenLength)
+                        ppToken->name[len++] = (char)ch;
+                    isInt16 = true;
+#endif
+                } else
+                    ungetch();
+                ppToken->name[len] = '\0';
+
+                if (!isInt64 && ival > 0xffffffffu)
+                    octalOverflow = true;
+
+                if (octalOverflow)
+                    pp->parseContext.ppError(ppToken->loc, "octal literal too big", "", "");
+
+                if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+                    if (pp->ifdepth == 0) {
+                        pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+                                                        "64-bit octal literal");
+                        pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+                            Num_Int64_Extensions, Int64_Extensions, "64-bit octal literal");
+                    }
+                    ppToken->i64val = ival;
+                    return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64;
+                } else if (isInt16) {
+                    if (pp->ifdepth == 0) {
+                        if (pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+                            pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+                                                            "16-bit octal literal");
+                            pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+                                Num_Int16_Extensions, Int16_Extensions, "16-bit octal literal");
+                        }
+                    }
+                    ppToken->ival = (int)ival;
+                    return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
+                } else {
+                    ppToken->ival = (int)ival;
+                    return isUnsigned ?
PpAtomConstUint : PpAtomConstInt; + } + } + break; + case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + // can't be hexadecimal or octal, is either decimal or floating point + + do { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + else if (! AlreadyComplained) { + pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", ""); + AlreadyComplained = 1; + } + ch = getch(); + } while (ch >= '0' && ch <= '9'); + if (floatingPointChar(ch)) + return pp->lFloatConst(len, ch, ppToken); + else { + // Finish handling signed and unsigned integers + int numericLen = len; + bool isUnsigned = false; + bool isInt64 = false; + bool isInt16 = false; + if (ch == 'u' || ch == 'U') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isUnsigned = true; + +#ifndef GLSLANG_WEB + int nextCh = getch(); + if (nextCh == 'l' || nextCh == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt64 = true; + } else + ungetch(); + + nextCh = getch(); + if ((nextCh == 's' || nextCh == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt16 = true; + } else + ungetch(); + } else if (ch == 'l' || ch == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt64 = true; + } else if ((ch == 's' || ch == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt16 = true; +#endif + } else + ungetch(); + + ppToken->name[len] = '\0'; + ival = 0; + const unsigned oneTenthMaxInt = 0xFFFFFFFFu / 10; + const unsigned remainderMaxInt = 0xFFFFFFFFu - 10 * oneTenthMaxInt; + const unsigned long long oneTenthMaxInt64 = 0xFFFFFFFFFFFFFFFFull / 10; + const unsigned long long remainderMaxInt64 = 0xFFFFFFFFFFFFFFFFull - 10 * oneTenthMaxInt64; + const unsigned short oneTenthMaxInt16 = 0xFFFFu / 10; + const unsigned short remainderMaxInt16 = 0xFFFFu - 10 * oneTenthMaxInt16; + for (int i = 0; i < numericLen; i++) { + ch = ppToken->name[i] - '0'; + bool overflow = false; + if (isInt64) + overflow = (ival > oneTenthMaxInt64 || (ival == oneTenthMaxInt64 && (unsigned long long)ch > remainderMaxInt64)); + else if (isInt16) + overflow = (ival > oneTenthMaxInt16 || (ival == oneTenthMaxInt16 && (unsigned short)ch > remainderMaxInt16)); + else + overflow = (ival > oneTenthMaxInt || (ival == oneTenthMaxInt && (unsigned)ch > remainderMaxInt)); + if (overflow) { + pp->parseContext.ppError(ppToken->loc, "numeric literal too big", "", ""); + ival = 0xFFFFFFFFFFFFFFFFull; + break; + } else + ival = ival * 10 + ch; + } + + if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (pp->ifdepth == 0) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "64-bit literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int64_Extensions, Int64_Extensions, "64-bit literal"); + } + ppToken->i64val = ival; + return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64; + } else if (isInt16) { + if (pp->ifdepth == 0 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "16-bit literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int16_Extensions, Int16_Extensions, "16-bit literal"); + } + ppToken->ival = (int)ival; + return isUnsigned ? 
PpAtomConstUint16 : PpAtomConstInt16; + } else { + ppToken->ival = (int)ival; + return isUnsigned ? PpAtomConstUint : PpAtomConstInt; + } + } + break; + case '-': + ch = getch(); + if (ch == '-') { + return PpAtomDecrement; + } else if (ch == '=') { + return PPAtomSubAssign; + } else { + ungetch(); + return '-'; + } + case '+': + ch = getch(); + if (ch == '+') { + return PpAtomIncrement; + } else if (ch == '=') { + return PPAtomAddAssign; + } else { + ungetch(); + return '+'; + } + case '*': + ch = getch(); + if (ch == '=') { + return PPAtomMulAssign; + } else { + ungetch(); + return '*'; + } + case '%': + ch = getch(); + if (ch == '=') { + return PPAtomModAssign; + } else { + ungetch(); + return '%'; + } + case '^': + ch = getch(); + if (ch == '^') { + return PpAtomXor; + } else { + if (ch == '=') + return PpAtomXorAssign; + else{ + ungetch(); + return '^'; + } + } + + case '=': + ch = getch(); + if (ch == '=') { + return PpAtomEQ; + } else { + ungetch(); + return '='; + } + case '!': + ch = getch(); + if (ch == '=') { + return PpAtomNE; + } else { + ungetch(); + return '!'; + } + case '|': + ch = getch(); + if (ch == '|') { + return PpAtomOr; + } else if (ch == '=') { + return PpAtomOrAssign; + } else { + ungetch(); + return '|'; + } + case '&': + ch = getch(); + if (ch == '&') { + return PpAtomAnd; + } else if (ch == '=') { + return PpAtomAndAssign; + } else { + ungetch(); + return '&'; + } + case '<': + ch = getch(); + if (ch == '<') { + ch = getch(); + if (ch == '=') + return PpAtomLeftAssign; + else { + ungetch(); + return PpAtomLeft; + } + } else if (ch == '=') { + return PpAtomLE; + } else { + ungetch(); + return '<'; + } + case '>': + ch = getch(); + if (ch == '>') { + ch = getch(); + if (ch == '=') + return PpAtomRightAssign; + else { + ungetch(); + return PpAtomRight; + } + } else if (ch == '=') { + return PpAtomGE; + } else { + ungetch(); + return '>'; + } + case '.': + ch = getch(); + if (ch >= '0' && ch <= '9') { + ungetch(); + return pp->lFloatConst(0, '.', ppToken); + } else { + ungetch(); + return '.'; + } + case '/': + ch = getch(); + if (ch == '/') { + pp->inComment = true; + do { + ch = getch(); + } while (ch != '\n' && ch != EndOfInput); + ppToken->space = true; + pp->inComment = false; + + return ch; + } else if (ch == '*') { + ch = getch(); + do { + while (ch != '*') { + if (ch == EndOfInput) { + pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", ""); + return ch; + } + ch = getch(); + } + ch = getch(); + if (ch == EndOfInput) { + pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", ""); + return ch; + } + } while (ch != '/'); + ppToken->space = true; + // loop again to get the next token... + break; + } else if (ch == '=') { + return PPAtomDivAssign; + } else { + ungetch(); + return '/'; + } + break; + case '\'': + return pp->characterLiteral(ppToken); + case '"': + // #include uses scanHeaderName() to ignore these escape sequences. + ch = getch(); + while (ch != '"' && ch != '\n' && ch != EndOfInput) { + if (len < MaxTokenLength) { + if (ch == '\\' && !pp->disableEscapeSequences) { + int nextCh = getch(); + switch (nextCh) { + case '\'': ch = 0x27; break; + case '"': ch = 0x22; break; + case '?': ch = 0x3f; break; + case '\\': ch = 0x5c; break; + case 'a': ch = 0x07; break; + case 'b': ch = 0x08; break; + case 'f': ch = 0x0c; break; + case 'n': ch = 0x0a; break; + case 'r': ch = 0x0d; break; + case 't': ch = 0x09; break; + case 'v': ch = 0x0b; break; + case 'x': + // Hex value, arbitrary number of characters. 
Terminated by the first + // non-hex digit + { + int numDigits = 0; + ch = 0; + while (true) { + nextCh = getch(); + if (nextCh >= '0' && nextCh <= '9') + nextCh -= '0'; + else if (nextCh >= 'A' && nextCh <= 'F') + nextCh -= 'A' - 10; + else if (nextCh >= 'a' && nextCh <= 'f') + nextCh -= 'a' - 10; + else { + ungetch(); + break; + } + numDigits++; + ch = ch * 0x10 + nextCh; + } + if (numDigits == 0) { + pp->parseContext.ppError(ppToken->loc, "Expected hex value in escape sequence", "string", ""); + } + break; + } + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + // Octal value, up to three octal digits + { + int numDigits = 1; + ch = nextCh - '0'; + while (numDigits < 3) { + nextCh = getch(); + if (nextCh >= '0' && nextCh <= '7') + nextCh -= '0'; + else { + ungetch(); + break; + } + numDigits++; + ch = ch * 8 + nextCh; + } + break; + } + default: + pp->parseContext.ppError(ppToken->loc, "Invalid escape sequence", "string", ""); + break; + } + } + ppToken->name[len] = (char)ch; + len++; + ch = getch(); + } else + break; + }; + ppToken->name[len] = '\0'; + if (ch != '"') { + ungetch(); + pp->parseContext.ppError(ppToken->loc, "End of line in string", "string", ""); + } + return PpAtomConstString; + case ':': + ch = getch(); + if (ch == ':') + return PpAtomColonColon; + ungetch(); + return ':'; + } + + ch = getch(); + } +} + +// +// The main functional entry point into the preprocessor, which will +// scan the source strings to figure out and return the next processing token. +// +// Return the token, or EndOfInput when no more tokens. +// +int TPpContext::tokenize(TPpToken& ppToken) +{ + for(;;) { + int token = scanToken(&ppToken); + + // Handle token-pasting logic + token = tokenPaste(token, ppToken); + + if (token == EndOfInput) { + missingEndifCheck(); + return EndOfInput; + } + if (token == '#') { + if (previous_token == '\n') { + token = readCPPline(&ppToken); + if (token == EndOfInput) { + missingEndifCheck(); + return EndOfInput; + } + continue; + } else { + parseContext.ppError(ppToken.loc, "preprocessor directive cannot be preceded by another token", "#", ""); + return EndOfInput; + } + } + previous_token = token; + + if (token == '\n') + continue; + + // expand macros + if (token == PpAtomIdentifier) { + switch (MacroExpand(&ppToken, false, true)) { + case MacroExpandNotStarted: + break; + case MacroExpandError: + return EndOfInput; + case MacroExpandStarted: + case MacroExpandUndef: + continue; + } + } + + switch (token) { + case PpAtomIdentifier: + case PpAtomConstInt: + case PpAtomConstUint: + case PpAtomConstFloat: + case PpAtomConstInt64: + case PpAtomConstUint64: + case PpAtomConstInt16: + case PpAtomConstUint16: + case PpAtomConstDouble: + case PpAtomConstFloat16: + if (ppToken.name[0] == '\0') + continue; + break; + case PpAtomConstString: + // HLSL allows string literals. + // GLSL allows string literals with GL_EXT_debug_printf. 
+ if (ifdepth == 0 && parseContext.intermediate.getSource() != EShSourceHlsl) { + parseContext.requireExtensions(ppToken.loc, 1, &E_GL_EXT_debug_printf, "string literal"); + if (!parseContext.extensionTurnedOn(E_GL_EXT_debug_printf)) + continue; + } + break; + case '\'': + parseContext.ppError(ppToken.loc, "character literals not supported", "\'", ""); + continue; + default: + snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(token)); + break; + } + + return token; + } +} + +// +// Do all token-pasting related combining of two pasted tokens when getting a +// stream of tokens from a replacement list. Degenerates to no processing if a +// replacement list is not the source of the token stream. +// +int TPpContext::tokenPaste(int token, TPpToken& ppToken) +{ + // starting with ## is illegal, skip to next token + if (token == PpAtomPaste) { + parseContext.ppError(ppToken.loc, "unexpected location", "##", ""); + return scanToken(&ppToken); + } + + int resultToken = token; // "foo" pasted with "35" is an identifier, not a number + + // ## can be chained, process all in the chain at once + while (peekPasting()) { + TPpToken pastedPpToken; + + // next token has to be ## + token = scanToken(&pastedPpToken); + assert(token == PpAtomPaste); + + // This covers end of macro expansion + if (endOfReplacementList()) { + parseContext.ppError(ppToken.loc, "unexpected location; end of replacement list", "##", ""); + break; + } + + // Get the token(s) after the ##. + // Because of "space" semantics, and prior tokenization, what + // appeared a single token, e.g. "3A", might have been tokenized + // into two tokens "3" and "A", but the "A" will have 'space' set to + // false. Accumulate all of these to recreate the original lexical + // appearing token. 
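As a quick illustration of the paste-then-revalidate approach the comment above describes (a minimal, self-contained sketch, not glslang's actual API -- the atom table, atom values, and function name here are invented):

    #include <string>
    #include <unordered_map>

    // Invented atom codes; the real ones live in PpTokens.h.
    enum { AtomIdentifier = 300, AtomInvalid = -1 };

    int pasteTokens(const std::string& lhs, const std::string& rhs, bool lhsIsIdentifier,
                    const std::unordered_map<std::string, int>& atomTable)
    {
        std::string combined = lhs + rhs;    // "foo" ## "35" -> identifier "foo35"
        if (lhsIsIdentifier)
            return AtomIdentifier;           // identifiers stay identifiers
        auto it = atomTable.find(combined);  // "<<" ## "=" must form a known atom, "<<="
        return it == atomTable.end() ? AtomInvalid : it->second;
    }

    int main()
    {
        std::unordered_map<std::string, int> atoms = {{"<<=", 310}};
        return pasteTokens("<<", "=", false, atoms) == 310 ? 0 : 1;
    }

The same two outcomes appear in the loop that follows: a paste either keeps the identifier kind or must resolve to a known atom, otherwise it is reported as invalid.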
+ do { + token = scanToken(&pastedPpToken); + + // This covers end of argument expansion + if (token == tMarkerInput::marker) { + parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", ""); + return resultToken; + } + + // get the token text + switch (resultToken) { + case PpAtomIdentifier: + // already have the correct text in token.names + break; + case '=': + case '!': + case '-': + case '~': + case '+': + case '*': + case '/': + case '%': + case '<': + case '>': + case '|': + case '^': + case '&': + case PpAtomRight: + case PpAtomLeft: + case PpAtomAnd: + case PpAtomOr: + case PpAtomXor: + snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(resultToken)); + snprintf(pastedPpToken.name, sizeof(pastedPpToken.name), "%s", atomStrings.getString(token)); + break; + default: + parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", ""); + return resultToken; + } + + // combine the tokens + if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) { + parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", ""); + return resultToken; + } + snprintf(&ppToken.name[0] + strlen(ppToken.name), sizeof(ppToken.name) - strlen(ppToken.name), + "%s", pastedPpToken.name); + + // correct the kind of token we are making, if needed (identifiers stay identifiers) + if (resultToken != PpAtomIdentifier) { + int newToken = atomStrings.getAtom(ppToken.name); + if (newToken > 0) + resultToken = newToken; + else + parseContext.ppError(ppToken.loc, "combined token is invalid", "##", ""); + } + } while (peekContinuedPasting(resultToken)); + } + + return resultToken; +} + +// Checks if we've seen balanced #if...#endif +void TPpContext::missingEndifCheck() +{ + if (ifdepth > 0) + parseContext.ppError(parseContext.getCurrentLoc(), "missing #endif", "", ""); +} + +} // end namespace glslang diff --git a/dep/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp b/dep/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp new file mode 100644 index 000000000..7ed58703f --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp @@ -0,0 +1,221 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +\****************************************************************************/ + +// +// For recording and playing back the stream of tokens in a macro definition. 
+//
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/)
+#define snprintf sprintf_s
+#endif
+
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace glslang {
+
+// Add a token (including backing string) to the end of a macro
+// token stream, for later playback.
+void TPpContext::TokenStream::putToken(int atom, TPpToken* ppToken)
+{
+    TokenStream::Token streamToken(atom, *ppToken);
+    stream.push_back(streamToken);
+}
+
+// Read the next token from a macro token stream.
+int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken)
+{
+    if (atEnd())
+        return EndOfInput;
+
+    int atom = stream[currentPos++].get(*ppToken);
+    ppToken->loc = parseContext.getCurrentLoc();
+
+#ifndef GLSLANG_WEB
+    // Check for ##, unless the current # is the last character
+    if (atom == '#') {
+        if (peekToken('#')) {
+            parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
+            parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
+            currentPos++;
+            atom = PpAtomPaste;
+        }
+    }
+#endif
+
+    return atom;
+}
+
+// We are pasting if
+//   1. we are preceding a pasting operator within this stream
+// or
+//   2. the entire macro is preceding a pasting operator (lastTokenPastes)
+//      and we are also on the last token
+bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
+{
+    // 1. preceding ##?
+
+    size_t savePos = currentPos;
+    // skip white space
+    while (peekToken(' '))
+        ++currentPos;
+    if (peekToken(PpAtomPaste)) {
+        currentPos = savePos;
+        return true;
+    }
+
+    // 2. last token and we've been told after this there will be a ##
+
+    if (! lastTokenPastes)
+        return false;
+    // Getting here means the last token will be pasted, after this
+
+    // Are we at the last non-whitespace token?
+    savePos = currentPos;
+    bool moreTokens = false;
+    do {
+        if (atEnd())
+            break;
+        if (!peekToken(' ')) {
+            moreTokens = true;
+            break;
+        }
+        ++currentPos;
+    } while (true);
+    currentPos = savePos;
+
+    return !moreTokens;
+}
+
+// See if the next non-white-space tokens are two consecutive #
+bool TPpContext::TokenStream::peekUntokenizedPasting()
+{
+    // don't return early, have to restore this
+    size_t savePos = currentPos;
+
+    // skip white-space
+    while (peekToken(' '))
+        ++currentPos;
+
+    // check for ##
+    bool pasting = false;
+    if (peekToken('#')) {
+        ++currentPos;
+        if (peekToken('#'))
+            pasting = true;
+    }
+
+    currentPos = savePos;
+
+    return pasting;
+}
+
+void TPpContext::pushTokenStreamInput(TokenStream& ts, bool prepasting)
+{
+    pushInput(new tTokenInput(this, &ts, prepasting));
+    ts.reset();
+}
+
+int TPpContext::tUngotTokenInput::scan(TPpToken* ppToken)
+{
+    if (done)
+        return EndOfInput;
+
+    int ret = token;
+    *ppToken = lval;
+    done = true;
+
+    return ret;
+}
+
+void TPpContext::UngetToken(int token, TPpToken* ppToken)
+{
+    pushInput(new tUngotTokenInput(this, token, ppToken));
+}
+
+} // end namespace glslang
diff --git a/dep/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h b/dep/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h
new file mode 100644
index 000000000..7b0f81550
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h
@@ -0,0 +1,179 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. 
+ +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +\****************************************************************************/ + +#ifndef PARSER_H +#define PARSER_H + +namespace glslang { + +// Multi-character tokens +enum EFixedAtoms { + // single character tokens get their own char value as their token; start here for multi-character tokens + PpAtomMaxSingle = 127, + + // replace bad character tokens with this, to avoid accidental aliasing with the below + PpAtomBadToken, + + // Operators + + PPAtomAddAssign, + PPAtomSubAssign, + PPAtomMulAssign, + PPAtomDivAssign, + PPAtomModAssign, + + PpAtomRight, + PpAtomLeft, + + PpAtomRightAssign, + PpAtomLeftAssign, + PpAtomAndAssign, + PpAtomOrAssign, + PpAtomXorAssign, + + PpAtomAnd, + PpAtomOr, + PpAtomXor, + + PpAtomEQ, + PpAtomNE, + PpAtomGE, + PpAtomLE, + + PpAtomDecrement, + PpAtomIncrement, + + PpAtomColonColon, + + PpAtomPaste, + + // Constants + + PpAtomConstInt, + PpAtomConstUint, + PpAtomConstInt64, + PpAtomConstUint64, + PpAtomConstInt16, + PpAtomConstUint16, + PpAtomConstFloat, + PpAtomConstDouble, + PpAtomConstFloat16, + PpAtomConstString, + + // Identifiers + PpAtomIdentifier, + + // preprocessor "keywords" + + PpAtomDefine, + PpAtomUndef, + + PpAtomIf, + PpAtomIfdef, + PpAtomIfndef, + PpAtomElse, + PpAtomElif, + PpAtomEndif, + + PpAtomLine, + PpAtomPragma, + PpAtomError, + + // #version ... + PpAtomVersion, + PpAtomCore, + PpAtomCompatibility, + PpAtomEs, + + // #extension + PpAtomExtension, + + // __LINE__, __FILE__, __VERSION__ + + PpAtomLineMacro, + PpAtomFileMacro, + PpAtomVersionMacro, + + // #include + PpAtomInclude, + + PpAtomLast, +}; + +} // end namespace glslang + +#endif /* not PARSER_H */ diff --git a/dep/glslang/glslang/MachineIndependent/propagateNoContraction.cpp b/dep/glslang/glslang/MachineIndependent/propagateNoContraction.cpp new file mode 100644 index 000000000..9def592ba --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/propagateNoContraction.cpp @@ -0,0 +1,870 @@ +// +// Copyright (C) 2015-2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// propagate the 'noContraction' qualifier.
+//
+
+#ifndef GLSLANG_WEB
+
+#include "propagateNoContraction.h"
+
+#include <cstdlib>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "localintermediate.h"
+namespace {
+
+// Use a string to hold the access chain information, as in most cases the
+// access chain is short and may contain only one element, which is the symbol
+// ID.
+// Example: struct {float a; float b;} s;
+//  Object s.a will be represented with: <symbol ID of s>/0
+//  Object s.b will be represented with: <symbol ID of s>/1
+//  Object s will be represented with: <symbol ID of s>
+// For members of vector, matrix and arrays, they will be represented with the
+// same symbol ID of their container symbol objects. This is because their
+// preciseness is always the same as their container symbol objects.
+typedef std::string ObjectAccessChain;
+
+// The delimiter used in the ObjectAccessChain string to separate symbol ID and
+// different level of struct indices.
+const char ObjectAccesschainDelimiter = '/';
+
+// Mapping from Symbol IDs of symbol nodes, to their defining operation
+// nodes.
+typedef std::unordered_multimap<ObjectAccessChain, glslang::TIntermOperator*> NodeMapping;
+// Mapping from object nodes to their access chain info string.
+typedef std::unordered_map<glslang::TIntermTyped*, ObjectAccessChain> AccessChainMapping;
+
+// Set of object IDs.
+typedef std::unordered_set<ObjectAccessChain> ObjectAccesschainSet;
+// Set of return branch nodes.
+typedef std::unordered_set<glslang::TIntermBranch*> ReturnBranchNodeSet;
+
+// A helper function to tell whether a node is 'noContraction'. Returns true if
+// the node has 'noContraction' qualifier, otherwise false.
+bool isPreciseObjectNode(glslang::TIntermTyped* node)
+{
+    return node->getType().getQualifier().isNoContraction();
+}
+
+// Returns true if the opcode is a dereferencing one.
+bool isDereferenceOperation(glslang::TOperator op)
+{
+    switch (op) {
+    case glslang::EOpIndexDirect:
+    case glslang::EOpIndexDirectStruct:
+    case glslang::EOpIndexIndirect:
+    case glslang::EOpVectorSwizzle:
+    case glslang::EOpMatrixSwizzle:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// Returns true if the opcode leads to an assignment operation.
+bool isAssignOperation(glslang::TOperator op)
+{
+    switch (op) {
+    case glslang::EOpAssign:
+    case glslang::EOpAddAssign:
+    case glslang::EOpSubAssign:
+    case glslang::EOpMulAssign:
+    case glslang::EOpVectorTimesMatrixAssign:
+    case glslang::EOpVectorTimesScalarAssign:
+    case glslang::EOpMatrixTimesScalarAssign:
+    case glslang::EOpMatrixTimesMatrixAssign:
+    case glslang::EOpDivAssign:
+    case glslang::EOpModAssign:
+    case glslang::EOpAndAssign:
+    case glslang::EOpLeftShiftAssign:
+    case glslang::EOpRightShiftAssign:
+    case glslang::EOpInclusiveOrAssign:
+    case glslang::EOpExclusiveOrAssign:
+
+    case glslang::EOpPostIncrement:
+    case glslang::EOpPostDecrement:
+    case glslang::EOpPreIncrement:
+    case glslang::EOpPreDecrement:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// A helper function to get the unsigned int from a given constant union node.
+// Note the node should only hold a uint scalar.
+unsigned getStructIndexFromConstantUnion(glslang::TIntermTyped* node)
+{
+    assert(node->getAsConstantUnion() && node->getAsConstantUnion()->isScalar());
+    unsigned struct_dereference_index = node->getAsConstantUnion()->getConstArray()[0].getUConst();
+    return struct_dereference_index;
+}
+
+// A helper function to generate symbol_label.
+ObjectAccessChain generateSymbolLabel(glslang::TIntermSymbol* node)
+{
+    ObjectAccessChain symbol_id =
+        std::to_string(node->getId()) + "(" + node->getName().c_str() + ")";
+    return symbol_id;
+}
+
+// Returns true if the operation is an arithmetic operation and valid for
+// the 'NoContraction' decoration.
+bool isArithmeticOperation(glslang::TOperator op)
+{
+    switch (op) {
+    case glslang::EOpAddAssign:
+    case glslang::EOpSubAssign:
+    case glslang::EOpMulAssign:
+    case glslang::EOpVectorTimesMatrixAssign:
+    case glslang::EOpVectorTimesScalarAssign:
+    case glslang::EOpMatrixTimesScalarAssign:
+    case glslang::EOpMatrixTimesMatrixAssign:
+    case glslang::EOpDivAssign:
+    case glslang::EOpModAssign:
+
+    case glslang::EOpNegative:
+
+    case glslang::EOpAdd:
+    case glslang::EOpSub:
+    case glslang::EOpMul:
+    case glslang::EOpDiv:
+    case glslang::EOpMod:
+
+    case glslang::EOpVectorTimesScalar:
+    case glslang::EOpVectorTimesMatrix:
+    case glslang::EOpMatrixTimesVector:
+    case glslang::EOpMatrixTimesScalar:
+    case glslang::EOpMatrixTimesMatrix:
+
+    case glslang::EOpDot:
+
+    case glslang::EOpPostIncrement:
+    case glslang::EOpPostDecrement:
+    case glslang::EOpPreIncrement:
+    case glslang::EOpPreDecrement:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// A helper class to help manage the populating_initial_no_contraction_ flag.
+template <typename T> class StateSettingGuard {
+public:
+    StateSettingGuard(T* state_ptr, T new_state_value)
+        : state_ptr_(state_ptr), previous_state_(*state_ptr)
+    {
+        *state_ptr = new_state_value;
+    }
+    StateSettingGuard(T* state_ptr) : state_ptr_(state_ptr), previous_state_(*state_ptr) {}
+    void setState(T new_state_value) { *state_ptr_ = new_state_value; }
+    ~StateSettingGuard() { *state_ptr_ = previous_state_; }
+
+private:
+    T* state_ptr_;
+    T previous_state_;
+};
+
+// A helper function to get the front element from a given ObjectAccessChain
+ObjectAccessChain getFrontElement(const ObjectAccessChain& chain)
+{
+    size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter);
+    return pos_delimiter == std::string::npos ? chain : chain.substr(0, pos_delimiter);
+}
+
+// A helper function to get the access chain starting from the second element.
+ObjectAccessChain subAccessChainFromSecondElement(const ObjectAccessChain& chain)
+{
+    size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter);
+    return pos_delimiter == std::string::npos ? "" : chain.substr(pos_delimiter + 1);
+}
+
+// A helper function to get the access chain after removing a given prefix.
+ObjectAccessChain getSubAccessChainAfterPrefix(const ObjectAccessChain& chain,
+                                               const ObjectAccessChain& prefix)
+{
+    size_t pos = chain.find(prefix);
+    if (pos != 0)
+        return chain;
+    return chain.substr(prefix.length() + sizeof(ObjectAccesschainDelimiter));
+}
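For intuition, the access-chain helpers above operate on plain strings. A tiny self-contained sketch of the same splitting logic (the symbol ID 14 and name "s" are made up; real labels come from generateSymbolLabel):

    #include <cassert>
    #include <string>

    int main()
    {
        // "14(s)/1" would label member .b of: struct { float a; float b; } s;
        std::string chain = "14(s)/1";
        size_t slash = chain.find('/');
        std::string front = chain.substr(0, slash);  // "14(s)" -- what getFrontElement() returns
        std::string rest = chain.substr(slash + 1);  // "1"     -- subAccessChainFromSecondElement()
        assert(front == "14(s)");
        assert(rest == "1");
        return 0;
    }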
+
+//
+// A traverser which traverses the whole AST and populates:
+//  1) A mapping from symbol nodes' IDs to their defining operation nodes.
+//  2) A set of access chains of the initial precise object nodes.
+//
+class TSymbolDefinitionCollectingTraverser : public glslang::TIntermTraverser {
+public:
+    TSymbolDefinitionCollectingTraverser(NodeMapping* symbol_definition_mapping,
+                                         AccessChainMapping* accesschain_mapping,
+                                         ObjectAccesschainSet* precise_objects,
+                                         ReturnBranchNodeSet* precise_return_nodes);
+
+    bool visitUnary(glslang::TVisit, glslang::TIntermUnary*) override;
+    bool visitBinary(glslang::TVisit, glslang::TIntermBinary*) override;
+    void visitSymbol(glslang::TIntermSymbol*) override;
+    bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*) override;
+    bool visitBranch(glslang::TVisit, glslang::TIntermBranch*) override;
+
+protected:
+    TSymbolDefinitionCollectingTraverser& operator=(const TSymbolDefinitionCollectingTraverser&);
+
+    // The mapping from symbol node IDs to their defining nodes. This should be
+    // populated along traversing the AST.
+    NodeMapping& symbol_definition_mapping_;
+    // The set of symbol node IDs for precise symbol nodes, the ones marked as
+    // 'noContraction'.
+    ObjectAccesschainSet& precise_objects_;
+    // The set of precise return nodes.
+    ReturnBranchNodeSet& precise_return_nodes_;
+    // A temporary cache of the symbol node whose defining node is to be found
+    // currently along traversing the AST.
+    ObjectAccessChain current_object_;
+    // A map from object node to its access chain. This traverser stores
+    // the built access chains into this map for each object node it has
+    // visited.
+    AccessChainMapping& accesschain_mapping_;
+    // The pointer to the Function Definition node, so we can get the
+    // preciseness of the return expression from it when we traverse the
+    // return branch node.
+    glslang::TIntermAggregate* current_function_definition_node_;
+};
+
+TSymbolDefinitionCollectingTraverser::TSymbolDefinitionCollectingTraverser(
+    NodeMapping* symbol_definition_mapping, AccessChainMapping* accesschain_mapping,
+    ObjectAccesschainSet* precise_objects,
+    std::unordered_set<glslang::TIntermBranch*>* precise_return_nodes)
+    : TIntermTraverser(true, false, false), symbol_definition_mapping_(*symbol_definition_mapping),
+      precise_objects_(*precise_objects), precise_return_nodes_(*precise_return_nodes),
+      current_object_(), accesschain_mapping_(*accesschain_mapping),
+      current_function_definition_node_(nullptr) {}
+
+// Visits a symbol node, set the current_object_ to the
+// current node symbol ID, and record a mapping from this node to the current
+// current_object_, which is the just obtained symbol
+// ID.
+void TSymbolDefinitionCollectingTraverser::visitSymbol(glslang::TIntermSymbol* node)
+{
+    current_object_ = generateSymbolLabel(node);
+    accesschain_mapping_[node] = current_object_;
+}
+
+// Visits an aggregate node, traverses all of its children.
+bool TSymbolDefinitionCollectingTraverser::visitAggregate(glslang::TVisit,
+                                                          glslang::TIntermAggregate* node)
+{
+    // This aggregate node might be a function definition node, in which case we need to
+    // cache this node, so we can get the preciseness information of the return value
+    // of this function later.
+    StateSettingGuard<glslang::TIntermAggregate*> current_function_definition_node_setting_guard(
+        &current_function_definition_node_);
+    if (node->getOp() == glslang::EOpFunction) {
+        // This is function definition node, we need to cache this node so that we can
+        // get the preciseness of the return value later.
+        current_function_definition_node_setting_guard.setState(node);
+    }
+    // Traverse the items in the sequence.
+ glslang::TIntermSequence& seq = node->getSequence(); + for (int i = 0; i < (int)seq.size(); ++i) { + current_object_.clear(); + seq[i]->traverse(this); + } + return false; +} + +bool TSymbolDefinitionCollectingTraverser::visitBranch(glslang::TVisit, + glslang::TIntermBranch* node) +{ + if (node->getFlowOp() == glslang::EOpReturn && node->getExpression() && + current_function_definition_node_ && + current_function_definition_node_->getType().getQualifier().noContraction) { + // This node is a return node with an expression, and its function has a + // precise return value. We need to find the involved objects in its + // expression and add them to the set of initial precise objects. + precise_return_nodes_.insert(node); + node->getExpression()->traverse(this); + } + return false; +} + +// Visits a unary node. This might be an implicit assignment like i++, i--. etc. +bool TSymbolDefinitionCollectingTraverser::visitUnary(glslang::TVisit /* visit */, + glslang::TIntermUnary* node) +{ + current_object_.clear(); + node->getOperand()->traverse(this); + if (isAssignOperation(node->getOp())) { + // We should always be able to get an access chain of the operand node. + assert(!current_object_.empty()); + + // If the operand node object is 'precise', we collect its access chain + // for the initial set of 'precise' objects. + if (isPreciseObjectNode(node->getOperand())) { + // The operand node is an 'precise' object node, add its + // access chain to the set of 'precise' objects. This is to collect + // the initial set of 'precise' objects. + precise_objects_.insert(current_object_); + } + // Gets the symbol ID from the object's access chain. + ObjectAccessChain id_symbol = getFrontElement(current_object_); + // Add a mapping from the symbol ID to this assignment operation node. + symbol_definition_mapping_.insert(std::make_pair(id_symbol, node)); + } + // A unary node is not a dereference node, so we clear the access chain which + // is under construction. + current_object_.clear(); + return false; +} + +// Visits a binary node and updates the mapping from symbol IDs to the definition +// nodes. Also collects the access chains for the initial precise objects. +bool TSymbolDefinitionCollectingTraverser::visitBinary(glslang::TVisit /* visit */, + glslang::TIntermBinary* node) +{ + // Traverses the left node to build the access chain info for the object. + current_object_.clear(); + node->getLeft()->traverse(this); + + if (isAssignOperation(node->getOp())) { + // We should always be able to get an access chain for the left node. + assert(!current_object_.empty()); + + // If the left node object is 'precise', it is an initial precise object + // specified in the shader source. Adds it to the initial work list to + // process later. + if (isPreciseObjectNode(node->getLeft())) { + // The left node is an 'precise' object node, add its access chain to + // the set of 'precise' objects. This is to collect the initial set + // of 'precise' objects. + precise_objects_.insert(current_object_); + } + // Gets the symbol ID from the object access chain, which should be the + // first element recorded in the access chain. + ObjectAccessChain id_symbol = getFrontElement(current_object_); + // Adds a mapping from the symbol ID to this assignment operation node. + symbol_definition_mapping_.insert(std::make_pair(id_symbol, node)); + + // Traverses the right node, there may be other 'assignment' + // operations in the right. 
+        current_object_.clear();
+        node->getRight()->traverse(this);
+
+    } else if (isDereferenceOperation(node->getOp())) {
+        // The left node (parent node) is a struct type object. We need to
+        // record the access chain information of the current node into its
+        // object id.
+        if (node->getOp() == glslang::EOpIndexDirectStruct) {
+            unsigned struct_dereference_index = getStructIndexFromConstantUnion(node->getRight());
+            current_object_.push_back(ObjectAccesschainDelimiter);
+            current_object_.append(std::to_string(struct_dereference_index));
+        }
+        accesschain_mapping_[node] = current_object_;
+
+        // For a dereference node, there is no need to traverse the right child
+        // node as the right node should always be an integer type object.
+
+    } else {
+        // For other binary nodes, still traverse the right node.
+        current_object_.clear();
+        node->getRight()->traverse(this);
+    }
+    return false;
+}
+
+// Traverses the AST and returns a tuple of four members:
+// 1) a mapping from symbol IDs to the definition nodes (aka. assignment nodes) of these symbols.
+// 2) a mapping from object nodes in the AST to the access chains of these objects.
+// 3) a set of access chains of precise objects.
+// 4) a set of return nodes with precise expressions.
+std::tuple<NodeMapping, AccessChainMapping, ObjectAccesschainSet, ReturnBranchNodeSet>
+getSymbolToDefinitionMappingAndPreciseSymbolIDs(const glslang::TIntermediate& intermediate)
+{
+    auto result_tuple = std::make_tuple(NodeMapping(), AccessChainMapping(), ObjectAccesschainSet(),
+                                        ReturnBranchNodeSet());
+
+    TIntermNode* root = intermediate.getTreeRoot();
+    if (root == 0)
+        return result_tuple;
+
+    NodeMapping& symbol_definition_mapping = std::get<0>(result_tuple);
+    AccessChainMapping& accesschain_mapping = std::get<1>(result_tuple);
+    ObjectAccesschainSet& precise_objects = std::get<2>(result_tuple);
+    ReturnBranchNodeSet& precise_return_nodes = std::get<3>(result_tuple);
+
+    // Traverses the AST and populate the results.
+    TSymbolDefinitionCollectingTraverser collector(&symbol_definition_mapping, &accesschain_mapping,
+                                                   &precise_objects, &precise_return_nodes);
+    root->traverse(&collector);
+
+    return result_tuple;
+}
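To make the collection phase concrete: for a hypothetical shader fragment `precise float p; p = a * b; p += c;`, the returned tuple would map p's symbol ID to both assignment nodes and seed the precise work list with p's access chain. A rough stand-alone illustration of the multimap lookup pattern that PropagateNoContraction applies later (the ID "7(p)" and the string payloads are invented stand-ins for real AST nodes):

    #include <string>
    #include <unordered_map>

    int main()
    {
        std::unordered_multimap<std::string, const char*> defs; // stand-in for NodeMapping
        defs.insert({"7(p)", "p = a * b"});                     // first defining node of p
        defs.insert({"7(p)", "p += c"});                        // second defining node of p
        auto range = defs.equal_range("7(p)");                  // visit every definition of p
        for (auto it = range.first; it != range.second; ++it) {
            // each iteration corresponds to one assignment node to re-check
        }
        return 0;
    }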
+
+//
+// A traverser that determine whether the left node (or operand node for unary
+// node) of an assignment node is 'precise', containing 'precise' or not,
+// according to the access chain a given precise object which share the same
+// symbol as the left node.
+//
+// Post-orderly traverses the left node subtree of an binary assignment node and:
+//
+//  1) Propagates the 'precise' from the left object nodes to this object node.
+//
+//  2) Builds object access chain along the traversal, and also compares with
+//  the access chain of the given 'precise' object along with the traversal to
+//  tell if the node to be defined is 'precise' or not.
+//
+class TNoContractionAssigneeCheckingTraverser : public glslang::TIntermTraverser {
+
+    enum DecisionStatus {
+        // The object node to be assigned to may contain 'precise' objects and also not 'precise' objects.
+        Mixed = 0,
+        // The object node to be assigned to is either a 'precise' object or a struct objects whose members are all 'precise'.
+        Precise = 1,
+        // The object node to be assigned to is not a 'precise' object.
+        NotPreicse = 2,
+    };
+
+public:
+    TNoContractionAssigneeCheckingTraverser(const AccessChainMapping& accesschain_mapping)
+        : TIntermTraverser(true, false, false), accesschain_mapping_(accesschain_mapping),
+          precise_object_(nullptr) {}
+
+    // Checks the preciseness of a given assignment node with a precise object
+    // represented as access chain. The precise object shares the same symbol
+    // with the assignee of the given assignment node. Return a tuple of two:
+    //
+    //  1) The preciseness of the assignee node of this assignment node. True
+    //  if the assignee contains 'precise' objects or is 'precise', false if
+    //  the assignee is not 'precise' according to the access chain of the given
+    //  precise object.
+    //
+    //  2) The incremental access chain from the assignee node to its nested
+    //  'precise' object, according to the access chain of the given precise
+    //  object. This incremental access chain can be empty, which means the
+    //  assignee is 'precise'. Otherwise it shows the path to the nested
+    //  precise object.
+    std::tuple<bool, ObjectAccessChain>
+    getPrecisenessAndRemainedAccessChain(glslang::TIntermOperator* node,
+                                         const ObjectAccessChain& precise_object)
+    {
+        assert(isAssignOperation(node->getOp()));
+        precise_object_ = &precise_object;
+        ObjectAccessChain assignee_object;
+        if (glslang::TIntermBinary* BN = node->getAsBinaryNode()) {
+            // This is a binary assignment node, we need to check the
+            // preciseness of the left node.
+            assert(accesschain_mapping_.count(BN->getLeft()));
+            // The left node (assignee node) is an object node, traverse the
+            // node to let the 'precise' of nesting objects being transfered to
+            // nested objects.
+            BN->getLeft()->traverse(this);
+            // After traversing the left node, if the left node is 'precise',
+            // we can conclude this assignment should propagate 'precise'.
+            if (isPreciseObjectNode(BN->getLeft())) {
+                return make_tuple(true, ObjectAccessChain());
+            }
+            // If the preciseness of the left node (assignee node) can not
+            // be determined by now, we need to compare the access chain string
+            // of the assignee object with the given precise object.
+            assignee_object = accesschain_mapping_.at(BN->getLeft());
+
+        } else if (glslang::TIntermUnary* UN = node->getAsUnaryNode()) {
+            // This is a unary assignment node, we need to check the
+            // preciseness of the operand node. For unary assignment node, the
+            // operand node should always be an object node.
+            assert(accesschain_mapping_.count(UN->getOperand()));
+            // Traverse the operand node to let the 'precise' being propagated
+            // from lower nodes to upper nodes.
+            UN->getOperand()->traverse(this);
+            // After traversing the operand node, if the operand node is
+            // 'precise', this assignment should propagate 'precise'.
+            if (isPreciseObjectNode(UN->getOperand())) {
+                return make_tuple(true, ObjectAccessChain());
+            }
+            // If the preciseness of the operand node (assignee node) can not
+            // be determined by now, we need to compare the access chain string
+            // of the assignee object with the given precise object.
+            assignee_object = accesschain_mapping_.at(UN->getOperand());
+        } else {
+            // Not a binary or unary node, should not happen.
+            assert(false);
+        }
+
+        // Compare the access chain string of the assignee node with the given
+        // precise object to determine if this assignment should propagate
+        // 'precise'.
+        if (assignee_object.find(precise_object) == 0) {
+            // The access chain string of the given precise object is a prefix
+            // of assignee's access chain string. The assignee should be
+            // 'precise'.
+            return make_tuple(true, ObjectAccessChain());
+        } else if (precise_object.find(assignee_object) == 0) {
+            // The assignee's access chain string is a prefix of the given
+            // precise object, the assignee object contains 'precise' object,
+            // and we need to pass the remained access chain to the object nodes
+            // in the right.
+ return make_tuple(true, getSubAccessChainAfterPrefix(precise_object, assignee_object)); + } else { + // The access chain strings do not match, the assignee object can + // not be labeled as 'precise' according to the given precise + // object. + return make_tuple(false, ObjectAccessChain()); + } + } + +protected: + TNoContractionAssigneeCheckingTraverser& operator=(const TNoContractionAssigneeCheckingTraverser&); + + bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override; + void visitSymbol(glslang::TIntermSymbol* node) override; + + // A map from object nodes to their access chain string (used as object ID). + const AccessChainMapping& accesschain_mapping_; + // A given precise object, represented in it access chain string. This + // precise object is used to be compared with the assignee node to tell if + // the assignee node is 'precise', contains 'precise' object or not + // 'precise'. + const ObjectAccessChain* precise_object_; +}; + +// Visits a binary node. If the node is an object node, it must be a dereference +// node. In such cases, if the left node is 'precise', this node should also be +// 'precise'. +bool TNoContractionAssigneeCheckingTraverser::visitBinary(glslang::TVisit, + glslang::TIntermBinary* node) +{ + // Traverses the left so that we transfer the 'precise' from nesting object + // to its nested object. + node->getLeft()->traverse(this); + // If this binary node is an object node, we should have it in the + // accesschain_mapping_. + if (accesschain_mapping_.count(node)) { + // A binary object node must be a dereference node. + assert(isDereferenceOperation(node->getOp())); + // If the left node is 'precise', this node should also be precise, + // otherwise, compare with the given precise_object_. If the + // access chain of this node matches with the given precise_object_, + // this node should be marked as 'precise'. + if (isPreciseObjectNode(node->getLeft())) { + node->getWritableType().getQualifier().noContraction = true; + } else if (accesschain_mapping_.at(node) == *precise_object_) { + node->getWritableType().getQualifier().noContraction = true; + } + } + return false; +} + +// Visits a symbol node, if the symbol node ID (its access chain string) matches +// with the given precise object, this node should be 'precise'. +void TNoContractionAssigneeCheckingTraverser::visitSymbol(glslang::TIntermSymbol* node) +{ + // A symbol node should always be an object node, and should have been added + // to the map from object nodes to their access chain strings. + assert(accesschain_mapping_.count(node)); + if (accesschain_mapping_.at(node) == *precise_object_) { + node->getWritableType().getQualifier().noContraction = true; + } +} + +// +// A traverser that only traverses the right side of binary assignment nodes +// and the operand node of unary assignment nodes. +// +// 1) Marks arithmetic operations as 'NoContraction'. +// +// 2) Find the object which should be marked as 'precise' in the right and +// update the 'precise' object work list. 
+//
+class TNoContractionPropagator : public glslang::TIntermTraverser {
+public:
+    TNoContractionPropagator(ObjectAccesschainSet* precise_objects,
+                             const AccessChainMapping& accesschain_mapping)
+        : TIntermTraverser(true, false, false),
+          precise_objects_(*precise_objects), added_precise_object_ids_(),
+          remained_accesschain_(), accesschain_mapping_(accesschain_mapping) {}
+
+    // Propagates 'precise' in the right nodes of a given assignment node with
+    // access chain record from the assignee node to a 'precise' object it
+    // contains.
+    void
+    propagateNoContractionInOneExpression(glslang::TIntermTyped* defining_node,
+                                          const ObjectAccessChain& assignee_remained_accesschain)
+    {
+        remained_accesschain_ = assignee_remained_accesschain;
+        if (glslang::TIntermBinary* BN = defining_node->getAsBinaryNode()) {
+            assert(isAssignOperation(BN->getOp()));
+            BN->getRight()->traverse(this);
+            if (isArithmeticOperation(BN->getOp())) {
+                BN->getWritableType().getQualifier().noContraction = true;
+            }
+        } else if (glslang::TIntermUnary* UN = defining_node->getAsUnaryNode()) {
+            assert(isAssignOperation(UN->getOp()));
+            UN->getOperand()->traverse(this);
+            if (isArithmeticOperation(UN->getOp())) {
+                UN->getWritableType().getQualifier().noContraction = true;
+            }
+        }
+    }
+
+    // Propagates 'precise' in a given precise return node.
+    void propagateNoContractionInReturnNode(glslang::TIntermBranch* return_node)
+    {
+        remained_accesschain_ = "";
+        assert(return_node->getFlowOp() == glslang::EOpReturn && return_node->getExpression());
+        return_node->getExpression()->traverse(this);
+    }
+
+protected:
+    TNoContractionPropagator& operator=(const TNoContractionPropagator&);
+
+    // Visits an aggregate node. The node can be a initializer list, in which
+    // case we need to find the 'precise' or 'precise' containing object node
+    // with the access chain record. In other cases, just need to traverse all
+    // the children nodes.
+    bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node) override
+    {
+        if (!remained_accesschain_.empty() && node->getOp() == glslang::EOpConstructStruct) {
+            // This is a struct initializer node, and the remained
+            // access chain is not empty, we need to refer to the
+            // assignee_remained_access_chain_ to find the nested
+            // 'precise' object. And we don't need to visit other nodes in this
+            // aggregate node.
+
+            // Gets the struct dereference index that leads to 'precise' object.
+            ObjectAccessChain precise_accesschain_index_str =
+                getFrontElement(remained_accesschain_);
+            unsigned precise_accesschain_index = (unsigned)strtoul(precise_accesschain_index_str.c_str(), nullptr, 10);
+            // Gets the node pointed by the access chain index extracted before.
+            glslang::TIntermTyped* potential_precise_node =
+                node->getSequence()[precise_accesschain_index]->getAsTyped();
+            assert(potential_precise_node);
+            // Pop the front access chain index from the path, and visit the nested node.
+            {
+                ObjectAccessChain next_level_accesschain =
+                    subAccessChainFromSecondElement(remained_accesschain_);
+                StateSettingGuard<ObjectAccessChain> setup_remained_accesschain_for_next_level(
+                    &remained_accesschain_, next_level_accesschain);
+                potential_precise_node->traverse(this);
+            }
+            return false;
+        }
+        return true;
+    }
+
+    // Visits a binary node. A binary node can be an object node, e.g. a dereference node.
+    // As only the top object nodes in the right side of an assignment needs to be visited
+    // and added to 'precise' work list, this traverser won't visit the children nodes of
+    // an object node.
If the binary node does not represent an object node, it should + // go on to traverse its children nodes and if it is an arithmetic operation node, this + // operation should be marked as 'noContraction'. + bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override + { + if (isDereferenceOperation(node->getOp())) { + // This binary node is an object node. Need to update the precise + // object set with the access chain of this node + remained + // access chain . + ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node); + if (remained_accesschain_.empty()) { + node->getWritableType().getQualifier().noContraction = true; + } else { + new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_; + } + // Cache the access chain as added precise object, so we won't add the + // same object to the work list again. + if (!added_precise_object_ids_.count(new_precise_accesschain)) { + precise_objects_.insert(new_precise_accesschain); + added_precise_object_ids_.insert(new_precise_accesschain); + } + // Only the upper-most object nodes should be visited, so do not + // visit children of this object node. + return false; + } + // If this is an arithmetic operation, marks this node as 'noContraction'. + if (isArithmeticOperation(node->getOp()) && node->getBasicType() != glslang::EbtInt) { + node->getWritableType().getQualifier().noContraction = true; + } + // As this node is not an object node, need to traverse the children nodes. + return true; + } + + // Visits a unary node. A unary node can not be an object node. If the operation + // is an arithmetic operation, need to mark this node as 'noContraction'. + bool visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node) override + { + // If this is an arithmetic operation, marks this with 'noContraction' + if (isArithmeticOperation(node->getOp())) { + node->getWritableType().getQualifier().noContraction = true; + } + return true; + } + + // Visits a symbol node. A symbol node is always an object node. So we + // should always be able to find its in our collected mapping from object + // nodes to access chains. As an object node, a symbol node can be either + // 'precise' or containing 'precise' objects according to unused + // access chain information we have when we visit this node. + void visitSymbol(glslang::TIntermSymbol* node) override + { + // Symbol nodes are object nodes and should always have an + // access chain collected before matches with it. + assert(accesschain_mapping_.count(node)); + ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node); + // If the unused access chain is empty, this symbol node should be + // marked as 'precise'. Otherwise, the unused access chain should be + // appended to the symbol ID to build a new access chain which points to + // the nested 'precise' object in this symbol object. + if (remained_accesschain_.empty()) { + node->getWritableType().getQualifier().noContraction = true; + } else { + new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_; + } + // Add the new 'precise' access chain to the work list and make sure we + // don't visit it again. + if (!added_precise_object_ids_.count(new_precise_accesschain)) { + precise_objects_.insert(new_precise_accesschain); + added_precise_object_ids_.insert(new_precise_accesschain); + } + } + + // A set of precise objects, represented as access chains. + ObjectAccesschainSet& precise_objects_; + // Visited symbol nodes, should not revisit these nodes. 
+    ObjectAccesschainSet added_precise_object_ids_;
+    // The left node of an assignment operation might be a parent of 'precise' objects.
+    // This means the left node might not be a 'precise' object node itself, but it may
+    // contain a 'precise' qualifier which should be propagated to the corresponding
+    // child node on the right. So we need the path from the left node to its nested
+    // 'precise' node to tell us how to find the corresponding 'precise' node on the right.
+    ObjectAccessChain remained_accesschain_;
+    // A map from node pointers to their access chains.
+    const AccessChainMapping& accesschain_mapping_;
+};
+}
+
+namespace glslang {
+
+void PropagateNoContraction(const glslang::TIntermediate& intermediate)
+{
+    // First, traverse the AST, record symbols with their defining operations,
+    // and collect the initial set of precise symbols (symbol nodes marked
+    // as 'noContraction') and precise return nodes.
+    auto mappings_and_precise_objects =
+        getSymbolToDefinitionMappingAndPreciseSymbolIDs(intermediate);
+
+    // The mapping of symbol node IDs to their defining nodes. This enables us
+    // to get the defining node directly from a given symbol ID without
+    // traversing the tree again.
+    NodeMapping& symbol_definition_mapping = std::get<0>(mappings_and_precise_objects);
+
+    // The mapping of object nodes to their access chains recorded.
+    AccessChainMapping& accesschain_mapping = std::get<1>(mappings_and_precise_objects);
+
+    // The initial set of 'precise' objects, each represented as the
+    // access chain toward it.
+    ObjectAccesschainSet& precise_object_accesschains = std::get<2>(mappings_and_precise_objects);
+
+    // The set of 'precise' return nodes.
+    ReturnBranchNodeSet& precise_return_nodes = std::get<3>(mappings_and_precise_objects);
+
+    // Second, use the initial set of precise objects as a work list: pop an
+    // access chain and extract the symbol ID from it. Then:
+    //  1) Check the assignee object to see if it is a 'precise' object node or
+    //     contains 'precise' objects. Obtain the incremental access chain from
+    //     the assignee node to its nested 'precise' node (if any).
+    //  2) If the assignee object node is 'precise' or contains 'precise'
+    //     objects, traverse the right side of the assignment expression to mark
+    //     arithmetic operations as 'noContraction' and update the 'precise'
+    //     access chain work list with newly found object nodes.
+    // Repeat the above steps until the work list is empty.
+    TNoContractionAssigneeCheckingTraverser checker(accesschain_mapping);
+    TNoContractionPropagator propagator(&precise_object_accesschains, accesschain_mapping);
+
+    // We have two initial precise work lists to handle:
+    //  1) precise return nodes
+    //  2) precise object access chains
+    // We should process the precise return nodes first, and the objects
+    // involved in the return expressions should be added to the precise object
+    // access chain set.
+    while (!precise_return_nodes.empty()) {
+        glslang::TIntermBranch* precise_return_node = *precise_return_nodes.begin();
+        propagator.propagateNoContractionInReturnNode(precise_return_node);
+        precise_return_nodes.erase(precise_return_node);
+    }
+
+    while (!precise_object_accesschains.empty()) {
+        // Get the access chain of a precise object from the work list.
+        ObjectAccessChain precise_object_accesschain = *precise_object_accesschains.begin();
+        // Get the symbol ID from the access chain.
+        ObjectAccessChain symbol_id = getFrontElement(precise_object_accesschain);
+        // Get all the defining nodes of that symbol ID.
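+        // (symbol_definition_mapping allows duplicate keys: a symbol may be assigned
+        //  in several places, and equal_range below yields every defining node that
+        //  was recorded for this symbol ID.)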
+        std::pair<NodeMapping::iterator, NodeMapping::iterator> range =
+            symbol_definition_mapping.equal_range(symbol_id);
+        // Visit all the assignment nodes of that symbol ID and
+        //  1) Check if the assignee node is 'precise' or contains 'precise'
+        //     objects.
+        //  2) Propagate the 'precise' to the top-layer object nodes on
+        //     the right side of the assignment operation, update the 'precise'
+        //     work list with new access chains representing the new 'precise'
+        //     objects, and mark arithmetic operations as 'noContraction'.
+        for (NodeMapping::iterator defining_node_iter = range.first;
+             defining_node_iter != range.second; defining_node_iter++) {
+            TIntermOperator* defining_node = defining_node_iter->second;
+            // Check the assignee node.
+            auto checker_result = checker.getPrecisenessAndRemainedAccessChain(
+                defining_node, precise_object_accesschain);
+            bool& contain_precise = std::get<0>(checker_result);
+            ObjectAccessChain& remained_accesschain = std::get<1>(checker_result);
+            // If the assignee node is 'precise' or contains 'precise', propagate the
+            // 'precise' to the right. Otherwise just skip this assignment node.
+            if (contain_precise) {
+                propagator.propagateNoContractionInOneExpression(defining_node,
+                                                                 remained_accesschain);
+            }
+        }
+        // Remove the last processed 'precise' object from the work list.
+        precise_object_accesschains.erase(precise_object_accesschain);
+    }
+}
+};
+
+#endif // GLSLANG_WEB
diff --git a/dep/glslang/glslang/MachineIndependent/propagateNoContraction.h b/dep/glslang/glslang/MachineIndependent/propagateNoContraction.h
new file mode 100644
index 000000000..8521ad7d6
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/propagateNoContraction.h
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2015-2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// propagate the 'noContraction' qualifier.
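+//
+// For example, in a (hypothetical) shader such as
+//     precise out vec4 result;
+//     void main() { float x = a * b + c; result = vec4(x); }
+// the 'precise' on 'result' reaches back through the assignment chain, so the
+// multiply-add producing 'x' must be marked 'noContraction' (e.g. not fused
+// into a single FMA).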
+//
+
+#pragma once
+
+#include "../Include/intermediate.h"
+
+namespace glslang {
+
+// Propagates the 'precise' qualifier for objects (objects marked with the
+// 'noContraction' qualifier) from the 'precise' variables specified in the
+// shader source to all the involved objects, and adds the 'noContraction'
+// qualifier to the involved arithmetic operations.
+// Note that the same 'noContraction' qualifier is used in both object nodes
+// and arithmetic operation nodes, but with different meanings. For object
+// nodes, 'noContraction' means the object is 'precise'; for arithmetic
+// operation nodes, it means the operation should not be contracted.
+void PropagateNoContraction(const glslang::TIntermediate& intermediate);
+};
diff --git a/dep/glslang/glslang/MachineIndependent/reflection.cpp b/dep/glslang/glslang/MachineIndependent/reflection.cpp
new file mode 100644
index 000000000..55caa1f44
--- /dev/null
+++ b/dep/glslang/glslang/MachineIndependent/reflection.cpp
@@ -0,0 +1,1268 @@
+//
+// Copyright (C) 2013-2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef GLSLANG_WEB
+
+#include "../Include/Common.h"
+#include "reflection.h"
+#include "LiveTraverser.h"
+#include "localintermediate.h"
+
+#include "gl_types.h"
+
+//
+// Grow the reflection database through a friend traverser class of TReflection and a
+// collection of functions to do a liveness traversal that notes which uniforms are used
+// in semantically non-dead code.
+//
+// Can be used multiple times, once per stage, to grow a program reflection.
+//
+// High-level algorithm for one stage:
+//
+// 1. Put the entry point on the list of live functions.
+//
+// 2. Traverse any live function, while skipping if-tests with a compile-time constant
+//    condition of false, and while adding any encountered function calls to the live
+//    function list.
+//
+//    Repeat until the live function list is empty.
+//
+// 3. Add any encountered uniform variables and blocks to the reflection database.
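+//
+//    For instance (hypothetical), a uniform referenced only inside
+//    'if (false) { ... }' is dead code: step 2 skips that branch, so the
+//    uniform never enters the database.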
+//
+// Can be attempted with a failed link, but will return false if recursion has been detected, or
+// there wasn't exactly one entry point.
+//
+
+namespace glslang {
+
+//
+// The traverser: mostly pass through, except
+//  - processing binary nodes to see if they are dereferences of aggregates to track
+//  - processing symbol nodes to see if they are non-aggregate objects to track
+//
+// This ignores semantically dead code by using TLiveTraverser.
+//
+// This is in the glslang namespace directly so it can be a friend of TReflection.
+//
+
+class TReflectionTraverser : public TIntermTraverser {
+public:
+    TReflectionTraverser(const TIntermediate& i, TReflection& r) :
+        TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { }
+
+    virtual bool visitBinary(TVisit, TIntermBinary* node);
+    virtual void visitSymbol(TIntermSymbol* base);
+
+    // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
+    // However, no dereference doesn't mean simple... it could be a complex aggregate.
+    void addUniform(const TIntermSymbol& base)
+    {
+        if (processedDerefs.find(&base) == processedDerefs.end()) {
+            processedDerefs.insert(&base);
+
+            int blockIndex = -1;
+            int offset = -1;
+            TList<TIntermBinary*> derefs;
+            TString baseName = base.getName();
+
+            if (base.getType().getBasicType() == EbtBlock) {
+                offset = 0;
+                bool anonymous = IsAnonymous(baseName);
+                const TString& blockName = base.getType().getTypeName();
+
+                if (!anonymous)
+                    baseName = blockName;
+                else
+                    baseName = "";
+
+                if (base.getType().isArray()) {
+                    TType derefType(base.getType(), 0);
+
+                    assert(!anonymous);
+                    for (int e = 0; e < base.getType().getCumulativeArraySize(); ++e)
+                        blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
+                                                  intermediate.getBlockSize(base.getType()));
+                }
+                else
+                    blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
+            }
+
+            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
+            // the dereference chain expected by blowUpActiveAggregate.
+            blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, 0,
+                                  base.getQualifier().storage, updateStageMasks);
+        }
+    }
+
+    void addPipeIOVariable(const TIntermSymbol& base)
+    {
+        if (processedDerefs.find(&base) == processedDerefs.end()) {
+            processedDerefs.insert(&base);
+
+            const TString &name = base.getName();
+            const TType &type = base.getType();
+            const bool input = base.getQualifier().isPipeInput();
+
+            TReflection::TMapIndexToReflection &ioItems =
+                input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
+
+            TReflection::TNameToIndex &ioMapper =
+                input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex;
+
+            if (reflection.options & EShReflectionUnwrapIOBlocks) {
+                bool anonymous = IsAnonymous(name);
+
+                TString baseName;
+                if (type.getBasicType() == EbtBlock) {
+                    baseName = anonymous ? TString() : type.getTypeName();
+                } else {
+                    baseName = anonymous ? TString() : name;
+                }
+
+                // by convention if this is an arrayed block we ignore the array in the reflection
+                if (type.isArray() && type.getBasicType() == EbtBlock) {
+                    blowUpIOAggregate(input, baseName, TType(type, 0));
+                } else {
+                    blowUpIOAggregate(input, baseName, type);
+                }
+            } else {
+                TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str());
+                if (it == ioMapper.end()) {
+                    // Separate pipe I/O params from uniforms and blocks; 'in' is only
+                    // for input in the first stage, and 'out' is only for the last
+                    // stage. Check the traversal in the call stack.
+                    ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
+                    ioItems.push_back(
+                        TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
+                    EShLanguageMask& stages = ioItems.back().stages;
+                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+                } else {
+                    EShLanguageMask& stages = ioItems[it->second].stages;
+                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+                }
+            }
+        }
+    }
+
+    // Lookup or calculate the offset of all block members at once, using the recursively
+    // defined block offset rules.
+    void getOffsets(const TType& type, TVector<int>& offsets)
+    {
+        const TTypeList& memberList = *type.getStruct();
+        int memberSize = 0;
+        int offset = 0;
+
+        for (size_t m = 0; m < offsets.size(); ++m) {
+            // if the user supplied an offset, snap to it now
+            if (memberList[m].type->getQualifier().hasOffset())
+                offset = memberList[m].type->getQualifier().layoutOffset;
+
+            // calculate the offset of the next member and align the current offset to this member
+            intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);
+
+            // save the offset of this member
+            offsets[m] = offset;
+
+            // update for the next member
+            offset += memberSize;
+        }
+    }
+
+    // Calculate the stride of an array type
+    int getArrayStride(const TType& baseType, const TType& type)
+    {
+        int dummySize;
+        int stride;
+
+        // consider blocks to have 0 stride, so that all offsets are relative to the start of their block
+        if (type.getBasicType() == EbtBlock)
+            return 0;
+
+        TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
+        intermediate.getMemberAlignment(type, dummySize, stride,
+                                        baseType.getQualifier().layoutPacking,
+                                        subMatrixLayout != ElmNone
+                                            ? subMatrixLayout == ElmRowMajor
+                                            : baseType.getQualifier().layoutMatrix == ElmRowMajor);
+
+        return stride;
+    }
+
+    // Count the total number of leaf members reachable by iterating through a block type.
+    int countAggregateMembers(const TType& parentType)
+    {
+        if (! parentType.isStruct())
+            return 1;
+
+        const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+
+        bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);
+
+        const TTypeList &memberList = *parentType.getStruct();
+
+        int ret = 0;
+
+        for (size_t i = 0; i < memberList.size(); i++)
+        {
+            const TType &memberType = *memberList[i].type;
+            int numMembers = countAggregateMembers(memberType);
+            // for sized arrays of structs, apply logic to expand out the same as we would below in
+            // blowUpActiveAggregate
+            if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
+                if (! strictArraySuffix || ! blockParent)
+                    numMembers *= memberType.getArraySizes()->getCumulativeSize();
+            }
+            ret += numMembers;
+        }
+
+        return ret;
+    }
+
+    // Traverse the provided deref chain, including the base, and
+    // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
+    // - recursively expand any variable array index in the middle of that traversal
+    // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
+    //
+    // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
+    // A value of 0 for arraySize will mean to use the full array's size.
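+    //
+    // For example (hypothetical shader), a member 's[2]' of struct type with a float
+    // field 'v' expands to the entries "s[0].v" and "s[1].v"; under strict array-suffix
+    // rules, a top-level array at the root of a buffer block is reported only as "s[0].v".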
+    void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
+                               TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
+                               int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
+    {
+        // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
+        // Broadly:
+        // * arrays-of-structs always have a [x] suffix.
+        // * with array-of-struct variables in the root of a buffer block, only ever return [0].
+        // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
+        const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+
+        // Is this variable inside a buffer block? This flag is set back to false after we iterate inside the first array element.
+        bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);
+
+        // process the part of the dereference chain that was explicit in the shader
+        TString name = baseName;
+        const TType* terminalType = &baseType;
+        for (; deref != derefs.end(); ++deref) {
+            TIntermBinary* visitNode = *deref;
+            terminalType = &visitNode->getType();
+            int index;
+            switch (visitNode->getOp()) {
+            case EOpIndexIndirect: {
+                int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
+
+                if (topLevelArrayStride == 0)
+                    topLevelArrayStride = stride;
+
+                // Visit all the indices of this array, and for each one add on the remaining dereferencing
+                for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
+                    TString newBaseName = name;
+                    if (strictArraySuffix && blockParent)
+                        newBaseName.append(TString("[0]"));
+                    else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
+                        newBaseName.append(TString("[") + String(i) + "]");
+                    TList<TIntermBinary*>::const_iterator nextDeref = deref;
+                    ++nextDeref;
+                    blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
+                                          topLevelArrayStride, baseStorage, active);
+
+                    if (offset >= 0)
+                        offset += stride;
+                }
+
+                // it was all completed in the recursive calls above
+                return;
+            }
+            case EOpIndexDirect: {
+                int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
+
+                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+                if (strictArraySuffix && blockParent) {
+                    name.append(TString("[0]"));
+                } else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
+                    name.append(TString("[") + String(index) + "]");
+
+                    if (offset >= 0)
+                        offset += stride * index;
+                }
+
+                if (topLevelArrayStride == 0)
+                    topLevelArrayStride = stride;
+
+                blockParent = false;
+                break;
+            }
+            case EOpIndexDirectStruct:
+                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+                if (offset >= 0)
+                    offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
+                if (name.size() > 0)
+                    name.append(".");
+                name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
+                break;
+            default:
+                break;
+            }
+        }
+
+        // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
+        if (! isReflectionGranularity(*terminalType)) {
+            // the base offset of this node, that children are relative to
+            int baseOffset = offset;
+
+            if (terminalType->isArray()) {
+                // Visit all the indices of this array, and for each one,
+                // fully explode the remaining aggregate to dereference
+
+                int stride = 0;
+                if (offset >= 0)
+                    stride = getArrayStride(baseType, *terminalType);
+
+                if (topLevelArrayStride == 0)
+                    topLevelArrayStride = stride;
+
+                int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
+
+                // for top-level arrays in blocks, only expand [0] to avoid explosion of items
+                if (strictArraySuffix && blockParent)
+                    arrayIterateSize = 1;
+
+                for (int i = 0; i < arrayIterateSize; ++i) {
+                    TString newBaseName = name;
+                    if (terminalType->getBasicType() != EbtBlock)
+                        newBaseName.append(TString("[") + String(i) + "]");
+                    TType derefType(*terminalType, 0);
+                    if (offset >= 0)
+                        offset = baseOffset + stride * i;
+
+                    blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+                                          topLevelArrayStride, baseStorage, active);
+                }
+            } else {
+                // Visit all members of this aggregate, and for each one,
+                // fully explode the remaining aggregate to dereference
+                const TTypeList& typeList = *terminalType->getStruct();
+
+                TVector<int> memberOffsets;
+
+                if (baseOffset >= 0) {
+                    memberOffsets.resize(typeList.size());
+                    getOffsets(*terminalType, memberOffsets);
+                }
+
+                for (int i = 0; i < (int)typeList.size(); ++i) {
+                    TString newBaseName = name;
+                    if (newBaseName.size() > 0)
+                        newBaseName.append(".");
+                    newBaseName.append(typeList[i].type->getFieldName());
+                    TType derefType(*terminalType, i);
+                    if (offset >= 0)
+                        offset = baseOffset + memberOffsets[i];
+
+                    int arrayStride = topLevelArrayStride;
+                    if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
+                        derefType.isArray()) {
+                        arrayStride = getArrayStride(baseType, derefType);
+                    }
+
+                    blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+                                          arrayStride, baseStorage, active);
+                }
+            }
+
+            // it was all completed in the recursive calls above
+            return;
+        }
+
+        if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
+            name.append(TString("[0]"));
+        }
+
+        // Finally, add a full string to the reflection database, and update the array size if necessary.
+        // If the dereferenced entity to record is an array, compute the size and update the maximum size.
+
+        // there might not be a final array dereference, it could have been copied as an array object
+        if (arraySize == 0)
+            arraySize = mapToGlArraySize(*terminalType);
+
+        TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);
+
+        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+        if (it == reflection.nameToIndex.end()) {
+            int uniformIndex = (int)variables.size();
+            reflection.nameToIndex[name.c_str()] = uniformIndex;
+            variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
+                                                  arraySize, blockIndex));
+            if (terminalType->isArray()) {
+                variables.back().arrayStride = getArrayStride(baseType, *terminalType);
+                if (topLevelArrayStride == 0)
+                    topLevelArrayStride = variables.back().arrayStride;
+            }
+
+            if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
+                reflection.atomicCounterUniformIndices.push_back(uniformIndex);
+
+            variables.back().topLevelArrayStride = topLevelArrayStride;
+
+            if ((reflection.options & EShReflectionAllBlockVariables) && active) {
+                EShLanguageMask& stages = variables.back().stages;
+                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+            }
+        } else {
+            if (arraySize > 1) {
+                int& reflectedArraySize = variables[it->second].size;
+                reflectedArraySize = std::max(arraySize, reflectedArraySize);
+            }
+
+            if ((reflection.options & EShReflectionAllBlockVariables) && active) {
+                EShLanguageMask& stages = variables[it->second].stages;
+                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+            }
+        }
+    }
+
+    // similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
+    void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
+    {
+        TString name = baseName;
+
+        // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
+        if (! isReflectionGranularity(type)) {
+            if (type.isArray()) {
+                // Visit all the indices of this array, and for each one,
+                // fully explode the remaining aggregate to dereference
+                for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
+                    TString newBaseName = name;
+                    newBaseName.append(TString("[") + String(i) + "]");
+                    TType derefType(type, 0);
+
+                    blowUpIOAggregate(input, newBaseName, derefType);
+                }
+            } else {
+                // Visit all members of this aggregate, and for each one,
+                // fully explode the remaining aggregate to dereference
+                const TTypeList& typeList = *type.getStruct();
+
+                for (int i = 0; i < (int)typeList.size(); ++i) {
+                    TString newBaseName = name;
+                    if (newBaseName.size() > 0)
+                        newBaseName.append(".");
+                    newBaseName.append(typeList[i].type->getFieldName());
+                    TType derefType(type, i);
+
+                    blowUpIOAggregate(input, newBaseName, derefType);
+                }
+            }
+
+            // it was all completed in the recursive calls above
+            return;
+        }
+
+        if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
+            name.append(TString("[0]"));
+        }
+
+        TReflection::TMapIndexToReflection &ioItems =
+            input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
+
+        std::string namespacedName = input ? "in " : "out ";
+        namespacedName += name.c_str();
+
+        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
+        if (it == reflection.nameToIndex.end()) {
+            reflection.nameToIndex[namespacedName] = (int)ioItems.size();
+            ioItems.push_back(
+                TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
+
+            EShLanguageMask& stages = ioItems.back().stages;
+            stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+        } else {
+            EShLanguageMask& stages = ioItems[it->second].stages;
+            stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+        }
+    }
+
+    // Add a uniform dereference where blocks/struct/arrays are involved in the access.
+    // Handles the situation where the left node is at the correct or too coarse a
+    // granularity for reflection. (That is, further dereferences up the tree will be
+    // skipped.) Earlier dereferences, down the tree, will be handled
+    // at the same time, and logged to prevent reprocessing as the tree is traversed.
+    //
+    // Note: Other things like the following must be caught elsewhere:
+    //  - a simple non-array, non-struct variable (no dereference even conceivable)
+    //  - an aggregate consumed en masse, without a dereference
+    //
+    // So, this code is for cases like
+    //  - a struct/block dereferencing a member (whether the member is array or not)
+    //  - an array of struct
+    //  - structs/arrays containing the above
+    //
+    void addDereferencedUniform(TIntermBinary* topNode)
+    {
+        // See if too fine-grained to process (wait to get further down the tree)
+        const TType& leftType = topNode->getLeft()->getType();
+        if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
+            return;
+
+        // We have an array or structure or block dereference, see if it's a uniform
+        // based dereference (if not, skip it).
+        TIntermSymbol* base = findBase(topNode);
+        if (! base || ! base->getQualifier().isUniformOrBuffer())
+            return;
+
+        // See if we've already processed this (e.g., in the middle of something
+        // we did earlier), and if so skip it
+        if (processedDerefs.find(topNode) != processedDerefs.end())
+            return;
+
+        // Process this uniform dereference
+
+        int offset = -1;
+        int blockIndex = -1;
+        bool anonymous = false;
+
+        // See if we need to record the block itself
+        bool block = base->getBasicType() == EbtBlock;
+        if (block) {
+            offset = 0;
+            anonymous = IsAnonymous(base->getName());
+
+            const TString& blockName = base->getType().getTypeName();
+            TString baseName;
+
+            if (! anonymous)
+                baseName = blockName;
+
+            if (base->getType().isArray()) {
+                TType derefType(base->getType(), 0);
+
+                assert(! anonymous);
+                for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
+                    blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
+                                              intermediate.getBlockSize(base->getType()));
+                baseName.append(TString("[0]"));
+            } else
+                blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
+
+            if (reflection.options & EShReflectionAllBlockVariables) {
+                // Use a degenerate (empty) set of dereferences to immediately put us at the end of
+                // the dereference chain expected by blowUpActiveAggregate.
+                TList<TIntermBinary*> derefs;
+
+                // because we don't have any derefs, the first thing blowUpActiveAggregate will do is iterate over each
+                // member in the struct definition. This will lose any information about whether the parent was a buffer
+                // block.
+                // So if we're using strict array rules, which don't expand the first child of a buffer
+                // block, we instead iterate over the children here.
+                const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+                bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
+
+                if (strictArraySuffix && blockParent) {
+                    TType structDerefType(base->getType(), 0);
+
+                    const TType &structType = base->getType().isArray() ? structDerefType : base->getType();
+                    const TTypeList& typeList = *structType.getStruct();
+
+                    TVector<int> memberOffsets;
+
+                    memberOffsets.resize(typeList.size());
+                    getOffsets(structType, memberOffsets);
+
+                    for (int i = 0; i < (int)typeList.size(); ++i) {
+                        TType derefType(structType, i);
+                        TString name = baseName;
+                        if (name.size() > 0)
+                            name.append(".");
+                        name.append(typeList[i].type->getFieldName());
+
+                        // if this member is an array, store the top-level array stride but start the explosion from
+                        // the inner struct type.
+                        if (derefType.isArray() && derefType.isStruct()) {
+                            name.append("[0]");
+                            blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
+                                                  blockIndex, 0, getArrayStride(structType, derefType),
+                                                  base->getQualifier().storage, false);
+                        } else {
+                            blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
+                                                  0, 0, base->getQualifier().storage, false);
+                        }
+                    }
+                } else {
+                    // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
+                    // expanding root arrays anyway, just start the iteration from the base block type.
+                    blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, 0,
+                                          base->getQualifier().storage, false);
+                }
+            }
+        }
+
+        // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
+        // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
+        TList<TIntermBinary*> derefs;
+        for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
+            if (isReflectionGranularity(visitNode->getLeft()->getType()))
+                continue;
+
+            derefs.push_front(visitNode);
+            processedDerefs.insert(visitNode);
+        }
+        processedDerefs.insert(base);
+
+        // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
+        int arraySize = 0;
+        if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
+            if (topNode->getOp() == EOpIndexDirect)
+                arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
+        }
+
+        // Put the dereference chain together, forward
+        TString baseName;
+        if (! anonymous) {
+            if (block)
+                baseName = base->getType().getTypeName();
+            else
+                baseName = base->getName();
+        }
+        blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, 0,
+                              base->getQualifier().storage, true);
+    }
+
+    int addBlockName(const TString& name, const TType& type, int size)
+    {
+        TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
+
+        int blockIndex;
+        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+        if (it == reflection.nameToIndex.end()) {
+            blockIndex = (int)blocks.size();
+            reflection.nameToIndex[name.c_str()] = blockIndex;
+            blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
+
+            blocks.back().numMembers = countAggregateMembers(type);
+
+            if (updateStageMasks) {
+                EShLanguageMask& stages = blocks.back().stages;
+                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+            }
+        } else {
+            blockIndex = it->second;
+
+            if (updateStageMasks) {
+                EShLanguageMask& stages = blocks[blockIndex].stages;
+                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+            }
+        }
+
+        return blockIndex;
+    }
+
+    // Are we at a level in a dereference chain at which individual active uniform queries are made?
+    bool isReflectionGranularity(const TType& type)
+    {
+        return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
+    }
+
+    // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
+    // Return nullptr if the topology does not fit this situation.
+    TIntermSymbol* findBase(const TIntermBinary* node)
+    {
+        TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
+        if (base)
+            return base;
+        TIntermBinary* left = node->getLeft()->getAsBinaryNode();
+        if (! left)
+            return nullptr;
+
+        return findBase(left);
+    }
+
+    //
+    // Translate a glslang sampler type into the GL API #define number.
+    //
+    int mapSamplerToGlType(TSampler sampler)
+    {
+        if (! sampler.image) {
+            // a sampler...
+            switch (sampler.type) {
+            case EbtFloat:
+                switch ((int)sampler.dim) {
+                case Esd1D:
+                    switch ((int)sampler.shadow) {
+                    case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
+                    case true:  return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
+                    }
+                case Esd2D:
+                    switch ((int)sampler.ms) {
+                    case false:
+                        switch ((int)sampler.shadow) {
+                        case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
+                        case true:  return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
+                        }
+                    case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
+                    }
+                case Esd3D:
+                    return GL_SAMPLER_3D;
+                case EsdCube:
+                    switch ((int)sampler.shadow) {
+                    case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
+                    case true:  return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
+                    }
+                case EsdRect:
+                    return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
+                case EsdBuffer:
+                    return GL_SAMPLER_BUFFER;
+                }
+            case EbtFloat16:
+                switch ((int)sampler.dim) {
+                case Esd1D:
+                    switch ((int)sampler.shadow) {
+                    case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD;
+                    case true: return sampler.arrayed ?
GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD; + } + case Esd2D: + switch ((int)sampler.ms) { + case false: + switch ((int)sampler.shadow) { + case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD; + case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD; + } + case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD; + } + case Esd3D: + return GL_FLOAT16_SAMPLER_3D_AMD; + case EsdCube: + switch ((int)sampler.shadow) { + case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD; + case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD; + } + case EsdRect: + return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD; + case EsdBuffer: + return GL_FLOAT16_SAMPLER_BUFFER_AMD; + } + case EbtInt: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D; + case true: return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY + : GL_INT_SAMPLER_2D_MULTISAMPLE; + } + case Esd3D: + return GL_INT_SAMPLER_3D; + case EsdCube: + return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE; + case EsdRect: + return GL_INT_SAMPLER_2D_RECT; + case EsdBuffer: + return GL_INT_SAMPLER_BUFFER; + } + case EbtUint: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D; + case true: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY + : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE; + } + case Esd3D: + return GL_UNSIGNED_INT_SAMPLER_3D; + case EsdCube: + return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE; + case EsdRect: + return GL_UNSIGNED_INT_SAMPLER_2D_RECT; + case EsdBuffer: + return GL_UNSIGNED_INT_SAMPLER_BUFFER; + } + default: + return 0; + } + } else { + // an image... + switch (sampler.type) { + case EbtFloat: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D; + case true: return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE; + } + case Esd3D: + return GL_IMAGE_3D; + case EsdCube: + return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE; + case EsdRect: + return GL_IMAGE_2D_RECT; + case EsdBuffer: + return GL_IMAGE_BUFFER; + } + case EbtFloat16: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD; + case true: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD; + } + case Esd3D: + return GL_FLOAT16_IMAGE_3D_AMD; + case EsdCube: + return sampler.arrayed ? 
GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD; + case EsdRect: + return GL_FLOAT16_IMAGE_2D_RECT_AMD; + case EsdBuffer: + return GL_FLOAT16_IMAGE_BUFFER_AMD; + } + case EbtInt: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D; + case true: return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE; + } + case Esd3D: + return GL_INT_IMAGE_3D; + case EsdCube: + return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE; + case EsdRect: + return GL_INT_IMAGE_2D_RECT; + case EsdBuffer: + return GL_INT_IMAGE_BUFFER; + } + case EbtUint: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D; + case true: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY + : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE; + } + case Esd3D: + return GL_UNSIGNED_INT_IMAGE_3D; + case EsdCube: + return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE; + case EsdRect: + return GL_UNSIGNED_INT_IMAGE_2D_RECT; + case EsdBuffer: + return GL_UNSIGNED_INT_IMAGE_BUFFER; + } + default: + return 0; + } + } + } + + // + // Translate a glslang type into the GL API #define number. + // Ignores arrayness. + // + int mapToGlType(const TType& type) + { + switch (type.getBasicType()) { + case EbtSampler: + return mapSamplerToGlType(type.getSampler()); + case EbtStruct: + case EbtBlock: + case EbtVoid: + return 0; + default: + break; + } + + if (type.isVector()) { + int offset = type.getVectorSize() - 2; + switch (type.getBasicType()) { + case EbtFloat: return GL_FLOAT_VEC2 + offset; + case EbtDouble: return GL_DOUBLE_VEC2 + offset; + case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset; + case EbtInt: return GL_INT_VEC2 + offset; + case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset; + case EbtInt64: return GL_INT64_ARB + offset; + case EbtUint64: return GL_UNSIGNED_INT64_ARB + offset; + case EbtBool: return GL_BOOL_VEC2 + offset; + case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset; + default: return 0; + } + } + if (type.isMatrix()) { + switch (type.getBasicType()) { + case EbtFloat: + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT_MAT2; + case 3: return GL_FLOAT_MAT2x3; + case 4: return GL_FLOAT_MAT2x4; + default: return 0; + } + case 3: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT_MAT3x2; + case 3: return GL_FLOAT_MAT3; + case 4: return GL_FLOAT_MAT3x4; + default: return 0; + } + case 4: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT_MAT4x2; + case 3: return GL_FLOAT_MAT4x3; + case 4: return GL_FLOAT_MAT4; + default: return 0; + } + } + case EbtDouble: + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: return GL_DOUBLE_MAT2; + case 3: return GL_DOUBLE_MAT2x3; + case 4: return GL_DOUBLE_MAT2x4; + default: return 0; + } + case 3: + switch (type.getMatrixRows()) { + case 2: return GL_DOUBLE_MAT3x2; + case 3: return GL_DOUBLE_MAT3; + case 4: return GL_DOUBLE_MAT3x4; + default: return 0; + } + case 4: + switch (type.getMatrixRows()) { + case 2: return GL_DOUBLE_MAT4x2; + case 3: return GL_DOUBLE_MAT4x3; + 
case 4: return GL_DOUBLE_MAT4;
+                    default: return 0;
+                    }
+                }
+            case EbtFloat16:
+                switch (type.getMatrixCols()) {
+                case 2:
+                    switch (type.getMatrixRows()) {
+                    case 2: return GL_FLOAT16_MAT2_AMD;
+                    case 3: return GL_FLOAT16_MAT2x3_AMD;
+                    case 4: return GL_FLOAT16_MAT2x4_AMD;
+                    default: return 0;
+                    }
+                case 3:
+                    switch (type.getMatrixRows()) {
+                    case 2: return GL_FLOAT16_MAT3x2_AMD;
+                    case 3: return GL_FLOAT16_MAT3_AMD;
+                    case 4: return GL_FLOAT16_MAT3x4_AMD;
+                    default: return 0;
+                    }
+                case 4:
+                    switch (type.getMatrixRows()) {
+                    case 2: return GL_FLOAT16_MAT4x2_AMD;
+                    case 3: return GL_FLOAT16_MAT4x3_AMD;
+                    case 4: return GL_FLOAT16_MAT4_AMD;
+                    default: return 0;
+                    }
+                }
+            default:
+                return 0;
+            }
+        }
+        if (type.getVectorSize() == 1) {
+            switch (type.getBasicType()) {
+            case EbtFloat:      return GL_FLOAT;
+            case EbtDouble:     return GL_DOUBLE;
+            case EbtFloat16:    return GL_FLOAT16_NV;
+            case EbtInt:        return GL_INT;
+            case EbtUint:       return GL_UNSIGNED_INT;
+            case EbtInt64:      return GL_INT64_ARB;
+            case EbtUint64:     return GL_UNSIGNED_INT64_ARB;
+            case EbtBool:       return GL_BOOL;
+            case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
+            default:            return 0;
+            }
+        }
+
+        return 0;
+    }
+
+    int mapToGlArraySize(const TType& type)
+    {
+        return type.isArray() ? type.getOuterArraySize() : 1;
+    }
+
+    const TIntermediate& intermediate;
+    TReflection& reflection;
+    std::set<const TIntermNode*> processedDerefs;
+    bool updateStageMasks;
+
+protected:
+    TReflectionTraverser(TReflectionTraverser&);
+    TReflectionTraverser& operator=(TReflectionTraverser&);
+};
+
+//
+// Implement the traversal functions of interest.
+//
+
+// To catch dereferenced aggregates that must be reflected.
+// This catches them at the highest level possible in the tree.
+bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
+{
+    switch (node->getOp()) {
+    case EOpIndexDirect:
+    case EOpIndexIndirect:
+    case EOpIndexDirectStruct:
+        addDereferencedUniform(node);
+        break;
+    default:
+        break;
+    }
+
+    // still need to visit everything below, which could contain sub-expressions
+    // containing different uniforms
+    return true;
+}
+
+// To reflect non-dereferenced objects.
+void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
+{
+    if (base->getQualifier().storage == EvqUniform) {
+        if (base->getBasicType() == EbtBlock) {
+            if (reflection.options & EShReflectionSharedStd140Blocks) {
+                addUniform(*base);
+            }
+        } else {
+            addUniform(*base);
+        }
+    }
+
+    if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
+        (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
+        addPipeIOVariable(*base);
+}
+
+//
+// Implement TObjectReflection methods.
+//
+
+TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
+                                     int pSize, int pIndex)
+    : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
+      numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
+{
+}
+
+int TObjectReflection::getBinding() const
+{
+    if (type == nullptr || !type->getQualifier().hasBinding())
+        return -1;
+    return type->getQualifier().layoutBinding;
+}
+
+void TObjectReflection::dump() const
+{
+    printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
+           index, getBinding(), stages);
+
+    if (counterIndex != -1)
+        printf(", counter %d", counterIndex);
+
+    if (numMembers != -1)
+        printf(", numMembers %d", numMembers);
+
+    if (arrayStride != 0)
+        printf(", arrayStride %d", arrayStride);
+
+    if (topLevelArrayStride != 0)
+        printf(", topLevelArrayStride %d", topLevelArrayStride);
+
+    printf("\n");
+}
+
+//
+// Implement TReflection methods.
+//
+
+// Track any required attribute reflection, such as compute shader numthreads.
+//
+void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
+{
+    if (stage == EShLangCompute) {
+        // Remember thread dimensions
+        for (int dim=0; dim<3; ++dim)
+            localSize[dim] = intermediate.getLocalSize(dim);
+    }
+}
+
+// build counter block index associations for buffers
+void TReflection::buildCounterIndices(const TIntermediate& intermediate)
+{
+#ifdef ENABLE_HLSL
+    // search for ones that have counters
+    for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
+        const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
+        const int index = getIndex(counterName);
+
+        if (index >= 0)
+            indexToUniformBlock[i].counterIndex = index;
+    }
+#endif
+}
+
+// build shader stage mask for all uniforms
+void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
+{
+    if (options & EShReflectionAllBlockVariables)
+        return;
+
+    for (int i = 0; i < int(indexToUniform.size()); ++i) {
+        indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
+    }
+
+    for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
+        indexToBufferVariable[i].stages =
+            static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
+    }
+}
+
+// Merge live symbols from 'intermediate' into the existing reflection database.
+//
+// Returns false if the input is too malformed to do this.
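+//
+// A minimal usage sketch (the stage range and intermediates here are assumed
+// examples, not prescribed by this file):
+//
+//     TReflection reflection(EShReflectionDefault, EShLangVertex, EShLangFragment);
+//     reflection.addStage(EShLangVertex,   *vertexIntermediate);
+//     reflection.addStage(EShLangFragment, *fragmentIntermediate);
+//     int numUniforms = reflection.getNumUniforms();
+//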
+bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
+{
+    if (intermediate.getTreeRoot() == nullptr ||
+        intermediate.getNumEntryPoints() != 1 ||
+        intermediate.isRecursive())
+        return false;
+
+    buildAttributeReflection(stage, intermediate);
+
+    TReflectionTraverser it(intermediate, *this);
+
+    for (auto& sequnence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) {
+        if (sequnence->getAsAggregate() != nullptr) {
+            if (sequnence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) {
+                it.updateStageMasks = false;
+                TIntermAggregate* linkerObjects = sequnence->getAsAggregate();
+                for (auto& sequnence : linkerObjects->getSequence()) {
+                    auto pNode = sequnence->getAsSymbolNode();
+                    if (pNode != nullptr && pNode->getQualifier().storage == EvqUniform &&
+                        (options & EShReflectionSharedStd140Blocks)) {
+                        if (pNode->getBasicType() == EbtBlock) {
+                            // collect std140 and shared uniform blocks from the AST
+                            if (pNode->getQualifier().layoutPacking == ElpStd140 ||
+                                pNode->getQualifier().layoutPacking == ElpShared) {
+                                pNode->traverse(&it);
+                            }
+                        }
+                    }
+                }
+            } else {
+                // This traverser visits all functions in the AST.
+                // To reflect uncalled functions as well, set the link message EShMsgKeepUncalled.
+                // When EShMsgKeepUncalled is set, all functions are kept in the AST, even
+                // uncalled ones, which also keeps any uniform variables they reference in
+                // the reflection.
+                //
+                // To reflect only live nodes, use a default link message or leave
+                // EShMsgKeepUncalled unset; the linker then drops uncalled functions from
+                // the AST, so traversing every function node is equivalent to traversing
+                // only the live functions.
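+                // (For example, a link step such as 'program.link(EShMsgKeepUncalled)'
+                // would then also reflect uniforms referenced only by uncalled helpers.)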
+ it.updateStageMasks = true; + sequnence->getAsAggregate()->traverse(&it); + } + } + } + it.updateStageMasks = true; + + buildCounterIndices(intermediate); + buildUniformStageMask(intermediate); + + return true; +} + +void TReflection::dump() +{ + printf("Uniform reflection:\n"); + for (size_t i = 0; i < indexToUniform.size(); ++i) + indexToUniform[i].dump(); + printf("\n"); + + printf("Uniform block reflection:\n"); + for (size_t i = 0; i < indexToUniformBlock.size(); ++i) + indexToUniformBlock[i].dump(); + printf("\n"); + + printf("Buffer variable reflection:\n"); + for (size_t i = 0; i < indexToBufferVariable.size(); ++i) + indexToBufferVariable[i].dump(); + printf("\n"); + + printf("Buffer block reflection:\n"); + for (size_t i = 0; i < indexToBufferBlock.size(); ++i) + indexToBufferBlock[i].dump(); + printf("\n"); + + printf("Pipeline input reflection:\n"); + for (size_t i = 0; i < indexToPipeInput.size(); ++i) + indexToPipeInput[i].dump(); + printf("\n"); + + printf("Pipeline output reflection:\n"); + for (size_t i = 0; i < indexToPipeOutput.size(); ++i) + indexToPipeOutput[i].dump(); + printf("\n"); + + if (getLocalSize(0) > 1) { + static const char* axis[] = { "X", "Y", "Z" }; + + for (int dim=0; dim<3; ++dim) + if (getLocalSize(dim) > 1) + printf("Local size %s: %u\n", axis[dim], getLocalSize(dim)); + + printf("\n"); + } + + // printf("Live names\n"); + // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it) + // printf("%s: %d\n", it->first.c_str(), it->second); + // printf("\n"); +} + +} // end namespace glslang + +#endif // GLSLANG_WEB diff --git a/dep/glslang/glslang/MachineIndependent/reflection.h b/dep/glslang/glslang/MachineIndependent/reflection.h new file mode 100644 index 000000000..0c33de459 --- /dev/null +++ b/dep/glslang/glslang/MachineIndependent/reflection.h @@ -0,0 +1,223 @@ +// +// Copyright (C) 2013-2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+//
+
+#ifndef GLSLANG_WEB
+
+#ifndef _REFLECTION_INCLUDED
+#define _REFLECTION_INCLUDED
+
+#include "../Public/ShaderLang.h"
+#include "../Include/Types.h"
+
+#include <list>
+#include <set>
+
+//
+// A reflection database and its interface, consistent with the OpenGL API reflection queries.
+//
+
+namespace glslang {
+
+class TIntermediate;
+class TIntermAggregate;
+class TReflectionTraverser;
+
+// The full reflection database
+class TReflection {
+public:
+    TReflection(EShReflectionOptions opts, EShLanguage first, EShLanguage last)
+        : options(opts), firstStage(first), lastStage(last), badReflection(TObjectReflection::badReflection())
+    {
+        for (int dim=0; dim<3; ++dim)
+            localSize[dim] = 0;
+    }
+
+    virtual ~TReflection() {}
+
+    // grow the reflection stage by stage
+    bool addStage(EShLanguage, const TIntermediate&);
+
+    // for mapping a uniform index to a uniform object's description
+    int getNumUniforms() { return (int)indexToUniform.size(); }
+    const TObjectReflection& getUniform(int i) const
+    {
+        if (i >= 0 && i < (int)indexToUniform.size())
+            return indexToUniform[i];
+        else
+            return badReflection;
+    }
+
+    // for mapping a block index to the block's description
+    int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); }
+    const TObjectReflection& getUniformBlock(int i) const
+    {
+        if (i >= 0 && i < (int)indexToUniformBlock.size())
+            return indexToUniformBlock[i];
+        else
+            return badReflection;
+    }
+
+    // for mapping a pipeline input index to the input's description
+    int getNumPipeInputs() { return (int)indexToPipeInput.size(); }
+    const TObjectReflection& getPipeInput(int i) const
+    {
+        if (i >= 0 && i < (int)indexToPipeInput.size())
+            return indexToPipeInput[i];
+        else
+            return badReflection;
+    }
+
+    // for mapping a pipeline output index to the output's description
+    int getNumPipeOutputs() { return (int)indexToPipeOutput.size(); }
+    const TObjectReflection& getPipeOutput(int i) const
+    {
+        if (i >= 0 && i < (int)indexToPipeOutput.size())
+            return indexToPipeOutput[i];
+        else
+            return badReflection;
+    }
+
+    // for mapping from an atomic counter to the uniform index
+    int getNumAtomicCounters() const { return (int)atomicCounterUniformIndices.size(); }
+    const TObjectReflection& getAtomicCounter(int i) const
+    {
+        if (i >= 0 && i < (int)atomicCounterUniformIndices.size())
+            return getUniform(atomicCounterUniformIndices[i]);
+        else
+            return badReflection;
+    }
+
+    // for mapping a buffer variable index to a buffer variable object's description
+    int getNumBufferVariables() { return (int)indexToBufferVariable.size(); }
+    const TObjectReflection& getBufferVariable(int i) const
+    {
+        if (i >= 0 && i < (int)indexToBufferVariable.size())
+            return indexToBufferVariable[i];
+        else
+            return badReflection;
+    }
+
+    // for mapping a storage block index to the storage block's description
+    int getNumStorageBuffers() const { return (int)indexToBufferBlock.size(); }
+    const TObjectReflection& getStorageBufferBlock(int i) const
+    {
+        if (i >= 0 && i < (int)indexToBufferBlock.size())
+            return indexToBufferBlock[i];
+        else
+            return badReflection;
+    }
+
+    // for mapping any name to its index (block names, uniform names and input/output names)
+    int getIndex(const char* name) const
+    {
+        TNameToIndex::const_iterator it = nameToIndex.find(name);
+        if (it == nameToIndex.end())
+            return -1;
+        else
+            return it->second;
+    }
+
+    // see getIndex(const char*)
+    int getIndex(const TString& name) const { return getIndex(name.c_str()); }
+
+    // for mapping any name to its index (only pipe input/output names)
+    int getPipeIOIndex(const char* name, const bool inOrOut) const
+    {
+        TNameToIndex::const_iterator it = inOrOut ? pipeInNameToIndex.find(name) : pipeOutNameToIndex.find(name);
+        if (it == (inOrOut ? pipeInNameToIndex.end() : pipeOutNameToIndex.end()))
+            return -1;
+        else
+            return it->second;
+    }
+
+    // see getPipeIOIndex(const char*, const bool)
+    int getPipeIOIndex(const TString& name, const bool inOrOut) const { return getPipeIOIndex(name.c_str(), inOrOut); }
+
+    // Thread local size
+    unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; }
+
+    void dump();
+
+protected:
+    friend class glslang::TReflectionTraverser;
+
+    void buildCounterIndices(const TIntermediate&);
+    void buildUniformStageMask(const TIntermediate& intermediate);
+    void buildAttributeReflection(EShLanguage, const TIntermediate&);
+
+    // Need a TString hash: typedef std::unordered_map<TString, int> TNameToIndex;
+    typedef std::map<TString, int> TNameToIndex;
+    typedef std::vector<TObjectReflection> TMapIndexToReflection;
+    typedef std::vector<int> TIndices;
+
+    TMapIndexToReflection& GetBlockMapForStorage(TStorageQualifier storage)
+    {
+        if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
+            return indexToBufferBlock;
+        return indexToUniformBlock;
+    }
+    TMapIndexToReflection& GetVariableMapForStorage(TStorageQualifier storage)
+    {
+        if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
+            return indexToBufferVariable;
+        return indexToUniform;
+    }
+
+    EShReflectionOptions options;
+
+    EShLanguage firstStage;
+    EShLanguage lastStage;
+
+    TObjectReflection badReflection;  // return for queries of -1 or generally out of range; has the expected descriptions within it for this
+    TNameToIndex nameToIndex;         // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed
+    TNameToIndex pipeInNameToIndex;   // maps pipe in names to indexes; this is a fix to separate pipe I/O from uniforms and buffers
+    TNameToIndex pipeOutNameToIndex;  // maps pipe out names to indexes; this is a fix to separate pipe I/O from uniforms and buffers
+    TMapIndexToReflection indexToUniform;
+    TMapIndexToReflection indexToUniformBlock;
+    TMapIndexToReflection indexToBufferVariable;
+    TMapIndexToReflection indexToBufferBlock;
+    TMapIndexToReflection indexToPipeInput;
+    TMapIndexToReflection indexToPipeOutput;
+    TIndices atomicCounterUniformIndices;
+
+    unsigned int localSize[3];
+};
+
+} // end namespace glslang
+
+#endif // _REFLECTION_INCLUDED
+
+#endif // GLSLANG_WEB
diff --git a/dep/glslang/glslang/OSDependent/Unix/ossource.cpp b/dep/glslang/glslang/OSDependent/Unix/ossource.cpp
new file mode 100644
index 000000000..3f029f023
--- /dev/null
+++ b/dep/glslang/glslang/OSDependent/Unix/ossource.cpp
@@ -0,0 +1,207 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// This file contains the Linux-specific functions
+//
+#include "../osinclude.h"
+#include "../../../OGLCompilersDLL/InitializeDll.h"
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <cstdio>
+#include <sys/time.h>
+
+#if !defined(__Fuchsia__)
+#include <sys/resource.h>
+#endif
+
+namespace glslang {
+
+//
+// Thread cleanup
+//
+
+//
+// Wrapper for Linux call to DetachThread. This is required as pthread_cleanup_push() expects
+// the cleanup routine to return void.
+//
+static void DetachThreadLinux(void *)
+{
+    DetachThread();
+}
+
+//
+// Registers cleanup handler, sets cancel type and state, and executes the thread specific
+// cleanup handler. This function will be called in the Standalone.cpp for regression
+// testing. When OpenGL applications are run with the driver code, Linux OS does the
+// thread cleanup.
+//
+void OS_CleanupThreadData(void)
+{
+#if defined(__ANDROID__) || defined(__Fuchsia__)
+    DetachThreadLinux(NULL);
+#else
+    int old_cancel_state, old_cancel_type;
+    void *cleanupArg = NULL;
+
+    //
+    // Set thread cancel state and push cleanup handler.
+    //
+    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancel_state);
+    pthread_cleanup_push(DetachThreadLinux, (void *) cleanupArg);
+
+    //
+    // Put the thread in deferred cancellation mode.
+    //
+    pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_cancel_type);
+
+    //
+    // Pop cleanup handler and execute it prior to unregistering the cleanup handler.
+    //
+    pthread_cleanup_pop(1);
+
+    //
+    // Restore the thread's previous cancellation mode.
+    // (Note: restore the saved cancel *type* here; the saved cancel state was
+    // captured by pthread_setcancelstate above.)
+    //
+    pthread_setcanceltype(old_cancel_type, NULL);
+#endif
+}
+
+//
+// Thread Local Storage Operations
+//
+inline OS_TLSIndex PthreadKeyToTLSIndex(pthread_key_t key)
+{
+    return (OS_TLSIndex)((uintptr_t)key + 1);
+}
+
+inline pthread_key_t TLSIndexToPthreadKey(OS_TLSIndex nIndex)
+{
+    return (pthread_key_t)((uintptr_t)nIndex - 1);
+}
+
+OS_TLSIndex OS_AllocTLSIndex()
+{
+    pthread_key_t pPoolIndex;
+
+    //
+    // Create global pool key.
+    //
+    if ((pthread_key_create(&pPoolIndex, NULL)) != 0) {
+        assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
+        return OS_INVALID_TLS_INDEX;
+    }
+    else
+        return PthreadKeyToTLSIndex(pPoolIndex);
+}
+
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
+{
+    if (nIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
+        return false;
+    }
+
+    if (pthread_setspecific(TLSIndexToPthreadKey(nIndex), lpvValue) == 0)
+        return true;
+    else
+        return false;
+}
+
+void* OS_GetTLSValue(OS_TLSIndex nIndex)
+{
+    //
+    // This function should return 0 if nIndex is invalid.
+    //
+    assert(nIndex != OS_INVALID_TLS_INDEX);
+    return pthread_getspecific(TLSIndexToPthreadKey(nIndex));
+}
+
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
+{
+    if (nIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "OS_FreeTLSIndex(): Invalid TLS Index");
+        return false;
+    }
+
+    //
+    // Delete the global pool key.
+    //
+    if (pthread_key_delete(TLSIndexToPthreadKey(nIndex)) == 0)
+        return true;
+    else
+        return false;
+}
+
+namespace {
+    pthread_mutex_t gMutex;
+}
+
+void InitGlobalLock()
+{
+    pthread_mutexattr_t mutexattr;
+    pthread_mutexattr_init(&mutexattr);
+    pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init(&gMutex, &mutexattr);
+}
+
+void GetGlobalLock()
+{
+    pthread_mutex_lock(&gMutex);
+}
+
+void ReleaseGlobalLock()
+{
+    pthread_mutex_unlock(&gMutex);
+}
+
+// #define DUMP_COUNTERS
+
+void OS_DumpMemoryCounters()
+{
+#ifdef DUMP_COUNTERS
+    struct rusage usage;
+
+    if (getrusage(RUSAGE_SELF, &usage) == 0)
+        printf("Working set size: %ld\n", usage.ru_maxrss * 1024);
+#else
+    printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
+#endif
+}
+
+} // end namespace glslang
diff --git a/dep/glslang/glslang/OSDependent/Windows/ossource.cpp b/dep/glslang/glslang/OSDependent/Windows/ossource.cpp
new file mode 100644
index 000000000..870840c56
--- /dev/null
+++ b/dep/glslang/glslang/OSDependent/Windows/ossource.cpp
@@ -0,0 +1,147 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../osinclude.h"
+
+#define STRICT
+#define VC_EXTRALEAN 1
+#include <windows.h>
+#include <cassert>
+#include <process.h>
+#include <psapi.h>
+#include <cstdio>
+#include <cstdint>
+
+//
+// This file contains the Windows-OS-specific functions
+//
+
+#if !(defined(_WIN32) || defined(_WIN64))
+#error Trying to build a Windows-specific file in a non-Windows build.
+#endif
+
+namespace glslang {
+
+inline OS_TLSIndex ToGenericTLSIndex (DWORD handle)
+{
+    return (OS_TLSIndex)((uintptr_t)handle + 1);
+}
+
+inline DWORD ToNativeTLSIndex (OS_TLSIndex nIndex)
+{
+    return (DWORD)((uintptr_t)nIndex - 1);
+}
+
+//
+// Thread Local Storage Operations
+//
+OS_TLSIndex OS_AllocTLSIndex()
+{
+    DWORD dwIndex = TlsAlloc();
+    if (dwIndex == TLS_OUT_OF_INDEXES) {
+        assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
+        return OS_INVALID_TLS_INDEX;
+    }
+
+    return ToGenericTLSIndex(dwIndex);
+}
+
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
+{
+    if (nIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
+        return false;
+    }
+
+    if (TlsSetValue(ToNativeTLSIndex(nIndex), lpvValue))
+        return true;
+    else
+        return false;
+}
+
+void* OS_GetTLSValue(OS_TLSIndex nIndex)
+{
+    assert(nIndex != OS_INVALID_TLS_INDEX);
+    return TlsGetValue(ToNativeTLSIndex(nIndex));
+}
+
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
+{
+    if (nIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "OS_FreeTLSIndex(): Invalid TLS Index");
+        return false;
+    }
+
+    if (TlsFree(ToNativeTLSIndex(nIndex)))
+        return true;
+    else
+        return false;
+}
+
+HANDLE GlobalLock;
+
+void InitGlobalLock()
+{
+    GlobalLock = CreateMutex(0, false, 0);
+}
+
+void GetGlobalLock()
+{
+    WaitForSingleObject(GlobalLock, INFINITE);
+}
+
+void ReleaseGlobalLock()
+{
+    ReleaseMutex(GlobalLock);
+}
+
+unsigned int __stdcall EnterGenericThread (void* entry)
+{
+    return ((TThreadEntrypoint)entry)(0);
+}
+
+//#define DUMP_COUNTERS
+
+void OS_DumpMemoryCounters()
+{
+#ifdef DUMP_COUNTERS
+    PROCESS_MEMORY_COUNTERS counters;
+    GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters));
+    printf("Working set size: %zu\n", (size_t)counters.WorkingSetSize);
+#else
+    printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
+#endif
+}
+
+} // namespace glslang
diff --git a/dep/glslang/glslang/OSDependent/osinclude.h b/dep/glslang/glslang/OSDependent/osinclude.h
new file mode 100644
index 000000000..218abe4f2
--- /dev/null
+++ b/dep/glslang/glslang/OSDependent/osinclude.h
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef __OSINCLUDE_H
+#define __OSINCLUDE_H
+
+namespace glslang {
+
+//
+// Thread Local Storage Operations
+//
+typedef void* OS_TLSIndex;
+#define OS_INVALID_TLS_INDEX ((void*)0)
+
+OS_TLSIndex OS_AllocTLSIndex();
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue);
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex);
+void* OS_GetTLSValue(OS_TLSIndex nIndex);
+
+void InitGlobalLock();
+void GetGlobalLock();
+void ReleaseGlobalLock();
+
+typedef unsigned int (*TThreadEntrypoint)(void*);
+
+void OS_CleanupThreadData(void);
+
+void OS_DumpMemoryCounters();
+
+} // end namespace glslang
+
+#endif // __OSINCLUDE_H
diff --git a/dep/glslang/glslang/Public/ShaderLang.h b/dep/glslang/glslang/Public/ShaderLang.h
new file mode 100644
index 000000000..ee23556d9
--- /dev/null
+++ b/dep/glslang/glslang/Public/ShaderLang.h
@@ -0,0 +1,928 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef _COMPILER_INTERFACE_INCLUDED_
+#define _COMPILER_INTERFACE_INCLUDED_
+
+#include "../Include/ResourceLimits.h"
+#include "../MachineIndependent/Versions.h"
+
+#include <cstring>
+#include <vector>
+
+#ifdef _WIN32
+#define C_DECL __cdecl
+//#ifdef SH_EXPORTING
+//    #define SH_IMPORT_EXPORT __declspec(dllexport)
+//#else
+//    #define SH_IMPORT_EXPORT __declspec(dllimport)
+//#endif
+#define SH_IMPORT_EXPORT
+#else
+#define SH_IMPORT_EXPORT
+#define C_DECL
+#endif
+
+//
+// This is the platform independent interface between an OGL driver
+// and the shading language compiler/linker.
+//
+
+#ifdef __cplusplus
+    extern "C" {
+#endif
+
+// This should always increase, as some paths do not consume
+// a more major number.
+// It should increment by one when new functionality is added.
+#define GLSLANG_MINOR_VERSION 14
+
+//
+// Call before doing any other compiler/linker operations.
+//
+// (Call once per process, not once per thread.)
+//
+SH_IMPORT_EXPORT int ShInitialize();
+
+//
+// Call this at process shutdown to clean up memory.
+//
+SH_IMPORT_EXPORT int ShFinalize();
+
+//
+// Types of languages the compiler can consume.
+//
+typedef enum {
+    EShLangVertex,
+    EShLangTessControl,
+    EShLangTessEvaluation,
+    EShLangGeometry,
+    EShLangFragment,
+    EShLangCompute,
+    EShLangRayGen,
+    EShLangRayGenNV = EShLangRayGen,
+    EShLangIntersect,
+    EShLangIntersectNV = EShLangIntersect,
+    EShLangAnyHit,
+    EShLangAnyHitNV = EShLangAnyHit,
+    EShLangClosestHit,
+    EShLangClosestHitNV = EShLangClosestHit,
+    EShLangMiss,
+    EShLangMissNV = EShLangMiss,
+    EShLangCallable,
+    EShLangCallableNV = EShLangCallable,
+    EShLangTaskNV,
+    EShLangMeshNV,
+    LAST_ELEMENT_MARKER(EShLangCount),
+} EShLanguage;  // would be better as stage, but this is ancient now
+
+typedef enum : unsigned {
+    EShLangVertexMask         = (1 << EShLangVertex),
+    EShLangTessControlMask    = (1 << EShLangTessControl),
+    EShLangTessEvaluationMask = (1 << EShLangTessEvaluation),
+    EShLangGeometryMask       = (1 << EShLangGeometry),
+    EShLangFragmentMask       = (1 << EShLangFragment),
+    EShLangComputeMask        = (1 << EShLangCompute),
+    EShLangRayGenMask         = (1 << EShLangRayGen),
+    EShLangRayGenNVMask       = EShLangRayGenMask,
+    EShLangIntersectMask      = (1 << EShLangIntersect),
+    EShLangIntersectNVMask    = EShLangIntersectMask,
+    EShLangAnyHitMask         = (1 << EShLangAnyHit),
+    EShLangAnyHitNVMask       = EShLangAnyHitMask,
+    EShLangClosestHitMask     = (1 << EShLangClosestHit),
+    EShLangClosestHitNVMask   = EShLangClosestHitMask,
+    EShLangMissMask           = (1 << EShLangMiss),
+    EShLangMissNVMask         = EShLangMissMask,
+    EShLangCallableMask       = (1 << EShLangCallable),
+    EShLangCallableNVMask     = EShLangCallableMask,
+    EShLangTaskNVMask         = (1 << EShLangTaskNV),
+    EShLangMeshNVMask         = (1 << EShLangMeshNV),
+    LAST_ELEMENT_MARKER(EShLanguageMaskCount),
+} EShLanguageMask;
+
+namespace glslang {
+
+class TType;
+
+typedef enum {
+    EShSourceNone,
+    EShSourceGlsl,  // GLSL, includes ESSL (OpenGL ES GLSL)
+    EShSourceHlsl,  // HLSL
+    LAST_ELEMENT_MARKER(EShSourceCount),
+} EShSource;  // if EShLanguage were EShStage, this could be EShLanguage instead
+
+typedef enum {
+    EShClientNone,  // use when there is no client, e.g. for validation
+    EShClientVulkan,
+    EShClientOpenGL,
+    LAST_ELEMENT_MARKER(EShClientCount),
+} EShClient;
+
+typedef enum {
+    EShTargetNone,
+    EShTargetSpv,                 // SPIR-V (preferred spelling)
+    EshTargetSpv = EShTargetSpv,  // legacy spelling
+    LAST_ELEMENT_MARKER(EShTargetCount),
+} EShTargetLanguage;
+
+typedef enum {
+    EShTargetVulkan_1_0 = (1 << 22),              // Vulkan 1.0
+    EShTargetVulkan_1_1 = (1 << 22) | (1 << 12),  // Vulkan 1.1
+    EShTargetVulkan_1_2 = (1 << 22) | (2 << 12),  // Vulkan 1.2
+    EShTargetOpenGL_450 = 450,                    // OpenGL
+    LAST_ELEMENT_MARKER(EShTargetClientVersionCount),
+} EShTargetClientVersion;
+
+typedef EShTargetClientVersion EshTargetClientVersion;
+
+typedef enum {
+    EShTargetSpv_1_0 = (1 << 16),             // SPIR-V 1.0
+    EShTargetSpv_1_1 = (1 << 16) | (1 << 8),  // SPIR-V 1.1
+    EShTargetSpv_1_2 = (1 << 16) | (2 << 8),  // SPIR-V 1.2
+    EShTargetSpv_1_3 = (1 << 16) | (3 << 8),  // SPIR-V 1.3
+    EShTargetSpv_1_4 = (1 << 16) | (4 << 8),  // SPIR-V 1.4
+    EShTargetSpv_1_5 = (1 << 16) | (5 << 8),  // SPIR-V 1.5
+    LAST_ELEMENT_MARKER(EShTargetLanguageVersionCount),
+} EShTargetLanguageVersion;
+
+struct TInputLanguage {
+    EShSource languageFamily;  // redundant information with other input, this one overrides when not EShSourceNone
+    EShLanguage stage;         // redundant information with other input, this one overrides when not EShSourceNone
+    EShClient dialect;
+    int dialectVersion;        // version of client's language definition, not the client (when not EShClientNone)
+};
+
+struct TClient {
+    EShClient client;
+    EShTargetClientVersion version;  // version of client itself (not the client's input dialect)
+};
+
+struct TTarget {
+    EShTargetLanguage language;
+    EShTargetLanguageVersion version;  // version to target, if SPIR-V, defined by "word 1" of the SPIR-V header
+    bool hlslFunctionality1;           // can target hlsl_functionality1 extension(s)
+};
+
+// All source/client/target versions and settings.
+// Can override previous methods of setting, when items are set here.
+// Expected to grow, as more are added, rather than growing parameter lists.
+struct TEnvironment {
+    TInputLanguage input;  // definition of the input language
+    TClient client;        // what client is the overall compilation being done for?
+    TTarget target;        // what to generate
+};
+
+const char* StageName(EShLanguage);
+
+} // end namespace glslang
+
+//
+// Types of output the linker will create.
+//
+typedef enum {
+    EShExVertexFragment,
+    EShExFragment
+} EShExecutable;
+
+//
+// Optimization level for the compiler.
+//
+typedef enum {
+    EShOptNoGeneration,
+    EShOptNone,
+    EShOptSimple,  // Optimizations that can be done quickly
+    EShOptFull,    // Optimizations that will take more time
+    LAST_ELEMENT_MARKER(EshOptLevelCount),
+} EShOptimizationLevel;
+
+//
+// Texture and Sampler transformation mode.
+//
+typedef enum {
+    EShTexSampTransKeep,  // keep textures and samplers as is (default)
+    EShTexSampTransUpgradeTextureRemoveSampler,  // change texture w/o embedded sampler into sampled texture and throw away all samplers
+    LAST_ELEMENT_MARKER(EShTexSampTransCount),
+} EShTextureSamplerTransformMode;
+
+//
+// Message choices for what errors and warnings are given.
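+//
+// These are bit-flag values that get OR'd together. For example, a GLSL-for-Vulkan
+// compile that should follow SPIR-V generation rules might pass (a sketch, not a
+// required combination):
+//
+//     EShMessages messages = EShMessages(EShMsgSpvRules | EShMsgVulkanRules);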
+//
+enum EShMessages : unsigned {
+    EShMsgDefault          = 0,         // default is to give all required errors and extra warnings
+    EShMsgRelaxedErrors    = (1 << 0),  // be liberal in accepting input
+    EShMsgSuppressWarnings = (1 << 1),  // suppress all warnings, except those required by the specification
+    EShMsgAST              = (1 << 2),  // print the AST intermediate representation
+    EShMsgSpvRules         = (1 << 3),  // issue messages for SPIR-V generation
+    EShMsgVulkanRules      = (1 << 4),  // issue messages for Vulkan-requirements of GLSL for SPIR-V
+    EShMsgOnlyPreprocessor = (1 << 5),  // only print out errors produced by the preprocessor
+    EShMsgReadHlsl         = (1 << 6),  // use HLSL parsing rules and semantics
+    EShMsgCascadingErrors  = (1 << 7),  // get cascading errors; risks error-recovery issues, instead of an early exit
+    EShMsgKeepUncalled     = (1 << 8),  // for testing, don't eliminate uncalled functions
+    EShMsgHlslOffsets      = (1 << 9),  // allow block offsets to follow HLSL rules instead of GLSL rules
+    EShMsgDebugInfo        = (1 << 10), // save debug information
+    EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
+    EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
+    EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (right now only for samplers)
+    EShMsgBuiltinSymbolTable = (1 << 14), // print the builtin symbol table
+    LAST_ELEMENT_MARKER(EShMsgCount),
+};
+
+//
+// Options for building reflection
+//
+typedef enum {
+    EShReflectionDefault            = 0,        // default is original behaviour before options were added
+    EShReflectionStrictArraySuffix  = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
+    EShReflectionBasicArraySuffix   = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
+    EShReflectionIntermediateIO     = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
+    EShReflectionSeparateBuffers    = (1 << 3), // buffer variables and buffer blocks are reflected separately
+    EShReflectionAllBlockVariables  = (1 << 4), // reflect all variables in blocks, even if they are inactive
+    EShReflectionUnwrapIOBlocks     = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
+    EShReflectionSharedStd140Blocks = (1 << 6), // Apply std140/shared rules for ubo to ssbo
+    LAST_ELEMENT_MARKER(EShReflectionCount),
+} EShReflectionOptions;
+
+//
+// Build a table for bindings. This can be used for locating
+// attributes, uniforms, globals, etc., as needed.
+//
+typedef struct {
+    const char* name;
+    int binding;
+} ShBinding;
+
+typedef struct {
+    int numBindings;
+    ShBinding* bindings;  // array of bindings
+} ShBindingTable;
+
+//
+// ShHandle held by but opaque to the driver. It is allocated,
+// managed, and de-allocated by the compiler/linker. Its contents
+// are defined by and used by the compiler and linker. For example,
+// symbol table information and object code passed from the compiler
+// to the linker can be stored where ShHandle points.
+//
+// If handle creation fails, 0 will be returned.
+//
+typedef void* ShHandle;
+
+//
+// Driver calls these to create and destroy compiler/linker
+// objects.
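+//
+// An illustrative call sequence for this legacy interface (the shader text and
+// resource table here are placeholders, not part of the interface):
+//
+//     ShInitialize();
+//     ShHandle compiler = ShConstructCompiler(EShLangVertex, 0 /* debugOptions */);
+//     const char* strings[] = { vertexSourceText };   // placeholder source string
+//     int ok = ShCompile(compiler, strings, 1, nullptr, EShOptNone, &resources, 0);
+//     const char* log = ShGetInfoLog(compiler);       // may be 0, per the contract below
+//     ShDestruct(compiler);
+//     ShFinalize();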
+//
+SH_IMPORT_EXPORT ShHandle ShConstructCompiler(const EShLanguage, int debugOptions);  // one per shader
+SH_IMPORT_EXPORT ShHandle ShConstructLinker(const EShExecutable, int debugOptions);  // one per shader pair
+SH_IMPORT_EXPORT ShHandle ShConstructUniformMap();  // one per uniform namespace (currently entire program object)
+SH_IMPORT_EXPORT void ShDestruct(ShHandle);
+
+//
+// The return value of ShCompile is boolean, non-zero indicating
+// success.
+//
+// The info-log should be written by ShCompile into
+// ShHandle, so it can answer future queries.
+//
+SH_IMPORT_EXPORT int ShCompile(
+    const ShHandle,
+    const char* const shaderStrings[],
+    const int numStrings,
+    const int* lengths,
+    const EShOptimizationLevel,
+    const TBuiltInResource *resources,
+    int debugOptions,
+    int defaultVersion = 110,             // use 100 for ES environment, overridden by #version in shader
+    bool forwardCompatible = false,       // give errors for use of deprecated features
+    EShMessages messages = EShMsgDefault  // warnings and errors
+    );
+
+SH_IMPORT_EXPORT int ShLinkExt(
+    const ShHandle,      // linker object
+    const ShHandle h[],  // compiler objects to link together
+    const int numHandles);
+
+//
+// ShSetEncryptionMethod is a place-holder for specifying
+// how source code is encrypted.
+//
+SH_IMPORT_EXPORT void ShSetEncryptionMethod(ShHandle);
+
+//
+// All the following return 0 if the information is not
+// available in the object passed down, or the object is bad.
+//
+SH_IMPORT_EXPORT const char* ShGetInfoLog(const ShHandle);
+SH_IMPORT_EXPORT const void* ShGetExecutable(const ShHandle);
+SH_IMPORT_EXPORT int ShSetVirtualAttributeBindings(const ShHandle, const ShBindingTable*);  // to detect user aliasing
+SH_IMPORT_EXPORT int ShSetFixedAttributeBindings(const ShHandle, const ShBindingTable*);    // to force any physical mappings
+//
+// Tell the linker to never assign a vertex attribute to this list of physical attributes
+//
+SH_IMPORT_EXPORT int ShExcludeAttributes(const ShHandle, int *attributes, int count);
+
+//
+// Returns the location ID of the named uniform.
+// Returns -1 if error.
+//
+SH_IMPORT_EXPORT int ShGetUniformLocation(const ShHandle uniformMap, const char* name);
+
+#ifdef __cplusplus
+    }  // end extern "C"
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////
+//
+// Deferred-Lowering C++ Interface
+// -----------------------------------
+//
+// Below is a new alternate C++ interface, which deprecates the above
+// opaque handle-based interface.
+//
+// The below is further designed to handle multiple compilation units per stage, where
+// the intermediate results, including the parse tree, are preserved until link time,
+// rather than the above interface which is designed to have each compilation unit
+// lowered at compile time. In the above model, linking occurs on the lowered results,
+// whereas in this model intra-stage linking can occur at the parse tree
+// (treeRoot in TIntermediate) level, and then a full stage can be lowered.
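+//
+// A minimal sketch of that flow (the source text is a placeholder, and
+// DefaultTBuiltInResource is the resource table declared in
+// StandAlone/ResourceLimits.h in this tree):
+//
+//     glslang::InitializeProcess();              // once per process
+//     glslang::TShader shader(EShLangFragment);
+//     const char* text = fragmentSourceText;     // placeholder GLSL string
+//     shader.setStrings(&text, 1);
+//     shader.setEnvInput(glslang::EShSourceGlsl, EShLangFragment, glslang::EShClientVulkan, 100);
+//     shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_0);
+//     shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_0);
+//     if (shader.parse(&glslang::DefaultTBuiltInResource, 110, false, EShMsgDefault)) {
+//         glslang::TProgram program;             // destruct before 'shader'
+//         program.addShader(&shader);
+//         program.link(EShMsgDefault);
+//     }
+//     glslang::FinalizeProcess();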
+//
+
+#include <list>
+#include <string>
+#include <utility>
+
+class TCompiler;
+class TInfoSink;
+
+namespace glslang {
+
+const char* GetEsslVersionString();
+const char* GetGlslVersionString();
+int GetKhronosToolId();
+
+class TIntermediate;
+class TProgram;
+class TPoolAllocator;
+
+// Call this exactly once per process before using anything else
+bool InitializeProcess();
+
+// Call once per process to tear down everything
+void FinalizeProcess();
+
+// Resource type for IO resolver
+enum TResourceType {
+    EResSampler,
+    EResTexture,
+    EResImage,
+    EResUbo,
+    EResSsbo,
+    EResUav,
+    EResCount
+};
+
+// Make one TShader per shader that you will link into a program. Then
+//  - provide the shader through setStrings() or setStringsWithLengths()
+//  - optionally call setEnv*(), see below for more detail
+//  - optionally use setPreamble() to set a special shader string that will be
+//    processed before all others but won't affect the validity of #version
+//  - optionally call addProcesses() for each setting/transform,
+//    see comment for class TProcesses
+//  - call parse(): source language and target environment must be selected
+//    either by correct setting of EShMessages sent to parse(), or by
+//    explicitly calling setEnv*()
+//  - query the info logs
+//
+// N.B.: Does not yet support having the same TShader instance being linked into
+// multiple programs.
+//
+// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
+//
+class TShader {
+public:
+    explicit TShader(EShLanguage);
+    virtual ~TShader();
+    void setStrings(const char* const* s, int n);
+    void setStringsWithLengths(const char* const* s, const int* l, int n);
+    void setStringsWithLengthsAndNames(
+        const char* const* s, const int* l, const char* const* names, int n);
+    void setPreamble(const char* s) { preamble = s; }
+    void setEntryPoint(const char* entryPoint);
+    void setSourceEntryPoint(const char* sourceEntryPointName);
+    void addProcesses(const std::vector<std::string>&);
+
+    // IO resolver binding data: see comments in ShaderLang.cpp
+    void setShiftBinding(TResourceType res, unsigned int base);
+    void setShiftSamplerBinding(unsigned int base);  // DEPRECATED: use setShiftBinding
+    void setShiftTextureBinding(unsigned int base);  // DEPRECATED: use setShiftBinding
+    void setShiftImageBinding(unsigned int base);    // DEPRECATED: use setShiftBinding
+    void setShiftUboBinding(unsigned int base);      // DEPRECATED: use setShiftBinding
+    void setShiftUavBinding(unsigned int base);      // DEPRECATED: use setShiftBinding
+    void setShiftCbufferBinding(unsigned int base);  // synonym for setShiftUboBinding
+    void setShiftSsboBinding(unsigned int base);     // DEPRECATED: use setShiftBinding
+    void setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set);
+    void setResourceSetBinding(const std::vector<std::string>& base);
+    void setAutoMapBindings(bool map);
+    void setAutoMapLocations(bool map);
+    void addUniformLocationOverride(const char* name, int loc);
+    void setUniformLocationBase(int base);
+    void setInvertY(bool invert);
+#ifdef ENABLE_HLSL
+    void setHlslIoMapping(bool hlslIoMap);
+    void setFlattenUniformArrays(bool flatten);
+#endif
+    void setNoStorageFormat(bool useUnknownFormat);
+    void setNanMinMaxClamp(bool nanMinMaxClamp);
+    void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
+
+    // For setting up the environment (cleared to nothingness in the constructor).
+    // These must be called so that parsing is done for the right source language and
+    // target environment, either indirectly through TranslateEnvironment() based on
+    // EShMessages et al., or directly by the user.
+    //
+    // setEnvInput:    The input source language and stage. If generating code for a
+    //                 specific client, the input client semantics to use and the
+    //                 version of that client's input semantics to use, otherwise
+    //                 use EShClientNone and version of 0, e.g. for validation mode.
+    //                 Note 'version' does not describe the target environment,
+    //                 just the version of the source dialect to compile under.
+    //
+    //                 See the definitions of TEnvironment, EShSource, EShLanguage,
+    //                 and EShClient for choices and more detail.
+    //
+    // setEnvClient:   The client that will be hosting the execution, and its version.
+    //                 Note 'version' is not the version of the languages involved, but
+    //                 the version of the client environment.
+    //                 Use EShClientNone and version of 0 if there is no client, e.g.
+    //                 for validation mode.
+    //
+    //                 See EShTargetClientVersion for choices.
+    //
+    // setEnvTarget:   The language to translate to when generating code, and that
+    //                 language's version.
+    //                 Use EShTargetNone and version of 0 if there is no client, e.g.
+    //                 for validation mode.
+    //
+    void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
+    {
+        environment.input.languageFamily = lang;
+        environment.input.stage = envStage;
+        environment.input.dialect = client;
+        environment.input.dialectVersion = version;
+    }
+    void setEnvClient(EShClient client, EShTargetClientVersion version)
+    {
+        environment.client.client = client;
+        environment.client.version = version;
+    }
+    void setEnvTarget(EShTargetLanguage lang, EShTargetLanguageVersion version)
+    {
+        environment.target.language = lang;
+        environment.target.version = version;
+    }
+
+    void getStrings(const char* const* &s, int& n) { s = strings; n = numStrings; }
+
+#ifdef ENABLE_HLSL
+    void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
+    bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
+#else
+    bool getEnvTargetHlslFunctionality1() const { return false; }
+#endif
+
+    // Interface to #include handlers.
+    //
+    // To support #include, a client of Glslang does the following:
+    // 1. Call setStringsWithNames to set the source strings and associated
+    //    names.  For example, the names could be the names of the files
+    //    containing the shader sources.
+    // 2. Call parse with an Includer.
+    //
+    // When the Glslang parser encounters an #include directive, it calls
+    // the Includer's include method with the requested include name
+    // together with the current string name.  The returned IncludeResult
+    // contains the fully resolved name of the included source, together
+    // with the source text that should replace the #include directive
+    // in the source stream.  After parsing that source, Glslang will
+    // release the IncludeResult object.
+    class Includer {
+    public:
+        // An IncludeResult contains the resolved name and content of a source
+        // inclusion.
+        struct IncludeResult {
+            IncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, void* userData) :
+                headerName(headerName), headerData(headerData), headerLength(headerLength), userData(userData) { }
+            // For a successful inclusion, the fully resolved name of the requested
+            // include.  For example, in a file system-based includer, full resolution
+            // should convert a relative path name into an absolute path name.
+            // For a failed inclusion, this is an empty string.
+            const std::string headerName;
+            // The content and byte length of the requested inclusion.  The
+            // Includer producing this IncludeResult retains ownership of the
+            // storage.
+            // For a failed inclusion, the header
+            // field points to a string containing error details.
+            const char* const headerData;
+            const size_t headerLength;
+            // Include resolver's context.
+            void* userData;
+        protected:
+            IncludeResult& operator=(const IncludeResult&);
+            IncludeResult();
+        };
+
+        // For both include methods below:
+        //
+        // Resolves an inclusion request by name, current source name,
+        // and include depth.
+        // On success, returns an IncludeResult containing the resolved name
+        // and content of the include.
+        // On failure, returns a nullptr, or an IncludeResult
+        // with an empty string for the headerName and error details in the
+        // header field.
+        // The Includer retains ownership of the contents
+        // of the returned IncludeResult value, and those contents must
+        // remain valid until the releaseInclude method is called on that
+        // IncludeResult object.
+        //
+        // Note "local" vs. "system" is not an "either/or": "local" is an
+        // extra thing to do over "system". Both might get called, as per
+        // the C++ specification.
+
+        // For the "system" or <>-style includes; search the "system" paths.
+        virtual IncludeResult* includeSystem(const char* /*headerName*/,
+                                             const char* /*includerName*/,
+                                             size_t /*inclusionDepth*/) { return nullptr; }
+
+        // For the "local"-only aspect of a "" include. Should not search in the
+        // "system" paths, because on returning a failure, the parser will
+        // call includeSystem() to look in the "system" locations.
+        virtual IncludeResult* includeLocal(const char* /*headerName*/,
+                                            const char* /*includerName*/,
+                                            size_t /*inclusionDepth*/) { return nullptr; }
+
+        // Signals that the parser will no longer use the contents of the
+        // specified IncludeResult.
+        virtual void releaseInclude(IncludeResult*) = 0;
+        virtual ~Includer() {}
+    };
+
+    // Fail all Includer searches
+    class ForbidIncluder : public Includer {
+    public:
+        virtual void releaseInclude(IncludeResult*) override { }
+    };
+
+    bool parse(const TBuiltInResource*, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+               bool forwardCompatible, EShMessages, Includer&);
+
+    bool parse(const TBuiltInResource* res, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+               bool forwardCompatible, EShMessages messages)
+    {
+        TShader::ForbidIncluder includer;
+        return parse(res, defaultVersion, defaultProfile, forceDefaultVersionAndProfile, forwardCompatible, messages, includer);
+    }
+
+    // Equivalent to parse() without a default profile and without forcing defaults.
+    bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages)
+    {
+        return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages);
+    }
+
+    bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages,
+               Includer& includer)
+    {
+        return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer);
+    }
+
+    // NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
+    // is not an officially supported or fully working path.
+    bool preprocess(const TBuiltInResource* builtInResources,
+                    int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+                    bool forwardCompatible, EShMessages message, std::string* outputString,
+                    Includer& includer);
+
+    const char* getInfoLog();
+    const char* getInfoDebugLog();
+    EShLanguage getStage() const { return stage; }
+    TIntermediate* getIntermediate() const { return intermediate; }
+
+protected:
+    TPoolAllocator* pool;
+    EShLanguage stage;
+    TCompiler* compiler;
+    TIntermediate* intermediate;
+    TInfoSink* infoSink;
+    // strings and lengths follow the standard for glShaderSource:
+    //     strings is an array of numStrings pointers to string data.
+    //     lengths can be null, but if not it is an array of numStrings
+    //         integers containing the length of the associated strings.
+    //         if lengths is null or lengths[n] < 0 the associated strings[n] is
+    //         assumed to be null-terminated.
+    // stringNames is the optional names for all the strings. If stringNames
+    // is null, then none of the strings has a name. If a certain element in
+    // stringNames is null, then the corresponding string does not have a name.
+    const char* const* strings;  // explicit code to compile, see previous comment
+    const int* lengths;
+    const char* const* stringNames;
+    int numStrings;              // size of the above arrays
+    const char* preamble;        // string of implicit code to compile before the explicitly provided code
+
+    // a function in the source string can be renamed FROM this TO the name given in setEntryPoint.
+    std::string sourceEntryPointName;
+
+    TEnvironment environment;
+
+    friend class TProgram;
+
+private:
+    TShader& operator=(TShader&);
+};
+
+#ifndef GLSLANG_WEB
+
+//
+// A reflection database and its interface, consistent with the OpenGL API reflection queries.
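+//
+// For example (a sketch; 'program' is a successfully linked TProgram from the
+// interface further below):
+//
+//     if (program.buildReflection(EShReflectionDefault)) {
+//         for (int i = 0; i < program.getNumUniformVariables(); ++i)
+//             printf("uniform: %s\n", program.getUniform(i).name.c_str());
+//     }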
+//
+
+// Data needed for just a single object at the granularity exchanged by the reflection API
+class TObjectReflection {
+public:
+    TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex);
+
+    const TType* getType() const { return type; }
+    int getBinding() const;
+    void dump() const;
+    static TObjectReflection badReflection() { return TObjectReflection(); }
+
+    std::string name;
+    int offset;
+    int glDefineType;
+    int size;                 // data size in bytes for a block, array size for a (non-block) object that's an array
+    int index;
+    int counterIndex;
+    int numMembers;
+    int arrayStride;          // stride of an array variable
+    int topLevelArrayStride;  // stride of the top-level variable in a storage buffer member
+    EShLanguageMask stages;
+
+protected:
+    TObjectReflection()
+        : offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0),
+          topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr)
+    {
+    }
+
+    const TType* type;
+};
+
+class TReflection;
+class TIoMapper;
+struct TVarEntryInfo;
+
+// Allows customizing the binding layout after linking.
+// All used uniform variables will invoke at least validateBinding.
+// If validateBinding returned true then the other resolveBinding,
+// resolveSet, and resolveLocation are invoked to resolve the binding
+// and descriptor set index respectively.
+//
+// Invocations happen in a particular order:
+// 1) all shader inputs
+// 2) all shader outputs
+// 3) all uniforms with binding and set already defined
+// 4) all uniforms with binding but no set defined
+// 5) all uniforms with set but no binding defined
+// 6) all uniforms with no binding and no set defined
+//
+// mapIO will use this resolver in two phases. The first
+// phase is a notification phase, calling the corresponding
+// notify callbacks; this phase ends with a call to endNotifications.
+// Phase two starts directly after the call to endNotifications
+// and calls all other callbacks to validate and to get the
+// bindings, sets, locations, component and color indices.
+//
+// NOTE: Limit checks are still applied to bindings and sets,
+// and may result in an error.
+class TIoMapResolver
+{
+public:
+    virtual ~TIoMapResolver() {}
+
+    // Should return true if the resulting/current binding would be okay.
+    // Basic idea is to do aliasing binding checks with this.
+    virtual bool validateBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Should return a value >= 0 if the current binding should be overridden.
+    // Return -1 if the current binding (including no binding) should be kept.
+    virtual int resolveBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Should return a value >= 0 if the current set should be overridden.
+    // Return -1 if the current set (including no set) should be kept.
+    virtual int resolveSet(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Should return a value >= 0 if the current location should be overridden.
+    // Return -1 if the current location (including no location) should be kept.
+    virtual int resolveUniformLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Should return true if the resulting/current setup would be okay.
+    // Basic idea is to do aliasing checks and reject invalid semantic names.
+    virtual bool validateInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Should return a value >= 0 if the current location should be overridden.
+    // Return -1 if the current location (including no location) should be kept.
+    virtual int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Should return a value >= 0 if the current component index should be overridden.
+    // Return -1 if the current component index (including no index) should be kept.
+    virtual int resolveInOutComponent(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Should return a value >= 0 if the current color index should be overridden.
+    // Return -1 if the current color index (including no index) should be kept.
+    virtual int resolveInOutIndex(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Notification of a uniform variable
+    virtual void notifyBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Notification of an in or out variable
+    virtual void notifyInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
+    // Called by mapIO when it starts its notify pass for the given stage
+    virtual void beginNotifications(EShLanguage stage) = 0;
+    // Called by mapIO when it has finished the notify pass
+    virtual void endNotifications(EShLanguage stage) = 0;
+    // Called by mapIO when it starts its resolve pass for the given stage
+    virtual void beginResolve(EShLanguage stage) = 0;
+    // Called by mapIO when it has finished the resolve pass
+    virtual void endResolve(EShLanguage stage) = 0;
+    // Called by mapIO when it starts its symbol collect for the given stage
+    virtual void beginCollect(EShLanguage stage) = 0;
+    // Called by mapIO when it has finished the symbol collect
+    virtual void endCollect(EShLanguage stage) = 0;
+    // Called by TSlotCollector to resolve storage locations or bindings
+    virtual void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
+    // Called by TSlotCollector to resolve resource locations or bindings
+    virtual void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
+    // Called by mapIO.addStage to set shader stage mask to mark a stage be added to this pipeline
+    virtual void addStage(EShLanguage stage) = 0;
+};
+
+#endif // GLSLANG_WEB
+
+// Make one TProgram per set of shaders that will get linked together. Add all
+// the shaders that are to be linked together. After calling shader.parse()
+// for all shaders, call link().
+//
+// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
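+//
+// That ordering, as a sketch (assumes both shaders already parsed successfully):
+//
+//     glslang::TShader vs(EShLangVertex), fs(EShLangFragment);
+//     // ... setStrings()/parse() on both shaders ...
+//     {
+//         glslang::TProgram program;
+//         program.addShader(&vs);
+//         program.addShader(&fs);
+//         program.link(EShMsgDefault);
+//     } // program is destructed here, while vs and fs are still alive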
+//
+class TProgram {
+public:
+    TProgram();
+    virtual ~TProgram();
+    void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
+    std::list<TShader*>& getShaders(EShLanguage stage) { return stages[stage]; }
+    // Link Validation interface
+    bool link(EShMessages);
+    const char* getInfoLog();
+    const char* getInfoDebugLog();
+
+    TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }
+
+#ifndef GLSLANG_WEB
+
+    // Reflection Interface
+
+    // call first, to do liveness analysis, index mapping, etc.; returns false on failure
+    bool buildReflection(int opts = EShReflectionDefault);
+    unsigned getLocalSize(int dim) const;  // return dim'th local size
+    int getReflectionIndex(const char *name) const;
+    int getReflectionPipeIOIndex(const char* name, const bool inOrOut) const;
+    int getNumUniformVariables() const;
+    const TObjectReflection& getUniform(int index) const;
+    int getNumUniformBlocks() const;
+    const TObjectReflection& getUniformBlock(int index) const;
+    int getNumPipeInputs() const;
+    const TObjectReflection& getPipeInput(int index) const;
+    int getNumPipeOutputs() const;
+    const TObjectReflection& getPipeOutput(int index) const;
+    int getNumBufferVariables() const;
+    const TObjectReflection& getBufferVariable(int index) const;
+    int getNumBufferBlocks() const;
+    const TObjectReflection& getBufferBlock(int index) const;
+    int getNumAtomicCounters() const;
+    const TObjectReflection& getAtomicCounter(int index) const;
+
+    // Legacy Reflection Interface - expressed in terms of above interface
+
+    // can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
+    int getNumLiveUniformVariables() const { return getNumUniformVariables(); }
+
+    // can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
+    int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); }
+
+    // can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
+    int getNumLiveAttributes() const { return getNumPipeInputs(); }
+
+    // can be used for glGetUniformIndices()
+    int getUniformIndex(const char *name) const { return getReflectionIndex(name); }
+
+    int getPipeIOIndex(const char *name, const bool inOrOut) const
+        { return getReflectionPipeIOIndex(name, inOrOut); }
+
+    // can be used for "name" part of glGetActiveUniform()
+    const char *getUniformName(int index) const { return getUniform(index).name.c_str(); }
+
+    // returns the binding number
+    int getUniformBinding(int index) const { return getUniform(index).getBinding(); }
+
+    // returns the shader stages where a uniform is present
+    EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; }
+
+    // can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
+    int getUniformBlockIndex(int index) const { return getUniform(index).index; }
+
+    // can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
+    int getUniformType(int index) const { return getUniform(index).glDefineType; }
+
+    // can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
+    int getUniformBufferOffset(int index) const { return getUniform(index).offset; }
+
+    // can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
+    int getUniformArraySize(int index) const { return getUniform(index).size; }
+
+    // returns a TType*
+    const TType *getUniformTType(int index) const { return getUniform(index).getType(); }
+
+    // can be used for glGetActiveUniformBlockName()
+    const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); }
+
+    // can be used for glGetActiveUniformBlockiv(GL_UNIFORM_BLOCK_DATA_SIZE)
+    int getUniformBlockSize(int index) const { return getUniformBlock(index).size; }
+
+    // returns the block binding number
+    int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); }
+
+    // returns block index of associated counter.
+    int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; }
+
+    // returns a TType*
+    const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); }
+
+    // can be used for glGetActiveAttrib()
+    const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); }
+
+    // can be used for glGetActiveAttrib()
+    int getAttributeType(int index) const { return getPipeInput(index).glDefineType; }
+
+    // returns a TType*
+    const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); }
+
+    void dumpReflection();
+    // I/O mapping: apply base offsets and map live unbound variables
+    // If resolver is not provided it uses the previous approach
+    // and respects auto assignment and offsets.
+    bool mapIO(TIoMapResolver* pResolver = nullptr, TIoMapper* pIoMapper = nullptr);
+#endif
+
+protected:
+    bool linkStage(EShLanguage, EShMessages);
+
+    TPoolAllocator* pool;
+    std::list<TShader*> stages[EShLangCount];
+    TIntermediate* intermediate[EShLangCount];
+    bool newedIntermediate[EShLangCount];  // track which intermediates were "new" versus reusing a singleton unit in a stage
+    TInfoSink* infoSink;
+#ifndef GLSLANG_WEB
+    TReflection* reflection;
+#endif
+    bool linked;
+
+private:
+    TProgram(TProgram&);
+    TProgram& operator=(TProgram&);
+};
+
+} // end namespace glslang
+
+#endif // _COMPILER_INTERFACE_INCLUDED_
diff --git a/dep/imgui/CMakeLists.txt b/dep/imgui/CMakeLists.txt
index 3ec44dd84..8f5b999f2 100644
--- a/dep/imgui/CMakeLists.txt
+++ b/dep/imgui/CMakeLists.txt
@@ -24,6 +24,12 @@ target_sources(imgui PRIVATE
)
target_link_libraries(imgui PRIVATE glad)
+target_sources(imgui PRIVATE
+  include/imgui_impl_vulkan.h
+  src/imgui_impl_vulkan.cpp
+)
+target_link_libraries(imgui PRIVATE vulkan-loader)
+
if(WIN32)
  target_sources(imgui PRIVATE include/imgui_impl_dx11.h src/imgui_impl_dx11.cpp)
endif()
diff --git a/dep/imgui/imgui.vcxproj b/dep/imgui/imgui.vcxproj
index 615f9e3f4..3996cab87 100644
--- a/dep/imgui/imgui.vcxproj
+++ b/dep/imgui/imgui.vcxproj
@@ -39,6 +39,7 @@ + @@ -50,9 +51,18 @@ + + + + {43540154-9e1e-409c-834f-b84be5621388} + + + {9c8ddeb0-2b8f-4f5f-ba86-127cdf27f035} + + {BB08260F-6FBC-46AF-8924-090EE71360C6} Win32Proj @@ -201,7 +211,7 @@ Disabled imgui_STATIC;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) ProgramDatabase - $(SolutionDir)dep\glad\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan-loader\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) true stdcpp17 true @@ -220,7 +230,7 @@ Disabled imgui_STATIC;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) ProgramDatabase - $(SolutionDir)dep\glad\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan-loader\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) true stdcpp17 true @@ -239,7 +249,7 @@ Disabled imgui_STATIC;WIN32;_ITERATOR_DEBUG_LEVEL=1;_DEBUG;_LIB;%(PreprocessorDefinitions) ProgramDatabase - $(SolutionDir)dep\glad\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) +
$(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan-loader\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) true stdcpp17 false @@ -260,7 +270,7 @@ Disabled imgui_STATIC;WIN32;_ITERATOR_DEBUG_LEVEL=1;_DEBUG;_LIB;%(PreprocessorDefinitions) ProgramDatabase - $(SolutionDir)dep\glad\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan-loader\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) true stdcpp17 false @@ -281,7 +291,7 @@ MaxSpeed true imgui_STATIC;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\glad\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan-loader\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) true /Zo %(AdditionalOptions) false @@ -304,7 +314,7 @@ MaxSpeed true imgui_STATIC;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\glad\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan-loader\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) true /Zo %(AdditionalOptions) true @@ -328,7 +338,7 @@ MaxSpeed true imgui_STATIC;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\glad\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan-loader\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) true /Zo %(AdditionalOptions) false @@ -351,7 +361,7 @@ MaxSpeed true imgui_STATIC;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\glad\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan-loader\include;$(ProjectDir)include;$(ProjectDir)src;%(AdditionalIncludeDirectories) true /Zo %(AdditionalOptions) true diff --git a/dep/imgui/imgui.vcxproj.filters b/dep/imgui/imgui.vcxproj.filters index 3817cc58b..1d6e671ff 100644 --- a/dep/imgui/imgui.vcxproj.filters +++ b/dep/imgui/imgui.vcxproj.filters @@ -10,6 +10,7 @@ + @@ -18,5 +19,6 @@ + \ No newline at end of file diff --git a/dep/imgui/include/imgui_impl_vulkan.h b/dep/imgui/include/imgui_impl_vulkan.h new file mode 100644 index 000000000..8ad8180eb --- /dev/null +++ b/dep/imgui/include/imgui_impl_vulkan.h @@ -0,0 +1,126 @@ +// dear imgui: Renderer for Vulkan +// This needs to be used along with a Platform Binding (e.g. GLFW, SDL, Win32, custom..) + +// Implemented features: +// [X] Renderer: Support for large meshes (64k+ vertices) with 16-bits indices. +// Missing features: +// [ ] Renderer: User texture binding. Changes of ImTextureID aren't supported by this binding! See https://github.com/ocornut/imgui/pull/914 + +// You can copy and use unmodified imgui_impl_* files in your project. See main.cpp for an example of using this. +// If you are new to dear imgui, read examples/README.txt and read the documentation at the top of imgui.cpp. +// https://github.com/ocornut/imgui + +// The aim of imgui_impl_vulkan.h/.cpp is to be usable in your engine without any modification. +// IF YOU FEEL YOU NEED TO MAKE ANY CHANGE TO THIS CODE, please share them and your feedback at https://github.com/ocornut/imgui/ + +// Important note to the reader who wish to integrate imgui_impl_vulkan.cpp/.h in their own engine/app. 
+// - Common ImGui_ImplVulkan_XXX functions and structures are used to interface with imgui_impl_vulkan.cpp/.h. +// You will use those if you want to use this rendering back-end in your engine/app. +// - Helper ImGui_ImplVulkanH_XXX functions and structures are only used by this example (main.cpp) and by +// the back-end itself (imgui_impl_vulkan.cpp), but should PROBABLY NOT be used by your own engine/app code. +// Read comments in imgui_impl_vulkan.h. + +#pragma once + +#include "vulkan_loader.h" + +// Initialization data, for ImGui_ImplVulkan_Init() +// [Please zero-clear before use!] +struct ImGui_ImplVulkan_InitInfo +{ + VkInstance Instance; + VkPhysicalDevice PhysicalDevice; + VkDevice Device; + uint32_t QueueFamily; + VkQueue Queue; + VkPipelineCache PipelineCache; + VkDescriptorPool DescriptorPool; + uint32_t MinImageCount; // >= 2 + uint32_t ImageCount; // >= MinImageCount + VkSampleCountFlagBits MSAASamples; // >= VK_SAMPLE_COUNT_1_BIT + const VkAllocationCallbacks* Allocator; + void (*CheckVkResultFn)(VkResult err); +}; + +// Called by user code +IMGUI_IMPL_API bool ImGui_ImplVulkan_Init(ImGui_ImplVulkan_InitInfo* info, VkRenderPass render_pass); +IMGUI_IMPL_API void ImGui_ImplVulkan_Shutdown(); +IMGUI_IMPL_API void ImGui_ImplVulkan_NewFrame(); +IMGUI_IMPL_API void ImGui_ImplVulkan_RenderDrawData(ImDrawData* draw_data, VkCommandBuffer command_buffer); +IMGUI_IMPL_API bool ImGui_ImplVulkan_CreateFontsTexture(VkCommandBuffer command_buffer); +IMGUI_IMPL_API void ImGui_ImplVulkan_DestroyFontUploadObjects(); +IMGUI_IMPL_API void ImGui_ImplVulkan_SetMinImageCount(uint32_t min_image_count); // To override MinImageCount after initialization (e.g. if swap chain is recreated) + + +//------------------------------------------------------------------------- +// Internal / Miscellaneous Vulkan Helpers +// (Used by example's main.cpp. Used by multi-viewport features. PROBABLY NOT used by your own engine/app.) +//------------------------------------------------------------------------- +// You probably do NOT need to use or care about those functions. +// Those functions only exist because: +// 1) they facilitate the readability and maintenance of the multiple main.cpp examples files. +// 2) the upcoming multi-viewport feature will need them internally. +// Generally we avoid exposing any kind of superfluous high-level helpers in the bindings, +// but it is too much code to duplicate everywhere so we exceptionally expose them. +// +// Your engine/app will likely _already_ have code to setup all that stuff (swap chain, render pass, frame buffers, etc.). +// You may read this code to learn about Vulkan, but it is recommended you use your own custom-tailored code to do equivalent work.
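+// +// For reference, a minimal sketch of driving the Common functions declared above; this is illustrative only, and every engine-side name (instance, physical_device, device, queue_family, queue, descriptor_pool, render_pass, upload_cb, draw_cb, swapchain_image_count) is a placeholder: +// ImGui_ImplVulkan_InitInfo ii = {}; // zero-clear before use! +// ii.Instance = instance; ii.PhysicalDevice = physical_device; ii.Device = device; +// ii.QueueFamily = queue_family; ii.Queue = queue; ii.DescriptorPool = descriptor_pool; +// ii.MinImageCount = 2; ii.ImageCount = swapchain_image_count; +// ImGui_ImplVulkan_Init(&ii, render_pass); +// ImGui_ImplVulkan_CreateFontsTexture(upload_cb); // record into upload_cb, then submit and wait once +// ImGui_ImplVulkan_DestroyFontUploadObjects(); +// Per frame: ImGui_ImplVulkan_NewFrame(), build the UI, ImGui::Render(), then +// ImGui_ImplVulkan_RenderDrawData(ImGui::GetDrawData(), draw_cb); call ImGui_ImplVulkan_Shutdown() at exit.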
+// (The ImGui_ImplVulkanH_XXX functions do not interact with any of the state used by the regular ImGui_ImplVulkan_XXX functions) +//------------------------------------------------------------------------- + +struct ImGui_ImplVulkanH_Frame; +struct ImGui_ImplVulkanH_Window; + +// Helpers +IMGUI_IMPL_API void ImGui_ImplVulkanH_CreateWindow(VkInstance instance, VkPhysicalDevice physical_device, VkDevice device, ImGui_ImplVulkanH_Window* wnd, uint32_t queue_family, const VkAllocationCallbacks* allocator, int w, int h, uint32_t min_image_count); +IMGUI_IMPL_API void ImGui_ImplVulkanH_DestroyWindow(VkInstance instance, VkDevice device, ImGui_ImplVulkanH_Window* wnd, const VkAllocationCallbacks* allocator); +IMGUI_IMPL_API VkSurfaceFormatKHR ImGui_ImplVulkanH_SelectSurfaceFormat(VkPhysicalDevice physical_device, VkSurfaceKHR surface, const VkFormat* request_formats, int request_formats_count, VkColorSpaceKHR request_color_space); +IMGUI_IMPL_API VkPresentModeKHR ImGui_ImplVulkanH_SelectPresentMode(VkPhysicalDevice physical_device, VkSurfaceKHR surface, const VkPresentModeKHR* request_modes, int request_modes_count); +IMGUI_IMPL_API int ImGui_ImplVulkanH_GetMinImageCountFromPresentMode(VkPresentModeKHR present_mode); + +// Helper structure to hold the data needed by one rendering frame +// (Used by example's main.cpp. Used by multi-viewport features. Probably NOT used by your own engine/app.) +// [Please zero-clear before use!] +struct ImGui_ImplVulkanH_Frame +{ + VkCommandPool CommandPool; + VkCommandBuffer CommandBuffer; + VkFence Fence; + VkImage Backbuffer; + VkImageView BackbufferView; + VkFramebuffer Framebuffer; +}; + +struct ImGui_ImplVulkanH_FrameSemaphores +{ + VkSemaphore ImageAcquiredSemaphore; + VkSemaphore RenderCompleteSemaphore; +}; + +// Helper structure to hold the data needed by one rendering context into one OS window +// (Used by example's main.cpp. Used by multi-viewport features. Probably NOT used by your own engine/app.) +struct ImGui_ImplVulkanH_Window +{ + int Width; + int Height; + VkSwapchainKHR Swapchain; + VkSurfaceKHR Surface; + VkSurfaceFormatKHR SurfaceFormat; + VkPresentModeKHR PresentMode; + VkRenderPass RenderPass; + bool ClearEnable; + VkClearValue ClearValue; + uint32_t FrameIndex; // Current frame being rendered to (0 <= FrameIndex < FrameInFlightCount) + uint32_t ImageCount; // Number of simultaneous in-flight frames (returned by vkGetSwapchainImagesKHR, usually derived from min_image_count) + uint32_t SemaphoreIndex; // Current set of swapchain wait semaphores we're using (needs to be distinct from per frame data) + ImGui_ImplVulkanH_Frame* Frames; + ImGui_ImplVulkanH_FrameSemaphores* FrameSemaphores; + + ImGui_ImplVulkanH_Window() + { + memset(this, 0, sizeof(*this)); + PresentMode = VK_PRESENT_MODE_MAX_ENUM_KHR; + ClearEnable = true; + } +}; + diff --git a/dep/imgui/src/imgui_impl_vulkan.cpp b/dep/imgui/src/imgui_impl_vulkan.cpp new file mode 100644 index 000000000..3fb10d235 --- /dev/null +++ b/dep/imgui/src/imgui_impl_vulkan.cpp @@ -0,0 +1,1221 @@ +// dear imgui: Renderer for Vulkan +// This needs to be used along with a Platform Binding (e.g. GLFW, SDL, Win32, custom..) + +// Implemented features: +// [X] Renderer: Support for large meshes (64k+ vertices) with 16-bits indices. +// Missing features: +// [ ] Renderer: User texture binding. Changes of ImTextureID aren't supported by this binding! See https://github.com/ocornut/imgui/pull/914 + +// You can copy and use unmodified imgui_impl_* files in your project. 
See main.cpp for an example of using this. +// If you are new to dear imgui, read examples/README.txt and read the documentation at the top of imgui.cpp. +// https://github.com/ocornut/imgui + +// The aim of imgui_impl_vulkan.h/.cpp is to be usable in your engine without any modification. +// IF YOU FEEL YOU NEED TO MAKE ANY CHANGE TO THIS CODE, please share them and your feedback at https://github.com/ocornut/imgui/ + +// Important note to the reader who wishes to integrate imgui_impl_vulkan.cpp/.h in their own engine/app. +// - Common ImGui_ImplVulkan_XXX functions and structures are used to interface with imgui_impl_vulkan.cpp/.h. +// You will use those if you want to use this rendering back-end in your engine/app. +// - Helper ImGui_ImplVulkanH_XXX functions and structures are only used by this example (main.cpp) and by +// the back-end itself (imgui_impl_vulkan.cpp), but should PROBABLY NOT be used by your own engine/app code. +// Read comments in imgui_impl_vulkan.h. + +// CHANGELOG +// (minor and older changes stripped away, please see git history for details) +// 2019-08-01: Vulkan: Added support for specifying multisample count. Set ImGui_ImplVulkan_InitInfo::MSAASamples to one of the VkSampleCountFlagBits values to use, default is non-multisampled as before. +// 2019-05-29: Vulkan: Added support for large mesh (64K+ vertices), enable ImGuiBackendFlags_RendererHasVtxOffset flag. +// 2019-04-30: Vulkan: Added support for special ImDrawCallback_ResetRenderState callback to reset render state. +// 2019-04-04: *BREAKING CHANGE*: Vulkan: Added ImageCount/MinImageCount fields in ImGui_ImplVulkan_InitInfo, required for initialization (was previously a hard #define IMGUI_VK_QUEUED_FRAMES 2). Added ImGui_ImplVulkan_SetMinImageCount(). +// 2019-04-04: Vulkan: Added VkInstance argument to ImGui_ImplVulkanH_CreateWindow() optional helper. +// 2019-04-04: Vulkan: Avoid passing negative coordinates to vkCmdSetScissor, which debug validation layers do not like. +// 2019-04-01: Vulkan: Support for 32-bit index buffer (#define ImDrawIdx unsigned int). +// 2019-02-16: Vulkan: Viewport and clipping rectangles correctly using draw_data->FramebufferScale to allow retina display. +// 2018-11-30: Misc: Setting up io.BackendRendererName so it can be displayed in the About Window. +// 2018-08-25: Vulkan: Fixed mishandled VkSurfaceCapabilitiesKHR::maxImageCount=0 case. +// 2018-06-22: Inverted the parameters to ImGui_ImplVulkan_RenderDrawData() to be consistent with other bindings. +// 2018-06-08: Misc: Extracted imgui_impl_vulkan.cpp/.h away from the old combined GLFW+Vulkan example. +// 2018-06-08: Vulkan: Use draw_data->DisplayPos and draw_data->DisplaySize to setup projection matrix and clipping rectangle. +// 2018-03-03: Vulkan: Various refactor, created a couple of ImGui_ImplVulkanH_XXX helper that the example can use and that viewport support will use. +// 2018-03-01: Vulkan: Renamed ImGui_ImplVulkan_Init_Info to ImGui_ImplVulkan_InitInfo and fields to match more closely Vulkan terminology. +// 2018-02-16: Misc: Obsoleted the io.RenderDrawListsFn callback, ImGui_ImplVulkan_Render() calls ImGui_ImplVulkan_RenderDrawData() itself. +// 2018-02-06: Misc: Removed call to ImGui::Shutdown() which is not available from 1.60 WIP, user needs to call CreateContext/DestroyContext themselves. +// 2017-05-15: Vulkan: Fix scissor offset being negative. Fix new Vulkan validation warnings. Set required depth member for buffer image copy.
+// 2016-11-13: Vulkan: Fix validation layer warnings and errors and redeclare gl_PerVertex. +// 2016-10-18: Vulkan: Add location decorators & change to use structs as in/out in glsl, update embedded spv (produced with glslangValidator -x). Null the released resources. +// 2016-08-27: Vulkan: Fix Vulkan example for use when a depth buffer is active. + +#include "imgui.h" +#include "imgui_impl_vulkan.h" +#include <stdio.h> + +// Reusable buffers used for rendering 1 current in-flight frame, for ImGui_ImplVulkan_RenderDrawData() +// [Please zero-clear before use!] +struct ImGui_ImplVulkanH_FrameRenderBuffers +{ + VkDeviceMemory VertexBufferMemory; + VkDeviceMemory IndexBufferMemory; + VkDeviceSize VertexBufferSize; + VkDeviceSize IndexBufferSize; + VkBuffer VertexBuffer; + VkBuffer IndexBuffer; +}; + +// Each viewport will hold 1 ImGui_ImplVulkanH_WindowRenderBuffers +// [Please zero-clear before use!] +struct ImGui_ImplVulkanH_WindowRenderBuffers +{ + uint32_t Index; + uint32_t Count; + ImGui_ImplVulkanH_FrameRenderBuffers* FrameRenderBuffers; +}; + +// Vulkan data +static ImGui_ImplVulkan_InitInfo g_VulkanInitInfo = {}; +static VkRenderPass g_RenderPass = VK_NULL_HANDLE; +static VkDeviceSize g_BufferMemoryAlignment = 256; +static VkPipelineCreateFlags g_PipelineCreateFlags = 0x00; +static VkDescriptorSetLayout g_DescriptorSetLayout = VK_NULL_HANDLE; +static VkPipelineLayout g_PipelineLayout = VK_NULL_HANDLE; +static VkDescriptorSet g_DescriptorSet = VK_NULL_HANDLE; +static VkPipeline g_Pipeline = VK_NULL_HANDLE; + +// Font data +static VkSampler g_FontSampler = VK_NULL_HANDLE; +static VkDeviceMemory g_FontMemory = VK_NULL_HANDLE; +static VkImage g_FontImage = VK_NULL_HANDLE; +static VkImageView g_FontView = VK_NULL_HANDLE; +static VkDeviceMemory g_UploadBufferMemory = VK_NULL_HANDLE; +static VkBuffer g_UploadBuffer = VK_NULL_HANDLE; + +// Render buffers +static ImGui_ImplVulkanH_WindowRenderBuffers g_MainWindowRenderBuffers; + +// Forward Declarations +bool ImGui_ImplVulkan_CreateDeviceObjects(); +void ImGui_ImplVulkan_DestroyDeviceObjects(); +void ImGui_ImplVulkanH_DestroyFrame(VkDevice device, ImGui_ImplVulkanH_Frame* fd, const VkAllocationCallbacks* allocator); +void ImGui_ImplVulkanH_DestroyFrameSemaphores(VkDevice device, ImGui_ImplVulkanH_FrameSemaphores* fsd, const VkAllocationCallbacks* allocator); +void ImGui_ImplVulkanH_DestroyFrameRenderBuffers(VkDevice device, ImGui_ImplVulkanH_FrameRenderBuffers* buffers, const VkAllocationCallbacks* allocator); +void ImGui_ImplVulkanH_DestroyWindowRenderBuffers(VkDevice device, ImGui_ImplVulkanH_WindowRenderBuffers* buffers, const VkAllocationCallbacks* allocator); +void ImGui_ImplVulkanH_CreateWindowSwapChain(VkPhysicalDevice physical_device, VkDevice device, ImGui_ImplVulkanH_Window* wd, const VkAllocationCallbacks* allocator, int w, int h, uint32_t min_image_count); +void ImGui_ImplVulkanH_CreateWindowCommandBuffers(VkPhysicalDevice physical_device, VkDevice device, ImGui_ImplVulkanH_Window* wd, uint32_t queue_family, const VkAllocationCallbacks* allocator); + +//----------------------------------------------------------------------------- +// SHADERS +//----------------------------------------------------------------------------- + +// glsl_shader.vert, compiled with: +// # glslangValidator -V -x -o glsl_shader.vert.u32 glsl_shader.vert +/* +#version 450 core +layout(location = 0) in vec2 aPos; +layout(location = 1) in vec2 aUV; +layout(location = 2) in vec4 aColor; +layout(push_constant) uniform uPushConstant { vec2 uScale; vec2 uTranslate; } pc;
+ +out gl_PerVertex { vec4 gl_Position; }; +layout(location = 0) out struct { vec4 Color; vec2 UV; } Out; + +void main() +{ + Out.Color = aColor; + Out.UV = aUV; + gl_Position = vec4(aPos * pc.uScale + pc.uTranslate, 0, 1); +} +*/ +static uint32_t __glsl_shader_vert_spv[] = +{ + 0x07230203,0x00010000,0x00080001,0x0000002e,0x00000000,0x00020011,0x00000001,0x0006000b, + 0x00000001,0x4c534c47,0x6474732e,0x3035342e,0x00000000,0x0003000e,0x00000000,0x00000001, + 0x000a000f,0x00000000,0x00000004,0x6e69616d,0x00000000,0x0000000b,0x0000000f,0x00000015, + 0x0000001b,0x0000001c,0x00030003,0x00000002,0x000001c2,0x00040005,0x00000004,0x6e69616d, + 0x00000000,0x00030005,0x00000009,0x00000000,0x00050006,0x00000009,0x00000000,0x6f6c6f43, + 0x00000072,0x00040006,0x00000009,0x00000001,0x00005655,0x00030005,0x0000000b,0x0074754f, + 0x00040005,0x0000000f,0x6c6f4361,0x0000726f,0x00030005,0x00000015,0x00565561,0x00060005, + 0x00000019,0x505f6c67,0x65567265,0x78657472,0x00000000,0x00060006,0x00000019,0x00000000, + 0x505f6c67,0x7469736f,0x006e6f69,0x00030005,0x0000001b,0x00000000,0x00040005,0x0000001c, + 0x736f5061,0x00000000,0x00060005,0x0000001e,0x73755075,0x6e6f4368,0x6e617473,0x00000074, + 0x00050006,0x0000001e,0x00000000,0x61635375,0x0000656c,0x00060006,0x0000001e,0x00000001, + 0x61725475,0x616c736e,0x00006574,0x00030005,0x00000020,0x00006370,0x00040047,0x0000000b, + 0x0000001e,0x00000000,0x00040047,0x0000000f,0x0000001e,0x00000002,0x00040047,0x00000015, + 0x0000001e,0x00000001,0x00050048,0x00000019,0x00000000,0x0000000b,0x00000000,0x00030047, + 0x00000019,0x00000002,0x00040047,0x0000001c,0x0000001e,0x00000000,0x00050048,0x0000001e, + 0x00000000,0x00000023,0x00000000,0x00050048,0x0000001e,0x00000001,0x00000023,0x00000008, + 0x00030047,0x0000001e,0x00000002,0x00020013,0x00000002,0x00030021,0x00000003,0x00000002, + 0x00030016,0x00000006,0x00000020,0x00040017,0x00000007,0x00000006,0x00000004,0x00040017, + 0x00000008,0x00000006,0x00000002,0x0004001e,0x00000009,0x00000007,0x00000008,0x00040020, + 0x0000000a,0x00000003,0x00000009,0x0004003b,0x0000000a,0x0000000b,0x00000003,0x00040015, + 0x0000000c,0x00000020,0x00000001,0x0004002b,0x0000000c,0x0000000d,0x00000000,0x00040020, + 0x0000000e,0x00000001,0x00000007,0x0004003b,0x0000000e,0x0000000f,0x00000001,0x00040020, + 0x00000011,0x00000003,0x00000007,0x0004002b,0x0000000c,0x00000013,0x00000001,0x00040020, + 0x00000014,0x00000001,0x00000008,0x0004003b,0x00000014,0x00000015,0x00000001,0x00040020, + 0x00000017,0x00000003,0x00000008,0x0003001e,0x00000019,0x00000007,0x00040020,0x0000001a, + 0x00000003,0x00000019,0x0004003b,0x0000001a,0x0000001b,0x00000003,0x0004003b,0x00000014, + 0x0000001c,0x00000001,0x0004001e,0x0000001e,0x00000008,0x00000008,0x00040020,0x0000001f, + 0x00000009,0x0000001e,0x0004003b,0x0000001f,0x00000020,0x00000009,0x00040020,0x00000021, + 0x00000009,0x00000008,0x0004002b,0x00000006,0x00000028,0x00000000,0x0004002b,0x00000006, + 0x00000029,0x3f800000,0x00050036,0x00000002,0x00000004,0x00000000,0x00000003,0x000200f8, + 0x00000005,0x0004003d,0x00000007,0x00000010,0x0000000f,0x00050041,0x00000011,0x00000012, + 0x0000000b,0x0000000d,0x0003003e,0x00000012,0x00000010,0x0004003d,0x00000008,0x00000016, + 0x00000015,0x00050041,0x00000017,0x00000018,0x0000000b,0x00000013,0x0003003e,0x00000018, + 0x00000016,0x0004003d,0x00000008,0x0000001d,0x0000001c,0x00050041,0x00000021,0x00000022, + 0x00000020,0x0000000d,0x0004003d,0x00000008,0x00000023,0x00000022,0x00050085,0x00000008, + 0x00000024,0x0000001d,0x00000023,0x00050041,0x00000021,0x00000025,0x00000020,0x00000013, 
+ 0x0004003d,0x00000008,0x00000026,0x00000025,0x00050081,0x00000008,0x00000027,0x00000024, + 0x00000026,0x00050051,0x00000006,0x0000002a,0x00000027,0x00000000,0x00050051,0x00000006, + 0x0000002b,0x00000027,0x00000001,0x00070050,0x00000007,0x0000002c,0x0000002a,0x0000002b, + 0x00000028,0x00000029,0x00050041,0x00000011,0x0000002d,0x0000001b,0x0000000d,0x0003003e, + 0x0000002d,0x0000002c,0x000100fd,0x00010038 +}; + +// glsl_shader.frag, compiled with: +// # glslangValidator -V -x -o glsl_shader.frag.u32 glsl_shader.frag +/* +#version 450 core +layout(location = 0) out vec4 fColor; +layout(set=0, binding=0) uniform sampler2D sTexture; +layout(location = 0) in struct { vec4 Color; vec2 UV; } In; +void main() +{ + fColor = In.Color * texture(sTexture, In.UV.st); +} +*/ +static uint32_t __glsl_shader_frag_spv[] = +{ + 0x07230203,0x00010000,0x00080001,0x0000001e,0x00000000,0x00020011,0x00000001,0x0006000b, + 0x00000001,0x4c534c47,0x6474732e,0x3035342e,0x00000000,0x0003000e,0x00000000,0x00000001, + 0x0007000f,0x00000004,0x00000004,0x6e69616d,0x00000000,0x00000009,0x0000000d,0x00030010, + 0x00000004,0x00000007,0x00030003,0x00000002,0x000001c2,0x00040005,0x00000004,0x6e69616d, + 0x00000000,0x00040005,0x00000009,0x6c6f4366,0x0000726f,0x00030005,0x0000000b,0x00000000, + 0x00050006,0x0000000b,0x00000000,0x6f6c6f43,0x00000072,0x00040006,0x0000000b,0x00000001, + 0x00005655,0x00030005,0x0000000d,0x00006e49,0x00050005,0x00000016,0x78655473,0x65727574, + 0x00000000,0x00040047,0x00000009,0x0000001e,0x00000000,0x00040047,0x0000000d,0x0000001e, + 0x00000000,0x00040047,0x00000016,0x00000022,0x00000000,0x00040047,0x00000016,0x00000021, + 0x00000000,0x00020013,0x00000002,0x00030021,0x00000003,0x00000002,0x00030016,0x00000006, + 0x00000020,0x00040017,0x00000007,0x00000006,0x00000004,0x00040020,0x00000008,0x00000003, + 0x00000007,0x0004003b,0x00000008,0x00000009,0x00000003,0x00040017,0x0000000a,0x00000006, + 0x00000002,0x0004001e,0x0000000b,0x00000007,0x0000000a,0x00040020,0x0000000c,0x00000001, + 0x0000000b,0x0004003b,0x0000000c,0x0000000d,0x00000001,0x00040015,0x0000000e,0x00000020, + 0x00000001,0x0004002b,0x0000000e,0x0000000f,0x00000000,0x00040020,0x00000010,0x00000001, + 0x00000007,0x00090019,0x00000013,0x00000006,0x00000001,0x00000000,0x00000000,0x00000000, + 0x00000001,0x00000000,0x0003001b,0x00000014,0x00000013,0x00040020,0x00000015,0x00000000, + 0x00000014,0x0004003b,0x00000015,0x00000016,0x00000000,0x0004002b,0x0000000e,0x00000018, + 0x00000001,0x00040020,0x00000019,0x00000001,0x0000000a,0x00050036,0x00000002,0x00000004, + 0x00000000,0x00000003,0x000200f8,0x00000005,0x00050041,0x00000010,0x00000011,0x0000000d, + 0x0000000f,0x0004003d,0x00000007,0x00000012,0x00000011,0x0004003d,0x00000014,0x00000017, + 0x00000016,0x00050041,0x00000019,0x0000001a,0x0000000d,0x00000018,0x0004003d,0x0000000a, + 0x0000001b,0x0000001a,0x00050057,0x00000007,0x0000001c,0x00000017,0x0000001b,0x00050085, + 0x00000007,0x0000001d,0x00000012,0x0000001c,0x0003003e,0x00000009,0x0000001d,0x000100fd, + 0x00010038 +}; + +//----------------------------------------------------------------------------- +// FUNCTIONS +//----------------------------------------------------------------------------- + +static uint32_t ImGui_ImplVulkan_MemoryType(VkMemoryPropertyFlags properties, uint32_t type_bits) +{ + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + VkPhysicalDeviceMemoryProperties prop; + vkGetPhysicalDeviceMemoryProperties(v->PhysicalDevice, &prop); + for (uint32_t i = 0; i < prop.memoryTypeCount; i++) + if 
((prop.memoryTypes[i].propertyFlags & properties) == properties && type_bits & (1<<i)) + return i; + return 0xFFFFFFFF; // Unable to find memoryType +} + +static void check_vk_result(VkResult err) +{ + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + if (v->CheckVkResultFn) + v->CheckVkResultFn(err); +} + +static void CreateOrResizeBuffer(VkBuffer& buffer, VkDeviceMemory& buffer_memory, VkDeviceSize& p_buffer_size, size_t new_size, VkBufferUsageFlagBits usage) +{ + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + VkResult err; + if (buffer != VK_NULL_HANDLE) + vkDestroyBuffer(v->Device, buffer, v->Allocator); + if (buffer_memory != VK_NULL_HANDLE) + vkFreeMemory(v->Device, buffer_memory, v->Allocator); + + VkDeviceSize vertex_buffer_size_aligned = ((new_size - 1) / g_BufferMemoryAlignment + 1) * g_BufferMemoryAlignment; + VkBufferCreateInfo buffer_info = {}; + buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + buffer_info.size = vertex_buffer_size_aligned; + buffer_info.usage = usage; + buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + err = vkCreateBuffer(v->Device, &buffer_info, v->Allocator, &buffer); + check_vk_result(err); + + VkMemoryRequirements req; + vkGetBufferMemoryRequirements(v->Device, buffer, &req); + g_BufferMemoryAlignment = (g_BufferMemoryAlignment > req.alignment) ? g_BufferMemoryAlignment : req.alignment; + VkMemoryAllocateInfo alloc_info = {}; + alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + alloc_info.allocationSize = req.size; + alloc_info.memoryTypeIndex = ImGui_ImplVulkan_MemoryType(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits); + err = vkAllocateMemory(v->Device, &alloc_info, v->Allocator, &buffer_memory); + check_vk_result(err); + + err = vkBindBufferMemory(v->Device, buffer, buffer_memory, 0); + check_vk_result(err); + p_buffer_size = new_size; +} + +static void ImGui_ImplVulkan_SetupRenderState(ImDrawData* draw_data, VkCommandBuffer command_buffer, ImGui_ImplVulkanH_FrameRenderBuffers* rb, int fb_width, int fb_height) +{ + // Bind pipeline and descriptor sets: + { + vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, g_Pipeline); + VkDescriptorSet desc_set[1] = { g_DescriptorSet }; + vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, g_PipelineLayout, 0, 1, desc_set, 0, NULL); + } + + // Bind Vertex And Index Buffer: + { + VkBuffer vertex_buffers[1] = { rb->VertexBuffer }; + VkDeviceSize vertex_offset[1] = { 0 }; + vkCmdBindVertexBuffers(command_buffer, 0, 1, vertex_buffers, vertex_offset); + vkCmdBindIndexBuffer(command_buffer, rb->IndexBuffer, 0, sizeof(ImDrawIdx) == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32); + } + + // Setup viewport: + { + VkViewport viewport; + viewport.x = 0; + viewport.y = 0; + viewport.width = (float)fb_width; + viewport.height = (float)fb_height; + viewport.minDepth = 0.0f; + viewport.maxDepth = 1.0f; + vkCmdSetViewport(command_buffer, 0, 1, &viewport); + } + + // Setup scale and translation: + // Our visible imgui space lies from draw_data->DisplayPos (top left) to draw_data->DisplayPos+draw_data->DisplaySize (bottom right). DisplayPos is (0,0) for single viewport apps.
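+ // (Worked example, assuming a single-viewport app: with DisplayPos = (0,0) and DisplaySize = (1280,720), scale = (2/1280, 2/720) and translate = (-1,-1), so an x in [0..1280] maps to x*scale[0] + translate[0] in [-1..+1] NDC.)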
+ { + float scale[2]; + scale[0] = 2.0f / draw_data->DisplaySize.x; + scale[1] = 2.0f / draw_data->DisplaySize.y; + float translate[2]; + translate[0] = -1.0f - draw_data->DisplayPos.x * scale[0]; + translate[1] = -1.0f - draw_data->DisplayPos.y * scale[1]; + vkCmdPushConstants(command_buffer, g_PipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, sizeof(float) * 0, sizeof(float) * 2, scale); + vkCmdPushConstants(command_buffer, g_PipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, sizeof(float) * 2, sizeof(float) * 2, translate); + } +} + +// Render function +// (this used to be set in io.RenderDrawListsFn and called by ImGui::Render(), but you can now call this directly from your main loop) +void ImGui_ImplVulkan_RenderDrawData(ImDrawData* draw_data, VkCommandBuffer command_buffer) +{ + // Avoid rendering when minimized, scale coordinates for retina displays (screen coordinates != framebuffer coordinates) + int fb_width = (int)(draw_data->DisplaySize.x); + int fb_height = (int)(draw_data->DisplaySize.y); + if (fb_width <= 0 || fb_height <= 0 || draw_data->TotalVtxCount == 0) + return; + + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + + // Allocate array to store enough vertex/index buffers + ImGui_ImplVulkanH_WindowRenderBuffers* wrb = &g_MainWindowRenderBuffers; + if (wrb->FrameRenderBuffers == NULL) + { + wrb->Index = 0; + wrb->Count = v->ImageCount; + wrb->FrameRenderBuffers = (ImGui_ImplVulkanH_FrameRenderBuffers*)IM_ALLOC(sizeof(ImGui_ImplVulkanH_FrameRenderBuffers) * wrb->Count); + memset(wrb->FrameRenderBuffers, 0, sizeof(ImGui_ImplVulkanH_FrameRenderBuffers) * wrb->Count); + } + IM_ASSERT(wrb->Count == v->ImageCount); + wrb->Index = (wrb->Index + 1) % wrb->Count; + ImGui_ImplVulkanH_FrameRenderBuffers* rb = &wrb->FrameRenderBuffers[wrb->Index]; + + VkResult err; + + // Create or resize the vertex/index buffers + size_t vertex_size = draw_data->TotalVtxCount * sizeof(ImDrawVert); + size_t index_size = draw_data->TotalIdxCount * sizeof(ImDrawIdx); + if (rb->VertexBuffer == VK_NULL_HANDLE || rb->VertexBufferSize < vertex_size) + CreateOrResizeBuffer(rb->VertexBuffer, rb->VertexBufferMemory, rb->VertexBufferSize, vertex_size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); + if (rb->IndexBuffer == VK_NULL_HANDLE || rb->IndexBufferSize < index_size) + CreateOrResizeBuffer(rb->IndexBuffer, rb->IndexBufferMemory, rb->IndexBufferSize, index_size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT); + + // Upload vertex/index data into a single contiguous GPU buffer + { + ImDrawVert* vtx_dst = NULL; + ImDrawIdx* idx_dst = NULL; + err = vkMapMemory(v->Device, rb->VertexBufferMemory, 0, vertex_size, 0, (void**)(&vtx_dst)); + check_vk_result(err); + err = vkMapMemory(v->Device, rb->IndexBufferMemory, 0, index_size, 0, (void**)(&idx_dst)); + check_vk_result(err); + for (int n = 0; n < draw_data->CmdListsCount; n++) + { + const ImDrawList* cmd_list = draw_data->CmdLists[n]; + memcpy(vtx_dst, cmd_list->VtxBuffer.Data, cmd_list->VtxBuffer.Size * sizeof(ImDrawVert)); + memcpy(idx_dst, cmd_list->IdxBuffer.Data, cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx)); + vtx_dst += cmd_list->VtxBuffer.Size; + idx_dst += cmd_list->IdxBuffer.Size; + } + VkMappedMemoryRange range[2] = {}; + range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + range[0].memory = rb->VertexBufferMemory; + range[0].size = VK_WHOLE_SIZE; + range[1].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + range[1].memory = rb->IndexBufferMemory; + range[1].size = VK_WHOLE_SIZE; + err = vkFlushMappedMemoryRanges(v->Device, 2, range); + check_vk_result(err); + 
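+ // (The explicit flush makes the mapped writes visible to the device even when the chosen memory type is not HOST_COHERENT; for coherent memory it is redundant but harmless.)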
vkUnmapMemory(v->Device, rb->VertexBufferMemory); + vkUnmapMemory(v->Device, rb->IndexBufferMemory); + } + + // Setup desired Vulkan state + ImGui_ImplVulkan_SetupRenderState(draw_data, command_buffer, rb, fb_width, fb_height); + + // Will project scissor/clipping rectangles into framebuffer space + ImVec2 clip_off = draw_data->DisplayPos; // (0,0) unless using multi-viewports + + // Render command lists + // (Because we merged all buffers into a single one, we maintain our own offset into them) + int global_vtx_offset = 0; + int global_idx_offset = 0; + for (int n = 0; n < draw_data->CmdListsCount; n++) + { + const ImDrawList* cmd_list = draw_data->CmdLists[n]; + for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++) + { + const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i]; + if (pcmd->UserCallback != NULL) + { + // User callback, registered via ImDrawList::AddCallback() + // (ImDrawCallback_ResetRenderState is a special callback value used by the user to request the renderer to reset render state.) + if (pcmd->UserCallback == ImDrawCallback_ResetRenderState) + ImGui_ImplVulkan_SetupRenderState(draw_data, command_buffer, rb, fb_width, fb_height); + else + pcmd->UserCallback(cmd_list, pcmd); + } + else + { + // Project scissor/clipping rectangles into framebuffer space + ImVec4 clip_rect; + clip_rect.x = (pcmd->ClipRect.x - clip_off.x); + clip_rect.y = (pcmd->ClipRect.y - clip_off.y); + clip_rect.z = (pcmd->ClipRect.z - clip_off.x); + clip_rect.w = (pcmd->ClipRect.w - clip_off.y); + + if (clip_rect.x < fb_width && clip_rect.y < fb_height && clip_rect.z >= 0.0f && clip_rect.w >= 0.0f) + { + // Negative offsets are illegal for vkCmdSetScissor + if (clip_rect.x < 0.0f) + clip_rect.x = 0.0f; + if (clip_rect.y < 0.0f) + clip_rect.y = 0.0f; + + // Apply scissor/clipping rectangle + VkRect2D scissor; + scissor.offset.x = (int32_t)(clip_rect.x); + scissor.offset.y = (int32_t)(clip_rect.y); + scissor.extent.width = (uint32_t)(clip_rect.z - clip_rect.x); + scissor.extent.height = (uint32_t)(clip_rect.w - clip_rect.y); + vkCmdSetScissor(command_buffer, 0, 1, &scissor); + + // Draw + vkCmdDrawIndexed(command_buffer, pcmd->ElemCount, 1, pcmd->IdxOffset + global_idx_offset, pcmd->VtxOffset + global_vtx_offset, 0); + } + } + } + global_idx_offset += cmd_list->IdxBuffer.Size; + global_vtx_offset += cmd_list->VtxBuffer.Size; + } +} + +bool ImGui_ImplVulkan_CreateFontsTexture(VkCommandBuffer command_buffer) +{ + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + ImGuiIO& io = ImGui::GetIO(); + + unsigned char* pixels; + int width, height; + io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height); + size_t upload_size = width*height*4*sizeof(char); + + VkResult err; + + // Create the Image: + { + VkImageCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + info.imageType = VK_IMAGE_TYPE_2D; + info.format = VK_FORMAT_R8G8B8A8_UNORM; + info.extent.width = width; + info.extent.height = height; + info.extent.depth = 1; + info.mipLevels = 1; + info.arrayLayers = 1; + info.samples = VK_SAMPLE_COUNT_1_BIT; + info.tiling = VK_IMAGE_TILING_OPTIMAL; + info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; + info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + err = vkCreateImage(v->Device, &info, v->Allocator, &g_FontImage); + check_vk_result(err); + VkMemoryRequirements req; + vkGetImageMemoryRequirements(v->Device, g_FontImage, &req); + VkMemoryAllocateInfo alloc_info = {}; + alloc_info.sType = 
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + alloc_info.allocationSize = req.size; + alloc_info.memoryTypeIndex = ImGui_ImplVulkan_MemoryType(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, req.memoryTypeBits); + err = vkAllocateMemory(v->Device, &alloc_info, v->Allocator, &g_FontMemory); + check_vk_result(err); + err = vkBindImageMemory(v->Device, g_FontImage, g_FontMemory, 0); + check_vk_result(err); + } + + // Create the Image View: + { + VkImageViewCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + info.image = g_FontImage; + info.viewType = VK_IMAGE_VIEW_TYPE_2D; + info.format = VK_FORMAT_R8G8B8A8_UNORM; + info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + info.subresourceRange.levelCount = 1; + info.subresourceRange.layerCount = 1; + err = vkCreateImageView(v->Device, &info, v->Allocator, &g_FontView); + check_vk_result(err); + } + + // Update the Descriptor Set: + { + VkDescriptorImageInfo desc_image[1] = {}; + desc_image[0].sampler = g_FontSampler; + desc_image[0].imageView = g_FontView; + desc_image[0].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + VkWriteDescriptorSet write_desc[1] = {}; + write_desc[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + write_desc[0].dstSet = g_DescriptorSet; + write_desc[0].descriptorCount = 1; + write_desc[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + write_desc[0].pImageInfo = desc_image; + vkUpdateDescriptorSets(v->Device, 1, write_desc, 0, NULL); + } + + // Create the Upload Buffer: + { + VkBufferCreateInfo buffer_info = {}; + buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + buffer_info.size = upload_size; + buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + err = vkCreateBuffer(v->Device, &buffer_info, v->Allocator, &g_UploadBuffer); + check_vk_result(err); + VkMemoryRequirements req; + vkGetBufferMemoryRequirements(v->Device, g_UploadBuffer, &req); + g_BufferMemoryAlignment = (g_BufferMemoryAlignment > req.alignment) ? 
g_BufferMemoryAlignment : req.alignment; + VkMemoryAllocateInfo alloc_info = {}; + alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + alloc_info.allocationSize = req.size; + alloc_info.memoryTypeIndex = ImGui_ImplVulkan_MemoryType(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits); + err = vkAllocateMemory(v->Device, &alloc_info, v->Allocator, &g_UploadBufferMemory); + check_vk_result(err); + err = vkBindBufferMemory(v->Device, g_UploadBuffer, g_UploadBufferMemory, 0); + check_vk_result(err); + } + + // Upload to Buffer: + { + char* map = NULL; + err = vkMapMemory(v->Device, g_UploadBufferMemory, 0, upload_size, 0, (void**)(&map)); + check_vk_result(err); + memcpy(map, pixels, upload_size); + VkMappedMemoryRange range[1] = {}; + range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + range[0].memory = g_UploadBufferMemory; + range[0].size = upload_size; + err = vkFlushMappedMemoryRanges(v->Device, 1, range); + check_vk_result(err); + vkUnmapMemory(v->Device, g_UploadBufferMemory); + } + + // Copy to Image: + { + VkImageMemoryBarrier copy_barrier[1] = {}; + copy_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + copy_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + copy_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; + copy_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + copy_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + copy_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + copy_barrier[0].image = g_FontImage; + copy_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + copy_barrier[0].subresourceRange.levelCount = 1; + copy_barrier[0].subresourceRange.layerCount = 1; + vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, copy_barrier); + + VkBufferImageCopy region = {}; + region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + region.imageSubresource.layerCount = 1; + region.imageExtent.width = width; + region.imageExtent.height = height; + region.imageExtent.depth = 1; + vkCmdCopyBufferToImage(command_buffer, g_UploadBuffer, g_FontImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); + + VkImageMemoryBarrier use_barrier[1] = {}; + use_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + use_barrier[0].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + use_barrier[0].dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + use_barrier[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + use_barrier[0].newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + use_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + use_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + use_barrier[0].image = g_FontImage; + use_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + use_barrier[0].subresourceRange.levelCount = 1; + use_barrier[0].subresourceRange.layerCount = 1; + vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, use_barrier); + } + + // Store our identifier + io.Fonts->TexID = (ImTextureID)(intptr_t)g_FontImage; + + return true; +} + +bool ImGui_ImplVulkan_CreateDeviceObjects() +{ + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + VkResult err; + VkShaderModule vert_module; + VkShaderModule frag_module; + + // Create The Shader Modules: + { + VkShaderModuleCreateInfo vert_info = {}; + vert_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + vert_info.codeSize = sizeof(__glsl_shader_vert_spv); +
vert_info.pCode = (uint32_t*)__glsl_shader_vert_spv; + err = vkCreateShaderModule(v->Device, &vert_info, v->Allocator, &vert_module); + check_vk_result(err); + VkShaderModuleCreateInfo frag_info = {}; + frag_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + frag_info.codeSize = sizeof(__glsl_shader_frag_spv); + frag_info.pCode = (uint32_t*)__glsl_shader_frag_spv; + err = vkCreateShaderModule(v->Device, &frag_info, v->Allocator, &frag_module); + check_vk_result(err); + } + + if (!g_FontSampler) + { + VkSamplerCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; + info.magFilter = VK_FILTER_LINEAR; + info.minFilter = VK_FILTER_LINEAR; + info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; + info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; + info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT; + info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT; + info.minLod = -1000; + info.maxLod = 1000; + info.maxAnisotropy = 1.0f; + err = vkCreateSampler(v->Device, &info, v->Allocator, &g_FontSampler); + check_vk_result(err); + } + + if (!g_DescriptorSetLayout) + { + VkSampler sampler[1] = {g_FontSampler}; + VkDescriptorSetLayoutBinding binding[1] = {}; + binding[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + binding[0].descriptorCount = 1; + binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; + binding[0].pImmutableSamplers = sampler; + VkDescriptorSetLayoutCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + info.bindingCount = 1; + info.pBindings = binding; + err = vkCreateDescriptorSetLayout(v->Device, &info, v->Allocator, &g_DescriptorSetLayout); + check_vk_result(err); + } + + // Create Descriptor Set: + { + VkDescriptorSetAllocateInfo alloc_info = {}; + alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + alloc_info.descriptorPool = v->DescriptorPool; + alloc_info.descriptorSetCount = 1; + alloc_info.pSetLayouts = &g_DescriptorSetLayout; + err = vkAllocateDescriptorSets(v->Device, &alloc_info, &g_DescriptorSet); + check_vk_result(err); + } + + if (!g_PipelineLayout) + { + // Constants: we are using 'vec2 offset' and 'vec2 scale' instead of a full 3d projection matrix + VkPushConstantRange push_constants[1] = {}; + push_constants[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; + push_constants[0].offset = sizeof(float) * 0; + push_constants[0].size = sizeof(float) * 4; + VkDescriptorSetLayout set_layout[1] = { g_DescriptorSetLayout }; + VkPipelineLayoutCreateInfo layout_info = {}; + layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + layout_info.setLayoutCount = 1; + layout_info.pSetLayouts = set_layout; + layout_info.pushConstantRangeCount = 1; + layout_info.pPushConstantRanges = push_constants; + err = vkCreatePipelineLayout(v->Device, &layout_info, v->Allocator, &g_PipelineLayout); + check_vk_result(err); + } + + VkPipelineShaderStageCreateInfo stage[2] = {}; + stage[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + stage[0].stage = VK_SHADER_STAGE_VERTEX_BIT; + stage[0].module = vert_module; + stage[0].pName = "main"; + stage[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + stage[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT; + stage[1].module = frag_module; + stage[1].pName = "main"; + + VkVertexInputBindingDescription binding_desc[1] = {}; + binding_desc[0].stride = sizeof(ImDrawVert); + binding_desc[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + + VkVertexInputAttributeDescription attribute_desc[3] = {}; + attribute_desc[0].location = 0; + 
attribute_desc[0].binding = binding_desc[0].binding; + attribute_desc[0].format = VK_FORMAT_R32G32_SFLOAT; + attribute_desc[0].offset = IM_OFFSETOF(ImDrawVert, pos); + attribute_desc[1].location = 1; + attribute_desc[1].binding = binding_desc[0].binding; + attribute_desc[1].format = VK_FORMAT_R32G32_SFLOAT; + attribute_desc[1].offset = IM_OFFSETOF(ImDrawVert, uv); + attribute_desc[2].location = 2; + attribute_desc[2].binding = binding_desc[0].binding; + attribute_desc[2].format = VK_FORMAT_R8G8B8A8_UNORM; + attribute_desc[2].offset = IM_OFFSETOF(ImDrawVert, col); + + VkPipelineVertexInputStateCreateInfo vertex_info = {}; + vertex_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertex_info.vertexBindingDescriptionCount = 1; + vertex_info.pVertexBindingDescriptions = binding_desc; + vertex_info.vertexAttributeDescriptionCount = 3; + vertex_info.pVertexAttributeDescriptions = attribute_desc; + + VkPipelineInputAssemblyStateCreateInfo ia_info = {}; + ia_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + ia_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; + + VkPipelineViewportStateCreateInfo viewport_info = {}; + viewport_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + viewport_info.viewportCount = 1; + viewport_info.scissorCount = 1; + + VkPipelineRasterizationStateCreateInfo raster_info = {}; + raster_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + raster_info.polygonMode = VK_POLYGON_MODE_FILL; + raster_info.cullMode = VK_CULL_MODE_NONE; + raster_info.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; + raster_info.lineWidth = 1.0f; + + VkPipelineMultisampleStateCreateInfo ms_info = {}; + ms_info.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + if (v->MSAASamples != 0) + ms_info.rasterizationSamples = v->MSAASamples; + else + ms_info.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; + + VkPipelineColorBlendAttachmentState color_attachment[1] = {}; + color_attachment[0].blendEnable = VK_TRUE; + color_attachment[0].srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; + color_attachment[0].dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + color_attachment[0].colorBlendOp = VK_BLEND_OP_ADD; + color_attachment[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + color_attachment[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; + color_attachment[0].alphaBlendOp = VK_BLEND_OP_ADD; + color_attachment[0].colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + + VkPipelineDepthStencilStateCreateInfo depth_info = {}; + depth_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; + + VkPipelineColorBlendStateCreateInfo blend_info = {}; + blend_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + blend_info.attachmentCount = 1; + blend_info.pAttachments = color_attachment; + + VkDynamicState dynamic_states[2] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR }; + VkPipelineDynamicStateCreateInfo dynamic_state = {}; + dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dynamic_state.dynamicStateCount = (uint32_t)IM_ARRAYSIZE(dynamic_states); + dynamic_state.pDynamicStates = dynamic_states; + + VkGraphicsPipelineCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + info.flags = g_PipelineCreateFlags; + info.stageCount = 2; + info.pStages = stage; + info.pVertexInputState = &vertex_info; + 
info.pInputAssemblyState = &ia_info; + info.pViewportState = &viewport_info; + info.pRasterizationState = &raster_info; + info.pMultisampleState = &ms_info; + info.pDepthStencilState = &depth_info; + info.pColorBlendState = &blend_info; + info.pDynamicState = &dynamic_state; + info.layout = g_PipelineLayout; + info.renderPass = g_RenderPass; + err = vkCreateGraphicsPipelines(v->Device, v->PipelineCache, 1, &info, v->Allocator, &g_Pipeline); + check_vk_result(err); + + vkDestroyShaderModule(v->Device, vert_module, v->Allocator); + vkDestroyShaderModule(v->Device, frag_module, v->Allocator); + + return true; +} + +void ImGui_ImplVulkan_DestroyFontUploadObjects() +{ + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + if (g_UploadBuffer) + { + vkDestroyBuffer(v->Device, g_UploadBuffer, v->Allocator); + g_UploadBuffer = VK_NULL_HANDLE; + } + if (g_UploadBufferMemory) + { + vkFreeMemory(v->Device, g_UploadBufferMemory, v->Allocator); + g_UploadBufferMemory = VK_NULL_HANDLE; + } +} + +void ImGui_ImplVulkan_DestroyDeviceObjects() +{ + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + ImGui_ImplVulkanH_DestroyWindowRenderBuffers(v->Device, &g_MainWindowRenderBuffers, v->Allocator); + ImGui_ImplVulkan_DestroyFontUploadObjects(); + + if (g_FontView) { vkDestroyImageView(v->Device, g_FontView, v->Allocator); g_FontView = VK_NULL_HANDLE; } + if (g_FontImage) { vkDestroyImage(v->Device, g_FontImage, v->Allocator); g_FontImage = VK_NULL_HANDLE; } + if (g_FontMemory) { vkFreeMemory(v->Device, g_FontMemory, v->Allocator); g_FontMemory = VK_NULL_HANDLE; } + if (g_FontSampler) { vkDestroySampler(v->Device, g_FontSampler, v->Allocator); g_FontSampler = VK_NULL_HANDLE; } + if (g_DescriptorSetLayout) { vkDestroyDescriptorSetLayout(v->Device, g_DescriptorSetLayout, v->Allocator); g_DescriptorSetLayout = VK_NULL_HANDLE; } + if (g_PipelineLayout) { vkDestroyPipelineLayout(v->Device, g_PipelineLayout, v->Allocator); g_PipelineLayout = VK_NULL_HANDLE; } + if (g_Pipeline) { vkDestroyPipeline(v->Device, g_Pipeline, v->Allocator); g_Pipeline = VK_NULL_HANDLE; } +} + +bool ImGui_ImplVulkan_Init(ImGui_ImplVulkan_InitInfo* info, VkRenderPass render_pass) +{ + // Setup back-end capabilities flags + ImGuiIO& io = ImGui::GetIO(); + io.BackendRendererName = "imgui_impl_vulkan"; + io.BackendFlags |= ImGuiBackendFlags_RendererHasVtxOffset; // We can honor the ImDrawCmd::VtxOffset field, allowing for large meshes. 
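+ // The asserts below cover the members of ImGui_ImplVulkan_InitInfo that are required; PipelineCache, Allocator, MSAASamples and CheckVkResultFn are optional and may be left zero-cleared.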
+ + IM_ASSERT(info->Instance != VK_NULL_HANDLE); + IM_ASSERT(info->PhysicalDevice != VK_NULL_HANDLE); + IM_ASSERT(info->Device != VK_NULL_HANDLE); + IM_ASSERT(info->Queue != VK_NULL_HANDLE); + IM_ASSERT(info->DescriptorPool != VK_NULL_HANDLE); + IM_ASSERT(info->MinImageCount >= 2); + IM_ASSERT(info->ImageCount >= info->MinImageCount); + IM_ASSERT(render_pass != VK_NULL_HANDLE); + + g_VulkanInitInfo = *info; + g_RenderPass = render_pass; + ImGui_ImplVulkan_CreateDeviceObjects(); + + return true; +} + +void ImGui_ImplVulkan_Shutdown() +{ + ImGui_ImplVulkan_DestroyDeviceObjects(); +} + +void ImGui_ImplVulkan_NewFrame() +{ +} + +void ImGui_ImplVulkan_SetMinImageCount(uint32_t min_image_count) +{ + IM_ASSERT(min_image_count >= 2); + if (g_VulkanInitInfo.MinImageCount == min_image_count) + return; + + ImGui_ImplVulkan_InitInfo* v = &g_VulkanInitInfo; + VkResult err = vkDeviceWaitIdle(v->Device); + check_vk_result(err); + ImGui_ImplVulkanH_DestroyWindowRenderBuffers(v->Device, &g_MainWindowRenderBuffers, v->Allocator); + g_VulkanInitInfo.MinImageCount = min_image_count; +} + + +//------------------------------------------------------------------------- +// Internal / Miscellaneous Vulkan Helpers +// (Used by example's main.cpp. Used by multi-viewport features. PROBABLY NOT used by your own app.) +//------------------------------------------------------------------------- +// You probably do NOT need to use or care about those functions. +// Those functions only exist because: +// 1) they facilitate the readability and maintenance of the multiple main.cpp examples files. +// 2) the upcoming multi-viewport feature will need them internally. +// Generally we avoid exposing any kind of superfluous high-level helpers in the bindings, +// but it is too much code to duplicate everywhere so we exceptionally expose them. +// +// Your engine/app will likely _already_ have code to setup all that stuff (swap chain, render pass, frame buffers, etc.). +// You may read this code to learn about Vulkan, but it is recommended you use your own custom-tailored code to do equivalent work. +// (The ImGui_ImplVulkanH_XXX functions do not interact with any of the state used by the regular ImGui_ImplVulkan_XXX functions) +//------------------------------------------------------------------------- + +VkSurfaceFormatKHR ImGui_ImplVulkanH_SelectSurfaceFormat(VkPhysicalDevice physical_device, VkSurfaceKHR surface, const VkFormat* request_formats, int request_formats_count, VkColorSpaceKHR request_color_space) +{ + IM_ASSERT(request_formats != NULL); + IM_ASSERT(request_formats_count > 0); + + // Per Spec Format and View Format are expected to be the same unless VK_IMAGE_CREATE_MUTABLE_BIT was set at image creation + // Assuming that the default behavior is without setting this bit, there is no need for separate Swapchain image and image view format + // Additionally several new color spaces were introduced with Vulkan Spec v1.0.40, + // hence we must make sure that a format with the most widely available color space, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, is found and used.
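+ // An illustrative call (all variable names here are placeholders, not part of this API): +// const VkFormat wanted[] = { VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM }; +// VkSurfaceFormatKHR fmt = ImGui_ImplVulkanH_SelectSurfaceFormat(physical_device, surface, wanted, 2, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR);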
+ uint32_t avail_count; + vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &avail_count, NULL); + ImVector<VkSurfaceFormatKHR> avail_format; + avail_format.resize((int)avail_count); + vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &avail_count, avail_format.Data); + + // First check if only one format, VK_FORMAT_UNDEFINED, is available, which would imply that any format is available + if (avail_count == 1) + { + if (avail_format[0].format == VK_FORMAT_UNDEFINED) + { + VkSurfaceFormatKHR ret; + ret.format = request_formats[0]; + ret.colorSpace = request_color_space; + return ret; + } + else + { + // No point in searching another format + return avail_format[0]; + } + } + else + { + // Request several formats, the first found will be used + for (int request_i = 0; request_i < request_formats_count; request_i++) + for (uint32_t avail_i = 0; avail_i < avail_count; avail_i++) + if (avail_format[avail_i].format == request_formats[request_i] && avail_format[avail_i].colorSpace == request_color_space) + return avail_format[avail_i]; + + // If none of the requested image formats could be found, use the first available + return avail_format[0]; + } +} + +VkPresentModeKHR ImGui_ImplVulkanH_SelectPresentMode(VkPhysicalDevice physical_device, VkSurfaceKHR surface, const VkPresentModeKHR* request_modes, int request_modes_count) +{ + IM_ASSERT(request_modes != NULL); + IM_ASSERT(request_modes_count > 0); + + // Request a certain mode and confirm that it is available. If not, use VK_PRESENT_MODE_FIFO_KHR, which is mandatory + uint32_t avail_count = 0; + vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &avail_count, NULL); + ImVector<VkPresentModeKHR> avail_modes; + avail_modes.resize((int)avail_count); + vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &avail_count, avail_modes.Data); + //for (uint32_t avail_i = 0; avail_i < avail_count; avail_i++) + // printf("[vulkan] avail_modes[%d] = %d\n", avail_i, avail_modes[avail_i]); + + for (int request_i = 0; request_i < request_modes_count; request_i++) + for (uint32_t avail_i = 0; avail_i < avail_count; avail_i++) + if (request_modes[request_i] == avail_modes[avail_i]) + return request_modes[request_i]; + + return VK_PRESENT_MODE_FIFO_KHR; // Always available +} + +void ImGui_ImplVulkanH_CreateWindowCommandBuffers(VkPhysicalDevice physical_device, VkDevice device, ImGui_ImplVulkanH_Window* wd, uint32_t queue_family, const VkAllocationCallbacks* allocator) +{ + IM_ASSERT(physical_device != VK_NULL_HANDLE && device != VK_NULL_HANDLE); + (void)physical_device; + (void)allocator; + + // Create Command Buffers + VkResult err; + for (uint32_t i = 0; i < wd->ImageCount; i++) + { + ImGui_ImplVulkanH_Frame* fd = &wd->Frames[i]; + ImGui_ImplVulkanH_FrameSemaphores* fsd = &wd->FrameSemaphores[i]; + { + VkCommandPoolCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; + info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; + info.queueFamilyIndex = queue_family; + err = vkCreateCommandPool(device, &info, allocator, &fd->CommandPool); + check_vk_result(err); + } + { + VkCommandBufferAllocateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; + info.commandPool = fd->CommandPool; + info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; + info.commandBufferCount = 1; + err = vkAllocateCommandBuffers(device, &info, &fd->CommandBuffer); + check_vk_result(err); + } + { + VkFenceCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; + info.flags = VK_FENCE_CREATE_SIGNALED_BIT; +
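+ // (Created signaled so the very first wait on this fence returns immediately, before any work has been submitted.)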
err = vkCreateFence(device, &info, allocator, &fd->Fence); + check_vk_result(err); + } + { + VkSemaphoreCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; + err = vkCreateSemaphore(device, &info, allocator, &fsd->ImageAcquiredSemaphore); + check_vk_result(err); + err = vkCreateSemaphore(device, &info, allocator, &fsd->RenderCompleteSemaphore); + check_vk_result(err); + } + } +} + +int ImGui_ImplVulkanH_GetMinImageCountFromPresentMode(VkPresentModeKHR present_mode) +{ + if (present_mode == VK_PRESENT_MODE_MAILBOX_KHR) + return 3; + if (present_mode == VK_PRESENT_MODE_FIFO_KHR || present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) + return 2; + if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) + return 1; + IM_ASSERT(0); + return 1; +} + +// Also destroy old swap chain and in-flight frames data, if any. +void ImGui_ImplVulkanH_CreateWindowSwapChain(VkPhysicalDevice physical_device, VkDevice device, ImGui_ImplVulkanH_Window* wd, const VkAllocationCallbacks* allocator, int w, int h, uint32_t min_image_count) +{ + VkResult err; + VkSwapchainKHR old_swapchain = wd->Swapchain; + err = vkDeviceWaitIdle(device); + check_vk_result(err); + + // We don't use ImGui_ImplVulkanH_DestroyWindow() because we want to preserve the old swapchain to create the new one. + // Destroy old Framebuffer + for (uint32_t i = 0; i < wd->ImageCount; i++) + { + ImGui_ImplVulkanH_DestroyFrame(device, &wd->Frames[i], allocator); + ImGui_ImplVulkanH_DestroyFrameSemaphores(device, &wd->FrameSemaphores[i], allocator); + } + IM_FREE(wd->Frames); + IM_FREE(wd->FrameSemaphores); + wd->Frames = NULL; + wd->FrameSemaphores = NULL; + wd->ImageCount = 0; + if (wd->RenderPass) + vkDestroyRenderPass(device, wd->RenderPass, allocator); + + // If min image count was not specified, request different count of images dependent on selected present mode + if (min_image_count == 0) + min_image_count = ImGui_ImplVulkanH_GetMinImageCountFromPresentMode(wd->PresentMode); + + // Create Swapchain + { + VkSwapchainCreateInfoKHR info = {}; + info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; + info.surface = wd->Surface; + info.minImageCount = min_image_count; + info.imageFormat = wd->SurfaceFormat.format; + info.imageColorSpace = wd->SurfaceFormat.colorSpace; + info.imageArrayLayers = 1; + info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; // Assume that graphics family == present family + info.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; + info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; + info.presentMode = wd->PresentMode; + info.clipped = VK_TRUE; + info.oldSwapchain = old_swapchain; + VkSurfaceCapabilitiesKHR cap; + err = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, wd->Surface, &cap); + check_vk_result(err); + if (info.minImageCount < cap.minImageCount) + info.minImageCount = cap.minImageCount; + else if (cap.maxImageCount != 0 && info.minImageCount > cap.maxImageCount) + info.minImageCount = cap.maxImageCount; + + if (cap.currentExtent.width == 0xffffffff) + { + info.imageExtent.width = wd->Width = w; + info.imageExtent.height = wd->Height = h; + } + else + { + info.imageExtent.width = wd->Width = cap.currentExtent.width; + info.imageExtent.height = wd->Height = cap.currentExtent.height; + } + err = vkCreateSwapchainKHR(device, &info, allocator, &wd->Swapchain); + check_vk_result(err); + err = vkGetSwapchainImagesKHR(device, wd->Swapchain, &wd->ImageCount, NULL); + check_vk_result(err); + VkImage backbuffers[16] = {}; + 
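+ // (Standard two-call pattern: the vkGetSwapchainImagesKHR call above queried the image count, the call below fills the array; the fixed backbuffers[16] assumes at most 16 swapchain images, which the assert enforces.)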
IM_ASSERT(wd->ImageCount >= min_image_count); + IM_ASSERT(wd->ImageCount < IM_ARRAYSIZE(backbuffers)); + err = vkGetSwapchainImagesKHR(device, wd->Swapchain, &wd->ImageCount, backbuffers); + check_vk_result(err); + + IM_ASSERT(wd->Frames == NULL); + wd->Frames = (ImGui_ImplVulkanH_Frame*)IM_ALLOC(sizeof(ImGui_ImplVulkanH_Frame) * wd->ImageCount); + wd->FrameSemaphores = (ImGui_ImplVulkanH_FrameSemaphores*)IM_ALLOC(sizeof(ImGui_ImplVulkanH_FrameSemaphores) * wd->ImageCount); + memset(wd->Frames, 0, sizeof(wd->Frames[0]) * wd->ImageCount); + memset(wd->FrameSemaphores, 0, sizeof(wd->FrameSemaphores[0]) * wd->ImageCount); + for (uint32_t i = 0; i < wd->ImageCount; i++) + wd->Frames[i].Backbuffer = backbuffers[i]; + } + if (old_swapchain) + vkDestroySwapchainKHR(device, old_swapchain, allocator); + + // Create the Render Pass + { + VkAttachmentDescription attachment = {}; + attachment.format = wd->SurfaceFormat.format; + attachment.samples = VK_SAMPLE_COUNT_1_BIT; + attachment.loadOp = wd->ClearEnable ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_DONT_CARE; + attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; + attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; + attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; + attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; + VkAttachmentReference color_attachment = {}; + color_attachment.attachment = 0; + color_attachment.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + VkSubpassDescription subpass = {}; + subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; + subpass.colorAttachmentCount = 1; + subpass.pColorAttachments = &color_attachment; + VkSubpassDependency dependency = {}; + dependency.srcSubpass = VK_SUBPASS_EXTERNAL; + dependency.dstSubpass = 0; + dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + dependency.srcAccessMask = 0; + dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + VkRenderPassCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; + info.attachmentCount = 1; + info.pAttachments = &attachment; + info.subpassCount = 1; + info.pSubpasses = &subpass; + info.dependencyCount = 1; + info.pDependencies = &dependency; + err = vkCreateRenderPass(device, &info, allocator, &wd->RenderPass); + check_vk_result(err); + } + + // Create The Image Views + { + VkImageViewCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + info.viewType = VK_IMAGE_VIEW_TYPE_2D; + info.format = wd->SurfaceFormat.format; + info.components.r = VK_COMPONENT_SWIZZLE_R; + info.components.g = VK_COMPONENT_SWIZZLE_G; + info.components.b = VK_COMPONENT_SWIZZLE_B; + info.components.a = VK_COMPONENT_SWIZZLE_A; + VkImageSubresourceRange image_range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }; + info.subresourceRange = image_range; + for (uint32_t i = 0; i < wd->ImageCount; i++) + { + ImGui_ImplVulkanH_Frame* fd = &wd->Frames[i]; + info.image = fd->Backbuffer; + err = vkCreateImageView(device, &info, allocator, &fd->BackbufferView); + check_vk_result(err); + } + } + + // Create Framebuffer + { + VkImageView attachment[1]; + VkFramebufferCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; + info.renderPass = wd->RenderPass; + info.attachmentCount = 1; + info.pAttachments = attachment; + info.width = wd->Width; + info.height = wd->Height; + info.layers = 1; + for (uint32_t i = 0; i < 
wd->ImageCount; i++) + { + ImGui_ImplVulkanH_Frame* fd = &wd->Frames[i]; + attachment[0] = fd->BackbufferView; + err = vkCreateFramebuffer(device, &info, allocator, &fd->Framebuffer); + check_vk_result(err); + } + } +} + +void ImGui_ImplVulkanH_CreateWindow(VkInstance instance, VkPhysicalDevice physical_device, VkDevice device, ImGui_ImplVulkanH_Window* wd, uint32_t queue_family, const VkAllocationCallbacks* allocator, int width, int height, uint32_t min_image_count) +{ + (void)instance; + ImGui_ImplVulkanH_CreateWindowSwapChain(physical_device, device, wd, allocator, width, height, min_image_count); + ImGui_ImplVulkanH_CreateWindowCommandBuffers(physical_device, device, wd, queue_family, allocator); +} + +void ImGui_ImplVulkanH_DestroyWindow(VkInstance instance, VkDevice device, ImGui_ImplVulkanH_Window* wd, const VkAllocationCallbacks* allocator) +{ + vkDeviceWaitIdle(device); // FIXME: We could wait on the Queue if we had the queue in wd-> (otherwise VulkanH functions can't use globals) + //vkQueueWaitIdle(g_Queue); + + for (uint32_t i = 0; i < wd->ImageCount; i++) + { + ImGui_ImplVulkanH_DestroyFrame(device, &wd->Frames[i], allocator); + ImGui_ImplVulkanH_DestroyFrameSemaphores(device, &wd->FrameSemaphores[i], allocator); + } + IM_FREE(wd->Frames); + IM_FREE(wd->FrameSemaphores); + wd->Frames = NULL; + wd->FrameSemaphores = NULL; + vkDestroyRenderPass(device, wd->RenderPass, allocator); + vkDestroySwapchainKHR(device, wd->Swapchain, allocator); + vkDestroySurfaceKHR(instance, wd->Surface, allocator); + + *wd = ImGui_ImplVulkanH_Window(); +} + +void ImGui_ImplVulkanH_DestroyFrame(VkDevice device, ImGui_ImplVulkanH_Frame* fd, const VkAllocationCallbacks* allocator) +{ + vkDestroyFence(device, fd->Fence, allocator); + vkFreeCommandBuffers(device, fd->CommandPool, 1, &fd->CommandBuffer); + vkDestroyCommandPool(device, fd->CommandPool, allocator); + fd->Fence = VK_NULL_HANDLE; + fd->CommandBuffer = VK_NULL_HANDLE; + fd->CommandPool = VK_NULL_HANDLE; + + vkDestroyImageView(device, fd->BackbufferView, allocator); + vkDestroyFramebuffer(device, fd->Framebuffer, allocator); +} + +void ImGui_ImplVulkanH_DestroyFrameSemaphores(VkDevice device, ImGui_ImplVulkanH_FrameSemaphores* fsd, const VkAllocationCallbacks* allocator) +{ + vkDestroySemaphore(device, fsd->ImageAcquiredSemaphore, allocator); + vkDestroySemaphore(device, fsd->RenderCompleteSemaphore, allocator); + fsd->ImageAcquiredSemaphore = fsd->RenderCompleteSemaphore = VK_NULL_HANDLE; +} + +void ImGui_ImplVulkanH_DestroyFrameRenderBuffers(VkDevice device, ImGui_ImplVulkanH_FrameRenderBuffers* buffers, const VkAllocationCallbacks* allocator) +{ + if (buffers->VertexBuffer) { vkDestroyBuffer(device, buffers->VertexBuffer, allocator); buffers->VertexBuffer = VK_NULL_HANDLE; } + if (buffers->VertexBufferMemory) { vkFreeMemory(device, buffers->VertexBufferMemory, allocator); buffers->VertexBufferMemory = VK_NULL_HANDLE; } + if (buffers->IndexBuffer) { vkDestroyBuffer(device, buffers->IndexBuffer, allocator); buffers->IndexBuffer = VK_NULL_HANDLE; } + if (buffers->IndexBufferMemory) { vkFreeMemory(device, buffers->IndexBufferMemory, allocator); buffers->IndexBufferMemory = VK_NULL_HANDLE; } + buffers->VertexBufferSize = 0; + buffers->IndexBufferSize = 0; +} + +void ImGui_ImplVulkanH_DestroyWindowRenderBuffers(VkDevice device, ImGui_ImplVulkanH_WindowRenderBuffers* buffers, const VkAllocationCallbacks* allocator) +{ + for (uint32_t n = 0; n < buffers->Count; n++) + ImGui_ImplVulkanH_DestroyFrameRenderBuffers(device, 
&buffers->FrameRenderBuffers[n], allocator); + IM_FREE(buffers->FrameRenderBuffers); + buffers->FrameRenderBuffers = NULL; + buffers->Index = 0; + buffers->Count = 0; +} diff --git a/dep/vulkan-loader/CMakeLists.txt b/dep/vulkan-loader/CMakeLists.txt new file mode 100644 index 000000000..8ce9b7fed --- /dev/null +++ b/dep/vulkan-loader/CMakeLists.txt @@ -0,0 +1,32 @@ +set(SRCS + include/vulkan_loader.h + include/vulkan/vulkan_android.h + include/vulkan/vulkan_win32.h + include/vulkan/vk_layer.h + include/vulkan/vulkan.h + include/vulkan/vulkan_metal.h + include/vulkan/vulkan_core.h + include/vulkan/vulkan_fuchsia.h + include/vulkan/vulkan_xlib_xrandr.h + include/vulkan/vk_sdk_platform.h + include/vulkan/vulkan_xlib.h + include/vulkan/vulkan_ios.h + include/vulkan/vk_platform.h + include/vulkan/vulkan_ggp.h + include/vulkan/vulkan_wayland.h + include/vulkan/vulkan_xcb.h + include/vulkan/vulkan_vi.h + include/vulkan/vulkan_beta.h + include/vulkan/vk_icd.h + include/vulkan/vulkan_macos.h + src/vulkan_loader.cpp +) + +add_library(vulkan-loader ${SRCS}) +target_include_directories(vulkan-loader PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") +target_link_libraries(vulkan-loader PRIVATE ${CMAKE_DL_LIBS}) + +if(USE_X11) + target_compile_definitions(vulkan-loader PUBLIC "VULKAN_USE_X11=1") +endif() + diff --git a/dep/vulkan-loader/include/vulkan/vk_icd.h b/dep/vulkan-loader/include/vulkan/vk_icd.h new file mode 100644 index 000000000..5dff59a16 --- /dev/null +++ b/dep/vulkan-loader/include/vulkan/vk_icd.h @@ -0,0 +1,183 @@ +// +// File: vk_icd.h +// +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef VKICD_H +#define VKICD_H + +#include "vulkan.h" +#include <stdbool.h> + +// Loader-ICD version negotiation API. Versions add the following features: +// Version 0 - Initial. Doesn't support vk_icdGetInstanceProcAddr +// or vk_icdNegotiateLoaderICDInterfaceVersion. +// Version 1 - Add support for vk_icdGetInstanceProcAddr. +// Version 2 - Add Loader/ICD Interface version negotiation +// via vk_icdNegotiateLoaderICDInterfaceVersion. +// Version 3 - Add ICD creation/destruction of KHR_surface objects. +// Version 4 - Add unknown physical device extension querying via +// vk_icdGetPhysicalDeviceProcAddr. +// Version 5 - Tells ICDs that the loader is now paying attention to the +// application version of Vulkan passed into the ApplicationInfo +// structure during vkCreateInstance. This will tell the ICD +// that if the loader is older, it should automatically fail a +// call for any API version > 1.0. Otherwise, the loader will +// manually determine if it can support the expected version.
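A minimal sketch of how an ICD could implement the negotiation entry point described in the comment above, assuming the interface-version macros defined just below; the loader proposes the newest version it knows, and the ICD clamps it down or fails if the loader is too old:

VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pVersion)
{
    // Loader predates anything this ICD can speak.
    if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION)
        return VK_ERROR_INCOMPATIBLE_DRIVER;
    // Otherwise lower the proposal to the newest version the ICD implements.
    if (*pVersion > CURRENT_LOADER_ICD_INTERFACE_VERSION)
        *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
    return VK_SUCCESS;
}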
+#define CURRENT_LOADER_ICD_INTERFACE_VERSION 5 +#define MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION 0 +#define MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION 4 +typedef VkResult(VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion); + +// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this +// file directly, it won't be found. +#ifndef PFN_GetPhysicalDeviceProcAddr +typedef PFN_vkVoidFunction(VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char *pName); +#endif + +/* + * The ICD must reserve space for a pointer for the loader's dispatch + * table, at the start of <each object>. + * The ICD must initialize this variable using the SET_LOADER_MAGIC_VALUE macro. + */ + +#define ICD_LOADER_MAGIC 0x01CDC0DE + +typedef union { + uintptr_t loaderMagic; + void *loaderData; +} VK_LOADER_DATA; + +static inline void set_loader_magic_value(void *pNewObject) { + VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject; + loader_info->loaderMagic = ICD_LOADER_MAGIC; +} + +static inline bool valid_loader_magic_value(void *pNewObject) { + const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject; + return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC; +} + +/* + * Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that + * contains the platform-specific connection and surface information. + */ +typedef enum { + VK_ICD_WSI_PLATFORM_MIR, + VK_ICD_WSI_PLATFORM_WAYLAND, + VK_ICD_WSI_PLATFORM_WIN32, + VK_ICD_WSI_PLATFORM_XCB, + VK_ICD_WSI_PLATFORM_XLIB, + VK_ICD_WSI_PLATFORM_ANDROID, + VK_ICD_WSI_PLATFORM_MACOS, + VK_ICD_WSI_PLATFORM_IOS, + VK_ICD_WSI_PLATFORM_DISPLAY, + VK_ICD_WSI_PLATFORM_HEADLESS, + VK_ICD_WSI_PLATFORM_METAL, +} VkIcdWsiPlatform; + +typedef struct { + VkIcdWsiPlatform platform; +} VkIcdSurfaceBase; + +#ifdef VK_USE_PLATFORM_MIR_KHR +typedef struct { + VkIcdSurfaceBase base; + MirConnection *connection; + MirSurface *mirSurface; +} VkIcdSurfaceMir; +#endif // VK_USE_PLATFORM_MIR_KHR + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +typedef struct { + VkIcdSurfaceBase base; + struct wl_display *display; + struct wl_surface *surface; +} VkIcdSurfaceWayland; +#endif // VK_USE_PLATFORM_WAYLAND_KHR + +#ifdef VK_USE_PLATFORM_WIN32_KHR +typedef struct { + VkIcdSurfaceBase base; + HINSTANCE hinstance; + HWND hwnd; +} VkIcdSurfaceWin32; +#endif // VK_USE_PLATFORM_WIN32_KHR + +#ifdef VK_USE_PLATFORM_XCB_KHR +typedef struct { + VkIcdSurfaceBase base; + xcb_connection_t *connection; + xcb_window_t window; +} VkIcdSurfaceXcb; +#endif // VK_USE_PLATFORM_XCB_KHR + +#ifdef VK_USE_PLATFORM_XLIB_KHR +typedef struct { + VkIcdSurfaceBase base; + Display *dpy; + Window window; +} VkIcdSurfaceXlib; +#endif // VK_USE_PLATFORM_XLIB_KHR + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +typedef struct { + VkIcdSurfaceBase base; + struct ANativeWindow *window; +} VkIcdSurfaceAndroid; +#endif // VK_USE_PLATFORM_ANDROID_KHR + +#ifdef VK_USE_PLATFORM_MACOS_MVK +typedef struct { + VkIcdSurfaceBase base; + const void *pView; +} VkIcdSurfaceMacOS; +#endif // VK_USE_PLATFORM_MACOS_MVK + +#ifdef VK_USE_PLATFORM_IOS_MVK +typedef struct { + VkIcdSurfaceBase base; + const void *pView; +} VkIcdSurfaceIOS; +#endif // VK_USE_PLATFORM_IOS_MVK + +typedef struct { + VkIcdSurfaceBase base; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkIcdSurfaceDisplay; + +typedef 
struct { + VkIcdSurfaceBase base; +} VkIcdSurfaceHeadless; + +#ifdef VK_USE_PLATFORM_METAL_EXT +typedef struct { + VkIcdSurfaceBase base; + const CAMetalLayer *pLayer; +} VkIcdSurfaceMetal; +#endif // VK_USE_PLATFORM_METAL_EXT + +#endif // VKICD_H diff --git a/dep/vulkan-loader/include/vulkan/vk_layer.h b/dep/vulkan-loader/include/vulkan/vk_layer.h new file mode 100644 index 000000000..fa7652008 --- /dev/null +++ b/dep/vulkan-loader/include/vulkan/vk_layer.h @@ -0,0 +1,202 @@ +// +// File: vk_layer.h +// +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Need to define dispatch table + * Core struct can then have ptr to dispatch table at the top + * Along with object ptrs for current and next OBJ + */ +#pragma once + +#include "vulkan.h" +#if defined(__GNUC__) && __GNUC__ >= 4 +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#else +#define VK_LAYER_EXPORT +#endif + +#define MAX_NUM_UNKNOWN_EXTS 250 + + // Loader-Layer version negotiation API. Versions add the following features: + // Versions 0/1 - Initial. Doesn't support vk_layerGetPhysicalDeviceProcAddr + // or vk_icdNegotiateLoaderLayerInterfaceVersion. + // Version 2 - Add support for vk_layerGetPhysicalDeviceProcAddr and + // vk_icdNegotiateLoaderLayerInterfaceVersion. +#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2 +#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1 + +#define VK_CURRENT_CHAIN_VERSION 1 + +// Typedef for use in the interfaces below +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName); + +// Version negotiation values +typedef enum VkNegotiateLayerStructType { + LAYER_NEGOTIATE_UNINTIALIZED = 0, + LAYER_NEGOTIATE_INTERFACE_STRUCT = 1, +} VkNegotiateLayerStructType; + +// Version negotiation structures +typedef struct VkNegotiateLayerInterface { + VkNegotiateLayerStructType sType; + void *pNext; + uint32_t loaderLayerInterfaceVersion; + PFN_vkGetInstanceProcAddr pfnGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnGetPhysicalDeviceProcAddr; +} VkNegotiateLayerInterface; + +// Version negotiation functions +typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderLayerInterfaceVersion)(VkNegotiateLayerInterface *pVersionStruct); + +// Function prototype for unknown physical device extension command +typedef VkResult(VKAPI_PTR *PFN_PhysDevExt)(VkPhysicalDevice phys_device); + +// ------------------------------------------------------------------------------------------------ +// CreateInstance and CreateDevice support structures + +/* Sub type of structure for instance and device loader ext of CreateInfo. 
+ * When sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + * or sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + * then VkLayerFunction indicates struct type pointed to by pNext + */ +typedef enum VkLayerFunction_ { + VK_LAYER_LINK_INFO = 0, + VK_LOADER_DATA_CALLBACK = 1, + VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK = 2 +} VkLayerFunction; + +typedef struct VkLayerInstanceLink_ { + struct VkLayerInstanceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnNextGetPhysicalDeviceProcAddr; +} VkLayerInstanceLink; + +/* + * When creating the device chain the loader needs to pass + * down information about its device structure needed at + * the end of the chain. Passing the data via the + * VkLayerDeviceInfo avoids issues with finding the + * exact instance being used. + */ +typedef struct VkLayerDeviceInfo_ { + void *device_info; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; +} VkLayerDeviceInfo; + +typedef VkResult (VKAPI_PTR *PFN_vkSetInstanceLoaderData)(VkInstance instance, + void *object); +typedef VkResult (VKAPI_PTR *PFN_vkSetDeviceLoaderData)(VkDevice device, + void *object); +typedef VkResult (VKAPI_PTR *PFN_vkLayerCreateDevice)(VkInstance instance, VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA); +typedef void (VKAPI_PTR *PFN_vkLayerDestroyDevice)(VkDevice physicalDevice, const VkAllocationCallbacks *pAllocator, PFN_vkDestroyDevice destroyFunction); +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerInstanceLink *pLayerInfo; + PFN_vkSetInstanceLoaderData pfnSetInstanceLoaderData; + struct { + PFN_vkLayerCreateDevice pfnLayerCreateDevice; + PFN_vkLayerDestroyDevice pfnLayerDestroyDevice; + } layerDevice; + } u; +} VkLayerInstanceCreateInfo; + +typedef struct VkLayerDeviceLink_ { + struct VkLayerDeviceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnNextGetDeviceProcAddr; +} VkLayerDeviceLink; + +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerDeviceLink *pLayerInfo; + PFN_vkSetDeviceLoaderData pfnSetDeviceLoaderData; + } u; +} VkLayerDeviceCreateInfo; + +#ifdef __cplusplus +extern "C" { +#endif + +VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct); + +typedef enum VkChainType { + VK_CHAIN_TYPE_UNKNOWN = 0, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES = 1, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES = 2, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION = 3, +} VkChainType; + +typedef struct VkChainHeader { + VkChainType type; + uint32_t version; + uint32_t size; +} VkChainHeader; + +typedef struct VkEnumerateInstanceExtensionPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceExtensionPropertiesChain *, const char *, uint32_t *, + VkExtensionProperties *); + const struct VkEnumerateInstanceExtensionPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const { + return pfnNextLayer(pNextLink, pLayerName, pPropertyCount, pProperties); + } +#endif +} 
VkEnumerateInstanceExtensionPropertiesChain; + +typedef struct VkEnumerateInstanceLayerPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceLayerPropertiesChain *, uint32_t *, VkLayerProperties *); + const struct VkEnumerateInstanceLayerPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pPropertyCount, VkLayerProperties *pProperties) const { + return pfnNextLayer(pNextLink, pPropertyCount, pProperties); + } +#endif +} VkEnumerateInstanceLayerPropertiesChain; + +typedef struct VkEnumerateInstanceVersionChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceVersionChain *, uint32_t *); + const struct VkEnumerateInstanceVersionChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pApiVersion) const { + return pfnNextLayer(pNextLink, pApiVersion); + } +#endif +} VkEnumerateInstanceVersionChain; + +#ifdef __cplusplus +} +#endif diff --git a/dep/vulkan-loader/include/vulkan/vk_platform.h b/dep/vulkan-loader/include/vulkan/vk_platform.h new file mode 100644 index 000000000..dbb011285 --- /dev/null +++ b/dep/vulkan-loader/include/vulkan/vk_platform.h @@ -0,0 +1,92 @@ +// +// File: vk_platform.h +// +/* +** Copyright (c) 2014-2020 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + + +#ifndef VK_PLATFORM_H_ +#define VK_PLATFORM_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif // __cplusplus + +/* +*************************************************************************************************** +* Platform-specific directives and type declarations +*************************************************************************************************** +*/ + +/* Platform-specific calling convention macros. + * + * Platforms should define these so that Vulkan clients call Vulkan commands + * with the same calling conventions that the Vulkan implementation expects. + * + * VKAPI_ATTR - Placed before the return type in function declarations. + * Useful for C++11 and GCC/Clang-style function attribute syntax. + * VKAPI_CALL - Placed after the return type in function declarations. + * Useful for MSVC-style calling convention syntax. + * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. + * + * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); + * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); + */ +#if defined(_WIN32) + // On Windows, Vulkan commands use the stdcall convention + #define VKAPI_ATTR + #define VKAPI_CALL __stdcall + #define VKAPI_PTR VKAPI_CALL +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 + #error "Vulkan isn't supported for the 'armeabi' NDK ABI" +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) + // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" + // calling convention, i.e. float parameters are passed in registers. 
This + // is true even if the rest of the application passes floats on the stack, + // as it does by default when compiling for the armeabi-v7a NDK ABI. + #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) + #define VKAPI_CALL + #define VKAPI_PTR VKAPI_ATTR +#else + // On other platforms, use the default calling convention + #define VKAPI_ATTR + #define VKAPI_CALL + #define VKAPI_PTR +#endif + +#include <stddef.h> + +#if !defined(VK_NO_STDINT_H) + #if defined(_MSC_VER) && (_MSC_VER < 1600) + typedef signed __int8 int8_t; + typedef unsigned __int8 uint8_t; + typedef signed __int16 int16_t; + typedef unsigned __int16 uint16_t; + typedef signed __int32 int32_t; + typedef unsigned __int32 uint32_t; + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; + #else + #include <stdint.h> + #endif +#endif // !defined(VK_NO_STDINT_H) + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif diff --git a/dep/vulkan-loader/include/vulkan/vk_sdk_platform.h b/dep/vulkan-loader/include/vulkan/vk_sdk_platform.h new file mode 100644 index 000000000..96d867694 --- /dev/null +++ b/dep/vulkan-loader/include/vulkan/vk_sdk_platform.h @@ -0,0 +1,69 @@ +// +// File: vk_sdk_platform.h +// +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VK_SDK_PLATFORM_H +#define VK_SDK_PLATFORM_H + +#if defined(_WIN32) +#define NOMINMAX +#ifndef __cplusplus +#undef inline +#define inline __inline +#endif // __cplusplus + +#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) +// C99: +// Microsoft didn't implement C99 in Visual Studio; but started adding it with +// VS2013. However, VS2013 still didn't have snprintf(). The following is a +// work-around (Note: The _CRT_SECURE_NO_WARNINGS macro must be set in the +// "CMakeLists.txt" file). +// NOTE: This is fixed in Visual Studio 2015. +#define snprintf _snprintf +#endif + +#define strdup _strdup + +#endif // _WIN32 + +// Check for noexcept support using clang, with fallback to Windows or GCC version numbers +#ifndef NOEXCEPT +#if defined(__clang__) +#if __has_feature(cxx_noexcept) +#define HAS_NOEXCEPT +#endif +#else +#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46 +#define HAS_NOEXCEPT +#else +#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS +#define HAS_NOEXCEPT +#endif +#endif +#endif + +#ifdef HAS_NOEXCEPT +#define NOEXCEPT noexcept +#else +#define NOEXCEPT +#endif +#endif + +#endif // VK_SDK_PLATFORM_H diff --git a/dep/vulkan-loader/include/vulkan/vulkan.h b/dep/vulkan-loader/include/vulkan/vulkan.h new file mode 100644 index 000000000..20ecd1016 --- /dev/null +++ b/dep/vulkan-loader/include/vulkan/vulkan.h @@ -0,0 +1,91 @@ +#ifndef VULKAN_H_ +#define VULKAN_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. 
+** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#include "vk_platform.h" +#include "vulkan_core.h" + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +#include "vulkan_android.h" +#endif + +#ifdef VK_USE_PLATFORM_FUCHSIA +#include <zircon/types.h> +#include "vulkan_fuchsia.h" +#endif + +#ifdef VK_USE_PLATFORM_IOS_MVK +#include "vulkan_ios.h" +#endif + + +#ifdef VK_USE_PLATFORM_MACOS_MVK +#include "vulkan_macos.h" +#endif + +#ifdef VK_USE_PLATFORM_METAL_EXT +#include "vulkan_metal.h" +#endif + +#ifdef VK_USE_PLATFORM_VI_NN +#include "vulkan_vi.h" +#endif + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +#include <wayland-client.h> +#include "vulkan_wayland.h" +#endif + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#include <windows.h> +#include "vulkan_win32.h" +#endif + + +#ifdef VK_USE_PLATFORM_XCB_KHR +#include <xcb/xcb.h> +#include "vulkan_xcb.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_KHR +#include <X11/Xlib.h> +#include "vulkan_xlib.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT +#include <X11/Xlib.h> +#include <X11/extensions/Xrandr.h> +#include "vulkan_xlib_xrandr.h" +#endif + + +#ifdef VK_USE_PLATFORM_GGP +#include <ggp_c/vulkan_types.h> +#include "vulkan_ggp.h" +#endif + + +#ifdef VK_ENABLE_BETA_EXTENSIONS +#include "vulkan_beta.h" +#endif + +#endif // VULKAN_H_ diff --git a/dep/vulkan-loader/include/vulkan/vulkan_android.h b/dep/vulkan-loader/include/vulkan/vulkan_android.h new file mode 100644 index 000000000..4f27750cd --- /dev/null +++ b/dep/vulkan-loader/include/vulkan/vulkan_android.h @@ -0,0 +1,122 @@ +#ifndef VULKAN_ANDROID_H_ +#define VULKAN_ANDROID_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_android_surface 1 +struct ANativeWindow; +#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6 +#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface" +typedef VkFlags VkAndroidSurfaceCreateFlagsKHR; +typedef struct VkAndroidSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAndroidSurfaceCreateFlagsKHR flags; + struct ANativeWindow* window; +} VkAndroidSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR( + VkInstance instance, + const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_ANDROID_external_memory_android_hardware_buffer 1 +struct AHardwareBuffer; +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3 +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer" +typedef struct VkAndroidHardwareBufferUsageANDROID { + VkStructureType sType; + void* pNext; + uint64_t androidHardwareBufferUsage; +} VkAndroidHardwareBufferUsageANDROID; + +typedef struct VkAndroidHardwareBufferPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeBits; +} VkAndroidHardwareBufferPropertiesANDROID; + +typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkFormat format; + uint64_t externalFormat; + VkFormatFeatureFlags formatFeatures; + VkComponentMapping samplerYcbcrConversionComponents; + VkSamplerYcbcrModelConversion suggestedYcbcrModel; + VkSamplerYcbcrRange suggestedYcbcrRange; + VkChromaLocation suggestedXChromaOffset; + VkChromaLocation suggestedYChromaOffset; +} VkAndroidHardwareBufferFormatPropertiesANDROID; + +typedef struct VkImportAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + struct AHardwareBuffer* buffer; +} VkImportAndroidHardwareBufferInfoANDROID; + +typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkMemoryGetAndroidHardwareBufferInfoANDROID; + +typedef struct VkExternalFormatANDROID { + VkStructureType sType; + void* pNext; + uint64_t externalFormat; +} VkExternalFormatANDROID; + +typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID( + VkDevice device, + const struct AHardwareBuffer* buffer, + VkAndroidHardwareBufferPropertiesANDROID* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID( + VkDevice device, + const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, + struct AHardwareBuffer** pBuffer); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/dep/vulkan-loader/include/vulkan/vulkan_beta.h b/dep/vulkan-loader/include/vulkan/vulkan_beta.h new file mode 100644 
index 000000000..7380e39fe --- /dev/null +++ b/dep/vulkan-loader/include/vulkan/vulkan_beta.h @@ -0,0 +1,438 @@ +#ifndef VULKAN_BETA_H_ +#define VULKAN_BETA_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_deferred_host_operations 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR) +#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 2 +#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations" +typedef struct VkDeferredOperationInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeferredOperationKHR operationHandle; +} VkDeferredOperationInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation); +typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator); +typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR( + VkDevice device, + const VkAllocationCallbacks* pAllocator, + VkDeferredOperationKHR* pDeferredOperation); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR( + VkDevice device, + VkDeferredOperationKHR operation, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR( + VkDevice device, + VkDeferredOperationKHR operation); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR( + VkDevice device, + VkDeferredOperationKHR operation); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR( + VkDevice device, + VkDeferredOperationKHR operation); +#endif + + +#define VK_KHR_pipeline_library 1 +#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library" +typedef struct VkPipelineLibraryCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t libraryCount; + const VkPipeline* pLibraries; +} VkPipelineLibraryCreateInfoKHR; + + + +#define VK_KHR_ray_tracing 1 +#define VK_KHR_RAY_TRACING_SPEC_VERSION 8 +#define VK_KHR_RAY_TRACING_EXTENSION_NAME "VK_KHR_ray_tracing" + +typedef enum VkAccelerationStructureBuildTypeKHR { + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} 
VkAccelerationStructureBuildTypeKHR; +typedef union VkDeviceOrHostAddressKHR { + VkDeviceAddress deviceAddress; + void* hostAddress; +} VkDeviceOrHostAddressKHR; + +typedef union VkDeviceOrHostAddressConstKHR { + VkDeviceAddress deviceAddress; + const void* hostAddress; +} VkDeviceOrHostAddressConstKHR; + +typedef struct VkAccelerationStructureBuildOffsetInfoKHR { + uint32_t primitiveCount; + uint32_t primitiveOffset; + uint32_t firstVertex; + uint32_t transformOffset; +} VkAccelerationStructureBuildOffsetInfoKHR; + +typedef struct VkRayTracingShaderGroupCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; + const void* pShaderGroupCaptureReplayHandle; +} VkRayTracingShaderGroupCreateInfoKHR; + +typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t maxPayloadSize; + uint32_t maxAttributeSize; + uint32_t maxCallableSize; +} VkRayTracingPipelineInterfaceCreateInfoKHR; + +typedef struct VkRayTracingPipelineCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoKHR* pGroups; + uint32_t maxRecursionDepth; + VkPipelineLibraryCreateInfoKHR libraries; + const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoKHR; + +typedef struct VkAccelerationStructureGeometryTrianglesDataKHR { + VkStructureType sType; + const void* pNext; + VkFormat vertexFormat; + VkDeviceOrHostAddressConstKHR vertexData; + VkDeviceSize vertexStride; + VkIndexType indexType; + VkDeviceOrHostAddressConstKHR indexData; + VkDeviceOrHostAddressConstKHR transformData; +} VkAccelerationStructureGeometryTrianglesDataKHR; + +typedef struct VkAccelerationStructureGeometryAabbsDataKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR data; + VkDeviceSize stride; +} VkAccelerationStructureGeometryAabbsDataKHR; + +typedef struct VkAccelerationStructureGeometryInstancesDataKHR { + VkStructureType sType; + const void* pNext; + VkBool32 arrayOfPointers; + VkDeviceOrHostAddressConstKHR data; +} VkAccelerationStructureGeometryInstancesDataKHR; + +typedef union VkAccelerationStructureGeometryDataKHR { + VkAccelerationStructureGeometryTrianglesDataKHR triangles; + VkAccelerationStructureGeometryAabbsDataKHR aabbs; + VkAccelerationStructureGeometryInstancesDataKHR instances; +} VkAccelerationStructureGeometryDataKHR; + +typedef struct VkAccelerationStructureGeometryKHR { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + VkAccelerationStructureGeometryDataKHR geometry; + VkGeometryFlagsKHR flags; +} VkAccelerationStructureGeometryKHR; + +typedef struct VkAccelerationStructureBuildGeometryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeKHR type; + VkBuildAccelerationStructureFlagsKHR flags; + VkBool32 update; + VkAccelerationStructureKHR srcAccelerationStructure; + VkAccelerationStructureKHR dstAccelerationStructure; + VkBool32 geometryArrayOfPointers; + uint32_t geometryCount; + const VkAccelerationStructureGeometryKHR* const* ppGeometries; + VkDeviceOrHostAddressKHR scratchData; +} VkAccelerationStructureBuildGeometryInfoKHR; + 
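A brief sketch of how the device/host address unions defined above are meant to be used; whether the device or host member is read is governed by the VkAccelerationStructureBuildTypeKHR of the build. Here `vertex_buffer_addr` is a hypothetical VkDeviceAddress obtained beforehand (for example via vkGetBufferDeviceAddress) and `cpu_vertices` a hypothetical host-side array, standing in for real application data:

VkDeviceOrHostAddressConstKHR vertex_data;
// Device build: the union carries a GPU address.
vertex_data.deviceAddress = vertex_buffer_addr;
// Host build: the same field would instead carry an ordinary CPU pointer:
// vertex_data.hostAddress = cpu_vertices;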
+typedef struct VkAccelerationStructureCreateGeometryTypeInfoKHR { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + uint32_t maxPrimitiveCount; + VkIndexType indexType; + uint32_t maxVertexCount; + VkFormat vertexFormat; + VkBool32 allowsTransforms; +} VkAccelerationStructureCreateGeometryTypeInfoKHR; + +typedef struct VkAccelerationStructureCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceSize compactedSize; + VkAccelerationStructureTypeKHR type; + VkBuildAccelerationStructureFlagsKHR flags; + uint32_t maxGeometryCount; + const VkAccelerationStructureCreateGeometryTypeInfoKHR* pGeometryInfos; + VkDeviceAddress deviceAddress; +} VkAccelerationStructureCreateInfoKHR; + +typedef struct VkAccelerationStructureMemoryRequirementsInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureMemoryRequirementsTypeKHR type; + VkAccelerationStructureBuildTypeKHR buildType; + VkAccelerationStructureKHR accelerationStructure; +} VkAccelerationStructureMemoryRequirementsInfoKHR; + +typedef struct VkPhysicalDeviceRayTracingFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayTracing; + VkBool32 rayTracingShaderGroupHandleCaptureReplay; + VkBool32 rayTracingShaderGroupHandleCaptureReplayMixed; + VkBool32 rayTracingAccelerationStructureCaptureReplay; + VkBool32 rayTracingIndirectTraceRays; + VkBool32 rayTracingIndirectAccelerationStructureBuild; + VkBool32 rayTracingHostAccelerationStructureCommands; + VkBool32 rayQuery; + VkBool32 rayTracingPrimitiveCulling; +} VkPhysicalDeviceRayTracingFeaturesKHR; + +typedef struct VkPhysicalDeviceRayTracingPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxPrimitiveCount; + uint32_t maxDescriptorSetAccelerationStructures; + uint32_t shaderGroupHandleCaptureReplaySize; +} VkPhysicalDeviceRayTracingPropertiesKHR; + +typedef struct VkAccelerationStructureDeviceAddressInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR accelerationStructure; +} VkAccelerationStructureDeviceAddressInfoKHR; + +typedef struct VkAccelerationStructureVersionKHR { + VkStructureType sType; + const void* pNext; + const uint8_t* versionData; +} VkAccelerationStructureVersionKHR; + +typedef struct VkStridedBufferRegionKHR { + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize stride; + VkDeviceSize size; +} VkStridedBufferRegionKHR; + +typedef struct VkTraceRaysIndirectCommandKHR { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkTraceRaysIndirectCommandKHR; + +typedef struct VkCopyAccelerationStructureToMemoryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkDeviceOrHostAddressKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureToMemoryInfoKHR; + +typedef struct VkCopyMemoryToAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyMemoryToAccelerationStructureInfoKHR; + +typedef struct VkCopyAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureInfoKHR; + +typedef VkResult 
(VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsKHR)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureIndirectKHR)(VkCommandBuffer commandBuffer, const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, VkBuffer indirectBuffer, VkDeviceSize indirectOffset, uint32_t indirectStride); +typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructureKHR)(VkDevice device, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, 
const VkStridedBufferRegionKHR* pCallableShaderBindingTable, VkBuffer buffer, VkDeviceSize offset); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionKHR* version); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR( + VkDevice device, + const VkAccelerationStructureCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureKHR* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsKHR( + VkDevice device, + const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureIndirectKHR( + VkCommandBuffer commandBuffer, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, + VkBuffer indirectBuffer, + VkDeviceSize indirectOffset, + uint32_t indirectStride); + +VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructureKHR( + VkDevice device, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR( + VkDevice device, + const VkCopyAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR( + VkDevice device, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR( + VkDevice device, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR( + VkDevice device, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + size_t dataSize, + void* pData, + size_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR( + VkCommandBuffer commandBuffer, + const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, + const VkStridedBufferRegionKHR* pMissShaderBindingTable, + const VkStridedBufferRegionKHR* pHitShaderBindingTable, + const VkStridedBufferRegionKHR* pCallableShaderBindingTable, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR( + VkDevice device, + const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL 
vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR( + VkCommandBuffer commandBuffer, + const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, + const VkStridedBufferRegionKHR* pMissShaderBindingTable, + const VkStridedBufferRegionKHR* pHitShaderBindingTable, + const VkStridedBufferRegionKHR* pCallableShaderBindingTable, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR( + VkDevice device, + const VkAccelerationStructureVersionKHR* version); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/dep/vulkan-loader/include/vulkan/vulkan_core.h b/dep/vulkan-loader/include/vulkan/vulkan_core.h new file mode 100644 index 000000000..db2e5561b --- /dev/null +++ b/dep/vulkan-loader/include/vulkan/vulkan_core.h @@ -0,0 +1,10822 @@ +#ifndef VULKAN_CORE_H_ +#define VULKAN_CORE_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_VERSION_1_0 1 +#include "vk_platform.h" +#define VK_MAKE_VERSION(major, minor, patch) \ + (((major) << 22) | ((minor) << 12) | (patch)) + +// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead. 
+//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 + +// Vulkan 1.0 version number +#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)// Patch version should always be set to 0 + +#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22) +#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff) +#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff) +// Version of this file +#define VK_HEADER_VERSION 141 + +// Complete version of this file +#define VK_HEADER_VERSION_COMPLETE VK_MAKE_VERSION(1, 2, VK_HEADER_VERSION) + + +#define VK_NULL_HANDLE 0 + + +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; + + +#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE) +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; +#else + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; +#endif +#endif + +typedef uint32_t VkFlags; +typedef uint32_t VkBool32; +typedef uint64_t VkDeviceSize; +typedef uint32_t VkSampleMask; +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) +VK_DEFINE_HANDLE(VkCommandBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) +#define VK_LOD_CLAMP_NONE 1000.0f +#define VK_REMAINING_MIP_LEVELS (~0U) +#define VK_REMAINING_ARRAY_LAYERS (~0U) +#define VK_WHOLE_SIZE (~0ULL) +#define VK_ATTACHMENT_UNUSED (~0U) +#define VK_TRUE 1 +#define VK_FALSE 0 +#define VK_QUEUE_FAMILY_IGNORED (~0U) +#define VK_SUBPASS_EXTERNAL (~0U) +#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256 +#define VK_UUID_SIZE 16 +#define VK_MAX_MEMORY_TYPES 32 +#define VK_MAX_MEMORY_HEAPS 16 +#define VK_MAX_EXTENSION_NAME_SIZE 256 +#define VK_MAX_DESCRIPTION_SIZE 256 + +typedef enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, + VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheHeaderVersion; + +typedef enum VkResult { + VK_SUCCESS = 0, + VK_NOT_READY = 1, + VK_TIMEOUT = 2, + VK_EVENT_SET = 3, + VK_EVENT_RESET = 4, + VK_INCOMPLETE = 5, + VK_ERROR_OUT_OF_HOST_MEMORY = -1, + VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, + VK_ERROR_INITIALIZATION_FAILED = -3, + VK_ERROR_DEVICE_LOST = -4, + VK_ERROR_MEMORY_MAP_FAILED = -5, + VK_ERROR_LAYER_NOT_PRESENT = -6, + VK_ERROR_EXTENSION_NOT_PRESENT = -7, + VK_ERROR_FEATURE_NOT_PRESENT = -8, + VK_ERROR_INCOMPATIBLE_DRIVER = -9, 
+ VK_ERROR_TOO_MANY_OBJECTS = -10, + VK_ERROR_FORMAT_NOT_SUPPORTED = -11, + VK_ERROR_FRAGMENTED_POOL = -12, + VK_ERROR_UNKNOWN = -13, + VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000, + VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003, + VK_ERROR_FRAGMENTATION = -1000161000, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000, + VK_ERROR_SURFACE_LOST_KHR = -1000000000, + VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, + VK_SUBOPTIMAL_KHR = 1000001003, + VK_ERROR_OUT_OF_DATE_KHR = -1000001004, + VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, + VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, + VK_ERROR_INVALID_SHADER_NV = -1000012000, + VK_ERROR_INCOMPATIBLE_VERSION_KHR = -1000150000, + VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000, + VK_ERROR_NOT_PERMITTED_EXT = -1000174001, + VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000, + VK_THREAD_IDLE_KHR = 1000268000, + VK_THREAD_DONE_KHR = 1000268001, + VK_OPERATION_DEFERRED_KHR = 1000268002, + VK_OPERATION_NOT_DEFERRED_KHR = 1000268003, + VK_PIPELINE_COMPILE_REQUIRED_EXT = 1000297000, + VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, + VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, + VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION, + VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED_EXT, + VK_RESULT_MAX_ENUM = 0x7FFFFFFF +} VkResult; + +typedef enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + 
VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000, + VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 
1000156002, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES = 49, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES = 50, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES = 51, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES = 52, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO = 1000147000, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2 = 1000109000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2 = 1000109001, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 = 1000109002, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2 = 1000109003, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 = 1000109004, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO = 1000109005, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO = 1000109006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES = 1000177000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES = 1000196000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES = 1000180000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES = 1000082000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES = 1000197000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO = 1000161000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES = 1000161001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES = 1000161002, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO = 1000161003, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT = 1000161004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES = 1000199000, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE = 1000199001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES = 1000221000, + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO = 1000246000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES = 1000130000, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO = 1000130001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES = 1000211000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES = 1000108000, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO = 1000108001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO = 1000108002, + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO = 1000108003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES = 1000253000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES = 1000175000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES = 1000241000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT = 1000241001, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT = 1000241002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES = 1000261000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES = 1000207000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES = 1000207001, + VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO = 1000207002, + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO = 1000207003, + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO = 1000207004, + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO = 1000207005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES = 1000257000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO = 1000244001, + VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002, + VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004, + VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, + VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, + VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009, + VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012, + VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, + VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, + VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, + VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, + VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, + VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT = 1000028000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT = 1000028001, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT = 1000028002, + VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX = 1000030000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX = 1000030001, + 
VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, + VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, + VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, + VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = 1000066000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, + VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, + VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, + VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, + VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, + VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001, + VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002, + VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, + VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, + VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, + VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, + VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, + VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, + VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT = 1000102000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT = 1000102001, + VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, + VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, + 
VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, + VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, + VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, + VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, + VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR = 1000116000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR = 1000116001, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR = 1000116002, + VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR = 1000116003, + VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR = 1000116004, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR = 1000116005, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR = 1000116006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, + VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, + VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001, + VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004, + VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, + VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001, + VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002, + VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003, + VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004, + VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = 1000138000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = 1000138001, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = 1000138002, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = 1000138003, + VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, + VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, + VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, + VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000165007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR = 1000150000, + 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR = 1000150001, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR = 1000150002, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR = 1000150003, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR = 1000150004, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR = 1000150005, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR = 1000150006, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR = 1000150008, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_KHR = 1000150009, + VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR = 1000150010, + VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR = 1000150011, + VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR = 1000150012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR = 1000150013, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR = 1000150014, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR = 1000150015, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR = 1000150016, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR = 1000150018, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001, + VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000, + VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005, + VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, + VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV = 1000164000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, + VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, + VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, + VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000, + VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000, + VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT 
= 1000174000, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000, + VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000, + VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000, + VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002, + VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000, + VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000192000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = 1000203000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002, + VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL = 1000210000, + VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001, + VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL = 1000210002, + VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL = 1000210003, + VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL = 1000210004, + VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL = 1000210005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT = 1000212000, + VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000, + VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001, + VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000, + VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001, + VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = 1000225000, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = 1000225001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = 1000225002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000, + VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001, + 
VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR = 1000239000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = 1000245000, + VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000, + VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV = 1000250000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV = 1000250001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV = 1000250002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT = 1000251000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT = 1000252000, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT = 1000255000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001, + VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = 1000259000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = 1000259001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = 1000259002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000, + VK_STRUCTURE_TYPE_DEFERRED_OPERATION_INFO_KHR = 1000268000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000, + VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = 1000276000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV = 1000277000, + VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV = 1000277001, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV = 1000277002, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV = 1000277003, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV = 1000277004, + VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV = 1000277005, + VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV = 1000277006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV = 1000277007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = 1000281001, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000, + VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT = 1000286000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT = 1000286001, + VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT = 1000287000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT = 1000287001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT = 1000287002, + VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR = 1000290000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = 1000295000, + VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = 1000295001, + VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = 1000295002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = 1000297000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000, + VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR 
= VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO, + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = 
VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + 
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES, + VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO, + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO, + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO, + VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = 
VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES, + VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkStructureType; + +typedef enum VkSystemAllocationScope { + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1, + VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2, + VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4, + VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF +} VkSystemAllocationScope; + +typedef enum VkInternalAllocationType { + VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, + VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkInternalAllocationType; + +typedef enum VkFormat { + VK_FORMAT_UNDEFINED = 0, + VK_FORMAT_R4G4_UNORM_PACK8 = 1, + VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, + VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, + VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, + VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, + VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, + VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, + VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, + VK_FORMAT_R8_UNORM = 9, + VK_FORMAT_R8_SNORM = 10, + VK_FORMAT_R8_USCALED = 11, + VK_FORMAT_R8_SSCALED = 12, + VK_FORMAT_R8_UINT = 13, + VK_FORMAT_R8_SINT = 14, + VK_FORMAT_R8_SRGB = 15, + VK_FORMAT_R8G8_UNORM = 16, + VK_FORMAT_R8G8_SNORM = 17, + VK_FORMAT_R8G8_USCALED = 18, + VK_FORMAT_R8G8_SSCALED = 19, + VK_FORMAT_R8G8_UINT = 20, + VK_FORMAT_R8G8_SINT = 21, + VK_FORMAT_R8G8_SRGB = 22, + VK_FORMAT_R8G8B8_UNORM = 23, + VK_FORMAT_R8G8B8_SNORM = 24, + VK_FORMAT_R8G8B8_USCALED = 25, + VK_FORMAT_R8G8B8_SSCALED = 26, + VK_FORMAT_R8G8B8_UINT = 27, + VK_FORMAT_R8G8B8_SINT = 28, + VK_FORMAT_R8G8B8_SRGB = 29, + VK_FORMAT_B8G8R8_UNORM = 30, + VK_FORMAT_B8G8R8_SNORM = 31, + VK_FORMAT_B8G8R8_USCALED = 32, + VK_FORMAT_B8G8R8_SSCALED = 33, + VK_FORMAT_B8G8R8_UINT = 34, + VK_FORMAT_B8G8R8_SINT = 35, + VK_FORMAT_B8G8R8_SRGB = 36, + VK_FORMAT_R8G8B8A8_UNORM = 37, + VK_FORMAT_R8G8B8A8_SNORM = 38, + VK_FORMAT_R8G8B8A8_USCALED = 39, + VK_FORMAT_R8G8B8A8_SSCALED = 40, + VK_FORMAT_R8G8B8A8_UINT = 41, + VK_FORMAT_R8G8B8A8_SINT = 42, + VK_FORMAT_R8G8B8A8_SRGB = 43, + VK_FORMAT_B8G8R8A8_UNORM = 44, + VK_FORMAT_B8G8R8A8_SNORM = 45, + VK_FORMAT_B8G8R8A8_USCALED = 46, + VK_FORMAT_B8G8R8A8_SSCALED = 47, + VK_FORMAT_B8G8R8A8_UINT = 48, + VK_FORMAT_B8G8R8A8_SINT = 49, + VK_FORMAT_B8G8R8A8_SRGB = 50, + VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, + VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, + VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, + VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, + VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, + VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, + VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, + VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, + VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, + VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, + VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, + VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, + VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, + VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, + VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, + VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, + VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, + VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, + VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, + VK_FORMAT_R16_UNORM = 70, + VK_FORMAT_R16_SNORM = 71, + VK_FORMAT_R16_USCALED = 72, + VK_FORMAT_R16_SSCALED = 73, + VK_FORMAT_R16_UINT = 74, + VK_FORMAT_R16_SINT = 75, + VK_FORMAT_R16_SFLOAT = 76, + VK_FORMAT_R16G16_UNORM = 77, + VK_FORMAT_R16G16_SNORM = 78, + VK_FORMAT_R16G16_USCALED = 79, + VK_FORMAT_R16G16_SSCALED = 80, + VK_FORMAT_R16G16_UINT = 
81, + VK_FORMAT_R16G16_SINT = 82, + VK_FORMAT_R16G16_SFLOAT = 83, + VK_FORMAT_R16G16B16_UNORM = 84, + VK_FORMAT_R16G16B16_SNORM = 85, + VK_FORMAT_R16G16B16_USCALED = 86, + VK_FORMAT_R16G16B16_SSCALED = 87, + VK_FORMAT_R16G16B16_UINT = 88, + VK_FORMAT_R16G16B16_SINT = 89, + VK_FORMAT_R16G16B16_SFLOAT = 90, + VK_FORMAT_R16G16B16A16_UNORM = 91, + VK_FORMAT_R16G16B16A16_SNORM = 92, + VK_FORMAT_R16G16B16A16_USCALED = 93, + VK_FORMAT_R16G16B16A16_SSCALED = 94, + VK_FORMAT_R16G16B16A16_UINT = 95, + VK_FORMAT_R16G16B16A16_SINT = 96, + VK_FORMAT_R16G16B16A16_SFLOAT = 97, + VK_FORMAT_R32_UINT = 98, + VK_FORMAT_R32_SINT = 99, + VK_FORMAT_R32_SFLOAT = 100, + VK_FORMAT_R32G32_UINT = 101, + VK_FORMAT_R32G32_SINT = 102, + VK_FORMAT_R32G32_SFLOAT = 103, + VK_FORMAT_R32G32B32_UINT = 104, + VK_FORMAT_R32G32B32_SINT = 105, + VK_FORMAT_R32G32B32_SFLOAT = 106, + VK_FORMAT_R32G32B32A32_UINT = 107, + VK_FORMAT_R32G32B32A32_SINT = 108, + VK_FORMAT_R32G32B32A32_SFLOAT = 109, + VK_FORMAT_R64_UINT = 110, + VK_FORMAT_R64_SINT = 111, + VK_FORMAT_R64_SFLOAT = 112, + VK_FORMAT_R64G64_UINT = 113, + VK_FORMAT_R64G64_SINT = 114, + VK_FORMAT_R64G64_SFLOAT = 115, + VK_FORMAT_R64G64B64_UINT = 116, + VK_FORMAT_R64G64B64_SINT = 117, + VK_FORMAT_R64G64B64_SFLOAT = 118, + VK_FORMAT_R64G64B64A64_UINT = 119, + VK_FORMAT_R64G64B64A64_SINT = 120, + VK_FORMAT_R64G64B64A64_SFLOAT = 121, + VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, + VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, + VK_FORMAT_D16_UNORM = 124, + VK_FORMAT_X8_D24_UNORM_PACK32 = 125, + VK_FORMAT_D32_SFLOAT = 126, + VK_FORMAT_S8_UINT = 127, + VK_FORMAT_D16_UNORM_S8_UINT = 128, + VK_FORMAT_D24_UNORM_S8_UINT = 129, + VK_FORMAT_D32_SFLOAT_S8_UINT = 130, + VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, + VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, + VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, + VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, + VK_FORMAT_BC2_UNORM_BLOCK = 135, + VK_FORMAT_BC2_SRGB_BLOCK = 136, + VK_FORMAT_BC3_UNORM_BLOCK = 137, + VK_FORMAT_BC3_SRGB_BLOCK = 138, + VK_FORMAT_BC4_UNORM_BLOCK = 139, + VK_FORMAT_BC4_SNORM_BLOCK = 140, + VK_FORMAT_BC5_UNORM_BLOCK = 141, + VK_FORMAT_BC5_SNORM_BLOCK = 142, + VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, + VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, + VK_FORMAT_BC7_UNORM_BLOCK = 145, + VK_FORMAT_BC7_SRGB_BLOCK = 146, + VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, + VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, + VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, + VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, + VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, + VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, + VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, + VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, + VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, + VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, + VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, + VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, + VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, + VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, + VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, + VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, + VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, + VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, + VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, + VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, + VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, + VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, + VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, + VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, + VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, + VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, + VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, + VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174, + VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, + VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, + VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, + VK_FORMAT_ASTC_10x8_SRGB_BLOCK 
= 178, + VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, + VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, + VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, + VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, + VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, + VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184, + VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000, + VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006, + VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016, + VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026, + VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027, + VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033, + VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, + VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, + VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, + VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003, + VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004, + VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, + VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, + VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, + VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = 1000066000, + VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = 1000066001, + VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = 1000066002, + VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = 1000066003, + VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = 1000066004, + VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = 1000066005, + VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = 1000066006, + VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = 1000066007, + VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = 1000066008, + VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = 1000066009, + VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = 1000066010, + VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = 1000066011, + VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = 1000066012, + VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = 1000066013, + VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM, + VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM, + 
VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM, + VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + VK_FORMAT_MAX_ENUM = 0x7FFFFFFF +} VkFormat; + +typedef enum VkImageType { + VK_IMAGE_TYPE_1D = 0, + VK_IMAGE_TYPE_2D = 1, + VK_IMAGE_TYPE_3D = 2, + VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageType; + +typedef enum VkImageTiling { + VK_IMAGE_TILING_OPTIMAL = 0, + VK_IMAGE_TILING_LINEAR = 1, + VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT = 1000158000, + VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF +} VkImageTiling; + +typedef enum VkPhysicalDeviceType { + VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, + 
VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,
+    VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,
+    VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,
+    VK_PHYSICAL_DEVICE_TYPE_CPU = 4,
+    VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkPhysicalDeviceType;
+
+typedef enum VkQueryType {
+    VK_QUERY_TYPE_OCCLUSION = 0,
+    VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+    VK_QUERY_TYPE_TIMESTAMP = 2,
+    VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004,
+    VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR = 1000116000,
+    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1000165000,
+    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1000150000,
+    VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL = 1000210000,
+    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR,
+    VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkQueryType;
+
+typedef enum VkSharingMode {
+    VK_SHARING_MODE_EXCLUSIVE = 0,
+    VK_SHARING_MODE_CONCURRENT = 1,
+    VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSharingMode;
+
+typedef enum VkImageLayout {
+    VK_IMAGE_LAYOUT_UNDEFINED = 0,
+    VK_IMAGE_LAYOUT_GENERAL = 1,
+    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
+    VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
+    VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
+    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
+    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
+    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
+    VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
+    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000,
+    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001,
+    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL = 1000241000,
+    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL = 1000241001,
+    VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL = 1000241002,
+    VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL = 1000241003,
+    VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
+    VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000,
+    VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = 1000164003,
+    VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000,
+    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL,
+    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL,
+    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
+    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL,
+    VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL,
+    VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL,
+    VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF
+} VkImageLayout;
+
+typedef enum VkImageViewType {
+    VK_IMAGE_VIEW_TYPE_1D = 0,
+    VK_IMAGE_VIEW_TYPE_2D = 1,
+    VK_IMAGE_VIEW_TYPE_3D = 2,
+    VK_IMAGE_VIEW_TYPE_CUBE = 3,
+    VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,
+    VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,
+    VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6,
+    VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageViewType;
+
+typedef enum VkComponentSwizzle {
+    VK_COMPONENT_SWIZZLE_IDENTITY = 0,
+    VK_COMPONENT_SWIZZLE_ZERO = 1,
+    VK_COMPONENT_SWIZZLE_ONE = 2,
+    VK_COMPONENT_SWIZZLE_R = 3,
+    VK_COMPONENT_SWIZZLE_G = 4,
+    VK_COMPONENT_SWIZZLE_B = 5,
+    VK_COMPONENT_SWIZZLE_A = 6,
+    VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF
+} VkComponentSwizzle;
+
+typedef enum VkVertexInputRate {
+    VK_VERTEX_INPUT_RATE_VERTEX = 0,
+    VK_VERTEX_INPUT_RATE_INSTANCE = 1,
+    VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF
+} VkVertexInputRate;
+
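Reviewer aside, not part of the vendored header: the large VkStructureType table earlier in this hunk exists because every extensible Vulkan struct identifies itself through a leading sType member (with a pNext pointer for extension chaining), and the VkResult codes above are how every fallible entry point reports status. A minimal sketch of how a consumer of this header ties those pieces together; the function name and application string are illustrative only, and vkCreateInstance itself is declared further down in this same file:

    #include <stddef.h>
    #include <vulkan/vulkan.h>

    /* Sketch: create a bare VkInstance using the core types declared above. */
    static VkInstance create_minimal_instance(void)
    {
        VkApplicationInfo app = {0};
        app.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;    /* = 0 in the enum above */
        app.pApplicationName = "example-app";              /* illustrative only */
        app.apiVersion = VK_API_VERSION_1_0;               /* packed via VK_MAKE_VERSION */

        VkInstanceCreateInfo ci = {0};
        ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; /* = 1 in the enum above */
        ci.pNext = NULL;                                   /* extension structs would chain here */
        ci.pApplicationInfo = &app;

        VkInstance instance = VK_NULL_HANDLE;
        /* Any VkResult other than VK_SUCCESS leaves the handle null. */
        if (vkCreateInstance(&ci, NULL, &instance) != VK_SUCCESS)
            return VK_NULL_HANDLE;
        return instance;
    }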
+typedef enum VkPrimitiveTopology { + VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, + VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, + VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF +} VkPrimitiveTopology; + +typedef enum VkPolygonMode { + VK_POLYGON_MODE_FILL = 0, + VK_POLYGON_MODE_LINE = 1, + VK_POLYGON_MODE_POINT = 2, + VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000, + VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF +} VkPolygonMode; + +typedef enum VkFrontFace { + VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, + VK_FRONT_FACE_CLOCKWISE = 1, + VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF +} VkFrontFace; + +typedef enum VkCompareOp { + VK_COMPARE_OP_NEVER = 0, + VK_COMPARE_OP_LESS = 1, + VK_COMPARE_OP_EQUAL = 2, + VK_COMPARE_OP_LESS_OR_EQUAL = 3, + VK_COMPARE_OP_GREATER = 4, + VK_COMPARE_OP_NOT_EQUAL = 5, + VK_COMPARE_OP_GREATER_OR_EQUAL = 6, + VK_COMPARE_OP_ALWAYS = 7, + VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF +} VkCompareOp; + +typedef enum VkStencilOp { + VK_STENCIL_OP_KEEP = 0, + VK_STENCIL_OP_ZERO = 1, + VK_STENCIL_OP_REPLACE = 2, + VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, + VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, + VK_STENCIL_OP_INVERT = 5, + VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, + VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, + VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF +} VkStencilOp; + +typedef enum VkLogicOp { + VK_LOGIC_OP_CLEAR = 0, + VK_LOGIC_OP_AND = 1, + VK_LOGIC_OP_AND_REVERSE = 2, + VK_LOGIC_OP_COPY = 3, + VK_LOGIC_OP_AND_INVERTED = 4, + VK_LOGIC_OP_NO_OP = 5, + VK_LOGIC_OP_XOR = 6, + VK_LOGIC_OP_OR = 7, + VK_LOGIC_OP_NOR = 8, + VK_LOGIC_OP_EQUIVALENT = 9, + VK_LOGIC_OP_INVERT = 10, + VK_LOGIC_OP_OR_REVERSE = 11, + VK_LOGIC_OP_COPY_INVERTED = 12, + VK_LOGIC_OP_OR_INVERTED = 13, + VK_LOGIC_OP_NAND = 14, + VK_LOGIC_OP_SET = 15, + VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF +} VkLogicOp; + +typedef enum VkBlendFactor { + VK_BLEND_FACTOR_ZERO = 0, + VK_BLEND_FACTOR_ONE = 1, + VK_BLEND_FACTOR_SRC_COLOR = 2, + VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, + VK_BLEND_FACTOR_DST_COLOR = 4, + VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, + VK_BLEND_FACTOR_SRC_ALPHA = 6, + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, + VK_BLEND_FACTOR_DST_ALPHA = 8, + VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, + VK_BLEND_FACTOR_CONSTANT_COLOR = 10, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, + VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, + VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, + VK_BLEND_FACTOR_SRC1_COLOR = 15, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, + VK_BLEND_FACTOR_SRC1_ALPHA = 17, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, + VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF +} VkBlendFactor; + +typedef enum VkBlendOp { + VK_BLEND_OP_ADD = 0, + VK_BLEND_OP_SUBTRACT = 1, + VK_BLEND_OP_REVERSE_SUBTRACT = 2, + VK_BLEND_OP_MIN = 3, + VK_BLEND_OP_MAX = 4, + VK_BLEND_OP_ZERO_EXT = 1000148000, + VK_BLEND_OP_SRC_EXT = 1000148001, + VK_BLEND_OP_DST_EXT = 1000148002, + VK_BLEND_OP_SRC_OVER_EXT = 1000148003, + VK_BLEND_OP_DST_OVER_EXT = 1000148004, + VK_BLEND_OP_SRC_IN_EXT = 1000148005, + VK_BLEND_OP_DST_IN_EXT = 1000148006, + VK_BLEND_OP_SRC_OUT_EXT = 1000148007, + VK_BLEND_OP_DST_OUT_EXT = 1000148008, + VK_BLEND_OP_SRC_ATOP_EXT 
= 1000148009, + VK_BLEND_OP_DST_ATOP_EXT = 1000148010, + VK_BLEND_OP_XOR_EXT = 1000148011, + VK_BLEND_OP_MULTIPLY_EXT = 1000148012, + VK_BLEND_OP_SCREEN_EXT = 1000148013, + VK_BLEND_OP_OVERLAY_EXT = 1000148014, + VK_BLEND_OP_DARKEN_EXT = 1000148015, + VK_BLEND_OP_LIGHTEN_EXT = 1000148016, + VK_BLEND_OP_COLORDODGE_EXT = 1000148017, + VK_BLEND_OP_COLORBURN_EXT = 1000148018, + VK_BLEND_OP_HARDLIGHT_EXT = 1000148019, + VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020, + VK_BLEND_OP_DIFFERENCE_EXT = 1000148021, + VK_BLEND_OP_EXCLUSION_EXT = 1000148022, + VK_BLEND_OP_INVERT_EXT = 1000148023, + VK_BLEND_OP_INVERT_RGB_EXT = 1000148024, + VK_BLEND_OP_LINEARDODGE_EXT = 1000148025, + VK_BLEND_OP_LINEARBURN_EXT = 1000148026, + VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027, + VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028, + VK_BLEND_OP_PINLIGHT_EXT = 1000148029, + VK_BLEND_OP_HARDMIX_EXT = 1000148030, + VK_BLEND_OP_HSL_HUE_EXT = 1000148031, + VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032, + VK_BLEND_OP_HSL_COLOR_EXT = 1000148033, + VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034, + VK_BLEND_OP_PLUS_EXT = 1000148035, + VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036, + VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037, + VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038, + VK_BLEND_OP_MINUS_EXT = 1000148039, + VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040, + VK_BLEND_OP_CONTRAST_EXT = 1000148041, + VK_BLEND_OP_INVERT_OVG_EXT = 1000148042, + VK_BLEND_OP_RED_EXT = 1000148043, + VK_BLEND_OP_GREEN_EXT = 1000148044, + VK_BLEND_OP_BLUE_EXT = 1000148045, + VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF +} VkBlendOp; + +typedef enum VkDynamicState { + VK_DYNAMIC_STATE_VIEWPORT = 0, + VK_DYNAMIC_STATE_SCISSOR = 1, + VK_DYNAMIC_STATE_LINE_WIDTH = 2, + VK_DYNAMIC_STATE_DEPTH_BIAS = 3, + VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, + VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, + VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, + VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, + VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, + VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004, + VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006, + VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001, + VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000, + VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF +} VkDynamicState; + +typedef enum VkFilter { + VK_FILTER_NEAREST = 0, + VK_FILTER_LINEAR = 1, + VK_FILTER_CUBIC_IMG = 1000015000, + VK_FILTER_CUBIC_EXT = VK_FILTER_CUBIC_IMG, + VK_FILTER_MAX_ENUM = 0x7FFFFFFF +} VkFilter; + +typedef enum VkSamplerMipmapMode { + VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, + VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, + VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerMipmapMode; + +typedef enum VkSamplerAddressMode { + VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, + VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, + VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerAddressMode; + +typedef enum VkBorderColor { + VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, + VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, + VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, + VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, + VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, + VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, + VK_BORDER_COLOR_FLOAT_CUSTOM_EXT = 
1000287003, + VK_BORDER_COLOR_INT_CUSTOM_EXT = 1000287004, + VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF +} VkBorderColor; + +typedef enum VkDescriptorType { + VK_DESCRIPTOR_TYPE_SAMPLER = 0, + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, + VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, + VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, + VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, + VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, + VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, + VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000165000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, + VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorType; + +typedef enum VkAttachmentLoadOp { + VK_ATTACHMENT_LOAD_OP_LOAD = 0, + VK_ATTACHMENT_LOAD_OP_CLEAR = 1, + VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, + VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentLoadOp; + +typedef enum VkAttachmentStoreOp { + VK_ATTACHMENT_STORE_OP_STORE = 0, + VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, + VK_ATTACHMENT_STORE_OP_NONE_QCOM = 1000301000, + VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentStoreOp; + +typedef enum VkPipelineBindPoint { + VK_PIPELINE_BIND_POINT_GRAPHICS = 0, + VK_PIPELINE_BIND_POINT_COMPUTE = 1, + VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR = 1000165000, + VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, + VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF +} VkPipelineBindPoint; + +typedef enum VkCommandBufferLevel { + VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, + VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, + VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferLevel; + +typedef enum VkIndexType { + VK_INDEX_TYPE_UINT16 = 0, + VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_NONE_KHR = 1000165000, + VK_INDEX_TYPE_UINT8_EXT = 1000265000, + VK_INDEX_TYPE_NONE_NV = VK_INDEX_TYPE_NONE_KHR, + VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkIndexType; + +typedef enum VkSubpassContents { + VK_SUBPASS_CONTENTS_INLINE = 0, + VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, + VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassContents; + +typedef enum VkObjectType { + VK_OBJECT_TYPE_UNKNOWN = 0, + VK_OBJECT_TYPE_INSTANCE = 1, + VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, + VK_OBJECT_TYPE_DEVICE = 3, + VK_OBJECT_TYPE_QUEUE = 4, + VK_OBJECT_TYPE_SEMAPHORE = 5, + VK_OBJECT_TYPE_COMMAND_BUFFER = 6, + VK_OBJECT_TYPE_FENCE = 7, + VK_OBJECT_TYPE_DEVICE_MEMORY = 8, + VK_OBJECT_TYPE_BUFFER = 9, + VK_OBJECT_TYPE_IMAGE = 10, + VK_OBJECT_TYPE_EVENT = 11, + VK_OBJECT_TYPE_QUERY_POOL = 12, + VK_OBJECT_TYPE_BUFFER_VIEW = 13, + VK_OBJECT_TYPE_IMAGE_VIEW = 14, + VK_OBJECT_TYPE_SHADER_MODULE = 15, + VK_OBJECT_TYPE_PIPELINE_CACHE = 16, + VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, + VK_OBJECT_TYPE_RENDER_PASS = 18, + VK_OBJECT_TYPE_PIPELINE = 19, + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, + VK_OBJECT_TYPE_SAMPLER = 21, + VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, + VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, + VK_OBJECT_TYPE_FRAMEBUFFER = 24, + VK_OBJECT_TYPE_COMMAND_POOL = 25, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000, + VK_OBJECT_TYPE_SURFACE_KHR = 1000000000, + VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000, + VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000, + VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, + 
VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, + VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1000165000, + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, + VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000, + VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1000268000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1000277000, + VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = 1000295000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, + VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkObjectType; + +typedef enum VkVendorId { + VK_VENDOR_ID_VIV = 0x10001, + VK_VENDOR_ID_VSI = 0x10002, + VK_VENDOR_ID_KAZAN = 0x10003, + VK_VENDOR_ID_CODEPLAY = 0x10004, + VK_VENDOR_ID_MESA = 0x10005, + VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF +} VkVendorId; +typedef VkFlags VkInstanceCreateFlags; + +typedef enum VkFormatFeatureFlagBits { + VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001, + VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002, + VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, + VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, + VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, + VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, + VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400, + VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000, + VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000, + VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000, + VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + 
VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, + VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFormatFeatureFlagBits; +typedef VkFlags VkFormatFeatureFlags; + +typedef enum VkImageUsageFlagBits { + VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, + VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, + VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, + VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00000100, + VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200, + VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageUsageFlagBits; +typedef VkFlags VkImageUsageFlags; + +typedef enum VkImageCreateFlagBits { + VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008, + VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010, + VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100, + VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800, + VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200, + VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x00002000, + VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000, + VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x00004000, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT, + VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT, + VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageCreateFlagBits; +typedef VkFlags VkImageCreateFlags; + +typedef enum VkSampleCountFlagBits { + VK_SAMPLE_COUNT_1_BIT = 0x00000001, + VK_SAMPLE_COUNT_2_BIT = 0x00000002, + VK_SAMPLE_COUNT_4_BIT = 0x00000004, + VK_SAMPLE_COUNT_8_BIT = 0x00000008, + VK_SAMPLE_COUNT_16_BIT = 0x00000010, + VK_SAMPLE_COUNT_32_BIT = 0x00000020, + VK_SAMPLE_COUNT_64_BIT = 0x00000040, + VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSampleCountFlagBits; +typedef VkFlags VkSampleCountFlags; + +typedef enum VkQueueFlagBits { + VK_QUEUE_GRAPHICS_BIT = 0x00000001, + VK_QUEUE_COMPUTE_BIT = 0x00000002, + VK_QUEUE_TRANSFER_BIT = 0x00000004, + VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008, + VK_QUEUE_PROTECTED_BIT = 
0x00000010, + VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueueFlagBits; +typedef VkFlags VkQueueFlags; + +typedef enum VkMemoryPropertyFlagBits { + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002, + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004, + VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008, + VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010, + VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020, + VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040, + VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080, + VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryPropertyFlagBits; +typedef VkFlags VkMemoryPropertyFlags; + +typedef enum VkMemoryHeapFlagBits { + VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, + VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryHeapFlagBits; +typedef VkFlags VkMemoryHeapFlags; +typedef VkFlags VkDeviceCreateFlags; + +typedef enum VkDeviceQueueCreateFlagBits { + VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001, + VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDeviceQueueCreateFlagBits; +typedef VkFlags VkDeviceQueueCreateFlags; + +typedef enum VkPipelineStageFlagBits { + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001, + VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008, + VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010, + VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020, + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100, + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800, + VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000, + VK_PIPELINE_STAGE_HOST_BIT = 0x00004000, + VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, + VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000, + VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000, + VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000, + VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000, + VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000, + VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000, + VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV = 0x00020000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineStageFlagBits; +typedef VkFlags VkPipelineStageFlags; +typedef VkFlags VkMemoryMapFlags; + +typedef enum VkImageAspectFlagBits { + VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001, + VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002, + VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, + VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008, + VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, + VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, + VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040, + 
VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT = 0x00000080, + VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT = 0x00000100, + VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT = 0x00000200, + VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT = 0x00000400, + VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT, + VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT, + VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT, + VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageAspectFlagBits; +typedef VkFlags VkImageAspectFlags; + +typedef enum VkSparseImageFormatFlagBits { + VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001, + VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002, + VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004, + VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseImageFormatFlagBits; +typedef VkFlags VkSparseImageFormatFlags; + +typedef enum VkSparseMemoryBindFlagBits { + VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001, + VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseMemoryBindFlagBits; +typedef VkFlags VkSparseMemoryBindFlags; + +typedef enum VkFenceCreateFlagBits { + VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001, + VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceCreateFlagBits; +typedef VkFlags VkFenceCreateFlags; +typedef VkFlags VkSemaphoreCreateFlags; +typedef VkFlags VkEventCreateFlags; +typedef VkFlags VkQueryPoolCreateFlags; + +typedef enum VkQueryPipelineStatisticFlagBits { + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001, + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002, + VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040, + VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200, + VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400, + VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryPipelineStatisticFlagBits; +typedef VkFlags VkQueryPipelineStatisticFlags; + +typedef enum VkQueryResultFlagBits { + VK_QUERY_RESULT_64_BIT = 0x00000001, + VK_QUERY_RESULT_WAIT_BIT = 0x00000002, + VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, + VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008, + VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryResultFlagBits; +typedef VkFlags VkQueryResultFlags; + +typedef enum VkBufferCreateFlagBits { + VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000010, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferCreateFlagBits; +typedef VkFlags VkBufferCreateFlags; + +typedef enum VkBufferUsageFlagBits { + VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001, + 
VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, + VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, + VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, + VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x00020000, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000, + VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, + VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR = 0x00000400, + VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferUsageFlagBits; +typedef VkFlags VkBufferUsageFlags; +typedef VkFlags VkBufferViewCreateFlags; + +typedef enum VkImageViewCreateFlagBits { + VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT = 0x00000001, + VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageViewCreateFlagBits; +typedef VkFlags VkImageViewCreateFlags; + +typedef enum VkShaderModuleCreateFlagBits { + VK_SHADER_MODULE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderModuleCreateFlagBits; +typedef VkFlags VkShaderModuleCreateFlags; + +typedef enum VkPipelineCacheCreateFlagBits { + VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT = 0x00000001, + VK_PIPELINE_CACHE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheCreateFlagBits; +typedef VkFlags VkPipelineCacheCreateFlags; + +typedef enum VkPipelineCreateFlagBits { + VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001, + VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002, + VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008, + VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000, + VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, + VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040, + VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080, + VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00040000, + VK_PIPELINE_CREATE_LIBRARY_BIT_KHR = 0x00000800, + VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = 0x00000100, + VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = 0x00000200, + VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, + VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCreateFlagBits; +typedef VkFlags VkPipelineCreateFlags; + +typedef enum VkPipelineShaderStageCreateFlagBits { + 
VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = 0x00000001, + VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = 0x00000002, + VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineShaderStageCreateFlagBits; +typedef VkFlags VkPipelineShaderStageCreateFlags; + +typedef enum VkShaderStageFlagBits { + VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, + VK_SHADER_STAGE_ALL = 0x7FFFFFFF, + VK_SHADER_STAGE_RAYGEN_BIT_KHR = 0x00000100, + VK_SHADER_STAGE_ANY_HIT_BIT_KHR = 0x00000200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR = 0x00000400, + VK_SHADER_STAGE_MISS_BIT_KHR = 0x00000800, + VK_SHADER_STAGE_INTERSECTION_BIT_KHR = 0x00001000, + VK_SHADER_STAGE_CALLABLE_BIT_KHR = 0x00002000, + VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040, + VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080, + VK_SHADER_STAGE_RAYGEN_BIT_NV = VK_SHADER_STAGE_RAYGEN_BIT_KHR, + VK_SHADER_STAGE_ANY_HIT_BIT_NV = VK_SHADER_STAGE_ANY_HIT_BIT_KHR, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, + VK_SHADER_STAGE_MISS_BIT_NV = VK_SHADER_STAGE_MISS_BIT_KHR, + VK_SHADER_STAGE_INTERSECTION_BIT_NV = VK_SHADER_STAGE_INTERSECTION_BIT_KHR, + VK_SHADER_STAGE_CALLABLE_BIT_NV = VK_SHADER_STAGE_CALLABLE_BIT_KHR, + VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderStageFlagBits; +typedef VkFlags VkPipelineVertexInputStateCreateFlags; +typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; +typedef VkFlags VkPipelineTessellationStateCreateFlags; +typedef VkFlags VkPipelineViewportStateCreateFlags; +typedef VkFlags VkPipelineRasterizationStateCreateFlags; + +typedef enum VkCullModeFlagBits { + VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x00000001, + VK_CULL_MODE_BACK_BIT = 0x00000002, + VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, + VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCullModeFlagBits; +typedef VkFlags VkCullModeFlags; +typedef VkFlags VkPipelineMultisampleStateCreateFlags; +typedef VkFlags VkPipelineDepthStencilStateCreateFlags; +typedef VkFlags VkPipelineColorBlendStateCreateFlags; + +typedef enum VkColorComponentFlagBits { + VK_COLOR_COMPONENT_R_BIT = 0x00000001, + VK_COLOR_COMPONENT_G_BIT = 0x00000002, + VK_COLOR_COMPONENT_B_BIT = 0x00000004, + VK_COLOR_COMPONENT_A_BIT = 0x00000008, + VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkColorComponentFlagBits; +typedef VkFlags VkColorComponentFlags; +typedef VkFlags VkPipelineDynamicStateCreateFlags; +typedef VkFlags VkPipelineLayoutCreateFlags; +typedef VkFlags VkShaderStageFlags; + +typedef enum VkSamplerCreateFlagBits { + VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 0x00000001, + VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x00000002, + VK_SAMPLER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSamplerCreateFlagBits; +typedef VkFlags VkSamplerCreateFlags; + +typedef enum VkDescriptorSetLayoutCreateFlagBits { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorSetLayoutCreateFlagBits; +typedef VkFlags 
VkDescriptorSetLayoutCreateFlags; + +typedef enum VkDescriptorPoolCreateFlagBits { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT = 0x00000002, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT, + VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorPoolCreateFlagBits; +typedef VkFlags VkDescriptorPoolCreateFlags; +typedef VkFlags VkDescriptorPoolResetFlags; + +typedef enum VkFramebufferCreateFlagBits { + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT = 0x00000001, + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, + VK_FRAMEBUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFramebufferCreateFlagBits; +typedef VkFlags VkFramebufferCreateFlags; + +typedef enum VkRenderPassCreateFlagBits { + VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM = 0x00000002, + VK_RENDER_PASS_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkRenderPassCreateFlagBits; +typedef VkFlags VkRenderPassCreateFlags; + +typedef enum VkAttachmentDescriptionFlagBits { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, + VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentDescriptionFlagBits; +typedef VkFlags VkAttachmentDescriptionFlags; + +typedef enum VkSubpassDescriptionFlagBits { + VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, + VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, + VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM = 0x00000004, + VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM = 0x00000008, + VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassDescriptionFlagBits; +typedef VkFlags VkSubpassDescriptionFlags; + +typedef enum VkAccessFlagBits { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, + VK_ACCESS_INDEX_READ_BIT = 0x00000002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, + VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, + VK_ACCESS_SHADER_READ_BIT = 0x00000020, + VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, + VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, + VK_ACCESS_HOST_READ_BIT = 0x00002000, + VK_ACCESS_HOST_WRITE_BIT = 0x00004000, + VK_ACCESS_MEMORY_READ_BIT = 0x00008000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, + VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000, + VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000, + VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000, + VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000, + VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000, + VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, + VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAccessFlagBits; +typedef VkFlags VkAccessFlags; + 
+typedef enum VkDependencyFlagBits { + VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, + VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004, + VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002, + VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT, + VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDependencyFlagBits; +typedef VkFlags VkDependencyFlags; + +typedef enum VkCommandPoolCreateFlagBits { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, + VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004, + VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolCreateFlagBits; +typedef VkFlags VkCommandPoolCreateFlags; + +typedef enum VkCommandPoolResetFlagBits { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolResetFlagBits; +typedef VkFlags VkCommandPoolResetFlags; + +typedef enum VkCommandBufferUsageFlagBits { + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, + VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, + VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferUsageFlagBits; +typedef VkFlags VkCommandBufferUsageFlags; + +typedef enum VkQueryControlFlagBits { + VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001, + VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryControlFlagBits; +typedef VkFlags VkQueryControlFlags; + +typedef enum VkCommandBufferResetFlagBits { + VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferResetFlagBits; +typedef VkFlags VkCommandBufferResetFlags; + +typedef enum VkStencilFaceFlagBits { + VK_STENCIL_FACE_FRONT_BIT = 0x00000001, + VK_STENCIL_FACE_BACK_BIT = 0x00000002, + VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003, + VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK, + VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkStencilFaceFlagBits; +typedef VkFlags VkStencilFaceFlags; +typedef struct VkApplicationInfo { + VkStructureType sType; + const void* pNext; + const char* pApplicationName; + uint32_t applicationVersion; + const char* pEngineName; + uint32_t engineVersion; + uint32_t apiVersion; +} VkApplicationInfo; + +typedef struct VkInstanceCreateInfo { + VkStructureType sType; + const void* pNext; + VkInstanceCreateFlags flags; + const VkApplicationInfo* pApplicationInfo; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; +} VkInstanceCreateInfo; + +typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)( + void* pUserData, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)( + void* pUserData, + void* pOriginal, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkFreeFunction)( + void* pUserData, + void* pMemory); + +typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef struct 
VkAllocationCallbacks { + void* pUserData; + PFN_vkAllocationFunction pfnAllocation; + PFN_vkReallocationFunction pfnReallocation; + PFN_vkFreeFunction pfnFree; + PFN_vkInternalAllocationNotification pfnInternalAllocation; + PFN_vkInternalFreeNotification pfnInternalFree; +} VkAllocationCallbacks; + +typedef struct VkPhysicalDeviceFeatures { + VkBool32 robustBufferAccess; + VkBool32 fullDrawIndexUint32; + VkBool32 imageCubeArray; + VkBool32 independentBlend; + VkBool32 geometryShader; + VkBool32 tessellationShader; + VkBool32 sampleRateShading; + VkBool32 dualSrcBlend; + VkBool32 logicOp; + VkBool32 multiDrawIndirect; + VkBool32 drawIndirectFirstInstance; + VkBool32 depthClamp; + VkBool32 depthBiasClamp; + VkBool32 fillModeNonSolid; + VkBool32 depthBounds; + VkBool32 wideLines; + VkBool32 largePoints; + VkBool32 alphaToOne; + VkBool32 multiViewport; + VkBool32 samplerAnisotropy; + VkBool32 textureCompressionETC2; + VkBool32 textureCompressionASTC_LDR; + VkBool32 textureCompressionBC; + VkBool32 occlusionQueryPrecise; + VkBool32 pipelineStatisticsQuery; + VkBool32 vertexPipelineStoresAndAtomics; + VkBool32 fragmentStoresAndAtomics; + VkBool32 shaderTessellationAndGeometryPointSize; + VkBool32 shaderImageGatherExtended; + VkBool32 shaderStorageImageExtendedFormats; + VkBool32 shaderStorageImageMultisample; + VkBool32 shaderStorageImageReadWithoutFormat; + VkBool32 shaderStorageImageWriteWithoutFormat; + VkBool32 shaderUniformBufferArrayDynamicIndexing; + VkBool32 shaderSampledImageArrayDynamicIndexing; + VkBool32 shaderStorageBufferArrayDynamicIndexing; + VkBool32 shaderStorageImageArrayDynamicIndexing; + VkBool32 shaderClipDistance; + VkBool32 shaderCullDistance; + VkBool32 shaderFloat64; + VkBool32 shaderInt64; + VkBool32 shaderInt16; + VkBool32 shaderResourceResidency; + VkBool32 shaderResourceMinLod; + VkBool32 sparseBinding; + VkBool32 sparseResidencyBuffer; + VkBool32 sparseResidencyImage2D; + VkBool32 sparseResidencyImage3D; + VkBool32 sparseResidency2Samples; + VkBool32 sparseResidency4Samples; + VkBool32 sparseResidency8Samples; + VkBool32 sparseResidency16Samples; + VkBool32 sparseResidencyAliased; + VkBool32 variableMultisampleRate; + VkBool32 inheritedQueries; +} VkPhysicalDeviceFeatures; + +typedef struct VkFormatProperties { + VkFormatFeatureFlags linearTilingFeatures; + VkFormatFeatureFlags optimalTilingFeatures; + VkFormatFeatureFlags bufferFeatures; +} VkFormatProperties; + +typedef struct VkExtent3D { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkExtent3D; + +typedef struct VkImageFormatProperties { + VkExtent3D maxExtent; + uint32_t maxMipLevels; + uint32_t maxArrayLayers; + VkSampleCountFlags sampleCounts; + VkDeviceSize maxResourceSize; +} VkImageFormatProperties; + +typedef struct VkPhysicalDeviceLimits { + uint32_t maxImageDimension1D; + uint32_t maxImageDimension2D; + uint32_t maxImageDimension3D; + uint32_t maxImageDimensionCube; + uint32_t maxImageArrayLayers; + uint32_t maxTexelBufferElements; + uint32_t maxUniformBufferRange; + uint32_t maxStorageBufferRange; + uint32_t maxPushConstantsSize; + uint32_t maxMemoryAllocationCount; + uint32_t maxSamplerAllocationCount; + VkDeviceSize bufferImageGranularity; + VkDeviceSize sparseAddressSpaceSize; + uint32_t maxBoundDescriptorSets; + uint32_t maxPerStageDescriptorSamplers; + uint32_t maxPerStageDescriptorUniformBuffers; + uint32_t maxPerStageDescriptorStorageBuffers; + uint32_t maxPerStageDescriptorSampledImages; + uint32_t maxPerStageDescriptorStorageImages; + uint32_t 
maxPerStageDescriptorInputAttachments; + uint32_t maxPerStageResources; + uint32_t maxDescriptorSetSamplers; + uint32_t maxDescriptorSetUniformBuffers; + uint32_t maxDescriptorSetUniformBuffersDynamic; + uint32_t maxDescriptorSetStorageBuffers; + uint32_t maxDescriptorSetStorageBuffersDynamic; + uint32_t maxDescriptorSetSampledImages; + uint32_t maxDescriptorSetStorageImages; + uint32_t maxDescriptorSetInputAttachments; + uint32_t maxVertexInputAttributes; + uint32_t maxVertexInputBindings; + uint32_t maxVertexInputAttributeOffset; + uint32_t maxVertexInputBindingStride; + uint32_t maxVertexOutputComponents; + uint32_t maxTessellationGenerationLevel; + uint32_t maxTessellationPatchSize; + uint32_t maxTessellationControlPerVertexInputComponents; + uint32_t maxTessellationControlPerVertexOutputComponents; + uint32_t maxTessellationControlPerPatchOutputComponents; + uint32_t maxTessellationControlTotalOutputComponents; + uint32_t maxTessellationEvaluationInputComponents; + uint32_t maxTessellationEvaluationOutputComponents; + uint32_t maxGeometryShaderInvocations; + uint32_t maxGeometryInputComponents; + uint32_t maxGeometryOutputComponents; + uint32_t maxGeometryOutputVertices; + uint32_t maxGeometryTotalOutputComponents; + uint32_t maxFragmentInputComponents; + uint32_t maxFragmentOutputAttachments; + uint32_t maxFragmentDualSrcAttachments; + uint32_t maxFragmentCombinedOutputResources; + uint32_t maxComputeSharedMemorySize; + uint32_t maxComputeWorkGroupCount[3]; + uint32_t maxComputeWorkGroupInvocations; + uint32_t maxComputeWorkGroupSize[3]; + uint32_t subPixelPrecisionBits; + uint32_t subTexelPrecisionBits; + uint32_t mipmapPrecisionBits; + uint32_t maxDrawIndexedIndexValue; + uint32_t maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32_t maxViewports; + uint32_t maxViewportDimensions[2]; + float viewportBoundsRange[2]; + uint32_t viewportSubPixelBits; + size_t minMemoryMapAlignment; + VkDeviceSize minTexelBufferOffsetAlignment; + VkDeviceSize minUniformBufferOffsetAlignment; + VkDeviceSize minStorageBufferOffsetAlignment; + int32_t minTexelOffset; + uint32_t maxTexelOffset; + int32_t minTexelGatherOffset; + uint32_t maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32_t subPixelInterpolationOffsetBits; + uint32_t maxFramebufferWidth; + uint32_t maxFramebufferHeight; + uint32_t maxFramebufferLayers; + VkSampleCountFlags framebufferColorSampleCounts; + VkSampleCountFlags framebufferDepthSampleCounts; + VkSampleCountFlags framebufferStencilSampleCounts; + VkSampleCountFlags framebufferNoAttachmentsSampleCounts; + uint32_t maxColorAttachments; + VkSampleCountFlags sampledImageColorSampleCounts; + VkSampleCountFlags sampledImageIntegerSampleCounts; + VkSampleCountFlags sampledImageDepthSampleCounts; + VkSampleCountFlags sampledImageStencilSampleCounts; + VkSampleCountFlags storageImageSampleCounts; + uint32_t maxSampleMaskWords; + VkBool32 timestampComputeAndGraphics; + float timestampPeriod; + uint32_t maxClipDistances; + uint32_t maxCullDistances; + uint32_t maxCombinedClipAndCullDistances; + uint32_t discreteQueuePriorities; + float pointSizeRange[2]; + float lineWidthRange[2]; + float pointSizeGranularity; + float lineWidthGranularity; + VkBool32 strictLines; + VkBool32 standardSampleLocations; + VkDeviceSize optimalBufferCopyOffsetAlignment; + VkDeviceSize optimalBufferCopyRowPitchAlignment; + VkDeviceSize nonCoherentAtomSize; +} VkPhysicalDeviceLimits; + +typedef struct VkPhysicalDeviceSparseProperties { + 
VkBool32 residencyStandard2DBlockShape; + VkBool32 residencyStandard2DMultisampleBlockShape; + VkBool32 residencyStandard3DBlockShape; + VkBool32 residencyAlignedMipSize; + VkBool32 residencyNonResidentStrict; +} VkPhysicalDeviceSparseProperties; + +typedef struct VkPhysicalDeviceProperties { + uint32_t apiVersion; + uint32_t driverVersion; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceType deviceType; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; + VkPhysicalDeviceLimits limits; + VkPhysicalDeviceSparseProperties sparseProperties; +} VkPhysicalDeviceProperties; + +typedef struct VkQueueFamilyProperties { + VkQueueFlags queueFlags; + uint32_t queueCount; + uint32_t timestampValidBits; + VkExtent3D minImageTransferGranularity; +} VkQueueFamilyProperties; + +typedef struct VkMemoryType { + VkMemoryPropertyFlags propertyFlags; + uint32_t heapIndex; +} VkMemoryType; + +typedef struct VkMemoryHeap { + VkDeviceSize size; + VkMemoryHeapFlags flags; +} VkMemoryHeap; + +typedef struct VkPhysicalDeviceMemoryProperties { + uint32_t memoryTypeCount; + VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; + uint32_t memoryHeapCount; + VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryProperties; + +typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); +typedef struct VkDeviceQueueCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; +} VkDeviceQueueCreateInfo; + +typedef struct VkDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceCreateFlags flags; + uint32_t queueCreateInfoCount; + const VkDeviceQueueCreateInfo* pQueueCreateInfos; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const VkPhysicalDeviceFeatures* pEnabledFeatures; +} VkDeviceCreateInfo; + +typedef struct VkExtensionProperties { + char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; +} VkExtensionProperties; + +typedef struct VkLayerProperties { + char layerName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + uint32_t implementationVersion; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkLayerProperties; + +typedef struct VkSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + const VkPipelineStageFlags* pWaitDstStageMask; + uint32_t commandBufferCount; + const VkCommandBuffer* pCommandBuffers; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkSubmitInfo; + +typedef struct VkMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeIndex; +} VkMemoryAllocateInfo; + +typedef struct VkMappedMemoryRange { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMappedMemoryRange; + +typedef struct VkMemoryRequirements { + VkDeviceSize size; + VkDeviceSize alignment; + uint32_t memoryTypeBits; +} VkMemoryRequirements; + +typedef struct VkSparseImageFormatProperties { + VkImageAspectFlags aspectMask; + VkExtent3D imageGranularity; + VkSparseImageFormatFlags flags; +} VkSparseImageFormatProperties; + +typedef struct VkSparseImageMemoryRequirements { + VkSparseImageFormatProperties formatProperties; + uint32_t imageMipTailFirstLod; + VkDeviceSize imageMipTailSize; + 
VkDeviceSize imageMipTailOffset; + VkDeviceSize imageMipTailStride; +} VkSparseImageMemoryRequirements; + +typedef struct VkSparseMemoryBind { + VkDeviceSize resourceOffset; + VkDeviceSize size; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseMemoryBind; + +typedef struct VkSparseBufferMemoryBindInfo { + VkBuffer buffer; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseBufferMemoryBindInfo; + +typedef struct VkSparseImageOpaqueMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseImageOpaqueMemoryBindInfo; + +typedef struct VkImageSubresource { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t arrayLayer; +} VkImageSubresource; + +typedef struct VkOffset3D { + int32_t x; + int32_t y; + int32_t z; +} VkOffset3D; + +typedef struct VkSparseImageMemoryBind { + VkImageSubresource subresource; + VkOffset3D offset; + VkExtent3D extent; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseImageMemoryBind; + +typedef struct VkSparseImageMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseImageMemoryBind* pBinds; +} VkSparseImageMemoryBindInfo; + +typedef struct VkBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t bufferBindCount; + const VkSparseBufferMemoryBindInfo* pBufferBinds; + uint32_t imageOpaqueBindCount; + const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + uint32_t imageBindCount; + const VkSparseImageMemoryBindInfo* pImageBinds; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkBindSparseInfo; + +typedef struct VkFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkFenceCreateFlags flags; +} VkFenceCreateInfo; + +typedef struct VkSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreCreateFlags flags; +} VkSemaphoreCreateInfo; + +typedef struct VkEventCreateInfo { + VkStructureType sType; + const void* pNext; + VkEventCreateFlags flags; +} VkEventCreateInfo; + +typedef struct VkQueryPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkQueryPoolCreateFlags flags; + VkQueryType queryType; + uint32_t queryCount; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkQueryPoolCreateInfo; + +typedef struct VkBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkDeviceSize size; + VkBufferUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkBufferCreateInfo; + +typedef struct VkBufferViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferViewCreateFlags flags; + VkBuffer buffer; + VkFormat format; + VkDeviceSize offset; + VkDeviceSize range; +} VkBufferViewCreateInfo; + +typedef struct VkImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageType imageType; + VkFormat format; + VkExtent3D extent; + uint32_t mipLevels; + uint32_t arrayLayers; + VkSampleCountFlagBits samples; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkImageLayout initialLayout; +} VkImageCreateInfo; + +typedef struct VkSubresourceLayout { + VkDeviceSize offset; + VkDeviceSize size; + VkDeviceSize rowPitch; + VkDeviceSize arrayPitch; + VkDeviceSize depthPitch; 
+} VkSubresourceLayout; + +typedef struct VkComponentMapping { + VkComponentSwizzle r; + VkComponentSwizzle g; + VkComponentSwizzle b; + VkComponentSwizzle a; +} VkComponentMapping; + +typedef struct VkImageSubresourceRange { + VkImageAspectFlags aspectMask; + uint32_t baseMipLevel; + uint32_t levelCount; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceRange; + +typedef struct VkImageViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageViewCreateFlags flags; + VkImage image; + VkImageViewType viewType; + VkFormat format; + VkComponentMapping components; + VkImageSubresourceRange subresourceRange; +} VkImageViewCreateInfo; + +typedef struct VkShaderModuleCreateInfo { + VkStructureType sType; + const void* pNext; + VkShaderModuleCreateFlags flags; + size_t codeSize; + const uint32_t* pCode; +} VkShaderModuleCreateInfo; + +typedef struct VkPipelineCacheCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCacheCreateFlags flags; + size_t initialDataSize; + const void* pInitialData; +} VkPipelineCacheCreateInfo; + +typedef struct VkSpecializationMapEntry { + uint32_t constantID; + uint32_t offset; + size_t size; +} VkSpecializationMapEntry; + +typedef struct VkSpecializationInfo { + uint32_t mapEntryCount; + const VkSpecializationMapEntry* pMapEntries; + size_t dataSize; + const void* pData; +} VkSpecializationInfo; + +typedef struct VkPipelineShaderStageCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineShaderStageCreateFlags flags; + VkShaderStageFlagBits stage; + VkShaderModule module; + const char* pName; + const VkSpecializationInfo* pSpecializationInfo; +} VkPipelineShaderStageCreateInfo; + +typedef struct VkVertexInputBindingDescription { + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; +} VkVertexInputBindingDescription; + +typedef struct VkVertexInputAttributeDescription { + uint32_t location; + uint32_t binding; + VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription; + +typedef struct VkPipelineVertexInputStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineVertexInputStateCreateFlags flags; + uint32_t vertexBindingDescriptionCount; + const VkVertexInputBindingDescription* pVertexBindingDescriptions; + uint32_t vertexAttributeDescriptionCount; + const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; +} VkPipelineVertexInputStateCreateInfo; + +typedef struct VkPipelineInputAssemblyStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineInputAssemblyStateCreateFlags flags; + VkPrimitiveTopology topology; + VkBool32 primitiveRestartEnable; +} VkPipelineInputAssemblyStateCreateInfo; + +typedef struct VkPipelineTessellationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineTessellationStateCreateFlags flags; + uint32_t patchControlPoints; +} VkPipelineTessellationStateCreateInfo; + +typedef struct VkViewport { + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} VkViewport; + +typedef struct VkOffset2D { + int32_t x; + int32_t y; +} VkOffset2D; + +typedef struct VkExtent2D { + uint32_t width; + uint32_t height; +} VkExtent2D; + +typedef struct VkRect2D { + VkOffset2D offset; + VkExtent2D extent; +} VkRect2D; + +typedef struct VkPipelineViewportStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineViewportStateCreateFlags flags; + uint32_t viewportCount; + const VkViewport* pViewports; + uint32_t scissorCount; + const VkRect2D* 
pScissors; +} VkPipelineViewportStateCreateInfo; + +typedef struct VkPipelineRasterizationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateCreateFlags flags; + VkBool32 depthClampEnable; + VkBool32 rasterizerDiscardEnable; + VkPolygonMode polygonMode; + VkCullModeFlags cullMode; + VkFrontFace frontFace; + VkBool32 depthBiasEnable; + float depthBiasConstantFactor; + float depthBiasClamp; + float depthBiasSlopeFactor; + float lineWidth; +} VkPipelineRasterizationStateCreateInfo; + +typedef struct VkPipelineMultisampleStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineMultisampleStateCreateFlags flags; + VkSampleCountFlagBits rasterizationSamples; + VkBool32 sampleShadingEnable; + float minSampleShading; + const VkSampleMask* pSampleMask; + VkBool32 alphaToCoverageEnable; + VkBool32 alphaToOneEnable; +} VkPipelineMultisampleStateCreateInfo; + +typedef struct VkStencilOpState { + VkStencilOp failOp; + VkStencilOp passOp; + VkStencilOp depthFailOp; + VkCompareOp compareOp; + uint32_t compareMask; + uint32_t writeMask; + uint32_t reference; +} VkStencilOpState; + +typedef struct VkPipelineDepthStencilStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDepthStencilStateCreateFlags flags; + VkBool32 depthTestEnable; + VkBool32 depthWriteEnable; + VkCompareOp depthCompareOp; + VkBool32 depthBoundsTestEnable; + VkBool32 stencilTestEnable; + VkStencilOpState front; + VkStencilOpState back; + float minDepthBounds; + float maxDepthBounds; +} VkPipelineDepthStencilStateCreateInfo; + +typedef struct VkPipelineColorBlendAttachmentState { + VkBool32 blendEnable; + VkBlendFactor srcColorBlendFactor; + VkBlendFactor dstColorBlendFactor; + VkBlendOp colorBlendOp; + VkBlendFactor srcAlphaBlendFactor; + VkBlendFactor dstAlphaBlendFactor; + VkBlendOp alphaBlendOp; + VkColorComponentFlags colorWriteMask; +} VkPipelineColorBlendAttachmentState; + +typedef struct VkPipelineColorBlendStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineColorBlendStateCreateFlags flags; + VkBool32 logicOpEnable; + VkLogicOp logicOp; + uint32_t attachmentCount; + const VkPipelineColorBlendAttachmentState* pAttachments; + float blendConstants[4]; +} VkPipelineColorBlendStateCreateInfo; + +typedef struct VkPipelineDynamicStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDynamicStateCreateFlags flags; + uint32_t dynamicStateCount; + const VkDynamicState* pDynamicStates; +} VkPipelineDynamicStateCreateInfo; + +typedef struct VkGraphicsPipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; + const VkPipelineViewportStateCreateInfo* pViewportState; + const VkPipelineRasterizationStateCreateInfo* pRasterizationState; + const VkPipelineMultisampleStateCreateInfo* pMultisampleState; + const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; + const VkPipelineColorBlendStateCreateInfo* pColorBlendState; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkRenderPass renderPass; + uint32_t subpass; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkGraphicsPipelineCreateInfo; + +typedef struct VkComputePipelineCreateInfo { + 
VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + VkPipelineShaderStageCreateInfo stage; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkComputePipelineCreateInfo; + +typedef struct VkPushConstantRange { + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; +} VkPushConstantRange; + +typedef struct VkPipelineLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayoutCreateFlags flags; + uint32_t setLayoutCount; + const VkDescriptorSetLayout* pSetLayouts; + uint32_t pushConstantRangeCount; + const VkPushConstantRange* pPushConstantRanges; +} VkPipelineLayoutCreateInfo; + +typedef struct VkSamplerCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerCreateFlags flags; + VkFilter magFilter; + VkFilter minFilter; + VkSamplerMipmapMode mipmapMode; + VkSamplerAddressMode addressModeU; + VkSamplerAddressMode addressModeV; + VkSamplerAddressMode addressModeW; + float mipLodBias; + VkBool32 anisotropyEnable; + float maxAnisotropy; + VkBool32 compareEnable; + VkCompareOp compareOp; + float minLod; + float maxLod; + VkBorderColor borderColor; + VkBool32 unnormalizedCoordinates; +} VkSamplerCreateInfo; + +typedef struct VkDescriptorSetLayoutBinding { + uint32_t binding; + VkDescriptorType descriptorType; + uint32_t descriptorCount; + VkShaderStageFlags stageFlags; + const VkSampler* pImmutableSamplers; +} VkDescriptorSetLayoutBinding; + +typedef struct VkDescriptorSetLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorSetLayoutCreateFlags flags; + uint32_t bindingCount; + const VkDescriptorSetLayoutBinding* pBindings; +} VkDescriptorSetLayoutCreateInfo; + +typedef struct VkDescriptorPoolSize { + VkDescriptorType type; + uint32_t descriptorCount; +} VkDescriptorPoolSize; + +typedef struct VkDescriptorPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPoolCreateFlags flags; + uint32_t maxSets; + uint32_t poolSizeCount; + const VkDescriptorPoolSize* pPoolSizes; +} VkDescriptorPoolCreateInfo; + +typedef struct VkDescriptorSetAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPool descriptorPool; + uint32_t descriptorSetCount; + const VkDescriptorSetLayout* pSetLayouts; +} VkDescriptorSetAllocateInfo; + +typedef struct VkDescriptorImageInfo { + VkSampler sampler; + VkImageView imageView; + VkImageLayout imageLayout; +} VkDescriptorImageInfo; + +typedef struct VkDescriptorBufferInfo { + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize range; +} VkDescriptorBufferInfo; + +typedef struct VkWriteDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + const VkDescriptorImageInfo* pImageInfo; + const VkDescriptorBufferInfo* pBufferInfo; + const VkBufferView* pTexelBufferView; +} VkWriteDescriptorSet; + +typedef struct VkCopyDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet srcSet; + uint32_t srcBinding; + uint32_t srcArrayElement; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; +} VkCopyDescriptorSet; + +typedef struct VkFramebufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkFramebufferCreateFlags flags; + VkRenderPass renderPass; + uint32_t attachmentCount; + const VkImageView* pAttachments; + uint32_t width; + uint32_t height; + uint32_t layers; +} 
VkFramebufferCreateInfo; + +typedef struct VkAttachmentDescription { + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription; + +typedef struct VkAttachmentReference { + uint32_t attachment; + VkImageLayout layout; +} VkAttachmentReference; + +typedef struct VkSubpassDescription { + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t inputAttachmentCount; + const VkAttachmentReference* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference* pColorAttachments; + const VkAttachmentReference* pResolveAttachments; + const VkAttachmentReference* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription; + +typedef struct VkSubpassDependency { + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; +} VkSubpassDependency; + +typedef struct VkRenderPassCreateInfo { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency* pDependencies; +} VkRenderPassCreateInfo; + +typedef struct VkCommandPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPoolCreateFlags flags; + uint32_t queueFamilyIndex; +} VkCommandPoolCreateInfo; + +typedef struct VkCommandBufferAllocateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPool commandPool; + VkCommandBufferLevel level; + uint32_t commandBufferCount; +} VkCommandBufferAllocateInfo; + +typedef struct VkCommandBufferInheritanceInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + uint32_t subpass; + VkFramebuffer framebuffer; + VkBool32 occlusionQueryEnable; + VkQueryControlFlags queryFlags; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkCommandBufferInheritanceInfo; + +typedef struct VkCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + VkCommandBufferUsageFlags flags; + const VkCommandBufferInheritanceInfo* pInheritanceInfo; +} VkCommandBufferBeginInfo; + +typedef struct VkBufferCopy { + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy; + +typedef struct VkImageSubresourceLayers { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceLayers; + +typedef struct VkImageCopy { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy; + +typedef struct VkImageBlit { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffsets[2]; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffsets[2]; +} VkImageBlit; + +typedef struct VkBufferImageCopy { + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy; + +typedef union VkClearColorValue { + float 
float32[4]; + int32_t int32[4]; + uint32_t uint32[4]; +} VkClearColorValue; + +typedef struct VkClearDepthStencilValue { + float depth; + uint32_t stencil; +} VkClearDepthStencilValue; + +typedef union VkClearValue { + VkClearColorValue color; + VkClearDepthStencilValue depthStencil; +} VkClearValue; + +typedef struct VkClearAttachment { + VkImageAspectFlags aspectMask; + uint32_t colorAttachment; + VkClearValue clearValue; +} VkClearAttachment; + +typedef struct VkClearRect { + VkRect2D rect; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkClearRect; + +typedef struct VkImageResolve { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageResolve; + +typedef struct VkMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; +} VkMemoryBarrier; + +typedef struct VkBufferMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; +} VkBufferMemoryBarrier; + +typedef struct VkImageMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkImageLayout oldLayout; + VkImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkImage image; + VkImageSubresourceRange subresourceRange; +} VkImageMemoryBarrier; + +typedef struct VkRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + VkFramebuffer framebuffer; + VkRect2D renderArea; + uint32_t clearValueCount; + const VkClearValue* pClearValues; +} VkRenderPassBeginInfo; + +typedef struct VkDispatchIndirectCommand { + uint32_t x; + uint32_t y; + uint32_t z; +} VkDispatchIndirectCommand; + +typedef struct VkDrawIndexedIndirectCommand { + uint32_t indexCount; + uint32_t instanceCount; + uint32_t firstIndex; + int32_t vertexOffset; + uint32_t firstInstance; +} VkDrawIndexedIndirectCommand; + +typedef struct VkDrawIndirectCommand { + uint32_t vertexCount; + uint32_t instanceCount; + uint32_t firstVertex; + uint32_t firstInstance; +} VkDrawIndirectCommand; + +typedef struct VkBaseOutStructure { + VkStructureType sType; + struct VkBaseOutStructure* pNext; +} VkBaseOutStructure; + +typedef struct VkBaseInStructure { + VkStructureType sType; + const struct VkBaseInStructure* pNext; +} VkBaseInStructure; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance); +typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* 
pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice); +typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue); +typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); +typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); +typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory); +typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, 
VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); +typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent); +typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); +typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); +typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* 
pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); +typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); +typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, 
VkDescriptorPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); +typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); +typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); +typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers); +typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); +typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); +typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); +typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags 
faceMask, uint32_t compareMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); +typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR 
*PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); + +VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( + VkInstance instance, + const 
VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( + VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( + VkInstance instance, + const char* pName); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( + VkDevice device, + const char* pName); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( + VkDevice device, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( + VkDevice device, + uint32_t queueFamilyIndex, + uint32_t queueIndex, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo* pSubmits, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( + VkQueue queue); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( + VkDevice device, + const VkMemoryAllocateInfo* pAllocateInfo, + const VkAllocationCallbacks* pAllocator, + VkDeviceMemory* pMemory); + +VKAPI_ATTR void VKAPI_CALL vkFreeMemory( + VkDevice device, + VkDeviceMemory memory, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize offset, + VkDeviceSize size, + VkMemoryMapFlags flags, + void** ppData); + +VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( + VkDevice device, + VkDeviceMemory memory); + +VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( + VkDevice device, + uint32_t 
memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize* pCommittedMemoryInBytes); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( + VkDevice device, + VkBuffer buffer, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( + VkDevice device, + VkImage image, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( + VkDevice device, + VkBuffer buffer, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements( + VkDevice device, + VkImage image, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( + VkDevice device, + VkImage image, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( + VkQueue queue, + uint32_t bindInfoCount, + const VkBindSparseInfo* pBindInfo, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( + VkDevice device, + const VkFenceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFence( + VkDevice device, + VkFence fence, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( + VkDevice device, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences, + VkBool32 waitAll, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( + VkDevice device, + const VkSemaphoreCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSemaphore* pSemaphore); + +VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( + VkDevice device, + VkSemaphore semaphore, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( + VkDevice device, + const VkEventCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkEvent* pEvent); + +VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( + VkDevice device, + VkEvent event, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( + VkDevice device, + const VkQueryPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkQueryPool* pQueryPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( + VkDevice device, + VkQueryPool queryPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( + VkDevice 
device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + void* pData, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( + VkDevice device, + const VkBufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBuffer* pBuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( + VkDevice device, + VkBuffer buffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( + VkDevice device, + const VkBufferViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBufferView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( + VkDevice device, + VkBufferView bufferView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( + VkDevice device, + const VkImageCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImage* pImage); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImage( + VkDevice device, + VkImage image, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( + VkDevice device, + VkImage image, + const VkImageSubresource* pSubresource, + VkSubresourceLayout* pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( + VkDevice device, + const VkImageViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImageView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( + VkDevice device, + VkImageView imageView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( + VkDevice device, + const VkShaderModuleCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkShaderModule* pShaderModule); + +VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( + VkDevice device, + VkShaderModule shaderModule, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( + VkDevice device, + const VkPipelineCacheCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineCache* pPipelineCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( + VkDevice device, + VkPipelineCache pipelineCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( + VkDevice device, + VkPipelineCache pipelineCache, + size_t* pDataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( + VkDevice device, + VkPipelineCache dstCache, + uint32_t srcCacheCount, + const VkPipelineCache* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( + VkDevice device, + VkPipeline pipeline, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( + VkDevice device, + const VkPipelineLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineLayout* pPipelineLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( + VkDevice device, + VkPipelineLayout 
pipelineLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler( + VkDevice device, + const VkSamplerCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSampler* pSampler); + +VKAPI_ATTR void VKAPI_CALL vkDestroySampler( + VkDevice device, + VkSampler sampler, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorSetLayout* pSetLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( + VkDevice device, + VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( + VkDevice device, + const VkDescriptorPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorPool* pDescriptorPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( + VkDevice device, + const VkDescriptorSetAllocateInfo* pAllocateInfo, + VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( + VkDevice device, + VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( + VkDevice device, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet* pDescriptorCopies); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( + VkDevice device, + const VkFramebufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFramebuffer* pFramebuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( + VkDevice device, + VkFramebuffer framebuffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass( + VkDevice device, + const VkRenderPassCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( + VkDevice device, + VkRenderPass renderPass, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( + VkDevice device, + VkRenderPass renderPass, + VkExtent2D* pGranularity); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool( + VkDevice device, + const VkCommandPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCommandPool* pCommandPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( + VkDevice device, + VkCommandPool commandPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( + VkDevice device, + const VkCommandBufferAllocateInfo* pAllocateInfo, + VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( + VkDevice device, + VkCommandPool commandPool, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( + VkCommandBuffer 
commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( + VkCommandBuffer commandBuffer, + VkCommandBufferResetFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( + VkCommandBuffer commandBuffer, + uint32_t firstScissor, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( + VkCommandBuffer commandBuffer, + float lineWidth); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias( + VkCommandBuffer commandBuffer, + float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( + VkCommandBuffer commandBuffer, + const float blendConstants[4]); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds( + VkCommandBuffer commandBuffer, + float minDepthBounds, + float maxDepthBounds); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t compareMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t writeMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t reference); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t firstSet, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets, + uint32_t dynamicOffsetCount, + const uint32_t* pDynamicOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdDraw( + VkCommandBuffer commandBuffer, + uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed( + VkCommandBuffer commandBuffer, + uint32_t indexCount, + uint32_t instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferCopy* pRegions); 
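
The command-buffer entry points declared above (vkBeginCommandBuffer through vkCmdCopyBuffer) are used in a fixed begin/record/end pattern. A minimal recording sketch follows; it is illustrative only and not part of the vendored header, the handles `cmd` and `pipeline` and the 640x480 extent are hypothetical, and `<vulkan/vulkan.h>` is assumed to be the standard umbrella include for this header.

#include <vulkan/vulkan.h>

/* Record a trivial one-shot draw into an already-allocated command buffer.
 * Assumes `pipeline` was created with dynamic viewport/scissor state. */
static void record_triangle(VkCommandBuffer cmd, VkPipeline pipeline)
{
    VkCommandBufferBeginInfo begin_info = {0};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    vkBeginCommandBuffer(cmd, &begin_info);           /* start recording */

    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);

    VkViewport viewport = {0.0f, 0.0f, 640.0f, 480.0f, 0.0f, 1.0f};
    vkCmdSetViewport(cmd, 0, 1, &viewport);           /* dynamic viewport state */

    VkRect2D scissor = {{0, 0}, {640, 480}};
    vkCmdSetScissor(cmd, 0, 1, &scissor);             /* dynamic scissor state */

    vkCmdDraw(cmd, 3, 1, 0, 0);                       /* 3 vertices, 1 instance */

    vkEndCommandBuffer(cmd);                          /* ready for vkQueueSubmit */
}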
+ +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageBlit* pRegions, + VkFilter filter); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize dataSize, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize size, + uint32_t data); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearColorValue* pColor, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearDepthStencilValue* pDepthStencil, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkClearAttachment* pAttachments, + uint32_t rectCount, + const VkClearRect* pRects); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageResolve* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags); + 
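
The VkImageMemoryBarrier struct and vkCmdPipelineBarrier declared above are the building blocks for image layout transitions. A minimal sketch, assuming a hypothetical color image `image` that was just written by a transfer and will next be sampled in a fragment shader (`cmd` is a command buffer in the recording state; illustrative only, not part of the vendored header):

/* Transition TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL for sampling. */
static void transition_for_sampling(VkCommandBuffer cmd, VkImage image)
{
    VkImageMemoryBarrier barrier = {0};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;   /* writes to flush */
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;      /* reads to make visible */
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;  /* no ownership transfer */
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    barrier.subresourceRange.levelCount = 1;                /* single mip level */
    barrier.subresourceRange.layerCount = 1;                /* single array layer */

    vkCmdPipelineBarrier(cmd,
                         VK_PIPELINE_STAGE_TRANSFER_BIT,        /* producing stage */
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, /* consuming stage */
                         0,                                     /* dependencyFlags */
                         0, NULL,                               /* no global barriers */
                         0, NULL,                               /* no buffer barriers */
                         1, &barrier);
}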
+VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(
+    VkCommandBuffer commandBuffer,
+    VkPipelineStageFlagBits pipelineStage,
+    VkQueryPool queryPool,
+    uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount,
+    VkBuffer dstBuffer,
+    VkDeviceSize dstOffset,
+    VkDeviceSize stride,
+    VkQueryResultFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(
+    VkCommandBuffer commandBuffer,
+    VkPipelineLayout layout,
+    VkShaderStageFlags stageFlags,
+    uint32_t offset,
+    uint32_t size,
+    const void* pValues);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
+    VkCommandBuffer commandBuffer,
+    VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
+    VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(
+    VkCommandBuffer commandBuffer,
+    uint32_t commandBufferCount,
+    const VkCommandBuffer* pCommandBuffers);
+#endif
+
+
+#define VK_VERSION_1_1 1
+// Vulkan 1.1 version number
+#define VK_API_VERSION_1_1 VK_MAKE_VERSION(1, 1, 0)// Patch version should always be set to 0
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate)
+#define VK_MAX_DEVICE_GROUP_SIZE 32
+#define VK_LUID_SIZE 8
+#define VK_QUEUE_FAMILY_EXTERNAL (~0U-1)
+
+typedef enum VkPointClippingBehavior {
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1,
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+    VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF
+} VkPointClippingBehavior;
+
+typedef enum VkTessellationDomainOrigin {
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1,
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF
+} VkTessellationDomainOrigin;
+
+typedef enum VkSamplerYcbcrModelConversion {
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrModelConversion;
+
+typedef enum VkSamplerYcbcrRange {
+    VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,
+    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1,
+    VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,
+    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,
+    VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrRange;
+
+typedef enum VkChromaLocation {
+    VK_CHROMA_LOCATION_COSITED_EVEN = 0,
+    VK_CHROMA_LOCATION_MIDPOINT = 1,
+    VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN,
+    VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT,
+    VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF
+} VkChromaLocation;
+
+typedef enum VkDescriptorUpdateTemplateType {
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorUpdateTemplateType;
+
+typedef enum VkSubgroupFeatureFlagBits {
+    VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001,
+    VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002,
+    VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004,
+    VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008,
+    VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010,
+    VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020,
+    VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040,
+    VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080,
+    VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100,
+    VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSubgroupFeatureFlagBits;
+typedef VkFlags VkSubgroupFeatureFlags;
+
+typedef enum VkPeerMemoryFeatureFlagBits {
+    VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001,
+    VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002,
+    VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004,
+    VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008,
+    VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT,
+    VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT,
+    VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT,
+    VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT,
+    VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPeerMemoryFeatureFlagBits;
+typedef VkFlags VkPeerMemoryFeatureFlags;
+
+typedef enum VkMemoryAllocateFlagBits {
+    VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001,
+    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT = 0x00000002,
+    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000004,
+    VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT,
+    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT,
+    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT,
+    VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryAllocateFlagBits;
+typedef VkFlags VkMemoryAllocateFlags;
+typedef VkFlags VkCommandPoolTrimFlags;
+typedef VkFlags VkDescriptorUpdateTemplateCreateFlags;
+
+typedef enum VkExternalMemoryHandleTypeFlagBits {
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalMemoryHandleTypeFlagBits;
+typedef VkFlags VkExternalMemoryHandleTypeFlags;
+
+typedef enum VkExternalMemoryFeatureFlagBits {
+    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001,
+    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002,
+    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004,
+    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalMemoryFeatureFlagBits;
+typedef VkFlags VkExternalMemoryFeatureFlags;
+
+typedef enum VkExternalFenceHandleTypeFlagBits {
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalFenceHandleTypeFlagBits;
+typedef VkFlags VkExternalFenceHandleTypeFlags;
+
+typedef enum VkExternalFenceFeatureFlagBits {
+    VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001,
+    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002,
+    VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalFenceFeatureFlagBits;
+typedef VkFlags VkExternalFenceFeatureFlags;
+
+typedef enum VkFenceImportFlagBits {
+    VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001,
+    VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT,
+    VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFenceImportFlagBits;
+typedef VkFlags VkFenceImportFlags;
+
+typedef enum VkSemaphoreImportFlagBits {
+    VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001,
+    VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
+    VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSemaphoreImportFlagBits;
+typedef VkFlags VkSemaphoreImportFlags;
+
+typedef enum VkExternalSemaphoreHandleTypeFlagBits {
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalSemaphoreHandleTypeFlagBits;
+typedef VkFlags VkExternalSemaphoreHandleTypeFlags;
+
+typedef enum VkExternalSemaphoreFeatureFlagBits {
+    VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalSemaphoreFeatureFlagBits;
+typedef VkFlags VkExternalSemaphoreFeatureFlags;
+typedef struct VkPhysicalDeviceSubgroupProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t subgroupSize;
+    VkShaderStageFlags supportedStages;
+    VkSubgroupFeatureFlags supportedOperations;
+    VkBool32 quadOperationsInAllStages;
+} VkPhysicalDeviceSubgroupProperties;
+
+typedef struct VkBindBufferMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindBufferMemoryInfo;
+
+typedef struct VkBindImageMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindImageMemoryInfo;
+
+typedef struct VkPhysicalDevice16BitStorageFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 storageBuffer16BitAccess;
+    VkBool32 uniformAndStorageBuffer16BitAccess;
+    VkBool32 storagePushConstant16;
+    VkBool32 storageInputOutput16;
+} VkPhysicalDevice16BitStorageFeatures;
+
+typedef struct VkMemoryDedicatedRequirements {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 prefersDedicatedAllocation;
+    VkBool32 requiresDedicatedAllocation;
+} VkMemoryDedicatedRequirements;
+
+typedef struct VkMemoryDedicatedAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkBuffer buffer;
+} VkMemoryDedicatedAllocateInfo;
+
+typedef struct VkMemoryAllocateFlagsInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkMemoryAllocateFlags flags;
+    uint32_t deviceMask;
+} VkMemoryAllocateFlagsInfo;
+
+typedef struct VkDeviceGroupRenderPassBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceMask;
+    uint32_t deviceRenderAreaCount;
+    const VkRect2D* pDeviceRenderAreas;
+} VkDeviceGroupRenderPassBeginInfo;
+
+typedef struct VkDeviceGroupCommandBufferBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceMask;
+} VkDeviceGroupCommandBufferBeginInfo;
+
+typedef struct VkDeviceGroupSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreCount;
+    const uint32_t* pWaitSemaphoreDeviceIndices;
+    uint32_t commandBufferCount;
+    const uint32_t* pCommandBufferDeviceMasks;
+    uint32_t signalSemaphoreCount;
+    const uint32_t* pSignalSemaphoreDeviceIndices;
+} VkDeviceGroupSubmitInfo;
+
+typedef struct VkDeviceGroupBindSparseInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t resourceDeviceIndex;
+    uint32_t memoryDeviceIndex;
+} VkDeviceGroupBindSparseInfo;
+
+typedef struct VkBindBufferMemoryDeviceGroupInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+} VkBindBufferMemoryDeviceGroupInfo;
+
+typedef struct VkBindImageMemoryDeviceGroupInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+    uint32_t splitInstanceBindRegionCount;
+    const VkRect2D* pSplitInstanceBindRegions;
+} VkBindImageMemoryDeviceGroupInfo;
+
+typedef struct VkPhysicalDeviceGroupProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t physicalDeviceCount;
+    VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE];
+    VkBool32 subsetAllocation;
+} VkPhysicalDeviceGroupProperties;
+
+typedef struct VkDeviceGroupDeviceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t physicalDeviceCount;
+    const VkPhysicalDevice* pPhysicalDevices;
+} VkDeviceGroupDeviceCreateInfo;
+
+typedef struct VkBufferMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+} VkBufferMemoryRequirementsInfo2;
+
+typedef struct VkImageMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageMemoryRequirementsInfo2;
+
+typedef struct VkImageSparseMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageSparseMemoryRequirementsInfo2;
+
+typedef struct VkMemoryRequirements2 {
+    VkStructureType sType;
+    void* pNext;
+    VkMemoryRequirements memoryRequirements;
+} VkMemoryRequirements2;
+
+typedef VkMemoryRequirements2 VkMemoryRequirements2KHR;
+
+typedef struct VkSparseImageMemoryRequirements2 {
+    VkStructureType sType;
+    void* pNext;
+    VkSparseImageMemoryRequirements memoryRequirements;
+} VkSparseImageMemoryRequirements2;
+
+typedef struct VkPhysicalDeviceFeatures2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceFeatures features;
+} VkPhysicalDeviceFeatures2;
+
+typedef struct VkPhysicalDeviceProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceProperties properties;
+} VkPhysicalDeviceProperties2;
+
+typedef struct VkFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkFormatProperties formatProperties;
+} VkFormatProperties2;
+
+typedef struct VkImageFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkImageFormatProperties imageFormatProperties;
+} VkImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceImageFormatInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkImageType type;
+    VkImageTiling tiling;
+    VkImageUsageFlags usage;
+    VkImageCreateFlags flags;
+} VkPhysicalDeviceImageFormatInfo2;
+
+typedef struct VkQueueFamilyProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkQueueFamilyProperties queueFamilyProperties;
+} VkQueueFamilyProperties2;
+
+typedef struct VkPhysicalDeviceMemoryProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceMemoryProperties memoryProperties;
+} VkPhysicalDeviceMemoryProperties2;
+
+typedef struct VkSparseImageFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkSparseImageFormatProperties properties;
+} VkSparseImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceSparseImageFormatInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkImageType type;
+    VkSampleCountFlagBits samples;
+    VkImageUsageFlags usage;
+    VkImageTiling tiling;
+} VkPhysicalDeviceSparseImageFormatInfo2;
+
+typedef struct VkPhysicalDevicePointClippingProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkPointClippingBehavior pointClippingBehavior;
+} VkPhysicalDevicePointClippingProperties;
+
+typedef struct VkInputAttachmentAspectReference {
+    uint32_t subpass;
+    uint32_t inputAttachmentIndex;
+    VkImageAspectFlags aspectMask;
+} VkInputAttachmentAspectReference;
+
+typedef struct VkRenderPassInputAttachmentAspectCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t aspectReferenceCount;
+    const VkInputAttachmentAspectReference* pAspectReferences;
+} VkRenderPassInputAttachmentAspectCreateInfo;
+
+typedef struct VkImageViewUsageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageUsageFlags usage;
+} VkImageViewUsageCreateInfo;
+
+typedef struct VkPipelineTessellationDomainOriginStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkTessellationDomainOrigin domainOrigin;
+} VkPipelineTessellationDomainOriginStateCreateInfo;
+
+typedef struct VkRenderPassMultiviewCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t subpassCount;
+    const uint32_t* pViewMasks;
+    uint32_t dependencyCount;
+    const int32_t* pViewOffsets;
+    uint32_t correlationMaskCount;
+    const uint32_t* pCorrelationMasks;
+} VkRenderPassMultiviewCreateInfo;
+
+typedef struct VkPhysicalDeviceMultiviewFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 multiview;
+    VkBool32 multiviewGeometryShader;
+    VkBool32 multiviewTessellationShader;
+} VkPhysicalDeviceMultiviewFeatures;
+
+typedef struct VkPhysicalDeviceMultiviewProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxMultiviewViewCount;
+    uint32_t maxMultiviewInstanceIndex;
+} VkPhysicalDeviceMultiviewProperties;
+
+typedef struct VkPhysicalDeviceVariablePointersFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 variablePointersStorageBuffer;
+    VkBool32 variablePointers;
+} VkPhysicalDeviceVariablePointersFeatures;
+
+typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeatures;
+
+typedef struct VkPhysicalDeviceProtectedMemoryFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 protectedMemory;
+} VkPhysicalDeviceProtectedMemoryFeatures;
+
+typedef struct VkPhysicalDeviceProtectedMemoryProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 protectedNoFault;
+} VkPhysicalDeviceProtectedMemoryProperties;
+
+typedef struct VkDeviceQueueInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceQueueCreateFlags flags;
+    uint32_t queueFamilyIndex;
+    uint32_t queueIndex;
+} VkDeviceQueueInfo2;
+
+typedef struct VkProtectedSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 protectedSubmit;
+} VkProtectedSubmitInfo;
+
+typedef struct VkSamplerYcbcrConversionCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkSamplerYcbcrModelConversion ycbcrModel;
+    VkSamplerYcbcrRange ycbcrRange;
+    VkComponentMapping components;
+    VkChromaLocation xChromaOffset;
+    VkChromaLocation yChromaOffset;
+    VkFilter chromaFilter;
+    VkBool32 forceExplicitReconstruction;
+} VkSamplerYcbcrConversionCreateInfo;
+
+typedef struct VkSamplerYcbcrConversionInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSamplerYcbcrConversion conversion;
+} VkSamplerYcbcrConversionInfo;
+
+typedef struct VkBindImagePlaneMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageAspectFlagBits planeAspect;
+} VkBindImagePlaneMemoryInfo;
+
+typedef struct VkImagePlaneMemoryRequirementsInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageAspectFlagBits planeAspect;
+} VkImagePlaneMemoryRequirementsInfo;
+
+typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 samplerYcbcrConversion;
+} VkPhysicalDeviceSamplerYcbcrConversionFeatures;
+
+typedef struct VkSamplerYcbcrConversionImageFormatProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t combinedImageSamplerDescriptorCount;
+} VkSamplerYcbcrConversionImageFormatProperties;
+
+typedef struct VkDescriptorUpdateTemplateEntry {
+    uint32_t dstBinding;
+    uint32_t dstArrayElement;
+    uint32_t descriptorCount;
+    VkDescriptorType descriptorType;
+    size_t offset;
+    size_t stride;
+} VkDescriptorUpdateTemplateEntry;
+
+typedef struct VkDescriptorUpdateTemplateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorUpdateTemplateCreateFlags flags;
+    uint32_t descriptorUpdateEntryCount;
+    const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries;
+    VkDescriptorUpdateTemplateType templateType;
+    VkDescriptorSetLayout descriptorSetLayout;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkPipelineLayout pipelineLayout;
+    uint32_t set;
+} VkDescriptorUpdateTemplateCreateInfo;
+
+typedef struct VkExternalMemoryProperties {
+    VkExternalMemoryFeatureFlags externalMemoryFeatures;
+    VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalMemoryHandleTypeFlags compatibleHandleTypes;
+} VkExternalMemoryProperties;
+
+typedef struct VkPhysicalDeviceExternalImageFormatInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalImageFormatInfo;
+
+typedef struct VkExternalImageFormatProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalImageFormatProperties;
+
+typedef struct VkPhysicalDeviceExternalBufferInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBufferCreateFlags flags;
+    VkBufferUsageFlags usage;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalBufferInfo;
+
+typedef struct VkExternalBufferProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalBufferProperties;
+
+typedef struct VkPhysicalDeviceIDProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint8_t deviceUUID[VK_UUID_SIZE];
+    uint8_t driverUUID[VK_UUID_SIZE];
+    uint8_t deviceLUID[VK_LUID_SIZE];
+    uint32_t deviceNodeMask;
+    VkBool32 deviceLUIDValid;
+} VkPhysicalDeviceIDProperties;
+
+typedef struct VkExternalMemoryImageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryImageCreateInfo;
+
+typedef struct VkExternalMemoryBufferCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryBufferCreateInfo;
+
+typedef struct VkExportMemoryAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExportMemoryAllocateInfo;
+
+typedef struct VkPhysicalDeviceExternalFenceInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalFenceHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalFenceInfo;
+
+typedef struct VkExternalFenceProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalFenceHandleTypeFlags compatibleHandleTypes;
+    VkExternalFenceFeatureFlags externalFenceFeatures;
+} VkExternalFenceProperties;
+
+typedef struct VkExportFenceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalFenceHandleTypeFlags handleTypes;
+} VkExportFenceCreateInfo;
+
+typedef struct VkExportSemaphoreCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalSemaphoreHandleTypeFlags handleTypes;
+} VkExportSemaphoreCreateInfo;
+
+typedef struct VkPhysicalDeviceExternalSemaphoreInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalSemaphoreInfo;
+
+typedef struct VkExternalSemaphoreProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes;
+    VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures;
+} VkExternalSemaphoreProperties;
+
+typedef struct VkPhysicalDeviceMaintenance3Properties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxPerSetDescriptors;
+    VkDeviceSize maxMemoryAllocationSize;
+} VkPhysicalDeviceMaintenance3Properties;
+
+typedef struct VkDescriptorSetLayoutSupport {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 supported;
+} VkDescriptorSetLayoutSupport;
+
+typedef struct VkPhysicalDeviceShaderDrawParametersFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderDrawParameters;
+} VkPhysicalDeviceShaderDrawParametersFeatures;
+
+typedef VkPhysicalDeviceShaderDrawParametersFeatures VkPhysicalDeviceShaderDrawParameterFeatures;
+
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);
+typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion(
+    uint32_t* pApiVersion);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindBufferMemoryInfo* pBindInfos);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindImageMemoryInfo* pBindInfos);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures(
+    VkDevice device,
+    uint32_t heapIndex,
+    uint32_t localDeviceIndex,
+    uint32_t remoteDeviceIndex,
+    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask(
+    VkCommandBuffer commandBuffer,
+    uint32_t deviceMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase(
+    VkCommandBuffer commandBuffer,
+    uint32_t baseGroupX,
+    uint32_t baseGroupY,
+    uint32_t baseGroupZ,
+    uint32_t groupCountX,
+    uint32_t groupCountY,
+    uint32_t groupCountZ);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups(
+    VkInstance instance,
+    uint32_t* pPhysicalDeviceGroupCount,
+    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2(
+    VkDevice device,
+    const VkImageMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2(
+    VkDevice device,
+    const VkBufferMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2(
+    VkDevice device,
+    const VkImageSparseMemoryRequirementsInfo2* pInfo,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceFeatures2* pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceProperties2* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkFormatProperties2* pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+    VkImageFormatProperties2* pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pQueueFamilyPropertyCount,
+    VkQueueFamilyProperties2* pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
+    uint32_t* pPropertyCount,
+    VkSparseImageFormatProperties2* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool(
+    VkDevice device,
+    VkCommandPool commandPool,
+    VkCommandPoolTrimFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2(
+    VkDevice device,
+    const VkDeviceQueueInfo2* pQueueInfo,
+    VkQueue* pQueue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion(
+    VkDevice device,
+    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSamplerYcbcrConversion* pYcbcrConversion);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion(
+    VkDevice device,
+    VkSamplerYcbcrConversion ycbcrConversion,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate(
+    VkDevice device,
+    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate(
+    VkDevice device,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate(
+    VkDevice device,
+    VkDescriptorSet descriptorSet,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const void* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
+    VkExternalBufferProperties* pExternalBufferProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
+    VkExternalFenceProperties* pExternalFenceProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+    VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport(
+    VkDevice device,
+    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+    VkDescriptorSetLayoutSupport* pSupport);
+#endif
+
+
+#define VK_VERSION_1_2 1
+// Vulkan 1.2 version number
+#define VK_API_VERSION_1_2 VK_MAKE_VERSION(1, 2, 0)// Patch version should always be set to 0
+
+typedef uint64_t VkDeviceAddress;
+#define VK_MAX_DRIVER_NAME_SIZE 256
+#define VK_MAX_DRIVER_INFO_SIZE 256
+
+typedef enum VkDriverId {
+    VK_DRIVER_ID_AMD_PROPRIETARY = 1,
+    VK_DRIVER_ID_AMD_OPEN_SOURCE = 2,
+    VK_DRIVER_ID_MESA_RADV = 3,
+    VK_DRIVER_ID_NVIDIA_PROPRIETARY = 4,
+    VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS = 5,
+    VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA = 6,
+    VK_DRIVER_ID_IMAGINATION_PROPRIETARY = 7,
+    VK_DRIVER_ID_QUALCOMM_PROPRIETARY = 8,
+    VK_DRIVER_ID_ARM_PROPRIETARY = 9,
+    VK_DRIVER_ID_GOOGLE_SWIFTSHADER = 10,
+    VK_DRIVER_ID_GGP_PROPRIETARY = 11,
+    VK_DRIVER_ID_BROADCOM_PROPRIETARY = 12,
+    VK_DRIVER_ID_MESA_LLVMPIPE = 13,
+    VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY,
+    VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE,
+    VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV,
+    VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR = VK_DRIVER_ID_NVIDIA_PROPRIETARY,
+    VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR = VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS,
+    VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA,
+    VK_DRIVER_ID_IMAGINATION_PROPRIETARY_KHR = VK_DRIVER_ID_IMAGINATION_PROPRIETARY,
+    VK_DRIVER_ID_QUALCOMM_PROPRIETARY_KHR = VK_DRIVER_ID_QUALCOMM_PROPRIETARY,
+    VK_DRIVER_ID_ARM_PROPRIETARY_KHR = VK_DRIVER_ID_ARM_PROPRIETARY,
+    VK_DRIVER_ID_GOOGLE_SWIFTSHADER_KHR = VK_DRIVER_ID_GOOGLE_SWIFTSHADER,
+    VK_DRIVER_ID_GGP_PROPRIETARY_KHR = VK_DRIVER_ID_GGP_PROPRIETARY,
+    VK_DRIVER_ID_BROADCOM_PROPRIETARY_KHR = VK_DRIVER_ID_BROADCOM_PROPRIETARY,
+    VK_DRIVER_ID_MAX_ENUM = 0x7FFFFFFF
+} VkDriverId;
+
+typedef enum VkShaderFloatControlsIndependence {
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY = 0,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL = 1,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE = 2,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_MAX_ENUM = 0x7FFFFFFF
+} VkShaderFloatControlsIndependence;
+
+typedef enum VkSamplerReductionMode {
+    VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE = 0,
+    VK_SAMPLER_REDUCTION_MODE_MIN = 1,
+    VK_SAMPLER_REDUCTION_MODE_MAX = 2,
+    VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE,
+    VK_SAMPLER_REDUCTION_MODE_MIN_EXT = VK_SAMPLER_REDUCTION_MODE_MIN,
+    VK_SAMPLER_REDUCTION_MODE_MAX_EXT = VK_SAMPLER_REDUCTION_MODE_MAX,
+    VK_SAMPLER_REDUCTION_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerReductionMode;
+
+typedef enum VkSemaphoreType {
+    VK_SEMAPHORE_TYPE_BINARY = 0,
+    VK_SEMAPHORE_TYPE_TIMELINE = 1,
+    VK_SEMAPHORE_TYPE_BINARY_KHR = VK_SEMAPHORE_TYPE_BINARY,
+    VK_SEMAPHORE_TYPE_TIMELINE_KHR = VK_SEMAPHORE_TYPE_TIMELINE,
+    VK_SEMAPHORE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkSemaphoreType;
+
+typedef enum VkResolveModeFlagBits {
+    VK_RESOLVE_MODE_NONE = 0,
+    VK_RESOLVE_MODE_SAMPLE_ZERO_BIT = 0x00000001,
+    VK_RESOLVE_MODE_AVERAGE_BIT = 0x00000002,
+    VK_RESOLVE_MODE_MIN_BIT = 0x00000004,
+    VK_RESOLVE_MODE_MAX_BIT = 0x00000008,
+    VK_RESOLVE_MODE_NONE_KHR = VK_RESOLVE_MODE_NONE,
+    VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT,
+    VK_RESOLVE_MODE_AVERAGE_BIT_KHR = VK_RESOLVE_MODE_AVERAGE_BIT,
+    VK_RESOLVE_MODE_MIN_BIT_KHR = VK_RESOLVE_MODE_MIN_BIT,
+    VK_RESOLVE_MODE_MAX_BIT_KHR = VK_RESOLVE_MODE_MAX_BIT,
+    VK_RESOLVE_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkResolveModeFlagBits;
+typedef VkFlags VkResolveModeFlags;
+
+typedef enum VkDescriptorBindingFlagBits {
+    VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT = 0x00000001,
+    VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT = 0x00000002,
+    VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT = 0x00000004,
+    VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT = 0x00000008,
+    VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT,
+    VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT,
+    VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT,
+    VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT,
+    VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorBindingFlagBits;
+typedef VkFlags VkDescriptorBindingFlags;
+
+typedef enum VkSemaphoreWaitFlagBits {
+    VK_SEMAPHORE_WAIT_ANY_BIT = 0x00000001,
+    VK_SEMAPHORE_WAIT_ANY_BIT_KHR = VK_SEMAPHORE_WAIT_ANY_BIT,
+    VK_SEMAPHORE_WAIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSemaphoreWaitFlagBits;
+typedef VkFlags VkSemaphoreWaitFlags;
+typedef struct VkPhysicalDeviceVulkan11Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 storageBuffer16BitAccess;
+    VkBool32 uniformAndStorageBuffer16BitAccess;
+    VkBool32 storagePushConstant16;
+    VkBool32 storageInputOutput16;
+    VkBool32 multiview;
+    VkBool32 multiviewGeometryShader;
+    VkBool32 multiviewTessellationShader;
+    VkBool32 variablePointersStorageBuffer;
+    VkBool32 variablePointers;
+    VkBool32 protectedMemory;
+    VkBool32 samplerYcbcrConversion;
+    VkBool32 shaderDrawParameters;
+} VkPhysicalDeviceVulkan11Features;
+
+typedef struct VkPhysicalDeviceVulkan11Properties {
+    VkStructureType sType;
+    void* pNext;
+    uint8_t deviceUUID[VK_UUID_SIZE];
+    uint8_t driverUUID[VK_UUID_SIZE];
+    uint8_t deviceLUID[VK_LUID_SIZE];
+    uint32_t deviceNodeMask;
+    VkBool32 deviceLUIDValid;
+    uint32_t subgroupSize;
+    VkShaderStageFlags subgroupSupportedStages;
+    VkSubgroupFeatureFlags subgroupSupportedOperations;
+    VkBool32 subgroupQuadOperationsInAllStages;
+    VkPointClippingBehavior pointClippingBehavior;
+    uint32_t maxMultiviewViewCount;
+    uint32_t maxMultiviewInstanceIndex;
+    VkBool32 protectedNoFault;
+    uint32_t maxPerSetDescriptors;
+    VkDeviceSize maxMemoryAllocationSize;
+} VkPhysicalDeviceVulkan11Properties;
+
+typedef struct VkPhysicalDeviceVulkan12Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 samplerMirrorClampToEdge;
+    VkBool32 drawIndirectCount;
+    VkBool32 storageBuffer8BitAccess;
+    VkBool32 uniformAndStorageBuffer8BitAccess;
+    VkBool32 storagePushConstant8;
+    VkBool32 shaderBufferInt64Atomics;
+    VkBool32 shaderSharedInt64Atomics;
+    VkBool32 shaderFloat16;
+    VkBool32 shaderInt8;
+    VkBool32 descriptorIndexing;
+    VkBool32 shaderInputAttachmentArrayDynamicIndexing;
+    VkBool32 shaderUniformTexelBufferArrayDynamicIndexing;
+    VkBool32 shaderStorageTexelBufferArrayDynamicIndexing;
+    VkBool32 shaderUniformBufferArrayNonUniformIndexing;
+    VkBool32 shaderSampledImageArrayNonUniformIndexing;
+    VkBool32 shaderStorageBufferArrayNonUniformIndexing;
+    VkBool32 shaderStorageImageArrayNonUniformIndexing;
+    VkBool32 shaderInputAttachmentArrayNonUniformIndexing;
+    VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing;
+    VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing;
+    VkBool32 descriptorBindingUniformBufferUpdateAfterBind;
+    VkBool32 descriptorBindingSampledImageUpdateAfterBind;
+    VkBool32 descriptorBindingStorageImageUpdateAfterBind;
+    VkBool32 descriptorBindingStorageBufferUpdateAfterBind;
+    VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind;
+    VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind;
+    VkBool32 descriptorBindingUpdateUnusedWhilePending;
+    VkBool32 descriptorBindingPartiallyBound;
+    VkBool32 descriptorBindingVariableDescriptorCount;
+    VkBool32 runtimeDescriptorArray;
+    VkBool32 samplerFilterMinmax;
+    VkBool32 scalarBlockLayout;
+    VkBool32 imagelessFramebuffer;
+    VkBool32 uniformBufferStandardLayout;
+    VkBool32 shaderSubgroupExtendedTypes;
+    VkBool32 separateDepthStencilLayouts;
+    VkBool32 hostQueryReset;
+    VkBool32 timelineSemaphore;
+    VkBool32 bufferDeviceAddress;
+    VkBool32 bufferDeviceAddressCaptureReplay;
+    VkBool32 bufferDeviceAddressMultiDevice;
+    VkBool32 vulkanMemoryModel;
+    VkBool32 vulkanMemoryModelDeviceScope;
+    VkBool32 vulkanMemoryModelAvailabilityVisibilityChains;
+    VkBool32 shaderOutputViewportIndex;
+    VkBool32 shaderOutputLayer;
+    VkBool32 subgroupBroadcastDynamicId;
+} VkPhysicalDeviceVulkan12Features;
+
+typedef struct VkConformanceVersion {
+    uint8_t major;
+    uint8_t minor;
+    uint8_t subminor;
+    uint8_t patch;
+} VkConformanceVersion;
+
+typedef struct VkPhysicalDeviceVulkan12Properties {
+    VkStructureType sType;
+    void* pNext;
+    VkDriverId driverID;
+    char driverName[VK_MAX_DRIVER_NAME_SIZE];
+    char driverInfo[VK_MAX_DRIVER_INFO_SIZE];
+    VkConformanceVersion conformanceVersion;
+    VkShaderFloatControlsIndependence denormBehaviorIndependence;
+    VkShaderFloatControlsIndependence roundingModeIndependence;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat16;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat32;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat64;
+    VkBool32 shaderDenormPreserveFloat16;
+    VkBool32 shaderDenormPreserveFloat32;
+    VkBool32 shaderDenormPreserveFloat64;
+    VkBool32 shaderDenormFlushToZeroFloat16;
+    VkBool32 shaderDenormFlushToZeroFloat32;
+    VkBool32 shaderDenormFlushToZeroFloat64;
+    VkBool32 shaderRoundingModeRTEFloat16;
+    VkBool32 shaderRoundingModeRTEFloat32;
+    VkBool32 shaderRoundingModeRTEFloat64;
+    VkBool32 shaderRoundingModeRTZFloat16;
+    VkBool32 shaderRoundingModeRTZFloat32;
+    VkBool32 shaderRoundingModeRTZFloat64;
+    uint32_t maxUpdateAfterBindDescriptorsInAllPools;
+    VkBool32 shaderUniformBufferArrayNonUniformIndexingNative;
+    VkBool32 shaderSampledImageArrayNonUniformIndexingNative;
+    VkBool32 shaderStorageBufferArrayNonUniformIndexingNative;
+    VkBool32 shaderStorageImageArrayNonUniformIndexingNative;
+    VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative;
+    VkBool32 robustBufferAccessUpdateAfterBind;
+    VkBool32 quadDivergentImplicitLod;
+    uint32_t maxPerStageDescriptorUpdateAfterBindSamplers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages;
+    uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments;
+    uint32_t maxPerStageUpdateAfterBindResources;
+    uint32_t maxDescriptorSetUpdateAfterBindSamplers;
+    uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers;
+    uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic;
+    uint32_t maxDescriptorSetUpdateAfterBindSampledImages;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageImages;
+    uint32_t maxDescriptorSetUpdateAfterBindInputAttachments;
+    VkResolveModeFlags supportedDepthResolveModes;
+    VkResolveModeFlags supportedStencilResolveModes;
+    VkBool32 independentResolveNone;
+    VkBool32 independentResolve;
+    VkBool32 filterMinmaxSingleComponentFormats;
+    VkBool32 filterMinmaxImageComponentMapping;
+    uint64_t maxTimelineSemaphoreValueDifference;
+    VkSampleCountFlags framebufferIntegerColorSampleCounts;
+} VkPhysicalDeviceVulkan12Properties;
+
+typedef struct VkImageFormatListCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t viewFormatCount;
+    const VkFormat* pViewFormats;
+} VkImageFormatListCreateInfo;
+
+typedef struct VkAttachmentDescription2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkAttachmentDescriptionFlags flags;
+    VkFormat format;
+    VkSampleCountFlagBits samples;
+    VkAttachmentLoadOp loadOp;
+    VkAttachmentStoreOp storeOp;
+    VkAttachmentLoadOp stencilLoadOp;
+    VkAttachmentStoreOp stencilStoreOp;
+    VkImageLayout initialLayout;
+    VkImageLayout finalLayout;
+} VkAttachmentDescription2;
+
+typedef struct VkAttachmentReference2 {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t attachment;
+    VkImageLayout layout;
+    VkImageAspectFlags aspectMask;
+} VkAttachmentReference2;
+
+typedef struct VkSubpassDescription2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkSubpassDescriptionFlags flags;
+    VkPipelineBindPoint pipelineBindPoint;
+    uint32_t viewMask;
+    uint32_t inputAttachmentCount;
+    const VkAttachmentReference2* pInputAttachments;
+    uint32_t colorAttachmentCount;
+    const VkAttachmentReference2* pColorAttachments;
+    const VkAttachmentReference2* pResolveAttachments;
+    const VkAttachmentReference2* pDepthStencilAttachment;
+    uint32_t preserveAttachmentCount;
+    const uint32_t* pPreserveAttachments;
+} VkSubpassDescription2;
+
+typedef struct VkSubpassDependency2 {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t srcSubpass;
+    uint32_t dstSubpass;
+    VkPipelineStageFlags srcStageMask;
+    VkPipelineStageFlags dstStageMask;
+    VkAccessFlags srcAccessMask;
+    VkAccessFlags dstAccessMask;
+    VkDependencyFlags dependencyFlags;
+    int32_t viewOffset;
+} VkSubpassDependency2;
+
+typedef struct VkRenderPassCreateInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderPassCreateFlags flags;
+    uint32_t attachmentCount;
+    const VkAttachmentDescription2* pAttachments;
+    uint32_t subpassCount;
+    const VkSubpassDescription2* pSubpasses;
+    uint32_t dependencyCount;
+    const VkSubpassDependency2* pDependencies;
+    uint32_t correlatedViewMaskCount;
+    const uint32_t* pCorrelatedViewMasks;
+} VkRenderPassCreateInfo2;
+
+typedef struct VkSubpassBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSubpassContents contents;
+} VkSubpassBeginInfo;
+
+typedef struct VkSubpassEndInfo {
+    VkStructureType sType;
+    const void* pNext;
+} VkSubpassEndInfo;
+
+typedef struct VkPhysicalDevice8BitStorageFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 storageBuffer8BitAccess;
+    VkBool32 uniformAndStorageBuffer8BitAccess;
+    VkBool32 storagePushConstant8;
+} VkPhysicalDevice8BitStorageFeatures;
+
+typedef struct VkPhysicalDeviceDriverProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkDriverId driverID;
+    char driverName[VK_MAX_DRIVER_NAME_SIZE];
+    char driverInfo[VK_MAX_DRIVER_INFO_SIZE];
+    VkConformanceVersion conformanceVersion;
+} VkPhysicalDeviceDriverProperties;
+
+typedef struct VkPhysicalDeviceShaderAtomicInt64Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderBufferInt64Atomics;
+    VkBool32 shaderSharedInt64Atomics;
+} VkPhysicalDeviceShaderAtomicInt64Features;
+
+typedef struct VkPhysicalDeviceShaderFloat16Int8Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderFloat16;
+    VkBool32 shaderInt8;
+} VkPhysicalDeviceShaderFloat16Int8Features;
+
+typedef struct VkPhysicalDeviceFloatControlsProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkShaderFloatControlsIndependence denormBehaviorIndependence;
+    VkShaderFloatControlsIndependence roundingModeIndependence;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat16;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat32;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat64;
+    VkBool32 shaderDenormPreserveFloat16;
+    VkBool32 shaderDenormPreserveFloat32;
+    VkBool32 shaderDenormPreserveFloat64;
+    VkBool32 shaderDenormFlushToZeroFloat16;
+    VkBool32 shaderDenormFlushToZeroFloat32;
+    VkBool32 shaderDenormFlushToZeroFloat64;
+    VkBool32 shaderRoundingModeRTEFloat16;
+    VkBool32 shaderRoundingModeRTEFloat32;
+    VkBool32 shaderRoundingModeRTEFloat64;
+    VkBool32 shaderRoundingModeRTZFloat16;
+    VkBool32 shaderRoundingModeRTZFloat32;
+    VkBool32 shaderRoundingModeRTZFloat64;
+} VkPhysicalDeviceFloatControlsProperties;
+
+typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t bindingCount;
+    const VkDescriptorBindingFlags* pBindingFlags;
+} VkDescriptorSetLayoutBindingFlagsCreateInfo;
+
+typedef struct VkPhysicalDeviceDescriptorIndexingFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderInputAttachmentArrayDynamicIndexing;
+    VkBool32 shaderUniformTexelBufferArrayDynamicIndexing;
+    VkBool32 shaderStorageTexelBufferArrayDynamicIndexing;
+    VkBool32 shaderUniformBufferArrayNonUniformIndexing;
+    VkBool32 shaderSampledImageArrayNonUniformIndexing;
+    VkBool32 shaderStorageBufferArrayNonUniformIndexing;
+    VkBool32 shaderStorageImageArrayNonUniformIndexing;
+    VkBool32 shaderInputAttachmentArrayNonUniformIndexing;
+    VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing;
+    VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing;
+    VkBool32 descriptorBindingUniformBufferUpdateAfterBind;
+    VkBool32 descriptorBindingSampledImageUpdateAfterBind;
+    VkBool32 descriptorBindingStorageImageUpdateAfterBind;
+    VkBool32 descriptorBindingStorageBufferUpdateAfterBind;
+    VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind;
+    VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind;
+    VkBool32 descriptorBindingUpdateUnusedWhilePending;
+    VkBool32 descriptorBindingPartiallyBound;
+    VkBool32 descriptorBindingVariableDescriptorCount;
+    VkBool32 runtimeDescriptorArray;
+} VkPhysicalDeviceDescriptorIndexingFeatures;
+
+typedef struct VkPhysicalDeviceDescriptorIndexingProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxUpdateAfterBindDescriptorsInAllPools;
+    VkBool32 shaderUniformBufferArrayNonUniformIndexingNative;
+    VkBool32 shaderSampledImageArrayNonUniformIndexingNative;
+    VkBool32 shaderStorageBufferArrayNonUniformIndexingNative;
+    VkBool32 shaderStorageImageArrayNonUniformIndexingNative;
+    VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative;
+    VkBool32 robustBufferAccessUpdateAfterBind;
+    VkBool32 quadDivergentImplicitLod;
+    uint32_t maxPerStageDescriptorUpdateAfterBindSamplers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages;
+    uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments;
+    uint32_t maxPerStageUpdateAfterBindResources;
+    uint32_t maxDescriptorSetUpdateAfterBindSamplers;
+    uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers;
+    uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic;
+    uint32_t maxDescriptorSetUpdateAfterBindSampledImages;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageImages;
+    uint32_t maxDescriptorSetUpdateAfterBindInputAttachments;
+} VkPhysicalDeviceDescriptorIndexingProperties;
+
+typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t descriptorSetCount;
+    const uint32_t* pDescriptorCounts;
+} VkDescriptorSetVariableDescriptorCountAllocateInfo;
+
+typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupport {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxVariableDescriptorCount;
+} VkDescriptorSetVariableDescriptorCountLayoutSupport;
+
+typedef struct VkSubpassDescriptionDepthStencilResolve {
+    VkStructureType sType;
+    const void* pNext;
+    VkResolveModeFlagBits depthResolveMode;
+    VkResolveModeFlagBits stencilResolveMode;
+    const VkAttachmentReference2* pDepthStencilResolveAttachment;
+} VkSubpassDescriptionDepthStencilResolve;
+
+typedef struct VkPhysicalDeviceDepthStencilResolveProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkResolveModeFlags supportedDepthResolveModes;
+    VkResolveModeFlags supportedStencilResolveModes;
+    VkBool32 independentResolveNone;
+    VkBool32 independentResolve;
+} VkPhysicalDeviceDepthStencilResolveProperties;
+
+typedef struct VkPhysicalDeviceScalarBlockLayoutFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 scalarBlockLayout;
+} VkPhysicalDeviceScalarBlockLayoutFeatures;
+
+typedef struct VkImageStencilUsageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageUsageFlags stencilUsage;
+} VkImageStencilUsageCreateInfo;
+
+typedef struct VkSamplerReductionModeCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSamplerReductionMode reductionMode;
+} VkSamplerReductionModeCreateInfo;
+
+typedef struct VkPhysicalDeviceSamplerFilterMinmaxProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 filterMinmaxSingleComponentFormats;
+    VkBool32 filterMinmaxImageComponentMapping;
+} VkPhysicalDeviceSamplerFilterMinmaxProperties;
+
+typedef struct VkPhysicalDeviceVulkanMemoryModelFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 vulkanMemoryModel;
+    VkBool32 vulkanMemoryModelDeviceScope;
+    VkBool32 vulkanMemoryModelAvailabilityVisibilityChains;
+} VkPhysicalDeviceVulkanMemoryModelFeatures;
+
+typedef struct VkPhysicalDeviceImagelessFramebufferFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 imagelessFramebuffer;
+} VkPhysicalDeviceImagelessFramebufferFeatures;
+
+typedef struct VkFramebufferAttachmentImageInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageCreateFlags flags;
+    VkImageUsageFlags usage;
+    uint32_t width;
+    uint32_t height;
+    uint32_t layerCount;
+    uint32_t viewFormatCount;
+    const VkFormat* pViewFormats;
+} VkFramebufferAttachmentImageInfo;
+
+typedef struct VkFramebufferAttachmentsCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t attachmentImageInfoCount;
+    const VkFramebufferAttachmentImageInfo* pAttachmentImageInfos;
+} VkFramebufferAttachmentsCreateInfo;
+
+typedef struct VkRenderPassAttachmentBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t attachmentCount;
+    const VkImageView* pAttachments;
+} VkRenderPassAttachmentBeginInfo;
+
+typedef struct VkPhysicalDeviceUniformBufferStandardLayoutFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 uniformBufferStandardLayout;
+} VkPhysicalDeviceUniformBufferStandardLayoutFeatures;
+
+typedef struct VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderSubgroupExtendedTypes;
+} VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures;
+
+typedef struct VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 separateDepthStencilLayouts;
+} VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures;
+
+typedef struct VkAttachmentReferenceStencilLayout {
+    VkStructureType sType;
+    void* pNext;
+    VkImageLayout stencilLayout;
+} VkAttachmentReferenceStencilLayout;
+
+typedef struct VkAttachmentDescriptionStencilLayout {
+    VkStructureType sType;
+    void* pNext;
+    VkImageLayout stencilInitialLayout;
+    VkImageLayout stencilFinalLayout;
+} VkAttachmentDescriptionStencilLayout;
+
+typedef struct VkPhysicalDeviceHostQueryResetFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 hostQueryReset;
+} VkPhysicalDeviceHostQueryResetFeatures;
+
+typedef struct VkPhysicalDeviceTimelineSemaphoreFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 timelineSemaphore;
+} VkPhysicalDeviceTimelineSemaphoreFeatures;
+
+typedef struct VkPhysicalDeviceTimelineSemaphoreProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint64_t maxTimelineSemaphoreValueDifference;
+} VkPhysicalDeviceTimelineSemaphoreProperties;
+
+typedef struct VkSemaphoreTypeCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphoreType semaphoreType;
+    uint64_t initialValue;
+} VkSemaphoreTypeCreateInfo;
+
+typedef struct VkTimelineSemaphoreSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreValueCount;
+    const uint64_t* pWaitSemaphoreValues;
+    uint32_t signalSemaphoreValueCount;
+    const uint64_t* pSignalSemaphoreValues;
+} VkTimelineSemaphoreSubmitInfo;
+
+typedef struct VkSemaphoreWaitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphoreWaitFlags flags;
+    uint32_t semaphoreCount;
+    const VkSemaphore* pSemaphores;
+    const uint64_t* pValues;
+} VkSemaphoreWaitInfo;
+
+typedef struct VkSemaphoreSignalInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphore semaphore;
+    uint64_t value;
+} VkSemaphoreSignalInfo;
+
+typedef struct VkPhysicalDeviceBufferDeviceAddressFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 bufferDeviceAddress;
+    VkBool32 bufferDeviceAddressCaptureReplay;
+    VkBool32 bufferDeviceAddressMultiDevice;
+} VkPhysicalDeviceBufferDeviceAddressFeatures;
+
+typedef struct VkBufferDeviceAddressInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+} VkBufferDeviceAddressInfo;
+
+typedef struct VkBufferOpaqueCaptureAddressCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint64_t opaqueCaptureAddress;
+} VkBufferOpaqueCaptureAddressCreateInfo;
+
+typedef struct VkMemoryOpaqueCaptureAddressAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint64_t opaqueCaptureAddress;
+} VkMemoryOpaqueCaptureAddressAllocateInfo;
+
+typedef struct VkDeviceMemoryOpaqueCaptureAddressInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceMemory memory;
+} VkDeviceMemoryOpaqueCaptureAddressInfo;
+
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo);
+typedef void (VKAPI_PTR *PFN_vkResetQueryPool)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValue)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue);
+typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphores)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout);
+typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphore)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo);
+typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
+typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
+typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddress)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCount(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCount(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2(
+    VkDevice device,
+    const VkRenderPassCreateInfo2* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkRenderPass* pRenderPass);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    const VkSubpassBeginInfo* pSubpassBeginInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassBeginInfo* pSubpassBeginInfo,
+    const VkSubpassEndInfo* pSubpassEndInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassEndInfo* pSubpassEndInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkResetQueryPool(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValue(
+    VkDevice device,
+    VkSemaphore semaphore,
+    uint64_t* pValue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphores(
+    VkDevice device,
+    const VkSemaphoreWaitInfo* pWaitInfo,
+    uint64_t timeout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphore(
+    VkDevice device,
+    const VkSemaphoreSignalInfo* pSignalInfo);
+
+VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo);
+
+VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo);
+
+VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress(
+    VkDevice device,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);
+#endif
+
+
+#define VK_KHR_surface 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)
+#define VK_KHR_SURFACE_SPEC_VERSION 25
+#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface"
+
+typedef enum VkPresentModeKHR {
+    VK_PRESENT_MODE_IMMEDIATE_KHR = 0,
+    VK_PRESENT_MODE_MAILBOX_KHR = 1,
+    VK_PRESENT_MODE_FIFO_KHR = 2,
+    VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3,
+    VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000,
+    VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001,
+    VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkPresentModeKHR;
+
+typedef enum VkColorSpaceKHR {
+    VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0,
+    VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001,
+    VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002,
+    VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1000104003,
+    VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004,
+    VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005,
+    VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006,
+    VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007,
+    VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008,
+    VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009,
+    VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010,
+    VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011,
+    VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012,
+    VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013,
+    VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014,
+    VK_COLOR_SPACE_DISPLAY_NATIVE_AMD = 1000213000,
+    VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
+    VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT,
+    VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkColorSpaceKHR;
+
+typedef enum VkSurfaceTransformFlagBitsKHR {
+    VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001,
+    VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002,
+    VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004,
+    VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080,
+    VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100,
+    VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSurfaceTransformFlagBitsKHR;
+
+typedef enum VkCompositeAlphaFlagBitsKHR {
+    VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+    VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002,
+    VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004,
+    VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008,
+    VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkCompositeAlphaFlagBitsKHR;
+typedef VkFlags VkSurfaceTransformFlagsKHR;
+typedef VkFlags VkCompositeAlphaFlagsKHR;
+typedef struct VkSurfaceCapabilitiesKHR {
+    uint32_t minImageCount;
+    uint32_t maxImageCount;
+    VkExtent2D currentExtent;
+    VkExtent2D minImageExtent;
+    VkExtent2D maxImageExtent;
+    uint32_t maxImageArrayLayers;
+    VkSurfaceTransformFlagsKHR supportedTransforms;
+    VkSurfaceTransformFlagBitsKHR currentTransform;
+    VkCompositeAlphaFlagsKHR supportedCompositeAlpha;
+    VkImageUsageFlags supportedUsageFlags;
+} VkSurfaceCapabilitiesKHR;
+
+typedef struct VkSurfaceFormatKHR {
+    VkFormat format;
+    VkColorSpaceKHR colorSpace;
+} VkSurfaceFormatKHR;
+
+typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(
+    VkInstance instance,
+    VkSurfaceKHR surface,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    VkSurfaceKHR surface,
+    VkBool32* pSupported);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+    VkPhysicalDevice physicalDevice,
+    VkSurfaceKHR surface,
+    VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+
+VKAPI_ATTR VkResult
VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormatKHR* pSurfaceFormats); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif + + +#define VK_KHR_swapchain 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) +#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70 +#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" + +typedef enum VkSwapchainCreateFlagBitsKHR { + VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001, + VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002, + VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x00000004, + VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSwapchainCreateFlagBitsKHR; +typedef VkFlags VkSwapchainCreateFlagsKHR; + +typedef enum VkDeviceGroupPresentModeFlagBitsKHR { + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001, + VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002, + VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004, + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008, + VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDeviceGroupPresentModeFlagBitsKHR; +typedef VkFlags VkDeviceGroupPresentModeFlagsKHR; +typedef struct VkSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainCreateFlagsKHR flags; + VkSurfaceKHR surface; + uint32_t minImageCount; + VkFormat imageFormat; + VkColorSpaceKHR imageColorSpace; + VkExtent2D imageExtent; + uint32_t imageArrayLayers; + VkImageUsageFlags imageUsage; + VkSharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkSurfaceTransformFlagBitsKHR preTransform; + VkCompositeAlphaFlagBitsKHR compositeAlpha; + VkPresentModeKHR presentMode; + VkBool32 clipped; + VkSwapchainKHR oldSwapchain; +} VkSwapchainCreateInfoKHR; + +typedef struct VkPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t swapchainCount; + const VkSwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + VkResult* pResults; +} VkPresentInfoKHR; + +typedef struct VkImageSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; +} VkImageSwapchainCreateInfoKHR; + +typedef struct VkBindImageMemorySwapchainInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint32_t imageIndex; +} VkBindImageMemorySwapchainInfoKHR; + +typedef struct VkAcquireNextImageInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint64_t timeout; + VkSemaphore semaphore; + VkFence fence; + uint32_t deviceMask; +} VkAcquireNextImageInfoKHR; + +typedef struct VkDeviceGroupPresentCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupPresentCapabilitiesKHR; + +typedef struct VkDeviceGroupPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint32_t* pDeviceMasks; + VkDeviceGroupPresentModeFlagBitsKHR mode; +} VkDeviceGroupPresentInfoKHR; + +typedef struct VkDeviceGroupSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceGroupPresentModeFlagsKHR modes; +} 
VkDeviceGroupSwapchainCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); +typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); +typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( + VkDevice device, + const VkSwapchainCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchain); + +VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pSwapchainImageCount, + VkImage* pSwapchainImages); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t timeout, + VkSemaphore semaphore, + VkFence fence, + uint32_t* pImageIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( + VkQueue queue, + const VkPresentInfoKHR* pPresentInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( + VkDevice device, + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( + VkDevice device, + const VkAcquireNextImageInfoKHR* pAcquireInfo, + uint32_t* pImageIndex); +#endif + + +#define VK_KHR_display 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) +#define VK_KHR_DISPLAY_SPEC_VERSION 23 +#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" + +typedef enum VkDisplayPlaneAlphaFlagBitsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, + VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} 
VkDisplayPlaneAlphaFlagBitsKHR; +typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; +typedef VkFlags VkDisplayModeCreateFlagsKHR; +typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; +typedef struct VkDisplayPropertiesKHR { + VkDisplayKHR display; + const char* displayName; + VkExtent2D physicalDimensions; + VkExtent2D physicalResolution; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkBool32 planeReorderPossible; + VkBool32 persistentContent; +} VkDisplayPropertiesKHR; + +typedef struct VkDisplayModeParametersKHR { + VkExtent2D visibleRegion; + uint32_t refreshRate; +} VkDisplayModeParametersKHR; + +typedef struct VkDisplayModePropertiesKHR { + VkDisplayModeKHR displayMode; + VkDisplayModeParametersKHR parameters; +} VkDisplayModePropertiesKHR; + +typedef struct VkDisplayModeCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeCreateFlagsKHR flags; + VkDisplayModeParametersKHR parameters; +} VkDisplayModeCreateInfoKHR; + +typedef struct VkDisplayPlaneCapabilitiesKHR { + VkDisplayPlaneAlphaFlagsKHR supportedAlpha; + VkOffset2D minSrcPosition; + VkOffset2D maxSrcPosition; + VkExtent2D minSrcExtent; + VkExtent2D maxSrcExtent; + VkOffset2D minDstPosition; + VkOffset2D maxDstPosition; + VkExtent2D minDstExtent; + VkExtent2D maxDstExtent; +} VkDisplayPlaneCapabilitiesKHR; + +typedef struct VkDisplayPlanePropertiesKHR { + VkDisplayKHR currentDisplay; + uint32_t currentStackIndex; +} VkDisplayPlanePropertiesKHR; + +typedef struct VkDisplaySurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceCreateFlagsKHR flags; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkDisplaySurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + 
VkDisplayPlanePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( + VkPhysicalDevice physicalDevice, + uint32_t planeIndex, + uint32_t* pDisplayCount, + VkDisplayKHR* pDisplays); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDisplayModeKHR* pMode); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR* pCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( + VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_KHR_display_swapchain 1 +#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 10 +#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" +typedef struct VkDisplayPresentInfoKHR { + VkStructureType sType; + const void* pNext; + VkRect2D srcRect; + VkRect2D dstRect; + VkBool32 persistent; +} VkDisplayPresentInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchains); +#endif + + +#define VK_KHR_sampler_mirror_clamp_to_edge 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 3 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" + + +#define VK_KHR_multiview 1 +#define VK_KHR_MULTIVIEW_SPEC_VERSION 1 +#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview" +typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR; + +typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR; + +typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR; + + + +#define VK_KHR_get_physical_device_properties2 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 2 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" +typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR; + +typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR; + +typedef VkFormatProperties2 VkFormatProperties2KHR; + +typedef VkImageFormatProperties2 VkImageFormatProperties2KHR; + +typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR; + +typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR; + +typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR; + +typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR; + +typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); +#endif + + +#define VK_KHR_device_group 1 +#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 4 +#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group" +typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR; + +typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR; + +typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR; + +typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR; + +typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR; + +typedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR; + +typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR; + +typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR; + +typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR; + +typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR; + +typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, 
VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif + + +#define VK_KHR_shader_draw_parameters 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" + + +#define VK_KHR_maintenance1 1 +#define VK_KHR_MAINTENANCE1_SPEC_VERSION 2 +#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1" +typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; + +typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); +#endif + + +#define VK_KHR_device_group_creation 1 +#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1 +#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation" +#define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE +typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR; + +typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +#endif + + +#define VK_KHR_external_memory_capabilities 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" +#define VK_LUID_SIZE_KHR VK_LUID_SIZE +typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR; + +typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR; + +typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR; + +typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR; + +typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR; + +typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR; + +typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR; + +typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR; + +typedef VkExternalBufferProperties VkExternalBufferPropertiesKHR; + +typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR; + +typedef void (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); +#endif + + +#define VK_KHR_external_memory 1 +#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" +#define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL +typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR; + +typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR; + +typedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR; + + + +#define VK_KHR_external_memory_fd 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" +typedef struct VkImportMemoryFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + int fd; +} VkImportMemoryFdInfoKHR; + +typedef struct VkMemoryFdPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryFdPropertiesKHR; + +typedef struct VkMemoryGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties); +#endif + + +#define VK_KHR_external_semaphore_capabilities 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" +typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR; + +typedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR; + +typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR; + +typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR; + +typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +#endif + + +#define VK_KHR_external_semaphore 1 +#define 
VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" +typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR; + +typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR; + +typedef VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR; + + + +#define VK_KHR_external_semaphore_fd 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" +typedef struct VkImportSemaphoreFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + int fd; +} VkImportSemaphoreFdInfoKHR; + +typedef struct VkSemaphoreGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + + +#define VK_KHR_push_descriptor 1 +#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 +#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" +typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); +#endif + + +#define VK_KHR_shader_float16_int8 1 +#define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1 +#define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME "VK_KHR_shader_float16_int8" +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceShaderFloat16Int8FeaturesKHR; + +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceFloat16Int8FeaturesKHR; + + + +#define VK_KHR_16bit_storage 1 +#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" +typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR; + + + +#define VK_KHR_incremental_present 1 +#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1 +#define 
VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" +typedef struct VkRectLayerKHR { + VkOffset2D offset; + VkExtent2D extent; + uint32_t layer; +} VkRectLayerKHR; + +typedef struct VkPresentRegionKHR { + uint32_t rectangleCount; + const VkRectLayerKHR* pRectangles; +} VkPresentRegionKHR; + +typedef struct VkPresentRegionsKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentRegionKHR* pRegions; +} VkPresentRegionsKHR; + + + +#define VK_KHR_descriptor_update_template 1 +typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR; + +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" +typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR; + +typedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR; + +typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR; + +typedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); +#endif + + +#define VK_KHR_imageless_framebuffer 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME "VK_KHR_imageless_framebuffer" +typedef VkPhysicalDeviceImagelessFramebufferFeatures VkPhysicalDeviceImagelessFramebufferFeaturesKHR; + +typedef VkFramebufferAttachmentsCreateInfo VkFramebufferAttachmentsCreateInfoKHR; + +typedef VkFramebufferAttachmentImageInfo VkFramebufferAttachmentImageInfoKHR; + +typedef VkRenderPassAttachmentBeginInfo VkRenderPassAttachmentBeginInfoKHR; + + + +#define VK_KHR_create_renderpass2 1 +#define VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION 1 +#define VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME "VK_KHR_create_renderpass2" +typedef VkRenderPassCreateInfo2 VkRenderPassCreateInfo2KHR; + +typedef VkAttachmentDescription2 VkAttachmentDescription2KHR; + +typedef VkAttachmentReference2 VkAttachmentReference2KHR; + +typedef VkSubpassDescription2 VkSubpassDescription2KHR; + +typedef VkSubpassDependency2 VkSubpassDependency2KHR; + +typedef VkSubpassBeginInfo VkSubpassBeginInfoKHR; + +typedef VkSubpassEndInfo VkSubpassEndInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2KHR)(VkDevice device, 
const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2KHR( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo); +#endif + + +#define VK_KHR_shared_presentable_image 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" +typedef struct VkSharedPresentSurfaceCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkImageUsageFlags sharedPresentSupportedUsageFlags; +} VkSharedPresentSurfaceCapabilitiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( + VkDevice device, + VkSwapchainKHR swapchain); +#endif + + +#define VK_KHR_external_fence_capabilities 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" +typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR; + +typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR; + +typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR; + +typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR; + +typedef VkExternalFenceProperties VkExternalFencePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); +#endif + + +#define VK_KHR_external_fence 1 +#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence" +typedef VkFenceImportFlags VkFenceImportFlagsKHR; + +typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR; + +typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR; + + + +#define VK_KHR_external_fence_fd 1 +#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1 +#define 
VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd" +typedef struct VkImportFenceFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + int fd; +} VkImportFenceFdInfoKHR; + +typedef struct VkFenceGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR( + VkDevice device, + const VkImportFenceFdInfoKHR* pImportFenceFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR( + VkDevice device, + const VkFenceGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + + +#define VK_KHR_performance_query 1 +#define VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION 1 +#define VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME "VK_KHR_performance_query" + +typedef enum VkPerformanceCounterUnitKHR { + VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR = 0, + VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR = 1, + VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR = 2, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR = 3, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR = 4, + VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR = 5, + VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR = 6, + VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR = 7, + VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR = 8, + VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR = 9, + VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR = 10, + VK_PERFORMANCE_COUNTER_UNIT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterUnitKHR; + +typedef enum VkPerformanceCounterScopeKHR { + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR = 0, + VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR = 1, + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR = 2, + VK_QUERY_SCOPE_COMMAND_BUFFER_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, + VK_QUERY_SCOPE_RENDER_PASS_KHR = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR, + VK_QUERY_SCOPE_COMMAND_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, + VK_PERFORMANCE_COUNTER_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterScopeKHR; + +typedef enum VkPerformanceCounterStorageKHR { + VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR = 0, + VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR = 1, + VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR = 2, + VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR = 3, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR = 4, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR = 5, + VK_PERFORMANCE_COUNTER_STORAGE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterStorageKHR; + +typedef enum VkPerformanceCounterDescriptionFlagBitsKHR { + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = 0x00000001, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = 0x00000002, + VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterDescriptionFlagBitsKHR; +typedef VkFlags VkPerformanceCounterDescriptionFlagsKHR; + +typedef enum VkAcquireProfilingLockFlagBitsKHR { + VK_ACQUIRE_PROFILING_LOCK_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAcquireProfilingLockFlagBitsKHR; +typedef VkFlags VkAcquireProfilingLockFlagsKHR; +typedef struct VkPhysicalDevicePerformanceQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 performanceCounterQueryPools; + VkBool32 
performanceCounterMultipleQueryPools; +} VkPhysicalDevicePerformanceQueryFeaturesKHR; + +typedef struct VkPhysicalDevicePerformanceQueryPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 allowCommandBufferQueryCopies; +} VkPhysicalDevicePerformanceQueryPropertiesKHR; + +typedef struct VkPerformanceCounterKHR { + VkStructureType sType; + const void* pNext; + VkPerformanceCounterUnitKHR unit; + VkPerformanceCounterScopeKHR scope; + VkPerformanceCounterStorageKHR storage; + uint8_t uuid[VK_UUID_SIZE]; +} VkPerformanceCounterKHR; + +typedef struct VkPerformanceCounterDescriptionKHR { + VkStructureType sType; + const void* pNext; + VkPerformanceCounterDescriptionFlagsKHR flags; + char name[VK_MAX_DESCRIPTION_SIZE]; + char category[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkPerformanceCounterDescriptionKHR; + +typedef struct VkQueryPoolPerformanceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t queueFamilyIndex; + uint32_t counterIndexCount; + const uint32_t* pCounterIndices; +} VkQueryPoolPerformanceCreateInfoKHR; + +typedef union VkPerformanceCounterResultKHR { + int32_t int32; + int64_t int64; + uint32_t uint32; + uint64_t uint64; + float float32; + double float64; +} VkPerformanceCounterResultKHR; + +typedef struct VkAcquireProfilingLockInfoKHR { + VkStructureType sType; + const void* pNext; + VkAcquireProfilingLockFlagsKHR flags; + uint64_t timeout; +} VkAcquireProfilingLockInfoKHR; + +typedef struct VkPerformanceQuerySubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t counterPassIndex; +} VkPerformanceQuerySubmitInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)(VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireProfilingLockKHR)(VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkReleaseProfilingLockKHR)(VkDevice device); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pCounterCount, + VkPerformanceCounterKHR* pCounters, + VkPerformanceCounterDescriptionKHR* pCounterDescriptions); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( + VkPhysicalDevice physicalDevice, + const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, + uint32_t* pNumPasses); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireProfilingLockKHR( + VkDevice device, + const VkAcquireProfilingLockInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR( + VkDevice device); +#endif + + +#define VK_KHR_maintenance2 1 +#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2" +typedef VkPointClippingBehavior VkPointClippingBehaviorKHR; + +typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR; + +typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR; + +typedef VkRenderPassInputAttachmentAspectCreateInfo 
VkRenderPassInputAttachmentAspectCreateInfoKHR; + +typedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR; + +typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR; + +typedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR; + + + +#define VK_KHR_get_surface_capabilities2 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2" +typedef struct VkPhysicalDeviceSurfaceInfo2KHR { + VkStructureType sType; + const void* pNext; + VkSurfaceKHR surface; +} VkPhysicalDeviceSurfaceInfo2KHR; + +typedef struct VkSurfaceCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceCapabilitiesKHR surfaceCapabilities; +} VkSurfaceCapabilities2KHR; + +typedef struct VkSurfaceFormat2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceFormatKHR surfaceFormat; +} VkSurfaceFormat2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkSurfaceCapabilities2KHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormat2KHR* pSurfaceFormats); +#endif + + +#define VK_KHR_variable_pointers 1 +#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1 +#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers" +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeaturesKHR; + +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointersFeaturesKHR; + + + +#define VK_KHR_get_display_properties2 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_display_properties2" +typedef struct VkDisplayProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPropertiesKHR displayProperties; +} VkDisplayProperties2KHR; + +typedef struct VkDisplayPlaneProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlanePropertiesKHR displayPlaneProperties; +} VkDisplayPlaneProperties2KHR; + +typedef struct VkDisplayModeProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayModePropertiesKHR displayModeProperties; +} VkDisplayModeProperties2KHR; + +typedef struct VkDisplayPlaneInfo2KHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeKHR mode; + uint32_t planeIndex; +} VkDisplayPlaneInfo2KHR; + +typedef struct VkDisplayPlaneCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlaneCapabilitiesKHR capabilities; +} VkDisplayPlaneCapabilities2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlaneProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModeProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR* pCapabilities); +#endif + + +#define VK_KHR_dedicated_allocation 1 +#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3 +#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation" +typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR; + +typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR; + + + +#define VK_KHR_storage_buffer_storage_class 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class" + + +#define VK_KHR_relaxed_block_layout 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout" + + +#define VK_KHR_get_memory_requirements2 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2" +typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR; + +typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR; + +typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR; + +typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + 
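+/*
+ * Non-normative usage sketch (editor's addition, not Khronos specification
+ * text): one plausible way to drive the *2KHR query declared above, chaining
+ * VkMemoryDedicatedRequirements (aliased by VK_KHR_dedicated_allocation
+ * earlier in this header) to learn whether a dedicated allocation is
+ * preferred. `device` and `buffer` are assumed handles created elsewhere.
+ *
+ *     VkMemoryDedicatedRequirements dedicated = {
+ *         VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, NULL,
+ *         VK_FALSE, VK_FALSE };
+ *     VkMemoryRequirements2 reqs = {
+ *         VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, &dedicated };
+ *     VkBufferMemoryRequirementsInfo2 info = {
+ *         VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, NULL, buffer };
+ *     vkGetBufferMemoryRequirements2KHR(device, &info, &reqs);
+ *     // reqs.memoryRequirements now holds size/alignment/memoryTypeBits;
+ *     // dedicated.prefersDedicatedAllocation hints that binding this buffer
+ *     // to its own VkDeviceMemory may perform better.
+ */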
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +#endif + + +#define VK_KHR_image_format_list 1 +#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1 +#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list" +typedef VkImageFormatListCreateInfo VkImageFormatListCreateInfoKHR; + + + +#define VK_KHR_sampler_ycbcr_conversion 1 +typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR; + +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 14 +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion" +typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR; + +typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR; + +typedef VkChromaLocation VkChromaLocationKHR; + +typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR; + +typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR; + +typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR; + +typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR; + +typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR; + +typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); +#endif + + +#define VK_KHR_bind_memory2 1 +#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1 +#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2" +typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR; + +typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); +#endif + + +#define VK_KHR_maintenance3 1 +#define VK_KHR_MAINTENANCE3_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE3_EXTENSION_NAME "VK_KHR_maintenance3" +typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; + +typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR; + +typedef void (VKAPI_PTR 
*PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + + +#define VK_KHR_draw_indirect_count 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_KHR_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_KHR_shader_subgroup_extended_types 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME "VK_KHR_shader_subgroup_extended_types" +typedef VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR; + + + +#define VK_KHR_8bit_storage 1 +#define VK_KHR_8BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME "VK_KHR_8bit_storage" +typedef VkPhysicalDevice8BitStorageFeatures VkPhysicalDevice8BitStorageFeaturesKHR; + + + +#define VK_KHR_shader_atomic_int64 1 +#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME "VK_KHR_shader_atomic_int64" +typedef VkPhysicalDeviceShaderAtomicInt64Features VkPhysicalDeviceShaderAtomicInt64FeaturesKHR; + + + +#define VK_KHR_shader_clock 1 +#define VK_KHR_SHADER_CLOCK_SPEC_VERSION 1 +#define VK_KHR_SHADER_CLOCK_EXTENSION_NAME "VK_KHR_shader_clock" +typedef struct VkPhysicalDeviceShaderClockFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupClock; + VkBool32 shaderDeviceClock; +} VkPhysicalDeviceShaderClockFeaturesKHR; + + + +#define VK_KHR_driver_properties 1 +#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME "VK_KHR_driver_properties" +#define VK_MAX_DRIVER_NAME_SIZE_KHR VK_MAX_DRIVER_NAME_SIZE +#define VK_MAX_DRIVER_INFO_SIZE_KHR VK_MAX_DRIVER_INFO_SIZE +typedef VkDriverId VkDriverIdKHR; + +typedef VkConformanceVersion VkConformanceVersionKHR; + +typedef VkPhysicalDeviceDriverProperties VkPhysicalDeviceDriverPropertiesKHR; + + + +#define VK_KHR_shader_float_controls 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 4 +#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME "VK_KHR_shader_float_controls" +typedef VkShaderFloatControlsIndependence VkShaderFloatControlsIndependenceKHR; + +typedef VkPhysicalDeviceFloatControlsProperties VkPhysicalDeviceFloatControlsPropertiesKHR; + + + +#define VK_KHR_depth_stencil_resolve 1 
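+/*
+ * Non-normative usage sketch (editor's addition, not Khronos specification
+ * text): chaining VkSubpassDescriptionDepthStencilResolveKHR, aliased below,
+ * into a VkSubpassDescription2 so a multisampled depth/stencil attachment is
+ * resolved at the end of the subpass. `dsResolveRef` is an assumed
+ * VkAttachmentReference2 naming the single-sample resolve attachment.
+ *
+ *     VkSubpassDescriptionDepthStencilResolveKHR dsResolve = {
+ *         VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE, NULL,
+ *         VK_RESOLVE_MODE_SAMPLE_ZERO_BIT, VK_RESOLVE_MODE_SAMPLE_ZERO_BIT,
+ *         &dsResolveRef };
+ *     VkSubpassDescription2 subpass = { VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 };
+ *     subpass.pNext = &dsResolve;
+ *     // ...remaining VkSubpassDescription2 members filled in as usual before
+ *     // vkCreateRenderPass2KHR() consumes the description.
+ */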
+#define VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME "VK_KHR_depth_stencil_resolve" +typedef VkResolveModeFlagBits VkResolveModeFlagBitsKHR; + +typedef VkResolveModeFlags VkResolveModeFlagsKHR; + +typedef VkSubpassDescriptionDepthStencilResolve VkSubpassDescriptionDepthStencilResolveKHR; + +typedef VkPhysicalDeviceDepthStencilResolveProperties VkPhysicalDeviceDepthStencilResolvePropertiesKHR; + + + +#define VK_KHR_swapchain_mutable_format 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME "VK_KHR_swapchain_mutable_format" + + +#define VK_KHR_timeline_semaphore 1 +#define VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION 2 +#define VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME "VK_KHR_timeline_semaphore" +typedef VkSemaphoreType VkSemaphoreTypeKHR; + +typedef VkSemaphoreWaitFlagBits VkSemaphoreWaitFlagBitsKHR; + +typedef VkSemaphoreWaitFlags VkSemaphoreWaitFlagsKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreFeatures VkPhysicalDeviceTimelineSemaphoreFeaturesKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreProperties VkPhysicalDeviceTimelineSemaphorePropertiesKHR; + +typedef VkSemaphoreTypeCreateInfo VkSemaphoreTypeCreateInfoKHR; + +typedef VkTimelineSemaphoreSubmitInfo VkTimelineSemaphoreSubmitInfoKHR; + +typedef VkSemaphoreWaitInfo VkSemaphoreWaitInfoKHR; + +typedef VkSemaphoreSignalInfo VkSemaphoreSignalInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValueKHR)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); +typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphoresKHR)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphoreKHR)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValueKHR( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphoresKHR( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphoreKHR( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo); +#endif + + +#define VK_KHR_vulkan_memory_model 1 +#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 3 +#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME "VK_KHR_vulkan_memory_model" +typedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryModelFeaturesKHR; + + + +#define VK_KHR_spirv_1_4 1 +#define VK_KHR_SPIRV_1_4_SPEC_VERSION 1 +#define VK_KHR_SPIRV_1_4_EXTENSION_NAME "VK_KHR_spirv_1_4" + + +#define VK_KHR_surface_protected_capabilities 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME "VK_KHR_surface_protected_capabilities" +typedef struct VkSurfaceProtectedCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + VkBool32 supportsProtected; +} VkSurfaceProtectedCapabilitiesKHR; + + + +#define VK_KHR_separate_depth_stencil_layouts 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME "VK_KHR_separate_depth_stencil_layouts" +typedef VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR; + +typedef VkAttachmentReferenceStencilLayout VkAttachmentReferenceStencilLayoutKHR; + +typedef VkAttachmentDescriptionStencilLayout VkAttachmentDescriptionStencilLayoutKHR; + + + +#define 
VK_KHR_uniform_buffer_standard_layout 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME "VK_KHR_uniform_buffer_standard_layout" +typedef VkPhysicalDeviceUniformBufferStandardLayoutFeatures VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR; + + + +#define VK_KHR_buffer_device_address 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_KHR_buffer_device_address" +typedef VkPhysicalDeviceBufferDeviceAddressFeatures VkPhysicalDeviceBufferDeviceAddressFeaturesKHR; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoKHR; + +typedef VkBufferOpaqueCaptureAddressCreateInfo VkBufferOpaqueCaptureAddressCreateInfoKHR; + +typedef VkMemoryOpaqueCaptureAddressAllocateInfo VkMemoryOpaqueCaptureAddressAllocateInfoKHR; + +typedef VkDeviceMemoryOpaqueCaptureAddressInfo VkDeviceMemoryOpaqueCaptureAddressInfoKHR; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddressKHR( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); +#endif + + +#define VK_KHR_pipeline_executable_properties 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME "VK_KHR_pipeline_executable_properties" + +typedef enum VkPipelineExecutableStatisticFormatKHR { + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR = 0, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR = 1, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR = 2, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR = 3, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPipelineExecutableStatisticFormatKHR; +typedef struct VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineExecutableInfo; +} VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + +typedef struct VkPipelineInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; +} VkPipelineInfoKHR; + +typedef struct VkPipelineExecutablePropertiesKHR { + VkStructureType sType; + void* pNext; + VkShaderStageFlags stages; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + uint32_t subgroupSize; +} VkPipelineExecutablePropertiesKHR; + +typedef struct VkPipelineExecutableInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; + uint32_t executableIndex; +} VkPipelineExecutableInfoKHR; + +typedef union VkPipelineExecutableStatisticValueKHR { + VkBool32 b32; + int64_t i64; + uint64_t u64; + double f64; +} VkPipelineExecutableStatisticValueKHR; + +typedef struct VkPipelineExecutableStatisticKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + 
VkPipelineExecutableStatisticFormatKHR format; + VkPipelineExecutableStatisticValueKHR value; +} VkPipelineExecutableStatisticKHR; + +typedef struct VkPipelineExecutableInternalRepresentationKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkBool32 isText; + size_t dataSize; + void* pData; +} VkPipelineExecutableInternalRepresentationKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutablePropertiesKHR)(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableStatisticsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableInternalRepresentationsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutablePropertiesKHR( + VkDevice device, + const VkPipelineInfoKHR* pPipelineInfo, + uint32_t* pExecutableCount, + VkPipelineExecutablePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableStatisticsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pStatisticCount, + VkPipelineExecutableStatisticKHR* pStatistics); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pInternalRepresentationCount, + VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); +#endif + + +#define VK_KHR_shader_non_semantic_info 1 +#define VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION 1 +#define VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME "VK_KHR_shader_non_semantic_info" + + +#define VK_EXT_debug_report 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT) +#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 9 +#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report" + +typedef enum VkDebugReportObjectTypeEXT { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0, + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1, + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3, + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4, + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6, + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10, + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11, + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14, + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17, + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23, + 
VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25, + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26, + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000165000, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportObjectTypeEXT; + +typedef enum VkDebugReportFlagBitsEXT { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001, + VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002, + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004, + VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008, + VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010, + VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportFlagBitsEXT; +typedef VkFlags VkDebugReportFlagsEXT; +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)( + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage, + void* pUserData); + +typedef struct VkDebugReportCallbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportFlagsEXT flags; + PFN_vkDebugReportCallbackEXT pfnCallback; + void* pUserData; +} VkDebugReportCallbackCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT( + VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugReportCallbackEXT* pCallback); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT( + VkInstance instance, + VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT( + VkInstance instance, + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + 
const char* pMessage); +#endif + + +#define VK_NV_glsl_shader 1 +#define VK_NV_GLSL_SHADER_SPEC_VERSION 1 +#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader" + + +#define VK_EXT_depth_range_unrestricted 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted" + + +#define VK_IMG_filter_cubic 1 +#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1 +#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic" + + +#define VK_AMD_rasterization_order 1 +#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1 +#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order" + +typedef enum VkRasterizationOrderAMD { + VK_RASTERIZATION_ORDER_STRICT_AMD = 0, + VK_RASTERIZATION_ORDER_RELAXED_AMD = 1, + VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF +} VkRasterizationOrderAMD; +typedef struct VkPipelineRasterizationStateRasterizationOrderAMD { + VkStructureType sType; + const void* pNext; + VkRasterizationOrderAMD rasterizationOrder; +} VkPipelineRasterizationStateRasterizationOrderAMD; + + + +#define VK_AMD_shader_trinary_minmax 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax" + + +#define VK_AMD_shader_explicit_vertex_parameter 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter" + + +#define VK_EXT_debug_marker 1 +#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4 +#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker" +typedef struct VkDebugMarkerObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + const char* pObjectName; +} VkDebugMarkerObjectNameInfoEXT; + +typedef struct VkDebugMarkerObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugMarkerObjectTagInfoEXT; + +typedef struct VkDebugMarkerMarkerInfoEXT { + VkStructureType sType; + const void* pNext; + const char* pMarkerName; + float color[4]; +} VkDebugMarkerMarkerInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo); +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* 
pMarkerInfo); +#endif + + +#define VK_AMD_gcn_shader 1 +#define VK_AMD_GCN_SHADER_SPEC_VERSION 1 +#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader" + + +#define VK_NV_dedicated_allocation 1 +#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation" +typedef struct VkDedicatedAllocationImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationImageCreateInfoNV; + +typedef struct VkDedicatedAllocationBufferCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationBufferCreateInfoNV; + +typedef struct VkDedicatedAllocationMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkDedicatedAllocationMemoryAllocateInfoNV; + + + +#define VK_EXT_transform_feedback 1 +#define VK_EXT_TRANSFORM_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME "VK_EXT_transform_feedback" +typedef VkFlags VkPipelineRasterizationStateStreamCreateFlagsEXT; +typedef struct VkPhysicalDeviceTransformFeedbackFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 transformFeedback; + VkBool32 geometryStreams; +} VkPhysicalDeviceTransformFeedbackFeaturesEXT; + +typedef struct VkPhysicalDeviceTransformFeedbackPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxTransformFeedbackStreams; + uint32_t maxTransformFeedbackBuffers; + VkDeviceSize maxTransformFeedbackBufferSize; + uint32_t maxTransformFeedbackStreamDataSize; + uint32_t maxTransformFeedbackBufferDataSize; + uint32_t maxTransformFeedbackBufferDataStride; + VkBool32 transformFeedbackQueries; + VkBool32 transformFeedbackStreamsLinesTriangles; + VkBool32 transformFeedbackRasterizationStreamSelect; + VkBool32 transformFeedbackDraw; +} VkPhysicalDeviceTransformFeedbackPropertiesEXT; + +typedef struct VkPipelineRasterizationStateStreamCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateStreamCreateFlagsEXT flags; + uint32_t rasterizationStream; +} VkPipelineRasterizationStateStreamCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBindTransformFeedbackBuffersEXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes); +typedef void (VKAPI_PTR *PFN_vkCmdBeginTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdEndTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdEndQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectByteCountEXT)(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindTransformFeedbackBuffersEXT( + VkCommandBuffer 
commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags, + uint32_t index); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + uint32_t index); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectByteCountEXT( + VkCommandBuffer commandBuffer, + uint32_t instanceCount, + uint32_t firstInstance, + VkBuffer counterBuffer, + VkDeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride); +#endif + + +#define VK_NVX_image_view_handle 1 +#define VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION 2 +#define VK_NVX_IMAGE_VIEW_HANDLE_EXTENSION_NAME "VK_NVX_image_view_handle" +typedef struct VkImageViewHandleInfoNVX { + VkStructureType sType; + const void* pNext; + VkImageView imageView; + VkDescriptorType descriptorType; + VkSampler sampler; +} VkImageViewHandleInfoNVX; + +typedef struct VkImageViewAddressPropertiesNVX { + VkStructureType sType; + void* pNext; + VkDeviceAddress deviceAddress; + VkDeviceSize size; +} VkImageViewAddressPropertiesNVX; + +typedef uint32_t (VKAPI_PTR *PFN_vkGetImageViewHandleNVX)(VkDevice device, const VkImageViewHandleInfoNVX* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetImageViewAddressNVX)(VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR uint32_t VKAPI_CALL vkGetImageViewHandleNVX( + VkDevice device, + const VkImageViewHandleInfoNVX* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageViewAddressNVX( + VkDevice device, + VkImageView imageView, + VkImageViewAddressPropertiesNVX* pProperties); +#endif + + +#define VK_AMD_draw_indirect_count 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 2 +#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_AMD_negative_viewport_height 1 +#define 
VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height" + + +#define VK_AMD_gpu_shader_half_float 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float" + + +#define VK_AMD_shader_ballot 1 +#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1 +#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot" + + +#define VK_AMD_texture_gather_bias_lod 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME "VK_AMD_texture_gather_bias_lod" +typedef struct VkTextureLODGatherFormatPropertiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 supportsTextureGatherLODBiasAMD; +} VkTextureLODGatherFormatPropertiesAMD; + + + +#define VK_AMD_shader_info 1 +#define VK_AMD_SHADER_INFO_SPEC_VERSION 1 +#define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info" + +typedef enum VkShaderInfoTypeAMD { + VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0, + VK_SHADER_INFO_TYPE_BINARY_AMD = 1, + VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2, + VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderInfoTypeAMD; +typedef struct VkShaderResourceUsageAMD { + uint32_t numUsedVgprs; + uint32_t numUsedSgprs; + uint32_t ldsSizePerLocalWorkGroup; + size_t ldsUsageSizeInBytes; + size_t scratchMemUsageInBytes; +} VkShaderResourceUsageAMD; + +typedef struct VkShaderStatisticsInfoAMD { + VkShaderStageFlags shaderStageMask; + VkShaderResourceUsageAMD resourceUsage; + uint32_t numPhysicalVgprs; + uint32_t numPhysicalSgprs; + uint32_t numAvailableVgprs; + uint32_t numAvailableSgprs; + uint32_t computeWorkGroupSize[3]; +} VkShaderStatisticsInfoAMD; + +typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD( + VkDevice device, + VkPipeline pipeline, + VkShaderStageFlagBits shaderStage, + VkShaderInfoTypeAMD infoType, + size_t* pInfoSize, + void* pInfo); +#endif + + +#define VK_AMD_shader_image_load_store_lod 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod" + + +#define VK_NV_corner_sampled_image 1 +#define VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION 2 +#define VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME "VK_NV_corner_sampled_image" +typedef struct VkPhysicalDeviceCornerSampledImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cornerSampledImage; +} VkPhysicalDeviceCornerSampledImageFeaturesNV; + + + +#define VK_IMG_format_pvrtc 1 +#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1 +#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc" + + +#define VK_NV_external_memory_capabilities 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities" + +typedef enum VkExternalMemoryHandleTypeFlagBitsNV { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBitsNV; +typedef VkFlags 
VkExternalMemoryHandleTypeFlagsNV; + +typedef enum VkExternalMemoryFeatureFlagBitsNV { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBitsNV; +typedef VkFlags VkExternalMemoryFeatureFlagsNV; +typedef struct VkExternalImageFormatPropertiesNV { + VkImageFormatProperties imageFormatProperties; + VkExternalMemoryFeatureFlagsNV externalMemoryFeatures; + VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; +} VkExternalImageFormatPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); +#endif + + +#define VK_NV_external_memory 1 +#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory" +typedef struct VkExternalMemoryImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExternalMemoryImageCreateInfoNV; + +typedef struct VkExportMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExportMemoryAllocateInfoNV; + + + +#define VK_EXT_validation_flags 1 +#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 2 +#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags" + +typedef enum VkValidationCheckEXT { + VK_VALIDATION_CHECK_ALL_EXT = 0, + VK_VALIDATION_CHECK_SHADERS_EXT = 1, + VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCheckEXT; +typedef struct VkValidationFlagsEXT { + VkStructureType sType; + const void* pNext; + uint32_t disabledValidationCheckCount; + const VkValidationCheckEXT* pDisabledValidationChecks; +} VkValidationFlagsEXT; + + + +#define VK_EXT_shader_subgroup_ballot 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot" + + +#define VK_EXT_shader_subgroup_vote 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote" + + +#define VK_EXT_texture_compression_astc_hdr 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME "VK_EXT_texture_compression_astc_hdr" +typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 textureCompressionASTC_HDR; +} VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT; + + + +#define VK_EXT_astc_decode_mode 1 +#define VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1 +#define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME "VK_EXT_astc_decode_mode" +typedef struct 
VkImageViewASTCDecodeModeEXT { + VkStructureType sType; + const void* pNext; + VkFormat decodeMode; +} VkImageViewASTCDecodeModeEXT; + +typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 decodeModeSharedExponent; +} VkPhysicalDeviceASTCDecodeFeaturesEXT; + + + +#define VK_EXT_conditional_rendering 1 +#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2 +#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering" + +typedef enum VkConditionalRenderingFlagBitsEXT { + VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT = 0x00000001, + VK_CONDITIONAL_RENDERING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConditionalRenderingFlagBitsEXT; +typedef VkFlags VkConditionalRenderingFlagsEXT; +typedef struct VkConditionalRenderingBeginInfoEXT { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceSize offset; + VkConditionalRenderingFlagsEXT flags; +} VkConditionalRenderingBeginInfoEXT; + +typedef struct VkPhysicalDeviceConditionalRenderingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 conditionalRendering; + VkBool32 inheritedConditionalRendering; +} VkPhysicalDeviceConditionalRenderingFeaturesEXT; + +typedef struct VkCommandBufferInheritanceConditionalRenderingInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 conditionalRenderingEnable; +} VkCommandBufferInheritanceConditionalRenderingInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBeginConditionalRenderingEXT)(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); +typedef void (VKAPI_PTR *PFN_vkCmdEndConditionalRenderingEXT)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginConditionalRenderingEXT( + VkCommandBuffer commandBuffer, + const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndConditionalRenderingEXT( + VkCommandBuffer commandBuffer); +#endif + + +#define VK_NV_clip_space_w_scaling 1 +#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1 +#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling" +typedef struct VkViewportWScalingNV { + float xcoeff; + float ycoeff; +} VkViewportWScalingNV; + +typedef struct VkPipelineViewportWScalingStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 viewportWScalingEnable; + uint32_t viewportCount; + const VkViewportWScalingNV* pViewportWScalings; +} VkPipelineViewportWScalingStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewportWScalingNV* pViewportWScalings); +#endif + + +#define VK_EXT_direct_mode_display 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display" +typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display); +#endif + + +#define VK_EXT_display_surface_counter 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME 
"VK_EXT_display_surface_counter" + +typedef enum VkSurfaceCounterFlagBitsEXT { + VK_SURFACE_COUNTER_VBLANK_EXT = 0x00000001, + VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSurfaceCounterFlagBitsEXT; +typedef VkFlags VkSurfaceCounterFlagsEXT; +typedef struct VkSurfaceCapabilities2EXT { + VkStructureType sType; + void* pNext; + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; + VkSurfaceCounterFlagsEXT supportedSurfaceCounters; +} VkSurfaceCapabilities2EXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); +#endif + + +#define VK_EXT_display_control 1 +#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control" + +typedef enum VkDisplayPowerStateEXT { + VK_DISPLAY_POWER_STATE_OFF_EXT = 0, + VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1, + VK_DISPLAY_POWER_STATE_ON_EXT = 2, + VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayPowerStateEXT; + +typedef enum VkDeviceEventTypeEXT { + VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0, + VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceEventTypeEXT; + +typedef enum VkDisplayEventTypeEXT { + VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0, + VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayEventTypeEXT; +typedef struct VkDisplayPowerInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayPowerStateEXT powerState; +} VkDisplayPowerInfoEXT; + +typedef struct VkDeviceEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceEventTypeEXT deviceEvent; +} VkDeviceEventInfoEXT; + +typedef struct VkDisplayEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayEventTypeEXT displayEvent; +} VkDisplayEventInfoEXT; + +typedef struct VkSwapchainCounterCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSurfaceCounterFlagsEXT surfaceCounters; +} VkSwapchainCounterCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayPowerInfoEXT* pDisplayPowerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT( + VkDevice device, + const VkDeviceEventInfoEXT* pDeviceEventInfo, + 
const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayEventInfoEXT* pDisplayEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSurfaceCounterFlagBitsEXT counter, + uint64_t* pCounterValue); +#endif + + +#define VK_GOOGLE_display_timing 1 +#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1 +#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing" +typedef struct VkRefreshCycleDurationGOOGLE { + uint64_t refreshDuration; +} VkRefreshCycleDurationGOOGLE; + +typedef struct VkPastPresentationTimingGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; + uint64_t actualPresentTime; + uint64_t earliestPresentTime; + uint64_t presentMargin; +} VkPastPresentationTimingGOOGLE; + +typedef struct VkPresentTimeGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; +} VkPresentTimeGOOGLE; + +typedef struct VkPresentTimesInfoGOOGLE { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentTimeGOOGLE* pTimes; +} VkPresentTimesInfoGOOGLE; + +typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pPresentationTimingCount, + VkPastPresentationTimingGOOGLE* pPresentationTimings); +#endif + + +#define VK_NV_sample_mask_override_coverage 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage" + + +#define VK_NV_geometry_shader_passthrough 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough" + + +#define VK_NV_viewport_array2 1 +#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2" + + +#define VK_NVX_multiview_per_view_attributes 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes" +typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + VkStructureType sType; + void* pNext; + VkBool32 perViewPositionAllComponents; +} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + + + +#define VK_NV_viewport_swizzle 1 +#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle" + +typedef enum VkViewportCoordinateSwizzleNV { + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4, + 
VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7, + VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF +} VkViewportCoordinateSwizzleNV; +typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV; +typedef struct VkViewportSwizzleNV { + VkViewportCoordinateSwizzleNV x; + VkViewportCoordinateSwizzleNV y; + VkViewportCoordinateSwizzleNV z; + VkViewportCoordinateSwizzleNV w; +} VkViewportSwizzleNV; + +typedef struct VkPipelineViewportSwizzleStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineViewportSwizzleStateCreateFlagsNV flags; + uint32_t viewportCount; + const VkViewportSwizzleNV* pViewportSwizzles; +} VkPipelineViewportSwizzleStateCreateInfoNV; + + + +#define VK_EXT_discard_rectangles 1 +#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1 +#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles" + +typedef enum VkDiscardRectangleModeEXT { + VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0, + VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1, + VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDiscardRectangleModeEXT; +typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxDiscardRectangles; +} VkPhysicalDeviceDiscardRectanglePropertiesEXT; + +typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineDiscardRectangleStateCreateFlagsEXT flags; + VkDiscardRectangleModeEXT discardRectangleMode; + uint32_t discardRectangleCount; + const VkRect2D* pDiscardRectangles; +} VkPipelineDiscardRectangleStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT( + VkCommandBuffer commandBuffer, + uint32_t firstDiscardRectangle, + uint32_t discardRectangleCount, + const VkRect2D* pDiscardRectangles); +#endif + + +#define VK_EXT_conservative_rasterization 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization" + +typedef enum VkConservativeRasterizationModeEXT { + VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0, + VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1, + VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2, + VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConservativeRasterizationModeEXT; +typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + float primitiveOverestimationSize; + float maxExtraPrimitiveOverestimationSize; + float extraPrimitiveOverestimationSizeGranularity; + VkBool32 primitiveUnderestimation; + VkBool32 conservativePointAndLineRasterization; + VkBool32 degenerateTrianglesRasterized; + VkBool32 degenerateLinesRasterized; + VkBool32 fullyCoveredFragmentShaderInputVariable; + VkBool32 conservativeRasterizationPostDepthCoverage; +} VkPhysicalDeviceConservativeRasterizationPropertiesEXT; + +typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + 
VkPipelineRasterizationConservativeStateCreateFlagsEXT flags; + VkConservativeRasterizationModeEXT conservativeRasterizationMode; + float extraPrimitiveOverestimationSize; +} VkPipelineRasterizationConservativeStateCreateInfoEXT; + + + +#define VK_EXT_depth_clip_enable 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_SPEC_VERSION 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME "VK_EXT_depth_clip_enable" +typedef VkFlags VkPipelineRasterizationDepthClipStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDepthClipEnableFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 depthClipEnable; +} VkPhysicalDeviceDepthClipEnableFeaturesEXT; + +typedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationDepthClipStateCreateFlagsEXT flags; + VkBool32 depthClipEnable; +} VkPipelineRasterizationDepthClipStateCreateInfoEXT; + + + +#define VK_EXT_swapchain_colorspace 1 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 4 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" + + +#define VK_EXT_hdr_metadata 1 +#define VK_EXT_HDR_METADATA_SPEC_VERSION 2 +#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" +typedef struct VkXYColorEXT { + float x; + float y; +} VkXYColorEXT; + +typedef struct VkHdrMetadataEXT { + VkStructureType sType; + const void* pNext; + VkXYColorEXT displayPrimaryRed; + VkXYColorEXT displayPrimaryGreen; + VkXYColorEXT displayPrimaryBlue; + VkXYColorEXT whitePoint; + float maxLuminance; + float minLuminance; + float maxContentLightLevel; + float maxFrameAverageLightLevel; +} VkHdrMetadataEXT; + +typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainKHR* pSwapchains, + const VkHdrMetadataEXT* pMetadata); +#endif + + +#define VK_EXT_external_memory_dma_buf 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf" + + +#define VK_EXT_queue_family_foreign 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign" +#define VK_QUEUE_FAMILY_FOREIGN_EXT (~0U-2) + + +#define VK_EXT_debug_utils 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT) +#define VK_EXT_DEBUG_UTILS_SPEC_VERSION 2 +#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils" +typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT; + +typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageSeverityFlagBitsEXT; + +typedef enum VkDebugUtilsMessageTypeFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, + VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageTypeFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT; +typedef VkFlags 
VkDebugUtilsMessengerCreateFlagsEXT; +typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT; +typedef struct VkDebugUtilsLabelEXT { + VkStructureType sType; + const void* pNext; + const char* pLabelName; + float color[4]; +} VkDebugUtilsLabelEXT; + +typedef struct VkDebugUtilsObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + const char* pObjectName; +} VkDebugUtilsObjectNameInfoEXT; + +typedef struct VkDebugUtilsMessengerCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCallbackDataFlagsEXT flags; + const char* pMessageIdName; + int32_t messageIdNumber; + const char* pMessage; + uint32_t queueLabelCount; + const VkDebugUtilsLabelEXT* pQueueLabels; + uint32_t cmdBufLabelCount; + const VkDebugUtilsLabelEXT* pCmdBufLabels; + uint32_t objectCount; + const VkDebugUtilsObjectNameInfoEXT* pObjects; +} VkDebugUtilsMessengerCallbackDataEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDebugUtilsObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugUtilsObjectTagInfoEXT; + +typedef struct VkDebugUtilsMessengerCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCreateFlagsEXT flags; + VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; + VkDebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; + void* pUserData; +} VkDebugUtilsMessengerCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); +typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue); +typedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR VkResult VKAPI_CALL 
vkSetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT( + VkQueue queue); + +VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( + VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugUtilsMessengerEXT* pMessenger); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( + VkInstance instance, + VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT( + VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); +#endif + + +#define VK_EXT_sampler_filter_minmax 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 2 +#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax" +typedef VkSamplerReductionMode VkSamplerReductionModeEXT; + +typedef VkSamplerReductionModeCreateInfo VkSamplerReductionModeCreateInfoEXT; + +typedef VkPhysicalDeviceSamplerFilterMinmaxProperties VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + + + +#define VK_AMD_gpu_shader_int16 1 +#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16" + + +#define VK_AMD_mixed_attachment_samples 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples" + + +#define VK_AMD_shader_fragment_mask 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask" + + +#define VK_EXT_inline_uniform_block 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME "VK_EXT_inline_uniform_block" +typedef struct VkPhysicalDeviceInlineUniformBlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 inlineUniformBlock; + VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind; +} VkPhysicalDeviceInlineUniformBlockFeaturesEXT; + +typedef struct VkPhysicalDeviceInlineUniformBlockPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxInlineUniformBlockSize; + uint32_t maxPerStageDescriptorInlineUniformBlocks; + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; + uint32_t maxDescriptorSetInlineUniformBlocks; + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks; +} VkPhysicalDeviceInlineUniformBlockPropertiesEXT; + +typedef struct VkWriteDescriptorSetInlineUniformBlockEXT { + VkStructureType sType; + const void* pNext; + uint32_t dataSize; + const void* pData; +} VkWriteDescriptorSetInlineUniformBlockEXT; + +typedef struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT { + VkStructureType sType; + const 
void* pNext; + uint32_t maxInlineUniformBlockBindings; +} VkDescriptorPoolInlineUniformBlockCreateInfoEXT; + + + +#define VK_EXT_shader_stencil_export 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export" + + +#define VK_EXT_sample_locations 1 +#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1 +#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations" +typedef struct VkSampleLocationEXT { + float x; + float y; +} VkSampleLocationEXT; + +typedef struct VkSampleLocationsInfoEXT { + VkStructureType sType; + const void* pNext; + VkSampleCountFlagBits sampleLocationsPerPixel; + VkExtent2D sampleLocationGridSize; + uint32_t sampleLocationsCount; + const VkSampleLocationEXT* pSampleLocations; +} VkSampleLocationsInfoEXT; + +typedef struct VkAttachmentSampleLocationsEXT { + uint32_t attachmentIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkAttachmentSampleLocationsEXT; + +typedef struct VkSubpassSampleLocationsEXT { + uint32_t subpassIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkSubpassSampleLocationsEXT; + +typedef struct VkRenderPassSampleLocationsBeginInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentInitialSampleLocationsCount; + const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; + uint32_t postSubpassSampleLocationsCount; + const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations; +} VkRenderPassSampleLocationsBeginInfoEXT; + +typedef struct VkPipelineSampleLocationsStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 sampleLocationsEnable; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkPipelineSampleLocationsStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleLocationSampleCounts; + VkExtent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + VkBool32 variableSampleLocations; +} VkPhysicalDeviceSampleLocationsPropertiesEXT; + +typedef struct VkMultisamplePropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D maxSampleLocationGridSize; +} VkMultisamplePropertiesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties); +#endif + + +#define VK_EXT_blend_operation_advanced 1 +#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 +#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" + +typedef enum VkBlendOverlapEXT { + VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, + VK_BLEND_OVERLAP_DISJOINT_EXT = 1, + VK_BLEND_OVERLAP_CONJOINT_EXT = 2, + VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF +} VkBlendOverlapEXT; +typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 
advancedBlendCoherentOperations; +} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t advancedBlendMaxColorAttachments; + VkBool32 advancedBlendIndependentBlend; + VkBool32 advancedBlendNonPremultipliedSrcColor; + VkBool32 advancedBlendNonPremultipliedDstColor; + VkBool32 advancedBlendCorrelatedOverlap; + VkBool32 advancedBlendAllOperations; +} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 srcPremultiplied; + VkBool32 dstPremultiplied; + VkBlendOverlapEXT blendOverlap; +} VkPipelineColorBlendAdvancedStateCreateInfoEXT; + + + +#define VK_NV_fragment_coverage_to_color 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" +typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; +typedef struct VkPipelineCoverageToColorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageToColorStateCreateFlagsNV flags; + VkBool32 coverageToColorEnable; + uint32_t coverageToColorLocation; +} VkPipelineCoverageToColorStateCreateInfoNV; + + + +#define VK_NV_framebuffer_mixed_samples 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" + +typedef enum VkCoverageModulationModeNV { + VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, + VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, + VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, + VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, + VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageModulationModeNV; +typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; +typedef struct VkPipelineCoverageModulationStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageModulationStateCreateFlagsNV flags; + VkCoverageModulationModeNV coverageModulationMode; + VkBool32 coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; +} VkPipelineCoverageModulationStateCreateInfoNV; + + + +#define VK_NV_fill_rectangle 1 +#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 +#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" + + +#define VK_NV_shader_sm_builtins 1 +#define VK_NV_SHADER_SM_BUILTINS_SPEC_VERSION 1 +#define VK_NV_SHADER_SM_BUILTINS_EXTENSION_NAME "VK_NV_shader_sm_builtins" +typedef struct VkPhysicalDeviceShaderSMBuiltinsPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderSMCount; + uint32_t shaderWarpsPerSM; +} VkPhysicalDeviceShaderSMBuiltinsPropertiesNV; + +typedef struct VkPhysicalDeviceShaderSMBuiltinsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shaderSMBuiltins; +} VkPhysicalDeviceShaderSMBuiltinsFeaturesNV; + + + +#define VK_EXT_post_depth_coverage 1 +#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 +#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" + + +#define VK_EXT_image_drm_format_modifier 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME "VK_EXT_image_drm_format_modifier" +typedef struct VkDrmFormatModifierPropertiesEXT { + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + VkFormatFeatureFlags drmFormatModifierTilingFeatures; +} 
VkDrmFormatModifierPropertiesEXT; + +typedef struct VkDrmFormatModifierPropertiesListEXT { + VkStructureType sType; + void* pNext; + uint32_t drmFormatModifierCount; + VkDrmFormatModifierPropertiesEXT* pDrmFormatModifierProperties; +} VkDrmFormatModifierPropertiesListEXT; + +typedef struct VkPhysicalDeviceImageDrmFormatModifierInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkPhysicalDeviceImageDrmFormatModifierInfoEXT; + +typedef struct VkImageDrmFormatModifierListCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t drmFormatModifierCount; + const uint64_t* pDrmFormatModifiers; +} VkImageDrmFormatModifierListCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierExplicitCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + const VkSubresourceLayout* pPlaneLayouts; +} VkImageDrmFormatModifierExplicitCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierPropertiesEXT { + VkStructureType sType; + void* pNext; + uint64_t drmFormatModifier; +} VkImageDrmFormatModifierPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetImageDrmFormatModifierPropertiesEXT)(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageDrmFormatModifierPropertiesEXT( + VkDevice device, + VkImage image, + VkImageDrmFormatModifierPropertiesEXT* pProperties); +#endif + + +#define VK_EXT_validation_cache 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) +#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" + +typedef enum VkValidationCacheHeaderVersionEXT { + VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, + VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCacheHeaderVersionEXT; +typedef VkFlags VkValidationCacheCreateFlagsEXT; +typedef struct VkValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; +} VkValidationCacheCreateInfoEXT; + +typedef struct VkShaderModuleValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheEXT validationCache; +} VkShaderModuleValidationCacheCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); +typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT 
validationCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const VkValidationCacheEXT* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData); +#endif + + +#define VK_EXT_descriptor_indexing 1 +#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2 +#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing" +typedef VkDescriptorBindingFlagBits VkDescriptorBindingFlagBitsEXT; + +typedef VkDescriptorBindingFlags VkDescriptorBindingFlagsEXT; + +typedef VkDescriptorSetLayoutBindingFlagsCreateInfo VkDescriptorSetLayoutBindingFlagsCreateInfoEXT; + +typedef VkPhysicalDeviceDescriptorIndexingFeatures VkPhysicalDeviceDescriptorIndexingFeaturesEXT; + +typedef VkPhysicalDeviceDescriptorIndexingProperties VkPhysicalDeviceDescriptorIndexingPropertiesEXT; + +typedef VkDescriptorSetVariableDescriptorCountAllocateInfo VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; + +typedef VkDescriptorSetVariableDescriptorCountLayoutSupport VkDescriptorSetVariableDescriptorCountLayoutSupportEXT; + + + +#define VK_EXT_shader_viewport_index_layer 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" + + +#define VK_NV_shading_rate_image 1 +#define VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION 3 +#define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME "VK_NV_shading_rate_image" + +typedef enum VkShadingRatePaletteEntryNV { + VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV = 0, + VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV = 1, + VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV = 2, + VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV = 3, + VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV = 4, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV = 5, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV = 6, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV = 7, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV = 8, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV = 9, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV = 10, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV = 11, + VK_SHADING_RATE_PALETTE_ENTRY_MAX_ENUM_NV = 0x7FFFFFFF +} VkShadingRatePaletteEntryNV; + +typedef enum VkCoarseSampleOrderTypeNV { + VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV = 0, + VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV = 1, + VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV = 2, + VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3, + VK_COARSE_SAMPLE_ORDER_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoarseSampleOrderTypeNV; +typedef struct VkShadingRatePaletteNV { + uint32_t shadingRatePaletteEntryCount; + const VkShadingRatePaletteEntryNV* pShadingRatePaletteEntries; +} VkShadingRatePaletteNV; + +typedef struct VkPipelineViewportShadingRateImageStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 shadingRateImageEnable; + uint32_t viewportCount; + const VkShadingRatePaletteNV* pShadingRatePalettes; +} VkPipelineViewportShadingRateImageStateCreateInfoNV; + +typedef struct VkPhysicalDeviceShadingRateImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shadingRateImage; + VkBool32 shadingRateCoarseSampleOrder; +} 
VkPhysicalDeviceShadingRateImageFeaturesNV; + +typedef struct VkPhysicalDeviceShadingRateImagePropertiesNV { + VkStructureType sType; + void* pNext; + VkExtent2D shadingRateTexelSize; + uint32_t shadingRatePaletteSize; + uint32_t shadingRateMaxCoarseSamples; +} VkPhysicalDeviceShadingRateImagePropertiesNV; + +typedef struct VkCoarseSampleLocationNV { + uint32_t pixelX; + uint32_t pixelY; + uint32_t sample; +} VkCoarseSampleLocationNV; + +typedef struct VkCoarseSampleOrderCustomNV { + VkShadingRatePaletteEntryNV shadingRate; + uint32_t sampleCount; + uint32_t sampleLocationCount; + const VkCoarseSampleLocationNV* pSampleLocations; +} VkCoarseSampleOrderCustomNV; + +typedef struct VkPipelineViewportCoarseSampleOrderStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkCoarseSampleOrderTypeNV sampleOrderType; + uint32_t customSampleOrderCount; + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders; +} VkPipelineViewportCoarseSampleOrderStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdBindShadingRateImageNV)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportShadingRatePaletteNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes); +typedef void (VKAPI_PTR *PFN_vkCmdSetCoarseSampleOrderNV)(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindShadingRateImageNV( + VkCommandBuffer commandBuffer, + VkImageView imageView, + VkImageLayout imageLayout); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportShadingRatePaletteNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkShadingRatePaletteNV* pShadingRatePalettes); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV( + VkCommandBuffer commandBuffer, + VkCoarseSampleOrderTypeNV sampleOrderType, + uint32_t customSampleOrderCount, + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); +#endif + + +#define VK_NV_ray_tracing 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR) +typedef VkAccelerationStructureKHR VkAccelerationStructureNV; + +#define VK_NV_RAY_TRACING_SPEC_VERSION 3 +#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" +#define VK_SHADER_UNUSED_KHR (~0U) +#define VK_SHADER_UNUSED_NV VK_SHADER_UNUSED_KHR + +typedef enum VkRayTracingShaderGroupTypeKHR { + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR = 2, + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkRayTracingShaderGroupTypeKHR; +typedef VkRayTracingShaderGroupTypeKHR VkRayTracingShaderGroupTypeNV; + + +typedef enum VkGeometryTypeKHR { + VK_GEOMETRY_TYPE_TRIANGLES_KHR = 0, + VK_GEOMETRY_TYPE_AABBS_KHR = 1, + VK_GEOMETRY_TYPE_INSTANCES_KHR = 1000150000, + VK_GEOMETRY_TYPE_TRIANGLES_NV = VK_GEOMETRY_TYPE_TRIANGLES_KHR, + VK_GEOMETRY_TYPE_AABBS_NV = VK_GEOMETRY_TYPE_AABBS_KHR, + 
VK_GEOMETRY_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryTypeKHR; +typedef VkGeometryTypeKHR VkGeometryTypeNV; + + +typedef enum VkAccelerationStructureTypeKHR { + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR = 0, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR = 1, + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, + VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureTypeKHR; +typedef VkAccelerationStructureTypeKHR VkAccelerationStructureTypeNV; + + +typedef enum VkCopyAccelerationStructureModeKHR { + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR = 0, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR = 1, + VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR = 2, + VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR = 3, + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR, + VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCopyAccelerationStructureModeKHR; +typedef VkCopyAccelerationStructureModeKHR VkCopyAccelerationStructureModeNV; + + +typedef enum VkAccelerationStructureMemoryRequirementsTypeKHR { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR = 0, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR = 2, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureMemoryRequirementsTypeKHR; +typedef VkAccelerationStructureMemoryRequirementsTypeKHR VkAccelerationStructureMemoryRequirementsTypeNV; + + +typedef enum VkGeometryFlagBitsKHR { + VK_GEOMETRY_OPAQUE_BIT_KHR = 0x00000001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR = 0x00000002, + VK_GEOMETRY_OPAQUE_BIT_NV = VK_GEOMETRY_OPAQUE_BIT_KHR, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR, + VK_GEOMETRY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryFlagBitsKHR; +typedef VkFlags VkGeometryFlagsKHR; +typedef VkGeometryFlagsKHR VkGeometryFlagsNV; + +typedef VkGeometryFlagBitsKHR VkGeometryFlagBitsNV; + + +typedef enum VkGeometryInstanceFlagBitsKHR { + VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR = 0x00000001, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = 0x00000002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR = 0x00000004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR = 0x00000008, + VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_KHR = 
0x7FFFFFFF +} VkGeometryInstanceFlagBitsKHR; +typedef VkFlags VkGeometryInstanceFlagsKHR; +typedef VkGeometryInstanceFlagsKHR VkGeometryInstanceFlagsNV; + +typedef VkGeometryInstanceFlagBitsKHR VkGeometryInstanceFlagBitsNV; + + +typedef enum VkBuildAccelerationStructureFlagBitsKHR { + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR = 0x00000001, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR = 0x00000002, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR = 0x00000004, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR = 0x00000008, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR = 0x00000010, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkBuildAccelerationStructureFlagBitsKHR; +typedef VkFlags VkBuildAccelerationStructureFlagsKHR; +typedef VkBuildAccelerationStructureFlagsKHR VkBuildAccelerationStructureFlagsNV; + +typedef VkBuildAccelerationStructureFlagBitsKHR VkBuildAccelerationStructureFlagBitsNV; + +typedef struct VkRayTracingShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; +} VkRayTracingShaderGroupCreateInfoNV; + +typedef struct VkRayTracingPipelineCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoNV* pGroups; + uint32_t maxRecursionDepth; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoNV; + +typedef struct VkGeometryTrianglesNV { + VkStructureType sType; + const void* pNext; + VkBuffer vertexData; + VkDeviceSize vertexOffset; + uint32_t vertexCount; + VkDeviceSize vertexStride; + VkFormat vertexFormat; + VkBuffer indexData; + VkDeviceSize indexOffset; + uint32_t indexCount; + VkIndexType indexType; + VkBuffer transformData; + VkDeviceSize transformOffset; +} VkGeometryTrianglesNV; + +typedef struct VkGeometryAABBNV { + VkStructureType sType; + const void* pNext; + VkBuffer aabbData; + uint32_t numAABBs; + uint32_t stride; + VkDeviceSize offset; +} VkGeometryAABBNV; + +typedef struct VkGeometryDataNV { + VkGeometryTrianglesNV triangles; + VkGeometryAABBNV aabbs; +} VkGeometryDataNV; + +typedef struct VkGeometryNV { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + VkGeometryDataNV geometry; + VkGeometryFlagsKHR flags; +} VkGeometryNV; + +typedef struct VkAccelerationStructureInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeNV type; + VkBuildAccelerationStructureFlagsNV flags; + uint32_t instanceCount; + uint32_t geometryCount; + const VkGeometryNV* pGeometries; +} VkAccelerationStructureInfoNV; + +typedef struct VkAccelerationStructureCreateInfoNV { + VkStructureType 
sType; + const void* pNext; + VkDeviceSize compactedSize; + VkAccelerationStructureInfoNV info; +} VkAccelerationStructureCreateInfoNV; + +typedef struct VkBindAccelerationStructureMemoryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR accelerationStructure; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindAccelerationStructureMemoryInfoKHR; + +typedef VkBindAccelerationStructureMemoryInfoKHR VkBindAccelerationStructureMemoryInfoNV; + +typedef struct VkWriteDescriptorSetAccelerationStructureKHR { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureKHR* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureKHR; + +typedef VkWriteDescriptorSetAccelerationStructureKHR VkWriteDescriptorSetAccelerationStructureNV; + +typedef struct VkAccelerationStructureMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureMemoryRequirementsTypeNV type; + VkAccelerationStructureNV accelerationStructure; +} VkAccelerationStructureMemoryRequirementsInfoNV; + +typedef struct VkPhysicalDeviceRayTracingPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxTriangleCount; + uint32_t maxDescriptorSetAccelerationStructures; +} VkPhysicalDeviceRayTracingPropertiesNV; + +typedef struct VkTransformMatrixKHR { + float matrix[3][4]; +} VkTransformMatrixKHR; + +typedef VkTransformMatrixKHR VkTransformMatrixNV; + +typedef struct VkAabbPositionsKHR { + float minX; + float minY; + float minZ; + float maxX; + float maxY; + float maxZ; +} VkAabbPositionsKHR; + +typedef VkAabbPositionsKHR VkAabbPositionsNV; + +typedef struct VkAccelerationStructureInstanceKHR { + VkTransformMatrixKHR transform; + uint32_t instanceCustomIndex:24; + uint32_t mask:8; + uint32_t instanceShaderBindingTableRecordOffset:24; + VkGeometryInstanceFlagsKHR flags:8; + uint64_t accelerationStructureReference; +} VkAccelerationStructureInstanceKHR; + +typedef VkAccelerationStructureInstanceKHR VkAccelerationStructureInstanceNV; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureKHR)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); +typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryKHR)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer 
commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkBuffer scratch, VkDeviceSize scratchOffset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkCopyAccelerationStructureModeKHR mode); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( + VkDevice device, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureNV* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureKHR( + VkDevice device, + VkAccelerationStructureKHR accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( + VkDevice device, + VkAccelerationStructureKHR accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( + VkDevice device, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2KHR* pMemoryRequirements); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryKHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( + VkDevice 
device, + uint32_t bindInfoCount, + const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( + VkCommandBuffer commandBuffer, + const VkAccelerationStructureInfoNV* pInfo, + VkBuffer instanceData, + VkDeviceSize instanceOffset, + VkBool32 update, + VkAccelerationStructureKHR dst, + VkAccelerationStructureKHR src, + VkBuffer scratch, + VkDeviceSize scratchOffset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( + VkCommandBuffer commandBuffer, + VkAccelerationStructureKHR dst, + VkAccelerationStructureKHR src, + VkCopyAccelerationStructureModeKHR mode); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( + VkCommandBuffer commandBuffer, + VkBuffer raygenShaderBindingTableBuffer, + VkDeviceSize raygenShaderBindingOffset, + VkBuffer missShaderBindingTableBuffer, + VkDeviceSize missShaderBindingOffset, + VkDeviceSize missShaderBindingStride, + VkBuffer hitShaderBindingTableBuffer, + VkDeviceSize hitShaderBindingOffset, + VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV( + VkDevice device, + VkAccelerationStructureKHR accelerationStructure, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesKHR( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV( + VkDevice device, + VkPipeline pipeline, + uint32_t shader); +#endif + + +#define VK_NV_representative_fragment_test 1 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 2 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME "VK_NV_representative_fragment_test" +typedef struct VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 representativeFragmentTest; +} VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV; + +typedef struct VkPipelineRepresentativeFragmentTestStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 representativeFragmentTestEnable; +} VkPipelineRepresentativeFragmentTestStateCreateInfoNV; + + + +#define VK_EXT_filter_cubic 1 +#define VK_EXT_FILTER_CUBIC_SPEC_VERSION 3 +#define VK_EXT_FILTER_CUBIC_EXTENSION_NAME "VK_EXT_filter_cubic" 
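+/*
+ * Illustrative sketch (not part of the upstream Vulkan header): one plausible
+ * way an application could exercise the VK_EXT_validation_cache entry points
+ * declared above. The `device` handle and the error handling are assumptions
+ * made for illustration only; on a real implementation these extension entry
+ * points should first be resolved through vkGetDeviceProcAddr, since drivers
+ * are not required to export them statically.
+ *
+ *     VkValidationCacheCreateInfoEXT ci;
+ *     memset(&ci, 0, sizeof(ci));
+ *     ci.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
+ *     ci.initialDataSize = 0;    // a previously saved blob could be passed here
+ *     ci.pInitialData = NULL;
+ *
+ *     VkValidationCacheEXT cache = VK_NULL_HANDLE;
+ *     if (vkCreateValidationCacheEXT(device, &ci, NULL, &cache) == VK_SUCCESS) {
+ *         size_t size = 0;
+ *         // standard two-call idiom: query the size, then fetch the data
+ *         vkGetValidationCacheDataEXT(device, cache, &size, NULL);
+ *         // ...allocate `size` bytes, call again with the buffer, persist it...
+ *         vkDestroyValidationCacheEXT(device, cache, NULL);
+ *     }
+ */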
+typedef struct VkPhysicalDeviceImageViewImageFormatInfoEXT { + VkStructureType sType; + void* pNext; + VkImageViewType imageViewType; +} VkPhysicalDeviceImageViewImageFormatInfoEXT; + +typedef struct VkFilterCubicImageViewImageFormatPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 filterCubic; + VkBool32 filterCubicMinmax; +} VkFilterCubicImageViewImageFormatPropertiesEXT; + + + +#define VK_QCOM_render_pass_shader_resolve 1 +#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_SPEC_VERSION 4 +#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME "VK_QCOM_render_pass_shader_resolve" + + +#define VK_EXT_global_priority 1 +#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 +#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" + +typedef enum VkQueueGlobalPriorityEXT { + VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = 128, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = 256, + VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = 512, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = 1024, + VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_EXT = 0x7FFFFFFF +} VkQueueGlobalPriorityEXT; +typedef struct VkDeviceQueueGlobalPriorityCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkQueueGlobalPriorityEXT globalPriority; +} VkDeviceQueueGlobalPriorityCreateInfoEXT; + + + +#define VK_EXT_external_memory_host 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host" +typedef struct VkImportMemoryHostPointerInfoEXT { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + void* pHostPointer; +} VkImportMemoryHostPointerInfoEXT; + +typedef struct VkMemoryHostPointerPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryHostPointerPropertiesEXT; + +typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize minImportedHostPointerAlignment; +} VkPhysicalDeviceExternalMemoryHostPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + const void* pHostPointer, + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); +#endif + + +#define VK_AMD_buffer_marker 1 +#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1 +#define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker" +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); +#endif + + +#define VK_AMD_pipeline_compiler_control 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_EXTENSION_NAME "VK_AMD_pipeline_compiler_control" + +typedef enum VkPipelineCompilerControlFlagBitsAMD { + VK_PIPELINE_COMPILER_CONTROL_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkPipelineCompilerControlFlagBitsAMD; +typedef VkFlags VkPipelineCompilerControlFlagsAMD; +typedef struct VkPipelineCompilerControlCreateInfoAMD { + 
VkStructureType sType; + const void* pNext; + VkPipelineCompilerControlFlagsAMD compilerControlFlags; +} VkPipelineCompilerControlCreateInfoAMD; + + + +#define VK_EXT_calibrated_timestamps 1 +#define VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION 1 +#define VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME "VK_EXT_calibrated_timestamps" + +typedef enum VkTimeDomainEXT { + VK_TIME_DOMAIN_DEVICE_EXT = 0, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT = 1, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT = 2, + VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = 3, + VK_TIME_DOMAIN_MAX_ENUM_EXT = 0x7FFFFFFF +} VkTimeDomainEXT; +typedef struct VkCalibratedTimestampInfoEXT { + VkStructureType sType; + const void* pNext; + VkTimeDomainEXT timeDomain; +} VkCalibratedTimestampInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains); +typedef VkResult (VKAPI_PTR *PFN_vkGetCalibratedTimestampsEXT)(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pTimeDomainCount, + VkTimeDomainEXT* pTimeDomains); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsEXT( + VkDevice device, + uint32_t timestampCount, + const VkCalibratedTimestampInfoEXT* pTimestampInfos, + uint64_t* pTimestamps, + uint64_t* pMaxDeviation); +#endif + + +#define VK_AMD_shader_core_properties 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 2 +#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" +typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { + VkStructureType sType; + void* pNext; + uint32_t shaderEngineCount; + uint32_t shaderArraysPerEngineCount; + uint32_t computeUnitsPerShaderArray; + uint32_t simdPerComputeUnit; + uint32_t wavefrontsPerSimd; + uint32_t wavefrontSize; + uint32_t sgprsPerSimd; + uint32_t minSgprAllocation; + uint32_t maxSgprAllocation; + uint32_t sgprAllocationGranularity; + uint32_t vgprsPerSimd; + uint32_t minVgprAllocation; + uint32_t maxVgprAllocation; + uint32_t vgprAllocationGranularity; +} VkPhysicalDeviceShaderCorePropertiesAMD; + + + +#define VK_AMD_memory_overallocation_behavior 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior" + +typedef enum VkMemoryOverallocationBehaviorAMD { + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF +} VkMemoryOverallocationBehaviorAMD; +typedef struct VkDeviceMemoryOverallocationCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkMemoryOverallocationBehaviorAMD overallocationBehavior; +} VkDeviceMemoryOverallocationCreateInfoAMD; + + + +#define VK_EXT_vertex_attribute_divisor 1 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" +typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; +} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; + +typedef struct VkVertexInputBindingDivisorDescriptionEXT { + uint32_t 
binding; + uint32_t divisor; +} VkVertexInputBindingDivisorDescriptionEXT; + +typedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t vertexBindingDivisorCount; + const VkVertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors; +} VkPipelineVertexInputDivisorStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 vertexAttributeInstanceRateDivisor; + VkBool32 vertexAttributeInstanceRateZeroDivisor; +} VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; + + + +#define VK_EXT_pipeline_creation_feedback 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME "VK_EXT_pipeline_creation_feedback" + +typedef enum VkPipelineCreationFeedbackFlagBitsEXT { + VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = 0x00000001, + VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = 0x00000002, + VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = 0x00000004, + VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPipelineCreationFeedbackFlagBitsEXT; +typedef VkFlags VkPipelineCreationFeedbackFlagsEXT; +typedef struct VkPipelineCreationFeedbackEXT { + VkPipelineCreationFeedbackFlagsEXT flags; + uint64_t duration; +} VkPipelineCreationFeedbackEXT; + +typedef struct VkPipelineCreationFeedbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineCreationFeedbackEXT* pPipelineCreationFeedback; + uint32_t pipelineStageCreationFeedbackCount; + VkPipelineCreationFeedbackEXT* pPipelineStageCreationFeedbacks; +} VkPipelineCreationFeedbackCreateInfoEXT; + + + +#define VK_NV_shader_subgroup_partitioned 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned" + + +#define VK_NV_compute_shader_derivatives 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_NV_compute_shader_derivatives" +typedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 computeDerivativeGroupQuads; + VkBool32 computeDerivativeGroupLinear; +} VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; + + + +#define VK_NV_mesh_shader 1 +#define VK_NV_MESH_SHADER_SPEC_VERSION 1 +#define VK_NV_MESH_SHADER_EXTENSION_NAME "VK_NV_mesh_shader" +typedef struct VkPhysicalDeviceMeshShaderFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 taskShader; + VkBool32 meshShader; +} VkPhysicalDeviceMeshShaderFeaturesNV; + +typedef struct VkPhysicalDeviceMeshShaderPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxDrawMeshTasksCount; + uint32_t maxTaskWorkGroupInvocations; + uint32_t maxTaskWorkGroupSize[3]; + uint32_t maxTaskTotalMemorySize; + uint32_t maxTaskOutputCount; + uint32_t maxMeshWorkGroupInvocations; + uint32_t maxMeshWorkGroupSize[3]; + uint32_t maxMeshTotalMemorySize; + uint32_t maxMeshOutputVertices; + uint32_t maxMeshOutputPrimitives; + uint32_t maxMeshMultiviewViewCount; + uint32_t meshOutputPerVertexGranularity; + uint32_t meshOutputPerPrimitiveGranularity; +} VkPhysicalDeviceMeshShaderPropertiesNV; + +typedef struct VkDrawMeshTasksIndirectCommandNV { + uint32_t taskCount; + uint32_t firstTask; +} VkDrawMeshTasksIndirectCommandNV; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksNV)(VkCommandBuffer commandBuffer, uint32_t 
taskCount, uint32_t firstTask); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksNV( + VkCommandBuffer commandBuffer, + uint32_t taskCount, + uint32_t firstTask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_NV_fragment_shader_barycentric 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_NV_fragment_shader_barycentric" +typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderBarycentric; +} VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV; + + + +#define VK_NV_shader_image_footprint 1 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 2 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME "VK_NV_shader_image_footprint" +typedef struct VkPhysicalDeviceShaderImageFootprintFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 imageFootprint; +} VkPhysicalDeviceShaderImageFootprintFeaturesNV; + + + +#define VK_NV_scissor_exclusive 1 +#define VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION 1 +#define VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME "VK_NV_scissor_exclusive" +typedef struct VkPipelineViewportExclusiveScissorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t exclusiveScissorCount; + const VkRect2D* pExclusiveScissors; +} VkPipelineViewportExclusiveScissorStateCreateInfoNV; + +typedef struct VkPhysicalDeviceExclusiveScissorFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 exclusiveScissor; +} VkPhysicalDeviceExclusiveScissorFeaturesNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetExclusiveScissorNV)(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetExclusiveScissorNV( + VkCommandBuffer commandBuffer, + uint32_t firstExclusiveScissor, + uint32_t exclusiveScissorCount, + const VkRect2D* pExclusiveScissors); +#endif + + +#define VK_NV_device_diagnostic_checkpoints 1 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION 2 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME "VK_NV_device_diagnostic_checkpoints" +typedef struct VkQueueFamilyCheckpointPropertiesNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags checkpointExecutionStageMask; +} VkQueueFamilyCheckpointPropertiesNV; + +typedef struct VkCheckpointDataNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlagBits stage; + void* pCheckpointMarker; +} VkCheckpointDataNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetCheckpointNV)(VkCommandBuffer commandBuffer, const void* pCheckpointMarker); +typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointDataNV)(VkQueue queue, uint32_t* 
pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCheckpointNV( + VkCommandBuffer commandBuffer, + const void* pCheckpointMarker); + +VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointDataNV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointDataNV* pCheckpointData); +#endif + + +#define VK_INTEL_shader_integer_functions2 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_SPEC_VERSION 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME "VK_INTEL_shader_integer_functions2" +typedef struct VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + VkStructureType sType; + void* pNext; + VkBool32 shaderIntegerFunctions2; +} VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + + + +#define VK_INTEL_performance_query 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPerformanceConfigurationINTEL) +#define VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION 2 +#define VK_INTEL_PERFORMANCE_QUERY_EXTENSION_NAME "VK_INTEL_performance_query" + +typedef enum VkPerformanceConfigurationTypeINTEL { + VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL = 0, + VK_PERFORMANCE_CONFIGURATION_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceConfigurationTypeINTEL; + +typedef enum VkQueryPoolSamplingModeINTEL { + VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL = 0, + VK_QUERY_POOL_SAMPLING_MODE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkQueryPoolSamplingModeINTEL; + +typedef enum VkPerformanceOverrideTypeINTEL { + VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL = 0, + VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL = 1, + VK_PERFORMANCE_OVERRIDE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceOverrideTypeINTEL; + +typedef enum VkPerformanceParameterTypeINTEL { + VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL = 0, + VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL = 1, + VK_PERFORMANCE_PARAMETER_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceParameterTypeINTEL; + +typedef enum VkPerformanceValueTypeINTEL { + VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL = 0, + VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL = 1, + VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL = 2, + VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL = 3, + VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL = 4, + VK_PERFORMANCE_VALUE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceValueTypeINTEL; +typedef union VkPerformanceValueDataINTEL { + uint32_t value32; + uint64_t value64; + float valueFloat; + VkBool32 valueBool; + const char* valueString; +} VkPerformanceValueDataINTEL; + +typedef struct VkPerformanceValueINTEL { + VkPerformanceValueTypeINTEL type; + VkPerformanceValueDataINTEL data; +} VkPerformanceValueINTEL; + +typedef struct VkInitializePerformanceApiInfoINTEL { + VkStructureType sType; + const void* pNext; + void* pUserData; +} VkInitializePerformanceApiInfoINTEL; + +typedef struct VkQueryPoolPerformanceQueryCreateInfoINTEL { + VkStructureType sType; + const void* pNext; + VkQueryPoolSamplingModeINTEL performanceCountersSampling; +} VkQueryPoolPerformanceQueryCreateInfoINTEL; + +typedef VkQueryPoolPerformanceQueryCreateInfoINTEL VkQueryPoolCreateInfoINTEL; + +typedef struct VkPerformanceMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint64_t marker; +} VkPerformanceMarkerInfoINTEL; + +typedef struct VkPerformanceStreamMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint32_t marker; +} VkPerformanceStreamMarkerInfoINTEL; + +typedef struct VkPerformanceOverrideInfoINTEL { + VkStructureType sType; + const void* pNext; + 
VkPerformanceOverrideTypeINTEL type; + VkBool32 enable; + uint64_t parameter; +} VkPerformanceOverrideInfoINTEL; + +typedef struct VkPerformanceConfigurationAcquireInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceConfigurationTypeINTEL type; +} VkPerformanceConfigurationAcquireInfoINTEL; + +typedef VkResult (VKAPI_PTR *PFN_vkInitializePerformanceApiINTEL)(VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); +typedef void (VKAPI_PTR *PFN_vkUninitializePerformanceApiINTEL)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceStreamMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceOverrideINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo); +typedef VkResult (VKAPI_PTR *PFN_vkAcquirePerformanceConfigurationINTEL)(VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration); +typedef VkResult (VKAPI_PTR *PFN_vkReleasePerformanceConfigurationINTEL)(VkDevice device, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSetPerformanceConfigurationINTEL)(VkQueue queue, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkGetPerformanceParameterINTEL)(VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkInitializePerformanceApiINTEL( + VkDevice device, + const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); + +VKAPI_ATTR void VKAPI_CALL vkUninitializePerformanceApiINTEL( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceMarkerInfoINTEL* pMarkerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceStreamMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceOverrideINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceOverrideInfoINTEL* pOverrideInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquirePerformanceConfigurationINTEL( + VkDevice device, + const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, + VkPerformanceConfigurationINTEL* pConfiguration); + +VKAPI_ATTR VkResult VKAPI_CALL vkReleasePerformanceConfigurationINTEL( + VkDevice device, + VkPerformanceConfigurationINTEL configuration); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSetPerformanceConfigurationINTEL( + VkQueue queue, + VkPerformanceConfigurationINTEL configuration); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPerformanceParameterINTEL( + VkDevice device, + VkPerformanceParameterTypeINTEL parameter, + VkPerformanceValueINTEL* pValue); +#endif + + +#define VK_EXT_pci_bus_info 1 +#define VK_EXT_PCI_BUS_INFO_SPEC_VERSION 2 +#define VK_EXT_PCI_BUS_INFO_EXTENSION_NAME "VK_EXT_pci_bus_info" +typedef struct VkPhysicalDevicePCIBusInfoPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t pciDomain; + uint32_t pciBus; + uint32_t pciDevice; + uint32_t pciFunction; +} VkPhysicalDevicePCIBusInfoPropertiesEXT; + + + +#define VK_AMD_display_native_hdr 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_SPEC_VERSION 1 +#define 
VK_AMD_DISPLAY_NATIVE_HDR_EXTENSION_NAME "VK_AMD_display_native_hdr" +typedef struct VkDisplayNativeHdrSurfaceCapabilitiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 localDimmingSupport; +} VkDisplayNativeHdrSurfaceCapabilitiesAMD; + +typedef struct VkSwapchainDisplayNativeHdrCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkBool32 localDimmingEnable; +} VkSwapchainDisplayNativeHdrCreateInfoAMD; + +typedef void (VKAPI_PTR *PFN_vkSetLocalDimmingAMD)(VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetLocalDimmingAMD( + VkDevice device, + VkSwapchainKHR swapChain, + VkBool32 localDimmingEnable); +#endif + + +#define VK_EXT_fragment_density_map 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME "VK_EXT_fragment_density_map" +typedef struct VkPhysicalDeviceFragmentDensityMapFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMap; + VkBool32 fragmentDensityMapDynamic; + VkBool32 fragmentDensityMapNonSubsampledImages; +} VkPhysicalDeviceFragmentDensityMapFeaturesEXT; + +typedef struct VkPhysicalDeviceFragmentDensityMapPropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentDensityTexelSize; + VkExtent2D maxFragmentDensityTexelSize; + VkBool32 fragmentDensityInvocations; +} VkPhysicalDeviceFragmentDensityMapPropertiesEXT; + +typedef struct VkRenderPassFragmentDensityMapCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkAttachmentReference fragmentDensityMapAttachment; +} VkRenderPassFragmentDensityMapCreateInfoEXT; + + + +#define VK_EXT_scalar_block_layout 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME "VK_EXT_scalar_block_layout" +typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLayoutFeaturesEXT; + + + +#define VK_GOOGLE_hlsl_functionality1 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1" + + +#define VK_GOOGLE_decorate_string 1 +#define VK_GOOGLE_DECORATE_STRING_SPEC_VERSION 1 +#define VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME "VK_GOOGLE_decorate_string" + + +#define VK_EXT_subgroup_size_control 1 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME "VK_EXT_subgroup_size_control" +typedef struct VkPhysicalDeviceSubgroupSizeControlFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 subgroupSizeControl; + VkBool32 computeFullSubgroups; +} VkPhysicalDeviceSubgroupSizeControlFeaturesEXT; + +typedef struct VkPhysicalDeviceSubgroupSizeControlPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t minSubgroupSize; + uint32_t maxSubgroupSize; + uint32_t maxComputeWorkgroupSubgroups; + VkShaderStageFlags requiredSubgroupSizeStages; +} VkPhysicalDeviceSubgroupSizeControlPropertiesEXT; + +typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + VkStructureType sType; + void* pNext; + uint32_t requiredSubgroupSize; +} VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT; + + + +#define VK_AMD_shader_core_properties2 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME "VK_AMD_shader_core_properties2" + +typedef enum VkShaderCorePropertiesFlagBitsAMD { + VK_SHADER_CORE_PROPERTIES_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} 
VkShaderCorePropertiesFlagBitsAMD;
+typedef VkFlags VkShaderCorePropertiesFlagsAMD;
+typedef struct VkPhysicalDeviceShaderCoreProperties2AMD {
+    VkStructureType sType;
+    void* pNext;
+    VkShaderCorePropertiesFlagsAMD shaderCoreFeatures;
+    uint32_t activeComputeUnitCount;
+} VkPhysicalDeviceShaderCoreProperties2AMD;
+
+
+#define VK_AMD_device_coherent_memory 1
+#define VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION 1
+#define VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME "VK_AMD_device_coherent_memory"
+typedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 deviceCoherentMemory;
+} VkPhysicalDeviceCoherentMemoryFeaturesAMD;
+
+
+#define VK_EXT_memory_budget 1
+#define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1
+#define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME "VK_EXT_memory_budget"
+typedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceSize heapBudget[VK_MAX_MEMORY_HEAPS];
+    VkDeviceSize heapUsage[VK_MAX_MEMORY_HEAPS];
+} VkPhysicalDeviceMemoryBudgetPropertiesEXT;
+
+
+#define VK_EXT_memory_priority 1
+#define VK_EXT_MEMORY_PRIORITY_SPEC_VERSION 1
+#define VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME "VK_EXT_memory_priority"
+typedef struct VkPhysicalDeviceMemoryPriorityFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 memoryPriority;
+} VkPhysicalDeviceMemoryPriorityFeaturesEXT;
+
+typedef struct VkMemoryPriorityAllocateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    float priority;
+} VkMemoryPriorityAllocateInfoEXT;
+
+
+#define VK_NV_dedicated_allocation_image_aliasing 1
+#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION 1
+#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_EXTENSION_NAME "VK_NV_dedicated_allocation_image_aliasing"
+typedef struct VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 dedicatedAllocationImageAliasing;
+} VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV;
+
+
+#define VK_EXT_buffer_device_address 1
+#define VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 2
+#define VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_EXT_buffer_device_address"
+typedef struct VkPhysicalDeviceBufferDeviceAddressFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 bufferDeviceAddress;
+    VkBool32 bufferDeviceAddressCaptureReplay;
+    VkBool32 bufferDeviceAddressMultiDevice;
+} VkPhysicalDeviceBufferDeviceAddressFeaturesEXT;
+
+typedef VkPhysicalDeviceBufferDeviceAddressFeaturesEXT VkPhysicalDeviceBufferAddressFeaturesEXT;
+
+typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoEXT;
+
+typedef struct VkBufferDeviceAddressCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceAddress deviceAddress;
+} VkBufferDeviceAddressCreateInfoEXT;
+
+typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressEXT)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressEXT(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo);
+#endif
+
+
+#define VK_EXT_tooling_info 1
+#define VK_EXT_TOOLING_INFO_SPEC_VERSION 1
+#define VK_EXT_TOOLING_INFO_EXTENSION_NAME "VK_EXT_tooling_info"
+
+typedef enum VkToolPurposeFlagBitsEXT {
+    VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = 0x00000001,
+    VK_TOOL_PURPOSE_PROFILING_BIT_EXT = 0x00000002,
+    VK_TOOL_PURPOSE_TRACING_BIT_EXT = 0x00000004,
+    VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = 0x00000008,
+    VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = 0x00000010,
+    VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020,
+    VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040,
+    VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkToolPurposeFlagBitsEXT;
+typedef VkFlags VkToolPurposeFlagsEXT;
+typedef struct VkPhysicalDeviceToolPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    char name[VK_MAX_EXTENSION_NAME_SIZE];
+    char version[VK_MAX_EXTENSION_NAME_SIZE];
+    VkToolPurposeFlagsEXT purposes;
+    char description[VK_MAX_DESCRIPTION_SIZE];
+    char layer[VK_MAX_EXTENSION_NAME_SIZE];
+} VkPhysicalDeviceToolPropertiesEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolPropertiesEXT* pToolProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolPropertiesEXT(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pToolCount,
+    VkPhysicalDeviceToolPropertiesEXT* pToolProperties);
+#endif
+
+
+#define VK_EXT_separate_stencil_usage 1
+#define VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION 1
+#define VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME "VK_EXT_separate_stencil_usage"
+typedef VkImageStencilUsageCreateInfo VkImageStencilUsageCreateInfoEXT;
+
+
+#define VK_EXT_validation_features 1
+#define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 3
+#define VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME "VK_EXT_validation_features"
+
+typedef enum VkValidationFeatureEnableEXT {
+    VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT = 0,
+    VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT = 1,
+    VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT = 2,
+    VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT = 3,
+    VK_VALIDATION_FEATURE_ENABLE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkValidationFeatureEnableEXT;
+
+typedef enum VkValidationFeatureDisableEXT {
+    VK_VALIDATION_FEATURE_DISABLE_ALL_EXT = 0,
+    VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT = 1,
+    VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT = 2,
+    VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT = 3,
+    VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT = 4,
+    VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT = 5,
+    VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT = 6,
+    VK_VALIDATION_FEATURE_DISABLE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkValidationFeatureDisableEXT;
+typedef struct VkValidationFeaturesEXT {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t enabledValidationFeatureCount;
+    const VkValidationFeatureEnableEXT* pEnabledValidationFeatures;
+    uint32_t disabledValidationFeatureCount;
+    const VkValidationFeatureDisableEXT* pDisabledValidationFeatures;
+} VkValidationFeaturesEXT;
+
+
+#define VK_NV_cooperative_matrix 1
+#define VK_NV_COOPERATIVE_MATRIX_SPEC_VERSION 1
+#define VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME "VK_NV_cooperative_matrix"
+
+typedef enum VkComponentTypeNV {
+    VK_COMPONENT_TYPE_FLOAT16_NV = 0,
+    VK_COMPONENT_TYPE_FLOAT32_NV = 1,
+    VK_COMPONENT_TYPE_FLOAT64_NV = 2,
+    VK_COMPONENT_TYPE_SINT8_NV = 3,
+    VK_COMPONENT_TYPE_SINT16_NV = 4,
+    VK_COMPONENT_TYPE_SINT32_NV = 5,
+    VK_COMPONENT_TYPE_SINT64_NV = 6,
+    VK_COMPONENT_TYPE_UINT8_NV = 7,
+    VK_COMPONENT_TYPE_UINT16_NV = 8,
+    VK_COMPONENT_TYPE_UINT32_NV = 9,
+    VK_COMPONENT_TYPE_UINT64_NV = 10,
+    VK_COMPONENT_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkComponentTypeNV;
+
+typedef enum VkScopeNV {
+    VK_SCOPE_DEVICE_NV = 1,
+    VK_SCOPE_WORKGROUP_NV = 2,
+    VK_SCOPE_SUBGROUP_NV = 3,
+    VK_SCOPE_QUEUE_FAMILY_NV = 5,
+    VK_SCOPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkScopeNV;
+typedef struct VkCooperativeMatrixPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t MSize;
+    uint32_t NSize;
+    uint32_t KSize;
+    VkComponentTypeNV AType;
+    VkComponentTypeNV BType;
+    VkComponentTypeNV CType;
+    VkComponentTypeNV DType;
+    VkScopeNV scope;
+} VkCooperativeMatrixPropertiesNV;
+
+typedef struct VkPhysicalDeviceCooperativeMatrixFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 cooperativeMatrix;
+    VkBool32 cooperativeMatrixRobustBufferAccess;
+} VkPhysicalDeviceCooperativeMatrixFeaturesNV;
+
+typedef struct VkPhysicalDeviceCooperativeMatrixPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkShaderStageFlags cooperativeMatrixSupportedStages;
+} VkPhysicalDeviceCooperativeMatrixPropertiesNV;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkCooperativeMatrixPropertiesNV* pProperties);
+#endif
+
+
+#define VK_NV_coverage_reduction_mode 1
+#define VK_NV_COVERAGE_REDUCTION_MODE_SPEC_VERSION 1
+#define VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME "VK_NV_coverage_reduction_mode"
+
+typedef enum VkCoverageReductionModeNV {
+    VK_COVERAGE_REDUCTION_MODE_MERGE_NV = 0,
+    VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV = 1,
+    VK_COVERAGE_REDUCTION_MODE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkCoverageReductionModeNV;
+typedef VkFlags VkPipelineCoverageReductionStateCreateFlagsNV;
+typedef struct VkPhysicalDeviceCoverageReductionModeFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 coverageReductionMode;
+} VkPhysicalDeviceCoverageReductionModeFeaturesNV;
+
+typedef struct VkPipelineCoverageReductionStateCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineCoverageReductionStateCreateFlagsNV flags;
+    VkCoverageReductionModeNV coverageReductionMode;
+} VkPipelineCoverageReductionStateCreateInfoNV;
+
+typedef struct VkFramebufferMixedSamplesCombinationNV {
+    VkStructureType sType;
+    void* pNext;
+    VkCoverageReductionModeNV coverageReductionMode;
+    VkSampleCountFlagBits rasterizationSamples;
+    VkSampleCountFlags depthStencilSamples;
+    VkSampleCountFlags colorSamples;
+} VkFramebufferMixedSamplesCombinationNV;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)(VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pCombinationCount,
+    VkFramebufferMixedSamplesCombinationNV* pCombinations);
+#endif
+
+
+#define VK_EXT_fragment_shader_interlock 1
+#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_SPEC_VERSION 1
+#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME "VK_EXT_fragment_shader_interlock"
+typedef struct VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 fragmentShaderSampleInterlock;
+    VkBool32 fragmentShaderPixelInterlock;
+    VkBool32 fragmentShaderShadingRateInterlock;
+} VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT;
+
+
+#define VK_EXT_ycbcr_image_arrays 1
+#define VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION 1
+#define VK_EXT_YCBCR_IMAGE_ARRAYS_EXTENSION_NAME "VK_EXT_ycbcr_image_arrays"
+typedef struct VkPhysicalDeviceYcbcrImageArraysFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 ycbcrImageArrays;
+} VkPhysicalDeviceYcbcrImageArraysFeaturesEXT;
+
+
+#define VK_EXT_headless_surface 1
+#define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 1
+#define VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME "VK_EXT_headless_surface"
+typedef VkFlags VkHeadlessSurfaceCreateFlagsEXT;
+typedef struct VkHeadlessSurfaceCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkHeadlessSurfaceCreateFlagsEXT flags;
+} VkHeadlessSurfaceCreateInfoEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateHeadlessSurfaceEXT)(VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT(
+    VkInstance instance,
+    const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+
+
+#define VK_EXT_line_rasterization 1
+#define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1
+#define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME "VK_EXT_line_rasterization"
+
+typedef enum VkLineRasterizationModeEXT {
+    VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = 0,
+    VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = 1,
+    VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = 2,
+    VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = 3,
+    VK_LINE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkLineRasterizationModeEXT;
+typedef struct VkPhysicalDeviceLineRasterizationFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 rectangularLines;
+    VkBool32 bresenhamLines;
+    VkBool32 smoothLines;
+    VkBool32 stippledRectangularLines;
+    VkBool32 stippledBresenhamLines;
+    VkBool32 stippledSmoothLines;
+} VkPhysicalDeviceLineRasterizationFeaturesEXT;
+
+typedef struct VkPhysicalDeviceLineRasterizationPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t lineSubPixelPrecisionBits;
+} VkPhysicalDeviceLineRasterizationPropertiesEXT;
+
+typedef struct VkPipelineRasterizationLineStateCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkLineRasterizationModeEXT lineRasterizationMode;
+    VkBool32 stippledLineEnable;
+    uint32_t lineStippleFactor;
+    uint16_t lineStipplePattern;
+} VkPipelineRasterizationLineStateCreateInfoEXT;
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t lineStippleFactor,
+    uint16_t lineStipplePattern);
+#endif
+
+
+#define VK_EXT_host_query_reset 1
+#define VK_EXT_HOST_QUERY_RESET_SPEC_VERSION 1
+#define VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME "VK_EXT_host_query_reset"
+typedef VkPhysicalDeviceHostQueryResetFeatures VkPhysicalDeviceHostQueryResetFeaturesEXT;
+
+typedef void (VKAPI_PTR *PFN_vkResetQueryPoolEXT)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount);
+#endif
+
+
+#define VK_EXT_index_type_uint8 1
+#define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1
+#define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_EXT_index_type_uint8"
+typedef struct VkPhysicalDeviceIndexTypeUint8FeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 indexTypeUint8;
+} VkPhysicalDeviceIndexTypeUint8FeaturesEXT;
+
+
+#define VK_EXT_shader_demote_to_helper_invocation 1
+#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1
+#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME "VK_EXT_shader_demote_to_helper_invocation"
+typedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderDemoteToHelperInvocation;
+} VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT;
+
+
+#define VK_NV_device_generated_commands 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNV)
+#define VK_NV_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3
+#define VK_NV_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NV_device_generated_commands"
+
+typedef enum VkIndirectCommandsTokenTypeNV {
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV = 0,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV = 1,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV = 2,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV = 3,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV = 4,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV = 5,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV = 6,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV = 7,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkIndirectCommandsTokenTypeNV;
+
+typedef enum VkIndirectStateFlagBitsNV {
+    VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV = 0x00000001,
+    VK_INDIRECT_STATE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkIndirectStateFlagBitsNV;
+typedef VkFlags VkIndirectStateFlagsNV;
+
+typedef enum VkIndirectCommandsLayoutUsageFlagBitsNV {
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV = 0x00000001,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV = 0x00000002,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV = 0x00000004,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkIndirectCommandsLayoutUsageFlagBitsNV;
+typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNV;
+typedef struct VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxGraphicsShaderGroupCount;
+    uint32_t maxIndirectSequenceCount;
+    uint32_t maxIndirectCommandsTokenCount;
+    uint32_t maxIndirectCommandsStreamCount;
+    uint32_t maxIndirectCommandsTokenOffset;
+    uint32_t maxIndirectCommandsStreamStride;
+    uint32_t minSequencesCountBufferOffsetAlignment;
+    uint32_t minSequencesIndexBufferOffsetAlignment;
+    uint32_t minIndirectCommandsBufferOffsetAlignment;
+} VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV;
+
+typedef struct VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 deviceGeneratedCommands;
+} VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV;
+
+typedef struct VkGraphicsShaderGroupCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t stageCount;
+    const VkPipelineShaderStageCreateInfo* pStages;
+    const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+    const VkPipelineTessellationStateCreateInfo* pTessellationState;
+} VkGraphicsShaderGroupCreateInfoNV;
+
+typedef struct VkGraphicsPipelineShaderGroupsCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t groupCount;
+    const VkGraphicsShaderGroupCreateInfoNV* pGroups;
+    uint32_t pipelineCount;
+    const VkPipeline* pPipelines;
+} VkGraphicsPipelineShaderGroupsCreateInfoNV;
+
+typedef struct VkBindShaderGroupIndirectCommandNV {
+    uint32_t groupIndex;
+} VkBindShaderGroupIndirectCommandNV;
+
+typedef struct VkBindIndexBufferIndirectCommandNV {
+    VkDeviceAddress bufferAddress;
+    uint32_t size;
+    VkIndexType indexType;
+} VkBindIndexBufferIndirectCommandNV;
+
+typedef struct VkBindVertexBufferIndirectCommandNV {
+    VkDeviceAddress bufferAddress;
+    uint32_t size;
+    uint32_t stride;
+} VkBindVertexBufferIndirectCommandNV;
+
+typedef struct VkSetStateFlagsIndirectCommandNV {
+    uint32_t data;
+} VkSetStateFlagsIndirectCommandNV;
+
+typedef struct VkIndirectCommandsStreamNV {
+    VkBuffer buffer;
+    VkDeviceSize offset;
+} VkIndirectCommandsStreamNV;
+
+typedef struct VkIndirectCommandsLayoutTokenNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkIndirectCommandsTokenTypeNV tokenType;
+    uint32_t stream;
+    uint32_t offset;
+    uint32_t vertexBindingUnit;
+    VkBool32 vertexDynamicStride;
+    VkPipelineLayout pushconstantPipelineLayout;
+    VkShaderStageFlags pushconstantShaderStageFlags;
+    uint32_t pushconstantOffset;
+    uint32_t pushconstantSize;
+    VkIndirectStateFlagsNV indirectStateFlags;
+    uint32_t indexTypeCount;
+    const VkIndexType* pIndexTypes;
+    const uint32_t* pIndexTypeValues;
+} VkIndirectCommandsLayoutTokenNV;
+
+typedef struct VkIndirectCommandsLayoutCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkIndirectCommandsLayoutUsageFlagsNV flags;
+    VkPipelineBindPoint pipelineBindPoint;
+    uint32_t tokenCount;
+    const VkIndirectCommandsLayoutTokenNV* pTokens;
+    uint32_t streamCount;
+    const uint32_t* pStreamStrides;
+} VkIndirectCommandsLayoutCreateInfoNV;
+
+typedef struct VkGeneratedCommandsInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkPipeline pipeline;
+    VkIndirectCommandsLayoutNV indirectCommandsLayout;
+    uint32_t streamCount;
+    const VkIndirectCommandsStreamNV* pStreams;
+    uint32_t sequencesCount;
+    VkBuffer preprocessBuffer;
+    VkDeviceSize preprocessOffset;
+    VkDeviceSize preprocessSize;
+    VkBuffer sequencesCountBuffer;
+    VkDeviceSize sequencesCountOffset;
+    VkBuffer sequencesIndexBuffer;
+    VkDeviceSize sequencesIndexOffset;
+} VkGeneratedCommandsInfoNV;
+
+typedef struct VkGeneratedCommandsMemoryRequirementsInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkPipeline pipeline;
+    VkIndirectCommandsLayoutNV indirectCommandsLayout;
+    uint32_t maxSequencesCount;
+} VkGeneratedCommandsMemoryRequirementsInfoNV;
+
+typedef void (VKAPI_PTR *PFN_vkGetGeneratedCommandsMemoryRequirementsNV)(VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkCmdPreprocessGeneratedCommandsNV)(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdExecuteGeneratedCommandsNV)(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdBindPipelineShaderGroupNV)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t groupIndex);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNV)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNV)(VkDevice device, VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetGeneratedCommandsMemoryRequirementsNV(
+    VkDevice device,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPreprocessGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+    const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 isPreprocessed,
+    const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindPipelineShaderGroupNV(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipeline pipeline,
+    uint32_t groupIndex);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNV(
+    VkDevice device,
+    const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkIndirectCommandsLayoutNV* pIndirectCommandsLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNV(
+    VkDevice device,
+    VkIndirectCommandsLayoutNV indirectCommandsLayout,
+    const VkAllocationCallbacks* pAllocator);
+#endif
+
+
+#define VK_EXT_texel_buffer_alignment 1
+#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION 1
+#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME "VK_EXT_texel_buffer_alignment"
+typedef struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 texelBufferAlignment;
+} VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT;
+
+typedef struct VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceSize storageTexelBufferOffsetAlignmentBytes;
+    VkBool32 storageTexelBufferOffsetSingleTexelAlignment;
+    VkDeviceSize uniformTexelBufferOffsetAlignmentBytes;
+    VkBool32 uniformTexelBufferOffsetSingleTexelAlignment;
+} VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT;
+
+
+#define VK_QCOM_render_pass_transform 1
+#define VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION 1
+#define VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME "VK_QCOM_render_pass_transform"
+typedef struct VkRenderPassTransformBeginInfoQCOM {
+    VkStructureType sType;
+    void* pNext;
+    VkSurfaceTransformFlagBitsKHR transform;
+} VkRenderPassTransformBeginInfoQCOM;
+
+typedef struct VkCommandBufferInheritanceRenderPassTransformInfoQCOM {
+    VkStructureType sType;
+    void* pNext;
+    VkSurfaceTransformFlagBitsKHR transform;
+    VkRect2D renderArea;
+} VkCommandBufferInheritanceRenderPassTransformInfoQCOM;
+
+
+#define VK_EXT_robustness2 1
+#define VK_EXT_ROBUSTNESS_2_SPEC_VERSION 1
+#define VK_EXT_ROBUSTNESS_2_EXTENSION_NAME "VK_EXT_robustness2"
+typedef struct VkPhysicalDeviceRobustness2FeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 robustBufferAccess2;
+    VkBool32 robustImageAccess2;
+    VkBool32 nullDescriptor;
+} VkPhysicalDeviceRobustness2FeaturesEXT;
+
+typedef struct VkPhysicalDeviceRobustness2PropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceSize robustStorageBufferAccessSizeAlignment;
+    VkDeviceSize robustUniformBufferAccessSizeAlignment;
+} VkPhysicalDeviceRobustness2PropertiesEXT;
+
+
+#define VK_EXT_custom_border_color 1
+#define VK_EXT_CUSTOM_BORDER_COLOR_SPEC_VERSION 12
+#define VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME "VK_EXT_custom_border_color"
+typedef struct VkSamplerCustomBorderColorCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkClearColorValue customBorderColor;
+    VkFormat format;
+} VkSamplerCustomBorderColorCreateInfoEXT;
+
+typedef struct VkPhysicalDeviceCustomBorderColorPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxCustomBorderColorSamplers;
+} VkPhysicalDeviceCustomBorderColorPropertiesEXT;
+
+typedef struct VkPhysicalDeviceCustomBorderColorFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 customBorderColors;
+    VkBool32 customBorderColorWithoutFormat;
+} VkPhysicalDeviceCustomBorderColorFeaturesEXT;
+
+
+#define VK_GOOGLE_user_type 1
+#define VK_GOOGLE_USER_TYPE_SPEC_VERSION 1
+#define VK_GOOGLE_USER_TYPE_EXTENSION_NAME "VK_GOOGLE_user_type"
+
+
+#define VK_EXT_private_data 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlotEXT)
+#define VK_EXT_PRIVATE_DATA_SPEC_VERSION 1
+#define VK_EXT_PRIVATE_DATA_EXTENSION_NAME "VK_EXT_private_data"
+
+typedef enum VkPrivateDataSlotCreateFlagBitsEXT {
+    VK_PRIVATE_DATA_SLOT_CREATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkPrivateDataSlotCreateFlagBitsEXT;
+typedef VkFlags VkPrivateDataSlotCreateFlagsEXT;
+typedef struct VkPhysicalDevicePrivateDataFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 privateData;
+} VkPhysicalDevicePrivateDataFeaturesEXT;
+
+typedef struct VkDevicePrivateDataCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t privateDataSlotRequestCount;
+} VkDevicePrivateDataCreateInfoEXT;
+
+typedef struct VkPrivateDataSlotCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkPrivateDataSlotCreateFlagsEXT flags;
+} VkPrivateDataSlotCreateInfoEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlotEXT)(VkDevice device, const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlotEXT* pPrivateDataSlot);
+typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlotEXT)(VkDevice device, VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data);
+typedef void (VKAPI_PTR *PFN_vkGetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlotEXT(
+    VkDevice device,
+    const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkPrivateDataSlotEXT* pPrivateDataSlot);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlotEXT(
+    VkDevice device,
+    VkPrivateDataSlotEXT privateDataSlot,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlotEXT privateDataSlot,
+    uint64_t data);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlotEXT privateDataSlot,
+    uint64_t* pData);
+#endif
+
+
+#define VK_EXT_pipeline_creation_cache_control 1
+#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION 3
+#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME "VK_EXT_pipeline_creation_cache_control"
+typedef struct VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 pipelineCreationCacheControl;
+} VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT;
+
+
+#define VK_NV_device_diagnostics_config 1
+#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION 1
+#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME "VK_NV_device_diagnostics_config"
+
+typedef enum VkDeviceDiagnosticsConfigFlagBitsNV {
+    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV = 0x00000001,
+    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV = 0x00000002,
+    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV = 0x00000004,
+    VK_DEVICE_DIAGNOSTICS_CONFIG_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkDeviceDiagnosticsConfigFlagBitsNV;
+typedef VkFlags VkDeviceDiagnosticsConfigFlagsNV;
+typedef struct VkPhysicalDeviceDiagnosticsConfigFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 diagnosticsConfig;
+} VkPhysicalDeviceDiagnosticsConfigFeaturesNV;
+
+typedef struct VkDeviceDiagnosticsConfigCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceDiagnosticsConfigFlagsNV flags;
+} VkDeviceDiagnosticsConfigCreateInfoNV;
+
+
+#define VK_QCOM_render_pass_store_ops 1
+#define VK_QCOM_render_pass_store_ops_SPEC_VERSION 2
+#define VK_QCOM_render_pass_store_ops_EXTENSION_NAME "VK_QCOM_render_pass_store_ops"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
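Nearly everything this vulkan_core.h hunk adds follows one pattern: an sType/pNext-chainable feature or property struct that the driver fills in during a query, and that can be passed back at device-creation time to turn the feature on. Below is a minimal sketch of that query pattern (not part of the patch), using the VkPhysicalDeviceIndexTypeUint8FeaturesEXT struct declared above; it assumes `physical_device` is a valid handle and the instance targets Vulkan 1.1+, since on a 1.0 instance the vkGetPhysicalDeviceFeatures2KHR alias from VK_KHR_get_physical_device_properties2 would be needed instead.

```cpp
#include <vulkan/vulkan.h>

// Sketch: query an extension feature struct by chaining it into
// VkPhysicalDeviceFeatures2 via pNext. Assumes a valid `physical_device`
// and a Vulkan 1.1+ instance.
bool QueryIndexTypeUint8Support(VkPhysicalDevice physical_device)
{
    VkPhysicalDeviceIndexTypeUint8FeaturesEXT uint8_features = {};
    uint8_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT;

    VkPhysicalDeviceFeatures2 features2 = {};
    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features2.pNext = &uint8_features; // chain the extension struct

    // The driver fills in every struct in the pNext chain that it recognizes.
    vkGetPhysicalDeviceFeatures2(physical_device, &features2);
    return uint8_features.indexTypeUint8 == VK_TRUE;
}
```

The same struct, chained into VkDeviceCreateInfo::pNext with its booleans set, enables the feature on the device being created.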
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_fuchsia.h b/dep/vulkan-loader/include/vulkan/vulkan_fuchsia.h
new file mode 100644
index 000000000..e97901480
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_fuchsia.h
@@ -0,0 +1,57 @@
+#ifndef VULKAN_FUCHSIA_H_
+#define VULKAN_FUCHSIA_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_FUCHSIA_imagepipe_surface 1
+#define VK_FUCHSIA_IMAGEPIPE_SURFACE_SPEC_VERSION 1
+#define VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME "VK_FUCHSIA_imagepipe_surface"
+typedef VkFlags VkImagePipeSurfaceCreateFlagsFUCHSIA;
+typedef struct VkImagePipeSurfaceCreateInfoFUCHSIA {
+    VkStructureType sType;
+    const void* pNext;
+    VkImagePipeSurfaceCreateFlagsFUCHSIA flags;
+    zx_handle_t imagePipeHandle;
+} VkImagePipeSurfaceCreateInfoFUCHSIA;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImagePipeSurfaceFUCHSIA)(VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImagePipeSurfaceFUCHSIA(
+    VkInstance instance,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_ggp.h b/dep/vulkan-loader/include/vulkan/vulkan_ggp.h
new file mode 100644
index 000000000..09b337e6d
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_ggp.h
@@ -0,0 +1,68 @@
+#ifndef VULKAN_GGP_H_
+#define VULKAN_GGP_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_GGP_stream_descriptor_surface 1
+#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_SPEC_VERSION 1
+#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME "VK_GGP_stream_descriptor_surface"
+typedef VkFlags VkStreamDescriptorSurfaceCreateFlagsGGP;
+typedef struct VkStreamDescriptorSurfaceCreateInfoGGP {
+    VkStructureType sType;
+    const void* pNext;
+    VkStreamDescriptorSurfaceCreateFlagsGGP flags;
+    GgpStreamDescriptor streamDescriptor;
+} VkStreamDescriptorSurfaceCreateInfoGGP;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateStreamDescriptorSurfaceGGP)(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateStreamDescriptorSurfaceGGP(
+    VkInstance instance,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+
+
+#define VK_GGP_frame_token 1
+#define VK_GGP_FRAME_TOKEN_SPEC_VERSION 1
+#define VK_GGP_FRAME_TOKEN_EXTENSION_NAME "VK_GGP_frame_token"
+typedef struct VkPresentFrameTokenGGP {
+    VkStructureType sType;
+    const void* pNext;
+    GgpFrameToken frameToken;
+} VkPresentFrameTokenGGP;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_ios.h b/dep/vulkan-loader/include/vulkan/vulkan_ios.h
new file mode 100644
index 000000000..9f8199938
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_ios.h
@@ -0,0 +1,57 @@
+#ifndef VULKAN_IOS_H_
+#define VULKAN_IOS_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_MVK_ios_surface 1
+#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2
+#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface"
+typedef VkFlags VkIOSSurfaceCreateFlagsMVK;
+typedef struct VkIOSSurfaceCreateInfoMVK {
+    VkStructureType sType;
+    const void* pNext;
+    VkIOSSurfaceCreateFlagsMVK flags;
+    const void* pView;
+} VkIOSSurfaceCreateInfoMVK;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(
+    VkInstance instance,
+    const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_macos.h b/dep/vulkan-loader/include/vulkan/vulkan_macos.h
new file mode 100644
index 000000000..8c9684778
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_macos.h
@@ -0,0 +1,57 @@
+#ifndef VULKAN_MACOS_H_
+#define VULKAN_MACOS_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_MVK_macos_surface 1
+#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2
+#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface"
+typedef VkFlags VkMacOSSurfaceCreateFlagsMVK;
+typedef struct VkMacOSSurfaceCreateInfoMVK {
+    VkStructureType sType;
+    const void* pNext;
+    VkMacOSSurfaceCreateFlagsMVK flags;
+    const void* pView;
+} VkMacOSSurfaceCreateInfoMVK;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(
+    VkInstance instance,
+    const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_metal.h b/dep/vulkan-loader/include/vulkan/vulkan_metal.h
new file mode 100644
index 000000000..c85d24387
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_metal.h
@@ -0,0 +1,64 @@
+#ifndef VULKAN_METAL_H_
+#define VULKAN_METAL_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_EXT_metal_surface 1
+
+#ifdef __OBJC__
+@class CAMetalLayer;
+#else
+typedef void CAMetalLayer;
+#endif
+
+#define VK_EXT_METAL_SURFACE_SPEC_VERSION 1
+#define VK_EXT_METAL_SURFACE_EXTENSION_NAME "VK_EXT_metal_surface"
+typedef VkFlags VkMetalSurfaceCreateFlagsEXT;
+typedef struct VkMetalSurfaceCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkMetalSurfaceCreateFlagsEXT flags;
+    const CAMetalLayer* pLayer;
+} VkMetalSurfaceCreateInfoEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMetalSurfaceEXT)(VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(
+    VkInstance instance,
+    const VkMetalSurfaceCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_vi.h b/dep/vulkan-loader/include/vulkan/vulkan_vi.h
new file mode 100644
index 000000000..ee877d9b9
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_vi.h
@@ -0,0 +1,57 @@
+#ifndef VULKAN_VI_H_
+#define VULKAN_VI_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_NN_vi_surface 1
+#define VK_NN_VI_SURFACE_SPEC_VERSION 1
+#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface"
+typedef VkFlags VkViSurfaceCreateFlagsNN;
+typedef struct VkViSurfaceCreateInfoNN {
+    VkStructureType sType;
+    const void* pNext;
+    VkViSurfaceCreateFlagsNN flags;
+    void* window;
+} VkViSurfaceCreateInfoNN;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN(
+    VkInstance instance,
+    const VkViSurfaceCreateInfoNN* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_wayland.h b/dep/vulkan-loader/include/vulkan/vulkan_wayland.h
new file mode 100644
index 000000000..5278c188b
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_wayland.h
@@ -0,0 +1,64 @@
+#ifndef VULKAN_WAYLAND_H_
+#define VULKAN_WAYLAND_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_KHR_wayland_surface 1
+#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6
+#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface"
+typedef VkFlags VkWaylandSurfaceCreateFlagsKHR;
+typedef struct VkWaylandSurfaceCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkWaylandSurfaceCreateFlagsKHR flags;
+    struct wl_display* display;
+    struct wl_surface* surface;
+} VkWaylandSurfaceCreateInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
+    VkInstance instance,
+    const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    struct wl_display* display);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_win32.h b/dep/vulkan-loader/include/vulkan/vulkan_win32.h
new file mode 100644
index 000000000..4408d26de
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_win32.h
@@ -0,0 +1,325 @@
+#ifndef VULKAN_WIN32_H_
+#define VULKAN_WIN32_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_KHR_win32_surface 1
+#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6
+#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface"
+typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
+typedef struct VkWin32SurfaceCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkWin32SurfaceCreateFlagsKHR flags;
+    HINSTANCE hinstance;
+    HWND hwnd;
+} VkWin32SurfaceCreateInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
+    VkInstance instance,
+    const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex);
+#endif
+
+
+#define VK_KHR_external_memory_win32 1
+#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32"
+typedef struct VkImportMemoryWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+    HANDLE handle;
+    LPCWSTR name;
+} VkImportMemoryWin32HandleInfoKHR;
+
+typedef struct VkExportMemoryWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+    LPCWSTR name;
+} VkExportMemoryWin32HandleInfoKHR;
+
+typedef struct VkMemoryWin32HandlePropertiesKHR {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t memoryTypeBits;
+} VkMemoryWin32HandlePropertiesKHR;
+
+typedef struct VkMemoryGetWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceMemory memory;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkMemoryGetWin32HandleInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR(
+    VkDevice device,
+    const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR(
+    VkDevice device,
+    VkExternalMemoryHandleTypeFlagBits handleType,
+    HANDLE handle,
+    VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
+#endif
+
+
+#define VK_KHR_win32_keyed_mutex 1
+#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1
+#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex"
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t acquireCount;
+    const VkDeviceMemory* pAcquireSyncs;
+    const uint64_t* pAcquireKeys;
+    const uint32_t* pAcquireTimeouts;
+    uint32_t releaseCount;
+    const VkDeviceMemory* pReleaseSyncs;
+    const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoKHR;
+
+
+#define VK_KHR_external_semaphore_win32 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32"
+typedef struct VkImportSemaphoreWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphore semaphore;
+    VkSemaphoreImportFlags flags;
+    VkExternalSemaphoreHandleTypeFlagBits handleType;
+    HANDLE handle;
+    LPCWSTR name;
+} VkImportSemaphoreWin32HandleInfoKHR;
+
+typedef struct VkExportSemaphoreWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+    LPCWSTR name;
+} VkExportSemaphoreWin32HandleInfoKHR;
+
+typedef struct VkD3D12FenceSubmitInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreValuesCount;
+    const uint64_t* pWaitSemaphoreValues;
+    uint32_t signalSemaphoreValuesCount;
+    const uint64_t* pSignalSemaphoreValues;
+} VkD3D12FenceSubmitInfoKHR;
+
+typedef struct VkSemaphoreGetWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphore semaphore;
+    VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkSemaphoreGetWin32HandleInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR(
+    VkDevice device,
+    const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR(
+    VkDevice device,
+    const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle);
+#endif
+
+
+#define VK_KHR_external_fence_win32 1
+#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32"
+typedef struct VkImportFenceWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFence fence;
+    VkFenceImportFlags flags;
+    VkExternalFenceHandleTypeFlagBits handleType;
+    HANDLE handle;
+    LPCWSTR name;
+} VkImportFenceWin32HandleInfoKHR;
+
+typedef struct VkExportFenceWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+    LPCWSTR name;
+} VkExportFenceWin32HandleInfoKHR;
+
+typedef struct VkFenceGetWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFence fence;
+    VkExternalFenceHandleTypeFlagBits handleType;
+} VkFenceGetWin32HandleInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR(
+    VkDevice device,
+    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR(
+    VkDevice device,
+    const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle);
+#endif
+
+
+#define VK_NV_external_memory_win32 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32"
+typedef struct VkImportMemoryWin32HandleInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagsNV handleType;
+    HANDLE handle;
+} VkImportMemoryWin32HandleInfoNV;
+
+typedef struct VkExportMemoryWin32HandleInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+} VkExportMemoryWin32HandleInfoNV;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(
+    VkDevice device,
+    VkDeviceMemory memory,
+    VkExternalMemoryHandleTypeFlagsNV handleType,
+    HANDLE* pHandle);
+#endif
+
+
+#define VK_NV_win32_keyed_mutex 1
+#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2
+#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex"
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t acquireCount;
+    const VkDeviceMemory* pAcquireSyncs;
+    const uint64_t* pAcquireKeys;
+    const uint32_t* pAcquireTimeoutMilliseconds;
+    uint32_t releaseCount;
+    const VkDeviceMemory* pReleaseSyncs;
+    const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoNV;
+
+
+#define VK_EXT_full_screen_exclusive 1
+#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4
+#define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME "VK_EXT_full_screen_exclusive"
+
+typedef enum VkFullScreenExclusiveEXT {
+    VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT = 0,
+    VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT = 1,
+    VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT = 2,
+    VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT = 3,
+    VK_FULL_SCREEN_EXCLUSIVE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkFullScreenExclusiveEXT;
+typedef struct VkSurfaceFullScreenExclusiveInfoEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkFullScreenExclusiveEXT fullScreenExclusive;
+} VkSurfaceFullScreenExclusiveInfoEXT;
+
+typedef struct VkSurfaceCapabilitiesFullScreenExclusiveEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 fullScreenExclusiveSupported;
+} VkSurfaceCapabilitiesFullScreenExclusiveEXT;
+
+typedef struct VkSurfaceFullScreenExclusiveWin32InfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    HMONITOR hmonitor;
+} VkSurfaceFullScreenExclusiveWin32InfoEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);
+typedef VkResult (VKAPI_PTR *PFN_vkReleaseFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModes2EXT)(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModes2EXT(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    uint32_t* pPresentModeCount,
+    VkPresentModeKHR* pPresentModes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkReleaseFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModes2EXT(
+    VkDevice device,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    VkDeviceGroupPresentModeFlagsKHR* pModes);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
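The vulkan_win32.h file above supplies the Windows window-system glue. A minimal sketch of the usual first step, creating a VkSurfaceKHR from an existing window (not part of the patch): it assumes the instance was created with VK_KHR_surface and VK_KHR_win32_surface enabled, and GetModuleHandleW(nullptr) stands in for whatever HINSTANCE actually owns the window.

```cpp
// Defining the platform macro before including vulkan.h pulls in windows.h
// and vulkan_win32.h.
#define VK_USE_PLATFORM_WIN32_KHR
#include <vulkan/vulkan.h>

VkSurfaceKHR CreateWin32Surface(VkInstance instance, HWND hwnd)
{
    VkWin32SurfaceCreateInfoKHR create_info = {};
    create_info.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
    create_info.hinstance = GetModuleHandleW(nullptr); // module owning the window (assumption)
    create_info.hwnd = hwnd;

    VkSurfaceKHR surface = VK_NULL_HANDLE;
    if (vkCreateWin32SurfaceKHR(instance, &create_info, nullptr, &surface) != VK_SUCCESS)
        return VK_NULL_HANDLE;
    return surface;
}
```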
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_xcb.h b/dep/vulkan-loader/include/vulkan/vulkan_xcb.h
new file mode 100644
index 000000000..43d1407a8
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_xcb.h
@@ -0,0 +1,65 @@
+#ifndef VULKAN_XCB_H_
+#define VULKAN_XCB_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_KHR_xcb_surface 1
+#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"
+typedef VkFlags VkXcbSurfaceCreateFlagsKHR;
+typedef struct VkXcbSurfaceCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkXcbSurfaceCreateFlagsKHR flags;
+    xcb_connection_t* connection;
+    xcb_window_t window;
+} VkXcbSurfaceCreateInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
+    VkInstance instance,
+    const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    xcb_connection_t* connection,
+    xcb_visualid_t visual_id);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_xlib.h b/dep/vulkan-loader/include/vulkan/vulkan_xlib.h
new file mode 100644
index 000000000..7beada925
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_xlib.h
@@ -0,0 +1,65 @@
+#ifndef VULKAN_XLIB_H_
+#define VULKAN_XLIB_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_KHR_xlib_surface 1
+#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface"
+typedef VkFlags VkXlibSurfaceCreateFlagsKHR;
+typedef struct VkXlibSurfaceCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkXlibSurfaceCreateFlagsKHR flags;
+    Display* dpy;
+    Window window;
+} VkXlibSurfaceCreateInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
+    VkInstance instance,
+    const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    Display* dpy,
+    VisualID visualID);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan/vulkan_xlib_xrandr.h b/dep/vulkan-loader/include/vulkan/vulkan_xlib_xrandr.h
new file mode 100644
index 000000000..8e9e150d8
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan/vulkan_xlib_xrandr.h
@@ -0,0 +1,55 @@
+#ifndef VULKAN_XLIB_XRANDR_H_
+#define VULKAN_XLIB_XRANDR_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define VK_EXT_acquire_xlib_display 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display"
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
+typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    VkDisplayKHR display);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    RROutput rrOutput,
+    VkDisplayKHR* pDisplay);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/dep/vulkan-loader/include/vulkan_entry_points.inl b/dep/vulkan-loader/include/vulkan_entry_points.inl
new file mode 100644
index 000000000..84b58e38d
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan_entry_points.inl
@@ -0,0 +1,202 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
+
+// Expands the VULKAN_ENTRY_POINT macro for each function when this file is included.
+// Parameters: Function name, is required
+// VULKAN_MODULE_ENTRY_POINT is for functions in vulkan-1.dll
+// VULKAN_INSTANCE_ENTRY_POINT is for instance-specific functions.
+// VULKAN_DEVICE_ENTRY_POINT is for device-specific functions.
+VULKAN_INSTANCE_ENTRY_POINT(vkCreateXlibSurfaceKHR, false) +VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceXlibPresentationSupportKHR, false) +#endif + +#if defined(VK_USE_PLATFORM_ANDROID_KHR) +VULKAN_INSTANCE_ENTRY_POINT(vkCreateAndroidSurfaceKHR, false) +#endif + +#if defined(VK_USE_PLATFORM_METAL_EXT) +VULKAN_INSTANCE_ENTRY_POINT(vkCreateMetalSurfaceEXT, false) +#endif + +VULKAN_INSTANCE_ENTRY_POINT(vkCreateDebugReportCallbackEXT, false) +VULKAN_INSTANCE_ENTRY_POINT(vkDestroyDebugReportCallbackEXT, false) +VULKAN_INSTANCE_ENTRY_POINT(vkDebugReportMessageEXT, false) +VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceProperties2, false) +VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceSurfaceCapabilities2KHR, false) + +#endif // VULKAN_INSTANCE_ENTRY_POINT + +#ifdef VULKAN_DEVICE_ENTRY_POINT + +VULKAN_DEVICE_ENTRY_POINT(vkDestroyDevice, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetDeviceQueue, true) +VULKAN_DEVICE_ENTRY_POINT(vkQueueSubmit, true) +VULKAN_DEVICE_ENTRY_POINT(vkQueueWaitIdle, true) +VULKAN_DEVICE_ENTRY_POINT(vkDeviceWaitIdle, true) +VULKAN_DEVICE_ENTRY_POINT(vkAllocateMemory, true) +VULKAN_DEVICE_ENTRY_POINT(vkFreeMemory, true) +VULKAN_DEVICE_ENTRY_POINT(vkMapMemory, true) +VULKAN_DEVICE_ENTRY_POINT(vkUnmapMemory, true) +VULKAN_DEVICE_ENTRY_POINT(vkFlushMappedMemoryRanges, true) +VULKAN_DEVICE_ENTRY_POINT(vkInvalidateMappedMemoryRanges, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetDeviceMemoryCommitment, true) +VULKAN_DEVICE_ENTRY_POINT(vkBindBufferMemory, true) +VULKAN_DEVICE_ENTRY_POINT(vkBindImageMemory, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetBufferMemoryRequirements, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetImageMemoryRequirements, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetImageSparseMemoryRequirements, true) +VULKAN_DEVICE_ENTRY_POINT(vkQueueBindSparse, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateFence, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyFence, true) +VULKAN_DEVICE_ENTRY_POINT(vkResetFences, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetFenceStatus, true) +VULKAN_DEVICE_ENTRY_POINT(vkWaitForFences, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateSemaphore, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroySemaphore, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateEvent, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyEvent, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetEventStatus, true) +VULKAN_DEVICE_ENTRY_POINT(vkSetEvent, true) +VULKAN_DEVICE_ENTRY_POINT(vkResetEvent, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateQueryPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyQueryPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetQueryPoolResults, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateBufferView, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyBufferView, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateImage, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyImage, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetImageSubresourceLayout, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateImageView, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyImageView, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateShaderModule, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyShaderModule, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreatePipelineCache, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyPipelineCache, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetPipelineCacheData, true) +VULKAN_DEVICE_ENTRY_POINT(vkMergePipelineCaches, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateGraphicsPipelines, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateComputePipelines, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyPipeline, true) 
+VULKAN_DEVICE_ENTRY_POINT(vkCreatePipelineLayout, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyPipelineLayout, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateSampler, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroySampler, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateDescriptorSetLayout, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyDescriptorSetLayout, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateDescriptorPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyDescriptorPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkResetDescriptorPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkAllocateDescriptorSets, true) +VULKAN_DEVICE_ENTRY_POINT(vkFreeDescriptorSets, true) +VULKAN_DEVICE_ENTRY_POINT(vkUpdateDescriptorSets, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateFramebuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyFramebuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateRenderPass, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyRenderPass, true) +VULKAN_DEVICE_ENTRY_POINT(vkGetRenderAreaGranularity, true) +VULKAN_DEVICE_ENTRY_POINT(vkCreateCommandPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkDestroyCommandPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkResetCommandPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkAllocateCommandBuffers, true) +VULKAN_DEVICE_ENTRY_POINT(vkFreeCommandBuffers, true) +VULKAN_DEVICE_ENTRY_POINT(vkBeginCommandBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkEndCommandBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkResetCommandBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdBindPipeline, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetViewport, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetScissor, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetLineWidth, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetDepthBias, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetBlendConstants, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetDepthBounds, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetStencilCompareMask, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetStencilWriteMask, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetStencilReference, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdBindDescriptorSets, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdBindIndexBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdBindVertexBuffers, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdDraw, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdDrawIndexed, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdDrawIndirect, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdDrawIndexedIndirect, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdDispatch, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdDispatchIndirect, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdCopyBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdCopyImage, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdBlitImage, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdCopyBufferToImage, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdCopyImageToBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdUpdateBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdFillBuffer, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdClearColorImage, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdClearDepthStencilImage, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdClearAttachments, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdResolveImage, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdSetEvent, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdResetEvent, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdWaitEvents, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdPipelineBarrier, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdBeginQuery, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdEndQuery, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdResetQueryPool, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdWriteTimestamp, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdCopyQueryPoolResults, true) +VULKAN_DEVICE_ENTRY_POINT(vkCmdPushConstants, true) 
+VULKAN_DEVICE_ENTRY_POINT(vkCmdBeginRenderPass, true)
+VULKAN_DEVICE_ENTRY_POINT(vkCmdNextSubpass, true)
+VULKAN_DEVICE_ENTRY_POINT(vkCmdEndRenderPass, true)
+VULKAN_DEVICE_ENTRY_POINT(vkCmdExecuteCommands, true)
+VULKAN_DEVICE_ENTRY_POINT(vkCreateSwapchainKHR, false)
+VULKAN_DEVICE_ENTRY_POINT(vkDestroySwapchainKHR, false)
+VULKAN_DEVICE_ENTRY_POINT(vkGetSwapchainImagesKHR, false)
+VULKAN_DEVICE_ENTRY_POINT(vkAcquireNextImageKHR, false)
+VULKAN_DEVICE_ENTRY_POINT(vkQueuePresentKHR, false)
+
+#ifdef SUPPORTS_VULKAN_EXCLUSIVE_FULLSCREEN
+VULKAN_DEVICE_ENTRY_POINT(vkAcquireFullScreenExclusiveModeEXT, false)
+VULKAN_DEVICE_ENTRY_POINT(vkReleaseFullScreenExclusiveModeEXT, false)
+#endif
+
+#endif // VULKAN_DEVICE_ENTRY_POINT
diff --git a/dep/vulkan-loader/include/vulkan_loader.h b/dep/vulkan-loader/include/vulkan_loader.h
new file mode 100644
index 000000000..7676a0b4f
--- /dev/null
+++ b/dep/vulkan-loader/include/vulkan_loader.h
@@ -0,0 +1,79 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
+
+#pragma once
+
+#define VK_NO_PROTOTYPES
+
+#if defined(WIN32)
+
+#define VK_USE_PLATFORM_WIN32_KHR
+
+// vulkan.h pulls in windows.h on Windows, so we need to include our replacement header first
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN 1
+#endif
+#ifndef NOMINMAX
+#define NOMINMAX 1
+#endif
+
+// require vista+
+#ifdef _WIN32_WINNT
+#undef _WIN32_WINNT
+#endif
+#define _WIN32_WINNT _WIN32_WINNT_VISTA
+
+#include <windows.h>
+
+#endif
+
+#if defined(VULKAN_USE_X11)
+#define VK_USE_PLATFORM_XLIB_KHR
+#endif
+
+#if defined(ANDROID)
+#define VK_USE_PLATFORM_ANDROID_KHR
+#endif
+
+#if defined(__APPLE__)
+#define VK_USE_PLATFORM_METAL_EXT
+#endif
+
+#include "vulkan/vulkan.h"
+
+// Currently, exclusive fullscreen is only supported on Windows.
+#if defined(WIN32)
+#define SUPPORTS_VULKAN_EXCLUSIVE_FULLSCREEN 1
+#endif
+
+#if defined(VULKAN_USE_X11)
+
+// This breaks a bunch of our code. They shouldn't be #defines in the first place.
+#ifdef None
+#undef None
+#endif
+#ifdef Status
+#undef Status
+#endif
+
+#endif
+
+// We abuse the preprocessor here to only need to specify function names once.
+#define VULKAN_MODULE_ENTRY_POINT(name, required) extern PFN_##name name;
+#define VULKAN_INSTANCE_ENTRY_POINT(name, required) extern PFN_##name name;
+#define VULKAN_DEVICE_ENTRY_POINT(name, required) extern PFN_##name name;
+#include "vulkan_entry_points.inl"
+#undef VULKAN_DEVICE_ENTRY_POINT
+#undef VULKAN_INSTANCE_ENTRY_POINT
+#undef VULKAN_MODULE_ENTRY_POINT
+
+namespace Vulkan {
+
+bool LoadVulkanLibrary();
+bool LoadVulkanInstanceFunctions(VkInstance instance);
+bool LoadVulkanDeviceFunctions(VkDevice device);
+void UnloadVulkanLibrary();
+
+} // namespace Vulkan
diff --git a/dep/vulkan-loader/src/vulkan_loader.cpp b/dep/vulkan-loader/src/vulkan_loader.cpp
new file mode 100644
index 000000000..37ce1dc34
--- /dev/null
+++ b/dep/vulkan-loader/src/vulkan_loader.cpp
@@ -0,0 +1,215 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
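+
+// vulkan_entry_points.inl is an X-macro list: each VULKAN_*_ENTRY_POINT(name, required)
+// line expands to whatever definition is in scope at the point of inclusion. With the
+// definitions below, for example,
+//   VULKAN_MODULE_ENTRY_POINT(vkCreateInstance, true)
+// becomes the storage definition `PFN_vkCreateInstance vkCreateInstance;`, pairing with
+// the `extern` declaration that vulkan_loader.h generates from the same list. The second
+// argument is consumed by the Load* functions below, which only fail when a required
+// entry point cannot be resolved.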
+
+#include <atomic>
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+
+#include "vulkan_loader.h"
+
+#ifndef WIN32
+#include <dlfcn.h>
+#endif
+
+#define VULKAN_MODULE_ENTRY_POINT(name, required) PFN_##name name;
+#define VULKAN_INSTANCE_ENTRY_POINT(name, required) PFN_##name name;
+#define VULKAN_DEVICE_ENTRY_POINT(name, required) PFN_##name name;
+#include "vulkan_entry_points.inl"
+#undef VULKAN_DEVICE_ENTRY_POINT
+#undef VULKAN_INSTANCE_ENTRY_POINT
+#undef VULKAN_MODULE_ENTRY_POINT
+
+namespace Vulkan {
+static void ResetVulkanLibraryFunctionPointers()
+{
+#define VULKAN_MODULE_ENTRY_POINT(name, required) name = nullptr;
+#define VULKAN_INSTANCE_ENTRY_POINT(name, required) name = nullptr;
+#define VULKAN_DEVICE_ENTRY_POINT(name, required) name = nullptr;
+#include "vulkan_entry_points.inl"
+#undef VULKAN_DEVICE_ENTRY_POINT
+#undef VULKAN_INSTANCE_ENTRY_POINT
+#undef VULKAN_MODULE_ENTRY_POINT
+}
+
+#if defined(_WIN32)
+
+static HMODULE vulkan_module;
+static std::atomic_int vulkan_module_ref_count = {0};
+
+bool LoadVulkanLibrary()
+{
+  // Not thread safe if a second thread calls the loader whilst the first is still in-progress.
+  if (vulkan_module)
+  {
+    vulkan_module_ref_count++;
+    return true;
+  }
+
+  vulkan_module = LoadLibraryA("vulkan-1.dll");
+  if (!vulkan_module)
+  {
+    std::fprintf(stderr, "Failed to load vulkan-1.dll\n");
+    return false;
+  }
+
+  bool required_functions_missing = false;
+  auto LoadFunction = [&](FARPROC* func_ptr, const char* name, bool is_required) {
+    *func_ptr = GetProcAddress(vulkan_module, name);
+    if (!(*func_ptr) && is_required)
+    {
+      std::fprintf(stderr, "Vulkan: Failed to load required module function %s\n", name);
+      required_functions_missing = true;
+    }
+  };
+
+#define VULKAN_MODULE_ENTRY_POINT(name, required) LoadFunction(reinterpret_cast<FARPROC*>(&name), #name, required);
+#include "vulkan_entry_points.inl"
+#undef VULKAN_MODULE_ENTRY_POINT
+
+  if (required_functions_missing)
+  {
+    ResetVulkanLibraryFunctionPointers();
+    FreeLibrary(vulkan_module);
+    vulkan_module = nullptr;
+    return false;
+  }
+
+  vulkan_module_ref_count++;
+  return true;
+}
+
+void UnloadVulkanLibrary()
+{
+  if ((--vulkan_module_ref_count) > 0)
+    return;
+
+  ResetVulkanLibraryFunctionPointers();
+  FreeLibrary(vulkan_module);
+  vulkan_module = nullptr;
+}
+
+#else
+
+static void* vulkan_module;
+static std::atomic_int vulkan_module_ref_count = {0};
+
+bool LoadVulkanLibrary()
+{
+  // Not thread safe if a second thread calls the loader whilst the first is still in-progress.
+  if (vulkan_module)
+  {
+    vulkan_module_ref_count++;
+    return true;
+  }
+
+#if defined(__APPLE__)
+  // Check if a path to a specific Vulkan library has been specified.
+  char* libvulkan_env = getenv("LIBVULKAN_PATH");
+  if (libvulkan_env)
+    vulkan_module = dlopen(libvulkan_env, RTLD_NOW);
+  if (!vulkan_module)
+  {
+    // Use the libvulkan.dylib from the application bundle.
+    std::string executable_path = FileSystem::GetProgramPath();
+    std::string path = FileSystem::GetPathDirectory(executable_path.c_str()) + "/libvulkan.dylib";
+    vulkan_module = dlopen(path.c_str(), RTLD_NOW);
+  }
+#else
+  // Names of libraries to search. Desktop should use libvulkan.so.1 or libvulkan.so.
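+  // libvulkan.so.1 is tried first because the versioned soname is what distribution
+  // runtime packages install; the unversioned libvulkan.so symlink typically only
+  // exists when the Vulkan SDK or a development package is present.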
+  static const char* search_lib_names[] = {"libvulkan.so.1", "libvulkan.so"};
+  for (size_t i = 0; i < sizeof(search_lib_names) / sizeof(search_lib_names[0]); i++)
+  {
+    vulkan_module = dlopen(search_lib_names[i], RTLD_NOW);
+    if (vulkan_module)
+      break;
+  }
+#endif
+
+  if (!vulkan_module)
+  {
+    std::fprintf(stderr, "Failed to load or locate libvulkan.so\n");
+    return false;
+  }
+
+  bool required_functions_missing = false;
+  auto LoadFunction = [&](void** func_ptr, const char* name, bool is_required) {
+    *func_ptr = dlsym(vulkan_module, name);
+    if (!(*func_ptr) && is_required)
+    {
+      std::fprintf(stderr, "Vulkan: Failed to load required module function %s\n", name);
+      required_functions_missing = true;
+    }
+  };
+
+#define VULKAN_MODULE_ENTRY_POINT(name, required) LoadFunction(reinterpret_cast<void**>(&name), #name, required);
+#include "vulkan_entry_points.inl"
+#undef VULKAN_MODULE_ENTRY_POINT
+
+  if (required_functions_missing)
+  {
+    ResetVulkanLibraryFunctionPointers();
+    dlclose(vulkan_module);
+    vulkan_module = nullptr;
+    return false;
+  }
+
+  vulkan_module_ref_count++;
+  return true;
+}
+
+void UnloadVulkanLibrary()
+{
+  if ((--vulkan_module_ref_count) > 0)
+    return;
+
+  ResetVulkanLibraryFunctionPointers();
+  dlclose(vulkan_module);
+  vulkan_module = nullptr;
+}
+
+#endif
+
+bool LoadVulkanInstanceFunctions(VkInstance instance)
+{
+  bool required_functions_missing = false;
+  auto LoadFunction = [&](PFN_vkVoidFunction* func_ptr, const char* name, bool is_required) {
+    *func_ptr = vkGetInstanceProcAddr(instance, name);
+    if (!(*func_ptr) && is_required)
+    {
+      std::fprintf(stderr, "Vulkan: Failed to load required instance function %s\n", name);
+      required_functions_missing = true;
+    }
+  };
+
+#define VULKAN_INSTANCE_ENTRY_POINT(name, required) \
+  LoadFunction(reinterpret_cast<PFN_vkVoidFunction*>(&name), #name, required);
+#include "vulkan_entry_points.inl"
+#undef VULKAN_INSTANCE_ENTRY_POINT
+
+  return !required_functions_missing;
+}
+
+bool LoadVulkanDeviceFunctions(VkDevice device)
+{
+  bool required_functions_missing = false;
+  auto LoadFunction = [&](PFN_vkVoidFunction* func_ptr, const char* name, bool is_required) {
+    *func_ptr = vkGetDeviceProcAddr(device, name);
+    if (!(*func_ptr) && is_required)
+    {
+      std::fprintf(stderr, "Vulkan: Failed to load required device function %s\n", name);
+      required_functions_missing = true;
+    }
+  };
+
+#define VULKAN_DEVICE_ENTRY_POINT(name, required) \
+  LoadFunction(reinterpret_cast<PFN_vkVoidFunction*>(&name), #name, required);
+#include "vulkan_entry_points.inl"
+#undef VULKAN_DEVICE_ENTRY_POINT
+
+  return !required_functions_missing;
+}
+
+} // namespace Vulkan
diff --git a/dep/vulkan-loader/vulkan-loader.vcxproj b/dep/vulkan-loader/vulkan-loader.vcxproj
new file mode 100644
index 000000000..edc9d8a1c
--- /dev/null
+++ b/dep/vulkan-loader/vulkan-loader.vcxproj
@@ -0,0 +1,403 @@
+ + + + + DebugFast + Win32 + + + DebugFast + x64 + + + Debug + Win32 + + + Debug + x64 + + + ReleaseLTCG + Win32 + + + ReleaseLTCG + x64 + + + Release + Win32 + + + Release + x64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035} + Win32Proj + vulkan-loader + 10.0 + + + + StaticLibrary + true + v142 + NotSet + + + StaticLibrary + true + v142 + NotSet + + + StaticLibrary + true + v142 + NotSet + + + StaticLibrary + true + v142 + NotSet + + + StaticLibrary + false + v142 + true + NotSet + false + + + StaticLibrary + false + v142 + true + NotSet + false + + + StaticLibrary + false + v142 + true + NotSet + false + + + StaticLibrary + false + v142 + true + NotSet + 
false + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(ProjectName)-$(Platform)-$(Configuration) + + + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(ProjectName)-$(Platform)-$(Configuration) + true + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + + + true + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(ProjectName)-$(Platform)-$(Configuration) + + + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(ProjectName)-$(Platform)-$(Configuration) + true + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + + + false + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(ProjectName)-$(Platform)-$(Configuration) + + + false + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(ProjectName)-$(Platform)-$(Configuration) + + + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(ProjectName)-$(Platform)-$(Configuration) + false + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + + + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + $(ProjectName)-$(Platform)-$(Configuration) + false + $(SolutionDir)build\$(ProjectName)-$(Platform)-$(Configuration)\ + + + + + + TurnOffAllWarnings + Disabled + _CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + true + ProgramDatabase + $(ProjectDir)\include;%(AdditionalIncludeDirectories) + false + stdcpp14 + true + true + + + Windows + true + SDL2.lib;SDL2main.lib;%(AdditionalDependencies) + $(SolutionDir)dep\lib32-debug;%(AdditionalLibraryDirectories) + + + + + + + + TurnOffAllWarnings + Disabled + _CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + true + ProgramDatabase + $(ProjectDir)\include;%(AdditionalIncludeDirectories) + false + stdcpp14 + true + true + + + Windows + true + SDL2.lib;SDL2main.lib;%(AdditionalDependencies) + $(SolutionDir)dep\lib64-debug;%(AdditionalLibraryDirectories) + + + + + + + + TurnOffAllWarnings + Disabled + _CRT_NONSTDC_NO_DEPRECATE;_ITERATOR_DEBUG_LEVEL=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUGFAST;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + true + ProgramDatabase + $(ProjectDir)\include;%(AdditionalIncludeDirectories) + Default + false + stdcpp14 + false + true + OnlyExplicitInline + true + + + Windows + true + SDL2.lib;SDL2main.lib;%(AdditionalDependencies) + $(SolutionDir)dep\lib32-debug;%(AdditionalLibraryDirectories) + + + + + + + + TurnOffAllWarnings + Disabled + _CRT_NONSTDC_NO_DEPRECATE;_ITERATOR_DEBUG_LEVEL=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUGFAST;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + true + ProgramDatabase + $(ProjectDir)\include;%(AdditionalIncludeDirectories) + Default + false + stdcpp14 + false + true + OnlyExplicitInline + true + + + Windows + true + SDL2.lib;SDL2main.lib;%(AdditionalDependencies) + $(SolutionDir)dep\lib64-debug;%(AdditionalLibraryDirectories) + + + + + + TurnOffAllWarnings + + + MaxSpeed + true + _CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + 
$(ProjectDir)\include;%(AdditionalIncludeDirectories) + false + stdcpp14 + true + true + + + Windows + true + true + true + SDL2.lib;SDL2main.lib;%(AdditionalDependencies) + $(SolutionDir)dep\lib32;%(AdditionalLibraryDirectories) + + + + + + TurnOffAllWarnings + + + MaxSpeed + true + _CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + $(ProjectDir)\include;%(AdditionalIncludeDirectories) + true + stdcpp14 + true + true + true + + + Windows + true + true + true + SDL2.lib;SDL2main.lib;%(AdditionalDependencies) + $(SolutionDir)dep\lib32;%(AdditionalLibraryDirectories) + + + + + + TurnOffAllWarnings + + + MaxSpeed + true + _CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + $(ProjectDir)\include;%(AdditionalIncludeDirectories) + false + stdcpp14 + true + true + + + Windows + true + true + true + SDL2.lib;SDL2main.lib;%(AdditionalDependencies) + $(SolutionDir)dep\lib64;%(AdditionalLibraryDirectories) + + + + + + TurnOffAllWarnings + + + MaxSpeed + true + _CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + $(ProjectDir)\include;%(AdditionalIncludeDirectories) + true + stdcpp14 + true + true + true + + + Windows + true + true + true + SDL2.lib;SDL2main.lib;%(AdditionalDependencies) + $(SolutionDir)dep\lib64;%(AdditionalLibraryDirectories) + + + + + + + \ No newline at end of file diff --git a/dep/vulkan-loader/vulkan-loader.vcxproj.filters b/dep/vulkan-loader/vulkan-loader.vcxproj.filters new file mode 100644 index 000000000..548474ca8 --- /dev/null +++ b/dep/vulkan-loader/vulkan-loader.vcxproj.filters @@ -0,0 +1,74 @@ + + + + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + + + {b9d919db-dff6-4d4e-b6fa-36ecaa507aea} + + + + + + + + + \ No newline at end of file diff --git a/duckstation.sln b/duckstation.sln index fee283659..8179f7aee 100644 --- a/duckstation.sln +++ b/duckstation.sln @@ -53,6 +53,10 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "discord-rpc", "dep\discord- EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "duckstation-libretro", "src\duckstation-libretro\duckstation-libretro.vcxproj", "{9D206548-DE8F-4D9D-A561-C7E5CD7A20DF}" EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "glslang", "dep\glslang\glslang.vcxproj", "{7F909E29-4808-4BD9-A60C-56C51A3AAEC2}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "vulkan-loader", "dep\vulkan-loader\vulkan-loader.vcxproj", "{9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 @@ -449,6 +453,38 @@ Global {9D206548-DE8F-4D9D-A561-C7E5CD7A20DF}.ReleaseLTCG|x64.Build.0 = ReleaseLTCG|x64 {9D206548-DE8F-4D9D-A561-C7E5CD7A20DF}.ReleaseLTCG|x86.ActiveCfg = ReleaseLTCG|Win32 {9D206548-DE8F-4D9D-A561-C7E5CD7A20DF}.ReleaseLTCG|x86.Build.0 = ReleaseLTCG|Win32 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.Debug|x64.ActiveCfg = Debug|x64 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.Debug|x64.Build.0 = Debug|x64 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.Debug|x86.ActiveCfg = Debug|Win32 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.Debug|x86.Build.0 = Debug|Win32 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.DebugFast|x64.ActiveCfg = DebugFast|x64 + 
{7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.DebugFast|x64.Build.0 = DebugFast|x64 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.DebugFast|x86.ActiveCfg = DebugFast|Win32 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.DebugFast|x86.Build.0 = DebugFast|Win32 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.Release|x64.ActiveCfg = Release|x64 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.Release|x64.Build.0 = Release|x64 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.Release|x86.ActiveCfg = Release|Win32 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.Release|x86.Build.0 = Release|Win32 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.ReleaseLTCG|x64.ActiveCfg = ReleaseLTCG|x64 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.ReleaseLTCG|x64.Build.0 = ReleaseLTCG|x64 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.ReleaseLTCG|x86.ActiveCfg = ReleaseLTCG|Win32 + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2}.ReleaseLTCG|x86.Build.0 = ReleaseLTCG|Win32 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.Debug|x64.ActiveCfg = Debug|x64 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.Debug|x64.Build.0 = Debug|x64 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.Debug|x86.ActiveCfg = Debug|Win32 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.Debug|x86.Build.0 = Debug|Win32 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.DebugFast|x64.ActiveCfg = DebugFast|x64 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.DebugFast|x64.Build.0 = DebugFast|x64 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.DebugFast|x86.ActiveCfg = DebugFast|Win32 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.DebugFast|x86.Build.0 = DebugFast|Win32 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.Release|x64.ActiveCfg = Release|x64 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.Release|x64.Build.0 = Release|x64 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.Release|x86.ActiveCfg = Release|Win32 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.Release|x86.Build.0 = Release|Win32 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.ReleaseLTCG|x64.ActiveCfg = ReleaseLTCG|x64 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.ReleaseLTCG|x64.Build.0 = ReleaseLTCG|x64 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.ReleaseLTCG|x86.ActiveCfg = ReleaseLTCG|Win32 + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035}.ReleaseLTCG|x86.Build.0 = ReleaseLTCG|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -470,6 +506,8 @@ Global {09553C96-9F39-49BF-8AE6-7ACBD07C410C} = {BA490C0E-497D-4634-A21E-E65012006385} {49953E1B-2EF7-46A4-B88B-1BF9E099093B} = {BA490C0E-497D-4634-A21E-E65012006385} {4266505B-DBAF-484B-AB31-B53B9C8235B3} = {BA490C0E-497D-4634-A21E-E65012006385} + {7F909E29-4808-4BD9-A60C-56C51A3AAEC2} = {BA490C0E-497D-4634-A21E-E65012006385} + {9C8DDEB0-2B8F-4F5F-BA86-127CDF27F035} = {BA490C0E-497D-4634-A21E-E65012006385} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {26E40B32-7C1D-48D0-95F4-1A500E054028} diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 768174854..218ac4541 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -53,6 +53,7 @@ add_library(common rectangle.h progress_callback.cpp progress_callback.h + scope_guard.h state_wrapper.cpp state_wrapper.h string.cpp @@ -64,13 +65,33 @@ add_library(common timestamp.cpp timestamp.h types.h + vulkan/builders.cpp + vulkan/builders.h + vulkan/context.cpp + vulkan/context.h + vulkan/shader_cache.cpp + vulkan/shader_cache.h + vulkan/shader_compiler.cpp + vulkan/shader_compiler.h + vulkan/staging_buffer.cpp + vulkan/staging_buffer.h + vulkan/staging_texture.cpp + vulkan/staging_texture.h + vulkan/stream_buffer.cpp + vulkan/stream_buffer.h + 
vulkan/swap_chain.cpp + vulkan/swap_chain.h + vulkan/texture.cpp + vulkan/texture.h + vulkan/util.cpp + vulkan/util.h wav_writer.cpp wav_writer.h ) target_include_directories(common PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/..") target_include_directories(common PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/..") -target_link_libraries(common PRIVATE glad libcue Threads::Threads cubeb libchdr) +target_link_libraries(common PRIVATE glad libcue Threads::Threads cubeb libchdr glslang vulkan-loader) if(WIN32) target_sources(common PRIVATE diff --git a/src/common/common.vcxproj b/src/common/common.vcxproj index 57f492b4c..de0c5284b 100644 --- a/src/common/common.vcxproj +++ b/src/common/common.vcxproj @@ -70,6 +70,7 @@ + @@ -77,6 +78,16 @@ + + + + + + + + + + @@ -116,6 +127,16 @@ + + + + + + + + + + @@ -128,6 +149,9 @@ {43540154-9e1e-409c-834f-b84be5621388} + + {7f909e29-4808-4bd9-a60c-56c51a3aaec2} + {425d6c99-d1c8-43c2-b8ac-4d7b1d941017} @@ -276,7 +300,7 @@ WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories) true stdcpp17 true @@ -302,7 +326,7 @@ _ITERATOR_DEBUG_LEVEL=1;WIN32;_DEBUGFAST;_DEBUG;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories) Default false true @@ -331,7 +355,7 @@ WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories) true stdcpp17 true @@ -357,7 +381,7 @@ _ITERATOR_DEBUG_LEVEL=1;WIN32;_DEBUGFAST;_DEBUG;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories) Default false true @@ -387,7 +411,7 @@ true WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) true - $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + 
$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories) + true stdcpp17 false @@ -417,7 +441,7 @@ true WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) true - $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories) + true true stdcpp17 @@ -447,7 +471,7 @@ true WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) true - $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories) + true stdcpp17 false @@ -477,7 +501,7 @@ true WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) true - $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories) + true true stdcpp17
diff --git a/src/common/common.vcxproj.filters b/src/common/common.vcxproj.filters
index ea31c228d..a98013549 100644
--- a/src/common/common.vcxproj.filters
+++ b/src/common/common.vcxproj.filters
@@ -66,7 +66,38 @@ + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + + vulkan + + + vulkan + + + vulkan + @@ -127,6 +158,36 @@ gl + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + + + vulkan + @@ -138,5 +199,8 @@ {30251086-81f3-44f5-add4-7ff9a24098ab} + + {642ff5eb-af39-4aab-a42f-6eb8188a11d7} + \ No newline at end of file
diff --git a/src/common/file_system.cpp b/src/common/file_system.cpp
index b7b85bb1a..ecdb9a90b 100644
--- a/src/common/file_system.cpp
+++ b/src/common/file_system.cpp
@@ -401,6 +401,37 @@ std::FILE* OpenCFile(const char* filename, const char* mode)
 #endif
 }
 
+std::optional<std::vector<u8>> ReadBinaryFile(const char* filename)
+{
+  ManagedCFilePtr fp = OpenManagedCFile(filename, "rb");
+  if (!fp)
+    return std::nullopt;
+
+  std::fseek(fp.get(), 0, SEEK_END);
+  long size = std::ftell(fp.get());
+  std::fseek(fp.get(), 0, SEEK_SET);
+  if (size < 0)
+    return std::nullopt;
+
+  std::vector<u8> res(static_cast<size_t>(size));
+  if (size > 0 && std::fread(res.data(), 1u, static_cast<size_t>(size), fp.get()) != static_cast<size_t>(size))
+    return std::nullopt;
+
+  return res;
+}
+
+bool WriteBinaryFile(const char* filename, const void* data, size_t data_length)
+{
+  ManagedCFilePtr fp = OpenManagedCFile(filename, "wb");
+  if (!fp)
+    return false;
+
+  if (data_length > 0 && std::fwrite(data, 1u, data_length, fp.get()) != data_length)
+    return false;
+
+  return true;
+}
+
 void BuildOSPath(char* Destination, u32
cbDestination, const char* Path)
 {
   u32 i;
diff --git a/src/common/file_system.h b/src/common/file_system.h
index bb972e272..a2202f290 100644
--- a/src/common/file_system.h
+++ b/src/common/file_system.h
@@ -3,6 +3,7 @@
 #include "types.h"
 #include <cstdio>
 #include <memory>
+#include <optional>
 #include <string>
 #include <vector>
 
@@ -168,6 +169,9 @@ using ManagedCFilePtr = std::unique_ptr<std::FILE, void (*)(std::FILE*)>;
 ManagedCFilePtr OpenManagedCFile(const char* filename, const char* mode);
 std::FILE* OpenCFile(const char* filename, const char* mode);
 
+std::optional<std::vector<u8>> ReadBinaryFile(const char* filename);
+bool WriteBinaryFile(const char* filename, const void* data, size_t data_length);
+
 // creates a directory in the local filesystem
 // if the directory already exists, the return value will be true.
 // if Recursive is specified, all parent directories will be created
diff --git a/src/common/scope_guard.h b/src/common/scope_guard.h
new file mode 100644
index 000000000..c1cad06dd
--- /dev/null
+++ b/src/common/scope_guard.h
@@ -0,0 +1,41 @@
+// Copyright 2015 Dolphin Emulator Project
+// Licensed under GPLv2+
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <optional>
+
+namespace Common
+{
+template <typename Callable>
+class ScopeGuard final
+{
+public:
+  ScopeGuard(Callable&& finalizer) : m_finalizer(std::forward<Callable>(finalizer)) {}
+
+  ScopeGuard(ScopeGuard&& other) : m_finalizer(std::move(other.m_finalizer))
+  {
+    other.m_finalizer.reset();
+  }
+
+  ~ScopeGuard() { Exit(); }
+  void Dismiss() { m_finalizer.reset(); }
+  void Exit()
+  {
+    if (m_finalizer)
+    {
+      (*m_finalizer)(); // must not throw
+      m_finalizer.reset();
+    }
+  }
+
+  ScopeGuard(const ScopeGuard&) = delete;
+
+  void operator=(const ScopeGuard&) = delete;
+
+private:
+  std::optional<Callable> m_finalizer;
+};
+
+} // Namespace Common
diff --git a/src/common/vulkan/builders.cpp b/src/common/vulkan/builders.cpp
new file mode 100644
index 000000000..349b9263a
--- /dev/null
+++ b/src/common/vulkan/builders.cpp
@@ -0,0 +1,740 @@
+#include "builders.h"
+#include "../assert.h"
+#include "util.h"
+
+namespace Vulkan {
+
+DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder()
+{
+  Clear();
+}
+
+void DescriptorSetLayoutBuilder::Clear()
+{
+  m_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+  m_ci.pNext = nullptr;
+  m_ci.flags = 0;
+  m_ci.pBindings = nullptr;
+  m_ci.bindingCount = 0;
+}
+
+VkDescriptorSetLayout DescriptorSetLayoutBuilder::Create(VkDevice device)
+{
+  VkDescriptorSetLayout layout;
+  VkResult res = vkCreateDescriptorSetLayout(device, &m_ci, nullptr, &layout);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreateDescriptorSetLayout() failed: ");
+    return VK_NULL_HANDLE;
+  }
+
+  Clear();
+  return layout;
+}
+
+void DescriptorSetLayoutBuilder::AddBinding(u32 binding, VkDescriptorType dtype, u32 dcount, VkShaderStageFlags stages)
+{
+  Assert(m_ci.bindingCount < MAX_BINDINGS);
+
+  VkDescriptorSetLayoutBinding& b = m_bindings[m_ci.bindingCount];
+  b.binding = binding;
+  b.descriptorType = dtype;
+  b.descriptorCount = dcount;
+  b.stageFlags = stages;
+  b.pImmutableSamplers = nullptr;
+
+  m_ci.pBindings = m_bindings.data();
+  m_ci.bindingCount++;
+}
+
+PipelineLayoutBuilder::PipelineLayoutBuilder()
+{
+  Clear();
+}
+
+void PipelineLayoutBuilder::Clear()
+{
+  m_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+  m_ci.pNext = nullptr;
+  m_ci.flags = 0;
+  m_ci.pSetLayouts = nullptr;
+  m_ci.setLayoutCount = 0;
+  m_ci.pPushConstantRanges = nullptr;
+  m_ci.pushConstantRangeCount = 0;
+}
+
+VkPipelineLayout PipelineLayoutBuilder::Create(VkDevice device)
+{
+  VkPipelineLayout layout;
+  VkResult res =
vkCreatePipelineLayout(device, &m_ci, nullptr, &layout); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreatePipelineLayout() failed: "); + return VK_NULL_HANDLE; + } + + Clear(); + return layout; +} + +void PipelineLayoutBuilder::AddDescriptorSet(VkDescriptorSetLayout layout) +{ + Assert(m_ci.setLayoutCount < MAX_SETS); + + m_sets[m_ci.setLayoutCount] = layout; + + m_ci.setLayoutCount++; + m_ci.pSetLayouts = m_sets.data(); +} + +void PipelineLayoutBuilder::AddPushConstants(VkShaderStageFlags stages, u32 offset, u32 size) +{ + Assert(m_ci.pushConstantRangeCount < MAX_PUSH_CONSTANTS); + + VkPushConstantRange& r = m_push_constants[m_ci.pushConstantRangeCount]; + r.stageFlags = stages; + r.offset = offset; + r.size = size; + + m_ci.pushConstantRangeCount++; + m_ci.pPushConstantRanges = m_push_constants.data(); +} + +GraphicsPipelineBuilder::GraphicsPipelineBuilder() +{ + Clear(); +} + +void GraphicsPipelineBuilder::Clear() +{ + m_ci = {}; + m_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + + m_shader_stages = {}; + + m_vertex_input_state = {}; + m_vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + m_ci.pVertexInputState = &m_vertex_input_state; + m_vertex_attributes = {}; + m_vertex_buffers = {}; + + m_input_assembly = {}; + m_input_assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + + m_rasterization_state = {}; + m_rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + m_rasterization_state.lineWidth = 1.0f; + m_depth_state = {}; + m_depth_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; + m_blend_state = {}; + m_blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + m_blend_attachments = {}; + + m_viewport_state = {}; + m_viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + m_viewport = {}; + m_scissor = {}; + + m_dynamic_state = {}; + m_dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + m_dynamic_state_values = {}; + + m_multisample_state = {}; + m_multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + + // set defaults + SetNoCullRasterizationState(); + SetNoDepthTestState(); + SetNoBlendingState(); + SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST); + + // have to be specified even if dynamic + SetViewport(0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f); + SetScissorRect(0, 0, 1, 1); + SetMultisamples(VK_SAMPLE_COUNT_1_BIT); +} + +VkPipeline GraphicsPipelineBuilder::Create(VkDevice device, VkPipelineCache pipeline_cache, bool clear /* = true */) +{ + VkPipeline pipeline; + VkResult res = vkCreateGraphicsPipelines(device, pipeline_cache, 1, &m_ci, nullptr, &pipeline); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateGraphicsPipelines() failed: "); + return VK_NULL_HANDLE; + } + + if (clear) + Clear(); + + return pipeline; +} + +void GraphicsPipelineBuilder::SetShaderStage(VkShaderStageFlagBits stage, VkShaderModule module, + const char* entry_point) +{ + Assert(m_ci.stageCount < MAX_SHADER_STAGES); + + u32 index = 0; + for (; index < m_ci.stageCount; index++) + { + if (m_shader_stages[index].stage == stage) + break; + } + if (index == m_ci.stageCount) + { + m_ci.stageCount++; + m_ci.pStages = m_shader_stages.data(); + } + + VkPipelineShaderStageCreateInfo& s = m_shader_stages[index]; + s.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + s.stage = stage; + s.module = module; + s.pName = entry_point; +} + +void 
GraphicsPipelineBuilder::AddVertexBuffer(u32 binding, u32 stride,
+                                         VkVertexInputRate input_rate /*= VK_VERTEX_INPUT_RATE_VERTEX*/)
+{
+  Assert(m_vertex_input_state.vertexBindingDescriptionCount < MAX_VERTEX_BUFFERS);
+
+  VkVertexInputBindingDescription& b = m_vertex_buffers[m_vertex_input_state.vertexBindingDescriptionCount];
+  b.binding = binding;
+  b.stride = stride;
+  b.inputRate = input_rate;
+
+  m_vertex_input_state.vertexBindingDescriptionCount++;
+  m_vertex_input_state.pVertexBindingDescriptions = m_vertex_buffers.data();
+  m_ci.pVertexInputState = &m_vertex_input_state;
+}
+
+void GraphicsPipelineBuilder::AddVertexAttribute(u32 location, u32 binding, VkFormat format, u32 offset)
+{
+  Assert(m_vertex_input_state.vertexAttributeDescriptionCount < MAX_VERTEX_ATTRIBUTES);
+
+  VkVertexInputAttributeDescription& a = m_vertex_attributes[m_vertex_input_state.vertexAttributeDescriptionCount];
+  a.location = location;
+  a.binding = binding;
+  a.format = format;
+  a.offset = offset;
+
+  m_vertex_input_state.vertexAttributeDescriptionCount++;
+  m_vertex_input_state.pVertexAttributeDescriptions = m_vertex_attributes.data();
+  m_ci.pVertexInputState = &m_vertex_input_state;
+}
+
+void GraphicsPipelineBuilder::SetPrimitiveTopology(VkPrimitiveTopology topology,
+                                                   bool enable_primitive_restart /*= false*/)
+{
+  m_input_assembly.topology = topology;
+  m_input_assembly.primitiveRestartEnable = enable_primitive_restart;
+
+  m_ci.pInputAssemblyState = &m_input_assembly;
+}
+
+void GraphicsPipelineBuilder::SetRasterizationState(VkPolygonMode polygon_mode, VkCullModeFlags cull_mode,
+                                                    VkFrontFace front_face)
+{
+  m_rasterization_state.polygonMode = polygon_mode;
+  m_rasterization_state.cullMode = cull_mode;
+  m_rasterization_state.frontFace = front_face;
+
+  m_ci.pRasterizationState = &m_rasterization_state;
+}
+
+void GraphicsPipelineBuilder::SetLineWidth(float width)
+{
+  m_rasterization_state.lineWidth = width;
+}
+
+void GraphicsPipelineBuilder::SetNoCullRasterizationState()
+{
+  SetRasterizationState(VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE);
+}
+
+void GraphicsPipelineBuilder::SetDepthState(bool depth_test, bool depth_write, VkCompareOp compare_op)
+{
+  m_depth_state.depthTestEnable = depth_test;
+  m_depth_state.depthWriteEnable = depth_write;
+  m_depth_state.depthCompareOp = compare_op;
+
+  m_ci.pDepthStencilState = &m_depth_state;
+}
+
+void GraphicsPipelineBuilder::SetNoDepthTestState()
+{
+  SetDepthState(false, false, VK_COMPARE_OP_ALWAYS);
+}
+
+void GraphicsPipelineBuilder::SetBlendConstants(float r, float g, float b, float a)
+{
+  m_blend_state.blendConstants[0] = r;
+  m_blend_state.blendConstants[1] = g;
+  m_blend_state.blendConstants[2] = b;
+  m_blend_state.blendConstants[3] = a;
+  m_ci.pColorBlendState = &m_blend_state;
+}
+
+void GraphicsPipelineBuilder::AddBlendAttachment(
+  bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op,
+  VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op, VkColorComponentFlags write_mask /* = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT */)
+{
+  Assert(m_blend_state.attachmentCount < MAX_ATTACHMENTS);
+
+  VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[m_blend_state.attachmentCount];
+  bs.blendEnable = blend_enable;
+  bs.srcColorBlendFactor = src_factor;
+  bs.dstColorBlendFactor = dst_factor;
+  bs.colorBlendOp = op;
+  bs.srcAlphaBlendFactor = alpha_src_factor;
+  bs.dstAlphaBlendFactor =
alpha_dst_factor; + bs.alphaBlendOp = alpha_op; + bs.colorWriteMask = write_mask; + + m_blend_state.attachmentCount++; + m_blend_state.pAttachments = m_blend_attachments.data(); + m_ci.pColorBlendState = &m_blend_state; +} + +void GraphicsPipelineBuilder::SetBlendAttachment( + u32 attachment, bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op, + VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op, VkColorComponentFlags write_mask /*= VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT*/) +{ + Assert(attachment < MAX_ATTACHMENTS); + + VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[attachment]; + bs.blendEnable = blend_enable; + bs.srcColorBlendFactor = src_factor; + bs.dstColorBlendFactor = dst_factor; + bs.colorBlendOp = op; + bs.srcAlphaBlendFactor = alpha_src_factor; + bs.dstAlphaBlendFactor = alpha_dst_factor; + bs.alphaBlendOp = alpha_op; + bs.colorWriteMask = write_mask; + + if (attachment >= m_blend_state.attachmentCount) + { + m_blend_state.attachmentCount = attachment + 1u; + m_blend_state.pAttachments = m_blend_attachments.data(); + m_ci.pColorBlendState = &m_blend_state; + } +} + +void GraphicsPipelineBuilder::ClearBlendAttachments() +{ + m_blend_attachments = {}; + m_blend_state.attachmentCount = 0; +} + +void GraphicsPipelineBuilder::SetNoBlendingState() +{ + ClearBlendAttachments(); + SetBlendAttachment(0, false, VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD, VK_BLEND_FACTOR_ONE, + VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD, + VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | + VK_COLOR_COMPONENT_A_BIT); +} + +void GraphicsPipelineBuilder::AddDynamicState(VkDynamicState state) +{ + Assert(m_dynamic_state.dynamicStateCount < MAX_DYNAMIC_STATE); + + m_dynamic_state_values[m_dynamic_state.dynamicStateCount] = state; + m_dynamic_state.dynamicStateCount++; + m_dynamic_state.pDynamicStates = m_dynamic_state_values.data(); + m_ci.pDynamicState = &m_dynamic_state; +} + +void GraphicsPipelineBuilder::SetDynamicViewportAndScissorState() +{ + AddDynamicState(VK_DYNAMIC_STATE_VIEWPORT); + AddDynamicState(VK_DYNAMIC_STATE_SCISSOR); +} + +void GraphicsPipelineBuilder::SetViewport(float x, float y, float width, float height, float min_depth, float max_depth) +{ + m_viewport.x = x; + m_viewport.y = y; + m_viewport.width = width; + m_viewport.height = height; + m_viewport.minDepth = min_depth; + m_viewport.maxDepth = max_depth; + + m_viewport_state.pViewports = &m_viewport; + m_viewport_state.viewportCount = 1u; + m_ci.pViewportState = &m_viewport_state; +} + +void GraphicsPipelineBuilder::SetScissorRect(s32 x, s32 y, u32 width, u32 height) +{ + m_scissor.offset.x = x; + m_scissor.offset.y = y; + m_scissor.extent.width = width; + m_scissor.extent.height = height; + + m_viewport_state.pScissors = &m_scissor; + m_viewport_state.scissorCount = 1u; + m_ci.pViewportState = &m_viewport_state; +} + +void GraphicsPipelineBuilder::SetMultisamples(VkSampleCountFlagBits samples) +{ + m_multisample_state.rasterizationSamples = samples; + m_ci.pMultisampleState = &m_multisample_state; +} + +void GraphicsPipelineBuilder::SetPipelineLayout(VkPipelineLayout layout) +{ + m_ci.layout = layout; +} + +void GraphicsPipelineBuilder::SetRenderPass(VkRenderPass render_pass, u32 subpass) +{ + m_ci.renderPass = render_pass; + m_ci.subpass = subpass; +} + +SamplerBuilder::SamplerBuilder() +{ + Clear(); +} + +void SamplerBuilder::Clear() +{ + m_ci = 
{};
+  m_ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+}
+
+VkSampler SamplerBuilder::Create(VkDevice device, bool clear /* = true */)
+{
+  VkSampler sampler;
+  VkResult res = vkCreateSampler(device, &m_ci, nullptr, &sampler);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreateSampler() failed: ");
+    return VK_NULL_HANDLE;
+  }
+
+  if (clear)
+    Clear();
+
+  return sampler;
+}
+
+void SamplerBuilder::SetFilter(VkFilter mag_filter, VkFilter min_filter, VkSamplerMipmapMode mip_filter)
+{
+  m_ci.magFilter = mag_filter;
+  m_ci.minFilter = min_filter;
+  m_ci.mipmapMode = mip_filter;
+}
+
+void SamplerBuilder::SetAddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w)
+{
+  m_ci.addressModeU = u;
+  m_ci.addressModeV = v;
+  m_ci.addressModeW = w;
+}
+
+void SamplerBuilder::SetPointSampler(VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER */)
+{
+  Clear();
+  SetFilter(VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST);
+  SetAddressMode(address_mode, address_mode, address_mode);
+}
+
+void SamplerBuilder::SetLinearSampler(bool mipmaps,
+                                      VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER */)
+{
+  Clear();
+  SetFilter(VK_FILTER_LINEAR, VK_FILTER_LINEAR,
+            mipmaps ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST);
+  SetAddressMode(address_mode, address_mode, address_mode);
+}
+
+DescriptorSetUpdateBuilder::DescriptorSetUpdateBuilder()
+{
+  Clear();
+}
+
+void DescriptorSetUpdateBuilder::Clear()
+{
+  m_writes = {};
+  m_num_writes = 0;
+}
+
+void DescriptorSetUpdateBuilder::Update(VkDevice device, bool clear /*= true*/)
+{
+  Assert(m_num_writes > 0);
+
+  vkUpdateDescriptorSets(device, m_num_writes, (m_num_writes > 0) ? m_writes.data() : nullptr, 0, nullptr);
+
+  if (clear)
+    Clear();
+}
+
+void DescriptorSetUpdateBuilder::AddImageDescriptorWrite(
+  VkDescriptorSet set, u32 binding, VkImageView view,
+  VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
+{
+  Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
+
+  VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
+  ii.imageView = view;
+  ii.imageLayout = layout;
+  ii.sampler = VK_NULL_HANDLE;
+
+  VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
+  dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+  dw.dstSet = set;
+  dw.dstBinding = binding;
+  dw.descriptorCount = 1;
+  dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+  dw.pImageInfo = &ii;
+}
+
+void DescriptorSetUpdateBuilder::AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler)
+{
+  Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
+
+  VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
+  ii.imageView = VK_NULL_HANDLE;
+  ii.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+  ii.sampler = sampler;
+
+  VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
+  dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+  dw.dstSet = set;
+  dw.dstBinding = binding;
+  dw.descriptorCount = 1;
+  dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+  dw.pImageInfo = &ii;
+}
+
+void DescriptorSetUpdateBuilder::AddCombinedImageSamplerDescriptorWrite(
+  VkDescriptorSet set, u32 binding, VkImageView view, VkSampler sampler,
+  VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
+{
+  Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
+
+  VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
+  ii.imageView = view;
+  ii.imageLayout = layout;
+  ii.sampler = sampler;
+
+  VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
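+  // dw keeps a pointer into m_infos (pImageInfo below), so the pending writes and
+  // infos must stay untouched until Update() hands them to vkUpdateDescriptorSets().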
+  dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+  dw.dstSet = set;
+  dw.dstBinding = binding;
+  dw.descriptorCount = 1;
+  dw.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+  dw.pImageInfo = &ii;
+}
+
+void DescriptorSetUpdateBuilder::AddBufferDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype,
+                                                          VkBuffer buffer, u32 offset, u32 size)
+{
+  Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
+
+  VkDescriptorBufferInfo& bi = m_infos[m_num_infos++].buffer;
+  bi.buffer = buffer;
+  bi.offset = offset;
+  bi.range = size;
+
+  VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
+  dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+  dw.dstSet = set;
+  dw.dstBinding = binding;
+  dw.descriptorCount = 1;
+  dw.descriptorType = dtype;
+  dw.pBufferInfo = &bi;
+}
+
+void DescriptorSetUpdateBuilder::AddBufferViewDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype,
+                                                              VkBufferView view)
+{
+  Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
+
+  VkBufferView& bi = m_infos[m_num_infos++].buffer_view;
+  bi = view;
+
+  VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
+  dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+  dw.dstSet = set;
+  dw.dstBinding = binding;
+  dw.descriptorCount = 1;
+  dw.descriptorType = dtype;
+  dw.pTexelBufferView = &bi;
+}
+
+FramebufferBuilder::FramebufferBuilder()
+{
+  Clear();
+}
+
+void FramebufferBuilder::Clear()
+{
+  m_ci = {};
+  m_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+  m_images = {};
+}
+
+VkFramebuffer FramebufferBuilder::Create(VkDevice device, bool clear /*= true*/)
+{
+  VkFramebuffer fb;
+  VkResult res = vkCreateFramebuffer(device, &m_ci, nullptr, &fb);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
+    return VK_NULL_HANDLE;
+  }
+
+  if (clear)
+    Clear();
+
+  return fb;
+}
+
+void FramebufferBuilder::AddAttachment(VkImageView image)
+{
+  Assert(m_ci.attachmentCount < MAX_ATTACHMENTS);
+
+  m_images[m_ci.attachmentCount] = image;
+
+  m_ci.attachmentCount++;
+  m_ci.pAttachments = m_images.data();
+}
+
+void FramebufferBuilder::SetSize(u32 width, u32 height, u32 layers)
+{
+  m_ci.width = width;
+  m_ci.height = height;
+  m_ci.layers = layers;
+}
+
+void FramebufferBuilder::SetRenderPass(VkRenderPass render_pass)
+{
+  m_ci.renderPass = render_pass;
+}
+
+RenderPassBuilder::RenderPassBuilder()
+{
+  Clear();
+}
+
+void RenderPassBuilder::Clear()
+{
+  m_ci = {};
+  m_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+  m_attachments = {};
+  m_attachment_references = {};
+  m_num_attachment_references = 0;
+  m_subpasses = {};
+}
+
+VkRenderPass RenderPassBuilder::Create(VkDevice device, bool clear /*= true*/)
+{
+  VkRenderPass rp;
+  VkResult res = vkCreateRenderPass(device, &m_ci, nullptr, &rp);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreateRenderPass() failed: ");
+    return VK_NULL_HANDLE;
+  }
+
+  if (clear)
+    Clear();
+
+  return rp;
+}
+
+u32 RenderPassBuilder::AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op,
+                                     VkAttachmentStoreOp store_op, VkImageLayout initial_layout,
+                                     VkImageLayout final_layout)
+{
+  Assert(m_ci.attachmentCount < MAX_ATTACHMENTS);
+
+  const u32 index = m_ci.attachmentCount;
+  VkAttachmentDescription& ad = m_attachments[index];
+  ad.format = format;
+  ad.samples = samples;
+  ad.loadOp = load_op;
+  ad.storeOp = store_op;
+  ad.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+  ad.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+  ad.initialLayout = initial_layout;
+  ad.finalLayout = final_layout;
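+  // Stencil load/store are fixed to DONT_CARE above; the returned index is the value
+  // the AddSubpass*Attachment() helpers expect for their `attachment` argument.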
+ +FramebufferBuilder::FramebufferBuilder() +{ + Clear(); +} + +void FramebufferBuilder::Clear() +{ + m_ci = {}; + m_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; + m_images = {}; +} + +VkFramebuffer FramebufferBuilder::Create(VkDevice device, bool clear /*= true*/) +{ + VkFramebuffer fb; + VkResult res = vkCreateFramebuffer(device, &m_ci, nullptr, &fb); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: "); + return VK_NULL_HANDLE; + } + + if (clear) + Clear(); + + return fb; +} + +void FramebufferBuilder::AddAttachment(VkImageView image) +{ + Assert(m_ci.attachmentCount < MAX_ATTACHMENTS); + + m_images[m_ci.attachmentCount] = image; + + m_ci.attachmentCount++; + m_ci.pAttachments = m_images.data(); +} + +void FramebufferBuilder::SetSize(u32 width, u32 height, u32 layers) +{ + m_ci.width = width; + m_ci.height = height; + m_ci.layers = layers; +} + +void FramebufferBuilder::SetRenderPass(VkRenderPass render_pass) +{ + m_ci.renderPass = render_pass; +} + +RenderPassBuilder::RenderPassBuilder() +{ + Clear(); +} + +void RenderPassBuilder::Clear() +{ + m_ci = {}; + m_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; + m_attachments = {}; + m_attachment_references = {}; + m_num_attachment_references = 0; + m_subpasses = {}; +} + +VkRenderPass RenderPassBuilder::Create(VkDevice device, bool clear /*= true*/) +{ + VkRenderPass rp; + VkResult res = vkCreateRenderPass(device, &m_ci, nullptr, &rp); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateRenderPass() failed: "); + return VK_NULL_HANDLE; + } + + // Honour the clear parameter like the other builders do (it was previously ignored here). + if (clear) + Clear(); + + return rp; +} + +u32 RenderPassBuilder::AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op, + VkAttachmentStoreOp store_op, VkImageLayout initial_layout, + VkImageLayout final_layout) +{ + Assert(m_ci.attachmentCount < MAX_ATTACHMENTS); + + const u32 index = m_ci.attachmentCount; + VkAttachmentDescription& ad = m_attachments[index]; + ad.format = format; + ad.samples = samples; + ad.loadOp = load_op; + ad.storeOp = store_op; + ad.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; + ad.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; + ad.initialLayout = initial_layout; + ad.finalLayout = final_layout; + + m_ci.attachmentCount++; + m_ci.pAttachments = m_attachments.data(); + + return index; +} + +u32 RenderPassBuilder::AddSubpass() +{ + Assert(m_ci.subpassCount < MAX_SUBPASSES); + + const u32 index = m_ci.subpassCount; + VkSubpassDescription& sp = m_subpasses[index]; + sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; + + m_ci.subpassCount++; + m_ci.pSubpasses = m_subpasses.data(); + + return index; +} + +void RenderPassBuilder::AddSubpassColorAttachment(u32 subpass, u32 attachment, VkImageLayout layout) +{ + Assert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES); + + VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++]; + ar.attachment = attachment; + ar.layout = layout; + + VkSubpassDescription& sp = m_subpasses[subpass]; + if (sp.colorAttachmentCount == 0) + sp.pColorAttachments = &ar; + sp.colorAttachmentCount++; +} + +void RenderPassBuilder::AddSubpassDepthAttachment(u32 subpass, u32 attachment, VkImageLayout layout) +{ + Assert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES); + + VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++]; + ar.attachment = attachment; + ar.layout = layout; + + VkSubpassDescription& sp = m_subpasses[subpass]; + sp.pDepthStencilAttachment = &ar; +} + +BufferViewBuilder::BufferViewBuilder() +{ + Clear(); +} + +void BufferViewBuilder::Clear() +{ + m_ci = {}; + m_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; +} + +VkBufferView BufferViewBuilder::Create(VkDevice device, bool clear /*= true*/) +{ + VkBufferView bv; + VkResult res = vkCreateBufferView(device, &m_ci, nullptr, &bv); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateBufferView() failed: "); + return VK_NULL_HANDLE; + } + + // Honour the clear parameter like the other builders do (it was previously ignored here). + if (clear) + Clear(); + + return bv; +} + +void BufferViewBuilder::Set(VkBuffer buffer, VkFormat format, u32 offset, u32 size) +{ + m_ci.buffer = buffer; + m_ci.format = format; + m_ci.offset = offset; + m_ci.range = size; +} + +} // namespace Vulkan \ No newline at end of file diff --git a/src/common/vulkan/builders.h b/src/common/vulkan/builders.h new file mode 100644 index 000000000..cd5032247 --- /dev/null +++ b/src/common/vulkan/builders.h @@ -0,0 +1,269 @@ +#pragma once +#include "../types.h" +#include "vulkan_loader.h" +#include <array> + +namespace Vulkan { + +class DescriptorSetLayoutBuilder +{ +public: + enum : u32 + { + MAX_BINDINGS = 16, + }; + + DescriptorSetLayoutBuilder(); + + void Clear(); + + VkDescriptorSetLayout Create(VkDevice device); + + void AddBinding(u32 binding, VkDescriptorType dtype, u32 dcount, VkShaderStageFlags stages); + +private: + VkDescriptorSetLayoutCreateInfo m_ci{}; + std::array<VkDescriptorSetLayoutBinding, MAX_BINDINGS> m_bindings{}; +}; + +class PipelineLayoutBuilder +{ +public: + enum : u32 + { + MAX_SETS = 8, + MAX_PUSH_CONSTANTS = 1 + }; + + PipelineLayoutBuilder(); + + void Clear(); + + VkPipelineLayout Create(VkDevice device); + + void AddDescriptorSet(VkDescriptorSetLayout layout); + + void AddPushConstants(VkShaderStageFlags stages, u32 offset, u32 size); + +private: + VkPipelineLayoutCreateInfo m_ci{}; + std::array<VkDescriptorSetLayout, MAX_SETS> m_sets{}; + std::array<VkPushConstantRange, MAX_PUSH_CONSTANTS> m_push_constants{}; +};
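+ +// Usage sketch (reviewer note, not part of the original patch; 'device' is a hypothetical VkDevice): +// DescriptorSetLayoutBuilder dslb; +// dslb.AddBinding(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_VERTEX_BIT); +// VkDescriptorSetLayout dsl = dslb.Create(device); +// PipelineLayoutBuilder plb; +// plb.AddDescriptorSet(dsl); +// VkPipelineLayout pipeline_layout = plb.Create(device);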
+ +class GraphicsPipelineBuilder +{ +public: + enum : u32 + { + MAX_SHADER_STAGES = 3, + MAX_VERTEX_ATTRIBUTES = 16, + MAX_VERTEX_BUFFERS = 8, + MAX_ATTACHMENTS = 2, + MAX_DYNAMIC_STATE = 8 + }; + + GraphicsPipelineBuilder(); + + void Clear(); + + VkPipeline Create(VkDevice device, VkPipelineCache pipeline_cache = VK_NULL_HANDLE, bool clear = true); + + void SetShaderStage(VkShaderStageFlagBits stage, VkShaderModule module, const char* entry_point); + void SetVertexShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_VERTEX_BIT, module, "main"); } + void SetGeometryShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, module, "main"); } + void SetFragmentShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_FRAGMENT_BIT, module, "main"); } + + void AddVertexBuffer(u32 binding, u32 stride, VkVertexInputRate input_rate = VK_VERTEX_INPUT_RATE_VERTEX); + void AddVertexAttribute(u32 location, u32 binding, VkFormat format, u32 offset); + + void SetPrimitiveTopology(VkPrimitiveTopology topology, bool enable_primitive_restart = false); + + void SetRasterizationState(VkPolygonMode polygon_mode, VkCullModeFlags cull_mode, VkFrontFace front_face); + void SetLineWidth(float width); + void SetNoCullRasterizationState(); + + void SetDepthState(bool depth_test, bool depth_write, VkCompareOp compare_op); + void SetNoDepthTestState(); + + void AddBlendAttachment(bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op, + VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op, + VkColorComponentFlags write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | + VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT); + void SetBlendAttachment(u32 attachment, bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, + VkBlendOp op, VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, + VkBlendOp alpha_op, + VkColorComponentFlags write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | + VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT); + void ClearBlendAttachments(); + + void SetBlendConstants(float r, float g, float b, float a); + void SetNoBlendingState(); + + void AddDynamicState(VkDynamicState state); + + void SetDynamicViewportAndScissorState(); + void SetViewport(float x, float y, float width, float height, float min_depth, float max_depth); + void SetScissorRect(s32 x, s32 y, u32 width, u32 height); + + void SetMultisamples(VkSampleCountFlagBits samples); + + void SetPipelineLayout(VkPipelineLayout layout); + void SetRenderPass(VkRenderPass render_pass, u32 subpass); + +private: + VkGraphicsPipelineCreateInfo m_ci; + std::array<VkPipelineShaderStageCreateInfo, MAX_SHADER_STAGES> m_shader_stages; + + VkPipelineVertexInputStateCreateInfo m_vertex_input_state; + std::array<VkVertexInputBindingDescription, MAX_VERTEX_BUFFERS> m_vertex_buffers; + std::array<VkVertexInputAttributeDescription, MAX_VERTEX_ATTRIBUTES> m_vertex_attributes; + + VkPipelineInputAssemblyStateCreateInfo m_input_assembly; + + VkPipelineRasterizationStateCreateInfo m_rasterization_state; + VkPipelineDepthStencilStateCreateInfo m_depth_state; + + VkPipelineColorBlendStateCreateInfo m_blend_state; + std::array<VkPipelineColorBlendAttachmentState, MAX_ATTACHMENTS> m_blend_attachments; + + VkPipelineViewportStateCreateInfo m_viewport_state; + VkViewport m_viewport; + VkRect2D m_scissor; + + VkPipelineDynamicStateCreateInfo m_dynamic_state; + std::array<VkDynamicState, MAX_DYNAMIC_STATE> m_dynamic_state_values; + + VkPipelineMultisampleStateCreateInfo m_multisample_state; +};
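+ +// Usage sketch (reviewer note, not part of the original patch; the vs/fs modules and the layout/render-pass +// handles are hypothetical): +// GraphicsPipelineBuilder gpb; +// gpb.SetVertexShader(vs); +// gpb.SetFragmentShader(fs); +// gpb.SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST); +// gpb.SetNoCullRasterizationState(); +// gpb.SetNoDepthTestState(); +// gpb.SetNoBlendingState(); +// gpb.SetDynamicViewportAndScissorState(); +// gpb.SetPipelineLayout(pipeline_layout); +// gpb.SetRenderPass(render_pass, 0); +// VkPipeline pipeline = gpb.Create(device);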
+ +class SamplerBuilder +{ +public: + SamplerBuilder(); + + void Clear(); + + VkSampler Create(VkDevice device, bool clear = true); + + void SetFilter(VkFilter mag_filter, VkFilter min_filter, VkSamplerMipmapMode mip_filter); + void SetAddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w); + + void SetPointSampler(VkSamplerAddressMode address_mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER); + void SetLinearSampler(bool mipmaps, VkSamplerAddressMode address_mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER); + +private: + VkSamplerCreateInfo m_ci; +}; + +class DescriptorSetUpdateBuilder +{ + enum : u32 + { + MAX_WRITES = 16, + MAX_INFOS = 16, + }; + +public: + DescriptorSetUpdateBuilder(); + + void Clear(); + + void Update(VkDevice device, bool clear = true); + + void AddImageDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view, + VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + void AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler); + void AddCombinedImageSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view, VkSampler sampler, + VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + void AddBufferDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBuffer buffer, u32 offset, + u32 size); + void AddBufferViewDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBufferView view); + +private: + union InfoUnion + { + VkDescriptorBufferInfo buffer; + VkDescriptorImageInfo image; + VkBufferView buffer_view; + }; + + std::array<VkWriteDescriptorSet, MAX_WRITES> m_writes; + u32 m_num_writes = 0; + + std::array<InfoUnion, MAX_INFOS> m_infos; + u32 m_num_infos = 0; +}; + +class FramebufferBuilder +{ + enum : u32 + { + MAX_ATTACHMENTS = 2, + }; + +public: + FramebufferBuilder(); + + void Clear(); + + VkFramebuffer Create(VkDevice device, bool clear = true); + + void AddAttachment(VkImageView image); + + void SetSize(u32 width, u32 height, u32 layers); + + void SetRenderPass(VkRenderPass render_pass); + +private: + VkFramebufferCreateInfo m_ci; + std::array<VkImageView, MAX_ATTACHMENTS> m_images; +}; + +class RenderPassBuilder +{ + enum : u32 + { + MAX_ATTACHMENTS = 2, + MAX_ATTACHMENT_REFERENCES = 2, + MAX_SUBPASSES = 1, + }; + +public: + RenderPassBuilder(); + + void Clear(); + + VkRenderPass Create(VkDevice device, bool clear = true); + + u32 AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op, + VkAttachmentStoreOp store_op, VkImageLayout initial_layout, VkImageLayout final_layout); + + u32 AddSubpass(); + void AddSubpassColorAttachment(u32 subpass, u32 attachment, VkImageLayout layout); + void AddSubpassDepthAttachment(u32 subpass, u32 attachment, VkImageLayout layout); + +private: + VkRenderPassCreateInfo m_ci; + std::array<VkAttachmentDescription, MAX_ATTACHMENTS> m_attachments; + std::array<VkAttachmentReference, MAX_ATTACHMENT_REFERENCES> m_attachment_references; + u32 m_num_attachment_references = 0; + std::array<VkSubpassDescription, MAX_SUBPASSES> m_subpasses; +}; + +class BufferViewBuilder +{ +public: + BufferViewBuilder(); + + void Clear(); + + VkBufferView Create(VkDevice device, bool clear = true); + + void Set(VkBuffer buffer, VkFormat format, u32 offset, u32 size); + +private: + VkBufferViewCreateInfo m_ci; +}; + +} // namespace Vulkan \ No newline at end of file diff --git a/src/common/vulkan/context.cpp b/src/common/vulkan/context.cpp new file mode 100644 index 000000000..13a1111fb --- /dev/null +++ b/src/common/vulkan/context.cpp @@ -0,0 +1,1149 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included.
+ +#include "context.h" +#include "../assert.h" +#include "../log.h" +#include "../window_info.h" +#include "swap_chain.h" +#include "util.h" +#include <algorithm> +#include <array> +#include <cstring> +Log_SetChannel(Vulkan::Context); + +std::unique_ptr<Vulkan::Context> g_vulkan_context; + +namespace Vulkan { + +Context::Context(VkInstance instance, VkPhysicalDevice physical_device) + : m_instance(instance), m_physical_device(physical_device) +{ + // Read device physical memory properties, we need it for allocating buffers + vkGetPhysicalDeviceProperties(physical_device, &m_device_properties); + vkGetPhysicalDeviceMemoryProperties(physical_device, &m_device_memory_properties); + + // Would any drivers be this silly? I hope not... + m_device_properties.limits.minUniformBufferOffsetAlignment = + std::max(m_device_properties.limits.minUniformBufferOffsetAlignment, static_cast<VkDeviceSize>(1)); + m_device_properties.limits.minTexelBufferOffsetAlignment = + std::max(m_device_properties.limits.minTexelBufferOffsetAlignment, static_cast<VkDeviceSize>(1)); + m_device_properties.limits.optimalBufferCopyOffsetAlignment = + std::max(m_device_properties.limits.optimalBufferCopyOffsetAlignment, static_cast<VkDeviceSize>(1)); + m_device_properties.limits.optimalBufferCopyRowPitchAlignment = + std::max(m_device_properties.limits.optimalBufferCopyRowPitchAlignment, static_cast<VkDeviceSize>(1)); +} + +Context::~Context() +{ + WaitForGPUIdle(); + DestroyRenderPassCache(); + DestroyGlobalDescriptorPool(); + DestroyCommandBuffers(); + + if (m_device != VK_NULL_HANDLE) + vkDestroyDevice(m_device, nullptr); + + if (m_debug_report_callback != VK_NULL_HANDLE) + DisableDebugReports(); + + vkDestroyInstance(m_instance, nullptr); +} + +bool Context::CheckValidationLayerAvailablility() +{ + u32 extension_count = 0; + VkResult res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, nullptr); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkEnumerateInstanceExtensionProperties failed: "); + return false; + } + + std::vector<VkExtensionProperties> extension_list(extension_count); + res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, extension_list.data()); + Assert(res == VK_SUCCESS); + + u32 layer_count = 0; + res = vkEnumerateInstanceLayerProperties(&layer_count, nullptr); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkEnumerateInstanceLayerProperties failed: "); + return false; + } + + std::vector<VkLayerProperties> layer_list(layer_count); + res = vkEnumerateInstanceLayerProperties(&layer_count, layer_list.data()); + Assert(res == VK_SUCCESS); + + // Check for both VK_EXT_debug_report and VK_LAYER_LUNARG_standard_validation + return (std::find_if(extension_list.begin(), extension_list.end(), + [](const auto& it) { + return strcmp(it.extensionName, VK_EXT_DEBUG_REPORT_EXTENSION_NAME) == 0; + }) != extension_list.end() && + std::find_if(layer_list.begin(), layer_list.end(), [](const auto& it) { + return strcmp(it.layerName, "VK_LAYER_LUNARG_standard_validation") == 0; + }) != layer_list.end()); +} + +VkInstance Context::CreateVulkanInstance(bool enable_surface, bool enable_debug_report, bool enable_validation_layer) +{ + ExtensionList enabled_extensions; + if (!SelectInstanceExtensions(&enabled_extensions, enable_surface, enable_debug_report)) + return VK_NULL_HANDLE; + + VkApplicationInfo app_info = {}; + app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; + app_info.pNext = nullptr; + app_info.pApplicationName = "DuckStation"; + app_info.applicationVersion = VK_MAKE_VERSION(0, 1, 0); + app_info.pEngineName = "DuckStation"; + app_info.engineVersion = VK_MAKE_VERSION(0, 1, 0); + app_info.apiVersion = 
VK_MAKE_VERSION(1, 0, 0); + + VkInstanceCreateInfo instance_create_info = {}; + instance_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; + instance_create_info.pNext = nullptr; + instance_create_info.flags = 0; + instance_create_info.pApplicationInfo = &app_info; + instance_create_info.enabledExtensionCount = static_cast<u32>(enabled_extensions.size()); + instance_create_info.ppEnabledExtensionNames = enabled_extensions.data(); + instance_create_info.enabledLayerCount = 0; + instance_create_info.ppEnabledLayerNames = nullptr; + + // Enable debug layer on debug builds + if (enable_validation_layer) + { + static const char* layer_names[] = {"VK_LAYER_LUNARG_standard_validation"}; + instance_create_info.enabledLayerCount = 1; + instance_create_info.ppEnabledLayerNames = layer_names; + } + + VkInstance instance; + VkResult res = vkCreateInstance(&instance_create_info, nullptr, &instance); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateInstance failed: "); + return nullptr; + } + + return instance; +} + +bool Context::SelectInstanceExtensions(ExtensionList* extension_list, bool enable_surface, bool enable_debug_report) +{ + u32 extension_count = 0; + VkResult res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, nullptr); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkEnumerateInstanceExtensionProperties failed: "); + return false; + } + + if (extension_count == 0) + { + Log_ErrorPrintf("Vulkan: No extensions supported by instance."); + return false; + } + + std::vector<VkExtensionProperties> available_extension_list(extension_count); + res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, available_extension_list.data()); + Assert(res == VK_SUCCESS); + + for (const auto& extension_properties : available_extension_list) + Log_InfoPrintf("Available extension: %s", extension_properties.extensionName); + + auto SupportsExtension = [&](const char* name, bool required) { + if (std::find_if(available_extension_list.begin(), available_extension_list.end(), + [&](const VkExtensionProperties& properties) { + return !strcmp(name, properties.extensionName); + }) != available_extension_list.end()) + { + Log_InfoPrintf("Enabling extension: %s", name); + extension_list->push_back(name); + return true; + } + + if (required) + Log_ErrorPrintf("Vulkan: Missing required extension %s.", name); + + return false; + }; + + // Common extensions + if (enable_surface && !SupportsExtension(VK_KHR_SURFACE_EXTENSION_NAME, true)) + return false; + +#if defined(VK_USE_PLATFORM_WIN32_KHR) + if (enable_surface && !SupportsExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME, true)) + return false; +#elif defined(VK_USE_PLATFORM_XLIB_KHR) + if (enable_surface && !SupportsExtension(VK_KHR_XLIB_SURFACE_EXTENSION_NAME, true)) + return false; +#elif defined(VK_USE_PLATFORM_XCB_KHR) + if (enable_surface && !SupportsExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME, true)) + return false; +#elif defined(VK_USE_PLATFORM_ANDROID_KHR) + if (enable_surface && !SupportsExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME, true)) + return false; +#endif + + // VK_EXT_debug_report + if (enable_debug_report && !SupportsExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME, false)) + Log_WarningPrintf("Vulkan: Debug report requested, but extension is not available."); + + return true; +} + +Context::GPUList Context::EnumerateGPUs(VkInstance instance) +{ + u32 gpu_count = 0; + VkResult res = vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr); + if (res != VK_SUCCESS || gpu_count == 0) + { + LOG_VULKAN_ERROR(res, "vkEnumeratePhysicalDevices failed: "); + return {}; + } + + GPUList gpus; + gpus.resize(gpu_count); + + res = vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data()); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkEnumeratePhysicalDevices failed: "); + return {}; + } + + return gpus; +} + +Context::GPUNameList Context::EnumerateGPUNames(VkInstance instance) +{ + u32 gpu_count = 0; + VkResult res = vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr); + if (res != VK_SUCCESS || gpu_count == 0) + { + LOG_VULKAN_ERROR(res, "vkEnumeratePhysicalDevices failed: "); + return {}; + } + + GPUList gpus; + gpus.resize(gpu_count); + + res = vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data()); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkEnumeratePhysicalDevices failed: "); + return {}; + } + + GPUNameList gpu_names; + gpu_names.reserve(gpu_count); + for (u32 i = 0; i < gpu_count; i++) + { + VkPhysicalDeviceProperties props = {}; + vkGetPhysicalDeviceProperties(gpus[i], &props); + gpu_names.emplace_back(props.deviceName); + } + + return gpu_names; +} + +bool Context::Create(u32 gpu_index, const WindowInfo* wi, std::unique_ptr<SwapChain>* out_swap_chain, + bool enable_debug_reports, bool enable_validation_layer) +{ + AssertMsg(!g_vulkan_context, "Has no current context"); + + if (!Vulkan::LoadVulkanLibrary()) + { + Log_ErrorPrintf("Failed to load Vulkan library"); + return false; + } + + const bool enable_surface = (wi && wi->type != WindowInfo::Type::Surfaceless); + VkInstance instance = CreateVulkanInstance(enable_surface, enable_debug_reports, enable_validation_layer); + if (instance == VK_NULL_HANDLE) + { + Vulkan::UnloadVulkanLibrary(); + return false; + } + + if (!Vulkan::LoadVulkanInstanceFunctions(instance)) + { + Log_ErrorPrintf("Failed to load Vulkan instance functions"); + vkDestroyInstance(instance, nullptr); + Vulkan::UnloadVulkanLibrary(); + return false; + } + + GPUList gpus = EnumerateGPUs(instance); + if (gpus.empty()) + { + vkDestroyInstance(instance, nullptr); + Vulkan::UnloadVulkanLibrary(); + return false; + } + + GPUNameList gpu_names = EnumerateGPUNames(instance); + for (u32 i = 0; i < gpu_names.size(); i++) + Log_InfoPrintf("GPU %u: %s", static_cast<u32>(i), gpu_names[i].c_str()); + + if (gpu_index >= gpus.size()) + { + Log_WarningPrintf("GPU index (%u) out of range (%u), using first", gpu_index, static_cast<u32>(gpus.size())); + gpu_index = 0; + } + + VkSurfaceKHR surface = VK_NULL_HANDLE; + if (enable_surface && (surface = SwapChain::CreateVulkanSurface(instance, *wi)) == VK_NULL_HANDLE) + { + vkDestroyInstance(instance, nullptr); + Vulkan::UnloadVulkanLibrary(); + return false; + } + + g_vulkan_context.reset(new Context(instance, gpus[gpu_index])); + + // Enable debug reports if the "Host GPU" log category is enabled. + if (enable_debug_reports) + g_vulkan_context->EnableDebugReports(); + + // Attempt to create the device. + if (!g_vulkan_context->CreateDevice(surface, enable_validation_layer) || + !g_vulkan_context->CreateGlobalDescriptorPool() || !g_vulkan_context->CreateCommandBuffers() || + (enable_surface && (*out_swap_chain = SwapChain::Create(*wi, surface, true)) == nullptr)) + { + // Since we are destroying the instance, we're also responsible for destroying the surface. + if (surface != VK_NULL_HANDLE) + vkDestroySurfaceKHR(instance, surface, nullptr); + + Vulkan::UnloadVulkanLibrary(); + g_vulkan_context.reset(); + return false; + } + + return true; +} + +void Context::Destroy() +{ + AssertMsg(g_vulkan_context, "Has context"); + g_vulkan_context.reset(); +}
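+ +// Bring-up sketch (reviewer note, not part of the original patch; 'wi' is a hypothetical WindowInfo +// describing the render surface): +// std::unique_ptr<Vulkan::SwapChain> swap_chain; +// if (!Vulkan::Context::Create(0, &wi, &swap_chain, false, false)) +// return false; // on success g_vulkan_context is valid +// ... render using g_vulkan_context ... +// Vulkan::Context::Destroy();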
+ +bool Context::SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface) +{ + u32 extension_count = 0; + VkResult res = vkEnumerateDeviceExtensionProperties(m_physical_device, nullptr, &extension_count, nullptr); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkEnumerateDeviceExtensionProperties failed: "); + return false; + } + + if (extension_count == 0) + { + Log_ErrorPrintf("Vulkan: No extensions supported by device."); + return false; + } + + std::vector<VkExtensionProperties> available_extension_list(extension_count); + res = + vkEnumerateDeviceExtensionProperties(m_physical_device, nullptr, &extension_count, available_extension_list.data()); + Assert(res == VK_SUCCESS); + + for (const auto& extension_properties : available_extension_list) + Log_InfoPrintf("Available extension: %s", extension_properties.extensionName); + + auto SupportsExtension = [&](const char* name, bool required) { + if (std::find_if(available_extension_list.begin(), available_extension_list.end(), + [&](const VkExtensionProperties& properties) { + return !strcmp(name, properties.extensionName); + }) != available_extension_list.end()) + { + Log_InfoPrintf("Enabling extension: %s", name); + extension_list->push_back(name); + return true; + } + + if (required) + Log_ErrorPrintf("Vulkan: Missing required extension %s.", name); + + return false; + }; + + if (enable_surface && !SupportsExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME, true)) + return false; + + return true; +} + +bool Context::SelectDeviceFeatures() +{ + VkPhysicalDeviceProperties properties; + vkGetPhysicalDeviceProperties(m_physical_device, &properties); + + VkPhysicalDeviceFeatures available_features; + vkGetPhysicalDeviceFeatures(m_physical_device, &available_features); + + // Enable the features we use. + m_device_features.dualSrcBlend = available_features.dualSrcBlend; + m_device_features.geometryShader = available_features.geometryShader; + m_device_features.fillModeNonSolid = available_features.fillModeNonSolid; + return true; +} + +bool Context::CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer) +{ + u32 queue_family_count; + vkGetPhysicalDeviceQueueFamilyProperties(m_physical_device, &queue_family_count, nullptr); + if (queue_family_count == 0) + { + Log_ErrorPrintf("No queue families found on specified vulkan physical device."); + return false; + } + + std::vector<VkQueueFamilyProperties> queue_family_properties(queue_family_count); + vkGetPhysicalDeviceQueueFamilyProperties(m_physical_device, &queue_family_count, queue_family_properties.data()); + Log_InfoPrintf("%u vulkan queue families", queue_family_count); + + // Find graphics and present queues. + m_graphics_queue_family_index = queue_family_count; + m_present_queue_family_index = queue_family_count; + for (uint32_t i = 0; i < queue_family_count; i++) + { + VkBool32 graphics_supported = queue_family_properties[i].queueFlags & VK_QUEUE_GRAPHICS_BIT; + if (graphics_supported) + { + m_graphics_queue_family_index = i; + // Quit now, no need for a present queue. + if (!surface) + { + break; + } + } + + if (surface) + { + VkBool32 present_supported; + VkResult res = vkGetPhysicalDeviceSurfaceSupportKHR(m_physical_device, i, surface, &present_supported); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: "); + return false; + } + + if (present_supported) + { + m_present_queue_family_index = i; + } + + // Prefer one queue family index that does both graphics and present. + if (graphics_supported && present_supported) + { + break; + } + } + } + if (m_graphics_queue_family_index == queue_family_count) + { + Log_ErrorPrintf("Vulkan: Failed to find an acceptable graphics queue."); + return false; + } + if (surface && m_present_queue_family_index == queue_family_count) + { + Log_ErrorPrintf("Vulkan: Failed to find an acceptable present queue."); + return false; + } + + VkDeviceCreateInfo device_info = {}; + device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; + device_info.pNext = nullptr; + device_info.flags = 0; + + static constexpr float queue_priorities[] = {1.0f}; + VkDeviceQueueCreateInfo graphics_queue_info = {}; + graphics_queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + graphics_queue_info.pNext = nullptr; + graphics_queue_info.flags = 0; + graphics_queue_info.queueFamilyIndex = m_graphics_queue_family_index; + graphics_queue_info.queueCount = 1; + graphics_queue_info.pQueuePriorities = queue_priorities; + + VkDeviceQueueCreateInfo present_queue_info = {}; + present_queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + present_queue_info.pNext = nullptr; + present_queue_info.flags = 0; + present_queue_info.queueFamilyIndex = m_present_queue_family_index; + present_queue_info.queueCount = 1; + present_queue_info.pQueuePriorities = queue_priorities; + + std::array<VkDeviceQueueCreateInfo, 2> queue_infos = {{ + graphics_queue_info, + present_queue_info, + }}; + + device_info.queueCreateInfoCount = 1; + if (m_graphics_queue_family_index != m_present_queue_family_index) + { + device_info.queueCreateInfoCount = 2; + } + device_info.pQueueCreateInfos = queue_infos.data(); + + ExtensionList enabled_extensions; + if (!SelectDeviceExtensions(&enabled_extensions, surface != VK_NULL_HANDLE)) + return false; + + device_info.enabledLayerCount = 0; + device_info.ppEnabledLayerNames = nullptr; + device_info.enabledExtensionCount = static_cast<u32>(enabled_extensions.size()); + device_info.ppEnabledExtensionNames = enabled_extensions.data(); + + // Check for required features before creating. + if (!SelectDeviceFeatures()) + return false; + + device_info.pEnabledFeatures = &m_device_features; + + // Enable debug layer on debug builds + if (enable_validation_layer) + { + static const char* layer_names[] = {"VK_LAYER_LUNARG_standard_validation"}; + device_info.enabledLayerCount = 1; + device_info.ppEnabledLayerNames = layer_names; + } + + VkResult res = vkCreateDevice(m_physical_device, &device_info, nullptr, &m_device); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateDevice failed: "); + return false; + } + + // With the device created, we can fill the remaining entry points. + if (!LoadVulkanDeviceFunctions(m_device)) + return false; + + // Grab the graphics and present queues.
+ vkGetDeviceQueue(m_device, m_graphics_queue_family_index, 0, &m_graphics_queue); + if (surface) + { + vkGetDeviceQueue(m_device, m_present_queue_family_index, 0, &m_present_queue); + } + return true; +} + +bool Context::CreateCommandBuffers() +{ + VkResult res; + + for (FrameResources& resources : m_frame_resources) + { + resources.needs_fence_wait = false; + + VkCommandPoolCreateInfo pool_info = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr, 0, + m_graphics_queue_family_index}; + res = vkCreateCommandPool(m_device, &pool_info, nullptr, &resources.command_pool); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateCommandPool failed: "); + return false; + } + + VkCommandBufferAllocateInfo buffer_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, nullptr, + resources.command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1}; + + res = vkAllocateCommandBuffers(m_device, &buffer_info, &resources.command_buffer); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkAllocateCommandBuffers failed: "); + return false; + } + + VkFenceCreateInfo fence_info = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, VK_FENCE_CREATE_SIGNALED_BIT}; + + res = vkCreateFence(m_device, &fence_info, nullptr, &resources.fence); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateFence failed: "); + return false; + } + + // TODO: A better way to choose the number of descriptors. + VkDescriptorPoolSize pool_sizes[] = {{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1024}, + {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1024}, + {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 16}}; + + VkDescriptorPoolCreateInfo pool_create_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + nullptr, + 0, + 1024, // TODO: tweak this + static_cast<u32>(countof(pool_sizes)), + pool_sizes}; + + res = vkCreateDescriptorPool(m_device, &pool_create_info, nullptr, &resources.descriptor_pool); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateDescriptorPool failed: "); + return false; + } + } + + ActivateCommandBuffer(0); + return true; +} + +void Context::DestroyCommandBuffers() +{ + for (FrameResources& resources : m_frame_resources) + { + for (auto& it : resources.cleanup_resources) + it(); + resources.cleanup_resources.clear(); + + if (resources.fence != VK_NULL_HANDLE) + { + vkDestroyFence(m_device, resources.fence, nullptr); + resources.fence = VK_NULL_HANDLE; + } + if (resources.descriptor_pool != VK_NULL_HANDLE) + { + vkDestroyDescriptorPool(m_device, resources.descriptor_pool, nullptr); + resources.descriptor_pool = VK_NULL_HANDLE; + } + if (resources.command_buffer != VK_NULL_HANDLE) + { + vkFreeCommandBuffers(m_device, resources.command_pool, 1, &resources.command_buffer); + resources.command_buffer = VK_NULL_HANDLE; + } + if (resources.command_pool != VK_NULL_HANDLE) + { + vkDestroyCommandPool(m_device, resources.command_pool, nullptr); + resources.command_pool = VK_NULL_HANDLE; + } + } +} + +bool Context::CreateGlobalDescriptorPool() +{ + // TODO: A better way to choose the number of descriptors.
+ VkDescriptorPoolSize pool_sizes[] = {{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1024}, + {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1024}, + {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 16}}; + + VkDescriptorPoolCreateInfo pool_create_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + nullptr, + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, + 1024, // TODO: tweak this + static_cast<u32>(countof(pool_sizes)), + pool_sizes}; + + VkResult res = vkCreateDescriptorPool(m_device, &pool_create_info, nullptr, &m_global_descriptor_pool); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateDescriptorPool failed: "); + return false; + } + + return true; +} + +void Context::DestroyGlobalDescriptorPool() +{ + if (m_global_descriptor_pool != VK_NULL_HANDLE) + { + vkDestroyDescriptorPool(m_device, m_global_descriptor_pool, nullptr); + m_global_descriptor_pool = VK_NULL_HANDLE; + } +} + +void Context::DestroyRenderPassCache() +{ + for (auto& it : m_render_pass_cache) + vkDestroyRenderPass(m_device, it.second, nullptr); + + m_render_pass_cache.clear(); +} + +VkDescriptorSet Context::AllocateDescriptorSet(VkDescriptorSetLayout set_layout) +{ + VkDescriptorSetAllocateInfo allocate_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, nullptr, + m_frame_resources[m_current_frame].descriptor_pool, 1, &set_layout}; + + VkDescriptorSet descriptor_set; + VkResult res = vkAllocateDescriptorSets(m_device, &allocate_info, &descriptor_set); + if (res != VK_SUCCESS) + { + // Failing to allocate a descriptor set is not a fatal error, we can + // recover by moving to the next command buffer. + return VK_NULL_HANDLE; + } + + return descriptor_set; +} + +VkDescriptorSet Context::AllocateGlobalDescriptorSet(VkDescriptorSetLayout set_layout) +{ + VkDescriptorSetAllocateInfo allocate_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, nullptr, + m_global_descriptor_pool, 1, &set_layout}; + + VkDescriptorSet descriptor_set; + VkResult res = vkAllocateDescriptorSets(m_device, &allocate_info, &descriptor_set); + if (res != VK_SUCCESS) + return VK_NULL_HANDLE; + + return descriptor_set; +} + +void Context::FreeGlobalDescriptorSet(VkDescriptorSet set) +{ + vkFreeDescriptorSets(m_device, m_global_descriptor_pool, 1, &set); +} + +void Context::WaitForFenceCounter(u64 fence_counter) +{ + if (m_completed_fence_counter >= fence_counter) + return; + + // Find the first command buffer which covers this counter value. + u32 index = (m_current_frame + 1) % NUM_COMMAND_BUFFERS; + while (index != m_current_frame) + { + if (m_frame_resources[index].fence_counter >= fence_counter) + break; + + index = (index + 1) % NUM_COMMAND_BUFFERS; + } + + Assert(index != m_current_frame); + WaitForCommandBufferCompletion(index); +} + +void Context::WaitForGPUIdle() +{ + vkDeviceWaitIdle(m_device); +} + +void Context::WaitForCommandBufferCompletion(u32 index) +{ + // Wait for this command buffer to be completed. + VkResult res = vkWaitForFences(m_device, 1, &m_frame_resources[index].fence, VK_TRUE, UINT64_MAX); + if (res != VK_SUCCESS) + LOG_VULKAN_ERROR(res, "vkWaitForFences failed: "); + + // Clean up any resources for command buffers between the last known completed buffer and this + // now-completed command buffer. If we use >2 buffers, this may be more than one buffer.
+ const u64 now_completed_counter = m_frame_resources[index].fence_counter; + u32 cleanup_index = (m_current_frame + 1) % NUM_COMMAND_BUFFERS; + while (cleanup_index != m_current_frame) + { + FrameResources& resources = m_frame_resources[cleanup_index]; + if (resources.fence_counter > now_completed_counter) + break; + + if (resources.fence_counter > m_completed_fence_counter) + { + for (auto& it : resources.cleanup_resources) + it(); + resources.cleanup_resources.clear(); + } + + cleanup_index = (cleanup_index + 1) % NUM_COMMAND_BUFFERS; + } + + m_completed_fence_counter = now_completed_counter; +} + +void Context::SubmitCommandBuffer(VkSemaphore wait_semaphore, VkSemaphore signal_semaphore, + VkSwapchainKHR present_swap_chain, uint32_t present_image_index) +{ + FrameResources& resources = m_frame_resources[m_current_frame]; + + // End the current command buffer. + VkResult res = vkEndCommandBuffer(resources.command_buffer); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkEndCommandBuffer failed: "); + Panic("Failed to end command buffer"); + } + + // This command buffer now has commands, so can't be re-used without waiting. + resources.needs_fence_wait = true; + + // This may be executed on the worker thread, so don't modify any state of the manager class. + uint32_t wait_bits = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &wait_bits, 1u, + &resources.command_buffer, 0, nullptr}; + + if (wait_semaphore != VK_NULL_HANDLE) + { + submit_info.pWaitSemaphores = &wait_semaphore; + submit_info.waitSemaphoreCount = 1; + } + + if (signal_semaphore != VK_NULL_HANDLE) + { + submit_info.signalSemaphoreCount = 1; + submit_info.pSignalSemaphores = &signal_semaphore; + } + + res = vkQueueSubmit(m_graphics_queue, 1, &submit_info, resources.fence); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkQueueSubmit failed: "); + Panic("Failed to submit command buffer."); + } + + // Do we have a swap chain to present? + if (present_swap_chain != VK_NULL_HANDLE) + { + // Should have a signal semaphore. + Assert(signal_semaphore != VK_NULL_HANDLE); + VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + nullptr, + 1, + &signal_semaphore, + 1, + &present_swap_chain, + &present_image_index, + nullptr}; + + res = vkQueuePresentKHR(m_present_queue, &present_info); + if (res != VK_SUCCESS) + { + // VK_ERROR_OUT_OF_DATE_KHR is not fatal, just means we need to recreate our swap chain. + if (res != VK_ERROR_OUT_OF_DATE_KHR && res != VK_SUBOPTIMAL_KHR) + LOG_VULKAN_ERROR(res, "vkQueuePresentKHR failed: "); + + m_last_present_failed = true; + } + } +} + +void Context::MoveToNextCommandBuffer() +{ + ActivateCommandBuffer((m_current_frame + 1) % NUM_COMMAND_BUFFERS); +} + +void Context::ActivateCommandBuffer(u32 index) +{ + FrameResources& resources = m_frame_resources[index]; + + // Wait for the GPU to finish with all resources for this command buffer. + if (resources.fence_counter > m_completed_fence_counter) + WaitForCommandBufferCompletion(index); + + // Reset fence to unsignaled before starting. 
+ VkResult res = vkResetFences(m_device, 1, &resources.fence); + if (res != VK_SUCCESS) + LOG_VULKAN_ERROR(res, "vkResetFences failed: "); + + // Reset command pools to beginning since we can re-use the memory now + res = vkResetCommandPool(m_device, resources.command_pool, 0); + if (res != VK_SUCCESS) + LOG_VULKAN_ERROR(res, "vkResetCommandPool failed: "); + + // Enable commands to be recorded to the two buffers again. + VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr}; + res = vkBeginCommandBuffer(resources.command_buffer, &begin_info); + if (res != VK_SUCCESS) + LOG_VULKAN_ERROR(res, "vkBeginCommandBuffer failed: "); + + // Also can do the same for the descriptor pools + res = vkResetDescriptorPool(m_device, resources.descriptor_pool, 0); + if (res != VK_SUCCESS) + LOG_VULKAN_ERROR(res, "vkResetDescriptorPool failed: "); + + m_current_frame = index; + m_current_command_buffer = resources.command_buffer; + resources.fence_counter = m_next_fence_counter++; +} + +void Context::ExecuteCommandBuffer(bool wait_for_completion) +{ + // If we're waiting for completion, don't bother waking the worker thread. + const u32 current_frame = m_current_frame; + SubmitCommandBuffer(); + MoveToNextCommandBuffer(); + + if (wait_for_completion) + WaitForCommandBufferCompletion(current_frame); +} + +bool Context::CheckLastPresentFail() +{ + bool res = m_last_present_failed; + m_last_present_failed = false; + return res; +} + +void Context::DeferBufferDestruction(VkBuffer object) +{ + FrameResources& resources = m_frame_resources[m_current_frame]; + resources.cleanup_resources.push_back([this, object]() { vkDestroyBuffer(m_device, object, nullptr); }); +} + +void Context::DeferBufferViewDestruction(VkBufferView object) +{ + FrameResources& resources = m_frame_resources[m_current_frame]; + resources.cleanup_resources.push_back([this, object]() { vkDestroyBufferView(m_device, object, nullptr); }); +} + +void Context::DeferDeviceMemoryDestruction(VkDeviceMemory object) +{ + FrameResources& resources = m_frame_resources[m_current_frame]; + resources.cleanup_resources.push_back([this, object]() { vkFreeMemory(m_device, object, nullptr); }); +} + +void Context::DeferFramebufferDestruction(VkFramebuffer object) +{ + FrameResources& resources = m_frame_resources[m_current_frame]; + resources.cleanup_resources.push_back([this, object]() { vkDestroyFramebuffer(m_device, object, nullptr); }); +} + +void Context::DeferImageDestruction(VkImage object) +{ + FrameResources& resources = m_frame_resources[m_current_frame]; + resources.cleanup_resources.push_back([this, object]() { vkDestroyImage(m_device, object, nullptr); }); +} + +void Context::DeferImageViewDestruction(VkImageView object) +{ + FrameResources& resources = m_frame_resources[m_current_frame]; + resources.cleanup_resources.push_back([this, object]() { vkDestroyImageView(m_device, object, nullptr); }); +} + +static VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, uint64_t object, + size_t location, int32_t messageCode, + const char* pLayerPrefix, const char* pMessage, + void* pUserData) +{ + LOGLEVEL level; + if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) + level = LOGLEVEL_ERROR; + else if (flags & (VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT)) + level = LOGLEVEL_WARNING; + else if (flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT) + level = LOGLEVEL_INFO; + else + 
level = LOGLEVEL_DEBUG; + + Log::Writef("Vulkan", __func__, level, "Vulkan debug report: (%s) %s", pLayerPrefix ? pLayerPrefix : "", pMessage); + return VK_FALSE; +} + +bool Context::EnableDebugReports() +{ + // Already enabled? + if (m_debug_report_callback != VK_NULL_HANDLE) + return true; + + // Check for presence of the functions before calling + if (!vkCreateDebugReportCallbackEXT || !vkDestroyDebugReportCallbackEXT || !vkDebugReportMessageEXT) + { + return false; + } + + VkDebugReportCallbackCreateInfoEXT callback_info = { + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, nullptr, + VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | + VK_DEBUG_REPORT_INFORMATION_BIT_EXT | VK_DEBUG_REPORT_DEBUG_BIT_EXT, + DebugReportCallback, nullptr}; + + VkResult res = vkCreateDebugReportCallbackEXT(m_instance, &callback_info, nullptr, &m_debug_report_callback); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateDebugReportCallbackEXT failed: "); + return false; + } + + return true; +} + +void Context::DisableDebugReports() +{ + if (m_debug_report_callback != VK_NULL_HANDLE) + { + vkDestroyDebugReportCallbackEXT(m_instance, m_debug_report_callback, nullptr); + m_debug_report_callback = VK_NULL_HANDLE; + } +} + +bool Context::GetMemoryType(u32 bits, VkMemoryPropertyFlags properties, u32* out_type_index) +{ + for (u32 i = 0; i < VK_MAX_MEMORY_TYPES; i++) + { + if ((bits & (1 << i)) != 0) + { + u32 supported = m_device_memory_properties.memoryTypes[i].propertyFlags & properties; + if (supported == properties) + { + *out_type_index = i; + return true; + } + } + } + + return false; +} + +u32 Context::GetMemoryType(u32 bits, VkMemoryPropertyFlags properties) +{ + u32 type_index = VK_MAX_MEMORY_TYPES; + if (!GetMemoryType(bits, properties, &type_index)) + { + Log_ErrorPrintf("Unable to find memory type for %x:%x", bits, properties); + Panic("Unable to find memory type"); + } + + return type_index; +} + +u32 Context::GetUploadMemoryType(u32 bits, bool* is_coherent) +{ + // Try for coherent memory first. + VkMemoryPropertyFlags flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + + u32 type_index; + if (!GetMemoryType(bits, flags, &type_index)) + { + Log_WarningPrintf("Vulkan: Failed to find a coherent memory type for uploads, this will affect performance."); + + // Try non-coherent memory. + flags &= ~VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + if (!GetMemoryType(bits, flags, &type_index)) + { + // We shouldn't have any memory types that aren't host-visible. + Panic("Unable to get memory type for upload."); + type_index = 0; + } + } + + if (is_coherent) + *is_coherent = ((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0); + + return type_index; +} + +u32 Context::GetReadbackMemoryType(u32 bits, bool* is_coherent, bool* is_cached) +{ + // Try for cached and coherent memory first. + VkMemoryPropertyFlags flags = + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + + u32 type_index; + if (!GetMemoryType(bits, flags, &type_index)) + { + // For readbacks, caching is more important than coherency. + flags &= ~VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + if (!GetMemoryType(bits, flags, &type_index)) + { + Log_WarningPrintf("Vulkan: Failed to find a cached memory type for readbacks, this will affect " + "performance."); + + // Remove the cached bit as well. 
+ flags &= ~VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + if (!GetMemoryType(bits, flags, &type_index)) + { + // We shouldn't have any memory types that aren't host-visible. + Panic("Unable to get memory type for upload."); + type_index = 0; + } + } + } + + if (is_coherent) + *is_coherent = ((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0); + if (is_cached) + *is_cached = ((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0); + + return type_index; +} + +VkRenderPass Context::GetRenderPass(VkFormat color_format, VkFormat depth_format, VkSampleCountFlagBits samples, + VkAttachmentLoadOp load_op) +{ + auto key = std::tie(color_format, depth_format, samples, load_op); + auto it = m_render_pass_cache.find(key); + if (it != m_render_pass_cache.end()) + return it->second; + + VkAttachmentReference color_reference; + VkAttachmentReference* color_reference_ptr = nullptr; + VkAttachmentReference depth_reference; + VkAttachmentReference* depth_reference_ptr = nullptr; + std::array<VkAttachmentDescription, 2> attachments; + u32 num_attachments = 0; + if (color_format != VK_FORMAT_UNDEFINED) + { + attachments[num_attachments] = {0, + color_format, + samples, + load_op, + VK_ATTACHMENT_STORE_OP_STORE, + VK_ATTACHMENT_LOAD_OP_DONT_CARE, + VK_ATTACHMENT_STORE_OP_DONT_CARE, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; + color_reference.attachment = num_attachments; + color_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + color_reference_ptr = &color_reference; + num_attachments++; + } + if (depth_format != VK_FORMAT_UNDEFINED) + { + attachments[num_attachments] = {0, + depth_format, + samples, + load_op, + VK_ATTACHMENT_STORE_OP_STORE, + VK_ATTACHMENT_LOAD_OP_DONT_CARE, + VK_ATTACHMENT_STORE_OP_DONT_CARE, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; + depth_reference.attachment = num_attachments; + depth_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + depth_reference_ptr = &depth_reference; + num_attachments++; + } + + VkSubpassDescription subpass = {0, + VK_PIPELINE_BIND_POINT_GRAPHICS, + 0, + nullptr, + color_reference_ptr ? 1u : 0u, + color_reference_ptr ? color_reference_ptr : nullptr, + nullptr, + depth_reference_ptr, + 0, + nullptr}; + VkRenderPassCreateInfo pass_info = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + nullptr, + 0, + num_attachments, + attachments.data(), + 1, + &subpass, + 0, + nullptr}; + + VkRenderPass pass; + VkResult res = vkCreateRenderPass(m_device, &pass_info, nullptr, &pass); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateRenderPass failed: "); + return VK_NULL_HANDLE; + } + + m_render_pass_cache.emplace(key, pass); + return pass; +}
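+ +// Usage sketch (reviewer note, not part of the original patch): compatible render passes are shared through +// the cache, e.g. +// VkRenderPass rp = g_vulkan_context->GetRenderPass(VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_UNDEFINED, +// VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR);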
+ +} // namespace Vulkan diff --git a/src/common/vulkan/context.h b/src/common/vulkan/context.h new file mode 100644 index 000000000..31163c38e --- /dev/null +++ b/src/common/vulkan/context.h @@ -0,0 +1,229 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included. + +#pragma once + +#include "../types.h" +#include "vulkan_loader.h" +#include <array> +#include <functional> +#include <map> +#include <memory> +#include <tuple> +#include <vector> + +struct WindowInfo; + +namespace Vulkan { + +class SwapChain; + +class Context +{ +public: + enum : u32 + { + NUM_COMMAND_BUFFERS = 2 + }; + + ~Context(); + + // Determines if the Vulkan validation layer is available on the system. + static bool CheckValidationLayerAvailablility(); + + // Helper method to create a Vulkan instance. + static VkInstance CreateVulkanInstance(bool enable_surface, bool enable_debug_report, bool enable_validation_layer); + + // Returns a list of Vulkan-compatible GPUs. + using GPUList = std::vector<VkPhysicalDevice>; + using GPUNameList = std::vector<std::string>; + static GPUList EnumerateGPUs(VkInstance instance); + static GPUNameList EnumerateGPUNames(VkInstance instance); + + // Creates a new context and sets it up as global. + static bool Create(u32 gpu_index, const WindowInfo* wi, std::unique_ptr<SwapChain>* out_swap_chain, + bool enable_debug_reports, bool enable_validation_layer); + + // Destroys context. + static void Destroy(); + + // Enable/disable debug message runtime. + bool EnableDebugReports(); + void DisableDebugReports(); + + // Global state accessors + ALWAYS_INLINE VkInstance GetVulkanInstance() const { return m_instance; } + ALWAYS_INLINE VkPhysicalDevice GetPhysicalDevice() const { return m_physical_device; } + ALWAYS_INLINE VkDevice GetDevice() const { return m_device; } + ALWAYS_INLINE VkQueue GetGraphicsQueue() const { return m_graphics_queue; } + ALWAYS_INLINE u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; } + ALWAYS_INLINE VkQueue GetPresentQueue() const { return m_present_queue; } + ALWAYS_INLINE u32 GetPresentQueueFamilyIndex() const { return m_present_queue_family_index; } + ALWAYS_INLINE const VkQueueFamilyProperties& GetGraphicsQueueProperties() const + { + return m_graphics_queue_properties; + } + ALWAYS_INLINE const VkPhysicalDeviceMemoryProperties& GetDeviceMemoryProperties() const + { + return m_device_memory_properties; + } + ALWAYS_INLINE const VkPhysicalDeviceProperties& GetDeviceProperties() const { return m_device_properties; } + ALWAYS_INLINE const VkPhysicalDeviceFeatures& GetDeviceFeatures() const { return m_device_features; } + ALWAYS_INLINE const VkPhysicalDeviceLimits& GetDeviceLimits() const { return m_device_properties.limits; } + + // Support bits + ALWAYS_INLINE bool SupportsGeometryShaders() const { return m_device_features.geometryShader == VK_TRUE; } + ALWAYS_INLINE bool SupportsDualSourceBlend() const { return m_device_features.dualSrcBlend == VK_TRUE; } + + // Helpers for getting constants + ALWAYS_INLINE VkDeviceSize GetUniformBufferAlignment() const + { + return m_device_properties.limits.minUniformBufferOffsetAlignment; + } + ALWAYS_INLINE VkDeviceSize GetTexelBufferAlignment() const + { + return m_device_properties.limits.minTexelBufferOffsetAlignment; + } + ALWAYS_INLINE VkDeviceSize GetBufferImageGranularity() const + { + return m_device_properties.limits.bufferImageGranularity; + } + + // Finds a memory type index for the specified memory properties and the bits returned by + // vkGetImageMemoryRequirements + bool GetMemoryType(u32 bits, VkMemoryPropertyFlags properties, u32* out_type_index); + u32 GetMemoryType(u32 bits, VkMemoryPropertyFlags properties); + + // Finds a memory type for upload or readback buffers. + u32 GetUploadMemoryType(u32 bits, bool* is_coherent = nullptr); + u32 GetReadbackMemoryType(u32 bits, bool* is_coherent = nullptr, bool* is_cached = nullptr); + + // Creates a simple render pass. + VkRenderPass GetRenderPass(VkFormat color_format, VkFormat depth_format, VkSampleCountFlagBits samples, + VkAttachmentLoadOp load_op); + + // These command buffers are allocated per-frame. They are valid until the command buffer + // is submitted, after that you should call these functions again.
+ ALWAYS_INLINE VkDescriptorPool GetGlobalDescriptorPool() const { return m_global_descriptor_pool; } + ALWAYS_INLINE VkCommandBuffer GetCurrentCommandBuffer() const { return m_current_command_buffer; } + ALWAYS_INLINE VkDescriptorPool GetCurrentDescriptorPool() const + { + return m_frame_resources[m_current_frame].descriptor_pool; + } + + /// Allocates a descriptor set from the pool reserved for the current frame. + VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout); + + /// Allocates a descriptor set from the global pool; it must be released with FreeGlobalDescriptorSet(). + VkDescriptorSet AllocateGlobalDescriptorSet(VkDescriptorSetLayout set_layout); + + /// Frees a descriptor set allocated from the global pool. + void FreeGlobalDescriptorSet(VkDescriptorSet set); + + // Gets the fence that will be signaled when the currently executing command buffer is + // queued and executed. Do not wait for this fence before the buffer is executed. + ALWAYS_INLINE VkFence GetCurrentCommandBufferFence() const { return m_frame_resources[m_current_frame].fence; } + + // Fence "counters" are used to track which commands have been completed by the GPU. + // If the last completed fence counter is greater or equal to N, it means that the work + // associated with counter N has been completed by the GPU. The value of N to associate with + // commands can be retrieved by calling GetCurrentFenceCounter(). + u64 GetCompletedFenceCounter() const { return m_completed_fence_counter; } + + // Gets the fence counter associated with the current command buffer. The work recorded in it + // is considered complete once the completed fence counter reaches this value. + u64 GetCurrentFenceCounter() const { return m_frame_resources[m_current_frame].fence_counter; }
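+ +// Usage sketch (reviewer note, not part of the original patch): tag work with the current counter, then +// block until the GPU reaches it before touching the tagged resources on the CPU: +// const u64 counter = g_vulkan_context->GetCurrentFenceCounter(); +// ... record commands, then SubmitCommandBuffer() / MoveToNextCommandBuffer() ... +// g_vulkan_context->WaitForFenceCounter(counter);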
+ + void SubmitCommandBuffer(VkSemaphore wait_semaphore = VK_NULL_HANDLE, VkSemaphore signal_semaphore = VK_NULL_HANDLE, + VkSwapchainKHR present_swap_chain = VK_NULL_HANDLE, + uint32_t present_image_index = 0xFFFFFFFF); + void MoveToNextCommandBuffer(); + + void ExecuteCommandBuffer(bool wait_for_completion); + + // Was the last present submitted to the queue a failure? If so, we must recreate our swapchain. + bool CheckLastPresentFail(); + + // Schedule a vulkan resource for destruction later on. This will occur when the command buffer + // is next re-used, and the GPU has finished working with the specified resource. + void DeferBufferDestruction(VkBuffer object); + void DeferBufferViewDestruction(VkBufferView object); + void DeferDeviceMemoryDestruction(VkDeviceMemory object); + void DeferFramebufferDestruction(VkFramebuffer object); + void DeferImageDestruction(VkImage object); + void DeferImageViewDestruction(VkImageView object); + + // Wait for a fence to be completed. + // Also invokes callbacks for completion. + void WaitForFenceCounter(u64 fence_counter); + + void WaitForGPUIdle(); + +private: + Context(VkInstance instance, VkPhysicalDevice physical_device); + + using ExtensionList = std::vector<const char*>; + static bool SelectInstanceExtensions(ExtensionList* extension_list, bool enable_surface, bool enable_debug_report); + bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface); + bool SelectDeviceFeatures(); + bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer); + + bool CreateCommandBuffers(); + void DestroyCommandBuffers(); + bool CreateGlobalDescriptorPool(); + void DestroyGlobalDescriptorPool(); + void DestroyRenderPassCache(); + + void ActivateCommandBuffer(u32 index); + void WaitForCommandBufferCompletion(u32 index); + + struct FrameResources + { + // [0] - Init (upload) command buffer, [1] - draw command buffer + VkCommandPool command_pool = VK_NULL_HANDLE; + VkCommandBuffer command_buffer = VK_NULL_HANDLE; + VkDescriptorPool descriptor_pool = VK_NULL_HANDLE; + VkFence fence = VK_NULL_HANDLE; + u64 fence_counter = 0; + bool needs_fence_wait = false; + + std::vector<std::function<void()>> cleanup_resources; + }; + + VkInstance m_instance = VK_NULL_HANDLE; + VkPhysicalDevice m_physical_device = VK_NULL_HANDLE; + VkDevice m_device = VK_NULL_HANDLE; + + VkCommandBuffer m_current_command_buffer = VK_NULL_HANDLE; + + VkDescriptorPool m_global_descriptor_pool = VK_NULL_HANDLE; + + VkQueue m_graphics_queue = VK_NULL_HANDLE; + u32 m_graphics_queue_family_index = 0; + VkQueue m_present_queue = VK_NULL_HANDLE; + u32 m_present_queue_family_index = 0; + + std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources; + u64 m_next_fence_counter = 1; + u64 m_completed_fence_counter = 0; + u32 m_current_frame = 0; + + bool m_last_present_failed = false; + + // Render pass cache + using RenderPassCacheKey = std::tuple<VkFormat, VkFormat, VkSampleCountFlagBits, VkAttachmentLoadOp>; + std::map<RenderPassCacheKey, VkRenderPass> m_render_pass_cache; + + VkDebugReportCallbackEXT m_debug_report_callback = VK_NULL_HANDLE; + + VkQueueFamilyProperties m_graphics_queue_properties = {}; + VkPhysicalDeviceFeatures m_device_features = {}; + VkPhysicalDeviceProperties m_device_properties = {}; + VkPhysicalDeviceMemoryProperties m_device_memory_properties = {}; +}; + +} // namespace Vulkan + +extern std::unique_ptr<Vulkan::Context> g_vulkan_context; diff --git a/src/common/vulkan/shader_cache.cpp b/src/common/vulkan/shader_cache.cpp new file mode 100644 index 000000000..bc69bcd72 --- /dev/null +++ b/src/common/vulkan/shader_cache.cpp @@ -0,0 +1,513 @@ +#include "shader_cache.h" +#include "../assert.h" +#include "../file_system.h" +#include "../log.h" +#include "../md5_digest.h" +#include "context.h" +#include "shader_compiler.h" +#include "util.h" +Log_SetChannel(Vulkan::ShaderCache); + +// TODO: store the driver version and stuff in the shader header + +std::unique_ptr<Vulkan::ShaderCache> g_vulkan_shader_cache; + +namespace Vulkan { + +using ShaderCompiler::SPIRVCodeType; +using ShaderCompiler::SPIRVCodeVector; + +#pragma pack(push, 4) +struct VK_PIPELINE_CACHE_HEADER +{ + u32 header_length; + u32 header_version; + u32 vendor_id; + u32 device_id; + u8 uuid[VK_UUID_SIZE]; +}; + +struct CacheIndexEntry +{ + u64 source_hash_low; + u64 source_hash_high; + u32 source_length; + u32 shader_type; + u32 file_offset; + u32 blob_size; +}; +#pragma pack(pop) + +static bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header) +{ + if (header.header_length < sizeof(VK_PIPELINE_CACHE_HEADER)) + { + Log_ErrorPrintf("Pipeline cache failed validation: Invalid header length"); + return false; + } + + if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE) + { + 
Log_ErrorPrintf("Pipeline cache failed validation: Invalid header version"); + return false; + } + + if (header.vendor_id != g_vulkan_context->GetDeviceProperties().vendorID) + { + Log_ErrorPrintf("Pipeline cache failed validation: Incorrect vendor ID (file: 0x%X, device: 0x%X)", + header.vendor_id, g_vulkan_context->GetDeviceProperties().vendorID); + return false; + } + + if (header.device_id != g_vulkan_context->GetDeviceProperties().deviceID) + { + Log_ErrorPrintf("Pipeline cache failed validation: Incorrect device ID (file: 0x%X, device: 0x%X)", + header.device_id, g_vulkan_context->GetDeviceProperties().deviceID); + return false; + } + + if (std::memcmp(header.uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) != 0) + { + Log_ErrorPrintf("Pipeline cache failed validation: Incorrect UUID"); + return false; + } + + return true; +} + +static void FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header) +{ + header->header_length = sizeof(VK_PIPELINE_CACHE_HEADER); + header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE; + header->vendor_id = g_vulkan_context->GetDeviceProperties().vendorID; + header->device_id = g_vulkan_context->GetDeviceProperties().deviceID; + std::memcpy(header->uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE); +} + +ShaderCache::ShaderCache() = default; + +ShaderCache::~ShaderCache() +{ + CloseShaderCache(); + FlushPipelineCache(); + ClosePipelineCache(); +} + +bool ShaderCache::CacheIndexKey::operator==(const CacheIndexKey& key) const +{ + return (source_hash_low == key.source_hash_low && source_hash_high == key.source_hash_high && + source_length == key.source_length && shader_type == key.shader_type); +} + +bool ShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const +{ + return (source_hash_low != key.source_hash_low || source_hash_high != key.source_hash_high || + source_length != key.source_length || shader_type != key.shader_type); +} + +void ShaderCache::Create(std::string_view base_path, bool debug) +{ + Assert(!g_vulkan_shader_cache); + g_vulkan_shader_cache.reset(new ShaderCache()); + g_vulkan_shader_cache->Open(base_path, debug); +} + +void ShaderCache::Destroy() +{ + g_vulkan_shader_cache.reset(); +} + +void ShaderCache::Open(std::string_view base_path, bool debug) +{ + m_debug = debug; + + m_pipeline_cache_filename = GetPipelineCacheBaseFileName(base_path, debug); + + const std::string base_filename = GetShaderCacheBaseFileName(base_path, debug); + const std::string index_filename = base_filename + ".idx"; + const std::string blob_filename = base_filename + ".bin"; + + if (!ReadExistingShaderCache(index_filename, blob_filename)) + CreateNewShaderCache(index_filename, blob_filename); + + if (!ReadExistingPipelineCache()) + CreateNewPipelineCache(); +} + +VkPipelineCache ShaderCache::GetPipelineCache(bool set_dirty /*= true*/) +{ + if (m_pipeline_cache == VK_NULL_HANDLE) + return VK_NULL_HANDLE; + + m_pipeline_cache_dirty |= set_dirty; + return m_pipeline_cache; +} + +bool ShaderCache::CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename) +{ + if (FileSystem::FileExists(index_filename.c_str())) + { + Log_WarningPrintf("Removing existing index file '%s'", index_filename.c_str()); + FileSystem::DeleteFile(index_filename.c_str()); + } + if (FileSystem::FileExists(blob_filename.c_str())) + { + Log_WarningPrintf("Removing existing blob file '%s'", blob_filename.c_str()); + FileSystem::DeleteFile(blob_filename.c_str()); + } + + m_index_file = 
+  if (!m_index_file)
+  {
+    Log_ErrorPrintf("Failed to open index file '%s' for writing", index_filename.c_str());
+    return false;
+  }
+
+  const u32 index_version = FILE_VERSION;
+  VK_PIPELINE_CACHE_HEADER header;
+  FillPipelineCacheHeader(&header);
+
+  if (std::fwrite(&index_version, sizeof(index_version), 1, m_index_file) != 1 ||
+      std::fwrite(&header, sizeof(header), 1, m_index_file) != 1)
+  {
+    Log_ErrorPrintf("Failed to write header to index file '%s'", index_filename.c_str());
+    std::fclose(m_index_file);
+    m_index_file = nullptr;
+    FileSystem::DeleteFile(index_filename.c_str());
+    return false;
+  }
+
+  m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "w+b");
+  if (!m_blob_file)
+  {
+    Log_ErrorPrintf("Failed to open blob file '%s' for writing", blob_filename.c_str());
+    std::fclose(m_index_file);
+    m_index_file = nullptr;
+    FileSystem::DeleteFile(index_filename.c_str());
+    return false;
+  }
+
+  return true;
+}
+
+bool ShaderCache::ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename)
+{
+  m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "r+b");
+  if (!m_index_file)
+    return false;
+
+  u32 file_version;
+  if (std::fread(&file_version, sizeof(file_version), 1, m_index_file) != 1 || file_version != FILE_VERSION)
+  {
+    Log_ErrorPrintf("Bad file version in '%s'", index_filename.c_str());
+    std::fclose(m_index_file);
+    m_index_file = nullptr;
+    return false;
+  }
+
+  VK_PIPELINE_CACHE_HEADER header;
+  if (std::fread(&header, sizeof(header), 1, m_index_file) != 1 || !ValidatePipelineCacheHeader(header))
+  {
+    Log_ErrorPrintf("Mismatched pipeline cache header in '%s' (GPU/driver changed?)", index_filename.c_str());
+    std::fclose(m_index_file);
+    m_index_file = nullptr;
+    return false;
+  }
+
+  m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "a+b");
+  if (!m_blob_file)
+  {
+    Log_ErrorPrintf("Blob file '%s' is missing", blob_filename.c_str());
+    std::fclose(m_index_file);
+    m_index_file = nullptr;
+    return false;
+  }
+
+  std::fseek(m_blob_file, 0, SEEK_END);
+  const u32 blob_file_size = static_cast<u32>(std::ftell(m_blob_file));
+
+  for (;;)
+  {
+    CacheIndexEntry entry;
+    if (std::fread(&entry, sizeof(entry), 1, m_index_file) != 1 ||
+        (entry.file_offset + entry.blob_size) > blob_file_size)
+    {
+      if (std::feof(m_index_file))
+        break;
+
+      Log_ErrorPrintf("Failed to read entry from '%s', corrupt file?", index_filename.c_str());
+      m_index.clear();
+      std::fclose(m_blob_file);
+      m_blob_file = nullptr;
+      std::fclose(m_index_file);
+      m_index_file = nullptr;
+      return false;
+    }
+
+    const CacheIndexKey key{entry.source_hash_low, entry.source_hash_high, entry.source_length,
+                            static_cast<ShaderCompiler::Type>(entry.shader_type)};
+    const CacheIndexData data{entry.file_offset, entry.blob_size};
+    m_index.emplace(key, data);
+  }
+
+  // ensure we don't write before seeking
+  std::fseek(m_index_file, 0, SEEK_END);
+
+  Log_InfoPrintf("Read %zu entries from '%s'", m_index.size(), index_filename.c_str());
+  return true;
+}
+
+void ShaderCache::CloseShaderCache()
+{
+  if (m_index_file)
+  {
+    std::fclose(m_index_file);
+    m_index_file = nullptr;
+  }
+  if (m_blob_file)
+  {
+    std::fclose(m_blob_file);
+    m_blob_file = nullptr;
+  }
+}
+
+bool ShaderCache::CreateNewPipelineCache()
+{
+  if (FileSystem::FileExists(m_pipeline_cache_filename.c_str()))
+  {
+    Log_WarningPrintf("Removing existing pipeline cache '%s'", m_pipeline_cache_filename.c_str());
+    FileSystem::DeleteFile(m_pipeline_cache_filename.c_str());
+  }
+
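+  // vkCreatePipelineCache with initialDataSize == 0 (below) starts from an empty cache;
+  // ReadExistingPipelineCache() shows the seeded variant, where a previously serialized blob is
+  // handed back to the driver. A sketch of that form, with a hypothetical `blob` byte vector:
+  //   const VkPipelineCacheCreateInfo seeded{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0,
+  //                                          blob.size(), blob.data()};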
+  const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, 0, nullptr};
+  VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
+    return false;
+  }
+
+  m_pipeline_cache_dirty = true;
+  return true;
+}
+
+bool ShaderCache::ReadExistingPipelineCache()
+{
+  std::optional<std::vector<u8>> data = FileSystem::ReadBinaryFile(m_pipeline_cache_filename.c_str());
+  if (!data.has_value())
+    return false;
+
+  if (data->size() < sizeof(VK_PIPELINE_CACHE_HEADER))
+  {
+    Log_ErrorPrintf("Pipeline cache at '%s' is too small", m_pipeline_cache_filename.c_str());
+    return false;
+  }
+
+  VK_PIPELINE_CACHE_HEADER header;
+  std::memcpy(&header, data->data(), sizeof(header));
+  if (!ValidatePipelineCacheHeader(header))
+    return false;
+
+  const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, data->size(),
+                                     data->data()};
+  VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
+    return false;
+  }
+
+  return true;
+}
+
+bool ShaderCache::FlushPipelineCache()
+{
+  if (m_pipeline_cache == VK_NULL_HANDLE || !m_pipeline_cache_dirty)
+    return false;
+
+  size_t data_size;
+  VkResult res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, nullptr);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() failed: ");
+    return false;
+  }
+
+  std::vector<u8> data(data_size);
+  res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, data.data());
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() (2) failed: ");
+    return false;
+  }
+
+  data.resize(data_size);
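+
+  // The two calls above are the standard Vulkan size-query idiom: the first call reports the
+  // blob size, the second fills the buffer. The trailing resize() covers drivers that return
+  // less data on the second call than they originally advertised.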
+
+  // Save disk writes if it hasn't changed, think of the poor SSDs.
+  std::optional<std::vector<u8>> existing_data = FileSystem::ReadBinaryFile(m_pipeline_cache_filename.c_str());
+  if (!existing_data.has_value() || existing_data->size() != data_size ||
+      std::memcmp(existing_data->data(), data.data(), data_size) != 0)
+  {
+    Log_InfoPrintf("Writing %zu bytes to '%s'", data_size, m_pipeline_cache_filename.c_str());
+    if (!FileSystem::WriteBinaryFile(m_pipeline_cache_filename.c_str(), data.data(), data.size()))
+    {
+      Log_ErrorPrintf("Failed to write pipeline cache to '%s'", m_pipeline_cache_filename.c_str());
+      return false;
+    }
+  }
+  else
+  {
+    Log_WarningPrintf("Skipping updating pipeline cache '%s' due to no changes.", m_pipeline_cache_filename.c_str());
+  }
+
+  m_pipeline_cache_dirty = false;
+  return true;
+}
+
+void ShaderCache::ClosePipelineCache()
+{
+  if (m_pipeline_cache == VK_NULL_HANDLE)
+    return;
+
+  vkDestroyPipelineCache(g_vulkan_context->GetDevice(), m_pipeline_cache, nullptr);
+  m_pipeline_cache = VK_NULL_HANDLE;
+}
+
+std::string ShaderCache::GetShaderCacheBaseFileName(const std::string_view& base_path, bool debug)
+{
+  std::string base_filename(base_path);
+  base_filename += FS_OSPATH_SEPERATOR_CHARACTER;
+  base_filename += "vulkan_shaders";
+
+  if (debug)
+    base_filename += "_debug";
+
+  return base_filename;
+}
+
+std::string ShaderCache::GetPipelineCacheBaseFileName(const std::string_view& base_path, bool debug)
+{
+  std::string base_filename(base_path);
+  base_filename += FS_OSPATH_SEPERATOR_CHARACTER;
+  base_filename += "vulkan_pipelines";
+
+  if (debug)
+    base_filename += "_debug";
+
+  base_filename += ".bin";
+  return base_filename;
+}
+
+ShaderCache::CacheIndexKey ShaderCache::GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code)
+{
+  union
+  {
+    struct
+    {
+      u64 hash_low;
+      u64 hash_high;
+    };
+    u8 hash[16];
+  };
+
+  MD5Digest digest;
+  digest.Update(shader_code.data(), static_cast<u32>(shader_code.length()));
+  digest.Final(hash);
+
+  return CacheIndexKey{hash_low, hash_high, static_cast<u32>(shader_code.length()), type};
+}
+
+std::optional<SPIRVCodeVector> ShaderCache::GetShaderSPV(ShaderCompiler::Type type,
+                                                         std::string_view shader_code)
+{
+  const auto key = GetCacheKey(type, shader_code);
+  auto iter = m_index.find(key);
+  if (iter == m_index.end())
+    return CompileAndAddShaderSPV(key, shader_code);
+
+  SPIRVCodeVector spv(iter->second.blob_size);
+  if (std::fseek(m_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
+      std::fread(spv.data(), sizeof(SPIRVCodeType), iter->second.blob_size, m_blob_file) != iter->second.blob_size)
+  {
+    Log_ErrorPrintf("Read blob from file failed, recompiling");
+    return ShaderCompiler::CompileShader(type, shader_code, m_debug);
+  }
+
+  return spv;
+}
+
+VkShaderModule ShaderCache::GetShaderModule(ShaderCompiler::Type type, std::string_view shader_code)
+{
+  std::optional<SPIRVCodeVector> spv = GetShaderSPV(type, shader_code);
+  if (!spv.has_value())
+    return VK_NULL_HANDLE;
+
+  const VkShaderModuleCreateInfo ci{VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, nullptr, 0,
+                                    spv->size() * sizeof(SPIRVCodeType), spv->data()};
+
+  VkShaderModule mod;
+  VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &ci, nullptr, &mod);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreateShaderModule() failed: ");
+    return VK_NULL_HANDLE;
+  }
+
+  return mod;
+}
+
+VkShaderModule ShaderCache::GetVertexShader(std::string_view shader_code)
+{
+  return GetShaderModule(ShaderCompiler::Type::Vertex, std::move(shader_code));
+}
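+
+// Example usage (a sketch; the GLSL strings and pipeline setup are the caller's):
+//   VkShaderModule vs = g_vulkan_shader_cache->GetVertexShader(vs_source);
+//   VkShaderModule fs = g_vulkan_shader_cache->GetFragmentShader(fs_source);
+//   // ... build the pipeline against GetPipelineCache(); the modules can then be destroyed:
+//   vkDestroyShaderModule(g_vulkan_context->GetDevice(), vs, nullptr);
+//   vkDestroyShaderModule(g_vulkan_context->GetDevice(), fs, nullptr);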
+
+VkShaderModule ShaderCache::GetGeometryShader(std::string_view shader_code)
+{
+  return GetShaderModule(ShaderCompiler::Type::Geometry, std::move(shader_code));
+}
+
+VkShaderModule ShaderCache::GetFragmentShader(std::string_view shader_code)
+{
+  return GetShaderModule(ShaderCompiler::Type::Fragment, std::move(shader_code));
+}
+
+VkShaderModule ShaderCache::GetComputeShader(std::string_view shader_code)
+{
+  return GetShaderModule(ShaderCompiler::Type::Compute, std::move(shader_code));
+}
+
+std::optional<SPIRVCodeVector> ShaderCache::CompileAndAddShaderSPV(const CacheIndexKey& key,
+                                                                   std::string_view shader_code)
+{
+  std::optional<SPIRVCodeVector> spv = ShaderCompiler::CompileShader(key.shader_type, shader_code, m_debug);
+  if (!spv.has_value())
+    return {};
+
+  if (!m_blob_file || std::fseek(m_blob_file, 0, SEEK_END) != 0)
+    return spv;
+
+  CacheIndexData data;
+  data.file_offset = static_cast<u32>(std::ftell(m_blob_file));
+  data.blob_size = static_cast<u32>(spv->size());
+
+  CacheIndexEntry entry = {};
+  entry.source_hash_low = key.source_hash_low;
+  entry.source_hash_high = key.source_hash_high;
+  entry.source_length = key.source_length;
+  entry.shader_type = static_cast<u32>(key.shader_type);
+  entry.blob_size = data.blob_size;
+  entry.file_offset = data.file_offset;
+
+  if (std::fwrite(spv->data(), sizeof(SPIRVCodeType), entry.blob_size, m_blob_file) != entry.blob_size ||
+      std::fflush(m_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_index_file) != 1 ||
+      std::fflush(m_index_file) != 0)
+  {
+    Log_ErrorPrintf("Failed to write shader blob to file");
+    return spv;
+  }
+
+  m_index.emplace(key, data);
+  return spv;
+}
+
+} // namespace Vulkan
\ No newline at end of file
diff --git a/src/common/vulkan/shader_cache.h b/src/common/vulkan/shader_cache.h
new file mode 100644
index 000000000..34db8a12a
--- /dev/null
+++ b/src/common/vulkan/shader_cache.h
@@ -0,0 +1,100 @@
+#pragma once
+#include "../hash_combine.h"
+#include "../types.h"
+#include "shader_compiler.h"
+#include "vulkan_loader.h"
+#include <cstdio>
+#include <memory>
+#include <optional>
+#include <string>
+#include <unordered_map>
+
+namespace Vulkan {
+
+class ShaderCache
+{
+public:
+  ~ShaderCache();
+
+  static void Create(std::string_view base_path, bool debug);
+  static void Destroy();
+
+  /// Returns a handle to the pipeline cache. Set set_dirty to true if you are planning on writing to it externally.
+  VkPipelineCache GetPipelineCache(bool set_dirty = true);
+
+  /// Writes pipeline cache to file, saving all newly compiled pipelines.
+  bool FlushPipelineCache();
+
+  std::optional<ShaderCompiler::SPIRVCodeVector> GetShaderSPV(ShaderCompiler::Type type, std::string_view shader_code);
+  VkShaderModule GetShaderModule(ShaderCompiler::Type type, std::string_view shader_code);
+
+  VkShaderModule GetVertexShader(std::string_view shader_code);
+  VkShaderModule GetGeometryShader(std::string_view shader_code);
+  VkShaderModule GetFragmentShader(std::string_view shader_code);
+  VkShaderModule GetComputeShader(std::string_view shader_code);
+
+private:
+  static constexpr u32 FILE_VERSION = 1;
+
+  struct CacheIndexKey
+  {
+    u64 source_hash_low;
+    u64 source_hash_high;
+    u32 source_length;
+    ShaderCompiler::Type shader_type;
+
+    bool operator==(const CacheIndexKey& key) const;
+    bool operator!=(const CacheIndexKey& key) const;
+  };
+
+  struct CacheIndexEntryHasher
+  {
+    std::size_t operator()(const CacheIndexKey& e) const noexcept
+    {
+      std::size_t h = 0;
+      hash_combine(h, e.source_hash_low, e.source_hash_high, e.source_length, e.shader_type);
+      return h;
+    }
+  };
+
+  struct CacheIndexData
+  {
+    u32 file_offset;
+    u32 blob_size;
+  };
+
+  using CacheIndex = std::unordered_map<CacheIndexKey, CacheIndexData, CacheIndexEntryHasher>;
+
+  ShaderCache();
+
+  static std::string GetShaderCacheBaseFileName(const std::string_view& base_path, bool debug);
+  static std::string GetPipelineCacheBaseFileName(const std::string_view& base_path, bool debug);
+  static CacheIndexKey GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code);
+
+  void Open(std::string_view base_path, bool debug);
+
+  bool CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename);
+  bool ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename);
+  void CloseShaderCache();
+
+  bool CreateNewPipelineCache();
+  bool ReadExistingPipelineCache();
+  void ClosePipelineCache();
+
+  std::optional<ShaderCompiler::SPIRVCodeVector> CompileAndAddShaderSPV(const CacheIndexKey& key,
+                                                                        std::string_view shader_code);
+
+  std::FILE* m_index_file = nullptr;
+  std::FILE* m_blob_file = nullptr;
+  std::string m_pipeline_cache_filename;
+
+  CacheIndex m_index;
+
+  VkPipelineCache m_pipeline_cache = VK_NULL_HANDLE;
+  bool m_debug = false;
+  bool m_pipeline_cache_dirty = false;
+};
+
+} // namespace Vulkan
+
+extern std::unique_ptr<Vulkan::ShaderCache> g_vulkan_shader_cache;
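+
+// On-disk layout (a summary of the implementation, not normative): the .idx file holds a u32
+// FILE_VERSION, a VK_PIPELINE_CACHE_HEADER snapshot of the device, then packed CacheIndexEntry
+// records; the .bin file holds the raw SPIR-V blobs that each entry's file_offset/blob_size
+// point into. A vendor/device/UUID mismatch in the header invalidates the whole cache.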
diff --git a/src/common/vulkan/shader_compiler.cpp b/src/common/vulkan/shader_compiler.cpp
new file mode 100644
index 000000000..bde1b32eb
--- /dev/null
+++ b/src/common/vulkan/shader_compiler.cpp
@@ -0,0 +1,173 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
+
+#include "shader_compiler.h"
+#include "../assert.h"
+#include "../log.h"
+#include "../string_util.h"
+#include "util.h"
+#include <cstring>
+#include <fstream>
+#include <memory>
+Log_SetChannel(Vulkan::ShaderCompiler);
+
+// glslang includes
+#include "SPIRV/GlslangToSpv.h"
+#include "StandAlone/ResourceLimits.h"
+#include "glslang/Public/ShaderLang.h"
+
+namespace Vulkan::ShaderCompiler {
+// Registers itself for cleanup via atexit
+bool InitializeGlslang();
+
+static unsigned s_next_bad_shader_id = 1;
+
+static std::optional<SPIRVCodeVector> CompileShaderToSPV(EShLanguage stage, const char* stage_filename,
+                                                         std::string_view source)
+{
+  if (!InitializeGlslang())
+    return std::nullopt;
+
+  std::unique_ptr<glslang::TShader> shader = std::make_unique<glslang::TShader>(stage);
+  std::unique_ptr<glslang::TProgram> program;
+  glslang::TShader::ForbidIncluder includer;
+  EProfile profile = ECoreProfile;
+  EShMessages messages = static_cast<EShMessages>(EShMsgDefault | EShMsgSpvRules | EShMsgVulkanRules);
+  int default_version = 450;
+
+  std::string full_source_code;
+  const char* pass_source_code = source.data();
+  int pass_source_code_length = static_cast<int>(source.size());
+  shader->setStringsWithLengths(&pass_source_code, &pass_source_code_length, 1);
+
+  auto DumpBadShader = [&](const char* msg) {
+    std::string filename = StringUtil::StdStringFromFormat("bad_shader_%u.txt", s_next_bad_shader_id++);
+    Log::Writef("Vulkan", "CompileShaderToSPV", LOGLEVEL_ERROR, "%s, writing to %s", msg, filename.c_str());
+
+    std::ofstream ofs(filename.c_str(), std::ofstream::out | std::ofstream::binary);
+    if (ofs.is_open())
+    {
+      ofs << source;
+      ofs << "\n";
+
+      ofs << msg << std::endl;
+      ofs << "Shader Info Log:" << std::endl;
+      ofs << shader->getInfoLog() << std::endl;
+      ofs << shader->getInfoDebugLog() << std::endl;
+      if (program)
+      {
+        ofs << "Program Info Log:" << std::endl;
+        ofs << program->getInfoLog() << std::endl;
+        ofs << program->getInfoDebugLog() << std::endl;
+      }
+
+      ofs.close();
+    }
+  };
+
+  if (!shader->parse(&glslang::DefaultTBuiltInResource, default_version, profile, false, true, messages, includer))
+  {
+    DumpBadShader("Failed to parse shader");
+    return std::nullopt;
+  }
+
+  // Even though there's only a single shader, we still need to link it to generate SPV
+  program = std::make_unique<glslang::TProgram>();
+  program->addShader(shader.get());
+  if (!program->link(messages))
+  {
+    DumpBadShader("Failed to link program");
+    return std::nullopt;
+  }
+
+  glslang::TIntermediate* intermediate = program->getIntermediate(stage);
+  if (!intermediate)
+  {
+    DumpBadShader("Failed to generate SPIR-V");
+    return std::nullopt;
+  }
+
+  SPIRVCodeVector out_code;
+  spv::SpvBuildLogger logger;
+  glslang::GlslangToSpv(*intermediate, out_code, &logger);
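+
+  // parse() -> TProgram::link() -> getIntermediate() -> GlslangToSpv() is the canonical glslang
+  // pipeline; the EShMsgSpvRules | EShMsgVulkanRules flags above are what select Vulkan (rather
+  // than OpenGL) semantics for the generated SPIR-V.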
+
+  // Write out messages
+  // Temporary: skip if it contains "Warning, version 450 is not yet complete; most version-specific
+  // features are present, but some are missing."
+  if (std::strlen(shader->getInfoLog()) > 108)
+    Log_WarningPrintf("Shader info log: %s", shader->getInfoLog());
+  if (std::strlen(shader->getInfoDebugLog()) > 0)
+    Log_WarningPrintf("Shader debug info log: %s", shader->getInfoDebugLog());
+  if (std::strlen(program->getInfoLog()) > 25)
+    Log_WarningPrintf("Program info log: %s", program->getInfoLog());
+  if (std::strlen(program->getInfoDebugLog()) > 0)
+    Log_WarningPrintf("Program debug info log: %s", program->getInfoDebugLog());
+  std::string spv_messages = logger.getAllMessages();
+  if (!spv_messages.empty())
+    Log_WarningPrintf("SPIR-V conversion messages: %s", spv_messages.c_str());
+
+  return out_code;
+}
+
+bool InitializeGlslang()
+{
+  static bool glslang_initialized = false;
+  if (glslang_initialized)
+    return true;
+
+  if (!glslang::InitializeProcess())
+  {
+    Panic("Failed to initialize glslang shader compiler");
+    return false;
+  }
+
+  std::atexit([]() { glslang::FinalizeProcess(); });
+
+  glslang_initialized = true;
+  return true;
+}
+
+std::optional<SPIRVCodeVector> CompileVertexShader(std::string_view source_code)
+{
+  return CompileShaderToSPV(EShLangVertex, "vs", source_code);
+}
+
+std::optional<SPIRVCodeVector> CompileGeometryShader(std::string_view source_code)
+{
+  return CompileShaderToSPV(EShLangGeometry, "gs", source_code);
+}
+
+std::optional<SPIRVCodeVector> CompileFragmentShader(std::string_view source_code)
+{
+  return CompileShaderToSPV(EShLangFragment, "ps", source_code);
+}
+
+std::optional<SPIRVCodeVector> CompileComputeShader(std::string_view source_code)
+{
+  return CompileShaderToSPV(EShLangCompute, "cs", source_code);
+}
+
+std::optional<SPIRVCodeVector> CompileShader(Type type, std::string_view source_code, bool debug)
+{
+  switch (type)
+  {
+    case Type::Vertex:
+      return CompileShaderToSPV(EShLangVertex, "vs", source_code);
+
+    case Type::Geometry:
+      return CompileShaderToSPV(EShLangGeometry, "gs", source_code);
+
+    case Type::Fragment:
+      return CompileShaderToSPV(EShLangFragment, "ps", source_code);
+
+    case Type::Compute:
+      return CompileShaderToSPV(EShLangCompute, "cs", source_code);
+
+    default:
+      return std::nullopt;
+  }
+}
+
+} // namespace Vulkan::ShaderCompiler
diff --git a/src/common/vulkan/shader_compiler.h b/src/common/vulkan/shader_compiler.h
new file mode 100644
index 000000000..c39a06d1b
--- /dev/null
+++ b/src/common/vulkan/shader_compiler.h
@@ -0,0 +1,42 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
+
+#pragma once
+
+#include "../types.h"
+#include <optional>
+#include <string_view>
+#include <vector>
+
+namespace Vulkan::ShaderCompiler {
+
+// Shader types
+enum class Type
+{
+  Vertex,
+  Geometry,
+  Fragment,
+  Compute
+};
+
+// SPIR-V compiled code type
+using SPIRVCodeType = u32;
+using SPIRVCodeVector = std::vector<SPIRVCodeType>;
+
+// Compile a vertex shader to SPIR-V.
+std::optional<SPIRVCodeVector> CompileVertexShader(std::string_view source_code);
+
+// Compile a geometry shader to SPIR-V.
+std::optional<SPIRVCodeVector> CompileGeometryShader(std::string_view source_code);
+
+// Compile a fragment shader to SPIR-V.
+std::optional<SPIRVCodeVector> CompileFragmentShader(std::string_view source_code);
+
+// Compile a compute shader to SPIR-V.
+std::optional<SPIRVCodeVector> CompileComputeShader(std::string_view source_code);
+
+std::optional<SPIRVCodeVector> CompileShader(Type type, std::string_view source_code, bool debug);
+
+} // namespace Vulkan::ShaderCompiler
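+
+// Example (a sketch): compiling a minimal Vulkan GLSL fragment shader through this API.
+//   static constexpr char src[] = "#version 450 core\n"
+//                                 "layout(location = 0) out vec4 o_col;\n"
+//                                 "void main() { o_col = vec4(1.0); }\n";
+//   auto spv = Vulkan::ShaderCompiler::CompileFragmentShader(src);
+//   // spv.has_value() -> vector of u32 SPIR-V words, ready for VkShaderModuleCreateInfo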
diff --git a/src/common/vulkan/staging_buffer.cpp b/src/common/vulkan/staging_buffer.cpp
new file mode 100644
index 000000000..7272b794d
--- /dev/null
+++ b/src/common/vulkan/staging_buffer.cpp
@@ -0,0 +1,253 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
+
+#include "staging_buffer.h"
+#include "../assert.h"
+#include "context.h"
+#include "util.h"
+
+namespace Vulkan {
+StagingBuffer::StagingBuffer() = default;
+
+StagingBuffer::StagingBuffer(StagingBuffer&& move)
+  : m_type(move.m_type), m_buffer(move.m_buffer), m_memory(move.m_memory), m_size(move.m_size),
+    m_coherent(move.m_coherent), m_map_pointer(move.m_map_pointer), m_map_offset(move.m_map_offset),
+    m_map_size(move.m_map_size)
+{
+  move.m_type = Type::Upload;
+  move.m_buffer = VK_NULL_HANDLE;
+  move.m_memory = VK_NULL_HANDLE;
+  move.m_size = 0;
+  move.m_coherent = false;
+  move.m_map_pointer = nullptr;
+  move.m_map_offset = 0;
+  move.m_map_size = 0;
+}
+
+StagingBuffer::~StagingBuffer()
+{
+  if (IsValid())
+    Destroy(true);
+}
+
+StagingBuffer& StagingBuffer::operator=(StagingBuffer&& move)
+{
+  if (IsValid())
+    Destroy(true);
+
+  std::swap(m_type, move.m_type);
+  std::swap(m_buffer, move.m_buffer);
+  std::swap(m_memory, move.m_memory);
+  std::swap(m_size, move.m_size);
+  std::swap(m_coherent, move.m_coherent);
+  std::swap(m_map_pointer, move.m_map_pointer);
+  std::swap(m_map_offset, move.m_map_offset);
+  std::swap(m_map_size, move.m_map_size);
+  return *this;
+}
+
+bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size)
+{
+  m_map_offset = offset;
+  if (size == VK_WHOLE_SIZE)
+    m_map_size = m_size - offset;
+  else
+    m_map_size = size;
+
+  Assert(!m_map_pointer);
+  Assert(m_map_offset + m_map_size <= m_size);
+
+  void* map_pointer;
+  VkResult res = vkMapMemory(g_vulkan_context->GetDevice(), m_memory, m_map_offset, m_map_size, 0, &map_pointer);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
+    return false;
+  }
+
+  m_map_pointer = reinterpret_cast<char*>(map_pointer);
+  return true;
+}
+
+void StagingBuffer::Unmap()
+{
+  Assert(m_map_pointer);
+
+  vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
+  m_map_pointer = nullptr;
+  m_map_offset = 0;
+  m_map_size = 0;
+}
+
+void StagingBuffer::FlushCPUCache(VkDeviceSize offset, VkDeviceSize size)
+{
+  Assert(offset >= m_map_offset);
+  if (m_coherent)
+    return;
+
+  VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory, offset - m_map_offset, size};
+  vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
+}
+
+void StagingBuffer::InvalidateGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits dest_access_flags,
+                                       VkPipelineStageFlagBits dest_pipeline_stage, VkDeviceSize offset,
+                                       VkDeviceSize size)
+{
+  if (m_coherent)
+    return;
+
+  Assert((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
+  Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_HOST_WRITE_BIT, dest_access_flags, offset, size,
+                            VK_PIPELINE_STAGE_HOST_BIT, dest_pipeline_stage);
+}
+
+void StagingBuffer::PrepareForGPUWrite(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
+                                       VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset,
+                                       VkDeviceSize size)
+{
+  if (m_coherent)
+    return;
+
+  Assert((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
+  Util::BufferMemoryBarrier(command_buffer, m_buffer, 0, dst_access_flags, offset, size,
+                            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, dst_pipeline_stage);
+}
+
+void StagingBuffer::FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits src_access_flags,
+                                  VkPipelineStageFlagBits src_pipeline_stage, VkDeviceSize offset, VkDeviceSize size)
+{
+  if (m_coherent)
+    return;
+
+  Assert((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
+  Util::BufferMemoryBarrier(command_buffer, m_buffer, src_access_flags, VK_ACCESS_HOST_READ_BIT, offset, size,
+                            src_pipeline_stage, VK_PIPELINE_STAGE_HOST_BIT);
+}
+
+void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size)
+{
+  Assert(offset >= m_map_offset);
+  if (m_coherent)
+    return;
+
+  VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory, offset - m_map_offset, size};
+  vkInvalidateMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
+}
+
+void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches)
+{
+  Assert((offset + size) <= m_size);
+  Assert(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
+  if (invalidate_caches)
+    InvalidateCPUCache(offset, size);
+
+  memcpy(data, m_map_pointer + (offset - m_map_offset), size);
+}
+
+void StagingBuffer::Write(VkDeviceSize offset, const void* data, size_t size, bool invalidate_caches)
+{
+  Assert((offset + size) <= m_size);
+  Assert(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
+
+  memcpy(m_map_pointer + (offset - m_map_offset), data, size);
+  if (invalidate_caches)
+    FlushCPUCache(offset, size);
+}
+
+bool StagingBuffer::AllocateBuffer(Type type, VkDeviceSize size, VkBufferUsageFlags usage, VkBuffer* out_buffer,
+                                   VkDeviceMemory* out_memory, bool* out_coherent)
+{
+  VkBufferCreateInfo buffer_create_info = {
+    VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType
+    nullptr,                              // const void* pNext
+    0,                                    // VkBufferCreateFlags flags
+    size,                                 // VkDeviceSize size
+    usage,                                // VkBufferUsageFlags usage
+    VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode sharingMode
+    0,                                    // uint32_t queueFamilyIndexCount
+    nullptr                               // const uint32_t* pQueueFamilyIndices
+  };
+  VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, out_buffer);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
+    return false;
+  }
+
+  VkMemoryRequirements requirements;
+  vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), *out_buffer, &requirements);
+
+  u32 type_index;
+  if (type == Type::Upload)
+    type_index = g_vulkan_context->GetUploadMemoryType(requirements.memoryTypeBits, out_coherent);
+  else
+    type_index = g_vulkan_context->GetReadbackMemoryType(requirements.memoryTypeBits, out_coherent);
+
+  VkMemoryAllocateInfo memory_allocate_info = {
+    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
+    nullptr,                                // const void* pNext
+    requirements.size,                      // VkDeviceSize allocationSize
+    type_index                              // uint32_t memoryTypeIndex
+  };
+  res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, out_memory);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
+    vkDestroyBuffer(g_vulkan_context->GetDevice(), *out_buffer, nullptr);
+    return false;
+  }
+
+  res = vkBindBufferMemory(g_vulkan_context->GetDevice(), *out_buffer, *out_memory, 0);
+  if (res != VK_SUCCESS)
+  {
LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: "); + vkDestroyBuffer(g_vulkan_context->GetDevice(), *out_buffer, nullptr); + vkFreeMemory(g_vulkan_context->GetDevice(), *out_memory, nullptr); + return false; + } + + return true; +} + +bool StagingBuffer::Create(Type type, VkDeviceSize size, VkBufferUsageFlags usage) +{ + if (!AllocateBuffer(type, size, usage, &m_buffer, &m_memory, &m_coherent)) + return false; + + m_type = type; + m_size = size; + return true; +} + +void StagingBuffer::Destroy(bool defer /* = true */) +{ + if (!IsValid()) + return; + + // Unmap before destroying + if (m_map_pointer) + Unmap(); + + if (defer) + g_vulkan_context->DeferBufferDestruction(m_buffer); + else + vkDestroyBuffer(g_vulkan_context->GetDevice(), m_buffer, nullptr); + + if (defer) + g_vulkan_context->DeferDeviceMemoryDestruction(m_memory); + else + vkFreeMemory(g_vulkan_context->GetDevice(), m_memory, nullptr); + + m_type = Type::Upload; + m_buffer = VK_NULL_HANDLE; + m_memory = VK_NULL_HANDLE; + m_size = 0; + m_coherent = false; + m_map_pointer = nullptr; + m_map_offset = 0; + m_map_size = 0; +} + +} // namespace Vulkan diff --git a/src/common/vulkan/staging_buffer.h b/src/common/vulkan/staging_buffer.h new file mode 100644 index 000000000..2d263acd7 --- /dev/null +++ b/src/common/vulkan/staging_buffer.h @@ -0,0 +1,90 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included. + +#pragma once +#include "../types.h" +#include "vulkan_loader.h" +#include + +namespace Vulkan { + +class StagingBuffer +{ +public: + enum class Type + { + Upload, + Readback, + Mutable + }; + + StagingBuffer(); + StagingBuffer(StagingBuffer&& move); + StagingBuffer(const StagingBuffer&) = delete; + virtual ~StagingBuffer(); + + StagingBuffer& operator=(StagingBuffer&& move); + StagingBuffer& operator=(const StagingBuffer&) = delete; + + ALWAYS_INLINE Type GetType() const { return m_type; } + ALWAYS_INLINE VkDeviceSize GetSize() const { return m_size; } + ALWAYS_INLINE VkBuffer GetBuffer() const { return m_buffer; } + ALWAYS_INLINE bool IsMapped() const { return m_map_pointer != nullptr; } + ALWAYS_INLINE const char* GetMapPointer() const { return m_map_pointer; } + ALWAYS_INLINE char* GetMapPointer() { return m_map_pointer; } + ALWAYS_INLINE VkDeviceSize GetMapOffset() const { return m_map_offset; } + ALWAYS_INLINE VkDeviceSize GetMapSize() const { return m_map_size; } + ALWAYS_INLINE bool IsValid() const { return (m_buffer != VK_NULL_HANDLE); } + + bool Map(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE); + void Unmap(); + + // Upload part 1: Prepare from device read from the CPU side + void FlushCPUCache(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE); + + // Upload part 2: Prepare for device read from the GPU side + // Implicit when submitting the command buffer, so rarely needed. 
+  // Upload part 2: Prepare for device read from the GPU side
+  // Implicit when submitting the command buffer, so rarely needed.
+  void InvalidateGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
+                          VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset = 0,
+                          VkDeviceSize size = VK_WHOLE_SIZE);
+
+  // Readback part 0: Prepare for GPU usage (if necessary)
+  void PrepareForGPUWrite(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
+                          VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset = 0,
+                          VkDeviceSize size = VK_WHOLE_SIZE);
+
+  // Readback part 1: Prepare for host readback from the GPU side
+  void FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits src_access_flags,
+                     VkPipelineStageFlagBits src_pipeline_stage, VkDeviceSize offset = 0,
+                     VkDeviceSize size = VK_WHOLE_SIZE);
+
+  // Readback part 2: Prepare for host readback from the CPU side
+  void InvalidateCPUCache(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
+
+  // offset is from the start of the buffer, not from the map offset
+  void Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches = true);
+  void Write(VkDeviceSize offset, const void* data, size_t size, bool invalidate_caches = true);
+
+  // Creates the buffer and allocates/binds the backing device memory.
+  bool Create(Type type, VkDeviceSize size, VkBufferUsageFlags usage);
+
+  void Destroy(bool defer = true);
+
+  // Allocates the resources needed to create a staging buffer.
+  static bool AllocateBuffer(Type type, VkDeviceSize size, VkBufferUsageFlags usage, VkBuffer* out_buffer,
+                             VkDeviceMemory* out_memory, bool* out_coherent);
+
+protected:
+  Type m_type = Type::Upload;
+  VkBuffer m_buffer = VK_NULL_HANDLE;
+  VkDeviceMemory m_memory = VK_NULL_HANDLE;
+  VkDeviceSize m_size = 0;
+  bool m_coherent = false;
+
+  char* m_map_pointer = nullptr;
+  VkDeviceSize m_map_offset = 0;
+  VkDeviceSize m_map_size = 0;
+};
+} // namespace Vulkan
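+
+// Readback usage (a sketch; the command buffer, copy, and wait come from the caller):
+//   StagingBuffer buf;
+//   buf.Create(StagingBuffer::Type::Readback, size, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+//   // record vkCmdCopy* into cmdbuf, then buf.FlushGPUCache(cmdbuf, ...), submit and wait
+//   buf.Map();
+//   buf.Read(0, dst, size); // InvalidateCPUCache() happens inside by default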
diff --git a/src/common/vulkan/staging_texture.cpp b/src/common/vulkan/staging_texture.cpp
new file mode 100644
index 000000000..0403a1164
--- /dev/null
+++ b/src/common/vulkan/staging_texture.cpp
@@ -0,0 +1,301 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
+
+#include "staging_texture.h"
+#include "../assert.h"
+#include "context.h"
+#include "util.h"
+
+namespace Vulkan {
+
+StagingTexture::StagingTexture() = default;
+
+StagingTexture::StagingTexture(StagingTexture&& move)
+  : m_staging_buffer(std::move(move.m_staging_buffer)), m_flush_fence_counter(move.m_flush_fence_counter),
+    m_width(move.m_width), m_height(move.m_height), m_texel_size(move.m_texel_size), m_map_stride(move.m_map_stride)
+{
+  move.m_flush_fence_counter = 0;
+  move.m_width = 0;
+  move.m_height = 0;
+  move.m_texel_size = 0;
+  move.m_map_stride = 0;
+}
+
+StagingTexture& StagingTexture::operator=(StagingTexture&& move)
+{
+  if (IsValid())
+    Destroy(true);
+
+  std::swap(m_staging_buffer, move.m_staging_buffer);
+  std::swap(m_flush_fence_counter, move.m_flush_fence_counter);
+  std::swap(m_width, move.m_width);
+  std::swap(m_height, move.m_height);
+  std::swap(m_texel_size, move.m_texel_size);
+  std::swap(m_map_stride, move.m_map_stride);
+  return *this;
+}
+
+StagingTexture::~StagingTexture()
+{
+  if (IsValid())
+    Destroy(true);
+}
+
+bool StagingTexture::Create(StagingBuffer::Type type, VkFormat format, u32 width, u32 height)
+{
+  const u32 texel_size = Util::GetTexelSize(format);
+  const u32 map_stride = texel_size * width;
+  const u32 buffer_size = map_stride * height;
+
+  VkBufferUsageFlags usage_flags;
+  switch (type)
+  {
+    case StagingBuffer::Type::Readback:
+      usage_flags = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+      break;
+    case StagingBuffer::Type::Upload:
+      usage_flags = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+      break;
+    case StagingBuffer::Type::Mutable:
+    default:
+      usage_flags = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+      break;
+  }
+
+  StagingBuffer new_buffer;
+  if (!new_buffer.Create(type, buffer_size, usage_flags) || !new_buffer.Map())
+    return false;
+
+  if (IsValid())
+    Destroy(true);
+
+  m_staging_buffer = std::move(new_buffer);
+  m_width = width;
+  m_height = height;
+  m_texel_size = texel_size;
+  m_map_stride = map_stride;
+  return true;
+}
+
+void StagingTexture::Destroy(bool defer /* = true */)
+{
+  if (!IsValid())
+    return;
+
+  m_staging_buffer.Destroy(defer);
+  m_flush_fence_counter = 0;
+  m_width = 0;
+  m_height = 0;
+  m_texel_size = 0;
+  m_map_stride = 0;
+}
+
+void StagingTexture::CopyFromTexture(VkCommandBuffer command_buffer, Texture& src_texture, u32 src_x, u32 src_y,
+                                     u32 src_layer, u32 src_level, u32 dst_x, u32 dst_y, u32 width, u32 height)
+{
+  Assert(m_staging_buffer.GetType() == StagingBuffer::Type::Readback ||
+         m_staging_buffer.GetType() == StagingBuffer::Type::Mutable);
+  Assert((src_x + width) <= src_texture.GetWidth() && (src_y + height) <= src_texture.GetHeight());
+  Assert((dst_x + width) <= m_width && (dst_y + height) <= m_height);
+
+  VkImageLayout old_layout = src_texture.GetLayout();
+  src_texture.TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+
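+  // Note on the copy below: VkBufferImageCopy::bufferRowLength is measured in texels, not bytes,
+  // and 0 would mean "tightly packed to imageExtent". Passing m_width pads each row to the full
+  // staging-texture stride, so partial-rectangle copies land at the expected offsets.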
+  // Issue the image->buffer copy, but delay it for now.
+  VkBufferImageCopy image_copy = {};
+  const VkImageAspectFlags aspect =
+    Util::IsDepthFormat(src_texture.GetFormat()) ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
+  image_copy.bufferOffset = static_cast<VkDeviceSize>(dst_y * m_map_stride + dst_x * m_texel_size);
+  image_copy.bufferRowLength = m_width;
+  image_copy.bufferImageHeight = 0;
+  image_copy.imageSubresource = {aspect, src_level, src_layer, 1};
+  image_copy.imageOffset = {static_cast<s32>(src_x), static_cast<s32>(src_y), 0};
+  image_copy.imageExtent = {width, height, 1u};
+  vkCmdCopyImageToBuffer(command_buffer, src_texture.GetImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                         m_staging_buffer.GetBuffer(), 1, &image_copy);
+
+  // Restore old source texture layout.
+  src_texture.TransitionToLayout(command_buffer, old_layout);
+}
+
+void StagingTexture::CopyFromTexture(Texture& src_texture, u32 src_x, u32 src_y, u32 src_layer, u32 src_level,
+                                     u32 dst_x, u32 dst_y, u32 width, u32 height)
+{
+  CopyFromTexture(g_vulkan_context->GetCurrentCommandBuffer(), src_texture, src_x, src_y, src_layer, src_level, dst_x,
+                  dst_y, width, height);
+
+  m_needs_flush = true;
+  m_flush_fence_counter = g_vulkan_context->GetCurrentFenceCounter();
+}
+
+void StagingTexture::CopyToTexture(VkCommandBuffer command_buffer, u32 src_x, u32 src_y, Texture& dst_texture,
+                                   u32 dst_x, u32 dst_y, u32 dst_layer, u32 dst_level, u32 width, u32 height)
+{
+  Assert(m_staging_buffer.GetType() == StagingBuffer::Type::Upload ||
+         m_staging_buffer.GetType() == StagingBuffer::Type::Mutable);
+  Assert((dst_x + width) <= dst_texture.GetWidth() && (dst_y + height) <= dst_texture.GetHeight());
+  Assert((src_x + width) <= m_width && (src_y + height) <= m_height);
+
+  // Flush caches before copying.
+  m_staging_buffer.FlushCPUCache();
+
+  VkImageLayout old_layout = dst_texture.GetLayout();
+  dst_texture.TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+  // Issue the buffer->image copy, but delay it for now.
+  VkBufferImageCopy image_copy = {};
+  image_copy.bufferOffset = static_cast<VkDeviceSize>(src_y * m_map_stride + src_x * m_texel_size);
+  image_copy.bufferRowLength = m_width;
+  image_copy.bufferImageHeight = 0;
+  image_copy.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, dst_level, dst_layer, 1};
+  image_copy.imageOffset = {static_cast<s32>(dst_x), static_cast<s32>(dst_y), 0};
+  image_copy.imageExtent = {width, height, 1u};
+  vkCmdCopyBufferToImage(command_buffer, m_staging_buffer.GetBuffer(), dst_texture.GetImage(),
+                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy);
+
+  // Restore old destination texture layout.
+  dst_texture.TransitionToLayout(command_buffer, old_layout);
+}
+
+void StagingTexture::CopyToTexture(u32 src_x, u32 src_y, Texture& dst_texture, u32 dst_x, u32 dst_y, u32 dst_layer,
+                                   u32 dst_level, u32 width, u32 height)
+{
+  CopyToTexture(g_vulkan_context->GetCurrentCommandBuffer(), src_x, src_y, dst_texture, dst_x, dst_y, dst_layer,
+                dst_level, width, height);
+
+  m_needs_flush = true;
+  m_flush_fence_counter = g_vulkan_context->GetCurrentFenceCounter();
+}
+
+bool StagingTexture::Map()
+{
+  return m_staging_buffer.Map();
+}
+
+void StagingTexture::Unmap()
+{
+  return m_staging_buffer.Unmap();
+}
+
+void StagingTexture::Flush()
+{
+  if (!m_needs_flush)
+    return;
+
+  // Is this copy in the current command buffer?
+  if (g_vulkan_context->GetCurrentFenceCounter() == m_flush_fence_counter)
+  {
+    // Execute the command buffer and wait for it to finish.
+    g_vulkan_context->ExecuteCommandBuffer(true);
+  }
+  else
+  {
+    // Wait for the GPU to finish with it.
+    g_vulkan_context->WaitForFenceCounter(m_flush_fence_counter);
+  }
+
+  // For readback textures, invalidate the CPU cache as there is new data there.
+  if (m_staging_buffer.GetType() == StagingBuffer::Type::Readback ||
+      m_staging_buffer.GetType() == StagingBuffer::Type::Mutable)
+  {
+    m_staging_buffer.InvalidateCPUCache();
+  }
+
+  m_needs_flush = false;
+}
+
+void StagingTexture::ReadTexels(u32 src_x, u32 src_y, u32 width, u32 height, void* out_ptr, u32 out_stride)
+{
+  Assert(m_staging_buffer.GetType() != StagingBuffer::Type::Upload);
+  if (!PrepareForAccess())
+    return;
+
+  Assert((src_x + width) <= m_width && (src_y + height) <= m_height);
+
+  // Offset pointer to point to start of region being copied out.
+  const char* current_ptr = m_staging_buffer.GetMapPointer();
+  current_ptr += src_y * m_map_stride;
+  current_ptr += src_x * m_texel_size;
+
+  // Optimal path: same dimensions, same stride.
+  if (src_x == 0 && width == m_width && m_map_stride == out_stride)
+  {
+    std::memcpy(out_ptr, current_ptr, m_map_stride * height);
+    return;
+  }
+
+  size_t copy_size = std::min(width * m_texel_size, m_map_stride);
+  char* dst_ptr = reinterpret_cast<char*>(out_ptr);
+  for (u32 row = 0; row < height; row++)
+  {
+    std::memcpy(dst_ptr, current_ptr, copy_size);
+    current_ptr += m_map_stride;
+    dst_ptr += out_stride;
+  }
+}
+
+void StagingTexture::ReadTexel(u32 x, u32 y, void* out_ptr)
+{
+  Assert(m_staging_buffer.GetType() != StagingBuffer::Type::Upload);
+  if (!PrepareForAccess())
+    return;
+
+  Assert(x < m_width && y < m_height);
+  const char* src_ptr = GetMappedPointer() + y * GetMappedStride() + x * m_texel_size;
+  std::memcpy(out_ptr, src_ptr, m_texel_size);
+}
+
+void StagingTexture::WriteTexels(u32 dst_x, u32 dst_y, u32 width, u32 height, const void* in_ptr, u32 in_stride)
+{
+  Assert(m_staging_buffer.GetType() != StagingBuffer::Type::Readback);
+  if (!PrepareForAccess())
+    return;
+
+  Assert((dst_x + width) <= m_width && (dst_y + height) <= m_height);
+
+  // Offset pointer to point to start of region being copied to.
+  char* current_ptr = GetMappedPointer();
+  current_ptr += dst_y * m_map_stride;
+  current_ptr += dst_x * m_texel_size;
+
+  // Optimal path: same dimensions, same stride.
+  if (dst_x == 0 && width == m_width && m_map_stride == in_stride)
+  {
+    std::memcpy(current_ptr, in_ptr, m_map_stride * height);
+    return;
+  }
+
+  size_t copy_size = std::min(width * m_texel_size, m_map_stride);
+  const char* src_ptr = reinterpret_cast<const char*>(in_ptr);
+  for (u32 row = 0; row < height; row++)
+  {
+    std::memcpy(current_ptr, src_ptr, copy_size);
+    current_ptr += m_map_stride;
+    src_ptr += in_stride;
+  }
+}
+
+void StagingTexture::WriteTexel(u32 x, u32 y, const void* in_ptr)
+{
+  if (!PrepareForAccess())
+    return;
+
+  Assert(x < m_width && y < m_height);
+  char* dest_ptr = GetMappedPointer() + y * m_map_stride + x * m_texel_size;
+  std::memcpy(dest_ptr, in_ptr, m_texel_size);
+}
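+
+// End-to-end readback example (a sketch; `tex` is some Vulkan::Texture owned by the caller):
+//   StagingTexture staging;
+//   staging.Create(StagingBuffer::Type::Readback, tex.GetFormat(), tex.GetWidth(), tex.GetHeight());
+//   staging.CopyFromTexture(tex, 0, 0, 0, 0, 0, 0, tex.GetWidth(), tex.GetHeight());
+//   staging.ReadTexels(0, 0, tex.GetWidth(), tex.GetHeight(), dst, dst_stride); // flushes internally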
+
+bool StagingTexture::PrepareForAccess()
+{
+  if (m_needs_flush)
+  {
+    if (IsMapped())
+      Unmap();
+    Flush();
+  }
+  return IsMapped() || Map();
+}
+
+} // namespace Vulkan
\ No newline at end of file
diff --git a/src/common/vulkan/staging_texture.h b/src/common/vulkan/staging_texture.h
new file mode 100644
index 000000000..7f03e04dd
--- /dev/null
+++ b/src/common/vulkan/staging_texture.h
@@ -0,0 +1,87 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
+
+#pragma once
+#include "staging_buffer.h"
+#include "texture.h"
+
+namespace Vulkan {
+
+class StagingTexture final
+{
+public:
+  StagingTexture();
+  StagingTexture(StagingTexture&& move);
+  StagingTexture(const StagingTexture&) = delete;
+  ~StagingTexture();
+
+  StagingTexture& operator=(StagingTexture&& move);
+  StagingTexture& operator=(const StagingTexture&) = delete;
+
+  ALWAYS_INLINE bool IsValid() const { return m_staging_buffer.IsValid(); }
+  ALWAYS_INLINE bool IsMapped() const { return m_staging_buffer.IsMapped(); }
+  ALWAYS_INLINE const char* GetMappedPointer() const { return m_staging_buffer.GetMapPointer(); }
+  ALWAYS_INLINE char* GetMappedPointer() { return m_staging_buffer.GetMapPointer(); }
+  ALWAYS_INLINE u32 GetMappedStride() const { return m_map_stride; }
+  ALWAYS_INLINE u32 GetWidth() const { return m_width; }
+  ALWAYS_INLINE u32 GetHeight() const { return m_height; }
+
+  bool Create(StagingBuffer::Type type, VkFormat format, u32 width, u32 height);
+  void Destroy(bool defer = true);
+
+  // Copies from the GPU texture object to the staging texture, which can be mapped/read by the CPU.
+  // Both src_rect and dst_rect must be within the bounds of the specified textures.
+  void CopyFromTexture(VkCommandBuffer command_buffer, Texture& src_texture, u32 src_x, u32 src_y, u32 src_layer,
+                       u32 src_level, u32 dst_x, u32 dst_y, u32 width, u32 height);
+  void CopyFromTexture(Texture& src_texture, u32 src_x, u32 src_y, u32 src_layer, u32 src_level, u32 dst_x, u32 dst_y,
+                       u32 width, u32 height);
+
+  // Wrapper for copying a whole layer of a texture to a readback texture.
+  // Assumes that the level of src texture and this texture have the same dimensions.
+  void CopyToTexture(VkCommandBuffer command_buffer, u32 src_x, u32 src_y, Texture& dst_texture, u32 dst_x, u32 dst_y,
+                     u32 dst_layer, u32 dst_level, u32 width, u32 height);
+  void CopyToTexture(u32 src_x, u32 src_y, Texture& dst_texture, u32 dst_x, u32 dst_y, u32 dst_layer, u32 dst_level,
+                     u32 width, u32 height);
+
+  // Maps the texture into the CPU address space, enabling the caller to read its contents.
+  // The Map call may not perform synchronization. If the contents of the staging texture
+  // have been updated by a CopyFromTexture call, you must call Flush() first.
+  // If persistent mapping is supported in the backend, this may be a no-op.
+  bool Map();
+
+  // Unmaps the CPU-readable copy of the texture. May be a no-op on backends which
+  // support persistent-mapped buffers.
+  void Unmap();
+
+  // Flushes pending writes from the CPU to the GPU, and reads from the GPU to the CPU.
+  // This may cause a command buffer flush depending on if one has occurred between the last
+  // call to CopyFromTexture()/CopyToTexture() and the Flush() call.
+  void Flush();
+
+  // Reads the specified rectangle from the staging texture to out_ptr, with the specified stride
+  // (length in bytes of each row). CopyFromTexture must be called first. The contents of any
+  // texels outside of the rectangle used for CopyFromTexture are undefined.
+  void ReadTexels(u32 src_x, u32 src_y, u32 width, u32 height, void* out_ptr, u32 out_stride);
+  void ReadTexel(u32 x, u32 y, void* out_ptr);
+
+  // Copies the texels from in_ptr to the staging texture, which can be read by the GPU, with the
+  // specified stride (length in bytes of each row). After updating the staging texture with all
+  // changes, call CopyToTexture() to update the GPU copy.
+  void WriteTexels(u32 dst_x, u32 dst_y, u32 width, u32 height, const void* in_ptr, u32 in_stride);
+  void WriteTexel(u32 x, u32 y, const void* in_ptr);
+
+private:
+  bool PrepareForAccess();
+
+  StagingBuffer m_staging_buffer;
+  u64 m_flush_fence_counter = 0;
+  u32 m_width = 0;
+  u32 m_height = 0;
+  u32 m_texel_size = 0;
+  u32 m_map_stride = 0;
+  bool m_needs_flush = false;
+};
+
+} // namespace Vulkan
\ No newline at end of file
diff --git a/src/common/vulkan/stream_buffer.cpp b/src/common/vulkan/stream_buffer.cpp
new file mode 100644
index 000000000..234021093
--- /dev/null
+++ b/src/common/vulkan/stream_buffer.cpp
@@ -0,0 +1,363 @@
+// Copyright 2016 Dolphin Emulator Project
+// Copyright 2020 DuckStation Emulator Project
+// Licensed under GPLv2+
+// Refer to the LICENSE file included.
+
+#include "stream_buffer.h"
+#include "../align.h"
+#include "../assert.h"
+#include "../log.h"
+#include "context.h"
+#include "util.h"
+Log_SetChannel(Vulkan::StreamBuffer);
+
+namespace Vulkan {
+
+StreamBuffer::StreamBuffer() = default;
+
+StreamBuffer::StreamBuffer(StreamBuffer&& move)
+  : m_usage(move.m_usage), m_size(move.m_size), m_current_offset(move.m_current_offset),
+    m_current_space(move.m_current_space), m_current_gpu_position(move.m_current_gpu_position),
+    m_buffer(move.m_buffer), m_memory(move.m_memory), m_host_pointer(move.m_host_pointer),
+    m_tracked_fences(std::move(move.m_tracked_fences)), m_coherent_mapping(move.m_coherent_mapping)
+{
+}
+
+StreamBuffer::~StreamBuffer()
+{
+  if (IsValid())
+    Destroy(true);
+}
+
+StreamBuffer& StreamBuffer::operator=(StreamBuffer&& move)
+{
+  if (IsValid())
+    Destroy(true);
+
+  std::swap(m_usage, move.m_usage);
+  std::swap(m_size, move.m_size);
+  std::swap(m_current_offset, move.m_current_offset);
+  std::swap(m_current_space, move.m_current_space);
+  std::swap(m_current_gpu_position, move.m_current_gpu_position);
+  std::swap(m_buffer, move.m_buffer);
+  std::swap(m_memory, move.m_memory);
+  std::swap(m_host_pointer, move.m_host_pointer);
+  std::swap(m_tracked_fences, move.m_tracked_fences);
+  std::swap(m_coherent_mapping, move.m_coherent_mapping);
+
+  return *this;
+}
+
+bool StreamBuffer::Create(VkBufferUsageFlags usage, u32 size)
+{
+  // Create the buffer descriptor
+  VkBufferCreateInfo buffer_create_info = {
+    VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType
+    nullptr,                              // const void* pNext
+    0,                                    // VkBufferCreateFlags flags
+    static_cast<VkDeviceSize>(size),      // VkDeviceSize size
+    usage,                                // VkBufferUsageFlags usage
+    VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode sharingMode
+    0,                                    // uint32_t queueFamilyIndexCount
+    nullptr                               // const uint32_t* pQueueFamilyIndices
+  };
+
+  VkBuffer buffer = VK_NULL_HANDLE;
+  VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
+    return false;
+  }
+
+  // Get memory requirements (types etc) for this buffer
+  VkMemoryRequirements memory_requirements;
+  vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements);
+
+  // Aim for a coherent mapping if possible.
+  u32 memory_type_index =
+    g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &m_coherent_mapping);
+
+  // Allocate memory for backing this buffer
+  VkMemoryAllocateInfo memory_allocate_info = {
+    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
+    nullptr,                                // const void* pNext
+    memory_requirements.size,               // VkDeviceSize allocationSize
+    memory_type_index                       // uint32_t memoryTypeIndex
+  };
+  VkDeviceMemory memory = VK_NULL_HANDLE;
+  res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
+    vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
+    return false;
+  }
+
+  // Bind memory to buffer
+  res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: ");
+    vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
+    vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
+    return false;
+  }
+
+  // Map this buffer into user-space
+  void* mapped_ptr = nullptr;
+  res = vkMapMemory(g_vulkan_context->GetDevice(), memory, 0, size, 0, &mapped_ptr);
+  if (res != VK_SUCCESS)
+  {
+    LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
+    vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
+    vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
+    return false;
+  }
+
+  // Unmap current host pointer (if there was a previous buffer)
+  if (m_host_pointer)
+    vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
+
+  if (IsValid())
+    Destroy(true);
+
+  // Replace with the new buffer
+  m_usage = usage;
+  m_size = size;
+  m_buffer = buffer;
+  m_memory = memory;
+  m_host_pointer = reinterpret_cast<u8*>(mapped_ptr);
+  m_current_offset = 0;
+  m_current_gpu_position = 0;
+  m_tracked_fences.clear();
+  return true;
+}
+
+void StreamBuffer::Destroy(bool defer)
+{
+  if (m_host_pointer)
+  {
+    vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
+    m_host_pointer = nullptr;
+  }
+
+  if (m_buffer != VK_NULL_HANDLE)
+  {
+    if (defer)
+      g_vulkan_context->DeferBufferDestruction(m_buffer);
+    else
+      vkDestroyBuffer(g_vulkan_context->GetDevice(), m_buffer, nullptr);
+    m_buffer = VK_NULL_HANDLE;
+  }
+  if (m_memory != VK_NULL_HANDLE)
+  {
+    if (defer)
+      g_vulkan_context->DeferDeviceMemoryDestruction(m_memory);
+    else
+      vkFreeMemory(g_vulkan_context->GetDevice(), m_memory, nullptr);
+    m_memory = VK_NULL_HANDLE;
+  }
+}
+
+bool StreamBuffer::ReserveMemory(u32 num_bytes, u32 alignment)
+{
+  const u32 required_bytes = num_bytes + alignment;
+
+  // Check for sane allocations
+  if (required_bytes > m_size)
+  {
+    Log_ErrorPrintf("Attempting to allocate %u bytes from a %u byte stream buffer", static_cast<u32>(num_bytes),
+                    static_cast<u32>(m_size));
+    Panic("Stream buffer overflow");
+    return false;
+  }
+
+  // Is the GPU behind or up to date with our current offset?
+  UpdateCurrentFencePosition();
+  if (m_current_offset >= m_current_gpu_position)
+  {
+    const u32 remaining_bytes = m_size - m_current_offset;
+    if (required_bytes <= remaining_bytes)
+    {
+      // Place at the current position, after the GPU position.
+      m_current_offset = Common::AlignUp(m_current_offset, alignment);
+      m_current_space = m_size - m_current_offset;
+      return true;
+    }
+
+    // Check for space at the start of the buffer
+    // We use < here because we don't want to have the case of m_current_offset ==
+    // m_current_gpu_position. That would mean the code above would assume the
+    // GPU has caught up to us, which it hasn't.
+    if (required_bytes < m_current_gpu_position)
+    {
+      // Reset offset to zero, since we're allocating behind the gpu now
+      m_current_offset = 0;
+      m_current_space = m_current_gpu_position;
+      return true;
+    }
+  }
+
+  // Is the GPU ahead of our current offset?
+  if (m_current_offset < m_current_gpu_position)
+  {
+    // We have from m_current_offset..m_current_gpu_position space to use.
+    const u32 remaining_bytes = m_current_gpu_position - m_current_offset;
+    if (required_bytes < remaining_bytes)
+    {
+      // Place at the current position, since this is still behind the GPU.
+      m_current_offset = Common::AlignUp(m_current_offset, alignment);
+      m_current_space = m_current_gpu_position - m_current_offset;
+      return true;
+    }
+  }
+
+  // Can we find a fence to wait on that will give us enough memory?
+  if (WaitForClearSpace(required_bytes))
+  {
+    const u32 align_diff = Common::AlignUp(m_current_offset, alignment) - m_current_offset;
+    m_current_offset += align_diff;
+    m_current_space -= align_diff;
+    return true;
+  }
+
+  // We tried everything we could, and still couldn't get anything. This means that too much space
+  // in the buffer is being used by the command buffer currently being recorded. Therefore, the
+  // only option is to execute it, and wait until it's done.
+  return false;
+}
+
+void StreamBuffer::CommitMemory(u32 final_num_bytes)
+{
+  Assert((m_current_offset + final_num_bytes) <= m_size);
+  Assert(final_num_bytes <= m_current_space);
+
+  // For non-coherent mappings, flush the memory range
+  if (!m_coherent_mapping)
+  {
+    VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory, m_current_offset,
+                                 final_num_bytes};
+    vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
+  }
+
+  m_current_offset += final_num_bytes;
+  m_current_space -= final_num_bytes;
+}
+
+void StreamBuffer::UpdateCurrentFencePosition()
+{
+  // Don't create a tracking entry if the GPU is caught up with the buffer.
+  if (m_current_offset == m_current_gpu_position)
+    return;
+
+  // Has the offset changed since the last fence?
+  const u64 counter = g_vulkan_context->GetCurrentFenceCounter();
+  if (!m_tracked_fences.empty() && m_tracked_fences.back().first == counter)
+  {
+    // Still haven't executed a command buffer, so just update the offset.
+    m_tracked_fences.back().second = m_current_offset;
+    return;
+  }
+
+  // New buffer, so update the GPU position while we're at it.
+  UpdateGPUPosition();
+  m_tracked_fences.emplace_back(counter, m_current_offset);
+}
+
+void StreamBuffer::UpdateGPUPosition()
+{
+  auto start = m_tracked_fences.begin();
+  auto end = start;
+
+  const u64 completed_counter = g_vulkan_context->GetCompletedFenceCounter();
+  while (end != m_tracked_fences.end() && completed_counter >= end->first)
+  {
+    m_current_gpu_position = end->second;
+    ++end;
+  }
+
+  if (start != end)
+    m_tracked_fences.erase(start, end);
+}
+
+bool StreamBuffer::WaitForClearSpace(u32 num_bytes)
+{
+  u32 new_offset = 0;
+  u32 new_space = 0;
+  u32 new_gpu_position = 0;
+
+  auto iter = m_tracked_fences.begin();
+  for (; iter != m_tracked_fences.end(); ++iter)
+  {
+    // Would this fence bring us in line with the GPU?
+    // This is the "last resort" case, where a command buffer execution has been forced
+    // after no additional data has been written to it, so we can assume that after the
+    // fence has been signaled the entire buffer is now consumed.
+    u32 gpu_position = iter->second;
+    if (m_current_offset == gpu_position)
+    {
+      new_offset = 0;
+      new_space = m_size;
+      new_gpu_position = 0;
+      break;
+    }
+
+    // Assuming that we wait for this fence, are we allocating in front of the GPU?
+    if (m_current_offset > gpu_position)
+    {
+      // This would suggest the GPU has now followed us and wrapped around, so we have from
+      // m_current_position..m_size free, as well as 0..gpu_position.
+      const u32 remaining_space_after_offset = m_size - m_current_offset;
+      if (remaining_space_after_offset >= num_bytes)
+      {
+        // Switch to allocating in front of the GPU, using the remainder of the buffer.
+        new_offset = m_current_offset;
+        new_space = m_size - m_current_offset;
+        new_gpu_position = gpu_position;
+        break;
+      }
+
+      // We can wrap around to the start, behind the GPU, if there is enough space.
+      // We use > here because otherwise we'd end up lining up with the GPU, and then the
+      // allocator would assume that the GPU has consumed what we just wrote.
+      if (gpu_position > num_bytes)
+      {
+        new_offset = 0;
+        new_space = gpu_position;
+        new_gpu_position = gpu_position;
+        break;
+      }
+    }
+    else
+    {
+      // We're currently allocating behind the GPU. This would give us between the current
+      // offset and the GPU position worth of space to work with. Again, > because we can't
+      // align the GPU position with the buffer offset.
+      u32 available_space_inbetween = gpu_position - m_current_offset;
+      if (available_space_inbetween > num_bytes)
+      {
+        // Leave the offset as-is, but update the GPU position.
+        new_offset = m_current_offset;
+        new_space = gpu_position - m_current_offset;
+        new_gpu_position = gpu_position;
+        break;
+      }
+    }
+  }
+
+  // Did any fences satisfy this condition?
+  // Has the command buffer been executed yet? If not, the caller should execute it.
+  if (iter == m_tracked_fences.end() || iter->first == g_vulkan_context->GetCurrentFenceCounter())
+    return false;
+
+  // Wait until this fence is signaled. This will fire the callback, updating the GPU position.
+  g_vulkan_context->WaitForFenceCounter(iter->first);
+  m_tracked_fences.erase(m_tracked_fences.begin(),
+                         m_current_offset == iter->second ? m_tracked_fences.end() : ++iter);
+  m_current_offset = new_offset;
+  m_current_space = new_space;
+  m_current_gpu_position = new_gpu_position;
+  return true;
+}
+
+} // namespace Vulkan
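+
+// Typical per-draw usage (a sketch; `vertex_count`/`stride`/`vertices` are caller-side values):
+//   if (!stream_buffer.ReserveMemory(vertex_count * stride, stride))
+//     { /* execute the current command buffer to drain the GPU, then retry the reserve */ }
+//   std::memcpy(stream_buffer.GetCurrentHostPointer(), vertices, vertex_count * stride);
+//   const u32 base_offset = stream_buffer.GetCurrentOffset();
+//   stream_buffer.CommitMemory(vertex_count * stride);
+//   // bind GetBuffer() at base_offset for the draw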
+ +#pragma once + +#include "../types.h" +#include "vulkan_loader.h" +#include <deque> +#include <utility> + +namespace Vulkan { + +class StreamBuffer +{ +public: + StreamBuffer(); + StreamBuffer(StreamBuffer&& move); + StreamBuffer(const StreamBuffer&) = delete; + ~StreamBuffer(); + + StreamBuffer& operator=(StreamBuffer&& move); + StreamBuffer& operator=(const StreamBuffer&) = delete; + + ALWAYS_INLINE bool IsValid() const { return (m_buffer != VK_NULL_HANDLE); } + ALWAYS_INLINE VkBuffer GetBuffer() const { return m_buffer; } + ALWAYS_INLINE const VkBuffer* GetBufferPointer() const { return &m_buffer; } + ALWAYS_INLINE VkDeviceMemory GetDeviceMemory() const { return m_memory; } + ALWAYS_INLINE void* GetHostPointer() const { return m_host_pointer; } + ALWAYS_INLINE void* GetCurrentHostPointer() const { return m_host_pointer + m_current_offset; } + ALWAYS_INLINE u32 GetCurrentSize() const { return m_size; } + ALWAYS_INLINE u32 GetCurrentSpace() const { return m_current_space; } + ALWAYS_INLINE u32 GetCurrentOffset() const { return m_current_offset; } + + bool Create(VkBufferUsageFlags usage, u32 size); + void Destroy(bool defer); + + bool ReserveMemory(u32 num_bytes, u32 alignment); + void CommitMemory(u32 final_num_bytes); + +private: + bool AllocateBuffer(VkBufferUsageFlags usage, u32 size); + void UpdateCurrentFencePosition(); + void UpdateGPUPosition(); + + // Waits for as many fences as needed to allocate num_bytes bytes from the buffer. + bool WaitForClearSpace(u32 num_bytes); + + VkBufferUsageFlags m_usage = 0; + u32 m_size = 0; + u32 m_current_offset = 0; + u32 m_current_space = 0; + u32 m_current_gpu_position = 0; + + VkBuffer m_buffer = VK_NULL_HANDLE; + VkDeviceMemory m_memory = VK_NULL_HANDLE; + u8* m_host_pointer = nullptr; + + // List of fences and the corresponding positions in the buffer + std::deque<std::pair<u64, u32>> m_tracked_fences; + + bool m_coherent_mapping = false; +}; + +} // namespace Vulkan diff --git a/src/common/vulkan/swap_chain.cpp b/src/common/vulkan/swap_chain.cpp new file mode 100644 index 000000000..7e23ac94f --- /dev/null +++ b/src/common/vulkan/swap_chain.cpp @@ -0,0 +1,528 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included.
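ReserveMemory() aligns the write cursor before handing out space, so a caller's offset is always a multiple of the requested alignment. As a quick sanity check of that arithmetic, the standalone helper below mirrors Common::AlignUp for power-of-two alignments; it is an illustration, not the project's implementation:

#include <cstdint>
using u32 = std::uint32_t; // matches the project's typedef

// Mirrors the AlignUp step in ReserveMemory for power-of-two alignments.
constexpr u32 AlignUpExample(u32 value, u32 alignment)
{
  return (value + (alignment - 1)) & ~(alignment - 1);
}
// With a cursor at byte 37 and 16-byte alignment, 11 padding bytes are skipped:
static_assert(AlignUpExample(37, 16) == 48, "a cursor at byte 37 is bumped to 48");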
+ +#include "swap_chain.h" +#include "../assert.h" +#include "../log.h" +#include "context.h" +#include "util.h" +#include +#include +Log_SetChannel(Vulkan::SwapChain); + +#if defined(VK_USE_PLATFORM_XLIB_KHR) +#include +#endif + +namespace Vulkan { +SwapChain::SwapChain(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync) + : m_wi(wi), m_surface(surface), m_vsync_enabled(vsync) +{ +} + +SwapChain::~SwapChain() +{ + DestroySemaphores(); + DestroySwapChainImages(); + DestroySwapChain(); + DestroySurface(); +} + +VkSurfaceKHR SwapChain::CreateVulkanSurface(VkInstance instance, const WindowInfo& wi) +{ +#if defined(VK_USE_PLATFORM_WIN32_KHR) + if (wi.type == WindowInfo::Type::Win32) + { + VkWin32SurfaceCreateInfoKHR surface_create_info = { + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, // VkStructureType sType + nullptr, // const void* pNext + 0, // VkWin32SurfaceCreateFlagsKHR flags + nullptr, // HINSTANCE hinstance + reinterpret_cast(wi.window_handle) // HWND hwnd + }; + + VkSurfaceKHR surface; + VkResult res = vkCreateWin32SurfaceKHR(instance, &surface_create_info, nullptr, &surface); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateWin32SurfaceKHR failed: "); + return VK_NULL_HANDLE; + } + + return surface; + } +#endif + +#if defined(VK_USE_PLATFORM_XLIB_KHR) + if (wi.type == WindowInfo::Type::X11) + { + VkXlibSurfaceCreateInfoKHR surface_create_info = { + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, // VkStructureType sType + nullptr, // const void* pNext + 0, // VkXlibSurfaceCreateFlagsKHR flags + static_cast(wi.display_connection), // Display* dpy + reinterpret_cast(wi.window_handle) // Window window + }; + + VkSurfaceKHR surface; + VkResult res = vkCreateXlibSurfaceKHR(instance, &surface_create_info, nullptr, &surface); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateXlibSurfaceKHR failed: "); + return VK_NULL_HANDLE; + } + + return surface; + } +#endif + +#if defined(VK_USE_PLATFORM_ANDROID_KHR) + if (wi.type == WindowInfo::Type::Android) + { + VkAndroidSurfaceCreateInfoKHR surface_create_info = { + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, // VkStructureType sType + nullptr, // const void* pNext + 0, // VkAndroidSurfaceCreateFlagsKHR flags + reinterpret_cast(wi.window_handle) // ANativeWindow* window + }; + + VkSurfaceKHR surface; + VkResult res = vkCreateAndroidSurfaceKHR(instance, &surface_create_info, nullptr, &surface); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateAndroidSurfaceKHR failed: "); + return VK_NULL_HANDLE; + } + + return surface; + } +#endif + +#if defined(VK_USE_PLATFORM_METAL_EXT) + if (wi.type == WindowInfo::Type::MacOS) + { + // TODO: Create Metal layer + VkMetalSurfaceCreateInfoEXT surface_create_info = {VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT, nullptr, 0, + static_cast(wi.window_handle)}; + + VkSurfaceKHR surface; + VkResult res = vkCreateMetalSurfaceEXT(instance, &surface_create_info, nullptr, &surface); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateMetalSurfaceEXT failed: "); + return VK_NULL_HANDLE; + } + + return surface; + } +#endif + + return VK_NULL_HANDLE; +} + +std::unique_ptr SwapChain::Create(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync) +{ + std::unique_ptr swap_chain = std::make_unique(wi, surface, vsync); + if (!swap_chain->CreateSwapChain() || !swap_chain->SetupSwapChainImages() || !swap_chain->CreateSemaphores()) + return nullptr; + + return swap_chain; +} + +bool SwapChain::SelectSurfaceFormat() +{ + u32 format_count; + VkResult res = + 
vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, nullptr); + if (res != VK_SUCCESS || format_count == 0) + { + LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: "); + return false; + } + + std::vector<VkSurfaceFormatKHR> surface_formats(format_count); + res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, + surface_formats.data()); + Assert(res == VK_SUCCESS); + + // If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA + if (surface_formats[0].format == VK_FORMAT_UNDEFINED) + { + m_surface_format.format = VK_FORMAT_R8G8B8A8_UNORM; + m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR; + return true; + } + + // Try to find a suitable format. + for (const VkSurfaceFormatKHR& surface_format : surface_formats) + { + // Some drivers seem to return a SRGB format here (Intel Mesa). + // This results in gamma correction when presenting to the screen, which we don't want. + // Use a linear format instead, if this is the case. + m_surface_format.format = Util::GetLinearFormat(surface_format.format); + m_surface_format.colorSpace = surface_format.colorSpace; + return true; + } + + Panic("Failed to find a suitable format for swap chain buffers."); + return false; +} + +bool SwapChain::SelectPresentMode() +{ + VkResult res; + u32 mode_count; + res = + vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, nullptr); + if (res != VK_SUCCESS || mode_count == 0) + { + LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfacePresentModesKHR failed: "); + return false; + } + + std::vector<VkPresentModeKHR> present_modes(mode_count); + res = vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, + present_modes.data()); + Assert(res == VK_SUCCESS); + + // Returns true if the given present mode is supported by the surface. + auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) { + auto it = std::find_if(present_modes.begin(), present_modes.end(), + [check_mode](VkPresentModeKHR mode) { return check_mode == mode; }); + return it != present_modes.end(); + }; + + // If vsync is enabled, use VK_PRESENT_MODE_FIFO_KHR. + // This check should not fail with conforming drivers, as the FIFO present mode is mandated by + // the specification (VK_KHR_swapchain). In case it isn't though, fall through to any other mode. + if (m_vsync_enabled && CheckForMode(VK_PRESENT_MODE_FIFO_KHR)) + { + m_present_mode = VK_PRESENT_MODE_FIFO_KHR; + return true; + } + + // Prefer screen-tearing, if possible, for lowest latency. + if (CheckForMode(VK_PRESENT_MODE_IMMEDIATE_KHR)) + { + m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR; + return true; + } + + // Use optimized-vsync above vsync. + if (CheckForMode(VK_PRESENT_MODE_MAILBOX_KHR)) + { + m_present_mode = VK_PRESENT_MODE_MAILBOX_KHR; + return true; + } + + // Fall back to whatever is available.
+ m_present_mode = present_modes[0]; + return true; +} + +bool SwapChain::CreateSwapChain() +{ + // Look up surface properties to determine image count and dimensions + VkSurfaceCapabilitiesKHR surface_capabilities; + VkResult res = + vkGetPhysicalDeviceSurfaceCapabilitiesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &surface_capabilities); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed: "); + return false; + } + + // Select swap chain format and present mode + if (!SelectSurfaceFormat() || !SelectPresentMode()) + return false; + + // Select the number of images in the swap chain; we prefer one buffer in the background to work on + u32 image_count = surface_capabilities.minImageCount + 1u; + + // maxImageCount can be zero, in which case there isn't an upper limit on the number of buffers. + if (surface_capabilities.maxImageCount > 0) + image_count = std::min(image_count, surface_capabilities.maxImageCount); + + // Determine the dimensions of the swap chain. A currentExtent of 0xFFFFFFFF means the surface + // size is determined by the extent we specify here, so fall back to the window dimensions. + VkExtent2D size = surface_capabilities.currentExtent; + if (size.width == UINT32_MAX) + { + size.width = m_wi.surface_width; + size.height = m_wi.surface_height; + } + size.width = + std::clamp(size.width, surface_capabilities.minImageExtent.width, surface_capabilities.maxImageExtent.width); + size.height = + std::clamp(size.height, surface_capabilities.minImageExtent.height, surface_capabilities.maxImageExtent.height); + + // Prefer identity transform if possible + VkSurfaceTransformFlagBitsKHR transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; + if (!(surface_capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)) + transform = surface_capabilities.currentTransform; + + // Select swap chain flags; we only need a colour attachment + VkImageUsageFlags image_usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + if (!(surface_capabilities.supportedUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) + { + Log_ErrorPrintf("Vulkan: Swap chain does not support usage as color attachment"); + return false; + } + + // Store the old/current swap chain when recreating for resize + VkSwapchainKHR old_swap_chain = m_swap_chain; + m_swap_chain = VK_NULL_HANDLE; + + // Now we can actually create the swap chain + VkSwapchainCreateInfoKHR swap_chain_info = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + nullptr, + 0, + m_surface, + image_count, + m_surface_format.format, + m_surface_format.colorSpace, + size, + 1u, + image_usage, + VK_SHARING_MODE_EXCLUSIVE, + 0, + nullptr, + transform, + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, + m_present_mode, + VK_TRUE, + old_swap_chain}; + std::array<u32, 2> indices = {{ + g_vulkan_context->GetGraphicsQueueFamilyIndex(), + g_vulkan_context->GetPresentQueueFamilyIndex(), + }}; + if (g_vulkan_context->GetGraphicsQueueFamilyIndex() != g_vulkan_context->GetPresentQueueFamilyIndex()) + { + swap_chain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT; + swap_chain_info.queueFamilyIndexCount = 2; + swap_chain_info.pQueueFamilyIndices = indices.data(); + } + + // m_swap_chain was reset to VK_NULL_HANDLE above, so create unconditionally; guarding on the + // handle here would silently skip creation and leave res holding a stale value. + res = vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr, &m_swap_chain); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateSwapchainKHR failed: "); + return false; + } + + // Now destroy the old swap chain, since it's been recreated. + // We can do this immediately since all work should have been completed before calling resize.
+ if (old_swap_chain != VK_NULL_HANDLE) + vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), old_swap_chain, nullptr); + + m_width = size.width; + m_height = size.height; + return true; +} + +bool SwapChain::SetupSwapChainImages() +{ + Assert(m_images.empty()); + + u32 image_count; + VkResult res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: "); + return false; + } + + std::vector<VkImage> images(image_count); + res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, images.data()); + Assert(res == VK_SUCCESS); + + m_load_render_pass = g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED, + VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD); + m_clear_render_pass = g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED, + VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR); + if (m_load_render_pass == VK_NULL_HANDLE || m_clear_render_pass == VK_NULL_HANDLE) + { + Panic("Failed to get swap chain render passes."); + return false; + } + + m_images.reserve(image_count); + for (u32 i = 0; i < image_count; i++) + { + SwapChainImage image; + image.image = images[i]; + + // Create texture object, which creates a view of the backbuffer + if (!image.texture.Adopt(image.image, VK_IMAGE_VIEW_TYPE_2D, m_width, m_height, 1, 1, m_surface_format.format, + VK_SAMPLE_COUNT_1_BIT)) + { + return false; + } + + image.framebuffer = image.texture.CreateFramebuffer(m_load_render_pass); + if (image.framebuffer == VK_NULL_HANDLE) + return false; + + m_images.emplace_back(std::move(image)); + } + + return true; +} + +void SwapChain::DestroySwapChainImages() +{ + for (auto& it : m_images) + { + // Images themselves are cleaned up by the swap chain object + vkDestroyFramebuffer(g_vulkan_context->GetDevice(), it.framebuffer, nullptr); + } + m_images.clear(); +} + +void SwapChain::DestroySwapChain() +{ + if (m_swap_chain == VK_NULL_HANDLE) + return; + + vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), m_swap_chain, nullptr); + m_swap_chain = VK_NULL_HANDLE; +} + +VkResult SwapChain::AcquireNextImage() +{ + VkResult res = vkAcquireNextImageKHR(g_vulkan_context->GetDevice(), m_swap_chain, UINT64_MAX, + m_image_available_semaphore, VK_NULL_HANDLE, &m_current_image); + if (res != VK_SUCCESS && res != VK_ERROR_OUT_OF_DATE_KHR && res != VK_SUBOPTIMAL_KHR) + LOG_VULKAN_ERROR(res, "vkAcquireNextImageKHR failed: "); + + return res; +} + +bool SwapChain::ResizeSwapChain(u32 new_width /* = 0 */, u32 new_height /* = 0 */) +{ + DestroySwapChainImages(); + + if (new_width != 0 && new_height != 0) + { + m_wi.surface_width = new_width; + m_wi.surface_height = new_height; + } + + if (!CreateSwapChain() || !SetupSwapChainImages()) + { + Panic("Failed to re-configure swap chain images, this is fatal (for now)"); + return false; + } + + return true; +} + +bool SwapChain::RecreateSwapChain() +{ + DestroySwapChainImages(); + DestroySwapChain(); + if (!CreateSwapChain() || !SetupSwapChainImages()) + { + Panic("Failed to re-configure swap chain images, this is fatal (for now)"); + return false; + } + + return true; +} + +bool SwapChain::SetVSync(bool enabled) +{ + if (m_vsync_enabled == enabled) + return true; + + // Recreate the swap chain with the new present mode.
+ m_vsync_enabled = enabled; + return RecreateSwapChain(); +} + +bool SwapChain::RecreateSurface(const WindowInfo& new_wi) +{ + // Destroy the old swap chain, images, and surface. + DestroySwapChainImages(); + DestroySwapChain(); + DestroySurface(); + + // Re-create the surface with the new native handle + m_wi = new_wi; + m_surface = CreateVulkanSurface(g_vulkan_context->GetVulkanInstance(), m_wi); + if (m_surface == VK_NULL_HANDLE) + return false; + + // The validation layers get angry at us if we don't call this before creating the swapchain. + VkBool32 present_supported = VK_TRUE; + VkResult res = + vkGetPhysicalDeviceSurfaceSupportKHR(g_vulkan_context->GetPhysicalDevice(), + g_vulkan_context->GetPresentQueueFamilyIndex(), m_surface, &present_supported); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: "); + return false; + } + if (!present_supported) + { + Panic("Recreated surface does not support presenting."); + return false; + } + + // Finally re-create the swap chain + if (!CreateSwapChain() || !SetupSwapChainImages()) + return false; + + return true; +} + +void SwapChain::DestroySurface() +{ + vkDestroySurfaceKHR(g_vulkan_context->GetVulkanInstance(), m_surface, nullptr); + m_surface = VK_NULL_HANDLE; +} + +bool SwapChain::CreateSemaphores() +{ + // Create two semaphores, one that is triggered when the swapchain buffer is ready, another after + // submit and before present + VkSemaphoreCreateInfo semaphore_info = { + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, // VkStructureType sType + nullptr, // const void* pNext + 0 // VkSemaphoreCreateFlags flags + }; + + VkResult res; + if ((res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr, + &m_image_available_semaphore)) != VK_SUCCESS || + (res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr, + &m_rendering_finished_semaphore)) != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: "); + return false; + } + + return true; +} + +void SwapChain::DestroySemaphores() +{ + if (m_image_available_semaphore != VK_NULL_HANDLE) + { + vkDestroySemaphore(g_vulkan_context->GetDevice(), m_image_available_semaphore, nullptr); + m_image_available_semaphore = VK_NULL_HANDLE; + } + + if (m_rendering_finished_semaphore != VK_NULL_HANDLE) + { + vkDestroySemaphore(g_vulkan_context->GetDevice(), m_rendering_finished_semaphore, nullptr); + m_rendering_finished_semaphore = VK_NULL_HANDLE; + } +} + +} // namespace Vulkan diff --git a/src/common/vulkan/swap_chain.h b/src/common/vulkan/swap_chain.h new file mode 100644 index 000000000..a74788b66 --- /dev/null +++ b/src/common/vulkan/swap_chain.h @@ -0,0 +1,98 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included. + +#pragma once + +#include "../types.h" +#include "../window_info.h" +#include "texture.h" +#include "vulkan_loader.h" +#include <memory> +#include <vector> + +namespace Vulkan { + +class SwapChain +{ +public: + SwapChain(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync); + ~SwapChain(); + + // Creates a vulkan-renderable surface for the specified window handle. + static VkSurfaceKHR CreateVulkanSurface(VkInstance instance, const WindowInfo& wi); + + // Create a new swap chain from a pre-existing surface.
+ static std::unique_ptr<SwapChain> Create(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync); + + ALWAYS_INLINE VkSurfaceKHR GetSurface() const { return m_surface; } + ALWAYS_INLINE VkSurfaceFormatKHR GetSurfaceFormat() const { return m_surface_format; } + ALWAYS_INLINE VkFormat GetTextureFormat() const { return m_texture_format; } + ALWAYS_INLINE bool IsVSyncEnabled() const { return m_vsync_enabled; } + ALWAYS_INLINE VkSwapchainKHR GetSwapChain() const { return m_swap_chain; } + ALWAYS_INLINE u32 GetWidth() const { return m_width; } + ALWAYS_INLINE u32 GetHeight() const { return m_height; } + ALWAYS_INLINE u32 GetCurrentImageIndex() const { return m_current_image; } + ALWAYS_INLINE u32 GetImageCount() const { return static_cast<u32>(m_images.size()); } + ALWAYS_INLINE VkImage GetCurrentImage() const { return m_images[m_current_image].image; } + ALWAYS_INLINE const Texture& GetCurrentTexture() const { return m_images[m_current_image].texture; } + ALWAYS_INLINE Texture& GetCurrentTexture() { return m_images[m_current_image].texture; } + ALWAYS_INLINE VkFramebuffer GetCurrentFramebuffer() const { return m_images[m_current_image].framebuffer; } + ALWAYS_INLINE VkRenderPass GetLoadRenderPass() const { return m_load_render_pass; } + ALWAYS_INLINE VkRenderPass GetClearRenderPass() const { return m_clear_render_pass; } + ALWAYS_INLINE VkSemaphore GetImageAvailableSemaphore() const { return m_image_available_semaphore; } + ALWAYS_INLINE VkSemaphore GetRenderingFinishedSemaphore() const { return m_rendering_finished_semaphore; } + VkResult AcquireNextImage(); + + bool RecreateSurface(const WindowInfo& new_wi); + bool ResizeSwapChain(u32 new_width = 0, u32 new_height = 0); + bool RecreateSwapChain(); + + // Change vsync enabled state. This may fail as it causes a swapchain recreation. + bool SetVSync(bool enabled); + +private: + bool SelectSurfaceFormat(); + bool SelectPresentMode(); + + bool CreateSwapChain(); + void DestroySwapChain(); + + bool SetupSwapChainImages(); + void DestroySwapChainImages(); + + void DestroySurface(); + + bool CreateSemaphores(); + void DestroySemaphores(); + + struct SwapChainImage + { + VkImage image; + Texture texture; + VkFramebuffer framebuffer; + }; + + u32 m_width = 0; + u32 m_height = 0; + WindowInfo m_wi; + bool m_vsync_enabled = false; + + VkSurfaceKHR m_surface = VK_NULL_HANDLE; + VkSurfaceFormatKHR m_surface_format = {}; + VkPresentModeKHR m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR; + VkFormat m_texture_format = VK_FORMAT_UNDEFINED; + + VkRenderPass m_load_render_pass = VK_NULL_HANDLE; + VkRenderPass m_clear_render_pass = VK_NULL_HANDLE; + + VkSemaphore m_image_available_semaphore = VK_NULL_HANDLE; + VkSemaphore m_rendering_finished_semaphore = VK_NULL_HANDLE; + + VkSwapchainKHR m_swap_chain = VK_NULL_HANDLE; + std::vector<SwapChainImage> m_images; + u32 m_current_image = 0; +}; + +} // namespace Vulkan diff --git a/src/common/vulkan/texture.cpp b/src/common/vulkan/texture.cpp new file mode 100644 index 000000000..7078b8e6c --- /dev/null +++ b/src/common/vulkan/texture.cpp @@ -0,0 +1,374 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included.
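The header above exposes the two semaphores as a pair: the image-available semaphore gates rendering to the acquired backbuffer, and the rendering-finished semaphore gates the present. A rough frame skeleton under those assumptions (the submit/present plumbing here is illustrative, not the emulator's actual present path; swap_chain and present_queue are assumed to exist):

// Illustrative frame skeleton; `swap_chain` and `present_queue` are assumed.
VkResult res = swap_chain->AcquireNextImage();
if (res == VK_ERROR_OUT_OF_DATE_KHR || res == VK_SUBOPTIMAL_KHR)
{
  // Surface size changed; rebuild and try the acquire again.
  swap_chain->ResizeSwapChain();
  res = swap_chain->AcquireNextImage();
}
if (res != VK_SUCCESS)
  return;

// ... record and submit rendering here, waiting on GetImageAvailableSemaphore()
// at the color-attachment stage and signalling GetRenderingFinishedSemaphore() ...

const VkSemaphore wait_semaphore = swap_chain->GetRenderingFinishedSemaphore();
const VkSwapchainKHR swap_chain_handle = swap_chain->GetSwapChain();
const u32 image_index = swap_chain->GetCurrentImageIndex();
const VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, nullptr,
                                       1, &wait_semaphore, // wait before presenting
                                       1, &swap_chain_handle, &image_index, nullptr};
vkQueuePresentKHR(present_queue, &present_info);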
+ +#include "texture.h" +#include "../assert.h" +#include "context.h" +#include "util.h" +#include + +namespace Vulkan { +Texture::Texture() = default; + +Texture::Texture(Texture&& move) + : m_width(move.m_width), m_height(move.m_height), m_levels(move.m_levels), m_layers(move.m_layers), + m_format(move.m_format), m_samples(move.m_samples), m_view_type(move.m_view_type), m_layout(move.m_layout), + m_image(move.m_image), m_device_memory(move.m_device_memory), m_view(move.m_view) +{ + move.m_width = 0; + move.m_height = 0; + move.m_levels = 0; + move.m_layers = 0; + move.m_format = VK_FORMAT_UNDEFINED; + move.m_samples = VK_SAMPLE_COUNT_1_BIT; + move.m_view_type = VK_IMAGE_VIEW_TYPE_2D; + move.m_layout = VK_IMAGE_LAYOUT_UNDEFINED; + move.m_image = VK_NULL_HANDLE; + move.m_device_memory = VK_NULL_HANDLE; + move.m_view = VK_NULL_HANDLE; +} + +Texture::~Texture() +{ + if (IsValid()) + Destroy(true); +} + +Vulkan::Texture& Texture::operator=(Texture&& move) +{ + if (IsValid()) + Destroy(true); + + std::swap(m_width, move.m_width); + std::swap(m_height, move.m_height); + std::swap(m_levels, move.m_levels); + std::swap(m_layers, move.m_layers); + std::swap(m_format, move.m_format); + std::swap(m_samples, move.m_samples); + std::swap(m_view_type, move.m_view_type); + std::swap(m_layout, move.m_layout); + std::swap(m_image, move.m_image); + std::swap(m_device_memory, move.m_device_memory); + std::swap(m_view, move.m_view); + + return *this; +} + +bool Texture::Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples, + VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage) +{ + VkImageCreateInfo image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + nullptr, + 0, + VK_IMAGE_TYPE_2D, + format, + {width, height, 1}, + levels, + layers, + samples, + tiling, + usage, + VK_SHARING_MODE_EXCLUSIVE, + 0, + nullptr, + VK_IMAGE_LAYOUT_UNDEFINED}; + + VkImage image = VK_NULL_HANDLE; + VkResult res = vkCreateImage(g_vulkan_context->GetDevice(), &image_info, nullptr, &image); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateImage failed: "); + return false; + } + + // Allocate memory to back this texture, we want device local memory in this case + VkMemoryRequirements memory_requirements; + vkGetImageMemoryRequirements(g_vulkan_context->GetDevice(), image, &memory_requirements); + + VkMemoryAllocateInfo memory_info = { + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, nullptr, memory_requirements.size, + g_vulkan_context->GetMemoryType(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)}; + + VkDeviceMemory device_memory; + res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_info, nullptr, &device_memory); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: "); + vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr); + return false; + } + + res = vkBindImageMemory(g_vulkan_context->GetDevice(), image, device_memory, 0); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkBindImageMemory failed: "); + vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr); + vkFreeMemory(g_vulkan_context->GetDevice(), device_memory, nullptr); + return false; + } + + VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + nullptr, + 0, + image, + view_type, + format, + {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, + VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, + {Util::IsDepthFormat(format) ? 
+ static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) : + static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT), + 0, levels, 0, layers}}; + + VkImageView view = VK_NULL_HANDLE; + res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateImageView failed: "); + vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr); + vkFreeMemory(g_vulkan_context->GetDevice(), device_memory, nullptr); + return false; + } + + if (IsValid()) + Destroy(true); + + m_width = width; + m_height = height; + m_levels = levels; + m_layers = layers; + m_format = format; + m_samples = samples; + m_view_type = view_type; + m_image = image; + m_device_memory = device_memory; + m_view = view; + return true; +} + +bool Texture::Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels, u32 layers, + VkFormat format, VkSampleCountFlagBits samples) +{ + // Only need to create the image view, this is mainly for swap chains. + VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + nullptr, + 0, + existing_image, + view_type, + format, + {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, + VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, + {Util::IsDepthFormat(format) ? + static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) : + static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT), + 0, levels, 0, layers}}; + + // Memory is managed by the owner of the image. + VkImageView view = VK_NULL_HANDLE; + VkResult res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateImageView failed: "); + return false; + } + + if (IsValid()) + Destroy(true); + + m_width = width; + m_height = height; + m_levels = levels; + m_layers = layers; + m_format = format; + m_samples = samples; + m_view_type = view_type; + m_image = existing_image; + m_view = view; + return true; +} + +void Texture::Destroy(bool defer /* = true */) +{ + if (m_view != VK_NULL_HANDLE) + { + if (defer) + g_vulkan_context->DeferImageViewDestruction(m_view); + else + vkDestroyImageView(g_vulkan_context->GetDevice(), m_view, nullptr); + m_view = VK_NULL_HANDLE; + } + + // If we don't have device memory allocated, the image is not owned by us (e.g.
swapchain) + if (m_device_memory != VK_NULL_HANDLE) + { + DebugAssert(m_image != VK_NULL_HANDLE); + if (defer) + g_vulkan_context->DeferImageDestruction(m_image); + else + vkDestroyImage(g_vulkan_context->GetDevice(), m_image, nullptr); + m_image = VK_NULL_HANDLE; + + if (defer) + g_vulkan_context->DeferDeviceMemoryDestruction(m_device_memory); + else + vkFreeMemory(g_vulkan_context->GetDevice(), m_device_memory, nullptr); + m_device_memory = VK_NULL_HANDLE; + } + + m_width = 0; + m_height = 0; + m_levels = 0; + m_layers = 0; + m_format = VK_FORMAT_UNDEFINED; + m_samples = VK_SAMPLE_COUNT_1_BIT; + m_view_type = VK_IMAGE_VIEW_TYPE_2D; + m_layout = VK_IMAGE_LAYOUT_UNDEFINED; + m_image = VK_NULL_HANDLE; + m_device_memory = VK_NULL_HANDLE; + m_view = VK_NULL_HANDLE; +} + +void Texture::OverrideImageLayout(VkImageLayout new_layout) +{ + m_layout = new_layout; +} + +void Texture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout) +{ + if (m_layout == new_layout) + return; + + VkImageMemoryBarrier barrier = { + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType + nullptr, // const void* pNext + 0, // VkAccessFlags srcAccessMask + 0, // VkAccessFlags dstAccessMask + m_layout, // VkImageLayout oldLayout + new_layout, // VkImageLayout newLayout + VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex + VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex + m_image, // VkImage image + {static_cast<VkImageAspectFlags>(Util::IsDepthFormat(m_format) ? VK_IMAGE_ASPECT_DEPTH_BIT : + VK_IMAGE_ASPECT_COLOR_BIT), + 0, m_levels, 0, m_layers} // VkImageSubresourceRange subresourceRange + }; + + // srcStageMask -> Stages that must complete before the barrier + // dstStageMask -> Stages that must wait until after the barrier before beginning + VkPipelineStageFlags srcStageMask, dstStageMask; + switch (m_layout) + { + case VK_IMAGE_LAYOUT_UNDEFINED: + // Layout undefined therefore contents undefined, and we don't care what happens to it. + barrier.srcAccessMask = 0; + srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; + break; + + case VK_IMAGE_LAYOUT_PREINITIALIZED: + // Image has been pre-initialized by the host, so ensure all writes have completed. + barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; + srcStageMask = VK_PIPELINE_STAGE_HOST_BIT; + break; + + case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: + // Image was being used as a color attachment, so ensure all writes have completed. + barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + break; + + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: + // Image was being used as a depth/stencil attachment, so ensure all writes have completed. + barrier.srcAccessMask = + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; + srcStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; + break; + + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: + // Image was being used as a shader resource, make sure all reads have finished. + barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; + srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + break; + + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: + // Image was being used as a copy source, ensure all reads have finished.
+ barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; + break; + + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: + // Image was being used as a copy destination, ensure all writes have finished. + barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; + break; + + default: + srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; + break; + } + + switch (new_layout) + { + case VK_IMAGE_LAYOUT_UNDEFINED: + barrier.dstAccessMask = 0; + dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; + break; + + case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: + barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + break; + + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: + barrier.dstAccessMask = + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; + dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; + break; + + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: + barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + break; + + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: + barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; + break; + + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: + barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; + break; + + case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: + srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT; + dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; + break; + + default: + dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; + break; + } + + vkCmdPipelineBarrier(command_buffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &barrier); + + m_layout = new_layout; +} + +VkFramebuffer Texture::CreateFramebuffer(VkRenderPass render_pass) +{ + const VkFramebufferCreateInfo ci = { + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0u, render_pass, 1, &m_view, m_width, m_height, m_layers}; + VkFramebuffer fb = VK_NULL_HANDLE; + VkResult res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &ci, nullptr, &fb); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: "); + return VK_NULL_HANDLE; + } + + return fb; +} + +} // namespace Vulkan diff --git a/src/common/vulkan/texture.h b/src/common/vulkan/texture.h new file mode 100644 index 000000000..15d8dcdd5 --- /dev/null +++ b/src/common/vulkan/texture.h @@ -0,0 +1,72 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included. + +#pragma once +#include "../types.h" +#include "vulkan_loader.h" +#include <algorithm> + +namespace Vulkan { +class Texture +{ +public: + Texture(); + Texture(Texture&& move); + Texture(const Texture&) = delete; + ~Texture(); + + Texture& operator=(Texture&& move); + Texture& operator=(const Texture&) = delete; + + ALWAYS_INLINE bool IsValid() const { return (m_image != VK_NULL_HANDLE); } + + /// An image is considered owned/managed if we control the memory.
+ ALWAYS_INLINE bool IsOwned() const { return (m_device_memory != VK_NULL_HANDLE); } + + ALWAYS_INLINE u32 GetWidth() const { return m_width; } + ALWAYS_INLINE u32 GetHeight() const { return m_height; } + ALWAYS_INLINE u32 GetLevels() const { return m_levels; } + ALWAYS_INLINE u32 GetLayers() const { return m_layers; } + ALWAYS_INLINE VkFormat GetFormat() const { return m_format; } + ALWAYS_INLINE VkSampleCountFlagBits GetSamples() const { return m_samples; } + ALWAYS_INLINE VkImageLayout GetLayout() const { return m_layout; } + ALWAYS_INLINE VkImageViewType GetViewType() const { return m_view_type; } + ALWAYS_INLINE VkImage GetImage() const { return m_image; } + ALWAYS_INLINE VkDeviceMemory GetDeviceMemory() const { return m_device_memory; } + ALWAYS_INLINE VkImageView GetView() const { return m_view; } + + bool Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples, + VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage); + + bool Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels, u32 layers, + VkFormat format, VkSampleCountFlagBits samples); + + void Destroy(bool defer = true); + + // Used when the render pass is changing the image layout, or to force it to + // VK_IMAGE_LAYOUT_UNDEFINED, if the existing contents of the image is + // irrelevant and will not be loaded. + void OverrideImageLayout(VkImageLayout new_layout); + + void TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout); + + VkFramebuffer CreateFramebuffer(VkRenderPass render_pass); + +private: + u32 m_width = 0; + u32 m_height = 0; + u32 m_levels = 0; + u32 m_layers = 0; + VkFormat m_format = VK_FORMAT_UNDEFINED; + VkSampleCountFlagBits m_samples = VK_SAMPLE_COUNT_1_BIT; + VkImageViewType m_view_type = VK_IMAGE_VIEW_TYPE_2D; + VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED; + + VkImage m_image = VK_NULL_HANDLE; + VkDeviceMemory m_device_memory = VK_NULL_HANDLE; + VkImageView m_view = VK_NULL_HANDLE; +}; + +} // namespace Vulkan diff --git a/src/common/vulkan/util.cpp b/src/common/vulkan/util.cpp new file mode 100644 index 000000000..f4ef8b76c --- /dev/null +++ b/src/common/vulkan/util.cpp @@ -0,0 +1,402 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included. 
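TransitionToLayout() in the texture code above derives the access masks and pipeline stages for both sides of the barrier from the tracked m_layout, so callers only name the destination layout. A typical upload under that model might look like the following sketch; cmdbuf and staging_buffer are hypothetical stand-ins for the caller's objects, not code from the renderer:

// Illustrative upload; `cmdbuf` and `staging_buffer` are assumed to exist.
texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

const VkBufferImageCopy region = {0, 0, 0, // tightly-packed source buffer
                                  {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1},
                                  {0, 0, 0},
                                  {texture.GetWidth(), texture.GetHeight(), 1}};
vkCmdCopyBufferToImage(cmdbuf, staging_buffer, texture.GetImage(),
                       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);

// The second transition emits a TRANSFER_WRITE -> SHADER_READ barrier, because
// the tracked layout is now TRANSFER_DST_OPTIMAL.
texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);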
+ +#include "util.h" +#include "../assert.h" +#include "../log.h" +#include "../string_util.h" +#include "context.h" +#include "shader_compiler.h" +Log_SetChannel(Vulkan::Util); + +namespace Vulkan { +namespace Util { +bool IsDepthFormat(VkFormat format) +{ + switch (format) + { + case VK_FORMAT_D16_UNORM: + case VK_FORMAT_D16_UNORM_S8_UINT: + case VK_FORMAT_D24_UNORM_S8_UINT: + case VK_FORMAT_D32_SFLOAT: + case VK_FORMAT_D32_SFLOAT_S8_UINT: + return true; + default: + return false; + } +} + +bool IsCompressedFormat(VkFormat format) +{ + switch (format) + { + case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: + case VK_FORMAT_BC2_UNORM_BLOCK: + case VK_FORMAT_BC3_UNORM_BLOCK: + case VK_FORMAT_BC7_UNORM_BLOCK: + return true; + + default: + return false; + } +} + +VkFormat GetLinearFormat(VkFormat format) +{ + switch (format) + { + case VK_FORMAT_R8_SRGB: + return VK_FORMAT_R8_UNORM; + case VK_FORMAT_R8G8_SRGB: + return VK_FORMAT_R8G8_UNORM; + case VK_FORMAT_R8G8B8_SRGB: + return VK_FORMAT_R8G8B8_UNORM; + case VK_FORMAT_R8G8B8A8_SRGB: + return VK_FORMAT_R8G8B8A8_UNORM; + case VK_FORMAT_B8G8R8_SRGB: + return VK_FORMAT_B8G8R8_UNORM; + case VK_FORMAT_B8G8R8A8_SRGB: + return VK_FORMAT_B8G8R8A8_UNORM; + default: + return format; + } +} + +u32 GetTexelSize(VkFormat format) +{ + // Only contains pixel formats we use. + switch (format) + { + case VK_FORMAT_R32_SFLOAT: + return 4; + + case VK_FORMAT_D32_SFLOAT: + return 4; + + case VK_FORMAT_R8G8B8A8_UNORM: + return 4; + + case VK_FORMAT_B8G8R8A8_UNORM: + return 4; + + case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: + return 8; + + case VK_FORMAT_BC2_UNORM_BLOCK: + case VK_FORMAT_BC3_UNORM_BLOCK: + case VK_FORMAT_BC7_UNORM_BLOCK: + return 16; + + default: + Panic("Unhandled pixel format"); + return 1; + } +} + +u32 GetBlockSize(VkFormat format) +{ + switch (format) + { + case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: + case VK_FORMAT_BC2_UNORM_BLOCK: + case VK_FORMAT_BC3_UNORM_BLOCK: + case VK_FORMAT_BC7_UNORM_BLOCK: + return 4; + + default: + return 1; + } +} + +VkRect2D ClampRect2D(const VkRect2D& rect, u32 width, u32 height) +{ + VkRect2D out; + out.offset.x = std::clamp(rect.offset.x, 0, static_cast(width - 1)); + out.offset.y = std::clamp(rect.offset.y, 0, static_cast(height - 1)); + out.extent.width = std::min(rect.extent.width, width - static_cast(rect.offset.x)); + out.extent.height = std::min(rect.extent.height, height - static_cast(rect.offset.y)); + return out; +} + +VkBlendFactor GetAlphaBlendFactor(VkBlendFactor factor) +{ + switch (factor) + { + case VK_BLEND_FACTOR_SRC_COLOR: + return VK_BLEND_FACTOR_SRC_ALPHA; + case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR: + return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + case VK_BLEND_FACTOR_DST_COLOR: + return VK_BLEND_FACTOR_DST_ALPHA; + case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR: + return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA; + default: + return factor; + } +} + +void SetViewport(VkCommandBuffer command_buffer, int x, int y, int width, int height, float min_depth /*= 0.0f*/, + float max_depth /*= 1.0f*/) +{ + const VkViewport vp{static_cast(x), + static_cast(y), + static_cast(width), + static_cast(height), + min_depth, + max_depth}; + vkCmdSetViewport(command_buffer, 0, 1, &vp); +} + +void SetScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height) +{ + const VkRect2D scissor{{x, y}, {static_cast(width), static_cast(height)}}; + vkCmdSetScissor(command_buffer, 0, 1, &scissor); +} + +void SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height, + float min_depth /* = 0.0f */, float 
max_depth /* = 1.0f */) +{ + const VkViewport vp{static_cast<float>(x), + static_cast<float>(y), + static_cast<float>(width), + static_cast<float>(height), + min_depth, + max_depth}; + const VkRect2D scissor{{x, y}, {static_cast<u32>(width), static_cast<u32>(height)}}; + vkCmdSetViewport(command_buffer, 0, 1, &vp); + vkCmdSetScissor(command_buffer, 0, 1, &scissor); +} + +void SafeDestroyFramebuffer(VkFramebuffer& fb) +{ + if (fb != VK_NULL_HANDLE) + { + vkDestroyFramebuffer(g_vulkan_context->GetDevice(), fb, nullptr); + fb = VK_NULL_HANDLE; + } +} + +void SafeDestroyPipeline(VkPipeline& p) +{ + if (p != VK_NULL_HANDLE) + { + vkDestroyPipeline(g_vulkan_context->GetDevice(), p, nullptr); + p = VK_NULL_HANDLE; + } +} + +void SafeDestroyPipelineLayout(VkPipelineLayout& pl) +{ + if (pl != VK_NULL_HANDLE) + { + vkDestroyPipelineLayout(g_vulkan_context->GetDevice(), pl, nullptr); + pl = VK_NULL_HANDLE; + } +} + +void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl) +{ + if (dsl != VK_NULL_HANDLE) + { + vkDestroyDescriptorSetLayout(g_vulkan_context->GetDevice(), dsl, nullptr); + dsl = VK_NULL_HANDLE; + } +} + +void SafeDestroyBufferView(VkBufferView& bv) +{ + if (bv != VK_NULL_HANDLE) + { + vkDestroyBufferView(g_vulkan_context->GetDevice(), bv, nullptr); + bv = VK_NULL_HANDLE; + } +} + +void SafeDestroySampler(VkSampler& samp) +{ + if (samp != VK_NULL_HANDLE) + { + vkDestroySampler(g_vulkan_context->GetDevice(), samp, nullptr); + samp = VK_NULL_HANDLE; + } +} + +void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds) +{ + if (ds != VK_NULL_HANDLE) + { + g_vulkan_context->FreeGlobalDescriptorSet(ds); + ds = VK_NULL_HANDLE; + } +} + +void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask, + VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size, + VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask) +{ + VkBufferMemoryBarrier buffer_info = { + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType + nullptr, // const void* pNext + src_access_mask, // VkAccessFlags srcAccessMask + dst_access_mask, // VkAccessFlags dstAccessMask + VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex + VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex + buffer, // VkBuffer buffer + offset, // VkDeviceSize offset + size // VkDeviceSize size + }; + + vkCmdPipelineBarrier(command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1, &buffer_info, 0, nullptr); +} + +VkShaderModule CreateShaderModule(const u32* spv, size_t spv_word_count) +{ + VkShaderModuleCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + info.codeSize = spv_word_count * sizeof(u32); + info.pCode = spv; + + VkShaderModule module; + VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &info, nullptr, &module); + if (res != VK_SUCCESS) + { + LOG_VULKAN_ERROR(res, "vkCreateShaderModule failed: "); + return VK_NULL_HANDLE; + } + + return module; +} + +VkShaderModule CompileAndCreateVertexShader(std::string_view source_code) +{ + std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileVertexShader(source_code); + if (!code) + return VK_NULL_HANDLE; + + return CreateShaderModule(code->data(), code->size()); +} + +VkShaderModule CompileAndCreateGeometryShader(std::string_view source_code) +{ + std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileGeometryShader(source_code); + if (!code) + return VK_NULL_HANDLE; + + return CreateShaderModule(code->data(), code->size()); +} + +VkShaderModule CompileAndCreateFragmentShader(std::string_view source_code) +{ +
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileFragmentShader(source_code); + if (!code) + return VK_NULL_HANDLE; + + return CreateShaderModule(code->data(), code->size()); +} + +VkShaderModule CompileAndCreateComputeShader(std::string_view source_code) +{ + std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileComputeShader(source_code); + if (!code) + return VK_NULL_HANDLE; + + return CreateShaderModule(code->data(), code->size()); +} + +const char* VkResultToString(VkResult res) +{ + switch (res) + { + case VK_SUCCESS: + return "VK_SUCCESS"; + + case VK_NOT_READY: + return "VK_NOT_READY"; + + case VK_TIMEOUT: + return "VK_TIMEOUT"; + + case VK_EVENT_SET: + return "VK_EVENT_SET"; + + case VK_EVENT_RESET: + return "VK_EVENT_RESET"; + + case VK_INCOMPLETE: + return "VK_INCOMPLETE"; + + case VK_ERROR_OUT_OF_HOST_MEMORY: + return "VK_ERROR_OUT_OF_HOST_MEMORY"; + + case VK_ERROR_OUT_OF_DEVICE_MEMORY: + return "VK_ERROR_OUT_OF_DEVICE_MEMORY"; + + case VK_ERROR_INITIALIZATION_FAILED: + return "VK_ERROR_INITIALIZATION_FAILED"; + + case VK_ERROR_DEVICE_LOST: + return "VK_ERROR_DEVICE_LOST"; + + case VK_ERROR_MEMORY_MAP_FAILED: + return "VK_ERROR_MEMORY_MAP_FAILED"; + + case VK_ERROR_LAYER_NOT_PRESENT: + return "VK_ERROR_LAYER_NOT_PRESENT"; + + case VK_ERROR_EXTENSION_NOT_PRESENT: + return "VK_ERROR_EXTENSION_NOT_PRESENT"; + + case VK_ERROR_FEATURE_NOT_PRESENT: + return "VK_ERROR_FEATURE_NOT_PRESENT"; + + case VK_ERROR_INCOMPATIBLE_DRIVER: + return "VK_ERROR_INCOMPATIBLE_DRIVER"; + + case VK_ERROR_TOO_MANY_OBJECTS: + return "VK_ERROR_TOO_MANY_OBJECTS"; + + case VK_ERROR_FORMAT_NOT_SUPPORTED: + return "VK_ERROR_FORMAT_NOT_SUPPORTED"; + + case VK_ERROR_SURFACE_LOST_KHR: + return "VK_ERROR_SURFACE_LOST_KHR"; + + case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: + return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR"; + + case VK_SUBOPTIMAL_KHR: + return "VK_SUBOPTIMAL_KHR"; + + case VK_ERROR_OUT_OF_DATE_KHR: + return "VK_ERROR_OUT_OF_DATE_KHR"; + + case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: + return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR"; + + case VK_ERROR_VALIDATION_FAILED_EXT: + return "VK_ERROR_VALIDATION_FAILED_EXT"; + + case VK_ERROR_INVALID_SHADER_NV: + return "VK_ERROR_INVALID_SHADER_NV"; + + default: + return "UNKNOWN_VK_RESULT"; + } +} + +void LogVulkanResult(int level, const char* func_name, VkResult res, const char* msg, ...) +{ + std::va_list ap; + va_start(ap, msg); + std::string real_msg = StringUtil::StdStringFromFormatV(msg, ap); + va_end(ap); + + Log::Writef("Vulkan", func_name, static_cast<LOGLEVEL>(level), "(%s) %s (%d: %s)", func_name, real_msg.c_str(), + static_cast<int>(res), VkResultToString(res)); +} + +} // namespace Util + +} // namespace Vulkan diff --git a/src/common/vulkan/util.h b/src/common/vulkan/util.h new file mode 100644 index 000000000..9a3aed2df --- /dev/null +++ b/src/common/vulkan/util.h @@ -0,0 +1,80 @@ +// Copyright 2016 Dolphin Emulator Project +// Copyright 2020 DuckStation Emulator Project +// Licensed under GPLv2+ +// Refer to the LICENSE file included.
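The Safe* helpers in util.cpp above take the handle by reference and reset it to VK_NULL_HANDLE after destruction, which makes teardown paths idempotent. A small sketch of that idiom; fb, texture and render_pass are hypothetical objects owned by the caller:

// `fb` is illustrative; any VkFramebuffer owned by the caller works the same way.
VkFramebuffer fb = texture.CreateFramebuffer(render_pass);
if (fb == VK_NULL_HANDLE)
  return false;

// ... use fb for rendering ...

Util::SafeDestroyFramebuffer(fb); // destroys the framebuffer, fb becomes VK_NULL_HANDLE
Util::SafeDestroyFramebuffer(fb); // second call is a harmless no-op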
+ +#pragma once + +#include "../types.h" +#include "vulkan_loader.h" +#include <algorithm> +#include <string_view> + +namespace Vulkan { +namespace Util { + +inline constexpr u32 MakeRGBA8Color(float r, float g, float b, float a) +{ + return (static_cast<u32>(std::clamp(static_cast<int>(r * 255.0f), 0, 255)) << 0) | + (static_cast<u32>(std::clamp(static_cast<int>(g * 255.0f), 0, 255)) << 8) | + (static_cast<u32>(std::clamp(static_cast<int>(b * 255.0f), 0, 255)) << 16) | + (static_cast<u32>(std::clamp(static_cast<int>(a * 255.0f), 0, 255)) << 24); +} + +bool IsDepthFormat(VkFormat format); +bool IsCompressedFormat(VkFormat format); +VkFormat GetLinearFormat(VkFormat format); +u32 GetTexelSize(VkFormat format); +u32 GetBlockSize(VkFormat format); + +// Clamps a VkRect2D to the specified dimensions. +VkRect2D ClampRect2D(const VkRect2D& rect, u32 width, u32 height); + +// Map {SRC,DST}_COLOR to {SRC,DST}_ALPHA +VkBlendFactor GetAlphaBlendFactor(VkBlendFactor factor); + +// Safe destroy helpers +void SafeDestroyFramebuffer(VkFramebuffer& fb); +void SafeDestroyPipeline(VkPipeline& p); +void SafeDestroyPipelineLayout(VkPipelineLayout& pl); +void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl); +void SafeDestroyBufferView(VkBufferView& bv); +void SafeDestroySampler(VkSampler& samp); +void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds); + +void SetViewport(VkCommandBuffer command_buffer, int x, int y, int width, int height, float min_depth = 0.0f, + float max_depth = 1.0f); +void SetScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height); + +// Combines viewport and scissor updates +void SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height, float min_depth = 0.0f, + float max_depth = 1.0f); + +// Wrapper for creating a buffer memory barrier +void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask, + VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size, + VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask); + +// Create a shader module from the specified SPIR-V. +VkShaderModule CreateShaderModule(const u32* spv, size_t spv_word_count); + +// Compile a vertex shader and create a shader module, discarding the intermediate SPIR-V. +VkShaderModule CompileAndCreateVertexShader(std::string_view source_code); + +// Compile a geometry shader and create a shader module, discarding the intermediate SPIR-V. +VkShaderModule CompileAndCreateGeometryShader(std::string_view source_code); + +// Compile a fragment shader and create a shader module, discarding the intermediate SPIR-V. +VkShaderModule CompileAndCreateFragmentShader(std::string_view source_code); + +// Compile a compute shader and create a shader module, discarding the intermediate SPIR-V. +VkShaderModule CompileAndCreateComputeShader(std::string_view source_code); + +const char* VkResultToString(VkResult res); +void LogVulkanResult(int level, const char* func_name, VkResult res, const char* msg, ...); + +#define LOG_VULKAN_ERROR(res, ...) ::Vulkan::Util::LogVulkanResult(1, __func__, res, __VA_ARGS__) + +} // namespace Util + +} // namespace Vulkan diff --git a/src/common/window_info.h b/src/common/window_info.h index 413f637dc..3a4fd91c7 100644 --- a/src/common/window_info.h +++ b/src/common/window_info.h @@ -1,4 +1,5 @@ #pragma once +#include "types.h" // Contains the information required to create a graphics context in a window.
struct WindowInfo diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index d044e9650..c76e58ae9 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -36,6 +36,8 @@ add_library(core gpu_hw_opengl.h gpu_hw_shadergen.cpp gpu_hw_shadergen.h + gpu_hw_vulkan.cpp + gpu_hw_vulkan.h gpu_sw.cpp gpu_sw.h gte.cpp @@ -91,7 +93,7 @@ set(RECOMPILER_SRCS target_include_directories(core PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/..") target_include_directories(core PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/..") -target_link_libraries(core PUBLIC Threads::Threads common imgui tinyxml2 zlib) +target_link_libraries(core PUBLIC Threads::Threads common imgui tinyxml2 zlib vulkan-loader) target_link_libraries(core PRIVATE glad stb) if(WIN32) diff --git a/src/core/core.vcxproj b/src/core/core.vcxproj index 36392b7a6..75fc8c713 100644 --- a/src/core/core.vcxproj +++ b/src/core/core.vcxproj @@ -64,6 +64,7 @@ + @@ -105,6 +106,7 @@ + @@ -296,7 +298,7 @@ WITH_RECOMPILER=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -321,7 +323,7 @@ WITH_RECOMPILER=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -346,7 +348,7 @@ WITH_RECOMPILER=1;_ITERATOR_DEBUG_LEVEL=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUGFAST;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) Default true false @@ -374,7 +376,7 @@ WITH_RECOMPILER=1;_ITERATOR_DEBUG_LEVEL=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUGFAST;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - 
$(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) Default true false @@ -401,7 +403,7 @@ MaxSpeed true WITH_RECOMPILER=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -427,7 +429,7 @@ MaxSpeed true WITH_RECOMPILER=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true true stdcpp17 @@ -454,7 +456,7 @@ MaxSpeed true WITH_RECOMPILER=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -480,7 +482,7 @@ MaxSpeed true WITH_RECOMPILER=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\xbyak\xbyak;$(SolutionDir)dep\tinyxml2\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true true stdcpp17 diff --git 
a/src/core/core.vcxproj.filters b/src/core/core.vcxproj.filters index 0b8e7f84a..a5d2d065e 100644 --- a/src/core/core.vcxproj.filters +++ b/src/core/core.vcxproj.filters @@ -44,6 +44,7 @@ + <ClCompile Include="gpu_hw_vulkan.cpp" /> @@ -89,6 +90,7 @@ + <ClInclude Include="gpu_hw_vulkan.h" /> diff --git a/src/core/gpu.h b/src/core/gpu.h index bf112a0b6..889e70c09 100644 --- a/src/core/gpu.h +++ b/src/core/gpu.h @@ -164,6 +164,9 @@ public: // gpu_hw_opengl.cpp static std::unique_ptr<GPU> CreateHardwareOpenGLRenderer(); + // gpu_hw_vulkan.cpp + static std::unique_ptr<GPU> CreateHardwareVulkanRenderer(); + // gpu_sw.cpp static std::unique_ptr<GPU> CreateSoftwareRenderer(); @@ -602,7 +605,7 @@ protected: void ClearTextureWindowChangedFlag() { texture_window_changed = false; } } m_draw_mode = {}; - Common::Rectangle<u16> m_drawing_area; + Common::Rectangle<u16> m_drawing_area{0, 0, VRAM_WIDTH, VRAM_HEIGHT}; struct DrawingOffset { diff --git a/src/core/gpu_hw.cpp b/src/core/gpu_hw.cpp index edbe39b22..37d0f66b2 100644 --- a/src/core/gpu_hw.cpp +++ b/src/core/gpu_hw.cpp @@ -527,6 +527,36 @@ bool GPU_HW::UseVRAMCopyShader(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 w .Intersects(Common::Rectangle<u32>::FromExtents(dst_x, dst_y, width, height))); } +GPU_HW::VRAMFillUBOData GPU_HW::GetVRAMFillUBOData(u32 x, u32 y, u32 width, u32 height, u32 color) const +{ + // drop precision unless true colour is enabled + if (!m_true_color) + color = RGBA5551ToRGBA8888(RGBA8888ToRGBA5551(color)); + + VRAMFillUBOData uniforms; + std::tie(uniforms.u_fill_color[0], uniforms.u_fill_color[1], uniforms.u_fill_color[2], uniforms.u_fill_color[3]) = + RGBA8ToFloat(color); + uniforms.u_interlaced_displayed_field = GetActiveLineLSB(); + return uniforms; +} + +GPU_HW::VRAMCopyUBOData GPU_HW::GetVRAMCopyUBOData(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, + u32 height) const +{ + const VRAMCopyUBOData uniforms = {src_x * m_resolution_scale, + src_y * m_resolution_scale, + dst_x * m_resolution_scale, + dst_y * m_resolution_scale, + ((dst_x + width) % VRAM_WIDTH) * m_resolution_scale, + ((dst_y + height) % VRAM_HEIGHT) * m_resolution_scale, + width * m_resolution_scale, + height * m_resolution_scale, + m_GPUSTAT.set_mask_while_drawing ? 1u : 0u, + GetCurrentNormalizedBatchVertexDepthID()}; + + return uniforms; +} + GPU_HW::BatchPrimitive GPU_HW::GetPrimitiveForCommand(RenderCommand rc) { if (rc.primitive == Primitive::Line) diff --git a/src/core/gpu_hw.h b/src/core/gpu_hw.h index db411bd2c..3a98a7a27 100644 --- a/src/core/gpu_hw.h +++ b/src/core/gpu_hw.h @@ -14,9 +14,7 @@ public: enum class BatchPrimitive : u8 { Lines = 0, - LineStrip = 1, - Triangles = 2, - TriangleStrip = 3 + Triangles = 1 }; enum class BatchRenderMode : u8 @@ -119,6 +117,12 @@ protected: u32 u_set_mask_while_drawing; }; + struct VRAMFillUBOData + { + float u_fill_color[4]; + u32 u_interlaced_displayed_field; + }; + struct VRAMWriteUBOData { u32 u_base_coords[2]; @@ -230,6 +234,9 @@ protected: /// Returns true if the VRAM copy shader should be used (oversized copies, masking). bool UseVRAMCopyShader(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) const; + VRAMFillUBOData GetVRAMFillUBOData(u32 x, u32 y, u32 width, u32 height, u32 color) const; + VRAMCopyUBOData GetVRAMCopyUBOData(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) const; +
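
The two UBO helpers above pre-scale every coordinate by the resolution scale, and wrap the end coordinates so the copy shader can address transfers that run off the right or bottom edge of VRAM. A standalone sanity check of that wrap arithmetic (illustrative only; PSX VRAM is 1024x512, and the constants below are assumptions rather than project code):

    // End-coordinate wrap as computed by GetVRAMCopyUBOData.
    #include <cassert>
    #include <cstdint>

    int main()
    {
      constexpr uint32_t VRAM_WIDTH = 1024;
      const uint32_t dst_x = 1000, width = 48, scale = 2;
      // (1000 + 48) % 1024 == 24 -> the copy wraps to x = 24 at the left edge,
      // scaled to 48 at 2x internal resolution.
      const uint32_t end_x = ((dst_x + width) % VRAM_WIDTH) * scale;
      assert(end_x == 48);
      return 0;
    }
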
/// Handles quads with flipped texture coordinate directions. static void HandleFlippedQuadTextureCoordinates(BatchVertex* vertices); diff --git a/src/core/gpu_hw_d3d11.cpp b/src/core/gpu_hw_d3d11.cpp index 5bc660b17..d9dd67777 100644 --- a/src/core/gpu_hw_d3d11.cpp +++ b/src/core/gpu_hw_d3d11.cpp @@ -534,9 +534,8 @@ void GPU_HW_D3D11::DrawBatchVertices(BatchRenderMode render_mode, u32 base_verte { const bool textured = (m_batch.texture_mode != TextureMode::Disabled); - static constexpr std::array<D3D11_PRIMITIVE_TOPOLOGY, 4> d3d_primitives = { - {D3D11_PRIMITIVE_TOPOLOGY_LINELIST, D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST, - D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP}}; + static constexpr std::array<D3D11_PRIMITIVE_TOPOLOGY, 2> d3d_primitives = { + {D3D11_PRIMITIVE_TOPOLOGY_LINELIST, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST}}; m_context->IASetPrimitiveTopology(d3d_primitives[static_cast<u8>(m_batch.primitive)]); m_context->VSSetShader(m_batch_vertex_shaders[BoolToUInt8(textured)].Get(), nullptr, 0); diff --git a/src/core/gpu_hw_opengl.cpp b/src/core/gpu_hw_opengl.cpp index 120aebf12..be740165a 100644 --- a/src/core/gpu_hw_opengl.cpp +++ b/src/core/gpu_hw_opengl.cpp @@ -535,7 +535,7 @@ void GPU_HW_OpenGL::DrawBatchVertices(BatchRenderMode render_mode, u32 base_vert glDepthFunc(m_GPUSTAT.check_mask_before_draw ? GL_GEQUAL : GL_ALWAYS); - static constexpr std::array<GLenum, 4> gl_primitives = {{GL_LINES, GL_LINE_STRIP, GL_TRIANGLES, GL_TRIANGLE_STRIP}}; + static constexpr std::array<GLenum, 2> gl_primitives = {{GL_LINES, GL_TRIANGLES}}; glDrawArrays(gl_primitives[static_cast<u8>(m_batch.primitive)], m_batch_base_vertex, num_vertices); } diff --git a/src/core/gpu_hw_shadergen.cpp b/src/core/gpu_hw_shadergen.cpp index 52005eaec..464009e48 100644 --- a/src/core/gpu_hw_shadergen.cpp +++ b/src/core/gpu_hw_shadergen.cpp @@ -14,10 +14,11 @@ GPU_HW_ShaderGen::GPU_HW_ShaderGen(HostDisplay::RenderAPI render_api, u32 resolu { if (m_glsl) { - SetGLSLVersionString(); + if (m_render_api == HostDisplay::RenderAPI::OpenGL || m_render_api == HostDisplay::RenderAPI::OpenGLES) + SetGLSLVersionString(); - m_use_glsl_interface_blocks = (GLAD_GL_ES_VERSION_3_2 || GLAD_GL_VERSION_3_2); - m_use_glsl_binding_layout = UseGLSLBindingLayout(); + m_use_glsl_interface_blocks = (IsVulkan() || GLAD_GL_ES_VERSION_3_2 || GLAD_GL_VERSION_3_2); + m_use_glsl_binding_layout = (IsVulkan() || UseGLSLBindingLayout()); } } @@ -82,6 +83,8 @@ void GPU_HW_ShaderGen::WriteHeader(std::stringstream& ss) { if (m_render_api == HostDisplay::RenderAPI::OpenGL || m_render_api == HostDisplay::RenderAPI::OpenGLES) ss << m_glsl_version_string << "\n\n"; + else if (m_render_api == HostDisplay::RenderAPI::Vulkan) + ss << "#version 450 core\n\n";
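
For Vulkan the generated GLSL targets version 4.50 and is compiled to SPIR-V through glslang, so the version directive is pinned here rather than taken from SetGLSLVersionString(). Assembled from the statement above and the DefineMacro calls that follow, the emitted preamble comes out roughly as below (an illustrative reconstruction, assuming DefineMacro writes 0/1-valued defines):

    #version 450 core

    #define API_OPENGL 0
    #define API_OPENGL_ES 0
    #define API_D3D11 0
    #define API_VULKAN 1
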
// Extension enabling for OpenGL. if (m_render_api == HostDisplay::RenderAPI::OpenGLES) @@ -107,6 +110,7 @@ void GPU_HW_ShaderGen::WriteHeader(std::stringstream& ss) DefineMacro(ss, "API_OPENGL", m_render_api == HostDisplay::RenderAPI::OpenGL); DefineMacro(ss, "API_OPENGL_ES", m_render_api == HostDisplay::RenderAPI::OpenGLES); DefineMacro(ss, "API_D3D11", m_render_api == HostDisplay::RenderAPI::D3D11); + DefineMacro(ss, "API_VULKAN", m_render_api == HostDisplay::RenderAPI::Vulkan); if (m_render_api == HostDisplay::RenderAPI::OpenGLES) { @@ -219,9 +223,17 @@ float4 RGBA5551ToRGBA8(uint v) )"; } -void GPU_HW_ShaderGen::DeclareUniformBuffer(std::stringstream& ss, const std::initializer_list<const char*>& members) +void GPU_HW_ShaderGen::DeclareUniformBuffer(std::stringstream& ss, const std::initializer_list<const char*>& members, + bool push_constant_on_vulkan) { - if (m_glsl) + if (IsVulkan()) + { + if (push_constant_on_vulkan) + ss << "layout(push_constant) uniform PushConstants\n"; + else + ss << "layout(std140, set = 0, binding = 0) uniform UBOBlock\n"; + } + else if (m_glsl) { if (m_use_glsl_binding_layout) ss << "layout(std140, binding = 1) uniform UBOBlock\n"; @@ -243,7 +255,9 @@ { if (m_glsl) { - if (m_use_glsl_binding_layout) + if (IsVulkan()) + ss << "layout(set = 0, binding = " << (index + 1u) << ") "; + else if (m_use_glsl_binding_layout) ss << "layout(binding = " << index << ") "; ss << "uniform sampler2D " << name << ";\n"; @@ -260,7 +274,9 @@ void GPU_HW_ShaderGen::DeclareTextureBuffer(std::stringstream& ss, const char* n { if (m_glsl) { - if (m_use_glsl_binding_layout) + if (IsVulkan()) + ss << "layout(set = 0, binding = " << index << ") "; + else if (m_use_glsl_binding_layout) ss << "layout(binding = " << index << ") "; ss << "uniform " << (is_int ? (is_unsigned ? "u" : "i") : "") << "samplerBuffer " << name << ";\n"; @@ -296,6 +312,9 @@ void GPU_HW_ShaderGen::DeclareVertexEntryPoint( if (m_use_glsl_interface_blocks) { + if (IsVulkan()) + ss << "layout(location = 0) "; + ss << "out VertexData {\n"; for (u32 i = 0; i < num_color_outputs; i++) ss << " float4 v_col" << i << ";\n"; @@ -321,7 +340,12 @@ ss << "#define v_pos gl_Position\n\n"; if (declare_vertex_id) - ss << "#define v_id uint(gl_VertexID)\n"; + { + if (IsVulkan()) + ss << "#define v_id uint(gl_VertexIndex)\n"; + else + ss << "#define v_id uint(gl_VertexID)\n"; + } ss << "\n"; ss << "void main()\n"; @@ -366,6 +390,9 @@ void GPU_HW_ShaderGen::DeclareFragmentEntryPoint( { if (m_use_glsl_interface_blocks) { + if (IsVulkan()) + ss << "layout(location = 0) "; + ss << "in VertexData {\n"; for (u32 i = 0; i < num_color_inputs; i++) ss << " float4 v_col" << i << ";\n"; @@ -467,7 +494,8 @@ void GPU_HW_ShaderGen::WriteBatchUniformBuffer(std::stringstream& ss) DeclareUniformBuffer(ss, {"uint2 u_texture_window_mask", "uint2 u_texture_window_offset", "float u_src_alpha_factor", "float u_dst_alpha_factor", "uint u_interlaced_displayed_field", "uint u_base_vertex_depth_id", - "bool u_check_mask_before_draw", "bool u_set_mask_while_drawing"}); + "bool u_check_mask_before_draw", "bool u_set_mask_while_drawing"}, + false); } std::string GPU_HW_ShaderGen::GenerateBatchVertexShader(bool textured) @@ -507,6 +535,12 @@ std::string GPU_HW_ShaderGen::GenerateBatchVertexShader(bool textured) #if API_OPENGL || API_OPENGL_ES pos_y += EPSILON; #endif + + // NDC space Y flip in Vulkan.
+#if API_VULKAN + pos_y = -pos_y; +#endif + v_pos = float4(pos_x, pos_y, 0.0, 1.0); #if API_D3D11 @@ -861,13 +895,18 @@ CONSTANT float2 WIDTH = (1.0 / float2(VRAM_SIZE)) * float2(RESOLUTION_SCALE, RES // GS is a pain, too different between HLSL and GLSL... if (m_glsl) { - ss << R"( -in VertexData { + if (IsVulkan()) + ss << "layout(location = 0) "; + + ss << R"(in VertexData { float4 v_col0; nointerpolation float v_depth; -} in_data[]; +} in_data[];)"; + + if (IsVulkan()) + ss << "layout(location = 0) "; -out VertexData { + ss << R"(out VertexData { float4 v_col0; nointerpolation float v_depth; } out_data; @@ -968,8 +1007,8 @@ std::string GPU_HW_ShaderGen::GenerateScreenQuadVertexShader() { v_tex0 = float2(float((v_id << 1) & 2u), float(v_id & 2u)); v_pos = float4(v_tex0 * float2(2.0f, -2.0f) + float2(-1.0f, 1.0f), 0.0f, 1.0f); - #if API_OPENGL || API_OPENGL_ES - v_pos.y = -gl_Position.y; + #if API_OPENGL || API_OPENGL_ES || API_VULKAN + v_pos.y = -v_pos.y; #endif } )"; @@ -981,7 +1020,7 @@ std::string GPU_HW_ShaderGen::GenerateFillFragmentShader() { std::stringstream ss; WriteHeader(ss); - DeclareUniformBuffer(ss, {"float4 u_fill_color"}); + DeclareUniformBuffer(ss, {"float4 u_fill_color"}, true); DeclareFragmentEntryPoint(ss, 0, 1, {}, false, 1, true); ss << R"( @@ -999,7 +1038,7 @@ std::string GPU_HW_ShaderGen::GenerateInterlacedFillFragmentShader() std::stringstream ss; WriteHeader(ss); WriteCommonFunctions(ss); - DeclareUniformBuffer(ss, {"float4 u_fill_color", "uint u_interlaced_displayed_field"}); + DeclareUniformBuffer(ss, {"float4 u_fill_color", "uint u_interlaced_displayed_field"}, true); DeclareFragmentEntryPoint(ss, 0, 1, {}, true, 1, true); ss << R"( @@ -1019,7 +1058,7 @@ std::string GPU_HW_ShaderGen::GenerateCopyFragmentShader() { std::stringstream ss; WriteHeader(ss); - DeclareUniformBuffer(ss, {"float4 u_src_rect"}); + DeclareUniformBuffer(ss, {"float4 u_src_rect"}, true); DeclareTexture(ss, "samp0", 0); DeclareFragmentEntryPoint(ss, 0, 1, {}, false, 1); @@ -1043,7 +1082,7 @@ std::string GPU_HW_ShaderGen::GenerateDisplayFragmentShader(bool depth_24bit, DefineMacro(ss, "INTERLEAVED", interlace_mode == GPU_HW::InterlacedRenderMode::InterleavedFields); WriteCommonFunctions(ss); - DeclareUniformBuffer(ss, {"uint2 u_vram_offset", "uint u_crop_left", "uint u_field_offset"}); + DeclareUniformBuffer(ss, {"uint2 u_vram_offset", "uint u_crop_left", "uint u_field_offset"}, true); DeclareTexture(ss, "samp0", 0); DeclareFragmentEntryPoint(ss, 0, 1, {}, true, 1); @@ -1093,7 +1132,7 @@ std::string GPU_HW_ShaderGen::GenerateVRAMReadFragmentShader() std::stringstream ss; WriteHeader(ss); WriteCommonFunctions(ss); - DeclareUniformBuffer(ss, {"uint2 u_base_coords", "uint2 u_size"}); + DeclareUniformBuffer(ss, {"uint2 u_base_coords", "uint2 u_size"}, true); DeclareTexture(ss, "samp0", 0); @@ -1146,8 +1185,10 @@ std::string GPU_HW_ShaderGen::GenerateVRAMWriteFragmentShader() std::stringstream ss; WriteHeader(ss); WriteCommonFunctions(ss); - DeclareUniformBuffer(ss, {"uint2 u_base_coords", "uint2 u_size", "uint u_buffer_base_offset", "uint u_mask_or_bits", - "float u_depth_value"}); + DeclareUniformBuffer( + ss, + {"uint2 u_base_coords", "uint2 u_size", "uint u_buffer_base_offset", "uint u_mask_or_bits", "float u_depth_value"}, + true); DeclareTextureBuffer(ss, "samp0", 0, true, true); DeclareFragmentEntryPoint(ss, 0, 1, {}, true, 1, true); @@ -1176,8 +1217,10 @@ std::string GPU_HW_ShaderGen::GenerateVRAMCopyFragmentShader() std::stringstream ss; WriteHeader(ss); WriteCommonFunctions(ss); - 
DeclareUniformBuffer(ss, {"uint2 u_src_coords", "uint2 u_dst_coords", "uint2 u_end_coords", "uint2 u_size", - "bool u_set_mask_bit", "float u_depth_value"}); + DeclareUniformBuffer(ss, + {"uint2 u_src_coords", "uint2 u_dst_coords", "uint2 u_end_coords", "uint2 u_size", + "bool u_set_mask_bit", "float u_depth_value"}, + true); DeclareTexture(ss, "samp0", 0); DeclareFragmentEntryPoint(ss, 0, 1, {}, true, 1, true); diff --git a/src/core/gpu_hw_shadergen.h b/src/core/gpu_hw_shadergen.h index 3b2270dcc..6f6624166 100644 --- a/src/core/gpu_hw_shadergen.h +++ b/src/core/gpu_hw_shadergen.h @@ -28,9 +28,12 @@ public: std::string GenerateVRAMUpdateDepthFragmentShader(); private: + ALWAYS_INLINE bool IsVulkan() const { return (m_render_api == HostDisplay::RenderAPI::Vulkan); } + void SetGLSLVersionString(); void WriteHeader(std::stringstream& ss); - void DeclareUniformBuffer(std::stringstream& ss, const std::initializer_list& members); + void DeclareUniformBuffer(std::stringstream& ss, const std::initializer_list& members, + bool push_constant_on_vulkan); void DeclareTexture(std::stringstream& ss, const char* name, u32 index); void DeclareTextureBuffer(std::stringstream& ss, const char* name, u32 index, bool is_int, bool is_unsigned); void DeclareVertexEntryPoint(std::stringstream& ss, const std::initializer_list& attributes, diff --git a/src/core/gpu_hw_vulkan.cpp b/src/core/gpu_hw_vulkan.cpp new file mode 100644 index 000000000..bdd99e68f --- /dev/null +++ b/src/core/gpu_hw_vulkan.cpp @@ -0,0 +1,1191 @@ +#include "gpu_hw_vulkan.h" +#include "common/assert.h" +#include "common/log.h" +#include "common/scope_guard.h" +#include "common/vulkan/builders.h" +#include "common/vulkan/context.h" +#include "common/vulkan/shader_cache.h" +#include "common/vulkan/util.h" +#include "gpu_hw_shadergen.h" +#include "host_display.h" +#include "host_interface.h" +#include "system.h" +Log_SetChannel(GPU_HW_Vulkan); + +GPU_HW_Vulkan::GPU_HW_Vulkan() = default; + +GPU_HW_Vulkan::~GPU_HW_Vulkan() +{ + if (m_host_display) + { + m_host_display->ClearDisplayTexture(); + ResetGraphicsAPIState(); + } + + DestroyResources(); +} + +bool GPU_HW_Vulkan::Initialize(HostDisplay* host_display, System* system, DMA* dma, + InterruptController* interrupt_controller, Timers* timers) +{ + if (host_display->GetRenderAPI() != HostDisplay::RenderAPI::Vulkan) + { + Log_ErrorPrintf("Host render API is incompatible"); + return false; + } + + Assert(g_vulkan_shader_cache); + SetCapabilities(); + + if (!GPU_HW::Initialize(host_display, system, dma, interrupt_controller, timers)) + return false; + + if (!CreatePipelineLayouts()) + { + Log_ErrorPrintf("Failed to create pipeline layouts"); + return false; + } + + if (!CreateSamplers()) + { + Log_ErrorPrintf("Failed to create samplers"); + return false; + } + + if (!CreateVertexBuffer()) + { + Log_ErrorPrintf("Failed to create vertex buffer"); + return false; + } + + if (!CreateUniformBuffer()) + { + Log_ErrorPrintf("Failed to create uniform buffer"); + return false; + } + + if (!CreateTextureBuffer()) + { + Log_ErrorPrintf("Failed to create texture buffer"); + return false; + } + + if (!CreateFramebuffer()) + { + Log_ErrorPrintf("Failed to create framebuffer"); + return false; + } + + if (!CompilePipelines()) + { + Log_ErrorPrintf("Failed to compile pipelines"); + return false; + } + + RestoreGraphicsAPIState(); + return true; +} + +void GPU_HW_Vulkan::Reset() +{ + GPU_HW::Reset(); + + if (InRenderPass()) + EndRenderPass(); + + ClearFramebuffer(); +} + +void GPU_HW_Vulkan::ResetGraphicsAPIState() 
+{ + GPU_HW::ResetGraphicsAPIState(); + + if (InRenderPass()) + EndRenderPass(); + + // vram texture is probably going to be displayed now + if (!IsDisplayDisabled()) + { + m_vram_texture.TransitionToLayout(g_vulkan_context->GetCurrentCommandBuffer(), + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + } +} + +void GPU_HW_Vulkan::RestoreGraphicsAPIState() +{ + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + + VkDeviceSize vertex_buffer_offset = 0; + vkCmdBindVertexBuffers(cmdbuf, 0, 1, m_vertex_stream_buffer.GetBufferPointer(), &vertex_buffer_offset); + Vulkan::Util::SetViewport(cmdbuf, 0, 0, m_vram_texture.GetWidth(), m_vram_texture.GetHeight()); + vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_batch_pipeline_layout, 0, 1, + &m_batch_descriptor_set, 1, &m_current_uniform_buffer_offset); + SetScissorFromDrawingArea(); +} + +void GPU_HW_Vulkan::UpdateSettings() +{ + GPU_HW::UpdateSettings(); + + // Everything should be finished executing before recreating resources. + g_vulkan_context->ExecuteCommandBuffer(true); + + CreateFramebuffer(); + DestroyPipelines(); + CompilePipelines(); + RestoreGraphicsAPIState(); + UpdateDisplay(); +} + +void GPU_HW_Vulkan::MapBatchVertexPointer(u32 required_vertices) +{ + DebugAssert(!m_batch_start_vertex_ptr); + + const u32 required_space = required_vertices * sizeof(BatchVertex); + if (!m_vertex_stream_buffer.ReserveMemory(required_space, sizeof(BatchVertex))) + { + Log_PerfPrintf("Executing command buffer while waiting for %u bytes in vertex stream buffer", required_space); + EndRenderPass(); + g_vulkan_context->ExecuteCommandBuffer(false); + RestoreGraphicsAPIState(); + if (!m_vertex_stream_buffer.ReserveMemory(required_space, sizeof(BatchVertex))) + Panic("Failed to reserve vertex stream buffer memory"); + } + + m_batch_start_vertex_ptr = static_cast<BatchVertex*>(m_vertex_stream_buffer.GetCurrentHostPointer()); + m_batch_current_vertex_ptr = m_batch_start_vertex_ptr; + m_batch_end_vertex_ptr = m_batch_start_vertex_ptr + (m_vertex_stream_buffer.GetCurrentSpace() / sizeof(BatchVertex)); + m_batch_base_vertex = m_vertex_stream_buffer.GetCurrentOffset() / sizeof(BatchVertex); +} + +void GPU_HW_Vulkan::UnmapBatchVertexPointer(u32 used_vertices) +{ + DebugAssert(m_batch_start_vertex_ptr); + if (used_vertices > 0) + m_vertex_stream_buffer.CommitMemory(used_vertices * sizeof(BatchVertex)); + + m_batch_start_vertex_ptr = nullptr; + m_batch_end_vertex_ptr = nullptr; + m_batch_current_vertex_ptr = nullptr; +} +
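
Batch uniforms go through a single VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC descriptor: the set is written once against the uniform stream buffer, and UploadUniformBuffer below only rebinds it with a fresh dynamic offset, avoiding per-draw descriptor writes. A minimal sketch of the pattern (the helper name and parameters here are hypothetical, not the class's API):

    // Sketch: one descriptor set plus dynamic offsets instead of per-draw descriptor updates.
    void BindBatchUniforms(VkCommandBuffer cmdbuf, VkPipelineLayout layout, VkDescriptorSet set,
                           uint32_t stream_offset)
    {
      // The descriptor always points at offset 0 with range sizeof(BatchUBOData);
      // the dynamic offset selects which slice of the stream buffer the shader reads.
      vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, 1, &set, 1, &stream_offset);
    }
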
&m_current_uniform_buffer_offset); +} + +void GPU_HW_Vulkan::SetCapabilities() +{ + const u32 max_texture_size = g_vulkan_context->GetDeviceLimits().maxImageDimension2D; + const u32 max_texture_scale = max_texture_size / VRAM_WIDTH; + + Log_InfoPrintf("Max texture size: %ux%u", max_texture_size, max_texture_size); + + m_max_resolution_scale = max_texture_scale; + m_supports_dual_source_blend = true; +} + +void GPU_HW_Vulkan::DestroyResources() +{ + // Everything should be finished executing before recreating resources. + if (g_vulkan_context) + g_vulkan_context->ExecuteCommandBuffer(true); + + DestroyFramebuffer(); + DestroyPipelines(); + + Vulkan::Util::SafeFreeGlobalDescriptorSet(m_vram_write_descriptor_set); + Vulkan::Util::SafeDestroyBufferView(m_texture_stream_buffer_view); + + m_vertex_stream_buffer.Destroy(false); + m_uniform_stream_buffer.Destroy(false); + m_texture_stream_buffer.Destroy(false); + + Vulkan::Util::SafeDestroyPipelineLayout(m_vram_write_pipeline_layout); + Vulkan::Util::SafeDestroyPipelineLayout(m_single_sampler_pipeline_layout); + Vulkan::Util::SafeDestroyPipelineLayout(m_no_samplers_pipeline_layout); + Vulkan::Util::SafeDestroyPipelineLayout(m_batch_pipeline_layout); + Vulkan::Util::SafeDestroyDescriptorSetLayout(m_vram_write_descriptor_set_layout); + Vulkan::Util::SafeDestroyDescriptorSetLayout(m_single_sampler_descriptor_set_layout); + Vulkan::Util::SafeDestroyDescriptorSetLayout(m_batch_descriptor_set_layout); + Vulkan::Util::SafeDestroySampler(m_point_sampler); + Vulkan::Util::SafeDestroySampler(m_linear_sampler); +} + +void GPU_HW_Vulkan::BeginRenderPass(VkRenderPass render_pass, VkFramebuffer framebuffer, u32 x, u32 y, u32 width, + u32 height) +{ + DebugAssert(m_current_render_pass == VK_NULL_HANDLE); + + const VkRenderPassBeginInfo bi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, + nullptr, + render_pass, + framebuffer, + {{static_cast(x), static_cast(y)}, {width, height}}, + 0u, + nullptr}; + vkCmdBeginRenderPass(g_vulkan_context->GetCurrentCommandBuffer(), &bi, VK_SUBPASS_CONTENTS_INLINE); + m_current_render_pass = render_pass; +} + +void GPU_HW_Vulkan::BeginVRAMRenderPass() +{ + if (m_current_render_pass == m_vram_render_pass) + return; + + EndRenderPass(); + BeginRenderPass(m_vram_render_pass, m_vram_framebuffer, 0, 0, m_vram_texture.GetWidth(), m_vram_texture.GetHeight()); +} + +void GPU_HW_Vulkan::EndRenderPass() +{ + if (m_current_render_pass == VK_NULL_HANDLE) + return; + + vkCmdEndRenderPass(g_vulkan_context->GetCurrentCommandBuffer()); + m_current_render_pass = VK_NULL_HANDLE; +} + +bool GPU_HW_Vulkan::CreatePipelineLayouts() +{ + VkDevice device = g_vulkan_context->GetDevice(); + + Vulkan::DescriptorSetLayoutBuilder dslbuilder; + dslbuilder.AddBinding(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, + VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT); + dslbuilder.AddBinding(1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT); + m_batch_descriptor_set_layout = dslbuilder.Create(device); + if (m_batch_descriptor_set_layout == VK_NULL_HANDLE) + return false; + + // textures start at 1 + dslbuilder.AddBinding(1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT); + m_single_sampler_descriptor_set_layout = dslbuilder.Create(device); + if (m_single_sampler_descriptor_set_layout == VK_NULL_HANDLE) + return false; + + dslbuilder.AddBinding(0, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT); + m_vram_write_descriptor_set_layout = dslbuilder.Create(device); + if 
(m_vram_write_descriptor_set_layout == VK_NULL_HANDLE) + return false; + + Vulkan::PipelineLayoutBuilder plbuilder; + plbuilder.AddDescriptorSet(m_batch_descriptor_set_layout); + m_batch_pipeline_layout = plbuilder.Create(device); + if (m_batch_pipeline_layout == VK_NULL_HANDLE) + return false; + + plbuilder.AddDescriptorSet(m_single_sampler_descriptor_set_layout); + plbuilder.AddPushConstants(VK_SHADER_STAGE_FRAGMENT_BIT, 0, MAX_PUSH_CONSTANTS_SIZE); + m_single_sampler_pipeline_layout = plbuilder.Create(device); + if (m_single_sampler_pipeline_layout == VK_NULL_HANDLE) + return false; + + plbuilder.AddPushConstants(VK_SHADER_STAGE_FRAGMENT_BIT, 0, MAX_PUSH_CONSTANTS_SIZE); + m_no_samplers_pipeline_layout = plbuilder.Create(device); + if (m_no_samplers_pipeline_layout == VK_NULL_HANDLE) + return false; + + plbuilder.AddDescriptorSet(m_vram_write_descriptor_set_layout); + plbuilder.AddPushConstants(VK_SHADER_STAGE_FRAGMENT_BIT, 0, MAX_PUSH_CONSTANTS_SIZE); + m_vram_write_pipeline_layout = plbuilder.Create(device); + if (m_vram_write_pipeline_layout == VK_NULL_HANDLE) + return false; + + return true; +} + +bool GPU_HW_Vulkan::CreateSamplers() +{ + VkDevice device = g_vulkan_context->GetDevice(); + + Vulkan::SamplerBuilder sbuilder; + sbuilder.SetPointSampler(VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER); + m_point_sampler = sbuilder.Create(device); + if (m_point_sampler == VK_NULL_HANDLE) + return false; + + sbuilder.SetLinearSampler(false, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER); + m_linear_sampler = sbuilder.Create(device); + if (m_linear_sampler == VK_NULL_HANDLE) + return false; + + return true; +} + +bool GPU_HW_Vulkan::CreateFramebuffer() +{ + // save old vram texture/fbo, in case we're changing scale + auto old_vram_texture = std::move(m_vram_texture); + DestroyFramebuffer(); + + // scale vram size to internal resolution + const u32 texture_width = VRAM_WIDTH * m_resolution_scale; + const u32 texture_height = VRAM_HEIGHT * m_resolution_scale; + const VkFormat texture_format = VK_FORMAT_R8G8B8A8_UNORM; + const VkFormat depth_format = VK_FORMAT_D16_UNORM; + const VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_1_BIT; + + if (!m_vram_texture.Create(texture_width, texture_height, 1, 1, texture_format, samples, VK_IMAGE_VIEW_TYPE_2D, + VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | + VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT) || + !m_vram_depth_texture.Create(texture_width, texture_height, 1, 1, depth_format, samples, VK_IMAGE_VIEW_TYPE_2D, + VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT) || + !m_vram_read_texture.Create(texture_width, texture_height, 1, 1, texture_format, samples, VK_IMAGE_VIEW_TYPE_2D, + VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT) || + !m_display_texture.Create(texture_width, texture_height, 1, 1, texture_format, samples, VK_IMAGE_VIEW_TYPE_2D, + VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | + VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT) || + !m_vram_readback_texture.Create(VRAM_WIDTH, VRAM_HEIGHT, 1, 1, texture_format, samples, VK_IMAGE_VIEW_TYPE_2D, + VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT) || + !m_vram_readback_staging_texture.Create(Vulkan::StagingBuffer::Type::Readback, texture_format, VRAM_WIDTH, + VRAM_HEIGHT)) + { + return false; + } + + m_vram_render_pass = + 
g_vulkan_context->GetRenderPass(texture_format, depth_format, samples, VK_ATTACHMENT_LOAD_OP_LOAD); + m_vram_update_depth_render_pass = + g_vulkan_context->GetRenderPass(VK_FORMAT_UNDEFINED, depth_format, samples, VK_ATTACHMENT_LOAD_OP_DONT_CARE); + m_display_render_pass = g_vulkan_context->GetRenderPass(m_display_texture.GetFormat(), VK_FORMAT_UNDEFINED, + m_display_texture.GetSamples(), VK_ATTACHMENT_LOAD_OP_LOAD); + m_vram_readback_render_pass = + g_vulkan_context->GetRenderPass(m_vram_readback_texture.GetFormat(), VK_FORMAT_UNDEFINED, + m_vram_readback_texture.GetSamples(), VK_ATTACHMENT_LOAD_OP_DONT_CARE); + + if (m_vram_render_pass == VK_NULL_HANDLE || m_vram_update_depth_render_pass == VK_NULL_HANDLE || + m_display_render_pass == VK_NULL_HANDLE || m_vram_readback_render_pass == VK_NULL_HANDLE) + { + return false; + } + + // vram framebuffer has both colour and depth + { + Vulkan::FramebufferBuilder fbb; + fbb.AddAttachment(m_vram_texture.GetView()); + fbb.AddAttachment(m_vram_depth_texture.GetView()); + fbb.SetRenderPass(m_vram_render_pass); + fbb.SetSize(m_vram_texture.GetWidth(), m_vram_texture.GetHeight(), m_vram_texture.GetLayers()); + m_vram_framebuffer = fbb.Create(g_vulkan_context->GetDevice()); + if (m_vram_framebuffer == VK_NULL_HANDLE) + return false; + } + + m_vram_update_depth_framebuffer = m_vram_depth_texture.CreateFramebuffer(m_vram_update_depth_render_pass); + m_vram_readback_framebuffer = m_vram_readback_texture.CreateFramebuffer(m_vram_readback_render_pass); + m_display_framebuffer = m_display_texture.CreateFramebuffer(m_display_render_pass); + if (m_vram_update_depth_framebuffer == VK_NULL_HANDLE || m_vram_readback_framebuffer == VK_NULL_HANDLE || + m_display_framebuffer == VK_NULL_HANDLE) + { + return false; + } + + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + m_vram_depth_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); + m_vram_read_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + + Vulkan::DescriptorSetUpdateBuilder dsubuilder; + + m_batch_descriptor_set = g_vulkan_context->AllocateGlobalDescriptorSet(m_batch_descriptor_set_layout); + m_vram_copy_descriptor_set = g_vulkan_context->AllocateGlobalDescriptorSet(m_single_sampler_descriptor_set_layout); + m_vram_read_descriptor_set = g_vulkan_context->AllocateGlobalDescriptorSet(m_single_sampler_descriptor_set_layout); + if (m_batch_descriptor_set == VK_NULL_HANDLE || m_vram_copy_descriptor_set == VK_NULL_HANDLE || + m_vram_read_descriptor_set == VK_NULL_HANDLE) + { + return false; + } + + dsubuilder.AddBufferDescriptorWrite(m_batch_descriptor_set, 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, + m_uniform_stream_buffer.GetBuffer(), 0, sizeof(BatchUBOData)); + dsubuilder.AddCombinedImageSamplerDescriptorWrite(m_batch_descriptor_set, 1, m_vram_read_texture.GetView(), + m_point_sampler, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + dsubuilder.AddCombinedImageSamplerDescriptorWrite(m_vram_copy_descriptor_set, 1, m_vram_read_texture.GetView(), + m_point_sampler, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + dsubuilder.AddCombinedImageSamplerDescriptorWrite(m_vram_read_descriptor_set, 1, m_vram_texture.GetView(), + m_point_sampler, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + dsubuilder.Update(g_vulkan_context->GetDevice()); + + if (old_vram_texture.IsValid()) + { + const bool linear_filter = old_vram_texture.GetWidth() > 
m_vram_texture.GetWidth(); + Log_DevPrintf("Scaling %ux%u VRAM texture to %ux%u using %s filter", old_vram_texture.GetWidth(), + old_vram_texture.GetHeight(), m_vram_texture.GetWidth(), m_vram_texture.GetHeight(), + linear_filter ? "linear" : "nearest"); + + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); + old_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); + + const VkImageBlit blit{ + {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u}, + {{0, 0, 0}, {static_cast<s32>(old_vram_texture.GetWidth()), static_cast<s32>(old_vram_texture.GetHeight()), 1}}, + {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u}, + {{0, 0, 0}, {static_cast<s32>(m_vram_texture.GetWidth()), static_cast<s32>(m_vram_texture.GetHeight()), 1}}}; + vkCmdBlitImage(cmdbuf, old_vram_texture.GetImage(), old_vram_texture.GetLayout(), m_vram_texture.GetImage(), + m_vram_texture.GetLayout(), 1, &blit, linear_filter ? VK_FILTER_LINEAR : VK_FILTER_NEAREST); + + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + + UpdateDepthBufferFromMaskBit(); + + // Can't immediately destroy because we're blitting in the current command buffer. + old_vram_texture.Destroy(true); + } + + SetFullVRAMDirtyRectangle(); + RestoreGraphicsAPIState(); + return true; +} + +void GPU_HW_Vulkan::ClearFramebuffer() +{ + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); + m_vram_depth_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); + + static constexpr VkClearColorValue cc = {}; + static constexpr VkImageSubresourceRange csrr = {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}; + static constexpr VkClearDepthStencilValue cds = {}; + static constexpr VkImageSubresourceRange dsrr = {VK_IMAGE_ASPECT_DEPTH_BIT, 0u, 1u, 0u, 1u}; + vkCmdClearColorImage(cmdbuf, m_vram_texture.GetImage(), m_vram_texture.GetLayout(), &cc, 1u, &csrr); + vkCmdClearDepthStencilImage(cmdbuf, m_vram_depth_texture.GetImage(), m_vram_depth_texture.GetLayout(), &cds, 1u, + &dsrr); + + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + m_vram_depth_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); + + SetFullVRAMDirtyRectangle(); +} + +void GPU_HW_Vulkan::DestroyFramebuffer() +{ + Vulkan::Util::SafeFreeGlobalDescriptorSet(m_batch_descriptor_set); + Vulkan::Util::SafeFreeGlobalDescriptorSet(m_vram_copy_descriptor_set); + Vulkan::Util::SafeFreeGlobalDescriptorSet(m_vram_read_descriptor_set); + + Vulkan::Util::SafeDestroyFramebuffer(m_vram_framebuffer); + Vulkan::Util::SafeDestroyFramebuffer(m_vram_update_depth_framebuffer); + Vulkan::Util::SafeDestroyFramebuffer(m_vram_readback_framebuffer); + Vulkan::Util::SafeDestroyFramebuffer(m_display_framebuffer); + + m_vram_read_texture.Destroy(false); + m_vram_depth_texture.Destroy(false); + m_vram_texture.Destroy(false); + m_vram_readback_texture.Destroy(false); + m_display_texture.Destroy(false); + m_vram_readback_staging_texture.Destroy(false); +} + +bool GPU_HW_Vulkan::CreateVertexBuffer() +{ + return m_vertex_stream_buffer.Create(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VERTEX_BUFFER_SIZE); +} + +bool GPU_HW_Vulkan::CreateUniformBuffer() +{ + return m_uniform_stream_buffer.Create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, UNIFORM_BUFFER_SIZE); +} +
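
The vertex, uniform and texel buffers all follow the same stream-buffer discipline: reserve aligned space, memcpy, commit, and on failure kick the command buffer so in-flight regions can retire, then retry once. Condensed to its shape (a sketch only; the real call sites also wrap the kick in EndRenderPass/RestoreGraphicsAPIState, and the helper name here is hypothetical):

    // Reserve-or-kick pattern shared by MapBatchVertexPointer, UploadUniformBuffer and UpdateVRAM.
    template <typename StreamBufferType>
    bool ReserveOrKick(StreamBufferType& buffer, u32 size, u32 alignment)
    {
      if (buffer.ReserveMemory(size, alignment))
        return true;

      // Out of space: submit the current command buffer so fences can free old regions.
      g_vulkan_context->ExecuteCommandBuffer(false);
      return buffer.ReserveMemory(size, alignment);
    }
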
+bool GPU_HW_Vulkan::CreateTextureBuffer() +{ + if (!m_texture_stream_buffer.Create(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VRAM_UPDATE_TEXTURE_BUFFER_SIZE)) + return false; + + Vulkan::BufferViewBuilder bvbuilder; + bvbuilder.Set(m_texture_stream_buffer.GetBuffer(), VK_FORMAT_R16_UINT, 0, m_texture_stream_buffer.GetCurrentSize()); + m_texture_stream_buffer_view = bvbuilder.Create(g_vulkan_context->GetDevice()); + if (m_texture_stream_buffer_view == VK_NULL_HANDLE) + return false; + + m_vram_write_descriptor_set = g_vulkan_context->AllocateGlobalDescriptorSet(m_vram_write_descriptor_set_layout); + if (m_vram_write_descriptor_set == VK_NULL_HANDLE) + return false; + + Vulkan::DescriptorSetUpdateBuilder dsubuilder; + dsubuilder.AddBufferViewDescriptorWrite(m_vram_write_descriptor_set, 0, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, + m_texture_stream_buffer_view); + dsubuilder.Update(g_vulkan_context->GetDevice()); + return true; +} + +bool GPU_HW_Vulkan::CompilePipelines() +{ + static constexpr std::array<VkPrimitiveTopology, 2> primitive_mapping = { + {VK_PRIMITIVE_TOPOLOGY_LINE_LIST, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST}}; + static constexpr std::array<VkPolygonMode, 2> polygon_mode_mapping = {{VK_POLYGON_MODE_LINE, VK_POLYGON_MODE_FILL}}; + + VkDevice device = g_vulkan_context->GetDevice(); + VkPipelineCache pipeline_cache = g_vulkan_shader_cache->GetPipelineCache(); + + GPU_HW_ShaderGen shadergen(m_host_display->GetRenderAPI(), m_resolution_scale, m_true_color, m_scaled_dithering, + m_texture_filtering, m_supports_dual_source_blend); + + // vertex shaders - [textured] + // fragment shaders - [render_mode][texture_mode][dithering][interlacing] + DimensionalArray<VkShaderModule, 2> batch_vertex_shaders{}; + DimensionalArray<VkShaderModule, 2, 2, 9, 4> batch_fragment_shaders{}; + Common::ScopeGuard batch_shader_guard([&batch_vertex_shaders, &batch_fragment_shaders]() { + batch_vertex_shaders.enumerate([](VkShaderModule& s) { + if (s != VK_NULL_HANDLE) + { + vkDestroyShaderModule(g_vulkan_context->GetDevice(), s, nullptr); + } + }); + batch_fragment_shaders.enumerate([](VkShaderModule& s) { + if (s != VK_NULL_HANDLE) + { + vkDestroyShaderModule(g_vulkan_context->GetDevice(), s, nullptr); + } + }); + }); + + for (u8 textured = 0; textured < 2; textured++) + { + const std::string vs = shadergen.GenerateBatchVertexShader(ConvertToBoolUnchecked(textured)); + VkShaderModule shader = g_vulkan_shader_cache->GetVertexShader(vs); + if (shader == VK_NULL_HANDLE) + return false; + + batch_vertex_shaders[textured] = shader; + } + + for (u8 render_mode = 0; render_mode < 4; render_mode++) + { + for (u8 texture_mode = 0; texture_mode < 9; texture_mode++) + { + for (u8 dithering = 0; dithering < 2; dithering++) + { + for (u8 interlacing = 0; interlacing < 2; interlacing++) + { + const std::string fs = shadergen.GenerateBatchFragmentShader( + static_cast<BatchRenderMode>(render_mode), static_cast<TextureMode>(texture_mode), + ConvertToBoolUnchecked(dithering), ConvertToBoolUnchecked(interlacing)); + + VkShaderModule shader = g_vulkan_shader_cache->GetFragmentShader(fs); + if (shader == VK_NULL_HANDLE) + return false; + + batch_fragment_shaders[render_mode][texture_mode][dithering][interlacing] = shader; + } + } + } + } + + Vulkan::GraphicsPipelineBuilder gpbuilder; + + // [primitive][depth_test][render_mode][texture_mode][transparency_mode][dithering][interlacing] + for (u8 primitive = 0; primitive < 2; primitive++) + { + for (u8 depth_test = 0; depth_test < 2; depth_test++) + { + for (u8 render_mode = 0; render_mode < 4; render_mode++) + { + for (u8 transparency_mode = 0; transparency_mode < 5; transparency_mode++) + { + for (u8 texture_mode = 0; texture_mode < 9; texture_mode++) + { + for (u8 dithering = 0; dithering < 2; dithering++) + { + for (u8 interlacing = 0; interlacing < 2;
interlacing++) + { + // TODO: GS + const bool textured = (static_cast<TextureMode>(texture_mode) != TextureMode::Disabled); + + gpbuilder.SetPipelineLayout(m_batch_pipeline_layout); + gpbuilder.SetRenderPass(m_vram_render_pass, 0); + + gpbuilder.AddVertexBuffer(0, sizeof(BatchVertex), VK_VERTEX_INPUT_RATE_VERTEX); + gpbuilder.AddVertexAttribute(0, 0, VK_FORMAT_R32G32_SINT, offsetof(BatchVertex, x)); + gpbuilder.AddVertexAttribute(1, 0, VK_FORMAT_R8G8B8A8_UNORM, offsetof(BatchVertex, color)); + if (textured) + { + gpbuilder.AddVertexAttribute(2, 0, VK_FORMAT_R32_UINT, offsetof(BatchVertex, u)); + gpbuilder.AddVertexAttribute(3, 0, VK_FORMAT_R32_UINT, offsetof(BatchVertex, texpage)); + } + + gpbuilder.SetPrimitiveTopology(primitive_mapping[primitive]); + gpbuilder.SetVertexShader(batch_vertex_shaders[BoolToUInt8(textured)]); + gpbuilder.SetFragmentShader(batch_fragment_shaders[render_mode][texture_mode][dithering][interlacing]); + gpbuilder.SetRasterizationState(polygon_mode_mapping[primitive], VK_CULL_MODE_NONE, + VK_FRONT_FACE_CLOCKWISE); + gpbuilder.SetDepthState(depth_test != 0, true, + (depth_test != 0) ? VK_COMPARE_OP_GREATER_OR_EQUAL : VK_COMPARE_OP_ALWAYS); + + gpbuilder.SetNoBlendingState(); + if (static_cast<TransparencyMode>(transparency_mode) != TransparencyMode::Disabled || + m_texture_filtering) + { + // TODO: Check dual-source blend support here. + gpbuilder.SetBlendAttachment( + 0, true, VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_SRC1_ALPHA, + (static_cast<TransparencyMode>(transparency_mode) == TransparencyMode::BackgroundMinusForeground) ? + VK_BLEND_OP_REVERSE_SUBTRACT : + VK_BLEND_OP_ADD, + VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD); + } + + gpbuilder.SetDynamicViewportAndScissorState(); + + VkPipeline pipeline = gpbuilder.Create(device, pipeline_cache); + if (pipeline == VK_NULL_HANDLE) + return false; + + m_batch_pipelines[primitive][depth_test][render_mode][texture_mode][transparency_mode][dithering] + [interlacing] = pipeline; + } + } + } + } + } + } + } + + batch_shader_guard.Exit(); + + VkShaderModule fullscreen_quad_vertex_shader = + g_vulkan_shader_cache->GetVertexShader(shadergen.GenerateScreenQuadVertexShader()); + if (fullscreen_quad_vertex_shader == VK_NULL_HANDLE) + return false; + + Common::ScopeGuard fullscreen_quad_vertex_shader_guard([&fullscreen_quad_vertex_shader]() { + vkDestroyShaderModule(g_vulkan_context->GetDevice(), fullscreen_quad_vertex_shader, nullptr); + }); + + // common state + gpbuilder.SetRenderPass(m_vram_render_pass, 0); + gpbuilder.SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST); + gpbuilder.SetNoCullRasterizationState(); + gpbuilder.SetNoDepthTestState(); + gpbuilder.SetNoBlendingState(); + gpbuilder.SetDynamicViewportAndScissorState(); + gpbuilder.SetVertexShader(fullscreen_quad_vertex_shader); + + // VRAM fill + { + for (u8 interlaced = 0; interlaced < 2; interlaced++) + { + VkShaderModule fs = g_vulkan_shader_cache->GetFragmentShader( + (interlaced == 0) ?
shadergen.GenerateFillFragmentShader() : shadergen.GenerateInterlacedFillFragmentShader()); + if (fs == VK_NULL_HANDLE) + return false; + + gpbuilder.SetPipelineLayout(m_no_samplers_pipeline_layout); + gpbuilder.SetFragmentShader(fs); + gpbuilder.SetDepthState(true, true, VK_COMPARE_OP_ALWAYS); + + m_vram_fill_pipelines[interlaced] = gpbuilder.Create(device, pipeline_cache, false); + vkDestroyShaderModule(device, fs, nullptr); + if (m_vram_fill_pipelines[interlaced] == VK_NULL_HANDLE) + return false; + } + } + + // VRAM copy + { + VkShaderModule fs = g_vulkan_shader_cache->GetFragmentShader(shadergen.GenerateVRAMCopyFragmentShader()); + if (fs == VK_NULL_HANDLE) + return false; + + gpbuilder.SetPipelineLayout(m_single_sampler_pipeline_layout); + gpbuilder.SetFragmentShader(fs); + for (u8 depth_test = 0; depth_test < 2; depth_test++) + { + gpbuilder.SetDepthState((depth_test != 0), true, + (depth_test != 0) ? VK_COMPARE_OP_GREATER_OR_EQUAL : VK_COMPARE_OP_ALWAYS); + + m_vram_copy_pipelines[depth_test] = gpbuilder.Create(device, pipeline_cache, false); + if (m_vram_copy_pipelines[depth_test] == VK_NULL_HANDLE) + { + vkDestroyShaderModule(device, fs, nullptr); + return false; + } + } + + vkDestroyShaderModule(device, fs, nullptr); + } + + // VRAM write + { + VkShaderModule fs = g_vulkan_shader_cache->GetFragmentShader(shadergen.GenerateVRAMWriteFragmentShader()); + if (fs == VK_NULL_HANDLE) + return false; + + gpbuilder.SetPipelineLayout(m_vram_write_pipeline_layout); + gpbuilder.SetFragmentShader(fs); + for (u8 depth_test = 0; depth_test < 2; depth_test++) + { + gpbuilder.SetDepthState((depth_test != 0), true, + (depth_test != 0) ? VK_COMPARE_OP_GREATER_OR_EQUAL : VK_COMPARE_OP_ALWAYS); + m_vram_write_pipelines[depth_test] = gpbuilder.Create(device, pipeline_cache, false); + if (m_vram_write_pipelines[depth_test] == VK_NULL_HANDLE) + { + vkDestroyShaderModule(device, fs, nullptr); + return false; + } + } + + vkDestroyShaderModule(device, fs, nullptr); + } + + // VRAM update depth + { + VkShaderModule fs = g_vulkan_shader_cache->GetFragmentShader(shadergen.GenerateVRAMUpdateDepthFragmentShader()); + if (fs == VK_NULL_HANDLE) + return false; + + gpbuilder.SetRenderPass(m_vram_update_depth_render_pass, 0); + gpbuilder.SetPipelineLayout(m_single_sampler_pipeline_layout); + gpbuilder.SetFragmentShader(fs); + gpbuilder.SetDepthState(true, true, VK_COMPARE_OP_ALWAYS); + gpbuilder.SetBlendAttachment(0, false, VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD, + VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD, 0); + + m_vram_update_depth_pipeline = gpbuilder.Create(device, pipeline_cache, false); + vkDestroyShaderModule(device, fs, nullptr); + if (m_vram_update_depth_pipeline == VK_NULL_HANDLE) + return false; + } + + gpbuilder.Clear(); + + // VRAM read + { + VkShaderModule fs = g_vulkan_shader_cache->GetFragmentShader(shadergen.GenerateVRAMReadFragmentShader()); + if (fs == VK_NULL_HANDLE) + return false; + + gpbuilder.SetRenderPass(m_vram_readback_render_pass, 0); + gpbuilder.SetPipelineLayout(m_single_sampler_pipeline_layout); + gpbuilder.SetVertexShader(fullscreen_quad_vertex_shader); + gpbuilder.SetFragmentShader(fs); + gpbuilder.SetNoCullRasterizationState(); + gpbuilder.SetNoDepthTestState(); + gpbuilder.SetNoBlendingState(); + gpbuilder.SetDynamicViewportAndScissorState(); + + m_vram_readback_pipeline = gpbuilder.Create(device, pipeline_cache, false); + vkDestroyShaderModule(device, fs, nullptr); + if (m_vram_readback_pipeline == VK_NULL_HANDLE) + return false; + } + + 
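
Each of these single-quad passes draws with vkCmdDraw(cmdbuf, 3, 1, 0, 0) and no vertex buffer: GenerateScreenQuadVertexShader derives an oversized fullscreen triangle purely from the vertex index. The mapping it computes checks out as follows (a standalone verification, not project code):

    // Verifies the v_id -> fullscreen-triangle mapping from the screen quad vertex shader.
    #include <cassert>
    #include <cstdint>

    int main()
    {
      for (uint32_t v_id = 0; v_id < 3; v_id++)
      {
        // v_tex0 = (float((v_id << 1) & 2u), float(v_id & 2u)) -> (0,0), (2,0), (0,2)
        const float tx = static_cast<float>((v_id << 1) & 2u);
        const float ty = static_cast<float>(v_id & 2u);
        // pos = tex * (2, -2) + (-1, 1) -> (-1,1), (3,1), (-1,-3): one triangle covering clip space.
        const float px = tx * 2.0f - 1.0f;
        const float py = ty * -2.0f + 1.0f;
        assert((px == -1.0f || px == 3.0f) && (py == 1.0f || py == -3.0f));
      }
      return 0;
    }
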
gpbuilder.Clear(); + + // Display + { + gpbuilder.SetRenderPass(m_display_render_pass, 0); + gpbuilder.SetPipelineLayout(m_single_sampler_pipeline_layout); + gpbuilder.SetVertexShader(fullscreen_quad_vertex_shader); + gpbuilder.SetNoCullRasterizationState(); + gpbuilder.SetNoDepthTestState(); + gpbuilder.SetNoBlendingState(); + gpbuilder.SetDynamicViewportAndScissorState(); + + for (u8 depth_24 = 0; depth_24 < 2; depth_24++) + { + for (u8 interlace_mode = 0; interlace_mode < 3; interlace_mode++) + { + VkShaderModule fs = g_vulkan_shader_cache->GetFragmentShader(shadergen.GenerateDisplayFragmentShader( + ConvertToBoolUnchecked(depth_24), static_cast<InterlacedRenderMode>(interlace_mode))); + if (fs == VK_NULL_HANDLE) + return false; + + gpbuilder.SetFragmentShader(fs); + + m_display_pipelines[depth_24][interlace_mode] = gpbuilder.Create(device, pipeline_cache, false); + vkDestroyShaderModule(device, fs, nullptr); + if (m_display_pipelines[depth_24][interlace_mode] == VK_NULL_HANDLE) + return false; + } + } + } + + return true; +} + +void GPU_HW_Vulkan::DestroyPipelines() +{ + m_batch_pipelines.enumerate(Vulkan::Util::SafeDestroyPipeline); + + for (VkPipeline& p : m_vram_fill_pipelines) + Vulkan::Util::SafeDestroyPipeline(p); + + for (VkPipeline& p : m_vram_write_pipelines) + Vulkan::Util::SafeDestroyPipeline(p); + + for (VkPipeline& p : m_vram_copy_pipelines) + Vulkan::Util::SafeDestroyPipeline(p); + + Vulkan::Util::SafeDestroyPipeline(m_vram_readback_pipeline); + Vulkan::Util::SafeDestroyPipeline(m_vram_update_depth_pipeline); + + m_display_pipelines.enumerate(Vulkan::Util::SafeDestroyPipeline); +} + +void GPU_HW_Vulkan::DrawBatchVertices(BatchRenderMode render_mode, u32 base_vertex, u32 num_vertices) +{ + BeginVRAMRenderPass(); + + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + + // [primitive][depth_test][render_mode][texture_mode][transparency_mode][dithering][interlacing] + VkPipeline pipeline = + m_batch_pipelines[static_cast<u8>(m_batch.primitive)][BoolToUInt8(m_batch.check_mask_before_draw)][static_cast<u8>( + render_mode)][static_cast<u8>(m_batch.texture_mode)][static_cast<u8>(m_batch.transparency_mode)] + [BoolToUInt8(m_batch.dithering)][BoolToUInt8(m_batch.interlacing)]; + + vkCmdBindPipeline(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); + vkCmdDraw(cmdbuf, num_vertices, 1, base_vertex, 0); +} + +void GPU_HW_Vulkan::SetScissorFromDrawingArea() +{ + int left, top, right, bottom; + CalcScissorRect(&left, &top, &right, &bottom); + + Vulkan::Util::SetScissor(g_vulkan_context->GetCurrentCommandBuffer(), left, top, right - left, bottom - top); +} + +void GPU_HW_Vulkan::UpdateDisplay() +{ + GPU_HW::UpdateDisplay(); + + if (m_system->GetSettings().debugging.show_vram) + { + m_host_display->SetDisplayTexture(&m_vram_texture, m_vram_texture.GetWidth(), m_vram_texture.GetHeight(), 0, 0, + m_vram_texture.GetWidth(), m_vram_texture.GetHeight()); + m_host_display->SetDisplayParameters(VRAM_WIDTH, VRAM_HEIGHT, 0, 0, VRAM_WIDTH, VRAM_HEIGHT, + static_cast<float>(VRAM_WIDTH) / static_cast<float>(VRAM_HEIGHT)); + } + else + { + const u32 vram_offset_x = m_crtc_state.display_vram_left; + const u32 vram_offset_y = m_crtc_state.display_vram_top; + const u32 scaled_vram_offset_x = vram_offset_x * m_resolution_scale; + const u32 scaled_vram_offset_y = vram_offset_y * m_resolution_scale; + const u32 display_width = m_crtc_state.display_vram_width; + const u32 display_height = m_crtc_state.display_vram_height; + const u32 scaled_display_width = display_width * m_resolution_scale; + const u32 scaled_display_height =
display_height * m_resolution_scale; + const InterlacedRenderMode interlaced = GetInterlacedRenderMode(); + + if (IsDisplayDisabled()) + { + m_host_display->ClearDisplayTexture(); + } + else if (!m_GPUSTAT.display_area_color_depth_24 && interlaced == InterlacedRenderMode::None && + (scaled_vram_offset_x + scaled_display_width) <= m_vram_texture.GetWidth() && + (scaled_vram_offset_y + scaled_display_height) <= m_vram_texture.GetHeight()) + { + m_host_display->SetDisplayTexture(&m_vram_texture, m_vram_texture.GetWidth(), m_vram_texture.GetHeight(), + scaled_vram_offset_x, scaled_vram_offset_y, scaled_display_width, + scaled_display_height); + } + else + { + EndRenderPass(); + + const u32 reinterpret_field_offset = (interlaced != InterlacedRenderMode::None) ? GetInterlacedDisplayField() : 0; + const u32 reinterpret_start_x = m_crtc_state.regs.X * m_resolution_scale; + const u32 reinterpret_crop_left = (m_crtc_state.display_vram_left - m_crtc_state.regs.X) * m_resolution_scale; + const u32 uniforms[4] = {reinterpret_start_x, scaled_vram_offset_y + reinterpret_field_offset, + reinterpret_crop_left, reinterpret_field_offset}; + + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + m_display_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + + BeginRenderPass(m_display_render_pass, m_display_framebuffer, 0, 0, scaled_display_width, scaled_display_height); + + vkCmdBindPipeline( + cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, + m_display_pipelines[BoolToUInt8(m_GPUSTAT.display_area_color_depth_24)][static_cast<u8>(interlaced)]); + vkCmdPushConstants(cmdbuf, m_single_sampler_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(uniforms), + uniforms); + vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_single_sampler_pipeline_layout, 0, 1, + &m_vram_read_descriptor_set, 0, nullptr); + Vulkan::Util::SetViewportAndScissor(cmdbuf, 0, 0, scaled_display_width, scaled_display_height); + vkCmdDraw(cmdbuf, 3, 1, 0, 0); + + EndRenderPass(); + + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + m_display_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + + m_host_display->SetDisplayTexture(&m_display_texture, m_display_texture.GetWidth(), m_display_texture.GetHeight(), + 0, 0, scaled_display_width, scaled_display_height); + + RestoreGraphicsAPIState(); + } + + m_host_display->SetDisplayParameters(m_crtc_state.display_width, m_crtc_state.display_height, + m_crtc_state.display_origin_left, m_crtc_state.display_origin_top, + m_crtc_state.display_vram_width, m_crtc_state.display_vram_height, + m_crtc_state.display_aspect_ratio); + } +} + +void GPU_HW_Vulkan::ReadVRAM(u32 x, u32 y, u32 width, u32 height) +{ + // Get bounds with wrap-around handled. + const Common::Rectangle<u32> copy_rect = GetVRAMTransferBounds(x, y, width, height); + const u32 encoded_width = (copy_rect.GetWidth() + 1) / 2; + const u32 encoded_height = copy_rect.GetHeight(); + + EndRenderPass(); + + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + m_vram_readback_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + + BeginRenderPass(m_vram_readback_render_pass, m_vram_readback_framebuffer, 0, 0, encoded_width, encoded_height); + + // Encode the 24-bit texture as 16-bit.
+ const u32 uniforms[4] = {copy_rect.left, copy_rect.top, copy_rect.GetWidth(), copy_rect.GetHeight()}; + vkCmdBindPipeline(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_vram_readback_pipeline); + vkCmdPushConstants(cmdbuf, m_single_sampler_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(uniforms), + uniforms); + vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_single_sampler_pipeline_layout, 0, 1, + &m_vram_read_descriptor_set, 0, nullptr); + Vulkan::Util::SetViewportAndScissor(cmdbuf, 0, 0, encoded_width, encoded_height); + vkCmdDraw(cmdbuf, 3, 1, 0, 0); + + EndRenderPass(); + + m_vram_readback_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); + m_vram_texture.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + + // Stage the readback. + m_vram_readback_staging_texture.CopyFromTexture(m_vram_readback_texture, 0, 0, 0, 0, 0, 0, encoded_width, + encoded_height); + + // And copy it into our shadow buffer (will execute command buffer and stall). + m_vram_readback_staging_texture.ReadTexels(0, 0, encoded_width, encoded_height, + &m_vram_shadow[copy_rect.top * VRAM_WIDTH + copy_rect.left], + VRAM_WIDTH * sizeof(u16)); + + RestoreGraphicsAPIState(); +} + +void GPU_HW_Vulkan::FillVRAM(u32 x, u32 y, u32 width, u32 height, u32 color) +{ + if ((x + width) > VRAM_WIDTH || (y + height) > VRAM_HEIGHT) + { + // CPU round trip if oversized for now. + Log_WarningPrintf("Oversized VRAM fill (%u-%u, %u-%u), CPU round trip", x, x + width, y, y + height); + ReadVRAM(0, 0, VRAM_WIDTH, VRAM_HEIGHT); + GPU::FillVRAM(x, y, width, height, color); + UpdateVRAM(0, 0, VRAM_WIDTH, VRAM_HEIGHT, m_vram_shadow.data()); + return; + } + + GPU_HW::FillVRAM(x, y, width, height, color); + + x *= m_resolution_scale; + y *= m_resolution_scale; + width *= m_resolution_scale; + height *= m_resolution_scale; + + BeginVRAMRenderPass(); + + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + const VRAMFillUBOData uniforms = GetVRAMFillUBOData(x, y, width, height, color); + vkCmdPushConstants(cmdbuf, m_no_samplers_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(uniforms), + &uniforms); + vkCmdBindPipeline(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, + m_vram_fill_pipelines[BoolToUInt8(IsInterlacedRenderingEnabled())]); + Vulkan::Util::SetViewportAndScissor(cmdbuf, x, y, width, height); + vkCmdDraw(cmdbuf, 3, 1, 0, 0); + + RestoreGraphicsAPIState(); +} + +void GPU_HW_Vulkan::UpdateVRAM(u32 x, u32 y, u32 width, u32 height, const void* data) +{ + if ((x + width) > VRAM_WIDTH || (y + height) > VRAM_HEIGHT) + { + // CPU round trip if oversized for now. 
+ Log_WarningPrintf("Oversized VRAM update (%u-%u, %u-%u), CPU round trip", x, x + width, y, y + height); + ReadVRAM(0, 0, VRAM_WIDTH, VRAM_HEIGHT); + GPU::UpdateVRAM(x, y, width, height, data); + UpdateVRAM(0, 0, VRAM_WIDTH, VRAM_HEIGHT, m_vram_shadow.data()); + return; + } + + GPU_HW::UpdateVRAM(x, y, width, height, data); + + const u32 data_size = width * height * sizeof(u16); + const u32 alignment = std::max(sizeof(u16), static_cast(g_vulkan_context->GetTexelBufferAlignment())); + if (!m_texture_stream_buffer.ReserveMemory(data_size, alignment)) + { + Log_PerfPrintf("Executing command buffer while waiting for %u bytes in stream buffer", data_size); + EndRenderPass(); + g_vulkan_context->ExecuteCommandBuffer(false); + RestoreGraphicsAPIState(); + if (!m_texture_stream_buffer.ReserveMemory(data_size, alignment)) + { + Panic("Failed to allocate space in stream buffer for VRAM write"); + return; + } + } + + const u32 start_index = m_texture_stream_buffer.GetCurrentOffset() / sizeof(u16); + std::memcpy(m_texture_stream_buffer.GetCurrentHostPointer(), data, data_size); + m_texture_stream_buffer.CommitMemory(data_size); + + BeginVRAMRenderPass(); + + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + const VRAMWriteUBOData uniforms = {x, + y, + width, + height, + start_index, + m_GPUSTAT.set_mask_while_drawing ? 0x8000u : 0x00, + GetCurrentNormalizedBatchVertexDepthID()}; + vkCmdPushConstants(cmdbuf, m_vram_write_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(uniforms), + &uniforms); + vkCmdBindPipeline(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, + m_vram_write_pipelines[BoolToUInt8(m_GPUSTAT.check_mask_before_draw)]); + vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_vram_write_pipeline_layout, 0, 1, + &m_vram_write_descriptor_set, 0, nullptr); + + // the viewport should already be set to the full vram, so just adjust the scissor + Vulkan::Util::SetScissor(cmdbuf, x * m_resolution_scale, y * m_resolution_scale, width * m_resolution_scale, + height * m_resolution_scale); + vkCmdDraw(cmdbuf, 3, 1, 0, 0); + + RestoreGraphicsAPIState(); +} + +void GPU_HW_Vulkan::CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) +{ + if (UseVRAMCopyShader(src_x, src_y, dst_x, dst_y, width, height)) + { + const Common::Rectangle src_bounds = GetVRAMTransferBounds(src_x, src_y, width, height); + const Common::Rectangle dst_bounds = GetVRAMTransferBounds(dst_x, dst_y, width, height); + if (m_vram_dirty_rect.Intersects(src_bounds)) + UpdateVRAMReadTexture(); + IncludeVRAMDityRectangle(dst_bounds); + + const VRAMCopyUBOData uniforms(GetVRAMCopyUBOData(src_x, src_y, dst_x, dst_y, width, height)); + const Common::Rectangle dst_bounds_scaled(dst_bounds * m_resolution_scale); + + BeginVRAMRenderPass(); + + VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer(); + vkCmdBindPipeline(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, + m_vram_copy_pipelines[BoolToUInt8(m_GPUSTAT.check_mask_before_draw)]); + vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_single_sampler_pipeline_layout, 0, 1, + &m_vram_copy_descriptor_set, 0, nullptr); + Vulkan::Util::SetViewportAndScissor(cmdbuf, dst_bounds_scaled.left, dst_bounds_scaled.top, + dst_bounds_scaled.GetWidth(), dst_bounds_scaled.GetHeight()); + vkCmdDraw(cmdbuf, 3, 1, 0, 0); + RestoreGraphicsAPIState(); + return; + } + + if (m_GPUSTAT.IsMaskingEnabled()) + Log_WarningPrintf("Masking enabled on VRAM copy - not implemented"); + + GPU_HW::CopyVRAM(src_x, src_y, dst_x, dst_y, width, 
diff --git a/src/core/gpu_hw_vulkan.h b/src/core/gpu_hw_vulkan.h new file mode 100644 index 000000000..6e211fbfd --- /dev/null +++ b/src/core/gpu_hw_vulkan.h @@ -0,0 +1,124 @@ +#pragma once +#include "common/dimensional_array.h" +#include "common/vulkan/staging_texture.h" +#include "common/vulkan/stream_buffer.h" +#include "common/vulkan/texture.h" +#include "gpu_hw.h" +#include <array> +#include <memory> +#include <tuple> + +class GPU_HW_Vulkan : public GPU_HW +{ +public: + GPU_HW_Vulkan(); + ~GPU_HW_Vulkan() override; + + bool Initialize(HostDisplay* host_display, System* system, DMA* dma, InterruptController* interrupt_controller, + Timers* timers) override; + void 
Reset() override; + + void ResetGraphicsAPIState() override; + void RestoreGraphicsAPIState() override; + void UpdateSettings() override; + +protected: + void UpdateDisplay() override; + void ReadVRAM(u32 x, u32 y, u32 width, u32 height) override; + void FillVRAM(u32 x, u32 y, u32 width, u32 height, u32 color) override; + void UpdateVRAM(u32 x, u32 y, u32 width, u32 height, const void* data) override; + void CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) override; + void UpdateVRAMReadTexture() override; + void UpdateDepthBufferFromMaskBit() override; + void SetScissorFromDrawingArea() override; + void MapBatchVertexPointer(u32 required_vertices) override; + void UnmapBatchVertexPointer(u32 used_vertices) override; + void UploadUniformBuffer(const void* data, u32 data_size) override; + void DrawBatchVertices(BatchRenderMode render_mode, u32 base_vertex, u32 num_vertices) override; + +private: + enum : u32 + { + MAX_PUSH_CONSTANTS_SIZE = 64, + }; + void SetCapabilities(); + void DestroyResources(); + + ALWAYS_INLINE bool InRenderPass() const { return (m_current_render_pass != VK_NULL_HANDLE); } + void BeginRenderPass(VkRenderPass render_pass, VkFramebuffer framebuffer, u32 x, u32 y, u32 width, u32 height); + void BeginVRAMRenderPass(); + void EndRenderPass(); + + bool CreatePipelineLayouts(); + bool CreateSamplers(); + + bool CreateFramebuffer(); + void ClearFramebuffer(); + void DestroyFramebuffer(); + + bool CreateVertexBuffer(); + bool CreateUniformBuffer(); + bool CreateTextureBuffer(); + + bool CompilePipelines(); + void DestroyPipelines(); + + VkRenderPass m_current_render_pass = VK_NULL_HANDLE; + + VkRenderPass m_vram_render_pass = VK_NULL_HANDLE; + VkRenderPass m_vram_update_depth_render_pass = VK_NULL_HANDLE; + VkRenderPass m_display_render_pass = VK_NULL_HANDLE; + VkRenderPass m_vram_readback_render_pass = VK_NULL_HANDLE; + + VkDescriptorSetLayout m_batch_descriptor_set_layout = VK_NULL_HANDLE; + VkDescriptorSetLayout m_single_sampler_descriptor_set_layout = VK_NULL_HANDLE; + VkDescriptorSetLayout m_vram_write_descriptor_set_layout = VK_NULL_HANDLE; + + VkPipelineLayout m_batch_pipeline_layout = VK_NULL_HANDLE; + VkPipelineLayout m_no_samplers_pipeline_layout = VK_NULL_HANDLE; + VkPipelineLayout m_single_sampler_pipeline_layout = VK_NULL_HANDLE; + VkPipelineLayout m_vram_write_pipeline_layout = VK_NULL_HANDLE; + + Vulkan::Texture m_vram_texture; + Vulkan::Texture m_vram_depth_texture; + Vulkan::Texture m_vram_read_texture; + Vulkan::Texture m_vram_readback_texture; + Vulkan::StagingTexture m_vram_readback_staging_texture; + Vulkan::Texture m_display_texture; + + VkFramebuffer m_vram_framebuffer = VK_NULL_HANDLE; + VkFramebuffer m_vram_update_depth_framebuffer = VK_NULL_HANDLE; + VkFramebuffer m_vram_readback_framebuffer = VK_NULL_HANDLE; + VkFramebuffer m_display_framebuffer = VK_NULL_HANDLE; + + VkSampler m_point_sampler = VK_NULL_HANDLE; + VkSampler m_linear_sampler = VK_NULL_HANDLE; + + VkDescriptorSet m_batch_descriptor_set = VK_NULL_HANDLE; + VkDescriptorSet m_vram_copy_descriptor_set = VK_NULL_HANDLE; + VkDescriptorSet m_vram_read_descriptor_set = VK_NULL_HANDLE; + VkDescriptorSet m_vram_write_descriptor_set = VK_NULL_HANDLE; + + Vulkan::StreamBuffer m_vertex_stream_buffer; + Vulkan::StreamBuffer m_uniform_stream_buffer; + Vulkan::StreamBuffer m_texture_stream_buffer; + + u32 m_current_uniform_buffer_offset = 0; + VkBufferView m_texture_stream_buffer_view = VK_NULL_HANDLE; + + // 
[primitive][depth_test][render_mode][texture_mode][transparency_mode][dithering][interlacing] + DimensionalArray m_batch_pipelines{}; + + // [interlaced] + std::array<VkPipeline, 2> m_vram_fill_pipelines{}; + + // [depth_test] + std::array<VkPipeline, 2> m_vram_write_pipelines{}; + std::array<VkPipeline, 2> m_vram_copy_pipelines{}; + + VkPipeline m_vram_readback_pipeline = VK_NULL_HANDLE; + VkPipeline m_vram_update_depth_pipeline = VK_NULL_HANDLE; + + // [depth_24][interlace_mode] + DimensionalArray m_display_pipelines{}; +}; diff --git a/src/core/host_display.h b/src/core/host_display.h index 687469d01..dd8141d33 100644 --- a/src/core/host_display.h +++ b/src/core/host_display.h @@ -24,6 +24,7 @@ public: { None, D3D11, + Vulkan, OpenGL, OpenGLES }; @@ -138,6 +139,7 @@ public: protected: ALWAYS_INLINE bool HasSoftwareCursor() const { return static_cast<bool>(m_cursor_texture); } + ALWAYS_INLINE bool HasDisplayTexture() const { return (m_display_texture_handle != nullptr); } void CalculateDrawRect(s32 window_width, s32 window_height, s32* out_left, s32* out_top, s32* out_width, s32* out_height, s32* out_left_padding, s32* out_top_padding, float* out_scale, diff --git a/src/core/host_interface.cpp b/src/core/host_interface.cpp index 53f8eda16..855cd8781 100644 --- a/src/core/host_interface.cpp +++ b/src/core/host_interface.cpp @@ -314,6 +314,11 @@ void HostInterface::OnRunningGameChanged() {} void HostInterface::OnControllerTypeChanged(u32 slot) {} +std::string HostInterface::GetShaderCacheDirectory() +{ + return GetUserDirectoryRelativePath("cache"); +} + void HostInterface::SetDefaultSettings(SettingsInterface& si) { si.SetStringValue("Console", "Region", Settings::GetConsoleRegionName(ConsoleRegion::Auto)); diff --git a/src/core/host_interface.h b/src/core/host_interface.h index b0738e67e..21f5e00eb 100644 --- a/src/core/host_interface.h +++ b/src/core/host_interface.h @@ -114,6 +114,9 @@ protected: virtual void OnRunningGameChanged(); virtual void OnControllerTypeChanged(u32 slot); + /// Returns the path to the shader cache directory. + virtual std::string GetShaderCacheDirectory(); + /// Restores all settings to defaults. virtual void SetDefaultSettings(SettingsInterface& si);
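The settings.cpp change that follows grows the renderer name tables in lockstep with the GPURenderer enum; the parse path relies on the two staying index-aligned. A reduced sketch of that contract (non-Windows enum shape shown, so no D3D11 entry; names taken from the diff, everything else illustrative):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>

// Mirrors the non-WIN32 shape of the enum in src/core/types.h.
enum class GPURenderer : std::uint8_t { HardwareVulkan, HardwareOpenGL, Software, Count };

// Must stay in the same order as the enum above.
static const char* s_names[] = {"Vulkan", "OpenGL", "Software"};

std::optional<GPURenderer> ParseRendererNameSketch(const char* str)
{
  for (std::size_t i = 0; i < static_cast<std::size_t>(GPURenderer::Count); i++)
  {
    // A matching index converts straight back to the enum value.
    if (std::strcmp(str, s_names[i]) == 0)
      return static_cast<GPURenderer>(i);
  }
  return std::nullopt;
}
```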
diff --git a/src/core/settings.cpp b/src/core/settings.cpp index 7d6103ae7..313df095d 100644 --- a/src/core/settings.cpp +++ b/src/core/settings.cpp @@ -313,16 +313,16 @@ const char* Settings::GetCPUExecutionModeDisplayName(CPUExecutionMode mode) return s_cpu_execution_mode_display_names[static_cast<u8>(mode)]; } -static std::array<const char*, 3> s_gpu_renderer_names = {{ +static std::array<const char*, 4> s_gpu_renderer_names = {{ #ifdef WIN32 "D3D11", #endif - "OpenGL", "Software"}}; -static std::array<const char*, 3> s_gpu_renderer_display_names = {{ + "Vulkan", "OpenGL", "Software"}}; +static std::array<const char*, 4> s_gpu_renderer_display_names = {{ #ifdef WIN32 "Hardware (D3D11)", #endif - "Hardware (OpenGL)", "Software"}}; + "Hardware (Vulkan)", "Hardware (OpenGL)", "Software"}}; std::optional<GPURenderer> Settings::ParseRendererName(const char* str) { diff --git a/src/core/system.cpp b/src/core/system.cpp index e86fc2f49..064d12376 100644 --- a/src/core/system.cpp +++ b/src/core/system.cpp @@ -288,6 +288,10 @@ bool System::CreateGPU(GPURenderer renderer) m_gpu = GPU::CreateHardwareOpenGLRenderer(); break; + case GPURenderer::HardwareVulkan: + m_gpu = GPU::CreateHardwareVulkanRenderer(); + break; + #ifdef WIN32 case GPURenderer::HardwareD3D11: m_gpu = GPU::CreateHardwareD3D11Renderer(); diff --git a/src/core/types.h b/src/core/types.h index ca78133d5..d394acd22 100644 --- a/src/core/types.h +++ b/src/core/types.h @@ -53,6 +53,7 @@ enum class GPURenderer : u8 #ifdef WIN32 HardwareD3D11, #endif + HardwareVulkan, HardwareOpenGL, Software, Count diff --git a/src/duckstation-qt/CMakeLists.txt b/src/duckstation-qt/CMakeLists.txt index 6dc18d695..dfa086cd5 100644 --- a/src/duckstation-qt/CMakeLists.txt +++ b/src/duckstation-qt/CMakeLists.txt @@ -58,10 +58,12 @@ add_executable(duckstation-qt settingsdialog.cpp settingsdialog.h settingsdialog.ui + vulkanhostdisplay.cpp + vulkanhostdisplay.h ) target_include_directories(duckstation-qt PRIVATE "${Qt5Gui_PRIVATE_INCLUDE_DIRS}") -target_link_libraries(duckstation-qt PRIVATE frontend-common core common imgui glad minizip scmversion Qt5::Core Qt5::Gui Qt5::Widgets Qt5::Network) +target_link_libraries(duckstation-qt PRIVATE frontend-common core common imgui glad minizip scmversion vulkan-loader Qt5::Core Qt5::Gui Qt5::Widgets Qt5::Network) if(WIN32) target_sources(duckstation-qt PRIVATE diff --git a/src/duckstation-qt/d3d11hostdisplay.cpp b/src/duckstation-qt/d3d11hostdisplay.cpp index a3781b478..1e5ad79be 100644 --- a/src/duckstation-qt/d3d11hostdisplay.cpp +++ b/src/duckstation-qt/d3d11hostdisplay.cpp @@ -214,7 +214,7 @@ bool D3D11HostDisplay::createDeviceContext(bool debug_device) m_allow_tearing_supported = (allow_tearing_supported == TRUE); } - return true; + return createSwapChain(); } void D3D11HostDisplay::destroyDeviceContext() @@ -232,7 +232,7 @@ bool D3D11HostDisplay::shouldUseFlipModelSwapChain() const return m_widget->parent() == nullptr; } -bool D3D11HostDisplay::createSurface() +bool D3D11HostDisplay::createSwapChain() { m_using_flip_model_swap_chain = shouldUseFlipModelSwapChain(); @@ -282,11 +282,7 @@ bool D3D11HostDisplay::createSurface() if (FAILED(hr)) Log_WarningPrintf("MakeWindowAssociation() to disable ALT+ENTER failed"); - if (!createSwapChainRTV()) - return false; - - emit m_widget->windowResizedEvent(m_window_width, m_window_height); - return true; + return createSwapChainRTV(); } bool D3D11HostDisplay::createSwapChainRTV() @@ -314,6 +310,11 @@ bool D3D11HostDisplay::createSwapChainRTV() return true; } +bool D3D11HostDisplay::recreateSurface() 
+{ + return createSwapChain(); +} + void D3D11HostDisplay::destroySurface() { m_swap_chain_rtv.Reset(); diff --git a/src/duckstation-qt/d3d11hostdisplay.h b/src/duckstation-qt/d3d11hostdisplay.h index 4659e92c3..fa5bf1c65 100644 --- a/src/duckstation-qt/d3d11hostdisplay.h +++ b/src/duckstation-qt/d3d11hostdisplay.h @@ -22,7 +22,7 @@ public: bool hasDeviceContext() const override; bool createDeviceContext(bool debug_device) override; void destroyDeviceContext() override; - bool createSurface() override; + bool recreateSurface() override; void destroySurface() override; RenderAPI GetRenderAPI() const override; @@ -50,6 +50,7 @@ private: void destroyDeviceResources() override; bool shouldUseFlipModelSwapChain() const; + bool createSwapChain(); bool createSwapChainRTV(); void renderDisplay(); diff --git a/src/duckstation-qt/duckstation-qt.vcxproj b/src/duckstation-qt/duckstation-qt.vcxproj index 111ace081..1b55b2df5 100644 --- a/src/duckstation-qt/duckstation-qt.vcxproj +++ b/src/duckstation-qt/duckstation-qt.vcxproj @@ -59,8 +59,10 @@ + + @@ -97,6 +99,9 @@ {8bda439c-6358-45fb-9994-2ff083babe06} + + {9c8ddeb0-2b8f-4f5f-ba86-127cdf27f035} + {ee054e08-3799-4a59-a422-18259c105ffd} @@ -335,7 +340,7 @@ WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x86\include;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x86\include;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -356,7 +361,7 @@ WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x64\include;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x64\include;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -377,7 +382,7 @@ WITH_DISCORD_PRESENCE=1;_ITERATOR_DEBUG_LEVEL=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUGFAST;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x86\include;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x86\include;%(AdditionalIncludeDirectories) Default true false @@ -400,7 +405,7 @@ WITH_DISCORD_PRESENCE=1;_ITERATOR_DEBUG_LEVEL=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUGFAST;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x64\include;%(AdditionalIncludeDirectories) + 
$(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x64\include;%(AdditionalIncludeDirectories) Default true false @@ -422,7 +427,7 @@ MaxSpeed true WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x86\include;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x86\include;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -445,7 +450,7 @@ MaxSpeed true WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x86\include;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x86\include;%(AdditionalIncludeDirectories) true true stdcpp17 @@ -469,7 +474,7 @@ MaxSpeed true WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x64\include;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x64\include;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -492,7 +497,7 @@ MaxSpeed true WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x64\include;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\glad\Include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;$(SolutionDir)dep\msvc\qt5-x64\include;%(AdditionalIncludeDirectories) true true stdcpp17 @@ -512,4 +517,4 @@ - + \ No newline at end of file diff --git a/src/duckstation-qt/duckstation-qt.vcxproj.filters b/src/duckstation-qt/duckstation-qt.vcxproj.filters index 5cf95dab4..1b30ee96e 100644 --- a/src/duckstation-qt/duckstation-qt.vcxproj.filters +++ b/src/duckstation-qt/duckstation-qt.vcxproj.filters @@ -16,7 +16,6 @@ - @@ -39,6 +38,13 @@ + + + + + + + @@ -48,6 +54,7 @@ + @@ -74,6 +81,8 @@ + + @@ -85,6 +94,7 @@ + @@ -95,4 +105,4 @@ - + \ No newline at end of file diff --git a/src/duckstation-qt/mainwindow.cpp b/src/duckstation-qt/mainwindow.cpp index 8b6bceea3..4d208127a 100644 --- a/src/duckstation-qt/mainwindow.cpp +++ b/src/duckstation-qt/mainwindow.cpp @@ -103,13 +103,6 @@ void MainWindow::createDisplay(QThread* 
worker_thread, bool use_debug_device, bo return; } - if (!m_host_display->createSurface()) - { - reportError(tr("Failed to create host display surface.")); - m_host_display->destroyDeviceContext(); - return; - } - m_host_display->deactivateDeviceContext(); } @@ -151,7 +144,7 @@ void MainWindow::updateDisplay(QThread* worker_thread, bool fullscreen, bool ren // we need the surface visible.. this might be able to be replaced with something else QCoreApplication::processEvents(QEventLoop::ExcludeUserInputEvents); - if (!m_host_display->createSurface()) + if (!m_host_display->recreateSurface()) Panic("Failed to recreate surface on new widget."); m_display_widget->setFocus(); diff --git a/src/duckstation-qt/openglhostdisplay.cpp b/src/duckstation-qt/openglhostdisplay.cpp index 4feaca1fc..c6234e7c6 100644 --- a/src/duckstation-qt/openglhostdisplay.cpp +++ b/src/duckstation-qt/openglhostdisplay.cpp @@ -4,15 +4,10 @@ #include "imgui.h" #include "qtdisplaywidget.h" #include "qthostinterface.h" -#include -#include #include #include #include #include -#if !defined(WIN32) && !defined(APPLE) -#include -#endif #include Log_SetChannel(OpenGLHostDisplay); @@ -193,49 +188,16 @@ bool OpenGLHostDisplay::hasDeviceContext() const return static_cast<bool>(m_gl_context); } -WindowInfo OpenGLHostDisplay::getWindowInfo() const +bool OpenGLHostDisplay::createDeviceContext(bool debug_device) { - WindowInfo wi; - - // Windows and Apple are easy here since there's no display connection. -#if defined(WIN32) - wi.type = WindowInfo::Type::Win32; - wi.window_handle = reinterpret_cast<void*>(m_widget->winId()); -#elif defined(__APPLE__) - wi.type = WindowInfo::Type::MacOS; - wi.window_handle = reinterpret_cast<void*>(m_widget->winId()); -#else - QPlatformNativeInterface* pni = QGuiApplication::platformNativeInterface(); - const QString platform_name = QGuiApplication::platformName(); - if (platform_name == QStringLiteral("xcb")) - { - wi.type = WindowInfo::Type::X11; - wi.display_connection = pni->nativeResourceForWindow("display", m_widget->windowHandle()); - wi.window_handle = reinterpret_cast<void*>(m_widget->winId()); - } - else if (platform_name == QStringLiteral("wayland")) - { - wi.type = WindowInfo::Type::Wayland; - wi.display_connection = pni->nativeResourceForWindow("display", m_widget->windowHandle()); - wi.window_handle = pni->nativeResourceForWindow("surface", m_widget->windowHandle()); - } - else - { - qCritical() << "Unknown PNI platform " << platform_name; - return wi; - } -#endif - - wi.surface_width = m_widget->width(); - wi.surface_height = m_widget->height(); - wi.surface_format = WindowInfo::SurfaceFormat::RGB8; + m_window_width = m_widget->scaledWindowWidth(); + m_window_height = m_widget->scaledWindowHeight(); - return wi; -} + std::optional<WindowInfo> wi = getWindowInfo(); + if (!wi.has_value()) + return false; -bool OpenGLHostDisplay::createDeviceContext(bool debug_device) -{ - m_gl_context = GL::Context::Create(getWindowInfo()); + m_gl_context = GL::Context::Create(wi.value()); if (!m_gl_context) { Log_ErrorPrintf("Failed to create any GL context"); @@ -245,7 +207,7 @@ return true; } -bool OpenGLHostDisplay::initializeDeviceContext(bool debug_device) +bool OpenGLHostDisplay::initializeDeviceContext(std::string_view shader_cache_directory, bool debug_device) { if (debug_device && GLAD_GL_KHR_debug) { @@ -254,7 +216,7 @@ glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); } - if (!QtHostDisplay::initializeDeviceContext(debug_device)) + if (!QtHostDisplay::initializeDeviceContext(shader_cache_directory, debug_device)) { m_gl_context->DoneCurrent(); return false; @@ -286,14 +248,17 @@ void OpenGLHostDisplay::destroyDeviceContext() m_gl_context.reset(); } -bool OpenGLHostDisplay::createSurface() +bool OpenGLHostDisplay::recreateSurface() { m_window_width = m_widget->scaledWindowWidth(); m_window_height = m_widget->scaledWindowHeight(); - emit m_widget->windowResizedEvent(m_window_width, m_window_height); if (m_gl_context) - m_gl_context->ChangeSurface(getWindowInfo()); + { + std::optional<WindowInfo> wi = getWindowInfo(); + if (!wi.has_value() || !m_gl_context->ChangeSurface(wi.value())) + return false; + } return true; }
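getWindowInfo() moves from the GL backend up into QtHostDisplay (re-added below in qthostdisplay.cpp) so the new Vulkan backend can reuse it, and it now returns std::optional so an unknown QPA platform is a hard failure instead of a half-filled struct. For orientation, these are the WindowInfo fields the function populates, as used throughout this diff; a sketch only, since the real definition lives in common/window_info.h and may contain additional members and enumerators.

```cpp
// Sketch of the WindowInfo fields populated by getWindowInfo() (u32 as in
// common/types.h; the actual struct in common/window_info.h may differ).
struct WindowInfo
{
  enum class Type { Win32, MacOS, X11, Wayland /* ... */ };
  enum class SurfaceFormat { RGB8 /* ... */ };

  Type type{};
  void* display_connection = nullptr; // X11 Display* or wl_display*
  void* window_handle = nullptr;      // HWND, NSView*, X11 Window, or wl_surface*
  u32 surface_width = 0;
  u32 surface_height = 0;
  SurfaceFormat surface_format = SurfaceFormat::RGB8;
};
```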
diff --git a/src/duckstation-qt/openglhostdisplay.h b/src/duckstation-qt/openglhostdisplay.h index 6ec94678f..9342ae9f4 100644 --- a/src/duckstation-qt/openglhostdisplay.h +++ b/src/duckstation-qt/openglhostdisplay.h @@ -27,11 +27,11 @@ public: bool hasDeviceContext() const override; bool createDeviceContext(bool debug_device) override; - bool initializeDeviceContext(bool debug_device) override; + bool initializeDeviceContext(std::string_view shader_cache_directory, bool debug_device) override; bool activateDeviceContext() override; void deactivateDeviceContext() override; void destroyDeviceContext() override; - bool createSurface() override; + bool recreateSurface() override; void destroySurface(); RenderAPI GetRenderAPI() const override; @@ -54,8 +54,6 @@ private: const char* GetGLSLVersionString() const; std::string GetGLSLVersionHeader() const; - WindowInfo getWindowInfo() const; - bool createImGuiContext() override; void destroyImGuiContext() override; bool createDeviceResources() override; diff --git a/src/duckstation-qt/qthostdisplay.cpp b/src/duckstation-qt/qthostdisplay.cpp index c343611df..d7d0df492 100644 --- a/src/duckstation-qt/qthostdisplay.cpp +++ b/src/duckstation-qt/qthostdisplay.cpp @@ -4,7 +4,12 @@ #include "imgui.h" #include "qtdisplaywidget.h" #include "qthostinterface.h" +#include +#include #include +#if !defined(WIN32) && !defined(APPLE) +#include +#endif QtHostDisplay::QtHostDisplay(QtHostInterface* host_interface) : m_host_interface(host_interface) {} @@ -42,7 +47,7 @@ bool QtHostDisplay::createDeviceContext(bool debug_device) return false; } -bool QtHostDisplay::initializeDeviceContext(bool debug_device) +bool QtHostDisplay::initializeDeviceContext(std::string_view shader_cache_directory, bool debug_device) { if (!createImGuiContext() || !createDeviceResources()) return false; @@ -63,7 +68,7 @@ void QtHostDisplay::destroyDeviceContext() destroyDeviceResources(); } -bool QtHostDisplay::createSurface() +bool QtHostDisplay::recreateSurface() { return false; } @@ -118,3 +123,43 @@ void QtHostDisplay::updateImGuiDisplaySize() io.DisplaySize.x = static_cast<float>(m_window_width); io.DisplaySize.y = static_cast<float>(m_window_height); } + +std::optional<WindowInfo> QtHostDisplay::getWindowInfo() const +{ + WindowInfo wi; + + // Windows and Apple are easy here since there's no display connection. 
+#if defined(WIN32) + wi.type = WindowInfo::Type::Win32; + wi.window_handle = reinterpret_cast<void*>(m_widget->winId()); +#elif defined(__APPLE__) + wi.type = WindowInfo::Type::MacOS; + wi.window_handle = reinterpret_cast<void*>(m_widget->winId()); +#else + QPlatformNativeInterface* pni = QGuiApplication::platformNativeInterface(); + const QString platform_name = QGuiApplication::platformName(); + if (platform_name == QStringLiteral("xcb")) + { + wi.type = WindowInfo::Type::X11; + wi.display_connection = pni->nativeResourceForWindow("display", m_widget->windowHandle()); + wi.window_handle = reinterpret_cast<void*>(m_widget->winId()); + } + else if (platform_name == QStringLiteral("wayland")) + { + wi.type = WindowInfo::Type::Wayland; + wi.display_connection = pni->nativeResourceForWindow("display", m_widget->windowHandle()); + wi.window_handle = pni->nativeResourceForWindow("surface", m_widget->windowHandle()); + } + else + { + qCritical() << "Unknown PNI platform " << platform_name; + return std::nullopt; + } +#endif + + wi.surface_width = m_widget->width(); + wi.surface_height = m_widget->height(); + wi.surface_format = WindowInfo::SurfaceFormat::RGB8; + + return wi; +} diff --git a/src/duckstation-qt/qthostdisplay.h b/src/duckstation-qt/qthostdisplay.h index 285291ad9..dbf337406 100644 --- a/src/duckstation-qt/qthostdisplay.h +++ b/src/duckstation-qt/qthostdisplay.h @@ -1,6 +1,9 @@ #pragma once #include "common/types.h" +#include "common/window_info.h" #include "core/host_display.h" +#include <optional> +#include <string_view> class QThread; class QWidget; @@ -22,11 +25,11 @@ public: virtual bool hasDeviceContext() const; virtual bool createDeviceContext(bool debug_device); - virtual bool initializeDeviceContext(bool debug_device); + virtual bool initializeDeviceContext(std::string_view shader_cache_directory, bool debug_device); virtual bool activateDeviceContext(); virtual void deactivateDeviceContext(); virtual void destroyDeviceContext(); - virtual bool createSurface(); + virtual bool recreateSurface(); virtual void destroySurface(); virtual void WindowResized(s32 new_window_width, s32 new_window_height) override; @@ -39,6 +42,8 @@ protected: virtual bool createDeviceResources(); virtual void destroyDeviceResources(); + std::optional<WindowInfo> getWindowInfo() const; + QtHostInterface* m_host_interface; QtDisplayWidget* m_widget = nullptr; }; diff --git a/src/duckstation-qt/qthostinterface.cpp b/src/duckstation-qt/qthostinterface.cpp index 18233d813..5f56492ae 100644 --- a/src/duckstation-qt/qthostinterface.cpp +++ b/src/duckstation-qt/qthostinterface.cpp @@ -15,6 +15,7 @@ #include "qtprogresscallback.h" #include "qtsettingsinterface.h" #include "qtutils.h" +#include "vulkanhostdisplay.h" #include #include #include @@ -327,7 +328,7 @@ bool QtHostInterface::AcquireHostDisplay() } if (!getHostDisplay()->activateDeviceContext() || - !getHostDisplay()->initializeDeviceContext(m_settings.gpu_use_debug_device)) + !getHostDisplay()->initializeDeviceContext(GetShaderCacheDirectory(), m_settings.gpu_use_debug_device)) { getHostDisplay()->destroyDeviceContext(); emit destroyDisplayRequested(); @@ -343,14 +344,26 @@ QtHostDisplay* QtHostInterface::createHostDisplay() { Assert(!getHostDisplay()); + switch (m_settings.gpu_renderer) + { + case GPURenderer::HardwareVulkan: + m_display = new VulkanHostDisplay(this); + break; + + case GPURenderer::HardwareOpenGL: +#ifndef WIN32 + default: +#endif + m_display = new OpenGLHostDisplay(this); + break; + #ifdef WIN32 - if (m_settings.gpu_renderer == GPURenderer::HardwareOpenGL) - m_display = new OpenGLHostDisplay(this); - else - m_display = new D3D11HostDisplay(this); -#else - m_display = new OpenGLHostDisplay(this); + case GPURenderer::HardwareD3D11: + default: + m_display = new D3D11HostDisplay(this); + break; #endif + } return getHostDisplay(); }
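One subtlety in createHostDisplay() above: the `default:` label is compiled into a different `case` arm depending on the platform, so an out-of-range renderer setting degrades to D3D11 on Windows and to OpenGL elsewhere without duplicating the switch. The pattern, reduced to its skeleton (illustrative, backend construction elided):

```cpp
// Skeleton of the platform-dependent default-label pattern.
switch (renderer)
{
  case GPURenderer::HardwareVulkan:
    // ... create Vulkan backend ...
    break;

  case GPURenderer::HardwareOpenGL:
#ifndef WIN32
  default: // non-Windows builds fall back to OpenGL
#endif
    // ... create OpenGL backend ...
    break;

#ifdef WIN32
  case GPURenderer::HardwareD3D11:
  default: // Windows builds fall back to D3D11
    // ... create D3D11 backend ...
    break;
#endif
}
```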
diff --git a/src/duckstation-qt/vulkanhostdisplay.cpp b/src/duckstation-qt/vulkanhostdisplay.cpp new file mode 100644 index 000000000..ba86e0a1f --- /dev/null +++ b/src/duckstation-qt/vulkanhostdisplay.cpp @@ -0,0 +1,168 @@ +#include "vulkanhostdisplay.h" +#include "common/assert.h" +#include "common/log.h" +#include "imgui.h" +#include "qtdisplaywidget.h" +Log_SetChannel(VulkanHostDisplay); + +VulkanHostDisplay::VulkanHostDisplay(QtHostInterface* host_interface) : QtHostDisplay(host_interface) {} + +VulkanHostDisplay::~VulkanHostDisplay() = default; + +HostDisplay::RenderAPI VulkanHostDisplay::GetRenderAPI() const +{ + return m_vulkan_display.GetRenderAPI(); +} + +void* VulkanHostDisplay::GetRenderDevice() const +{ + return m_vulkan_display.GetRenderDevice(); +} + +void* VulkanHostDisplay::GetRenderContext() const +{ + return m_vulkan_display.GetRenderContext(); +} + +std::unique_ptr<HostDisplayTexture> VulkanHostDisplay::CreateTexture(u32 width, u32 height, const void* initial_data, + u32 initial_data_stride, bool dynamic) +{ + return m_vulkan_display.CreateTexture(width, height, initial_data, initial_data_stride, dynamic); +} + +void VulkanHostDisplay::UpdateTexture(HostDisplayTexture* texture, u32 x, u32 y, u32 width, u32 height, + const void* texture_data, u32 texture_data_stride) +{ + m_vulkan_display.UpdateTexture(texture, x, y, width, height, texture_data, texture_data_stride); +} + +bool VulkanHostDisplay::DownloadTexture(const void* texture_handle, u32 x, u32 y, u32 width, u32 height, void* out_data, + u32 out_data_stride) +{ + return m_vulkan_display.DownloadTexture(texture_handle, x, y, width, height, out_data, out_data_stride); +} + +void VulkanHostDisplay::SetVSync(bool enabled) +{ + m_vulkan_display.SetVSync(enabled); +} + +bool VulkanHostDisplay::hasDeviceContext() const +{ + return m_vulkan_display.HasContext(); +} + +bool VulkanHostDisplay::createDeviceContext(bool debug_device) +{ + std::optional<WindowInfo> wi = getWindowInfo(); + if (!wi || !m_vulkan_display.CreateContextAndSwapChain(wi.value(), debug_device)) + return false; + + m_window_width = static_cast<s32>(m_vulkan_display.GetSwapChainWidth()); + m_window_height = static_cast<s32>(m_vulkan_display.GetSwapChainHeight()); + return true; +} + +bool VulkanHostDisplay::initializeDeviceContext(std::string_view shader_cache_directory, bool debug_device) +{ + m_vulkan_display.CreateShaderCache(shader_cache_directory, debug_device); + + return QtHostDisplay::initializeDeviceContext(shader_cache_directory, debug_device); +} + +bool VulkanHostDisplay::activateDeviceContext() +{ + return true; +} + +void VulkanHostDisplay::deactivateDeviceContext() {} + +void VulkanHostDisplay::destroyDeviceContext() +{ + QtHostDisplay::destroyDeviceContext(); + m_vulkan_display.DestroyShaderCache(); + m_vulkan_display.DestroySwapChain(); + m_vulkan_display.DestroyContext(); +} + +bool VulkanHostDisplay::recreateSurface() +{ + std::optional<WindowInfo> wi = getWindowInfo(); + if (!wi.has_value()) + return false; + + if (!m_vulkan_display.RecreateSwapChain(wi.value())) + return false; + + m_window_width = static_cast<s32>(m_vulkan_display.GetSwapChainWidth()); + m_window_height = static_cast<s32>(m_vulkan_display.GetSwapChainHeight()); + return true; +} + +void VulkanHostDisplay::destroySurface() +{ + m_vulkan_display.DestroySwapChain(); +} + +void VulkanHostDisplay::WindowResized(s32 new_window_width, s32 new_window_height) +{ + QtHostDisplay::WindowResized(new_window_width, new_window_height); + + m_vulkan_display.ResizeSwapChain(static_cast<u32>(new_window_width), static_cast<u32>(new_window_height)); + m_window_width = static_cast<s32>(m_vulkan_display.GetSwapChainWidth()); + m_window_height = static_cast<s32>(m_vulkan_display.GetSwapChainHeight()); +} + +bool VulkanHostDisplay::createDeviceResources() +{ + if (!QtHostDisplay::createDeviceResources()) + return false; + + return m_vulkan_display.CreateResources(); +} + +void VulkanHostDisplay::destroyDeviceResources() +{ + QtHostDisplay::destroyDeviceResources(); + m_vulkan_display.DestroyResources(); +} + +bool VulkanHostDisplay::createImGuiContext() +{ + if (!QtHostDisplay::createImGuiContext() || !m_vulkan_display.CreateImGuiContext()) + return false; + + ImGui::NewFrame(); + return true; +} + +void VulkanHostDisplay::destroyImGuiContext() +{ + m_vulkan_display.DestroyImGuiContext(); + QtHostDisplay::destroyImGuiContext(); +} + +void VulkanHostDisplay::Render() +{ + if (!m_vulkan_display.HasSwapChain() || !m_vulkan_display.BeginRender()) + return; + + if (HasDisplayTexture()) + { + const auto [left, top, width, height] = CalculateDrawRect(m_window_width, m_window_height, m_display_top_margin); + m_vulkan_display.RenderDisplay(left, top, width, height, m_display_texture_handle, m_display_texture_width, + m_display_texture_height, m_display_texture_view_x, m_display_texture_view_y, + m_display_texture_view_width, m_display_texture_view_height, + m_display_linear_filtering); + } + + m_vulkan_display.RenderImGui(); + + if (HasSoftwareCursor()) + { + const auto [left, top, width, height] = CalculateSoftwareCursorDrawRect(); + m_vulkan_display.RenderSoftwareCursor(left, top, width, height, m_cursor_texture.get()); + } + + m_vulkan_display.EndRenderAndPresent(); +}
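Render() above shows the whole Vulkan frame in one place. Reduced to its skeleton, under the assumption (consistent with how the diff uses it) that BeginRender() acquires the next swap chain image and starts recording, while EndRenderAndPresent() submits the command buffer and queues the present:

```cpp
// Per-frame flow (illustrative; the method names are the
// FrontendCommon::VulkanHostDisplay API used by the diff, the helper itself
// is hypothetical).
void RenderFrameSketch(FrontendCommon::VulkanHostDisplay& display)
{
  // Skip the frame entirely if there is nothing to present to (e.g. the
  // surface was destroyed) or the next swap chain image cannot be acquired.
  if (!display.HasSwapChain() || !display.BeginRender())
    return;

  // Everything between BeginRender() and EndRenderAndPresent() records into
  // the frame's command buffer: emulated display first, then the UI overlay.
  // display.RenderDisplay(...);
  display.RenderImGui();

  display.EndRenderAndPresent();
}
```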
diff --git a/src/duckstation-qt/vulkanhostdisplay.h b/src/duckstation-qt/vulkanhostdisplay.h new file mode 100644 index 000000000..dcd496cab --- /dev/null +++ b/src/duckstation-qt/vulkanhostdisplay.h @@ -0,0 +1,49 @@ +#pragma once +#include "common/window_info.h" +#include "core/host_display.h" +#include "qtdisplaywidget.h" +#include "qthostdisplay.h" +#include "frontend-common/vulkan_host_display.h" +#include <memory> + +class QtHostInterface; + +class VulkanHostDisplay final : public QtHostDisplay +{ +public: + VulkanHostDisplay(QtHostInterface* host_interface); + ~VulkanHostDisplay(); + + bool hasDeviceContext() const override; + bool createDeviceContext(bool debug_device) override; + bool initializeDeviceContext(std::string_view shader_cache_directory, bool debug_device) override; + bool activateDeviceContext() override; + void deactivateDeviceContext() override; + void destroyDeviceContext() override; + bool recreateSurface() override; + void destroySurface(); + + RenderAPI GetRenderAPI() const override; + void* GetRenderDevice() const override; + void* GetRenderContext() const override; + void WindowResized(s32 new_window_width, s32 new_window_height) override; + + std::unique_ptr<HostDisplayTexture> CreateTexture(u32 width, u32 height, const void* initial_data, + u32 initial_data_stride, bool dynamic) override; + void UpdateTexture(HostDisplayTexture* texture, u32 x, u32 y, u32 width, u32 height, const void* texture_data, + u32 texture_data_stride) override; + bool DownloadTexture(const void* texture_handle, u32 x, u32 y, u32 width, u32 height, void* out_data, + u32 out_data_stride) override; + + void SetVSync(bool enabled) override; + + void Render() override; + 
+private: + bool createImGuiContext() override; + void destroyImGuiContext() override; + bool createDeviceResources() override; + void destroyDeviceResources() override; + + FrontendCommon::VulkanHostDisplay m_vulkan_display; +}; diff --git a/src/duckstation-sdl/CMakeLists.txt b/src/duckstation-sdl/CMakeLists.txt index 4c5d5f29f..ba0029bbb 100644 --- a/src/duckstation-sdl/CMakeLists.txt +++ b/src/duckstation-sdl/CMakeLists.txt @@ -7,10 +7,14 @@ add_executable(duckstation-sdl sdl_host_interface.cpp sdl_host_interface.h sdl_key_names.h + sdl_util.cpp + sdl_util.h + sdl_vulkan_host_display.cpp + sdl_vulkan_host_display.h ) target_include_directories(duckstation-sdl PRIVATE ${SDL2_INCLUDE_DIRS}) -target_link_libraries(duckstation-sdl PRIVATE core common imgui nativefiledialog glad frontend-common scmversion ${SDL2_LIBRARIES}) +target_link_libraries(duckstation-sdl PRIVATE core common imgui nativefiledialog glad frontend-common scmversion vulkan-loader ${SDL2_LIBRARIES}) if(WIN32) target_sources(duckstation-sdl PRIVATE diff --git a/src/duckstation-sdl/d3d11_host_display.cpp b/src/duckstation-sdl/d3d11_host_display.cpp index e660e2305..9100d407a 100644 --- a/src/duckstation-sdl/d3d11_host_display.cpp +++ b/src/duckstation-sdl/d3d11_host_display.cpp @@ -381,6 +381,7 @@ bool D3D11HostDisplay::CreateImGuiContext() ImGui_ImplDX11_NewFrame(); ImGui_ImplSDL2_NewFrame(m_window); + ImGui::NewFrame(); return true; } diff --git a/src/duckstation-sdl/duckstation-sdl.vcxproj b/src/duckstation-sdl/duckstation-sdl.vcxproj index 9041db343..aa8d1d4fa 100644 --- a/src/duckstation-sdl/duckstation-sdl.vcxproj +++ b/src/duckstation-sdl/duckstation-sdl.vcxproj @@ -60,6 +60,8 @@ + + @@ -68,6 +70,8 @@ + + @@ -227,7 +231,7 @@ WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -248,7 +252,7 @@ WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -269,7 +273,7 @@ WITH_DISCORD_PRESENCE=1;_ITERATOR_DEBUG_LEVEL=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUGFAST;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - 
$(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) Default true false @@ -293,7 +297,7 @@ WITH_DISCORD_PRESENCE=1;_ITERATOR_DEBUG_LEVEL=1;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUGFAST;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true ProgramDatabase - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) Default true false @@ -316,7 +320,7 @@ MaxSpeed true WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -339,7 +343,7 @@ MaxSpeed true WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true true stdcpp17 @@ -363,7 +367,7 @@ MaxSpeed true WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true false stdcpp17 @@ -386,7 +390,7 @@ MaxSpeed true 
WITH_DISCORD_PRESENCE=1;_CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) + $(SolutionDir)dep\msvc\include;$(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\nativefiledialog\include;$(SolutionDir)dep\glad\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories) true true stdcpp17 diff --git a/src/duckstation-sdl/duckstation-sdl.vcxproj.filters b/src/duckstation-sdl/duckstation-sdl.vcxproj.filters index cb46bd830..58924d54d 100644 --- a/src/duckstation-sdl/duckstation-sdl.vcxproj.filters +++ b/src/duckstation-sdl/duckstation-sdl.vcxproj.filters @@ -6,6 +6,8 @@ + + @@ -14,6 +16,8 @@ + + diff --git a/src/duckstation-sdl/opengl_host_display.cpp b/src/duckstation-sdl/opengl_host_display.cpp index bb5cb4ceb..9ea69e2ed 100644 --- a/src/duckstation-sdl/opengl_host_display.cpp +++ b/src/duckstation-sdl/opengl_host_display.cpp @@ -2,6 +2,7 @@ #include "common/assert.h" #include "common/log.h" #include "imgui_impl_sdl.h" +#include "sdl_util.h" #include #include #include @@ -9,17 +10,6 @@ #include Log_SetChannel(OpenGLHostDisplay); -#ifdef __APPLE__ -#include -struct NSView; - -static NSView* GetContentViewFromWindow(NSWindow* window) -{ - // window.contentView - return reinterpret_cast(objc_msgSend)(reinterpret_cast(window), sel_getUid("contentView")); -} -#endif - class OpenGLDisplayWidgetTexture : public HostDisplayTexture { public: @@ -220,52 +210,11 @@ static void APIENTRY GLDebugCallback(GLenum source, GLenum type, GLuint id, GLen bool OpenGLHostDisplay::CreateGLContext(bool debug_device) { - SDL_SysWMinfo syswm = {}; - SDL_VERSION(&syswm.version); - if (!SDL_GetWindowWMInfo(m_window, &syswm)) - { - Log_ErrorPrintf("SDL_GetWindowWMInfo failed"); + std::optional wi = SDLUtil::GetWindowInfoForSDLWindow(m_window); + if (!wi) return false; - } - - int window_width, window_height; - SDL_GetWindowSize(m_window, &window_width, &window_height); - - WindowInfo wi; - wi.surface_width = static_cast(window_width); - wi.surface_height = static_cast(window_height); - wi.surface_format = WindowInfo::SurfaceFormat::RGB8; - - switch (syswm.subsystem) - { -#ifdef SDL_VIDEO_DRIVER_WINDOWS - case SDL_SYSWM_WINDOWS: - wi.type = WindowInfo::Type::Win32; - wi.window_handle = syswm.info.win.window; - break; -#endif -#ifdef SDL_VIDEO_DRIVER_COCOA - case SDL_SYSWM_COCOA: - wi.type = WindowInfo::Type::MacOS; - wi.window_handle = GetContentViewFromWindow(syswm.info.cocoa.window); - break; -#endif - -#ifdef SDL_VIDEO_DRIVER_X11 - case SDL_SYSWM_X11: - wi.type = WindowInfo::Type::X11; - wi.window_handle = reinterpret_cast(static_cast(syswm.info.x11.window)); - wi.display_connection = syswm.info.x11.display; - break; -#endif - - default: - Log_ErrorPrintf("Unhandled syswm subsystem %u", static_cast(syswm.subsystem)); - return false; - } - - m_gl_context = GL::Context::Create(wi); + m_gl_context = GL::Context::Create(wi.value()); if (!m_gl_context) { Log_ErrorPrintf("Failed to create a GL context of any kind."); @@ -298,6 +247,7 @@ bool OpenGLHostDisplay::CreateImGuiContext() ImGui_ImplOpenGL3_NewFrame(); ImGui_ImplSDL2_NewFrame(m_window); + ImGui::NewFrame(); return true; } diff --git a/src/duckstation-sdl/sdl_host_interface.cpp 
b/src/duckstation-sdl/sdl_host_interface.cpp index 912247aba..49260089e 100644 --- a/src/duckstation-sdl/sdl_host_interface.cpp +++ b/src/duckstation-sdl/sdl_host_interface.cpp @@ -17,6 +17,7 @@ #include "opengl_host_display.h" #include "scmversion/scmversion.h" #include "sdl_key_names.h" +#include "sdl_vulkan_host_display.h" #include #include #include @@ -129,13 +130,29 @@ void SDLHostInterface::DestroySDLWindow() bool SDLHostInterface::CreateDisplay() { const bool debug_device = m_settings.gpu_use_debug_device; + const std::string shader_cache_directory(GetShaderCacheDirectory()); std::unique_ptr<HostDisplay> display; + + switch (m_settings.gpu_renderer) + { + case GPURenderer::HardwareVulkan: + display = SDLVulkanHostDisplay::Create(m_window, shader_cache_directory, debug_device); + break; + + case GPURenderer::HardwareOpenGL: +#ifndef WIN32 + default: +#endif + display = OpenGLHostDisplay::Create(m_window, debug_device); + break; + #ifdef WIN32 - display = UseOpenGLRenderer() ? OpenGLHostDisplay::Create(m_window, debug_device) : - D3D11HostDisplay::Create(m_window, debug_device); -#else - display = OpenGLHostDisplay::Create(m_window, debug_device); + case GPURenderer::HardwareD3D11: + default: + display = D3D11HostDisplay::Create(m_window, debug_device); + break; #endif + } if (!display) return false; @@ -181,13 +198,32 @@ void SDLHostInterface::UpdateFramebufferScale() bool SDLHostInterface::AcquireHostDisplay() { - // Handle renderer switch if required on Windows. -#ifdef WIN32 + // Handle renderer switch if required. const HostDisplay::RenderAPI render_api = m_display->GetRenderAPI(); - const bool render_api_is_gl = - render_api == HostDisplay::RenderAPI::OpenGL || render_api == HostDisplay::RenderAPI::OpenGLES; - const bool render_api_wants_gl = UseOpenGLRenderer(); - if (render_api_is_gl != render_api_wants_gl) + bool needs_switch = false; + switch (m_settings.gpu_renderer) + { +#ifdef WIN32 + case GPURenderer::HardwareD3D11: + needs_switch = (render_api != HostDisplay::RenderAPI::D3D11); + break; +#endif + + case GPURenderer::HardwareVulkan: + needs_switch = (render_api != HostDisplay::RenderAPI::Vulkan); + break; + + case GPURenderer::HardwareOpenGL: + needs_switch = (render_api != HostDisplay::RenderAPI::OpenGL && render_api != HostDisplay::RenderAPI::OpenGLES); + break; + + case GPURenderer::Software: + default: + needs_switch = false; + break; + } + + if (needs_switch) { ImGui::EndFrame(); DestroyDisplay(); @@ -198,10 +234,7 @@ bool SDLHostInterface::AcquireHostDisplay() if (!CreateDisplay()) Panic("Failed to recreate display on GPU renderer switch"); - - ImGui::NewFrame(); } -#endif return true; } @@ -358,8 +391,6 @@ bool SDLHostInterface::Initialize() RegisterHotkeys(); - ImGui::NewFrame(); - // process events to pick up controllers before updating input map ProcessEvents(); UpdateInputMap(); diff --git a/src/duckstation-sdl/sdl_host_interface.h b/src/duckstation-sdl/sdl_host_interface.h index 3625b4de0..ad019fbcd 100644 --- a/src/duckstation-sdl/sdl_host_interface.h +++ b/src/duckstation-sdl/sdl_host_interface.h @@ -58,12 +58,6 @@ protected: private: bool HasSystem() const { return static_cast<bool>(m_system); } -#ifdef WIN32 - bool UseOpenGLRenderer() const { return m_settings.gpu_renderer == GPURenderer::HardwareOpenGL; } -#else - bool UseOpenGLRenderer() const { return true; } -#endif - static float GetDPIScaleFactor(SDL_Window* window); bool CreateSDLWindow();
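The helper added below centralizes the SDL_SysWMinfo-to-WindowInfo conversion that the GL backend previously did inline, so the new SDL Vulkan display can share it. Expected call pattern (illustrative caller, not from the tree):

```cpp
// Hypothetical caller: translate an SDL window to a native handle, bailing
// out when the video driver is one the helper does not understand.
std::optional<WindowInfo> wi = SDLUtil::GetWindowInfoForSDLWindow(window);
if (!wi.has_value())
  return false; // no usable native surface; cannot create a swap chain
```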
diff --git a/src/duckstation-sdl/sdl_util.cpp b/src/duckstation-sdl/sdl_util.cpp new file mode 100644 index 000000000..82a1b33cf --- /dev/null +++ b/src/duckstation-sdl/sdl_util.cpp @@ -0,0 +1,68 @@ +#include "sdl_util.h" +#include "common/log.h" +#include <SDL_syswm.h> +Log_SetChannel(SDLUtil); + +#ifdef __APPLE__ +#include <objc/message.h> +struct NSView; + +static NSView* GetContentViewFromWindow(NSWindow* window) +{ + // window.contentView + return reinterpret_cast<NSView* (*)(id, SEL)>(objc_msgSend)(reinterpret_cast<id>(window), sel_getUid("contentView")); +} +#endif + +namespace SDLUtil { + +std::optional<WindowInfo> GetWindowInfoForSDLWindow(SDL_Window* window) +{ + SDL_SysWMinfo syswm = {}; + SDL_VERSION(&syswm.version); + if (!SDL_GetWindowWMInfo(window, &syswm)) + { + Log_ErrorPrintf("SDL_GetWindowWMInfo failed"); + return std::nullopt; + } + + int window_width, window_height; + SDL_GetWindowSize(window, &window_width, &window_height); + + WindowInfo wi; + wi.surface_width = static_cast<u32>(window_width); + wi.surface_height = static_cast<u32>(window_height); + wi.surface_format = WindowInfo::SurfaceFormat::RGB8; + + switch (syswm.subsystem) + { +#ifdef SDL_VIDEO_DRIVER_WINDOWS + case SDL_SYSWM_WINDOWS: + wi.type = WindowInfo::Type::Win32; + wi.window_handle = syswm.info.win.window; + break; +#endif + +#ifdef SDL_VIDEO_DRIVER_COCOA + case SDL_SYSWM_COCOA: + wi.type = WindowInfo::Type::MacOS; + wi.window_handle = GetContentViewFromWindow(syswm.info.cocoa.window); + break; +#endif + +#ifdef SDL_VIDEO_DRIVER_X11 + case SDL_SYSWM_X11: + wi.type = WindowInfo::Type::X11; + wi.window_handle = reinterpret_cast<void*>(static_cast<uintptr_t>(syswm.info.x11.window)); + wi.display_connection = syswm.info.x11.display; + break; +#endif + + default: + Log_ErrorPrintf("Unhandled syswm subsystem %u", static_cast<u32>(syswm.subsystem)); + return std::nullopt; + } + + return wi; +} +} // namespace SDLUtil \ No newline at end of file diff --git a/src/duckstation-sdl/sdl_util.h b/src/duckstation-sdl/sdl_util.h new file mode 100644 index 000000000..ce00e7818 --- /dev/null +++ b/src/duckstation-sdl/sdl_util.h @@ -0,0 +1,10 @@ +#pragma once +#include "common/types.h" +#include "common/window_info.h" +#include <optional> + +struct SDL_Window; + +namespace SDLUtil { +std::optional<WindowInfo> GetWindowInfoForSDLWindow(SDL_Window* window); +} \ No newline at end of file diff --git a/src/duckstation-sdl/sdl_vulkan_host_display.cpp b/src/duckstation-sdl/sdl_vulkan_host_display.cpp new file mode 100644 index 000000000..9262b211e --- /dev/null +++ b/src/duckstation-sdl/sdl_vulkan_host_display.cpp @@ -0,0 +1,134 @@ +#include "sdl_vulkan_host_display.h" +#include "common/assert.h" +#include "common/log.h" +#include "imgui.h" +#include "imgui_impl_sdl.h" +#include "imgui_impl_vulkan.h" +#include "sdl_util.h" +#include +#include +Log_SetChannel(VulkanHostDisplay); + +SDLVulkanHostDisplay::SDLVulkanHostDisplay(SDL_Window* window) : m_window(window) +{ + SDL_GetWindowSize(window, &m_window_width, &m_window_height); +} + +SDLVulkanHostDisplay::~SDLVulkanHostDisplay() +{ + ImGui_ImplSDL2_Shutdown(); + m_display.DestroyImGuiContext(); + m_display.DestroyResources(); + m_display.DestroyShaderCache(); + m_display.DestroySwapChain(); + m_display.DestroyContext(); + + if (m_window) + SDL_DestroyWindow(m_window); +} + +std::unique_ptr<HostDisplay> SDLVulkanHostDisplay::Create(SDL_Window* window, std::string_view shader_cache_directory, + bool debug_device) +{ + std::unique_ptr<SDLVulkanHostDisplay> display = std::make_unique<SDLVulkanHostDisplay>(window); + if (!display->Initialize(shader_cache_directory, debug_device)) + return nullptr; + + return display; +} + +HostDisplay::RenderAPI SDLVulkanHostDisplay::GetRenderAPI() const +{ + return m_display.GetRenderAPI(); +} + +void* SDLVulkanHostDisplay::GetRenderDevice() const +{ + 
return m_display.GetRenderDevice(); +} + +void* SDLVulkanHostDisplay::GetRenderContext() const +{ + return m_display.GetRenderContext(); +} + +void SDLVulkanHostDisplay::WindowResized(s32 new_window_width, s32 new_window_height) +{ + m_display.ResizeSwapChain(static_cast(new_window_width), static_cast(new_window_height)); + m_window_width = static_cast(m_display.GetSwapChainWidth()); + m_window_height = static_cast(m_display.GetSwapChainHeight()); +} + +std::unique_ptr SDLVulkanHostDisplay::CreateTexture(u32 width, u32 height, const void* data, + u32 data_stride, bool dynamic /*= false*/) +{ + return m_display.CreateTexture(width, height, data, data_stride, dynamic); +} + +void SDLVulkanHostDisplay::UpdateTexture(HostDisplayTexture* texture, u32 x, u32 y, u32 width, u32 height, + const void* data, u32 data_stride) +{ + m_display.UpdateTexture(texture, x, y, width, height, data, data_stride); +} + +bool SDLVulkanHostDisplay::DownloadTexture(const void* texture_handle, u32 x, u32 y, u32 width, u32 height, + void* out_data, u32 out_data_stride) +{ + return m_display.DownloadTexture(texture_handle, x, y, width, height, out_data, out_data_stride); +} + +void SDLVulkanHostDisplay::SetVSync(bool enabled) +{ + m_display.SetVSync(enabled); +} + +bool SDLVulkanHostDisplay::Initialize(std::string_view shader_cache_directory, bool debug_device) +{ + std::optional wi = SDLUtil::GetWindowInfoForSDLWindow(m_window); + if (!wi.has_value()) + { + Log_ErrorPrintf("Failed to get window info for SDL window"); + return false; + } + + if (!m_display.CreateContextAndSwapChain(wi.value(), debug_device)) + return false; + + m_display.CreateShaderCache(shader_cache_directory, debug_device); + + if (!m_display.CreateResources()) + return false; + + if (!m_display.CreateImGuiContext() || !ImGui_ImplSDL2_InitForVulkan(m_window)) + return false; + + ImGui_ImplSDL2_NewFrame(m_window); + ImGui::NewFrame(); + return true; +} + +void SDLVulkanHostDisplay::Render() +{ + if (!m_display.BeginRender()) + return; + + if (HasDisplayTexture()) + { + const auto [left, top, width, height] = CalculateDrawRect(m_window_width, m_window_height, m_display_top_margin); + m_display.RenderDisplay(left, top, width, height, m_display_texture_handle, m_display_texture_width, + m_display_texture_height, m_display_texture_view_x, m_display_texture_view_y, + m_display_texture_view_width, m_display_texture_view_height, m_display_linear_filtering); + } + + m_display.RenderImGui(); + + if (HasSoftwareCursor()) + { + const auto [left, top, width, height] = CalculateSoftwareCursorDrawRect(); + m_display.RenderSoftwareCursor(left, top, width, height, m_cursor_texture.get()); + } + + m_display.EndRenderAndPresent(); + + ImGui_ImplSDL2_NewFrame(m_window); +} diff --git a/src/duckstation-sdl/sdl_vulkan_host_display.h b/src/duckstation-sdl/sdl_vulkan_host_display.h new file mode 100644 index 000000000..76032e945 --- /dev/null +++ b/src/duckstation-sdl/sdl_vulkan_host_display.h @@ -0,0 +1,40 @@ +#pragma once +#include "core/host_display.h" +#include "frontend-common/vulkan_host_display.h" +#include +#include + +class SDLVulkanHostDisplay final : public HostDisplay +{ +public: + SDLVulkanHostDisplay(SDL_Window* window); + ~SDLVulkanHostDisplay(); + + static std::unique_ptr Create(SDL_Window* window, std::string_view shader_cache_directory, + bool debug_device); + + RenderAPI GetRenderAPI() const override; + void* GetRenderDevice() const override; + void* GetRenderContext() const override; + + void WindowResized(s32 new_window_width, s32 new_window_height) 
+
+  std::unique_ptr<HostDisplayTexture> CreateTexture(u32 width, u32 height, const void* data, u32 data_stride,
+                                                    bool dynamic = false) override;
+
+  void UpdateTexture(HostDisplayTexture* texture, u32 x, u32 y, u32 width, u32 height, const void* data,
+                     u32 data_stride) override;
+
+  bool DownloadTexture(const void* texture_handle, u32 x, u32 y, u32 width, u32 height, void* out_data,
+                       u32 out_data_stride) override;
+
+  void SetVSync(bool enabled) override;
+
+  void Render() override;
+
+private:
+  bool Initialize(std::string_view shader_cache_directory, bool debug_device);
+
+  SDL_Window* m_window = nullptr;
+  FrontendCommon::VulkanHostDisplay m_display;
+};
diff --git a/src/frontend-common/CMakeLists.txt b/src/frontend-common/CMakeLists.txt
index ecfa220bd..c70ce8ae3 100644
--- a/src/frontend-common/CMakeLists.txt
+++ b/src/frontend-common/CMakeLists.txt
@@ -11,9 +11,11 @@
   ini_settings_interface.h
   save_state_selector_ui.cpp
   save_state_selector_ui.h
+  vulkan_host_display.cpp
+  vulkan_host_display.h
 )
 
-target_link_libraries(frontend-common PUBLIC core common imgui simpleini scmversion)
+target_link_libraries(frontend-common PUBLIC core common imgui simpleini scmversion vulkan-loader)
 
 if(SDL2_FOUND)
   target_sources(frontend-common PRIVATE
@@ -43,4 +45,4 @@ endif()
 # Copy the provided data directory to the output directory.
 add_custom_command(TARGET frontend-common POST_BUILD
   COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_SOURCE_DIR}/data" "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}"
-)
\ No newline at end of file
+)
diff --git a/src/frontend-common/common_host_interface.cpp b/src/frontend-common/common_host_interface.cpp
index 393b7fde6..e627cf209 100644
--- a/src/frontend-common/common_host_interface.cpp
+++ b/src/frontend-common/common_host_interface.cpp
@@ -95,6 +95,9 @@ void CommonHostInterface::Shutdown()
 {
   HostInterface::Shutdown();
 
+  // this has GPU objects, so it has to be destroyed first
+  m_save_state_selector_ui.reset();
+
 #ifdef WITH_DISCORD_PRESENCE
   ShutdownDiscordPresence();
 #endif
diff --git a/src/frontend-common/frontend-common.vcxproj b/src/frontend-common/frontend-common.vcxproj
index 31b051895..983918f48 100644
--- a/src/frontend-common/frontend-common.vcxproj
+++ b/src/frontend-common/frontend-common.vcxproj
@@ -50,6 +50,9 @@
      {3773f4cc-614e-4028-8595-22e08ca649e3}
+    <ProjectReference Include="..\..\dep\vulkan-loader\vulkan-loader.vcxproj">
+      <Project>{9c8ddeb0-2b8f-4f5f-ba86-127cdf27f035}</Project>
+    </ProjectReference>
      {ee054e08-3799-4a59-a422-18259c105ffd}
@@ -67,6 +70,7 @@
+    <ClCompile Include="vulkan_host_display.cpp" />
@@ -78,6 +82,7 @@
+    <ClInclude Include="vulkan_host_display.h" />
@@ -227,7 +232,7 @@
      WITH_SDL2=1;WITH_DISCORD_PRESENCE=1;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)
      true
      ProgramDatabase
-      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
+      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
      true
      stdcpp17
      true
@@ -254,7 +259,7 @@
      WITH_SDL2=1;WITH_DISCORD_PRESENCE=1;_ITERATOR_DEBUG_LEVEL=1;WIN32;_DEBUGFAST;_DEBUG;_LIB;%(PreprocessorDefinitions)
      true
      ProgramDatabase
-      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
+      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
      Default
      false
      true
@@ -284,7 +289,7 @@
      WITH_SDL2=1;WITH_DISCORD_PRESENCE=1;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)
      true
      ProgramDatabase
-      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
+      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
      true
      stdcpp17
      true
@@ -311,7 +316,7 @@
      WITH_SDL2=1;WITH_DISCORD_PRESENCE=1;_ITERATOR_DEBUG_LEVEL=1;WIN32;_DEBUGFAST;_DEBUG;_LIB;%(PreprocessorDefinitions)
      true
      ProgramDatabase
-      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
+      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
      Default
      false
      true
@@ -342,7 +347,7 @@
      true
      WITH_SDL2=1;WITH_DISCORD_PRESENCE=1;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)
      true
-      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
+      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
      true
      stdcpp17
      false
@@ -371,7 +376,7 @@
      true
      WITH_SDL2=1;WITH_DISCORD_PRESENCE=1;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)
      true
-      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
+      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
      true
      true
      stdcpp17
@@ -402,7 +407,7 @@
      true
      WITH_SDL2=1;WITH_DISCORD_PRESENCE=1;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)
      true
-      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
+      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
      true
      stdcpp17
      false
@@ -431,7 +436,7 @@
      true
      WITH_SDL2=1;WITH_DISCORD_PRESENCE=1;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)
      true
-      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
+      $(SolutionDir)dep\msvc\include\SDL;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\discord-rpc\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)
      true
      true
      stdcpp17
diff --git a/src/frontend-common/frontend-common.vcxproj.filters b/src/frontend-common/frontend-common.vcxproj.filters
index bbe227b9f..93791c9ee 100644
--- a/src/frontend-common/frontend-common.vcxproj.filters
+++ b/src/frontend-common/frontend-common.vcxproj.filters
@@ -10,6 +10,7 @@
+    <ClCompile Include="vulkan_host_display.cpp" />
@@ -21,6 +22,7 @@
+    <ClInclude Include="vulkan_host_display.h" />
diff --git a/src/frontend-common/vulkan_host_display.cpp b/src/frontend-common/vulkan_host_display.cpp
new file mode 100644
index 000000000..36ad9406c
--- /dev/null
+++ b/src/frontend-common/vulkan_host_display.cpp
@@ -0,0 +1,492 @@
+#include "vulkan_host_display.h"
+#include "common/assert.h"
+#include "common/log.h"
+#include "common/vulkan/builders.h"
+#include "common/vulkan/context.h"
+#include "common/vulkan/shader_cache.h"
+#include "common/vulkan/staging_texture.h"
+#include "common/vulkan/stream_buffer.h"
+#include "common/vulkan/swap_chain.h"
+#include "common/vulkan/util.h"
+#include "imgui.h"
+#include "imgui_impl_vulkan.h"
+#include <memory>
+Log_SetChannel(VulkanHostDisplay);
+
+namespace FrontendCommon {
+
+class VulkanHostDisplayTexture : public HostDisplayTexture
+{
+public:
+  VulkanHostDisplayTexture(Vulkan::Texture texture, Vulkan::StagingTexture staging_texture)
+    : m_texture(std::move(texture)), m_staging_texture(std::move(staging_texture))
+  {
+  }
+  ~VulkanHostDisplayTexture() override = default;
+
+  void* GetHandle() const override { return const_cast<Vulkan::Texture*>(&m_texture); }
+  u32 GetWidth() const override { return m_texture.GetWidth(); }
+  u32 GetHeight() const override { return m_texture.GetHeight(); }
+
+  const Vulkan::Texture& GetTexture() const { return m_texture; }
+  Vulkan::Texture& GetTexture() { return m_texture; }
+  Vulkan::StagingTexture& GetStagingTexture() { return m_staging_texture; }
+
+  static std::unique_ptr<VulkanHostDisplayTexture> Create(u32 width, u32 height, const void* data, u32 data_stride,
+                                                          bool dynamic)
+  {
+    static constexpr VkFormat format = VK_FORMAT_R8G8B8A8_UNORM;
+    static constexpr VkImageUsageFlags usage =
+      VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+
+    Vulkan::Texture texture;
+    if (!texture.Create(width, height, 1, 1, format, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_VIEW_TYPE_2D,
+                        VK_IMAGE_TILING_OPTIMAL, usage))
+    {
+      return {};
+    }
+
+    Vulkan::StagingTexture staging_texture;
+    if (data || dynamic)
+    {
+      if (!staging_texture.Create(dynamic ? Vulkan::StagingBuffer::Type::Mutable : Vulkan::StagingBuffer::Type::Upload,
+                                  format, width, height))
+      {
+        return {};
+      }
+    }
+
+    texture.TransitionToLayout(g_vulkan_context->GetCurrentCommandBuffer(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+    if (data)
+    {
+      staging_texture.WriteTexels(0, 0, width, height, data, data_stride);
+      staging_texture.CopyToTexture(g_vulkan_context->GetCurrentCommandBuffer(), 0, 0, texture, 0, 0, 0, 0, width,
+                                    height);
+    }
+    else
+    {
+      // clear it instead so we don't read uninitialized data (and keep the validation layer happy!)
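+      // (vkCmdClearColorImage requires the image to be in TRANSFER_DST_OPTIMAL or GENERAL layout; the transition above provides that)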
+      static constexpr VkClearColorValue ccv = {};
+      static constexpr VkImageSubresourceRange isr = {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u};
+      vkCmdClearColorImage(g_vulkan_context->GetCurrentCommandBuffer(), texture.GetImage(), texture.GetLayout(), &ccv,
+                           1u, &isr);
+    }
+
+    texture.TransitionToLayout(g_vulkan_context->GetCurrentCommandBuffer(), VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+
+    // don't need to keep the staging texture around if we're not dynamic
+    if (!dynamic)
+      staging_texture.Destroy(true);
+
+    return std::make_unique<VulkanHostDisplayTexture>(std::move(texture), std::move(staging_texture));
+  }
+
+private:
+  Vulkan::Texture m_texture;
+  Vulkan::StagingTexture m_staging_texture;
+};
+
+VulkanHostDisplay::VulkanHostDisplay() = default;
+
+VulkanHostDisplay::~VulkanHostDisplay()
+{
+  AssertMsg(!g_vulkan_context, "Context should have been destroyed by now");
+  AssertMsg(!m_swap_chain, "Swap chain should have been destroyed by now");
+}
+
+bool VulkanHostDisplay::RecreateSwapChain(const WindowInfo& new_wi)
+{
+  Assert(!m_swap_chain);
+
+  VkSurfaceKHR surface = Vulkan::SwapChain::CreateVulkanSurface(g_vulkan_context->GetVulkanInstance(), new_wi);
+  if (surface == VK_NULL_HANDLE)
+  {
+    Log_ErrorPrintf("Failed to create new surface for swap chain");
+    return false;
+  }
+
+  m_swap_chain = Vulkan::SwapChain::Create(new_wi, surface, false);
+  if (!m_swap_chain)
+  {
+    Log_ErrorPrintf("Failed to create swap chain");
+    return false;
+  }
+
+  return true;
+}
+
+void VulkanHostDisplay::ResizeSwapChain(u32 new_width, u32 new_height)
+{
+  g_vulkan_context->WaitForGPUIdle();
+
+  if (!m_swap_chain->ResizeSwapChain(new_width, new_height))
+    Panic("Failed to resize swap chain");
+
+  ImGui::GetIO().DisplaySize.x = static_cast<float>(m_swap_chain->GetWidth());
+  ImGui::GetIO().DisplaySize.y = static_cast<float>(m_swap_chain->GetHeight());
+}
+
+void VulkanHostDisplay::DestroySwapChain()
+{
+  m_swap_chain.reset();
+}
+
+std::unique_ptr<HostDisplayTexture> VulkanHostDisplay::CreateTexture(u32 width, u32 height, const void* data,
+                                                                     u32 data_stride, bool dynamic)
+{
+  return VulkanHostDisplayTexture::Create(width, height, data, data_stride, dynamic);
+}
+
+void VulkanHostDisplay::UpdateTexture(HostDisplayTexture* texture, u32 x, u32 y, u32 width, u32 height,
+                                      const void* data, u32 data_stride)
+{
+  VulkanHostDisplayTexture* vk_texture = static_cast<VulkanHostDisplayTexture*>(texture);
+
+  Vulkan::StagingTexture* staging_texture;
+  if (vk_texture->GetStagingTexture().IsValid())
+  {
+    staging_texture = &vk_texture->GetStagingTexture();
+  }
+  else
+  {
+    // TODO: This should use a stream buffer instead for speed.
+    if (m_upload_staging_texture.IsValid())
+      m_upload_staging_texture.Flush();
+
+    if ((m_upload_staging_texture.GetWidth() < width || m_upload_staging_texture.GetHeight() < height) &&
+        !m_upload_staging_texture.Create(Vulkan::StagingBuffer::Type::Upload, VK_FORMAT_R8G8B8A8_UNORM, width, height))
+    {
+      Panic("Failed to create upload staging texture");
+    }
+
+    staging_texture = &m_upload_staging_texture;
+  }
+
+  staging_texture->WriteTexels(0, 0, width, height, data, data_stride);
+  staging_texture->CopyToTexture(0, 0, vk_texture->GetTexture(), x, y, 0, 0, width, height);
+}
+
+bool VulkanHostDisplay::DownloadTexture(const void* texture_handle, u32 x, u32 y, u32 width, u32 height,
+                                        void* out_data, u32 out_data_stride)
+{
+  Vulkan::Texture* texture = static_cast<Vulkan::Texture*>(const_cast<void*>(texture_handle));
+
+  if ((m_readback_staging_texture.GetWidth() < width || m_readback_staging_texture.GetHeight() < height) &&
+      !m_readback_staging_texture.Create(Vulkan::StagingBuffer::Type::Readback, VK_FORMAT_R8G8B8A8_UNORM, width,
+                                         height))
+  {
+    return false;
+  }
+
+  m_readback_staging_texture.CopyFromTexture(*texture, x, y, 0, 0, 0, 0, width, height);
+  m_readback_staging_texture.ReadTexels(0, 0, width, height, out_data, out_data_stride);
+  return true;
+}
+
+void VulkanHostDisplay::SetVSync(bool enabled)
+{
+  // This swap chain should not be used by the current buffer, thus safe to destroy.
+  g_vulkan_context->WaitForGPUIdle();
+  m_swap_chain->SetVSync(enabled);
+}
+
+bool VulkanHostDisplay::CreateContextAndSwapChain(const WindowInfo& wi, bool debug_device)
+{
+  if (!Vulkan::Context::Create(0u, &wi, &m_swap_chain, debug_device, false))
+  {
+    Log_ErrorPrintf("Failed to create Vulkan context");
+    return false;
+  }
+
+  return true;
+}
+
+void VulkanHostDisplay::CreateShaderCache(std::string_view shader_cache_directory, bool debug_shaders)
+{
+  Vulkan::ShaderCache::Create(shader_cache_directory, debug_shaders);
+}
+
+bool VulkanHostDisplay::HasContext() const
+{
+  return static_cast<bool>(g_vulkan_context);
+}
+
+bool VulkanHostDisplay::CreateResources()
+{
+  static constexpr char fullscreen_quad_vertex_shader[] = R"(
+#version 450 core
+
+layout(push_constant) uniform PushConstants {
+  uniform vec4 u_src_rect;
+};
+
+layout(location = 0) out vec2 v_tex0;
+
+void main()
+{
+  vec2 pos = vec2(float((gl_VertexIndex << 1) & 2), float(gl_VertexIndex & 2));
+  v_tex0 = u_src_rect.xy + pos * u_src_rect.zw;
+  gl_Position = vec4(pos * vec2(2.0f, -2.0f) + vec2(-1.0f, 1.0f), 0.0f, 1.0f);
+  gl_Position.y = -gl_Position.y;
+}
+)";
+
+  static constexpr char display_fragment_shader[] = R"(
+#version 450 core
+
+layout(set = 0, binding = 0) uniform sampler2D samp0;
+
+layout(location = 0) in vec2 v_tex0;
+layout(location = 0) out vec4 o_col0;
+
+void main()
+{
+  o_col0 = texture(samp0, v_tex0);
+}
+)";
+
+  VkDevice device = g_vulkan_context->GetDevice();
+  VkPipelineCache pipeline_cache = g_vulkan_shader_cache->GetPipelineCache();
+
+  Vulkan::DescriptorSetLayoutBuilder dslbuilder;
+  dslbuilder.AddBinding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
+  m_descriptor_set_layout = dslbuilder.Create(device);
+  if (m_descriptor_set_layout == VK_NULL_HANDLE)
+    return false;
+
+  Vulkan::PipelineLayoutBuilder plbuilder;
+  plbuilder.AddDescriptorSet(m_descriptor_set_layout);
+  plbuilder.AddPushConstants(VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(PushConstants));
+  m_pipeline_layout = plbuilder.Create(device);
+  if (m_pipeline_layout == VK_NULL_HANDLE)
+    return false;
+
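+  // the vertex shader above synthesizes a fullscreen triangle from gl_VertexIndex (no vertex buffer needed);
+  // the compiled modules are destroyed below once the pipelines have been built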
+  VkShaderModule vertex_shader = g_vulkan_shader_cache->GetVertexShader(fullscreen_quad_vertex_shader);
+  if (vertex_shader == VK_NULL_HANDLE)
+    return false;
+
+  VkShaderModule fragment_shader = g_vulkan_shader_cache->GetFragmentShader(display_fragment_shader);
+  if (fragment_shader == VK_NULL_HANDLE)
+    return false;
+
+  Vulkan::GraphicsPipelineBuilder gpbuilder;
+  gpbuilder.SetVertexShader(vertex_shader);
+  gpbuilder.SetFragmentShader(fragment_shader);
+  gpbuilder.SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
+  gpbuilder.SetNoCullRasterizationState();
+  gpbuilder.SetNoDepthTestState();
+  gpbuilder.SetNoBlendingState();
+  gpbuilder.SetDynamicViewportAndScissorState();
+  gpbuilder.SetPipelineLayout(m_pipeline_layout);
+  gpbuilder.SetRenderPass(m_swap_chain->GetClearRenderPass(), 0);
+
+  m_display_pipeline = gpbuilder.Create(device, pipeline_cache, false);
+  if (m_display_pipeline == VK_NULL_HANDLE)
+    return false;
+
+  gpbuilder.SetBlendAttachment(0, true, VK_BLEND_FACTOR_SRC_ALPHA, VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, VK_BLEND_OP_ADD,
+                               VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD);
+  m_software_cursor_pipeline = gpbuilder.Create(device, pipeline_cache, false);
+  if (m_software_cursor_pipeline == VK_NULL_HANDLE)
+    return false;
+
+  // don't need these anymore
+  vkDestroyShaderModule(device, vertex_shader, nullptr);
+  vkDestroyShaderModule(device, fragment_shader, nullptr);
+
+  Vulkan::SamplerBuilder sbuilder;
+  sbuilder.SetPointSampler(VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER);
+  m_point_sampler = sbuilder.Create(device, true);
+  if (m_point_sampler == VK_NULL_HANDLE)
+    return false;
+
+  sbuilder.SetLinearSampler(false, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER);
+  m_linear_sampler = sbuilder.Create(device);
+  if (m_linear_sampler == VK_NULL_HANDLE)
+    return false;
+
+  return true;
+}
+
+void VulkanHostDisplay::DestroyResources()
+{
+  m_readback_staging_texture.Destroy(false);
+  m_upload_staging_texture.Destroy(false);
+
+  Vulkan::Util::SafeDestroyPipeline(m_display_pipeline);
+  Vulkan::Util::SafeDestroyPipeline(m_software_cursor_pipeline);
+  Vulkan::Util::SafeDestroyPipelineLayout(m_pipeline_layout);
+  Vulkan::Util::SafeDestroyDescriptorSetLayout(m_descriptor_set_layout);
+  Vulkan::Util::SafeDestroySampler(m_point_sampler);
+  Vulkan::Util::SafeDestroySampler(m_linear_sampler);
+}
+
+void VulkanHostDisplay::DestroyImGuiContext()
+{
+  ImGui_ImplVulkan_Shutdown();
+}
+
+void VulkanHostDisplay::DestroyContext()
+{
+  if (!g_vulkan_context)
+    return;
+
+  g_vulkan_context->WaitForGPUIdle();
+  Vulkan::Context::Destroy();
+}
+
+void VulkanHostDisplay::DestroyShaderCache()
+{
+  Vulkan::ShaderCache::Destroy();
+}
+
+bool VulkanHostDisplay::CreateImGuiContext()
+{
+  ImGui::GetIO().DisplaySize.x = static_cast<float>(m_swap_chain->GetWidth());
+  ImGui::GetIO().DisplaySize.y = static_cast<float>(m_swap_chain->GetHeight());
+
+  ImGui_ImplVulkan_InitInfo vii = {};
+  vii.Instance = g_vulkan_context->GetVulkanInstance();
+  vii.PhysicalDevice = g_vulkan_context->GetPhysicalDevice();
+  vii.Device = g_vulkan_context->GetDevice();
+  vii.QueueFamily = g_vulkan_context->GetGraphicsQueueFamilyIndex();
+  vii.Queue = g_vulkan_context->GetGraphicsQueue();
+  vii.PipelineCache = g_vulkan_shader_cache->GetPipelineCache();
+  vii.DescriptorPool = g_vulkan_context->GetGlobalDescriptorPool();
+  vii.MinImageCount = m_swap_chain->GetImageCount();
+  vii.ImageCount = m_swap_chain->GetImageCount();
+  vii.MSAASamples = VK_SAMPLE_COUNT_1_BIT;
+
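+  // the font atlas upload is recorded into the frame's command buffer, so an active command buffer is required here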
+  if (!ImGui_ImplVulkan_Init(&vii, m_swap_chain->GetClearRenderPass()) ||
+      !ImGui_ImplVulkan_CreateFontsTexture(g_vulkan_context->GetCurrentCommandBuffer()))
+  {
+    return false;
+  }
+
+  ImGui_ImplVulkan_NewFrame();
+  return true;
+}
+
+bool VulkanHostDisplay::BeginRender()
+{
+  VkResult res = m_swap_chain->AcquireNextImage();
+  if (res != VK_SUCCESS)
+  {
+    if (res == VK_SUBOPTIMAL_KHR || res == VK_ERROR_OUT_OF_DATE_KHR)
+    {
+      ResizeSwapChain(0, 0);
+      res = m_swap_chain->AcquireNextImage();
+    }
+
+    if (res != VK_SUCCESS)
+    {
+      Panic("Failed to acquire swap chain image");
+      return false;
+    }
+  }
+
+  VkCommandBuffer cmdbuffer = g_vulkan_context->GetCurrentCommandBuffer();
+  Vulkan::Texture& swap_chain_texture = m_swap_chain->GetCurrentTexture();
+
+  // Swap chain images start in undefined
+  swap_chain_texture.OverrideImageLayout(VK_IMAGE_LAYOUT_UNDEFINED);
+  swap_chain_texture.TransitionToLayout(cmdbuffer, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+  const VkClearValue clear_value = {};
+
+  const VkRenderPassBeginInfo rp = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+                                    nullptr,
+                                    m_swap_chain->GetClearRenderPass(),
+                                    m_swap_chain->GetCurrentFramebuffer(),
+                                    {{0, 0}, {m_swap_chain->GetWidth(), m_swap_chain->GetHeight()}},
+                                    1u,
+                                    &clear_value};
+  vkCmdBeginRenderPass(cmdbuffer, &rp, VK_SUBPASS_CONTENTS_INLINE);
+  return true;
+}
+
+void VulkanHostDisplay::EndRenderAndPresent()
+{
+  VkCommandBuffer cmdbuffer = g_vulkan_context->GetCurrentCommandBuffer();
+  Vulkan::Texture& swap_chain_texture = m_swap_chain->GetCurrentTexture();
+
+  vkCmdEndRenderPass(cmdbuffer);
+
+  swap_chain_texture.TransitionToLayout(cmdbuffer, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
+
+  g_vulkan_context->SubmitCommandBuffer(m_swap_chain->GetImageAvailableSemaphore(),
+                                        m_swap_chain->GetRenderingFinishedSemaphore(), m_swap_chain->GetSwapChain(),
+                                        m_swap_chain->GetCurrentImageIndex());
+  g_vulkan_context->MoveToNextCommandBuffer();
+
+  ImGui::NewFrame();
+  ImGui_ImplVulkan_NewFrame();
+}
+
+void VulkanHostDisplay::RenderDisplay(s32 left, s32 top, s32 width, s32 height, void* texture_handle,
+                                      u32 texture_width, u32 texture_height, u32 texture_view_x, u32 texture_view_y,
+                                      u32 texture_view_width, u32 texture_view_height, bool linear_filter)
+{
+  VkCommandBuffer cmdbuffer = g_vulkan_context->GetCurrentCommandBuffer();
+
+  VkDescriptorSet ds = g_vulkan_context->AllocateDescriptorSet(m_descriptor_set_layout);
+  if (ds == VK_NULL_HANDLE)
+  {
+    Log_ErrorPrintf("Skipping rendering display because of no descriptor set");
+    return;
+  }
+
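+  // bind the source texture and sampler through the freshly-allocated descriptor set, then draw one fullscreen triangle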
+  {
+    const Vulkan::Texture* vktex = static_cast<const Vulkan::Texture*>(texture_handle);
+    Vulkan::DescriptorSetUpdateBuilder dsupdate;
+    dsupdate.AddCombinedImageSamplerDescriptorWrite(ds, 0, vktex->GetView(),
+                                                    linear_filter ? m_linear_sampler : m_point_sampler,
+                                                    vktex->GetLayout());
+    dsupdate.Update(g_vulkan_context->GetDevice());
+  }
+
+  const PushConstants pc{static_cast<float>(texture_view_x) / static_cast<float>(texture_width),
+                         static_cast<float>(texture_view_y) / static_cast<float>(texture_height),
+                         (static_cast<float>(texture_view_width) - 0.5f) / static_cast<float>(texture_width),
+                         (static_cast<float>(texture_view_height) - 0.5f) / static_cast<float>(texture_height)};
+
+  vkCmdBindPipeline(cmdbuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_display_pipeline);
+  vkCmdPushConstants(cmdbuffer, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(pc), &pc);
+  vkCmdBindDescriptorSets(cmdbuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline_layout, 0, 1, &ds, 0, nullptr);
+  Vulkan::Util::SetViewportAndScissor(cmdbuffer, left, top, width, height);
+  vkCmdDraw(cmdbuffer, 3, 1, 0, 0);
+}
+
+void VulkanHostDisplay::RenderImGui()
+{
+  ImGui::Render();
+  ImGui_ImplVulkan_RenderDrawData(ImGui::GetDrawData(), g_vulkan_context->GetCurrentCommandBuffer());
+}
+
+void VulkanHostDisplay::RenderSoftwareCursor(s32 left, s32 top, s32 width, s32 height, HostDisplayTexture* texture)
+{
+  VkCommandBuffer cmdbuffer = g_vulkan_context->GetCurrentCommandBuffer();
+
+  VkDescriptorSet ds = g_vulkan_context->AllocateDescriptorSet(m_descriptor_set_layout);
+  if (ds == VK_NULL_HANDLE)
+  {
+    Log_ErrorPrintf("Skipping rendering software cursor because of no descriptor set");
+    return;
+  }
+
+  {
+    Vulkan::DescriptorSetUpdateBuilder dsupdate;
+    dsupdate.AddImageDescriptorWrite(ds, 0, static_cast<VulkanHostDisplayTexture*>(texture)->GetTexture().GetView());
+    dsupdate.AddSamplerDescriptorWrite(ds, 0, m_linear_sampler);
+    dsupdate.Update(g_vulkan_context->GetDevice());
+  }
+
+  const PushConstants pc{0.0f, 0.0f, 1.0f, 1.0f};
+  vkCmdBindPipeline(cmdbuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_software_cursor_pipeline);
+  vkCmdPushConstants(cmdbuffer, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(pc), &pc);
+  vkCmdBindDescriptorSets(cmdbuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline_layout, 0, 1, &ds, 0, nullptr);
+  Vulkan::Util::SetViewportAndScissor(cmdbuffer, left, top, width, height);
+  vkCmdDraw(cmdbuffer, 3, 1, 0, 0);
+}
+
+} // namespace FrontendCommon
\ No newline at end of file
diff --git a/src/frontend-common/vulkan_host_display.h b/src/frontend-common/vulkan_host_display.h
new file mode 100644
index 000000000..774c74f79
--- /dev/null
+++ b/src/frontend-common/vulkan_host_display.h
@@ -0,0 +1,87 @@
+#pragma once
+#include "common/vulkan/staging_texture.h"
+#include "common/vulkan/swap_chain.h"
+#include "common/window_info.h"
+#include "core/host_display.h"
+#include "vulkan_loader.h"
+#include <memory>
+#include <string_view>
+
+namespace Vulkan {
+class StreamBuffer;
+class SwapChain;
+} // namespace Vulkan
+
+namespace FrontendCommon {
+
+class VulkanHostDisplay
+{
+public:
+  VulkanHostDisplay();
+  ~VulkanHostDisplay();
+
+  ALWAYS_INLINE HostDisplay::RenderAPI GetRenderAPI() const { return HostDisplay::RenderAPI::Vulkan; }
+  ALWAYS_INLINE void* GetRenderDevice() const { return nullptr; }
+  ALWAYS_INLINE void* GetRenderContext() const { return nullptr; }
+
+  std::unique_ptr<HostDisplayTexture> CreateTexture(u32 width, u32 height, const void* data, u32 data_stride,
+                                                    bool dynamic);
+  void UpdateTexture(HostDisplayTexture* texture, u32 x, u32 y, u32 width, u32 height, const void* data,
+                     u32 data_stride);
+  bool DownloadTexture(const void* texture_handle, u32 x, u32 y, u32 width, u32 height, void* out_data,
+                       u32 out_data_stride);
+
+  void SetVSync(bool enabled);
+
+  bool BeginRender();
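+  // per-frame flow: BeginRender(), then RenderDisplay()/RenderImGui()/RenderSoftwareCursor(), then EndRenderAndPresent()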
+  void RenderDisplay(s32 left, s32 top, s32 width, s32 height, void* texture_handle, u32 texture_width,
+                     u32 texture_height, u32 texture_view_x, u32 texture_view_y, u32 texture_view_width,
+                     u32 texture_view_height, bool linear_filter);
+  void RenderImGui();
+  void RenderSoftwareCursor(s32 left, s32 top, s32 width, s32 height, HostDisplayTexture* texture_handle);
+  void EndRenderAndPresent();
+
+  bool CreateContextAndSwapChain(const WindowInfo& wi, bool debug_device);
+  bool HasContext() const;
+  void DestroyContext();
+
+  void CreateShaderCache(std::string_view shader_cache_directory, bool debug_shaders);
+  void DestroyShaderCache();
+
+  bool CreateResources();
+  void DestroyResources();
+
+  bool CreateImGuiContext();
+  void DestroyImGuiContext();
+
+  ALWAYS_INLINE u32 GetSwapChainWidth() const { return m_swap_chain->GetWidth(); }
+  ALWAYS_INLINE u32 GetSwapChainHeight() const { return m_swap_chain->GetHeight(); }
+  ALWAYS_INLINE bool HasSwapChain() const { return static_cast<bool>(m_swap_chain); }
+
+  bool RecreateSwapChain(const WindowInfo& new_wi);
+  void ResizeSwapChain(u32 new_width, u32 new_height);
+  void DestroySwapChain();
+
+private:
+  struct PushConstants
+  {
+    float src_rect_left;
+    float src_rect_top;
+    float src_rect_width;
+    float src_rect_height;
+  };
+
+  std::unique_ptr<Vulkan::SwapChain> m_swap_chain;
+
+  VkDescriptorSetLayout m_descriptor_set_layout = VK_NULL_HANDLE;
+  VkPipelineLayout m_pipeline_layout = VK_NULL_HANDLE;
+  VkPipeline m_software_cursor_pipeline = VK_NULL_HANDLE;
+  VkPipeline m_display_pipeline = VK_NULL_HANDLE;
+  VkSampler m_point_sampler = VK_NULL_HANDLE;
+  VkSampler m_linear_sampler = VK_NULL_HANDLE;
+
+  Vulkan::StagingTexture m_upload_staging_texture;
+  Vulkan::StagingTexture m_readback_staging_texture;
+};
+
+} // namespace FrontendCommon
\ No newline at end of file