Common: Add a range of Vulkan helper/wrapper classes

Connor McLaughlin 2020-06-19 00:18:07 +10:00
parent 2c0a28398c
commit 1399e95b77
23 changed files with 6044 additions and 9 deletions

src/common/CMakeLists.txt

@@ -65,13 +65,33 @@ add_library(common
timestamp.cpp
timestamp.h
types.h
vulkan/builders.cpp
vulkan/builders.h
vulkan/context.cpp
vulkan/context.h
vulkan/shader_cache.cpp
vulkan/shader_cache.h
vulkan/shader_compiler.cpp
vulkan/shader_compiler.h
vulkan/staging_buffer.cpp
vulkan/staging_buffer.h
vulkan/staging_texture.cpp
vulkan/staging_texture.h
vulkan/stream_buffer.cpp
vulkan/stream_buffer.h
vulkan/swap_chain.cpp
vulkan/swap_chain.h
vulkan/texture.cpp
vulkan/texture.h
vulkan/util.cpp
vulkan/util.h
wav_writer.cpp
wav_writer.h
)
target_include_directories(common PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/..")
target_include_directories(common PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/..")
-target_link_libraries(common PRIVATE glad libcue Threads::Threads cubeb libchdr)
+target_link_libraries(common PRIVATE glad libcue Threads::Threads cubeb libchdr glslang vulkan-loader)
if(WIN32)
target_sources(common PRIVATE

src/common/common.vcxproj

@@ -78,6 +78,16 @@
<ClInclude Include="timestamp.h" />
<ClInclude Include="types.h" />
<ClInclude Include="cd_xa.h" />
<ClInclude Include="vulkan\builders.h" />
<ClInclude Include="vulkan\context.h" />
<ClInclude Include="vulkan\shader_cache.h" />
<ClInclude Include="vulkan\shader_compiler.h" />
<ClInclude Include="vulkan\staging_buffer.h" />
<ClInclude Include="vulkan\staging_texture.h" />
<ClInclude Include="vulkan\stream_buffer.h" />
<ClInclude Include="vulkan\swap_chain.h" />
<ClInclude Include="vulkan\texture.h" />
<ClInclude Include="vulkan\util.h" />
<ClInclude Include="wav_writer.h" /> <ClInclude Include="wav_writer.h" />
<ClInclude Include="window_info.h" /> <ClInclude Include="window_info.h" />
</ItemGroup> </ItemGroup>
@ -117,6 +127,16 @@
<ClCompile Include="string_util.cpp" /> <ClCompile Include="string_util.cpp" />
<ClCompile Include="timer.cpp" /> <ClCompile Include="timer.cpp" />
<ClCompile Include="timestamp.cpp" /> <ClCompile Include="timestamp.cpp" />
<ClCompile Include="vulkan\builders.cpp" />
<ClCompile Include="vulkan\context.cpp" />
<ClCompile Include="vulkan\shader_cache.cpp" />
<ClCompile Include="vulkan\shader_compiler.cpp" />
<ClCompile Include="vulkan\staging_buffer.cpp" />
<ClCompile Include="vulkan\staging_texture.cpp" />
<ClCompile Include="vulkan\stream_buffer.cpp" />
<ClCompile Include="vulkan\swap_chain.cpp" />
<ClCompile Include="vulkan\texture.cpp" />
<ClCompile Include="vulkan\util.cpp" />
<ClCompile Include="wav_writer.cpp" /> <ClCompile Include="wav_writer.cpp" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
@ -129,6 +149,9 @@
<ProjectReference Include="..\..\dep\glad\glad.vcxproj"> <ProjectReference Include="..\..\dep\glad\glad.vcxproj">
<Project>{43540154-9e1e-409c-834f-b84be5621388}</Project> <Project>{43540154-9e1e-409c-834f-b84be5621388}</Project>
</ProjectReference> </ProjectReference>
<ProjectReference Include="..\..\dep\glslang\glslang.vcxproj">
<Project>{7f909e29-4808-4bd9-a60c-56c51a3aaec2}</Project>
</ProjectReference>
<ProjectReference Include="..\..\dep\libchdr\libchdr.vcxproj"> <ProjectReference Include="..\..\dep\libchdr\libchdr.vcxproj">
<Project>{425d6c99-d1c8-43c2-b8ac-4d7b1d941017}</Project> <Project>{425d6c99-d1c8-43c2-b8ac-4d7b1d941017}</Project>
</ProjectReference> </ProjectReference>
@ -277,7 +300,7 @@
<PreprocessorDefinitions>WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions> <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck> <SDLCheck>true</SDLCheck>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat> <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation> <MultiProcessorCompilation>true</MultiProcessorCompilation>
<LanguageStandard>stdcpp17</LanguageStandard> <LanguageStandard>stdcpp17</LanguageStandard>
<ConformanceMode>true</ConformanceMode> <ConformanceMode>true</ConformanceMode>
@ -303,7 +326,7 @@
<PreprocessorDefinitions>_ITERATOR_DEBUG_LEVEL=1;WIN32;_DEBUGFAST;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions> <PreprocessorDefinitions>_ITERATOR_DEBUG_LEVEL=1;WIN32;_DEBUGFAST;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck> <SDLCheck>true</SDLCheck>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat> <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<BasicRuntimeChecks>Default</BasicRuntimeChecks> <BasicRuntimeChecks>Default</BasicRuntimeChecks>
<SupportJustMyCode>false</SupportJustMyCode> <SupportJustMyCode>false</SupportJustMyCode>
<MultiProcessorCompilation>true</MultiProcessorCompilation> <MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -332,7 +355,7 @@
<PreprocessorDefinitions>WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
-<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<LanguageStandard>stdcpp17</LanguageStandard>
<ConformanceMode>true</ConformanceMode>
@@ -358,7 +381,7 @@
<PreprocessorDefinitions>_ITERATOR_DEBUG_LEVEL=1;WIN32;_DEBUGFAST;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
-<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<BasicRuntimeChecks>Default</BasicRuntimeChecks>
<SupportJustMyCode>false</SupportJustMyCode>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -388,7 +411,7 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
-<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<LanguageStandard>stdcpp17</LanguageStandard>
<WholeProgramOptimization>false</WholeProgramOptimization>
@@ -418,7 +441,7 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
-<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<OmitFramePointers>true</OmitFramePointers>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<LanguageStandard>stdcpp17</LanguageStandard>
@@ -448,7 +471,7 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
-<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<LanguageStandard>stdcpp17</LanguageStandard>
<WholeProgramOptimization>false</WholeProgramOptimization>
@@ -478,7 +501,7 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
-<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)dep\glad\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\libcue\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\vulkan-loader\include;$(SolutionDir)dep\glslang;$(SolutionDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<OmitFramePointers>true</OmitFramePointers>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<LanguageStandard>stdcpp17</LanguageStandard>

src/common/common.vcxproj.filters

@@ -66,8 +66,38 @@
</ClInclude>
<ClInclude Include="window_info.h" />
<ClInclude Include="cd_image_hasher.h" />
<ClInclude Include="vulkan\texture.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\staging_buffer.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\stream_buffer.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\shader_compiler.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\util.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\swap_chain.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\staging_texture.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="dimensional_array.h" /> <ClInclude Include="dimensional_array.h" />
<ClInclude Include="scope_guard.h" /> <ClInclude Include="scope_guard.h" />
<ClInclude Include="vulkan\context.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\builders.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\shader_cache.h">
<Filter>vulkan</Filter>
</ClInclude>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<ClCompile Include="jit_code_buffer.cpp" /> <ClCompile Include="jit_code_buffer.cpp" />
@ -128,6 +158,36 @@
<Filter>gl</Filter> <Filter>gl</Filter>
</ClCompile> </ClCompile>
<ClCompile Include="cd_image_hasher.cpp" /> <ClCompile Include="cd_image_hasher.cpp" />
<ClCompile Include="vulkan\texture.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\context.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\staging_buffer.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\stream_buffer.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\util.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\shader_compiler.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\swap_chain.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\staging_texture.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\builders.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\shader_cache.cpp">
<Filter>vulkan</Filter>
</ClCompile>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<Natvis Include="bitfield.natvis" /> <Natvis Include="bitfield.natvis" />
@ -139,5 +199,8 @@
<Filter Include="d3d11"> <Filter Include="d3d11">
<UniqueIdentifier>{30251086-81f3-44f5-add4-7ff9a24098ab}</UniqueIdentifier> <UniqueIdentifier>{30251086-81f3-44f5-add4-7ff9a24098ab}</UniqueIdentifier>
</Filter> </Filter>
<Filter Include="vulkan">
<UniqueIdentifier>{642ff5eb-af39-4aab-a42f-6eb8188a11d7}</UniqueIdentifier>
</Filter>
</ItemGroup> </ItemGroup>
</Project> </Project>

src/common/vulkan/builders.cpp Normal file

@@ -0,0 +1,740 @@
#include "builders.h"
#include "../assert.h"
#include "util.h"
namespace Vulkan {
DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder()
{
Clear();
}
void DescriptorSetLayoutBuilder::Clear()
{
m_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
m_ci.pNext = nullptr;
m_ci.flags = 0;
m_ci.pBindings = nullptr;
m_ci.bindingCount = 0;
}
VkDescriptorSetLayout DescriptorSetLayoutBuilder::Create(VkDevice device)
{
VkDescriptorSetLayout layout;
VkResult res = vkCreateDescriptorSetLayout(device, &m_ci, nullptr, &layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDescriptorSetLayout() failed: ");
return VK_NULL_HANDLE;
}
Clear();
return layout;
}
void DescriptorSetLayoutBuilder::AddBinding(u32 binding, VkDescriptorType dtype, u32 dcount, VkShaderStageFlags stages)
{
Assert(m_ci.bindingCount < MAX_BINDINGS);
VkDescriptorSetLayoutBinding& b = m_bindings[m_ci.bindingCount];
b.binding = binding;
b.descriptorType = dtype;
b.descriptorCount = dcount;
b.stageFlags = stages;
b.pImmutableSamplers = nullptr;
m_ci.pBindings = m_bindings.data();
m_ci.bindingCount++;
}
PipelineLayoutBuilder::PipelineLayoutBuilder()
{
Clear();
}
void PipelineLayoutBuilder::Clear()
{
m_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
m_ci.pNext = nullptr;
m_ci.flags = 0;
m_ci.pSetLayouts = nullptr;
m_ci.setLayoutCount = 0;
m_ci.pPushConstantRanges = nullptr;
m_ci.pushConstantRangeCount = 0;
}
VkPipelineLayout PipelineLayoutBuilder::Create(VkDevice device)
{
VkPipelineLayout layout;
VkResult res = vkCreatePipelineLayout(device, &m_ci, nullptr, &layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineLayout() failed: ");
return VK_NULL_HANDLE;
}
Clear();
return layout;
}
void PipelineLayoutBuilder::AddDescriptorSet(VkDescriptorSetLayout layout)
{
Assert(m_ci.setLayoutCount < MAX_SETS);
m_sets[m_ci.setLayoutCount] = layout;
m_ci.setLayoutCount++;
m_ci.pSetLayouts = m_sets.data();
}
void PipelineLayoutBuilder::AddPushConstants(VkShaderStageFlags stages, u32 offset, u32 size)
{
Assert(m_ci.pushConstantRangeCount < MAX_PUSH_CONSTANTS);
VkPushConstantRange& r = m_push_constants[m_ci.pushConstantRangeCount];
r.stageFlags = stages;
r.offset = offset;
r.size = size;
m_ci.pushConstantRangeCount++;
m_ci.pPushConstantRanges = m_push_constants.data();
}
GraphicsPipelineBuilder::GraphicsPipelineBuilder()
{
Clear();
}
void GraphicsPipelineBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
m_shader_stages = {};
m_vertex_input_state = {};
m_vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
m_ci.pVertexInputState = &m_vertex_input_state;
m_vertex_attributes = {};
m_vertex_buffers = {};
m_input_assembly = {};
m_input_assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
m_rasterization_state = {};
m_rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
m_rasterization_state.lineWidth = 1.0f;
m_depth_state = {};
m_depth_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
m_blend_state = {};
m_blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
m_blend_attachments = {};
m_viewport_state = {};
m_viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
m_viewport = {};
m_scissor = {};
m_dynamic_state = {};
m_dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
m_dynamic_state_values = {};
m_multisample_state = {};
m_multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
// set defaults
SetNoCullRasterizationState();
SetNoDepthTestState();
SetNoBlendingState();
SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
// have to be specified even if dynamic
SetViewport(0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f);
SetScissorRect(0, 0, 1, 1);
SetMultisamples(VK_SAMPLE_COUNT_1_BIT);
}
VkPipeline GraphicsPipelineBuilder::Create(VkDevice device, VkPipelineCache pipeline_cache, bool clear /* = true */)
{
VkPipeline pipeline;
VkResult res = vkCreateGraphicsPipelines(device, pipeline_cache, 1, &m_ci, nullptr, &pipeline);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateGraphicsPipelines() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return pipeline;
}
void GraphicsPipelineBuilder::SetShaderStage(VkShaderStageFlagBits stage, VkShaderModule module,
const char* entry_point)
{
Assert(m_ci.stageCount < MAX_SHADER_STAGES);
u32 index = 0;
for (; index < m_ci.stageCount; index++)
{
if (m_shader_stages[index].stage == stage)
break;
}
if (index == m_ci.stageCount)
{
m_ci.stageCount++;
m_ci.pStages = m_shader_stages.data();
}
VkPipelineShaderStageCreateInfo& s = m_shader_stages[index];
s.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
s.stage = stage;
s.module = module;
s.pName = entry_point;
}
void GraphicsPipelineBuilder::AddVertexBuffer(u32 binding, u32 stride,
VkVertexInputRate input_rate /*= VK_VERTEX_INPUT_RATE_VERTEX*/)
{
Assert(m_vertex_input_state.vertexAttributeDescriptionCount < MAX_VERTEX_BUFFERS);
VkVertexInputBindingDescription& b = m_vertex_buffers[m_vertex_input_state.vertexBindingDescriptionCount];
b.binding = binding;
b.stride = stride;
b.inputRate = input_rate;
m_vertex_input_state.vertexBindingDescriptionCount++;
m_vertex_input_state.pVertexBindingDescriptions = m_vertex_buffers.data();
m_ci.pVertexInputState = &m_vertex_input_state;
}
void GraphicsPipelineBuilder::AddVertexAttribute(u32 location, u32 binding, VkFormat format, u32 offset)
{
Assert(m_vertex_input_state.vertexAttributeDescriptionCount < MAX_VERTEX_BUFFERS);
VkVertexInputAttributeDescription& a = m_vertex_attributes[m_vertex_input_state.vertexAttributeDescriptionCount];
a.location = location;
a.binding = binding;
a.format = format;
a.offset = offset;
m_vertex_input_state.vertexAttributeDescriptionCount++;
m_vertex_input_state.pVertexAttributeDescriptions = m_vertex_attributes.data();
m_ci.pVertexInputState = &m_vertex_input_state;
}
void GraphicsPipelineBuilder::SetPrimitiveTopology(VkPrimitiveTopology topology,
bool enable_primitive_restart /*= false*/)
{
m_input_assembly.topology = topology;
m_input_assembly.primitiveRestartEnable = enable_primitive_restart;
m_ci.pInputAssemblyState = &m_input_assembly;
}
void GraphicsPipelineBuilder::SetRasterizationState(VkPolygonMode polygon_mode, VkCullModeFlags cull_mode,
VkFrontFace front_face)
{
m_rasterization_state.polygonMode = polygon_mode;
m_rasterization_state.cullMode = cull_mode;
m_rasterization_state.frontFace = front_face;
m_ci.pRasterizationState = &m_rasterization_state;
}
void GraphicsPipelineBuilder::SetLineWidth(float width)
{
m_rasterization_state.lineWidth = width;
}
void GraphicsPipelineBuilder::SetNoCullRasterizationState()
{
SetRasterizationState(VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE);
}
void GraphicsPipelineBuilder::SetDepthState(bool depth_test, bool depth_write, VkCompareOp compare_op)
{
m_depth_state.depthTestEnable = depth_test;
m_depth_state.depthWriteEnable = depth_write;
m_depth_state.depthCompareOp = compare_op;
m_ci.pDepthStencilState = &m_depth_state;
}
void GraphicsPipelineBuilder::SetNoDepthTestState()
{
SetDepthState(false, false, VK_COMPARE_OP_ALWAYS);
}
void GraphicsPipelineBuilder::SetBlendConstants(float r, float g, float b, float a)
{
m_blend_state.blendConstants[0] = r;
m_blend_state.blendConstants[1] = g;
m_blend_state.blendConstants[2] = b;
m_blend_state.blendConstants[3] = a;
m_ci.pColorBlendState = &m_blend_state;
}
void GraphicsPipelineBuilder::AddBlendAttachment(
bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op,
VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op, VkColorComponentFlags write_mask /* = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT */)
{
Assert(m_blend_state.attachmentCount < MAX_ATTACHMENTS);
VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[m_blend_state.attachmentCount];
bs.blendEnable = blend_enable;
bs.srcColorBlendFactor = src_factor;
bs.dstColorBlendFactor = dst_factor;
bs.colorBlendOp = op;
bs.srcAlphaBlendFactor = alpha_src_factor;
bs.dstAlphaBlendFactor = alpha_dst_factor;
bs.alphaBlendOp = alpha_op;
bs.colorWriteMask = write_mask;
m_blend_state.attachmentCount++;
m_blend_state.pAttachments = m_blend_attachments.data();
m_ci.pColorBlendState = &m_blend_state;
}
void GraphicsPipelineBuilder::SetBlendAttachment(
u32 attachment, bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op,
VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op, VkColorComponentFlags write_mask /*= VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT*/)
{
Assert(attachment < MAX_ATTACHMENTS);
VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[attachment];
bs.blendEnable = blend_enable;
bs.srcColorBlendFactor = src_factor;
bs.dstColorBlendFactor = dst_factor;
bs.colorBlendOp = op;
bs.srcAlphaBlendFactor = alpha_src_factor;
bs.dstAlphaBlendFactor = alpha_dst_factor;
bs.alphaBlendOp = alpha_op;
bs.colorWriteMask = write_mask;
if (attachment >= m_blend_state.attachmentCount)
{
m_blend_state.attachmentCount = attachment + 1u;
m_blend_state.pAttachments = m_blend_attachments.data();
m_ci.pColorBlendState = &m_blend_state;
}
}
void GraphicsPipelineBuilder::ClearBlendAttachments()
{
m_blend_attachments = {};
m_blend_state.attachmentCount = 0;
}
void GraphicsPipelineBuilder::SetNoBlendingState()
{
ClearBlendAttachments();
SetBlendAttachment(0, false, VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD, VK_BLEND_FACTOR_ONE,
VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD,
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT |
VK_COLOR_COMPONENT_A_BIT);
}
void GraphicsPipelineBuilder::AddDynamicState(VkDynamicState state)
{
Assert(m_dynamic_state.dynamicStateCount < MAX_DYNAMIC_STATE);
m_dynamic_state_values[m_dynamic_state.dynamicStateCount] = state;
m_dynamic_state.dynamicStateCount++;
m_dynamic_state.pDynamicStates = m_dynamic_state_values.data();
m_ci.pDynamicState = &m_dynamic_state;
}
void GraphicsPipelineBuilder::SetDynamicViewportAndScissorState()
{
AddDynamicState(VK_DYNAMIC_STATE_VIEWPORT);
AddDynamicState(VK_DYNAMIC_STATE_SCISSOR);
}
void GraphicsPipelineBuilder::SetViewport(float x, float y, float width, float height, float min_depth, float max_depth)
{
m_viewport.x = x;
m_viewport.y = y;
m_viewport.width = width;
m_viewport.height = height;
m_viewport.minDepth = min_depth;
m_viewport.maxDepth = max_depth;
m_viewport_state.pViewports = &m_viewport;
m_viewport_state.viewportCount = 1u;
m_ci.pViewportState = &m_viewport_state;
}
void GraphicsPipelineBuilder::SetScissorRect(s32 x, s32 y, u32 width, u32 height)
{
m_scissor.offset.x = x;
m_scissor.offset.y = y;
m_scissor.extent.width = width;
m_scissor.extent.height = height;
m_viewport_state.pScissors = &m_scissor;
m_viewport_state.scissorCount = 1u;
m_ci.pViewportState = &m_viewport_state;
}
void GraphicsPipelineBuilder::SetMultisamples(VkSampleCountFlagBits samples)
{
m_multisample_state.rasterizationSamples = samples;
m_ci.pMultisampleState = &m_multisample_state;
}
void GraphicsPipelineBuilder::SetPipelineLayout(VkPipelineLayout layout)
{
m_ci.layout = layout;
}
void GraphicsPipelineBuilder::SetRenderPass(VkRenderPass render_pass, u32 subpass)
{
m_ci.renderPass = render_pass;
m_ci.subpass = subpass;
}
SamplerBuilder::SamplerBuilder()
{
Clear();
}
void SamplerBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
}
VkSampler SamplerBuilder::Create(VkDevice device, bool clear /* = true */)
{
VkSampler sampler;
VkResult res = vkCreateSampler(device, &m_ci, nullptr, &sampler);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSampler() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return sampler;
}
void SamplerBuilder::SetFilter(VkFilter mag_filter, VkFilter min_filter, VkSamplerMipmapMode mip_filter)
{
m_ci.magFilter = mag_filter;
m_ci.minFilter = min_filter;
m_ci.mipmapMode = mip_filter;
}
void SamplerBuilder::SetAddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w)
{
m_ci.addressModeU = u;
m_ci.addressModeV = v;
m_ci.addressModeW = w;
}
void SamplerBuilder::SetPointSampler(VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER */)
{
Clear();
SetFilter(VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST);
SetAddressMode(address_mode, address_mode, address_mode);
}
void SamplerBuilder::SetLinearSampler(bool mipmaps,
VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER */)
{
Clear();
SetFilter(VK_FILTER_LINEAR, VK_FILTER_LINEAR,
mipmaps ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST);
SetAddressMode(address_mode, address_mode, address_mode);
}
DescriptorSetUpdateBuilder::DescriptorSetUpdateBuilder()
{
Clear();
}
void DescriptorSetUpdateBuilder::Clear()
{
m_writes = {};
m_num_writes = 0;
}
void DescriptorSetUpdateBuilder::Update(VkDevice device, bool clear /*= true*/)
{
Assert(m_num_writes > 0);
vkUpdateDescriptorSets(device, m_num_writes, (m_num_writes > 0) ? m_writes.data() : nullptr, 0, nullptr);
if (clear)
Clear();
}
void DescriptorSetUpdateBuilder::AddImageDescriptorWrite(
VkDescriptorSet set, u32 binding, VkImageView view,
VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
ii.imageView = VK_NULL_HANDLE;
ii.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ii.sampler = sampler;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddCombinedImageSamplerDescriptorWrite(
VkDescriptorSet set, u32 binding, VkImageView view, VkSampler sampler,
VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = sampler;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddBufferDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype,
VkBuffer buffer, u32 offset, u32 size)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkDescriptorBufferInfo& bi = m_infos[m_num_infos++].buffer;
bi.buffer = buffer;
bi.offset = offset;
bi.range = size;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = dtype;
dw.pBufferInfo = &bi;
}
void DescriptorSetUpdateBuilder::AddBufferViewDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype,
VkBufferView view)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkBufferView& bi = m_infos[m_num_infos++].buffer_view;
bi = view;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = dtype;
dw.pTexelBufferView = &bi;
}
FramebufferBuilder::FramebufferBuilder()
{
Clear();
}
void FramebufferBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
m_images = {};
}
VkFramebuffer FramebufferBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkFramebuffer fb;
VkResult res = vkCreateFramebuffer(device, &m_ci, nullptr, &fb);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return fb;
}
void FramebufferBuilder::AddAttachment(VkImageView image)
{
Assert(m_ci.attachmentCount < MAX_ATTACHMENTS);
m_images[m_ci.attachmentCount] = image;
m_ci.attachmentCount++;
m_ci.pAttachments = m_images.data();
}
void FramebufferBuilder::SetSize(u32 width, u32 height, u32 layers)
{
m_ci.width = width;
m_ci.height = height;
m_ci.layers = layers;
}
void FramebufferBuilder::SetRenderPass(VkRenderPass render_pass)
{
m_ci.renderPass = render_pass;
}
RenderPassBuilder::RenderPassBuilder()
{
Clear();
}
void RenderPassBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
m_attachments = {};
m_attachment_references = {};
m_num_attachment_references = 0;
m_subpasses = {};
}
VkRenderPass RenderPassBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkRenderPass rp;
VkResult res = vkCreateRenderPass(device, &m_ci, nullptr, &rp);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateRenderPass() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return rp;
}
u32 RenderPassBuilder::AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op,
VkAttachmentStoreOp store_op, VkImageLayout initial_layout,
VkImageLayout final_layout)
{
Assert(m_ci.attachmentCount < MAX_ATTACHMENTS);
const u32 index = m_ci.attachmentCount;
VkAttachmentDescription& ad = m_attachments[index];
ad.format = format;
ad.samples = samples;
ad.loadOp = load_op;
ad.storeOp = store_op;
ad.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
ad.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
ad.initialLayout = initial_layout;
ad.finalLayout = final_layout;
m_ci.attachmentCount++;
m_ci.pAttachments = m_attachments.data();
return index;
}
u32 RenderPassBuilder::AddSubpass()
{
Assert(m_ci.subpassCount < MAX_SUBPASSES);
const u32 index = m_ci.subpassCount;
VkSubpassDescription& sp = m_subpasses[index];
sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
m_ci.subpassCount++;
m_ci.pSubpasses = m_subpasses.data();
return index;
}
void RenderPassBuilder::AddSubpassColorAttachment(u32 subpass, u32 attachment, VkImageLayout layout)
{
Assert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES);
VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++];
ar.attachment = attachment;
ar.layout = layout;
VkSubpassDescription& sp = m_subpasses[subpass];
if (sp.colorAttachmentCount == 0)
sp.pColorAttachments = &ar;
sp.colorAttachmentCount++;
}
void RenderPassBuilder::AddSubpassDepthAttachment(u32 subpass, u32 attachment, VkImageLayout layout)
{
Assert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES);
VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++];
ar.attachment = attachment;
ar.layout = layout;
VkSubpassDescription& sp = m_subpasses[subpass];
sp.pDepthStencilAttachment = &ar;
}
BufferViewBuilder::BufferViewBuilder()
{
Clear();
}
void BufferViewBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
}
VkBufferView BufferViewBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkBufferView bv;
VkResult res = vkCreateBufferView(device, &m_ci, nullptr, &bv);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBufferView() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return bv;
}
void BufferViewBuilder::Set(VkBuffer buffer, VkFormat format, u32 offset, u32 size)
{
m_ci.buffer = buffer;
m_ci.format = format;
m_ci.offset = offset;
m_ci.range = size;
}
} // namespace Vulkan
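
Taken together, these builders collapse Vulkan's verbose create-info structures into a few calls. The following is a minimal usage sketch (illustrative only, not part of this commit), assuming `device` is a valid VkDevice created elsewhere: a descriptor set layout with a uniform buffer and a combined image/sampler binding, wrapped in a pipeline layout with a small push-constant range.

// Illustrative sketch, not from the commit; assumes `device` is a valid VkDevice.
Vulkan::DescriptorSetLayoutBuilder dslb;
dslb.AddBinding(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_VERTEX_BIT);
dslb.AddBinding(1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
VkDescriptorSetLayout set_layout = dslb.Create(device); // VK_NULL_HANDLE on failure
Vulkan::PipelineLayoutBuilder plb;
plb.AddDescriptorSet(set_layout);
plb.AddPushConstants(VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16); // 16 bytes of fragment push constants
VkPipelineLayout pipeline_layout = plb.Create(device);

Note that Create() resets the builder on success, so a single builder instance can be reused to create several objects in sequence.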

src/common/vulkan/builders.h Normal file

@@ -0,0 +1,269 @@
#pragma once
#include "../types.h"
#include "vulkan_loader.h"
#include <array>
namespace Vulkan {
class DescriptorSetLayoutBuilder
{
public:
enum : u32
{
MAX_BINDINGS = 16,
};
DescriptorSetLayoutBuilder();
void Clear();
VkDescriptorSetLayout Create(VkDevice device);
void AddBinding(u32 binding, VkDescriptorType dtype, u32 dcount, VkShaderStageFlags stages);
private:
VkDescriptorSetLayoutCreateInfo m_ci{};
std::array<VkDescriptorSetLayoutBinding, MAX_BINDINGS> m_bindings{};
};
class PipelineLayoutBuilder
{
public:
enum : u32
{
MAX_SETS = 8,
MAX_PUSH_CONSTANTS = 1
};
PipelineLayoutBuilder();
void Clear();
VkPipelineLayout Create(VkDevice device);
void AddDescriptorSet(VkDescriptorSetLayout layout);
void AddPushConstants(VkShaderStageFlags stages, u32 offset, u32 size);
private:
VkPipelineLayoutCreateInfo m_ci{};
std::array<VkDescriptorSetLayout, MAX_SETS> m_sets{};
std::array<VkPushConstantRange, MAX_PUSH_CONSTANTS> m_push_constants{};
};
class GraphicsPipelineBuilder
{
public:
enum : u32
{
MAX_SHADER_STAGES = 3,
MAX_VERTEX_ATTRIBUTES = 16,
MAX_VERTEX_BUFFERS = 8,
MAX_ATTACHMENTS = 2,
MAX_DYNAMIC_STATE = 8
};
GraphicsPipelineBuilder();
void Clear();
VkPipeline Create(VkDevice device, VkPipelineCache pipeline_cache = VK_NULL_HANDLE, bool clear = true);
void SetShaderStage(VkShaderStageFlagBits stage, VkShaderModule module, const char* entry_point);
void SetVertexShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_VERTEX_BIT, module, "main"); }
void SetGeometryShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, module, "main"); }
void SetFragmentShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_FRAGMENT_BIT, module, "main"); }
void AddVertexBuffer(u32 binding, u32 stride, VkVertexInputRate input_rate = VK_VERTEX_INPUT_RATE_VERTEX);
void AddVertexAttribute(u32 location, u32 binding, VkFormat format, u32 offset);
void SetPrimitiveTopology(VkPrimitiveTopology topology, bool enable_primitive_restart = false);
void SetRasterizationState(VkPolygonMode polygon_mode, VkCullModeFlags cull_mode, VkFrontFace front_face);
void SetLineWidth(float width);
void SetNoCullRasterizationState();
void SetDepthState(bool depth_test, bool depth_write, VkCompareOp compare_op);
void SetNoDepthTestState();
void AddBlendAttachment(bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op,
VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op,
VkColorComponentFlags write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT);
void SetBlendAttachment(u32 attachment, bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor,
VkBlendOp op, VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor,
VkBlendOp alpha_op,
VkColorComponentFlags write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT);
void ClearBlendAttachments();
void SetBlendConstants(float r, float g, float b, float a);
void SetNoBlendingState();
void AddDynamicState(VkDynamicState state);
void SetDynamicViewportAndScissorState();
void SetViewport(float x, float y, float width, float height, float min_depth, float max_depth);
void SetScissorRect(s32 x, s32 y, u32 width, u32 height);
void SetMultisamples(VkSampleCountFlagBits samples);
void SetPipelineLayout(VkPipelineLayout layout);
void SetRenderPass(VkRenderPass render_pass, u32 subpass);
private:
VkGraphicsPipelineCreateInfo m_ci;
std::array<VkPipelineShaderStageCreateInfo, MAX_SHADER_STAGES> m_shader_stages;
VkPipelineVertexInputStateCreateInfo m_vertex_input_state;
std::array<VkVertexInputBindingDescription, MAX_VERTEX_BUFFERS> m_vertex_buffers;
std::array<VkVertexInputAttributeDescription, MAX_VERTEX_ATTRIBUTES> m_vertex_attributes;
VkPipelineInputAssemblyStateCreateInfo m_input_assembly;
VkPipelineRasterizationStateCreateInfo m_rasterization_state;
VkPipelineDepthStencilStateCreateInfo m_depth_state;
VkPipelineColorBlendStateCreateInfo m_blend_state;
std::array<VkPipelineColorBlendAttachmentState, MAX_ATTACHMENTS> m_blend_attachments;
VkPipelineViewportStateCreateInfo m_viewport_state;
VkViewport m_viewport;
VkRect2D m_scissor;
VkPipelineDynamicStateCreateInfo m_dynamic_state;
std::array<VkDynamicState, MAX_DYNAMIC_STATE> m_dynamic_state_values;
VkPipelineMultisampleStateCreateInfo m_multisample_state;
};
class SamplerBuilder
{
public:
SamplerBuilder();
void Clear();
VkSampler Create(VkDevice device, bool clear = true);
void SetFilter(VkFilter mag_filter, VkFilter min_filter, VkSamplerMipmapMode mip_filter);
void SetAddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w);
void SetPointSampler(VkSamplerAddressMode address_mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER);
void SetLinearSampler(bool mipmaps, VkSamplerAddressMode address_mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER);
private:
VkSamplerCreateInfo m_ci;
};
class DescriptorSetUpdateBuilder
{
enum : u32
{
MAX_WRITES = 16,
MAX_INFOS = 16,
};
public:
DescriptorSetUpdateBuilder();
void Clear();
void Update(VkDevice device, bool clear = true);
void AddImageDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view,
VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
void AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler);
void AddCombinedImageSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view, VkSampler sampler,
VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
void AddBufferDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBuffer buffer, u32 offset,
u32 size);
void AddBufferViewDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBufferView view);
private:
union InfoUnion
{
VkDescriptorBufferInfo buffer;
VkDescriptorImageInfo image;
VkBufferView buffer_view;
};
std::array<VkWriteDescriptorSet, MAX_WRITES> m_writes;
u32 m_num_writes = 0;
std::array<InfoUnion, MAX_INFOS> m_infos;
u32 m_num_infos = 0;
};
class FramebufferBuilder
{
enum : u32
{
MAX_ATTACHMENTS = 2,
};
public:
FramebufferBuilder();
void Clear();
VkFramebuffer Create(VkDevice device, bool clear = true);
void AddAttachment(VkImageView image);
void SetSize(u32 width, u32 height, u32 layers);
void SetRenderPass(VkRenderPass render_pass);
private:
VkFramebufferCreateInfo m_ci;
std::array<VkImageView, MAX_ATTACHMENTS> m_images;
};
class RenderPassBuilder
{
enum : u32
{
MAX_ATTACHMENTS = 2,
MAX_ATTACHMENT_REFERENCES = 2,
MAX_SUBPASSES = 1,
};
public:
RenderPassBuilder();
void Clear();
VkRenderPass Create(VkDevice device, bool clear = true);
u32 AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op,
VkAttachmentStoreOp store_op, VkImageLayout initial_layout, VkImageLayout final_layout);
u32 AddSubpass();
void AddSubpassColorAttachment(u32 subpass, u32 attachment, VkImageLayout layout);
void AddSubpassDepthAttachment(u32 subpass, u32 attachment, VkImageLayout layout);
private:
VkRenderPassCreateInfo m_ci;
std::array<VkAttachmentDescription, MAX_ATTACHMENTS> m_attachments;
std::array<VkAttachmentReference, MAX_ATTACHMENT_REFERENCES> m_attachment_references;
u32 m_num_attachment_references = 0;
std::array<VkSubpassDescription, MAX_SUBPASSES> m_subpasses;
};
class BufferViewBuilder
{
public:
BufferViewBuilder();
void Clear();
VkBufferView Create(VkDevice device, bool clear = true);
void Set(VkBuffer buffer, VkFormat format, u32 offset, u32 size);
private:
VkBufferViewCreateInfo m_ci;
};
} // namespace Vulkan
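
Tying the declarations together, a typical pipeline build might look like the sketch below (not part of the commit; `vs`, `fs`, `pipeline_layout`, `render_pass`, and `device` are assumed to have been created elsewhere). Dynamic viewport/scissor state is enabled, so the placeholder viewport that Clear() installs is never actually used.

// Sketch only; vs/fs/pipeline_layout/render_pass/device are assumed to exist.
Vulkan::GraphicsPipelineBuilder gpb;
gpb.SetVertexShader(vs);
gpb.SetFragmentShader(fs);
gpb.AddVertexBuffer(0, sizeof(float) * 4); // one interleaved buffer, 16-byte stride
gpb.AddVertexAttribute(0, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0);
gpb.SetPipelineLayout(pipeline_layout);
gpb.SetRenderPass(render_pass, 0);
gpb.SetDynamicViewportAndScissorState();
VkPipeline pipeline = gpb.Create(device); // no pipeline cache; VK_NULL_HANDLE on failure

The Clear() defaults (no culling, no depth test, no blending, triangle lists) cover the common case, so only the state that differs needs to be specified.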

src/common/vulkan/context.cpp Normal file (diff suppressed because it is too large)

src/common/vulkan/context.h Normal file (229 lines)

@@ -0,0 +1,229 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#pragma once
#include "../types.h"
#include "vulkan_loader.h"
#include <array>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <vector>
struct WindowInfo;
namespace Vulkan {
class SwapChain;
class Context
{
public:
enum : u32
{
NUM_COMMAND_BUFFERS = 2
};
~Context();
// Determines if the Vulkan validation layer is available on the system.
static bool CheckValidationLayerAvailablility();
// Helper method to create a Vulkan instance.
static VkInstance CreateVulkanInstance(bool enable_surface, bool enable_debug_report, bool enable_validation_layer);
// Returns a list of Vulkan-compatible GPUs.
using GPUList = std::vector<VkPhysicalDevice>;
using GPUNameList = std::vector<std::string>;
static GPUList EnumerateGPUs(VkInstance instance);
static GPUNameList EnumerateGPUNames(VkInstance instance);
// Creates a new context and sets it up as global.
static bool Create(u32 gpu_index, const WindowInfo* wi, std::unique_ptr<SwapChain>* out_swap_chain,
bool enable_debug_reports, bool enable_validation_layer);
// Destroys context.
static void Destroy();
// Enable/disable debug message reporting at runtime.
bool EnableDebugReports();
void DisableDebugReports();
// Global state accessors
ALWAYS_INLINE VkInstance GetVulkanInstance() const { return m_instance; }
ALWAYS_INLINE VkPhysicalDevice GetPhysicalDevice() const { return m_physical_device; }
ALWAYS_INLINE VkDevice GetDevice() const { return m_device; }
ALWAYS_INLINE VkQueue GetGraphicsQueue() const { return m_graphics_queue; }
ALWAYS_INLINE u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; }
ALWAYS_INLINE VkQueue GetPresentQueue() const { return m_present_queue; }
ALWAYS_INLINE u32 GetPresentQueueFamilyIndex() const { return m_present_queue_family_index; }
ALWAYS_INLINE const VkQueueFamilyProperties& GetGraphicsQueueProperties() const
{
return m_graphics_queue_properties;
}
ALWAYS_INLINE const VkPhysicalDeviceMemoryProperties& GetDeviceMemoryProperties() const
{
return m_device_memory_properties;
}
ALWAYS_INLINE const VkPhysicalDeviceProperties& GetDeviceProperties() const { return m_device_properties; }
ALWAYS_INLINE const VkPhysicalDeviceFeatures& GetDeviceFeatures() const { return m_device_features; }
ALWAYS_INLINE const VkPhysicalDeviceLimits& GetDeviceLimits() const { return m_device_properties.limits; }
// Support bits
ALWAYS_INLINE bool SupportsGeometryShaders() const { return m_device_features.geometryShader == VK_TRUE; }
ALWAYS_INLINE bool SupportsDualSourceBlend() const { return m_device_features.dualSrcBlend == VK_TRUE; }
// Helpers for getting constants
ALWAYS_INLINE VkDeviceSize GetUniformBufferAlignment() const
{
return m_device_properties.limits.minUniformBufferOffsetAlignment;
}
ALWAYS_INLINE VkDeviceSize GetTexelBufferAlignment() const
{
return m_device_properties.limits.minTexelBufferOffsetAlignment;
}
ALWAYS_INLINE VkDeviceSize GetBufferImageGranularity() const
{
return m_device_properties.limits.bufferImageGranularity;
}
// Finds a memory type index for the specified memory properties and the bits returned by
// vkGetImageMemoryRequirements
bool GetMemoryType(u32 bits, VkMemoryPropertyFlags properties, u32* out_type_index);
u32 GetMemoryType(u32 bits, VkMemoryPropertyFlags properties);
// Finds a memory type for upload or readback buffers.
u32 GetUploadMemoryType(u32 bits, bool* is_coherent = nullptr);
u32 GetReadbackMemoryType(u32 bits, bool* is_coherent = nullptr, bool* is_cached = nullptr);
// Creates a simple render pass.
VkRenderPass GetRenderPass(VkFormat color_format, VkFormat depth_format, VkSampleCountFlagBits samples,
VkAttachmentLoadOp load_op);
// These command buffers are allocated per-frame. They are valid until the command buffer
// is submitted; after that, call these functions again to get the current one.
ALWAYS_INLINE VkDescriptorPool GetGlobalDescriptorPool() const { return m_global_descriptor_pool; }
ALWAYS_INLINE VkCommandBuffer GetCurrentCommandBuffer() const { return m_current_command_buffer; }
ALWAYS_INLINE VkDescriptorPool GetCurrentDescriptorPool() const
{
return m_frame_resources[m_current_frame].descriptor_pool;
}
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout);
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateGlobalDescriptorSet(VkDescriptorSetLayout set_layout);
/// Frees a descriptor set allocated from the global pool.
void FreeGlobalDescriptorSet(VkDescriptorSet set);
// Gets the fence that will be signaled when the currently executing command buffer is
// queued and executed. Do not wait for this fence before the buffer is executed.
ALWAYS_INLINE VkFence GetCurrentCommandBufferFence() const { return m_frame_resources[m_current_frame].fence; }
// Fence "counters" are used to track which commands have been completed by the GPU.
// If the last completed fence counter is greater or equal to N, it means that the work
// associated counter N has been completed by the GPU. The value of N to associate with
// commands can be retrieved by calling GetCurrentFenceCounter().
u64 GetCompletedFenceCounter() const { return m_completed_fence_counter; }
// Gets the fence counter associated with the currently executing command buffer. It is
// considered completed once that buffer has finished executing on the GPU.
u64 GetCurrentFenceCounter() const { return m_frame_resources[m_current_frame].fence_counter; }
void SubmitCommandBuffer(VkSemaphore wait_semaphore = VK_NULL_HANDLE, VkSemaphore signal_semaphore = VK_NULL_HANDLE,
VkSwapchainKHR present_swap_chain = VK_NULL_HANDLE,
uint32_t present_image_index = 0xFFFFFFFF);
void MoveToNextCommandBuffer();
void ExecuteCommandBuffer(bool wait_for_completion);
// Was the last present submitted to the queue a failure? If so, we must recreate our swapchain.
bool CheckLastPresentFail();
// Schedule a Vulkan resource for destruction later on. This will occur when the command buffer
// is next re-used, and the GPU has finished working with the specified resource.
void DeferBufferDestruction(VkBuffer object);
void DeferBufferViewDestruction(VkBufferView object);
void DeferDeviceMemoryDestruction(VkDeviceMemory object);
void DeferFramebufferDestruction(VkFramebuffer object);
void DeferImageDestruction(VkImage object);
void DeferImageViewDestruction(VkImageView object);
// Wait for a fence to be completed.
// Also invokes callbacks for completion.
void WaitForFenceCounter(u64 fence_counter);
void WaitForGPUIdle();
private:
Context(VkInstance instance, VkPhysicalDevice physical_device);
using ExtensionList = std::vector<const char*>;
static bool SelectInstanceExtensions(ExtensionList* extension_list, bool enable_surface, bool enable_debug_report);
bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface);
bool SelectDeviceFeatures();
bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer);
bool CreateCommandBuffers();
void DestroyCommandBuffers();
bool CreateGlobalDescriptorPool();
void DestroyGlobalDescriptorPool();
void DestroyRenderPassCache();
void ActivateCommandBuffer(u32 index);
void WaitForCommandBufferCompletion(u32 index);
struct FrameResources
{
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u64 fence_counter = 0;
bool needs_fence_wait = false;
std::vector<std::function<void()>> cleanup_resources;
};
VkInstance m_instance = VK_NULL_HANDLE;
VkPhysicalDevice m_physical_device = VK_NULL_HANDLE;
VkDevice m_device = VK_NULL_HANDLE;
VkCommandBuffer m_current_command_buffer = VK_NULL_HANDLE;
VkDescriptorPool m_global_descriptor_pool = VK_NULL_HANDLE;
VkQueue m_graphics_queue = VK_NULL_HANDLE;
u32 m_graphics_queue_family_index = 0;
VkQueue m_present_queue = VK_NULL_HANDLE;
u32 m_present_queue_family_index = 0;
std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources;
u64 m_next_fence_counter = 1;
u64 m_completed_fence_counter = 0;
u32 m_current_frame = 0;
bool m_last_present_failed = false;
// Render pass cache
using RenderPassCacheKey = std::tuple<VkFormat, VkFormat, VkSampleCountFlagBits, VkAttachmentLoadOp>;
std::map<RenderPassCacheKey, VkRenderPass> m_render_pass_cache;
VkDebugReportCallbackEXT m_debug_report_callback = VK_NULL_HANDLE;
VkQueueFamilyProperties m_graphics_queue_properties = {};
VkPhysicalDeviceFeatures m_device_features = {};
VkPhysicalDeviceProperties m_device_properties = {};
VkPhysicalDeviceMemoryProperties m_device_memory_properties = {};
};
} // namespace Vulkan
extern std::unique_ptr<Vulkan::Context> g_vulkan_context;

src/common/vulkan/shader_cache.cpp Normal file

@@ -0,0 +1,513 @@
#include "shader_cache.h"
#include "../assert.h"
#include "../file_system.h"
#include "../log.h"
#include "../md5_digest.h"
#include "context.h"
#include "shader_compiler.h"
#include "util.h"
Log_SetChannel(Vulkan::ShaderCache);
// TODO: store the driver version and stuff in the shader header
std::unique_ptr<Vulkan::ShaderCache> g_vulkan_shader_cache;
namespace Vulkan {
using ShaderCompiler::SPIRVCodeType;
using ShaderCompiler::SPIRVCodeVector;
#pragma pack(push, 4)
struct VK_PIPELINE_CACHE_HEADER
{
u32 header_length;
u32 header_version;
u32 vendor_id;
u32 device_id;
u8 uuid[VK_UUID_SIZE];
};
struct CacheIndexEntry
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
u32 shader_type;
u32 file_offset;
u32 blob_size;
};
#pragma pack(pop)
static bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header)
{
if (header.header_length < sizeof(VK_PIPELINE_CACHE_HEADER))
{
Log_ErrorPrintf("Pipeline cache failed validation: Invalid header length");
return false;
}
if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
{
Log_ErrorPrintf("Pipeline cache failed validation: Invalid header version");
return false;
}
if (header.vendor_id != g_vulkan_context->GetDeviceProperties().vendorID)
{
Log_ErrorPrintf("Pipeline cache failed validation: Incorrect vendor ID (file: 0x%X, device: 0x%X)",
header.vendor_id, g_vulkan_context->GetDeviceProperties().vendorID);
return false;
}
if (header.device_id != g_vulkan_context->GetDeviceProperties().deviceID)
{
Log_ErrorPrintf("Pipeline cache failed validation: Incorrect device ID (file: 0x%X, device: 0x%X)",
header.device_id, g_vulkan_context->GetDeviceProperties().deviceID);
return false;
}
if (std::memcmp(header.uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) != 0)
{
Log_ErrorPrintf("Pipeline cache failed validation: Incorrect UUID");
return false;
}
return true;
}
static void FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header)
{
header->header_length = sizeof(VK_PIPELINE_CACHE_HEADER);
header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
header->vendor_id = g_vulkan_context->GetDeviceProperties().vendorID;
header->device_id = g_vulkan_context->GetDeviceProperties().deviceID;
std::memcpy(header->uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE);
}
ShaderCache::ShaderCache() = default;
ShaderCache::~ShaderCache()
{
CloseShaderCache();
FlushPipelineCache();
ClosePipelineCache();
}
bool ShaderCache::CacheIndexKey::operator==(const CacheIndexKey& key) const
{
return (source_hash_low == key.source_hash_low && source_hash_high == key.source_hash_high &&
source_length == key.source_length && shader_type == key.shader_type);
}
bool ShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const
{
return (source_hash_low != key.source_hash_low || source_hash_high != key.source_hash_high ||
source_length != key.source_length || shader_type != key.shader_type);
}
void ShaderCache::Create(std::string_view base_path, bool debug)
{
Assert(!g_vulkan_shader_cache);
g_vulkan_shader_cache.reset(new ShaderCache());
g_vulkan_shader_cache->Open(base_path, debug);
}
void ShaderCache::Destroy()
{
g_vulkan_shader_cache.reset();
}
void ShaderCache::Open(std::string_view base_path, bool debug)
{
m_debug = debug;
m_pipeline_cache_filename = GetPipelineCacheBaseFileName(base_path, debug);
const std::string base_filename = GetShaderCacheBaseFileName(base_path, debug);
const std::string index_filename = base_filename + ".idx";
const std::string blob_filename = base_filename + ".bin";
if (!ReadExistingShaderCache(index_filename, blob_filename))
CreateNewShaderCache(index_filename, blob_filename);
if (!ReadExistingPipelineCache())
CreateNewPipelineCache();
}
VkPipelineCache ShaderCache::GetPipelineCache(bool set_dirty /*= true*/)
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return VK_NULL_HANDLE;
m_pipeline_cache_dirty |= set_dirty;
return m_pipeline_cache;
}
bool ShaderCache::CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename)
{
if (FileSystem::FileExists(index_filename.c_str()))
{
Log_WarningPrintf("Removing existing index file '%s'", index_filename.c_str());
FileSystem::DeleteFile(index_filename.c_str());
}
if (FileSystem::FileExists(blob_filename.c_str()))
{
Log_WarningPrintf("Removing existing blob file '%s'", blob_filename.c_str());
FileSystem::DeleteFile(blob_filename.c_str());
}
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "wb");
if (!m_index_file)
{
Log_ErrorPrintf("Failed to open index file '%s' for writing", index_filename.c_str());
return false;
}
const u32 index_version = FILE_VERSION;
VK_PIPELINE_CACHE_HEADER header;
FillPipelineCacheHeader(&header);
if (std::fwrite(&index_version, sizeof(index_version), 1, m_index_file) != 1 ||
std::fwrite(&header, sizeof(header), 1, m_index_file) != 1)
{
Log_ErrorPrintf("Failed to write header to index file '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "w+b");
if (!m_blob_file)
{
Log_ErrorPrintf("Failed to open blob file '%s' for writing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
return true;
}
bool ShaderCache::ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename)
{
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "r+b");
if (!m_index_file)
return false;
u32 file_version;
if (std::fread(&file_version, sizeof(file_version), 1, m_index_file) != 1 || file_version != FILE_VERSION)
{
Log_ErrorPrintf("Bad file version in '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
VK_PIPELINE_CACHE_HEADER header;
if (std::fread(&header, sizeof(header), 1, m_index_file) != 1 || !ValidatePipelineCacheHeader(header))
{
Log_ErrorPrintf("Mismatched pipeline cache header in '%s' (GPU/driver changed?)", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "a+b");
if (!m_blob_file)
{
Log_ErrorPrintf("Blob file '%s' is missing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
std::fseek(m_blob_file, 0, SEEK_END);
const u32 blob_file_size = static_cast<u32>(std::ftell(m_blob_file));
for (;;)
{
CacheIndexEntry entry;
// blob_size is a count of SPIR-V words, not bytes; convert for the bounds check.
if (std::fread(&entry, sizeof(entry), 1, m_index_file) != 1 ||
(entry.file_offset + entry.blob_size * sizeof(SPIRVCodeType)) > blob_file_size)
{
if (std::feof(m_index_file))
break;
Log_ErrorPrintf("Failed to read entry from '%s', corrupt file?", index_filename.c_str());
m_index.clear();
std::fclose(m_blob_file);
m_blob_file = nullptr;
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
const CacheIndexKey key{entry.source_hash_low, entry.source_hash_high, entry.source_length,
static_cast<ShaderCompiler::Type>(entry.shader_type)};
const CacheIndexData data{entry.file_offset, entry.blob_size};
m_index.emplace(key, data);
}
// Switch from reading to writing: seek to the end before appending new entries.
std::fseek(m_index_file, 0, SEEK_END);
Log_InfoPrintf("Read %zu entries from '%s'", m_index.size(), index_filename.c_str());
return true;
}
void ShaderCache::CloseShaderCache()
{
if (m_index_file)
{
std::fclose(m_index_file);
m_index_file = nullptr;
}
if (m_blob_file)
{
std::fclose(m_blob_file);
m_blob_file = nullptr;
}
}
bool ShaderCache::CreateNewPipelineCache()
{
if (FileSystem::FileExists(m_pipeline_cache_filename.c_str()))
{
Log_WarningPrintf("Removing existing pipeline cache '%s'", m_pipeline_cache_filename.c_str());
FileSystem::DeleteFile(m_pipeline_cache_filename.c_str());
}
const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, 0, nullptr};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
return false;
}
m_pipeline_cache_dirty = true;
return true;
}
bool ShaderCache::ReadExistingPipelineCache()
{
std::optional<std::vector<u8>> data = FileSystem::ReadBinaryFile(m_pipeline_cache_filename.c_str());
if (!data.has_value())
return false;
if (data->size() < sizeof(VK_PIPELINE_CACHE_HEADER))
{
Log_ErrorPrintf("Pipeline cache at '%s' is too small", m_pipeline_cache_filename.c_str());
return false;
}
VK_PIPELINE_CACHE_HEADER header;
std::memcpy(&header, data->data(), sizeof(header));
if (!ValidatePipelineCacheHeader(header))
return false;
const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, data->size(),
data->data()};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
return false;
}
return true;
}
bool ShaderCache::FlushPipelineCache()
{
if (m_pipeline_cache == VK_NULL_HANDLE || !m_pipeline_cache_dirty)
return false;
size_t data_size;
VkResult res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() failed: ");
return false;
}
std::vector<u8> data(data_size);
res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, data.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() (2) failed: ");
return false;
}
data.resize(data_size);
// Save disk writes if it hasn't changed; think of the poor SSDs.
std::optional<std::vector<u8>> existing_data = FileSystem::ReadBinaryFile(m_pipeline_cache_filename.c_str());
if (!existing_data.has_value() || existing_data->size() != data_size ||
std::memcmp(existing_data->data(), data.data(), data_size) != 0)
{
Log_InfoPrintf("Writing %zu bytes to '%s'", data_size, m_pipeline_cache_filename.c_str());
if (!FileSystem::WriteBinaryFile(m_pipeline_cache_filename.c_str(), data.data(), data.size()))
{
Log_ErrorPrintf("Failed to write pipeline cache to '%s'", m_pipeline_cache_filename.c_str());
return false;
}
}
else
{
Log_WarningPrintf("Skipping updating pipeline cache '%s' due to no changes.", m_pipeline_cache_filename.c_str());
}
m_pipeline_cache_dirty = false;
return true;
}
void ShaderCache::ClosePipelineCache()
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return;
vkDestroyPipelineCache(g_vulkan_context->GetDevice(), m_pipeline_cache, nullptr);
m_pipeline_cache = VK_NULL_HANDLE;
}
std::string ShaderCache::GetShaderCacheBaseFileName(const std::string_view& base_path, bool debug)
{
std::string base_filename(base_path);
base_filename += FS_OSPATH_SEPERATOR_CHARACTER;
base_filename += "vulkan_shaders";
if (debug)
base_filename += "_debug";
return base_filename;
}
std::string ShaderCache::GetPipelineCacheBaseFileName(const std::string_view& base_path, bool debug)
{
std::string base_filename(base_path);
base_filename += FS_OSPATH_SEPERATOR_CHARACTER;
base_filename += "vulkan_pipelines";
if (debug)
base_filename += "_debug";
base_filename += ".bin";
return base_filename;
}
ShaderCache::CacheIndexKey ShaderCache::GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code)
{
union
{
struct
{
u64 hash_low;
u64 hash_high;
};
u8 hash[16];
};
MD5Digest digest;
digest.Update(shader_code.data(), static_cast<u32>(shader_code.length()));
digest.Final(hash);
return CacheIndexKey{hash_low, hash_high, static_cast<u32>(shader_code.length()), type};
}
std::optional<ShaderCompiler::SPIRVCodeVector> ShaderCache::GetShaderSPV(ShaderCompiler::Type type,
std::string_view shader_code)
{
const auto key = GetCacheKey(type, shader_code);
auto iter = m_index.find(key);
if (iter == m_index.end())
return CompileAndAddShaderSPV(key, shader_code);
SPIRVCodeVector spv(iter->second.blob_size);
if (std::fseek(m_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
std::fread(spv.data(), sizeof(SPIRVCodeType), iter->second.blob_size, m_blob_file) != iter->second.blob_size)
{
Log_ErrorPrintf("Read blob from file failed, recompiling");
return ShaderCompiler::CompileShader(type, shader_code, m_debug);
}
return spv;
}
VkShaderModule ShaderCache::GetShaderModule(ShaderCompiler::Type type, std::string_view shader_code)
{
std::optional<SPIRVCodeVector> spv = GetShaderSPV(type, shader_code);
if (!spv.has_value())
return VK_NULL_HANDLE;
const VkShaderModuleCreateInfo ci{VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, nullptr, 0,
spv->size() * sizeof(SPIRVCodeType), spv->data()};
VkShaderModule mod;
VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &ci, nullptr, &mod);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateShaderModule() failed: ");
return VK_NULL_HANDLE;
}
return mod;
}
VkShaderModule ShaderCache::GetVertexShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Vertex, std::move(shader_code));
}
VkShaderModule ShaderCache::GetGeometryShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Geometry, std::move(shader_code));
}
VkShaderModule ShaderCache::GetFragmentShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Fragment, std::move(shader_code));
}
VkShaderModule ShaderCache::GetComputeShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Compute, std::move(shader_code));
}
std::optional<ShaderCompiler::SPIRVCodeVector> ShaderCache::CompileAndAddShaderSPV(const CacheIndexKey& key,
std::string_view shader_code)
{
std::optional<SPIRVCodeVector> spv = ShaderCompiler::CompileShader(key.shader_type, shader_code, m_debug);
if (!spv.has_value())
return {};
if (!m_blob_file || std::fseek(m_blob_file, 0, SEEK_END) != 0)
return spv;
CacheIndexData data;
data.file_offset = static_cast<u32>(std::ftell(m_blob_file));
data.blob_size = static_cast<u32>(spv->size());
CacheIndexEntry entry = {};
entry.source_hash_low = key.source_hash_low;
entry.source_hash_high = key.source_hash_high;
entry.source_length = key.source_length;
entry.shader_type = static_cast<u32>(key.shader_type);
entry.blob_size = data.blob_size;
entry.file_offset = data.file_offset;
if (std::fwrite(spv->data(), sizeof(SPIRVCodeType), entry.blob_size, m_blob_file) != entry.blob_size ||
std::fflush(m_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_index_file) != 1 ||
std::fflush(m_index_file) != 0)
{
Log_ErrorPrintf("Failed to write shader blob to file");
return spv;
}
m_index.emplace(key, data);
return spv;
}
} // namespace Vulkan
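
A minimal usage sketch of the cache as wired up above, assuming an initialized Vulkan::Context; the base path and GLSL source here are placeholders:

Vulkan::ShaderCache::Create("cache_dir", /*debug=*/false);
const char* fs_source = "#version 450 core\n"
"layout(location = 0) out vec4 o_col;\n"
"void main() { o_col = vec4(1.0); }\n";
// The first run compiles through glslang and appends to the index/blob files;
// subsequent runs are served from disk without invoking the compiler.
VkShaderModule mod = g_vulkan_shader_cache->GetFragmentShader(fs_source);
if (mod != VK_NULL_HANDLE)
vkDestroyShaderModule(g_vulkan_context->GetDevice(), mod, nullptr);
Vulkan::ShaderCache::Destroy();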

View file

@ -0,0 +1,100 @@
#pragma once
#include "../hash_combine.h"
#include "../types.h"
#include "shader_compiler.h"
#include "vulkan_loader.h"
#include <cstdio>
#include <optional>
#include <string_view>
#include <unordered_map>
#include <vector>
namespace Vulkan {
class ShaderCache
{
public:
~ShaderCache();
static void Create(std::string_view base_path, bool debug);
static void Destroy();
/// Returns a handle to the pipeline cache. Set set_dirty to true if you are planning on writing to it externally.
VkPipelineCache GetPipelineCache(bool set_dirty = true);
/// Writes pipeline cache to file, saving all newly compiled pipelines.
bool FlushPipelineCache();
std::optional<ShaderCompiler::SPIRVCodeVector> GetShaderSPV(ShaderCompiler::Type type, std::string_view shader_code);
VkShaderModule GetShaderModule(ShaderCompiler::Type type, std::string_view shader_code);
VkShaderModule GetVertexShader(std::string_view shader_code);
VkShaderModule GetGeometryShader(std::string_view shader_code);
VkShaderModule GetFragmentShader(std::string_view shader_code);
VkShaderModule GetComputeShader(std::string_view shader_code);
private:
static constexpr u32 FILE_VERSION = 1;
struct CacheIndexKey
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
ShaderCompiler::Type shader_type;
bool operator==(const CacheIndexKey& key) const;
bool operator!=(const CacheIndexKey& key) const;
};
struct CacheIndexEntryHasher
{
std::size_t operator()(const CacheIndexKey& e) const noexcept
{
std::size_t h = 0;
hash_combine(h, e.source_hash_low, e.source_hash_high, e.source_length, e.shader_type);
return h;
}
};
struct CacheIndexData
{
u32 file_offset;
u32 blob_size;
};
using CacheIndex = std::unordered_map<CacheIndexKey, CacheIndexData, CacheIndexEntryHasher>;
ShaderCache();
static std::string GetShaderCacheBaseFileName(const std::string_view& base_path, bool debug);
static std::string GetPipelineCacheBaseFileName(const std::string_view& base_path, bool debug);
static CacheIndexKey GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code);
void Open(std::string_view base_path, bool debug);
bool CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename);
bool ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename);
void CloseShaderCache();
bool CreateNewPipelineCache();
bool ReadExistingPipelineCache();
void ClosePipelineCache();
std::optional<ShaderCompiler::SPIRVCodeVector> CompileAndAddShaderSPV(const CacheIndexKey& key,
std::string_view shader_code);
std::FILE* m_index_file = nullptr;
std::FILE* m_blob_file = nullptr;
std::string m_pipeline_cache_filename;
CacheIndex m_index;
VkPipelineCache m_pipeline_cache = VK_NULL_HANDLE;
bool m_debug = false;
bool m_pipeline_cache_dirty = false;
};
} // namespace Vulkan
extern std::unique_ptr<Vulkan::ShaderCache> g_vulkan_shader_cache;
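
The pipeline cache handle plugs directly into pipeline creation; a sketch of the intended flow (the create-info population is assumed to happen elsewhere):

// Pass set_dirty=true since vkCreateGraphicsPipelines may add new entries.
VkPipelineCache cache = g_vulkan_shader_cache->GetPipelineCache(/*set_dirty=*/true);
VkGraphicsPipelineCreateInfo gpci = {}; // assumed to be filled in by the caller
VkPipeline pipeline = VK_NULL_HANDLE;
vkCreateGraphicsPipelines(g_vulkan_context->GetDevice(), cache, 1, &gpci, nullptr, &pipeline);
// Persists any newly-compiled pipelines; also happens automatically on shutdown.
g_vulkan_shader_cache->FlushPipelineCache();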

View file

@ -0,0 +1,173 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#include "shader_compiler.h"
#include "../assert.h"
#include "../log.h"
#include "../string_util.h"
#include "util.h"
#include <cstring>
#include <fstream>
#include <memory>
Log_SetChannel(Vulkan::ShaderCompiler);
// glslang includes
#include "SPIRV/GlslangToSpv.h"
#include "StandAlone/ResourceLimits.h"
#include "glslang/Public/ShaderLang.h"
namespace Vulkan::ShaderCompiler {
// Registers itself for cleanup via atexit
bool InitializeGlslang();
static unsigned s_next_bad_shader_id = 1;
static std::optional<SPIRVCodeVector> CompileShaderToSPV(EShLanguage stage, const char* stage_filename,
std::string_view source)
{
if (!InitializeGlslang())
return std::nullopt;
std::unique_ptr<glslang::TShader> shader = std::make_unique<glslang::TShader>(stage);
std::unique_ptr<glslang::TProgram> program;
glslang::TShader::ForbidIncluder includer;
EProfile profile = ECoreProfile;
EShMessages messages = static_cast<EShMessages>(EShMsgDefault | EShMsgSpvRules | EShMsgVulkanRules);
int default_version = 450;
std::string full_source_code;
const char* pass_source_code = source.data();
int pass_source_code_length = static_cast<int>(source.size());
shader->setStringsWithLengths(&pass_source_code, &pass_source_code_length, 1);
auto DumpBadShader = [&](const char* msg) {
std::string filename = StringUtil::StdStringFromFormat("bad_shader_%u.txt", s_next_bad_shader_id++);
Log::Writef("Vulkan", "CompileShaderToSPV", LOGLEVEL_ERROR, "%s, writing to %s", msg, filename.c_str());
std::ofstream ofs(filename.c_str(), std::ofstream::out | std::ofstream::binary);
if (ofs.is_open())
{
ofs << source;
ofs << "\n";
ofs << msg << std::endl;
ofs << "Shader Info Log:" << std::endl;
ofs << shader->getInfoLog() << std::endl;
ofs << shader->getInfoDebugLog() << std::endl;
if (program)
{
ofs << "Program Info Log:" << std::endl;
ofs << program->getInfoLog() << std::endl;
ofs << program->getInfoDebugLog() << std::endl;
}
ofs.close();
}
};
if (!shader->parse(&glslang::DefaultTBuiltInResource, default_version, profile, false, true, messages, includer))
{
DumpBadShader("Failed to parse shader");
return std::nullopt;
}
// Even though there's only a single shader, we still need to link it to generate SPV
program = std::make_unique<glslang::TProgram>();
program->addShader(shader.get());
if (!program->link(messages))
{
DumpBadShader("Failed to link program");
return std::nullopt;
}
glslang::TIntermediate* intermediate = program->getIntermediate(stage);
if (!intermediate)
{
DumpBadShader("Failed to generate SPIR-V");
return std::nullopt;
}
SPIRVCodeVector out_code;
spv::SpvBuildLogger logger;
glslang::GlslangToSpv(*intermediate, out_code, &logger);
// Write out messages
// Temporary: skip if it contains "Warning, version 450 is not yet complete; most version-specific
// features are present, but some are missing."
if (std::strlen(shader->getInfoLog()) > 108)
Log_WarningPrintf("Shader info log: %s", shader->getInfoLog());
if (std::strlen(shader->getInfoDebugLog()) > 0)
Log_WarningPrintf("Shader debug info log: %s", shader->getInfoDebugLog());
if (std::strlen(program->getInfoLog()) > 25)
Log_WarningPrintf("Program info log: %s", program->getInfoLog());
if (std::strlen(program->getInfoDebugLog()) > 0)
Log_WarningPrintf("Program debug info log: %s", program->getInfoDebugLog());
std::string spv_messages = logger.getAllMessages();
if (!spv_messages.empty())
Log_WarningPrintf("SPIR-V conversion messages: %s", spv_messages.c_str());
return out_code;
}
bool InitializeGlslang()
{
static bool glslang_initialized = false;
if (glslang_initialized)
return true;
if (!glslang::InitializeProcess())
{
Panic("Failed to initialize glslang shader compiler");
return false;
}
std::atexit([]() { glslang::FinalizeProcess(); });
glslang_initialized = true;
return true;
}
std::optional<SPIRVCodeVector> CompileVertexShader(std::string_view source_code)
{
return CompileShaderToSPV(EShLangVertex, "vs", source_code);
}
std::optional<SPIRVCodeVector> CompileGeometryShader(std::string_view source_code)
{
return CompileShaderToSPV(EShLangGeometry, "gs", source_code);
}
std::optional<SPIRVCodeVector> CompileFragmentShader(std::string_view source_code)
{
return CompileShaderToSPV(EShLangFragment, "ps", source_code);
}
std::optional<SPIRVCodeVector> CompileComputeShader(std::string_view source_code)
{
return CompileShaderToSPV(EShLangCompute, "cs", source_code);
}
std::optional<ShaderCompiler::SPIRVCodeVector> CompileShader(Type type, std::string_view source_code, bool debug)
{
switch (type)
{
case Type::Vertex:
return CompileShaderToSPV(EShLangVertex, "vs", source_code);
case Type::Geometry:
return CompileShaderToSPV(EShLangGeometry, "gs", source_code);
case Type::Fragment:
return CompileShaderToSPV(EShLangFragment, "ps", source_code);
case Type::Compute:
return CompileShaderToSPV(EShLangCompute, "cs", source_code);
default:
return std::nullopt;
}
}
} // namespace Vulkan::ShaderCompiler
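
The compiler can also be used standalone, without the cache; a minimal sketch (the shader source is illustrative):

const char* vs_source = "#version 450 core\nvoid main() { gl_Position = vec4(0.0, 0.0, 0.0, 1.0); }\n";
std::optional<Vulkan::ShaderCompiler::SPIRVCodeVector> spv =
Vulkan::ShaderCompiler::CompileShader(Vulkan::ShaderCompiler::Type::Vertex, vs_source, false);
if (spv.has_value())
{
// SPIRVCodeVector is a vector of 32-bit words; spv->data() and
// spv->size() * sizeof(u32) feed directly into VkShaderModuleCreateInfo
// (see GetShaderModule() in shader_cache.cpp).
}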

View file

@ -0,0 +1,42 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#pragma once
#include "../types.h"
#include <optional>
#include <string_view>
#include <vector>
namespace Vulkan::ShaderCompiler {
// Shader types
enum class Type
{
Vertex,
Geometry,
Fragment,
Compute
};
// SPIR-V compiled code type
using SPIRVCodeType = u32;
using SPIRVCodeVector = std::vector<SPIRVCodeType>;
// Compile a vertex shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileVertexShader(std::string_view source_code);
// Compile a geometry shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileGeometryShader(std::string_view source_code);
// Compile a fragment shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileFragmentShader(std::string_view source_code);
// Compile a compute shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileComputeShader(std::string_view source_code);
std::optional<SPIRVCodeVector> CompileShader(Type type, std::string_view source_code, bool debug);
} // namespace Vulkan::ShaderCompiler

View file

@ -0,0 +1,253 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#include "staging_buffer.h"
#include "../assert.h"
#include "context.h"
#include "util.h"
namespace Vulkan {
StagingBuffer::StagingBuffer() = default;
StagingBuffer::StagingBuffer(StagingBuffer&& move)
: m_type(move.m_type), m_buffer(move.m_buffer), m_memory(move.m_memory), m_size(move.m_size),
m_coherent(move.m_coherent), m_map_pointer(move.m_map_pointer), m_map_offset(move.m_map_offset),
m_map_size(move.m_map_size)
{
move.m_type = Type::Upload;
move.m_buffer = VK_NULL_HANDLE;
move.m_memory = VK_NULL_HANDLE;
move.m_size = 0;
move.m_coherent = false;
move.m_map_pointer = nullptr;
move.m_map_offset = 0;
move.m_map_size = 0;
}
StagingBuffer::~StagingBuffer()
{
if (IsValid())
Destroy(true);
}
StagingBuffer& StagingBuffer::operator=(StagingBuffer&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_type, move.m_type);
std::swap(m_buffer, move.m_buffer);
std::swap(m_memory, move.m_memory);
std::swap(m_size, move.m_size);
std::swap(m_coherent, move.m_coherent);
std::swap(m_map_pointer, move.m_map_pointer);
std::swap(m_map_offset, move.m_map_offset);
std::swap(m_map_size, move.m_map_size);
return *this;
}
bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size)
{
m_map_offset = offset;
if (size == VK_WHOLE_SIZE)
m_map_size = m_size - offset;
else
m_map_size = size;
Assert(!m_map_pointer);
Assert(m_map_offset + m_map_size <= m_size);
void* map_pointer;
VkResult res = vkMapMemory(g_vulkan_context->GetDevice(), m_memory, m_map_offset, m_map_size, 0, &map_pointer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
return false;
}
m_map_pointer = reinterpret_cast<char*>(map_pointer);
return true;
}
void StagingBuffer::Unmap()
{
Assert(m_map_pointer);
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
m_map_pointer = nullptr;
m_map_offset = 0;
m_map_size = 0;
}
void StagingBuffer::FlushCPUCache(VkDeviceSize offset, VkDeviceSize size)
{
Assert(offset >= m_map_offset);
if (m_coherent)
return;
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory, offset - m_map_offset, size};
vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
void StagingBuffer::InvalidateGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits dest_access_flags,
VkPipelineStageFlagBits dest_pipeline_stage, VkDeviceSize offset,
VkDeviceSize size)
{
if (m_coherent)
return;
Assert((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_HOST_WRITE_BIT, dest_access_flags, offset, size,
VK_PIPELINE_STAGE_HOST_BIT, dest_pipeline_stage);
}
void StagingBuffer::PrepareForGPUWrite(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset,
VkDeviceSize size)
{
if (m_coherent)
return;
Assert((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, 0, dst_access_flags, offset, size,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, dst_pipeline_stage);
}
void StagingBuffer::FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits src_access_flags,
VkPipelineStageFlagBits src_pipeline_stage, VkDeviceSize offset, VkDeviceSize size)
{
if (m_coherent)
return;
Assert((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, src_access_flags, VK_ACCESS_HOST_READ_BIT, offset, size,
src_pipeline_stage, VK_PIPELINE_STAGE_HOST_BIT);
}
void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size)
{
Assert(offset >= m_map_offset);
if (m_coherent)
return;
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory, offset - m_map_offset, size};
vkInvalidateMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches)
{
Assert((offset + size) <= m_size);
Assert(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
if (invalidate_caches)
InvalidateCPUCache(offset, size);
memcpy(data, m_map_pointer + (offset - m_map_offset), size);
}
void StagingBuffer::Write(VkDeviceSize offset, const void* data, size_t size, bool invalidate_caches)
{
Assert((offset + size) <= m_size);
Assert(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
memcpy(m_map_pointer + (offset - m_map_offset), data, size);
if (invalidate_caches)
FlushCPUCache(offset, size);
}
bool StagingBuffer::AllocateBuffer(Type type, VkDeviceSize size, VkBufferUsageFlags usage, VkBuffer* out_buffer,
VkDeviceMemory* out_memory, bool* out_coherent)
{
VkBufferCreateInfo buffer_create_info = {
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkBufferCreateFlags flags
size, // VkDeviceSize size
usage, // VkBufferUsageFlags usage
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
0, // uint32_t queueFamilyIndexCount
nullptr // const uint32_t* pQueueFamilyIndices
};
VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, out_buffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return false;
}
VkMemoryRequirements requirements;
vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), *out_buffer, &requirements);
u32 type_index;
if (type == Type::Upload)
type_index = g_vulkan_context->GetUploadMemoryType(requirements.memoryTypeBits, out_coherent);
else
type_index = g_vulkan_context->GetReadbackMemoryType(requirements.memoryTypeBits, out_coherent);
VkMemoryAllocateInfo memory_allocate_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
requirements.size, // VkDeviceSize allocationSize
type_index // uint32_t memoryTypeIndex
};
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, out_memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), *out_buffer, nullptr);
return false;
}
res = vkBindBufferMemory(g_vulkan_context->GetDevice(), *out_buffer, *out_memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), *out_buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), *out_memory, nullptr);
return false;
}
return true;
}
bool StagingBuffer::Create(Type type, VkDeviceSize size, VkBufferUsageFlags usage)
{
if (!AllocateBuffer(type, size, usage, &m_buffer, &m_memory, &m_coherent))
return false;
m_type = type;
m_size = size;
return true;
}
void StagingBuffer::Destroy(bool defer /* = true */)
{
if (!IsValid())
return;
// Unmap before destroying
if (m_map_pointer)
Unmap();
if (defer)
g_vulkan_context->DeferBufferDestruction(m_buffer);
else
vkDestroyBuffer(g_vulkan_context->GetDevice(), m_buffer, nullptr);
if (defer)
g_vulkan_context->DeferDeviceMemoryDestruction(m_memory);
else
vkFreeMemory(g_vulkan_context->GetDevice(), m_memory, nullptr);
m_type = Type::Upload;
m_buffer = VK_NULL_HANDLE;
m_memory = VK_NULL_HANDLE;
m_size = 0;
m_coherent = false;
m_map_pointer = nullptr;
m_map_offset = 0;
m_map_size = 0;
}
} // namespace Vulkan
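
A sketch of the readback flow these helpers are designed around; the copy command in the middle is assumed to be recorded by the caller:

Vulkan::StagingBuffer readback;
if (!readback.Create(Vulkan::StagingBuffer::Type::Readback, 4096, VK_BUFFER_USAGE_TRANSFER_DST_BIT))
return;
VkCommandBuffer cmdbuf = g_vulkan_context->GetCurrentCommandBuffer();
// ... record a vkCmdCopy* writing into readback.GetBuffer() here ...
// Make the GPU write visible to the host before the fence wait.
readback.FlushGPUCache(cmdbuf, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
g_vulkan_context->ExecuteCommandBuffer(true); // submit and wait
u32 value;
if (readback.Map())
{
readback.Read(0, &value, sizeof(value), /*invalidate_caches=*/true);
readback.Unmap();
}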

View file

@ -0,0 +1,90 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#pragma once
#include "../types.h"
#include "vulkan_loader.h"
#include <memory>
namespace Vulkan {
class StagingBuffer
{
public:
enum class Type
{
Upload,
Readback,
Mutable
};
StagingBuffer();
StagingBuffer(StagingBuffer&& move);
StagingBuffer(const StagingBuffer&) = delete;
virtual ~StagingBuffer();
StagingBuffer& operator=(StagingBuffer&& move);
StagingBuffer& operator=(const StagingBuffer&) = delete;
ALWAYS_INLINE Type GetType() const { return m_type; }
ALWAYS_INLINE VkDeviceSize GetSize() const { return m_size; }
ALWAYS_INLINE VkBuffer GetBuffer() const { return m_buffer; }
ALWAYS_INLINE bool IsMapped() const { return m_map_pointer != nullptr; }
ALWAYS_INLINE const char* GetMapPointer() const { return m_map_pointer; }
ALWAYS_INLINE char* GetMapPointer() { return m_map_pointer; }
ALWAYS_INLINE VkDeviceSize GetMapOffset() const { return m_map_offset; }
ALWAYS_INLINE VkDeviceSize GetMapSize() const { return m_map_size; }
ALWAYS_INLINE bool IsValid() const { return (m_buffer != VK_NULL_HANDLE); }
bool Map(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
void Unmap();
// Upload part 1: Prepare for device read from the CPU side
void FlushCPUCache(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
// Upload part 2: Prepare for device read from the GPU side
// Implicit when submitting the command buffer, so rarely needed.
void InvalidateGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset = 0,
VkDeviceSize size = VK_WHOLE_SIZE);
// Readback part 0: Prepare for GPU usage (if necessary)
void PrepareForGPUWrite(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset = 0,
VkDeviceSize size = VK_WHOLE_SIZE);
// Readback part 1: Prepare for host readback from the GPU side
void FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits src_access_flags,
VkPipelineStageFlagBits src_pipeline_stage, VkDeviceSize offset = 0,
VkDeviceSize size = VK_WHOLE_SIZE);
// Readback part 2: Prepare for host readback from the CPU side
void InvalidateCPUCache(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
// offset is from the start of the buffer, not from the map offset
void Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches = true);
void Write(VkDeviceSize offset, const void* data, size_t size, bool invalidate_caches = true);
// Creates the buffer and allocates the backing device memory.
bool Create(Type type, VkDeviceSize size, VkBufferUsageFlags usage);
void Destroy(bool defer = true);
// Allocates the resources needed to create a staging buffer.
static bool AllocateBuffer(Type type, VkDeviceSize size, VkBufferUsageFlags usage, VkBuffer* out_buffer,
VkDeviceMemory* out_memory, bool* out_coherent);
protected:
Type m_type = Type::Upload;
VkBuffer m_buffer = VK_NULL_HANDLE;
VkDeviceMemory m_memory = VK_NULL_HANDLE;
VkDeviceSize m_size = 0;
bool m_coherent = false;
char* m_map_pointer = nullptr;
VkDeviceSize m_map_offset = 0;
VkDeviceSize m_map_size = 0;
};
} // namespace Vulkan
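
For the upload direction, a sketch mirroring the comments above (data and data_size are placeholders):

Vulkan::StagingBuffer upload;
if (!upload.Create(Vulkan::StagingBuffer::Type::Upload, data_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT) || !upload.Map())
return;
// Write() performs the "upload part 1" flush itself when invalidate_caches is true.
upload.Write(0, data, data_size, true);
// "Upload part 2" is implicit on command buffer submission, so InvalidateGPUCache()
// is rarely needed; record the vkCmdCopy* reading from upload.GetBuffer() and submit.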

View file

@ -0,0 +1,301 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#include "staging_texture.h"
#include "../assert.h"
#include "context.h"
#include "util.h"
namespace Vulkan {
StagingTexture::StagingTexture() = default;
StagingTexture::StagingTexture(StagingTexture&& move)
: m_staging_buffer(std::move(move.m_staging_buffer)), m_flush_fence_counter(move.m_flush_fence_counter),
m_width(move.m_width), m_height(move.m_height), m_texel_size(move.m_texel_size), m_map_stride(move.m_map_stride)
{
move.m_flush_fence_counter = 0;
move.m_width = 0;
move.m_height = 0;
move.m_texel_size = 0;
move.m_map_stride = 0;
}
StagingTexture& StagingTexture::operator=(StagingTexture&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_staging_buffer, move.m_staging_buffer);
std::swap(m_flush_fence_counter, move.m_flush_fence_counter);
std::swap(m_width, move.m_width);
std::swap(m_height, move.m_height);
std::swap(m_texel_size, move.m_texel_size);
std::swap(m_map_stride, move.m_map_stride);
return *this;
}
StagingTexture::~StagingTexture()
{
if (IsValid())
Destroy(true);
}
bool StagingTexture::Create(StagingBuffer::Type type, VkFormat format, u32 width, u32 height)
{
const u32 texel_size = Util::GetTexelSize(format);
const u32 map_stride = texel_size * width;
const u32 buffer_size = map_stride * height;
VkBufferUsageFlags usage_flags;
switch (type)
{
case StagingBuffer::Type::Readback:
usage_flags = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
break;
case StagingBuffer::Type::Upload:
usage_flags = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
break;
case StagingBuffer::Type::Mutable:
default:
usage_flags = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
break;
}
StagingBuffer new_buffer;
if (!new_buffer.Create(type, buffer_size, usage_flags) || !new_buffer.Map())
return false;
if (IsValid())
Destroy(true);
m_staging_buffer = std::move(new_buffer);
m_width = width;
m_height = height;
m_texel_size = texel_size;
m_map_stride = map_stride;
return true;
}
void StagingTexture::Destroy(bool defer /* = true */)
{
if (!IsValid())
return;
m_staging_buffer.Destroy(defer);
m_flush_fence_counter = 0;
m_width = 0;
m_height = 0;
m_texel_size = 0;
m_map_stride = 0;
}
void StagingTexture::CopyFromTexture(VkCommandBuffer command_buffer, Texture& src_texture, u32 src_x, u32 src_y,
u32 src_layer, u32 src_level, u32 dst_x, u32 dst_y, u32 width, u32 height)
{
Assert(m_staging_buffer.GetType() == StagingBuffer::Type::Readback ||
m_staging_buffer.GetType() == StagingBuffer::Type::Mutable);
Assert((src_x + width) <= src_texture.GetWidth() && (src_y + height) <= src_texture.GetHeight());
Assert((dst_x + width) <= m_width && (dst_y + height) <= m_height);
VkImageLayout old_layout = src_texture.GetLayout();
src_texture.TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
// Issue the image->buffer copy, but delay it for now.
VkBufferImageCopy image_copy = {};
const VkImageAspectFlags aspect =
Util::IsDepthFormat(src_texture.GetFormat()) ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
image_copy.bufferOffset = static_cast<VkDeviceSize>(dst_y * m_map_stride + dst_x * m_texel_size);
image_copy.bufferRowLength = m_width;
image_copy.bufferImageHeight = 0;
image_copy.imageSubresource = {aspect, src_level, src_layer, 1};
image_copy.imageOffset = {static_cast<int32_t>(src_x), static_cast<int32_t>(src_y), 0};
image_copy.imageExtent = {width, height, 1u};
vkCmdCopyImageToBuffer(command_buffer, src_texture.GetImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
m_staging_buffer.GetBuffer(), 1, &image_copy);
// Restore old source texture layout.
src_texture.TransitionToLayout(command_buffer, old_layout);
}
void StagingTexture::CopyFromTexture(Texture& src_texture, u32 src_x, u32 src_y, u32 src_layer, u32 src_level,
u32 dst_x, u32 dst_y, u32 width, u32 height)
{
CopyFromTexture(g_vulkan_context->GetCurrentCommandBuffer(), src_texture, src_x, src_y, src_layer, src_level, dst_x,
dst_y, width, height);
m_needs_flush = true;
m_flush_fence_counter = g_vulkan_context->GetCurrentFenceCounter();
}
void StagingTexture::CopyToTexture(VkCommandBuffer command_buffer, u32 src_x, u32 src_y, Texture& dst_texture,
u32 dst_x, u32 dst_y, u32 dst_layer, u32 dst_level, u32 width, u32 height)
{
Assert(m_staging_buffer.GetType() == StagingBuffer::Type::Upload ||
m_staging_buffer.GetType() == StagingBuffer::Type::Mutable);
Assert((dst_x + width) <= dst_texture.GetWidth() && (dst_y + height) <= dst_texture.GetHeight());
Assert((src_x + width) <= m_width && (src_y + height) <= m_height);
// Flush caches before copying.
m_staging_buffer.FlushCPUCache();
VkImageLayout old_layout = dst_texture.GetLayout();
dst_texture.TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
// Issue the buffer->image copy, but delay it for now.
VkBufferImageCopy image_copy = {};
image_copy.bufferOffset = static_cast<VkDeviceSize>(src_y * m_map_stride + src_x * m_texel_size);
image_copy.bufferRowLength = m_width;
image_copy.bufferImageHeight = 0;
image_copy.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, dst_level, dst_layer, 1};
image_copy.imageOffset = {static_cast<int32_t>(dst_x), static_cast<int32_t>(dst_y), 0};
image_copy.imageExtent = {width, height, 1u};
vkCmdCopyBufferToImage(command_buffer, m_staging_buffer.GetBuffer(), dst_texture.GetImage(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy);
// Restore the old destination texture layout.
dst_texture.TransitionToLayout(command_buffer, old_layout);
}
void StagingTexture::CopyToTexture(u32 src_x, u32 src_y, Texture& dst_texture, u32 dst_x, u32 dst_y, u32 dst_layer,
u32 dst_level, u32 width, u32 height)
{
CopyToTexture(g_vulkan_context->GetCurrentCommandBuffer(), src_x, src_y, dst_texture, dst_x, dst_y, dst_layer,
dst_level, width, height);
m_needs_flush = true;
m_flush_fence_counter = g_vulkan_context->GetCurrentFenceCounter();
}
bool StagingTexture::Map()
{
return m_staging_buffer.Map();
}
void StagingTexture::Unmap()
{
return m_staging_buffer.Unmap();
}
void StagingTexture::Flush()
{
if (!m_needs_flush)
return;
// Is this copy in the current command buffer?
if (g_vulkan_context->GetCurrentFenceCounter() == m_flush_fence_counter)
{
// Execute the command buffer and wait for it to finish.
g_vulkan_context->ExecuteCommandBuffer(true);
}
else
{
// Wait for the GPU to finish with it.
g_vulkan_context->WaitForFenceCounter(m_flush_fence_counter);
}
// For readback textures, invalidate the CPU cache as there is new data there.
if (m_staging_buffer.GetType() == StagingBuffer::Type::Readback ||
m_staging_buffer.GetType() == StagingBuffer::Type::Mutable)
{
m_staging_buffer.InvalidateCPUCache();
}
m_needs_flush = false;
}
void StagingTexture::ReadTexels(u32 src_x, u32 src_y, u32 width, u32 height, void* out_ptr, u32 out_stride)
{
Assert(m_staging_buffer.GetType() != StagingBuffer::Type::Upload);
if (!PrepareForAccess())
return;
Assert((src_x + width) <= m_width && (src_y + height) <= m_height);
// Offset pointer to point to start of region being copied out.
const char* current_ptr = m_staging_buffer.GetMapPointer();
current_ptr += src_y * m_map_stride;
current_ptr += src_x * m_texel_size;
// Optimal path: same dimensions, same stride.
if (src_x == 0 && width == m_width && m_map_stride == out_stride)
{
std::memcpy(out_ptr, current_ptr, m_map_stride * height);
return;
}
size_t copy_size = std::min<u32>(width * m_texel_size, m_map_stride);
char* dst_ptr = reinterpret_cast<char*>(out_ptr);
for (u32 row = 0; row < height; row++)
{
std::memcpy(dst_ptr, current_ptr, copy_size);
current_ptr += m_map_stride;
dst_ptr += out_stride;
}
}
void StagingTexture::ReadTexel(u32 x, u32 y, void* out_ptr)
{
Assert(m_staging_buffer.GetType() != StagingBuffer::Type::Upload);
if (!PrepareForAccess())
return;
Assert(x < m_width && y < m_height);
const char* src_ptr = GetMappedPointer() + y * GetMappedStride() + x * m_texel_size;
std::memcpy(out_ptr, src_ptr, m_texel_size);
}
void StagingTexture::WriteTexels(u32 dst_x, u32 dst_y, u32 width, u32 height, const void* in_ptr, u32 in_stride)
{
Assert(m_staging_buffer.GetType() != StagingBuffer::Type::Readback);
if (!PrepareForAccess())
return;
Assert((dst_x + width) <= m_width && (dst_y + height) <= m_height);
// Offset pointer to point to start of region being copied to.
char* current_ptr = GetMappedPointer();
current_ptr += dst_y * m_map_stride;
current_ptr += dst_x * m_texel_size;
// Optimal path: same dimensions, same stride.
if (dst_x == 0 && width == m_width && m_map_stride == in_stride)
{
std::memcpy(current_ptr, in_ptr, m_map_stride * height);
return;
}
size_t copy_size = std::min<u32>(width * m_texel_size, m_map_stride);
const char* src_ptr = reinterpret_cast<const char*>(in_ptr);
for (u32 row = 0; row < height; row++)
{
std::memcpy(current_ptr, src_ptr, copy_size);
current_ptr += m_map_stride;
src_ptr += in_stride;
}
}
void StagingTexture::WriteTexel(u32 x, u32 y, const void* in_ptr)
{
if (!PrepareForAccess())
return;
Assert(x < m_width && y < m_height);
char* dest_ptr = GetMappedPointer() + y * m_map_stride + x * m_texel_size;
std::memcpy(dest_ptr, in_ptr, m_texel_size);
}
bool StagingTexture::PrepareForAccess()
{
if (m_needs_flush)
{
if (IsMapped())
Unmap();
Flush();
}
return IsMapped() || Map();
}
} // namespace Vulkan
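
Putting the pieces together, a sketch of reading back an RGBA8 region (the source texture and dimensions are assumed):

Vulkan::StagingTexture readback;
if (!readback.Create(Vulkan::StagingBuffer::Type::Readback, VK_FORMAT_R8G8B8A8_UNORM, width, height))
return;
// Records the copy on the current command buffer and remembers the fence counter.
readback.CopyFromTexture(src_texture, 0, 0, 0, 0, 0, 0, width, height);
// ReadTexels() calls PrepareForAccess(), which flushes/waits and (re)maps as needed.
std::vector<u32> pixels(width * height);
readback.ReadTexels(0, 0, width, height, pixels.data(), width * sizeof(u32));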

View file

@ -0,0 +1,87 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#pragma once
#include "staging_buffer.h"
#include "texture.h"
namespace Vulkan {
class StagingTexture final
{
public:
StagingTexture();
StagingTexture(StagingTexture&& move);
StagingTexture(const StagingTexture&) = delete;
~StagingTexture();
StagingTexture& operator=(StagingTexture&& move);
StagingTexture& operator=(const StagingTexture&) = delete;
ALWAYS_INLINE bool IsValid() const { return m_staging_buffer.IsValid(); }
ALWAYS_INLINE bool IsMapped() const { return m_staging_buffer.IsMapped(); }
ALWAYS_INLINE const char* GetMappedPointer() const { return m_staging_buffer.GetMapPointer(); }
ALWAYS_INLINE char* GetMappedPointer() { return m_staging_buffer.GetMapPointer(); }
ALWAYS_INLINE u32 GetMappedStride() const { return m_map_stride; }
ALWAYS_INLINE u32 GetWidth() const { return m_width; }
ALWAYS_INLINE u32 GetHeight() const { return m_height; }
bool Create(StagingBuffer::Type type, VkFormat format, u32 width, u32 height);
void Destroy(bool defer = true);
// Copies from the GPU texture object to the staging texture, which can be mapped/read by the CPU.
// Both the source and destination rectangles must be within the bounds of the specified textures.
void CopyFromTexture(VkCommandBuffer command_buffer, Texture& src_texture, u32 src_x, u32 src_y, u32 src_layer,
u32 src_level, u32 dst_x, u32 dst_y, u32 width, u32 height);
void CopyFromTexture(Texture& src_texture, u32 src_x, u32 src_y, u32 src_layer, u32 src_level, u32 dst_x, u32 dst_y,
u32 width, u32 height);
// Copies from this staging texture to the GPU texture object.
// The specified region must be within the bounds of both textures.
void CopyToTexture(VkCommandBuffer command_buffer, u32 src_x, u32 src_y, Texture& dst_texture, u32 dst_x, u32 dst_y,
u32 dst_layer, u32 dst_level, u32 width, u32 height);
void CopyToTexture(u32 src_x, u32 src_y, Texture& dst_texture, u32 dst_x, u32 dst_y, u32 dst_layer, u32 dst_level,
u32 width, u32 height);
// Maps the texture into the CPU address space, allowing the CPU to read its contents.
// The Map call may not perform synchronization. If the contents of the staging texture
// have been updated by a CopyFromTexture call, you must call Flush() first.
// If persistent mapping is supported in the backend, this may be a no-op.
bool Map();
// Unmaps the CPU-readable copy of the texture. May be a no-op on backends which
// support persistent-mapped buffers.
void Unmap();
// Flushes pending writes from the CPU to the GPU, and reads from the GPU to the CPU.
// This may cause a command buffer flush depending on if one has occurred between the last
// call to CopyFromTexture()/CopyToTexture() and the Flush() call.
void Flush();
// Reads the specified rectangle from the staging texture to out_ptr, with the specified stride
// (length in bytes of each row). CopyFromTexture must be called first. The contents of any
// texels outside the rectangle used for CopyFromTexture are undefined.
void ReadTexels(u32 src_x, u32 src_y, u32 width, u32 height, void* out_ptr, u32 out_stride);
void ReadTexel(u32 x, u32 y, void* out_ptr);
// Copies the texels from in_ptr to the staging texture, which can be read by the GPU, with the
// specified stride (length in bytes of each row). After updating the staging texture with all
// changes, call CopyToTexture() to update the GPU copy.
void WriteTexels(u32 dst_x, u32 dst_y, u32 width, u32 height, const void* in_ptr, u32 in_stride);
void WriteTexel(u32 x, u32 y, const void* in_ptr);
private:
bool PrepareForAccess();
StagingBuffer m_staging_buffer;
u64 m_flush_fence_counter = 0;
u32 m_width = 0;
u32 m_height = 0;
u32 m_texel_size = 0;
u32 m_map_stride = 0;
bool m_needs_flush = false;
};
} // namespace Vulkan
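
And the matching texture upload path (the destination texture and pixel data are assumed):

Vulkan::StagingTexture upload;
if (!upload.Create(Vulkan::StagingBuffer::Type::Upload, VK_FORMAT_R8G8B8A8_UNORM, width, height))
return;
// WriteTexels() maps on demand; CopyToTexture() then records the buffer->image copy
// on the current command buffer.
upload.WriteTexels(0, 0, width, height, src_pixels, width * sizeof(u32));
upload.CopyToTexture(0, 0, dst_texture, 0, 0, 0, 0, width, height);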

View file

@ -0,0 +1,363 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#include "stream_buffer.h"
#include "../align.h"
#include "../assert.h"
#include "../log.h"
#include "context.h"
#include "util.h"
Log_SetChannel(Vulkan::StreamBuffer);
namespace Vulkan {
StreamBuffer::StreamBuffer() = default;
StreamBuffer::StreamBuffer(StreamBuffer&& move)
: m_usage(move.m_usage), m_size(move.m_size), m_current_offset(move.m_current_offset),
m_current_space(move.m_current_space), m_current_gpu_position(move.m_current_gpu_position), m_buffer(move.m_buffer),
m_memory(move.m_memory), m_host_pointer(move.m_host_pointer), m_tracked_fences(std::move(move.m_tracked_fences)),
m_coherent_mapping(move.m_coherent_mapping)
{
// Reset the moved-from object so its destructor does not free the transferred handles.
move.m_buffer = VK_NULL_HANDLE;
move.m_memory = VK_NULL_HANDLE;
move.m_host_pointer = nullptr;
move.m_size = move.m_current_offset = move.m_current_space = move.m_current_gpu_position = 0;
}
StreamBuffer::~StreamBuffer()
{
if (IsValid())
Destroy(true);
}
StreamBuffer& StreamBuffer::operator=(StreamBuffer&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_usage, move.m_usage);
std::swap(m_size, move.m_size);
std::swap(m_current_offset, move.m_current_offset);
std::swap(m_current_space, move.m_current_space);
std::swap(m_current_gpu_position, move.m_current_gpu_position);
std::swap(m_buffer, move.m_buffer);
std::swap(m_memory, move.m_memory);
std::swap(m_host_pointer, move.m_host_pointer);
std::swap(m_tracked_fences, move.m_tracked_fences);
std::swap(m_coherent_mapping, move.m_coherent_mapping);
return *this;
}
bool StreamBuffer::Create(VkBufferUsageFlags usage, u32 size)
{
// Create the buffer descriptor
VkBufferCreateInfo buffer_create_info = {
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkBufferCreateFlags flags
static_cast<VkDeviceSize>(size), // VkDeviceSize size
usage, // VkBufferUsageFlags usage
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
0, // uint32_t queueFamilyIndexCount
nullptr // const uint32_t* pQueueFamilyIndices
};
VkBuffer buffer = VK_NULL_HANDLE;
VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return false;
}
// Get memory requirements (types etc) for this buffer
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements);
// Aim for a coherent mapping if possible.
u32 memory_type_index =
g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &m_coherent_mapping);
// Allocate memory for backing this buffer
VkMemoryAllocateInfo memory_allocate_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
memory_requirements.size, // VkDeviceSize allocationSize
memory_type_index // uint32_t memoryTypeIndex
};
VkDeviceMemory memory = VK_NULL_HANDLE;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
return false;
}
// Bind memory to buffer
res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
return false;
}
// Map this buffer into user-space
void* mapped_ptr = nullptr;
res = vkMapMemory(g_vulkan_context->GetDevice(), memory, 0, size, 0, &mapped_ptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
return false;
}
// Unmap current host pointer (if there was a previous buffer)
if (m_host_pointer)
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
if (IsValid())
Destroy(true);
// Replace with the new buffer
m_usage = usage;
m_size = size;
m_buffer = buffer;
m_memory = memory;
m_host_pointer = reinterpret_cast<u8*>(mapped_ptr);
m_current_offset = 0;
m_current_gpu_position = 0;
m_tracked_fences.clear();
return true;
}
void StreamBuffer::Destroy(bool defer)
{
if (m_host_pointer)
{
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
m_host_pointer = nullptr;
}
if (m_buffer != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferBufferDestruction(m_buffer);
else
vkDestroyBuffer(g_vulkan_context->GetDevice(), m_buffer, nullptr);
m_buffer = VK_NULL_HANDLE;
}
if (m_memory != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferDeviceMemoryDestruction(m_memory);
else
vkFreeMemory(g_vulkan_context->GetDevice(), m_memory, nullptr);
m_memory = VK_NULL_HANDLE;
}
}
bool StreamBuffer::ReserveMemory(u32 num_bytes, u32 alignment)
{
const u32 required_bytes = num_bytes + alignment;
// Check for sane allocations
if (required_bytes > m_size)
{
Log_ErrorPrintf("Attempting to allocate %u bytes from a %u byte stream buffer", static_cast<u32>(num_bytes),
static_cast<u32>(m_size));
Panic("Stream buffer overflow");
return false;
}
// Is the GPU behind or up to date with our current offset?
UpdateCurrentFencePosition();
if (m_current_offset >= m_current_gpu_position)
{
const u32 remaining_bytes = m_size - m_current_offset;
if (required_bytes <= remaining_bytes)
{
// Place at the current position, after the GPU position.
m_current_offset = Common::AlignUp(m_current_offset, alignment);
m_current_space = m_size - m_current_offset;
return true;
}
// Check for space at the start of the buffer
// We use < here because we don't want to have the case of m_current_offset ==
// m_current_gpu_position. That would mean the code above would assume the
// GPU has caught up to us, which it hasn't.
if (required_bytes < m_current_gpu_position)
{
// Reset offset to zero, since we're allocating behind the gpu now
m_current_offset = 0;
m_current_space = m_current_gpu_position;
return true;
}
}
// Is the GPU ahead of our current offset?
if (m_current_offset < m_current_gpu_position)
{
// We have from m_current_offset..m_current_gpu_position space to use.
const u32 remaining_bytes = m_current_gpu_position - m_current_offset;
if (required_bytes < remaining_bytes)
{
// Place at the current position, since this is still behind the GPU.
m_current_offset = Common::AlignUp(m_current_offset, alignment);
m_current_space = m_current_gpu_position - m_current_offset;
return true;
}
}
// Can we find a fence to wait on that will give us enough memory?
if (WaitForClearSpace(required_bytes))
{
const u32 align_diff = Common::AlignUp(m_current_offset, alignment) - m_current_offset;
m_current_offset += align_diff;
m_current_space -= align_diff;
return true;
}
// We tried everything we could, and still couldn't get anything. This means that too much space
// in the buffer is being used by the command buffer currently being recorded. Therefore, the
// only option is to execute it, and wait until it's done.
return false;
}
void StreamBuffer::CommitMemory(u32 final_num_bytes)
{
Assert((m_current_offset + final_num_bytes) <= m_size);
Assert(final_num_bytes <= m_current_space);
// For non-coherent mappings, flush the memory range
if (!m_coherent_mapping)
{
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory, m_current_offset,
final_num_bytes};
vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
m_current_offset += final_num_bytes;
m_current_space -= final_num_bytes;
}
void StreamBuffer::UpdateCurrentFencePosition()
{
// Don't create a tracking entry if the GPU is caught up with the buffer.
if (m_current_offset == m_current_gpu_position)
return;
// Has the offset changed since the last fence?
const u64 counter = g_vulkan_context->GetCurrentFenceCounter();
if (!m_tracked_fences.empty() && m_tracked_fences.back().first == counter)
{
// Still haven't executed a command buffer, so just update the offset.
m_tracked_fences.back().second = m_current_offset;
return;
}
// New buffer, so update the GPU position while we're at it.
UpdateGPUPosition();
m_tracked_fences.emplace_back(counter, m_current_offset);
}
void StreamBuffer::UpdateGPUPosition()
{
auto start = m_tracked_fences.begin();
auto end = start;
const u64 completed_counter = g_vulkan_context->GetCompletedFenceCounter();
while (end != m_tracked_fences.end() && completed_counter >= end->first)
{
m_current_gpu_position = end->second;
++end;
}
if (start != end)
m_tracked_fences.erase(start, end);
}
bool StreamBuffer::WaitForClearSpace(u32 num_bytes)
{
u32 new_offset = 0;
u32 new_space = 0;
u32 new_gpu_position = 0;
auto iter = m_tracked_fences.begin();
for (; iter != m_tracked_fences.end(); ++iter)
{
// Would this fence bring us in line with the GPU?
// This is the "last resort" case, where a command buffer execution has been forced
// after no additional data has been written to it, so we can assume that after the
// fence has been signaled the entire buffer is now consumed.
u32 gpu_position = iter->second;
if (m_current_offset == gpu_position)
{
new_offset = 0;
new_space = m_size;
new_gpu_position = 0;
break;
}
// Assuming that we wait for this fence, are we allocating in front of the GPU?
if (m_current_offset > gpu_position)
{
// This would suggest the GPU has now followed us and wrapped around, so we have
// m_current_offset..m_size free, as well as 0..gpu_position.
const u32 remaining_space_after_offset = m_size - m_current_offset;
if (remaining_space_after_offset >= num_bytes)
{
// Switch to allocating in front of the GPU, using the remainder of the buffer.
new_offset = m_current_offset;
new_space = m_size - m_current_offset;
new_gpu_position = gpu_position;
break;
}
// We can wrap around to the start, behind the GPU, if there is enough space.
// We use > here because otherwise we'd end up lining up with the GPU, and then the
// allocator would assume that the GPU has consumed what we just wrote.
if (gpu_position > num_bytes)
{
new_offset = 0;
new_space = gpu_position;
new_gpu_position = gpu_position;
break;
}
}
else
{
// We're currently allocating behind the GPU. This would give us between the current
// offset and the GPU position worth of space to work with. Again, > because we can't
// align the GPU position with the buffer offset.
u32 available_space_inbetween = gpu_position - m_current_offset;
if (available_space_inbetween > num_bytes)
{
// Leave the offset as-is, but update the GPU position.
new_offset = m_current_offset;
new_space = gpu_position - m_current_offset;
new_gpu_position = gpu_position;
break;
}
}
}
// Did any fences satisfy this condition?
// Has the command buffer been executed yet? If not, the caller should execute it.
if (iter == m_tracked_fences.end() || iter->first == g_vulkan_context->GetCurrentFenceCounter())
return false;
// Wait until this fence is signaled. This will fire the callback, updating the GPU position.
g_vulkan_context->WaitForFenceCounter(iter->first);
m_tracked_fences.erase(m_tracked_fences.begin(), m_current_offset == iter->second ? m_tracked_fences.end() : ++iter);
m_current_offset = new_offset;
m_current_space = new_space;
m_current_gpu_position = new_gpu_position;
return true;
}
} // namespace Vulkan
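
The intended per-draw usage pattern for the ring buffer, including the overflow path the final comment in ReserveMemory() describes (vertex data and sizes are illustrative):

Vulkan::StreamBuffer vbo;
if (!vbo.Create(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, 1024 * 1024))
return;
const u32 upload_size = sizeof(float) * 12; // illustrative vertex data size
if (!vbo.ReserveMemory(upload_size, sizeof(float)))
{
// Too much of the buffer is held by the in-flight command buffer:
// submit it, let the fences retire, and try once more.
g_vulkan_context->ExecuteCommandBuffer(false);
if (!vbo.ReserveMemory(upload_size, sizeof(float)))
Panic("Failed to reserve stream buffer space");
}
const u32 draw_offset = vbo.GetCurrentOffset();
std::memcpy(vbo.GetCurrentHostPointer(), vertex_data, upload_size); // vertex_data assumed
vbo.CommitMemory(upload_size);
// draw_offset is then used as the vertex buffer bind offset for the draw.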

View file

@ -0,0 +1,66 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#pragma once
#include "../types.h"
#include "vulkan_loader.h"
#include <deque>
#include <memory>
namespace Vulkan {
class StreamBuffer
{
public:
StreamBuffer();
StreamBuffer(StreamBuffer&& move);
StreamBuffer(const StreamBuffer&) = delete;
~StreamBuffer();
StreamBuffer& operator=(StreamBuffer&& move);
StreamBuffer& operator=(const StreamBuffer&) = delete;
ALWAYS_INLINE bool IsValid() const { return (m_buffer != VK_NULL_HANDLE); }
ALWAYS_INLINE VkBuffer GetBuffer() const { return m_buffer; }
ALWAYS_INLINE const VkBuffer* GetBufferPointer() const { return &m_buffer; }
ALWAYS_INLINE VkDeviceMemory GetDeviceMemory() const { return m_memory; }
ALWAYS_INLINE void* GetHostPointer() const { return m_host_pointer; }
ALWAYS_INLINE void* GetCurrentHostPointer() const { return m_host_pointer + m_current_offset; }
ALWAYS_INLINE u32 GetCurrentSize() const { return m_size; }
ALWAYS_INLINE u32 GetCurrentSpace() const { return m_current_space; }
ALWAYS_INLINE u32 GetCurrentOffset() const { return m_current_offset; }
bool Create(VkBufferUsageFlags usage, u32 size);
void Destroy(bool defer);
bool ReserveMemory(u32 num_bytes, u32 alignment);
void CommitMemory(u32 final_num_bytes);
private:
bool AllocateBuffer(VkBufferUsageFlags usage, u32 size);
void UpdateCurrentFencePosition();
void UpdateGPUPosition();
// Waits for as many fences as needed to allocate num_bytes bytes from the buffer.
bool WaitForClearSpace(u32 num_bytes);
VkBufferUsageFlags m_usage = 0;
u32 m_size = 0;
u32 m_current_offset = 0;
u32 m_current_space = 0;
u32 m_current_gpu_position = 0;
VkBuffer m_buffer = VK_NULL_HANDLE;
VkDeviceMemory m_memory = VK_NULL_HANDLE;
u8* m_host_pointer = nullptr;
// List of fences and the corresponding positions in the buffer
std::deque<std::pair<u64, u32>> m_tracked_fences;
bool m_coherent_mapping = false;
};
} // namespace Vulkan
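A minimal usage sketch for this class, assuming an initialized g_vulkan_context and assuming a Context::ExecuteCommandBuffer() method that submits the current command buffer; the retry-after-submit step follows the "caller should execute it" note in WaitForClearSpace(), and the include paths are assumptions.

// Sketch only, not part of this commit.
#include "common/vulkan/context.h"       // assumed include paths
#include "common/vulkan/stream_buffer.h"
#include <cstring>

bool UploadVertices(VkCommandBuffer cmdbuf, Vulkan::StreamBuffer& vbo, const void* vertices, u32 bytes)
{
  if (!vbo.ReserveMemory(bytes, 16))
  {
    // Buffer full: submit the current command buffer so the GPU can advance, then retry.
    g_vulkan_context->ExecuteCommandBuffer(false); // assumed Context API
    if (!vbo.ReserveMemory(bytes, 16))
      return false;
  }

  const VkDeviceSize bind_offset = vbo.GetCurrentOffset();
  std::memcpy(vbo.GetCurrentHostPointer(), vertices, bytes);
  vbo.CommitMemory(bytes);
  vkCmdBindVertexBuffers(cmdbuf, 0, 1, vbo.GetBufferPointer(), &bind_offset);
  return true;
}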

src/common/vulkan/swap_chain.cpp Normal file
@ -0,0 +1,528 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#include "swap_chain.h"
#include "../assert.h"
#include "../log.h"
#include "context.h"
#include "util.h"
#include <algorithm>
#include <array>
Log_SetChannel(Vulkan::SwapChain);
#if defined(VK_USE_PLATFORM_XLIB_KHR)
#include <X11/Xlib.h>
#endif
namespace Vulkan {
SwapChain::SwapChain(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync)
: m_wi(wi), m_surface(surface), m_vsync_enabled(vsync)
{
}
SwapChain::~SwapChain()
{
DestroySemaphores();
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
}
VkSurfaceKHR SwapChain::CreateVulkanSurface(VkInstance instance, const WindowInfo& wi)
{
#if defined(VK_USE_PLATFORM_WIN32_KHR)
if (wi.type == WindowInfo::Type::Win32)
{
VkWin32SurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkWin32SurfaceCreateFlagsKHR flags
nullptr, // HINSTANCE hinstance
reinterpret_cast<HWND>(wi.window_handle) // HWND hwnd
};
VkSurfaceKHR surface;
VkResult res = vkCreateWin32SurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateWin32SurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_XLIB_KHR)
if (wi.type == WindowInfo::Type::X11)
{
VkXlibSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkXlibSurfaceCreateFlagsKHR flags
static_cast<Display*>(wi.display_connection), // Display* dpy
reinterpret_cast<Window>(wi.window_handle) // Window window
};
VkSurfaceKHR surface;
VkResult res = vkCreateXlibSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateXlibSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
if (wi.type == WindowInfo::Type::Android)
{
VkAndroidSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAndroidSurfaceCreateFlagsKHR flags
reinterpret_cast<ANativeWindow*>(wi.window_handle) // ANativeWindow* window
};
VkSurfaceKHR surface;
VkResult res = vkCreateAndroidSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateAndroidSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_METAL_EXT)
if (wi.type == WindowInfo::Type::MacOS)
{
// TODO: Create Metal layer
VkMetalSurfaceCreateInfoEXT surface_create_info = {VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT, nullptr, 0,
static_cast<const CAMetalLayer*>(wi.window_handle)};
VkSurfaceKHR surface;
VkResult res = vkCreateMetalSurfaceEXT(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateMetalSurfaceEXT failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
return VK_NULL_HANDLE;
}
std::unique_ptr<SwapChain> SwapChain::Create(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync)
{
std::unique_ptr<SwapChain> swap_chain = std::make_unique<SwapChain>(wi, surface, vsync);
if (!swap_chain->CreateSwapChain() || !swap_chain->SetupSwapChainImages() || !swap_chain->CreateSemaphores())
return nullptr;
return swap_chain;
}
bool SwapChain::SelectSurfaceFormat()
{
u32 format_count;
VkResult res =
vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, nullptr);
if (res != VK_SUCCESS || format_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count,
surface_formats.data());
Assert(res == VK_SUCCESS);
// If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA
if (surface_formats[0].format == VK_FORMAT_UNDEFINED)
{
m_surface_format.format = VK_FORMAT_R8G8B8A8_UNORM;
m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return true;
}
// Try to find a suitable format.
for (const VkSurfaceFormatKHR& surface_format : surface_formats)
{
// Some drivers seem to return a SRGB format here (Intel Mesa).
// This results in gamma correction when presenting to the screen, which we don't want.
// Use a linear format instead, if this is the case.
m_surface_format.format = Util::GetLinearFormat(surface_format.format);
m_surface_format.colorSpace = surface_format.colorSpace;
return true;
}
Panic("Failed to find a suitable format for swap chain buffers.");
return false;
}
bool SwapChain::SelectPresentMode()
{
VkResult res;
u32 mode_count;
res =
vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, nullptr);
if (res != VK_SUCCESS || mode_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfacePresentModesKHR failed: ");
return false;
}
std::vector<VkPresentModeKHR> present_modes(mode_count);
res = vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count,
present_modes.data());
Assert(res == VK_SUCCESS);
// Checks whether a particular present mode is supported; returns true if it is.
auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) {
auto it = std::find_if(present_modes.begin(), present_modes.end(),
[check_mode](VkPresentModeKHR mode) { return check_mode == mode; });
return it != present_modes.end();
};
// If vsync is enabled, use VK_PRESENT_MODE_FIFO_KHR.
// This check should not fail with conforming drivers, as the FIFO present mode is mandated by
// the specification (VK_KHR_swapchain). In case it isn't though, fall through to any other mode.
if (m_vsync_enabled && CheckForMode(VK_PRESENT_MODE_FIFO_KHR))
{
m_present_mode = VK_PRESENT_MODE_FIFO_KHR;
return true;
}
// Prefer screen-tearing, if possible, for lowest latency.
if (CheckForMode(VK_PRESENT_MODE_IMMEDIATE_KHR))
{
m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
return true;
}
// Otherwise, prefer mailbox mode ("optimized" vsync) over regular FIFO vsync.
if (CheckForMode(VK_PRESENT_MODE_MAILBOX_KHR))
{
m_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
return true;
}
// Fall back to whatever is available.
m_present_mode = present_modes[0];
return true;
}
bool SwapChain::CreateSwapChain()
{
// Look up surface properties to determine image count and dimensions
VkSurfaceCapabilitiesKHR surface_capabilities;
VkResult res =
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &surface_capabilities);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed: ");
return false;
}
// Select swap chain format and present mode
if (!SelectSurfaceFormat() || !SelectPresentMode())
return false;
// Select the number of images in the swap chain; we prefer one extra buffer to work on in the background.
u32 image_count = surface_capabilities.minImageCount + 1u;
// maxImageCount can be zero, in which case there isn't an upper limit on the number of buffers.
if (surface_capabilities.maxImageCount > 0)
image_count = std::min(image_count, surface_capabilities.maxImageCount);
// Determine the dimensions of the swap chain. A currentExtent of 0xFFFFFFFF indicates
// the surface size will be determined by the extent we specify here.
VkExtent2D size = surface_capabilities.currentExtent;
if (size.width == UINT32_MAX)
{
size.width = m_wi.surface_width;
size.height = m_wi.surface_height;
}
size.width =
std::clamp(size.width, surface_capabilities.minImageExtent.width, surface_capabilities.maxImageExtent.width);
size.height =
std::clamp(size.height, surface_capabilities.minImageExtent.height, surface_capabilities.maxImageExtent.height);
// Prefer identity transform if possible
VkSurfaceTransformFlagBitsKHR transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
if (!(surface_capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR))
transform = surface_capabilities.currentTransform;
// Select swap chain usage flags; we only need a colour attachment.
VkImageUsageFlags image_usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if (!(surface_capabilities.supportedUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT))
{
Log_ErrorPrintf("Vulkan: Swap chain does not support usage as color attachment");
return false;
}
// Store the old/current swap chain when recreating for resize
VkSwapchainKHR old_swap_chain = m_swap_chain;
m_swap_chain = VK_NULL_HANDLE;
// Now we can actually create the swap chain
VkSwapchainCreateInfoKHR swap_chain_info = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
nullptr,
0,
m_surface,
image_count,
m_surface_format.format,
m_surface_format.colorSpace,
size,
1u,
image_usage,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
transform,
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
m_present_mode,
VK_TRUE,
old_swap_chain};
std::array<uint32_t, 2> indices = {{
g_vulkan_context->GetGraphicsQueueFamilyIndex(),
g_vulkan_context->GetPresentQueueFamilyIndex(),
}};
if (g_vulkan_context->GetGraphicsQueueFamilyIndex() != g_vulkan_context->GetPresentQueueFamilyIndex())
{
swap_chain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swap_chain_info.queueFamilyIndexCount = 2;
swap_chain_info.pQueueFamilyIndices = indices.data();
}
res = vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr, &m_swap_chain);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSwapchainKHR failed: ");
return false;
}
// Now destroy the old swap chain, since it's been recreated.
// We can do this immediately since all work should have been completed before calling resize.
if (old_swap_chain != VK_NULL_HANDLE)
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), old_swap_chain, nullptr);
m_width = size.width;
m_height = size.height;
return true;
}
bool SwapChain::SetupSwapChainImages()
{
Assert(m_images.empty());
u32 image_count;
VkResult res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
return false;
}
std::vector<VkImage> images(image_count);
res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, images.data());
Assert(res == VK_SUCCESS);
m_load_render_pass = g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED,
VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD);
m_clear_render_pass = g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED,
VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR);
if (m_load_render_pass == VK_NULL_HANDLE || m_clear_render_pass == VK_NULL_HANDLE)
{
Panic("Failed to get swap chain render passes.");
return false;
}
m_images.reserve(image_count);
for (u32 i = 0; i < image_count; i++)
{
SwapChainImage image;
image.image = images[i];
// Create texture object, which creates a view of the backbuffer
if (!image.texture.Adopt(image.image, VK_IMAGE_VIEW_TYPE_2D, m_width, m_height, 1, 1, m_surface_format.format,
VK_SAMPLE_COUNT_1_BIT))
{
return false;
}
image.framebuffer = image.texture.CreateFramebuffer(m_load_render_pass);
if (image.framebuffer == VK_NULL_HANDLE)
return false;
m_images.emplace_back(std::move(image));
}
return true;
}
void SwapChain::DestroySwapChainImages()
{
for (auto& it : m_images)
{
// Images themselves are cleaned up by the swap chain object
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), it.framebuffer, nullptr);
}
m_images.clear();
}
void SwapChain::DestroySwapChain()
{
if (m_swap_chain == VK_NULL_HANDLE)
return;
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), m_swap_chain, nullptr);
m_swap_chain = VK_NULL_HANDLE;
}
VkResult SwapChain::AcquireNextImage()
{
VkResult res = vkAcquireNextImageKHR(g_vulkan_context->GetDevice(), m_swap_chain, UINT64_MAX,
m_image_available_semaphore, VK_NULL_HANDLE, &m_current_image);
if (res != VK_SUCCESS && res != VK_ERROR_OUT_OF_DATE_KHR && res != VK_SUBOPTIMAL_KHR)
LOG_VULKAN_ERROR(res, "vkAcquireNextImageKHR failed: ");
return res;
}
bool SwapChain::ResizeSwapChain(u32 new_width /* = 0 */, u32 new_height /* = 0 */)
{
DestroySwapChainImages();
if (new_width != 0 && new_height != 0)
{
m_wi.surface_width = new_width;
m_wi.surface_height = new_height;
}
if (!CreateSwapChain() || !SetupSwapChainImages())
{
Panic("Failed to re-configure swap chain images, this is fatal (for now)");
return false;
}
return true;
}
bool SwapChain::RecreateSwapChain()
{
DestroySwapChainImages();
DestroySwapChain();
if (!CreateSwapChain() || !SetupSwapChainImages())
{
Panic("Failed to re-configure swap chain images, this is fatal (for now)");
return false;
}
return true;
}
bool SwapChain::SetVSync(bool enabled)
{
if (m_vsync_enabled == enabled)
return true;
// Recreate the swap chain with the new present mode.
m_vsync_enabled = enabled;
return RecreateSwapChain();
}
bool SwapChain::RecreateSurface(const WindowInfo& new_wi)
{
// Destroy the old swap chain, images, and surface.
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
// Re-create the surface with the new native handle
m_wi = new_wi;
m_surface = CreateVulkanSurface(g_vulkan_context->GetVulkanInstance(), m_wi);
if (m_surface == VK_NULL_HANDLE)
return false;
// The validation layers get angry at us if we don't call this before creating the swapchain.
VkBool32 present_supported = VK_TRUE;
VkResult res =
vkGetPhysicalDeviceSurfaceSupportKHR(g_vulkan_context->GetPhysicalDevice(),
g_vulkan_context->GetPresentQueueFamilyIndex(), m_surface, &present_supported);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: ");
return false;
}
if (!present_supported)
{
Panic("Recreated surface does not support presenting.");
return false;
}
// Finally re-create the swap chain
if (!CreateSwapChain() || !SetupSwapChainImages())
return false;
return true;
}
void SwapChain::DestroySurface()
{
vkDestroySurfaceKHR(g_vulkan_context->GetVulkanInstance(), m_surface, nullptr);
m_surface = VK_NULL_HANDLE;
}
bool SwapChain::CreateSemaphores()
{
// Create two semaphores: one signalled when a swap chain image has been acquired, and one
// signalled when rendering completes, which present waits on.
VkSemaphoreCreateInfo semaphore_info = {
VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0 // VkSemaphoreCreateFlags flags
};
VkResult res;
if ((res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr,
&m_image_available_semaphore)) != VK_SUCCESS ||
(res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr,
&m_rendering_finished_semaphore)) != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: ");
return false;
}
return true;
}
void SwapChain::DestroySemaphores()
{
if (m_image_available_semaphore != VK_NULL_HANDLE)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), m_image_available_semaphore, nullptr);
m_image_available_semaphore = VK_NULL_HANDLE;
}
if (m_rendering_finished_semaphore != VK_NULL_HANDLE)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), m_rendering_finished_semaphore, nullptr);
m_rendering_finished_semaphore = VK_NULL_HANDLE;
}
}
} // namespace Vulkan

src/common/vulkan/swap_chain.h Normal file
@ -0,0 +1,98 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#pragma once
#include "../types.h"
#include "../window_info.h"
#include "texture.h"
#include "vulkan_loader.h"
#include <memory>
#include <vector>
namespace Vulkan {
class SwapChain
{
public:
SwapChain(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync);
~SwapChain();
// Creates a vulkan-renderable surface for the specified window handle.
static VkSurfaceKHR CreateVulkanSurface(VkInstance instance, const WindowInfo& wi);
// Create a new swap chain from a pre-existing surface.
static std::unique_ptr<SwapChain> Create(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync);
ALWAYS_INLINE VkSurfaceKHR GetSurface() const { return m_surface; }
ALWAYS_INLINE VkSurfaceFormatKHR GetSurfaceFormat() const { return m_surface_format; }
ALWAYS_INLINE VkFormat GetTextureFormat() const { return m_texture_format; }
ALWAYS_INLINE bool IsVSyncEnabled() const { return m_vsync_enabled; }
ALWAYS_INLINE VkSwapchainKHR GetSwapChain() const { return m_swap_chain; }
ALWAYS_INLINE u32 GetWidth() const { return m_width; }
ALWAYS_INLINE u32 GetHeight() const { return m_height; }
ALWAYS_INLINE u32 GetCurrentImageIndex() const { return m_current_image; }
ALWAYS_INLINE u32 GetImageCount() const { return static_cast<u32>(m_images.size()); }
ALWAYS_INLINE VkImage GetCurrentImage() const { return m_images[m_current_image].image; }
ALWAYS_INLINE const Texture& GetCurrentTexture() const { return m_images[m_current_image].texture; }
ALWAYS_INLINE Texture& GetCurrentTexture() { return m_images[m_current_image].texture; }
ALWAYS_INLINE VkFramebuffer GetCurrentFramebuffer() const { return m_images[m_current_image].framebuffer; }
ALWAYS_INLINE VkRenderPass GetLoadRenderPass() const { return m_load_render_pass; }
ALWAYS_INLINE VkRenderPass GetClearRenderPass() const { return m_clear_render_pass; }
ALWAYS_INLINE VkSemaphore GetImageAvailableSemaphore() const { return m_image_available_semaphore; }
ALWAYS_INLINE VkSemaphore GetRenderingFinishedSemaphore() const { return m_rendering_finished_semaphore; }
VkResult AcquireNextImage();
bool RecreateSurface(const WindowInfo& new_wi);
bool ResizeSwapChain(u32 new_width = 0, u32 new_height = 0);
bool RecreateSwapChain();
// Change vsync enabled state. This may fail as it causes a swapchain recreation.
bool SetVSync(bool enabled);
private:
bool SelectSurfaceFormat();
bool SelectPresentMode();
bool CreateSwapChain();
void DestroySwapChain();
bool SetupSwapChainImages();
void DestroySwapChainImages();
void DestroySurface();
bool CreateSemaphores();
void DestroySemaphores();
struct SwapChainImage
{
VkImage image;
Texture texture;
VkFramebuffer framebuffer;
};
u32 m_width = 0;
u32 m_height = 0;
WindowInfo m_wi;
bool m_vsync_enabled = false;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
VkSurfaceFormatKHR m_surface_format = {};
VkPresentModeKHR m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
VkFormat m_texture_format = VK_FORMAT_UNDEFINED;
VkRenderPass m_load_render_pass = VK_NULL_HANDLE;
VkRenderPass m_clear_render_pass = VK_NULL_HANDLE;
VkSemaphore m_image_available_semaphore = VK_NULL_HANDLE;
VkSemaphore m_rendering_finished_semaphore = VK_NULL_HANDLE;
VkSwapchainKHR m_swap_chain = VK_NULL_HANDLE;
std::vector<SwapChainImage> m_images;
u32 m_current_image = 0;
};
} // namespace Vulkan
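A minimal sketch of the intended setup and per-frame flow for this class, assuming the platform layer has already filled in a WindowInfo; the include paths are assumptions.

// Sketch only, not part of this commit.
#include "common/vulkan/swap_chain.h"    // assumed include path
#include <memory>

std::unique_ptr<Vulkan::SwapChain> CreateSwapChainForWindow(VkInstance instance, const WindowInfo& wi)
{
  VkSurfaceKHR surface = Vulkan::SwapChain::CreateVulkanSurface(instance, wi);
  if (surface == VK_NULL_HANDLE)
    return nullptr;

  return Vulkan::SwapChain::Create(wi, surface, /*vsync=*/true);
}

// Per frame: acquire an image, render into GetCurrentFramebuffer(), submit waiting on
// GetImageAvailableSemaphore(), then present after GetRenderingFinishedSemaphore().
void BeginFrame(Vulkan::SwapChain* swap_chain)
{
  const VkResult res = swap_chain->AcquireNextImage();
  if (res == VK_ERROR_OUT_OF_DATE_KHR || res == VK_SUBOPTIMAL_KHR)
    swap_chain->RecreateSwapChain();
}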

src/common/vulkan/texture.cpp Normal file
@ -0,0 +1,374 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#include "texture.h"
#include "../assert.h"
#include "context.h"
#include "util.h"
#include <algorithm>
namespace Vulkan {
Texture::Texture() = default;
Texture::Texture(Texture&& move)
: m_width(move.m_width), m_height(move.m_height), m_levels(move.m_levels), m_layers(move.m_layers),
m_format(move.m_format), m_samples(move.m_samples), m_view_type(move.m_view_type), m_layout(move.m_layout),
m_image(move.m_image), m_device_memory(move.m_device_memory), m_view(move.m_view)
{
move.m_width = 0;
move.m_height = 0;
move.m_levels = 0;
move.m_layers = 0;
move.m_format = VK_FORMAT_UNDEFINED;
move.m_samples = VK_SAMPLE_COUNT_1_BIT;
move.m_view_type = VK_IMAGE_VIEW_TYPE_2D;
move.m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
move.m_image = VK_NULL_HANDLE;
move.m_device_memory = VK_NULL_HANDLE;
move.m_view = VK_NULL_HANDLE;
}
Texture::~Texture()
{
if (IsValid())
Destroy(true);
}
Vulkan::Texture& Texture::operator=(Texture&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_width, move.m_width);
std::swap(m_height, move.m_height);
std::swap(m_levels, move.m_levels);
std::swap(m_layers, move.m_layers);
std::swap(m_format, move.m_format);
std::swap(m_samples, move.m_samples);
std::swap(m_view_type, move.m_view_type);
std::swap(m_layout, move.m_layout);
std::swap(m_image, move.m_image);
std::swap(m_device_memory, move.m_device_memory);
std::swap(m_view, move.m_view);
return *this;
}
bool Texture::Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage)
{
VkImageCreateInfo image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
0,
VK_IMAGE_TYPE_2D,
format,
{width, height, 1},
levels,
layers,
samples,
tiling,
usage,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImage image = VK_NULL_HANDLE;
VkResult res = vkCreateImage(g_vulkan_context->GetDevice(), &image_info, nullptr, &image);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImage failed: ");
return false;
}
// Allocate memory to back this texture; we want device-local memory in this case
VkMemoryRequirements memory_requirements;
vkGetImageMemoryRequirements(g_vulkan_context->GetDevice(), image, &memory_requirements);
VkMemoryAllocateInfo memory_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, nullptr, memory_requirements.size,
g_vulkan_context->GetMemoryType(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)};
VkDeviceMemory device_memory;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_info, nullptr, &device_memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr);
return false;
}
res = vkBindImageMemory(g_vulkan_context->GetDevice(), image, device_memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindImageMemory failed: ");
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), device_memory, nullptr);
return false;
}
VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image,
view_type,
format,
{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY},
{Util::IsDepthFormat(format) ?
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
VkImageView view = VK_NULL_HANDLE;
res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), device_memory, nullptr);
return false;
}
if (IsValid())
Destroy(true);
m_width = width;
m_height = height;
m_levels = levels;
m_layers = layers;
m_format = format;
m_samples = samples;
m_view_type = view_type;
m_image = image;
m_device_memory = device_memory;
m_view = view;
return true;
}
bool Texture::Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels, u32 layers,
VkFormat format, VkSampleCountFlagBits samples)
{
// We only need to create the image view; this is mainly for swap chains.
VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
existing_image,
view_type,
format,
{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY},
{Util::IsDepthFormat(format) ?
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
// Memory is managed by the owner of the image.
VkImageView view = VK_NULL_HANDLE;
VkResult res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
return false;
}
if (IsValid())
Destroy(true);
m_width = width;
m_height = height;
m_levels = levels;
m_layers = layers;
m_format = format;
m_samples = samples;
m_view_type = view_type;
m_image = existing_image;
m_view = view;
return true;
}
void Texture::Destroy(bool defer /* = true */)
{
if (m_view != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferImageViewDestruction(m_view);
else
vkDestroyImageView(g_vulkan_context->GetDevice(), m_view, nullptr);
m_view = VK_NULL_HANDLE;
}
// If we don't have device memory allocated, the image is not owned by us (e.g. swapchain)
if (m_device_memory != VK_NULL_HANDLE)
{
DebugAssert(m_image != VK_NULL_HANDLE);
if (defer)
g_vulkan_context->DeferImageDestruction(m_image);
else
vkDestroyImage(g_vulkan_context->GetDevice(), m_image, nullptr);
m_image = VK_NULL_HANDLE;
if (defer)
g_vulkan_context->DeferDeviceMemoryDestruction(m_device_memory);
else
vkFreeMemory(g_vulkan_context->GetDevice(), m_device_memory, nullptr);
m_device_memory = VK_NULL_HANDLE;
}
m_width = 0;
m_height = 0;
m_levels = 0;
m_layers = 0;
m_format = VK_FORMAT_UNDEFINED;
m_samples = VK_SAMPLE_COUNT_1_BIT;
m_view_type = VK_IMAGE_VIEW_TYPE_2D;
m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
m_image = VK_NULL_HANDLE;
m_device_memory = VK_NULL_HANDLE;
m_view = VK_NULL_HANDLE;
}
void Texture::OverrideImageLayout(VkImageLayout new_layout)
{
m_layout = new_layout;
}
void Texture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout)
{
if (m_layout == new_layout)
return;
VkImageMemoryBarrier barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAccessFlags srcAccessMask
0, // VkAccessFlags dstAccessMask
m_layout, // VkImageLayout oldLayout
new_layout, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_image, // VkImage image
{static_cast<VkImageAspectFlags>(Util::IsDepthFormat(m_format) ? VK_IMAGE_ASPECT_DEPTH_BIT :
VK_IMAGE_ASPECT_COLOR_BIT),
0, m_levels, 0, m_layers} // VkImageSubresourceRange subresourceRange
};
// srcStageMask -> Stages that must complete before the barrier
// dstStageMask -> Stages that must wait for the barrier before beginning
VkPipelineStageFlags srcStageMask, dstStageMask;
switch (m_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
// Layout undefined therefore contents undefined, and we don't care what happens to it.
barrier.srcAccessMask = 0;
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_PREINITIALIZED:
// Image has been pre-initialized by the host, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_HOST_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image was being used as a color attachment, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image was being used as a depth/stencil attachment, so ensure all writes have completed.
barrier.srcAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image was being used as a shader resource, make sure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image was being used as a copy source, ensure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image was being used as a copy destination, ensure all writes have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
default:
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
}
switch (new_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
barrier.dstAccessMask = 0;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
default:
dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
break;
}
vkCmdPipelineBarrier(command_buffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &barrier);
m_layout = new_layout;
}
VkFramebuffer Texture::CreateFramebuffer(VkRenderPass render_pass)
{
const VkFramebufferCreateInfo ci = {
VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0u, render_pass, 1, &m_view, m_width, m_height, m_layers};
VkFramebuffer fb = VK_NULL_HANDLE;
VkResult res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &ci, nullptr, &fb);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
return VK_NULL_HANDLE;
}
return fb;
}
} // namespace Vulkan
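A minimal sketch of the upload pattern the barrier selection above is written for; the staging-buffer copy itself is elided, and the include path is an assumption.

// Sketch only, not part of this commit.
#include "common/vulkan/texture.h"    // assumed include path

bool CreateAndUploadTexture(VkCommandBuffer cmdbuf, Vulkan::Texture& tex, u32 width, u32 height)
{
  if (!tex.Create(width, height, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT,
                  VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
                  VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))
  {
    return false;
  }

  // UNDEFINED -> TRANSFER_DST: no previous contents to preserve.
  tex.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
  // ... vkCmdCopyBufferToImage() from a staging buffer would go here ...
  // TRANSFER_DST -> SHADER_READ_ONLY: make the transfer write visible to sampling.
  tex.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  return true;
}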

src/common/vulkan/texture.h Normal file
@ -0,0 +1,72 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#pragma once
#include "../types.h"
#include "vulkan_loader.h"
#include <memory>
namespace Vulkan {
class Texture
{
public:
Texture();
Texture(Texture&& move);
Texture(const Texture&) = delete;
~Texture();
Texture& operator=(Texture&& move);
Texture& operator=(const Texture&) = delete;
ALWAYS_INLINE bool IsValid() const { return (m_image != VK_NULL_HANDLE); }
/// An image is considered owned/managed if we control the memory.
ALWAYS_INLINE bool IsOwned() const { return (m_device_memory != VK_NULL_HANDLE); }
ALWAYS_INLINE u32 GetWidth() const { return m_width; }
ALWAYS_INLINE u32 GetHeight() const { return m_height; }
ALWAYS_INLINE u32 GetLevels() const { return m_levels; }
ALWAYS_INLINE u32 GetLayers() const { return m_layers; }
ALWAYS_INLINE VkFormat GetFormat() const { return m_format; }
ALWAYS_INLINE VkSampleCountFlagBits GetSamples() const { return m_samples; }
ALWAYS_INLINE VkImageLayout GetLayout() const { return m_layout; }
ALWAYS_INLINE VkImageViewType GetViewType() const { return m_view_type; }
ALWAYS_INLINE VkImage GetImage() const { return m_image; }
ALWAYS_INLINE VkDeviceMemory GetDeviceMemory() const { return m_device_memory; }
ALWAYS_INLINE VkImageView GetView() const { return m_view; }
bool Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage);
bool Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels, u32 layers,
VkFormat format, VkSampleCountFlagBits samples);
void Destroy(bool defer = true);
// Used when the render pass is changing the image layout, or to force it to
// VK_IMAGE_LAYOUT_UNDEFINED, if the existing contents of the image are
// irrelevant and will not be loaded.
void OverrideImageLayout(VkImageLayout new_layout);
void TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout);
VkFramebuffer CreateFramebuffer(VkRenderPass render_pass);
private:
u32 m_width = 0;
u32 m_height = 0;
u32 m_levels = 0;
u32 m_layers = 0;
VkFormat m_format = VK_FORMAT_UNDEFINED;
VkSampleCountFlagBits m_samples = VK_SAMPLE_COUNT_1_BIT;
VkImageViewType m_view_type = VK_IMAGE_VIEW_TYPE_2D;
VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImage m_image = VK_NULL_HANDLE;
VkDeviceMemory m_device_memory = VK_NULL_HANDLE;
VkImageView m_view = VK_NULL_HANDLE;
};
} // namespace Vulkan

src/common/vulkan/util.cpp Normal file
@ -0,0 +1,402 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#include "util.h"
#include "../assert.h"
#include "../log.h"
#include "../string_util.h"
#include "context.h"
#include "shader_compiler.h"
Log_SetChannel(Vulkan::Util);
namespace Vulkan {
namespace Util {
bool IsDepthFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return true;
default:
return false;
}
}
bool IsCompressedFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
return true;
default:
return false;
}
}
VkFormat GetLinearFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_R8_SRGB:
return VK_FORMAT_R8_UNORM;
case VK_FORMAT_R8G8_SRGB:
return VK_FORMAT_R8G8_UNORM;
case VK_FORMAT_R8G8B8_SRGB:
return VK_FORMAT_R8G8B8_UNORM;
case VK_FORMAT_R8G8B8A8_SRGB:
return VK_FORMAT_R8G8B8A8_UNORM;
case VK_FORMAT_B8G8R8_SRGB:
return VK_FORMAT_B8G8R8_UNORM;
case VK_FORMAT_B8G8R8A8_SRGB:
return VK_FORMAT_B8G8R8A8_UNORM;
default:
return format;
}
}
u32 GetTexelSize(VkFormat format)
{
// Only contains pixel formats we use.
switch (format)
{
case VK_FORMAT_R32_SFLOAT:
return 4;
case VK_FORMAT_D32_SFLOAT:
return 4;
case VK_FORMAT_R8G8B8A8_UNORM:
return 4;
case VK_FORMAT_B8G8R8A8_UNORM:
return 4;
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
return 8;
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
return 16;
default:
Panic("Unhandled pixel format");
return 1;
}
}
u32 GetBlockSize(VkFormat format)
{
switch (format)
{
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
return 4;
default:
return 1;
}
}
VkRect2D ClampRect2D(const VkRect2D& rect, u32 width, u32 height)
{
VkRect2D out;
out.offset.x = std::clamp(rect.offset.x, 0, static_cast<int>(width - 1));
out.offset.y = std::clamp(rect.offset.y, 0, static_cast<int>(height - 1));
out.extent.width = std::min(rect.extent.width, width - static_cast<u32>(out.offset.x));
out.extent.height = std::min(rect.extent.height, height - static_cast<u32>(out.offset.y));
return out;
}
VkBlendFactor GetAlphaBlendFactor(VkBlendFactor factor)
{
switch (factor)
{
case VK_BLEND_FACTOR_SRC_COLOR:
return VK_BLEND_FACTOR_SRC_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
case VK_BLEND_FACTOR_DST_COLOR:
return VK_BLEND_FACTOR_DST_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
default:
return factor;
}
}
void SetViewport(VkCommandBuffer command_buffer, int x, int y, int width, int height, float min_depth /*= 0.0f*/,
float max_depth /*= 1.0f*/)
{
const VkViewport vp{static_cast<float>(x),
static_cast<float>(y),
static_cast<float>(width),
static_cast<float>(height),
min_depth,
max_depth};
vkCmdSetViewport(command_buffer, 0, 1, &vp);
}
void SetScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height)
{
const VkRect2D scissor{{x, y}, {static_cast<u32>(width), static_cast<u32>(height)}};
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
}
void SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth /* = 0.0f */, float max_depth /* = 1.0f */)
{
const VkViewport vp{static_cast<float>(x),
static_cast<float>(y),
static_cast<float>(width),
static_cast<float>(height),
min_depth,
max_depth};
const VkRect2D scissor{{x, y}, {static_cast<u32>(width), static_cast<u32>(height)}};
vkCmdSetViewport(command_buffer, 0, 1, &vp);
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
}
void SafeDestroyFramebuffer(VkFramebuffer& fb)
{
if (fb != VK_NULL_HANDLE)
{
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), fb, nullptr);
fb = VK_NULL_HANDLE;
}
}
void SafeDestroyPipeline(VkPipeline& p)
{
if (p != VK_NULL_HANDLE)
{
vkDestroyPipeline(g_vulkan_context->GetDevice(), p, nullptr);
p = VK_NULL_HANDLE;
}
}
void SafeDestroyPipelineLayout(VkPipelineLayout& pl)
{
if (pl != VK_NULL_HANDLE)
{
vkDestroyPipelineLayout(g_vulkan_context->GetDevice(), pl, nullptr);
pl = VK_NULL_HANDLE;
}
}
void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl)
{
if (dsl != VK_NULL_HANDLE)
{
vkDestroyDescriptorSetLayout(g_vulkan_context->GetDevice(), dsl, nullptr);
dsl = VK_NULL_HANDLE;
}
}
void SafeDestroyBufferView(VkBufferView& bv)
{
if (bv != VK_NULL_HANDLE)
{
vkDestroyBufferView(g_vulkan_context->GetDevice(), bv, nullptr);
bv = VK_NULL_HANDLE;
}
}
void SafeDestroySampler(VkSampler& samp)
{
if (samp != VK_NULL_HANDLE)
{
vkDestroySampler(g_vulkan_context->GetDevice(), samp, nullptr);
samp = VK_NULL_HANDLE;
}
}
void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds)
{
if (ds != VK_NULL_HANDLE)
{
g_vulkan_context->FreeGlobalDescriptorSet(ds);
ds = VK_NULL_HANDLE;
}
}
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask)
{
VkBufferMemoryBarrier buffer_info = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
src_access_mask, // VkAccessFlags srcAccessMask
dst_access_mask, // VkAccessFlags dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
buffer, // VkBuffer buffer
offset, // VkDeviceSize offset
size // VkDeviceSize size
};
vkCmdPipelineBarrier(command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1, &buffer_info, 0, nullptr);
}
VkShaderModule CreateShaderModule(const u32* spv, size_t spv_word_count)
{
VkShaderModuleCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
info.codeSize = spv_word_count * sizeof(u32);
info.pCode = spv;
VkShaderModule module;
VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &info, nullptr, &module);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateShaderModule failed: ");
return VK_NULL_HANDLE;
}
return module;
}
VkShaderModule CompileAndCreateVertexShader(std::string_view source_code)
{
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileVertexShader(source_code);
if (!code)
return VK_NULL_HANDLE;
return CreateShaderModule(code->data(), code->size());
}
VkShaderModule CompileAndCreateGeometryShader(std::string_view source_code)
{
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileGeometryShader(source_code);
if (!code)
return VK_NULL_HANDLE;
return CreateShaderModule(code->data(), code->size());
}
VkShaderModule CompileAndCreateFragmentShader(std::string_view source_code)
{
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileFragmentShader(source_code);
if (!code)
return VK_NULL_HANDLE;
return CreateShaderModule(code->data(), code->size());
}
VkShaderModule CompileAndCreateComputeShader(std::string_view source_code)
{
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileComputeShader(source_code);
if (!code)
return VK_NULL_HANDLE;
return CreateShaderModule(code->data(), code->size());
}
const char* VkResultToString(VkResult res)
{
switch (res)
{
case VK_SUCCESS:
return "VK_SUCCESS";
case VK_NOT_READY:
return "VK_NOT_READY";
case VK_TIMEOUT:
return "VK_TIMEOUT";
case VK_EVENT_SET:
return "VK_EVENT_SET";
case VK_EVENT_RESET:
return "VK_EVENT_RESET";
case VK_INCOMPLETE:
return "VK_INCOMPLETE";
case VK_ERROR_OUT_OF_HOST_MEMORY:
return "VK_ERROR_OUT_OF_HOST_MEMORY";
case VK_ERROR_OUT_OF_DEVICE_MEMORY:
return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
case VK_ERROR_INITIALIZATION_FAILED:
return "VK_ERROR_INITIALIZATION_FAILED";
case VK_ERROR_DEVICE_LOST:
return "VK_ERROR_DEVICE_LOST";
case VK_ERROR_MEMORY_MAP_FAILED:
return "VK_ERROR_MEMORY_MAP_FAILED";
case VK_ERROR_LAYER_NOT_PRESENT:
return "VK_ERROR_LAYER_NOT_PRESENT";
case VK_ERROR_EXTENSION_NOT_PRESENT:
return "VK_ERROR_EXTENSION_NOT_PRESENT";
case VK_ERROR_FEATURE_NOT_PRESENT:
return "VK_ERROR_FEATURE_NOT_PRESENT";
case VK_ERROR_INCOMPATIBLE_DRIVER:
return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VK_ERROR_TOO_MANY_OBJECTS:
return "VK_ERROR_TOO_MANY_OBJECTS";
case VK_ERROR_FORMAT_NOT_SUPPORTED:
return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VK_ERROR_SURFACE_LOST_KHR:
return "VK_ERROR_SURFACE_LOST_KHR";
case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
case VK_SUBOPTIMAL_KHR:
return "VK_SUBOPTIMAL_KHR";
case VK_ERROR_OUT_OF_DATE_KHR:
return "VK_ERROR_OUT_OF_DATE_KHR";
case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
case VK_ERROR_VALIDATION_FAILED_EXT:
return "VK_ERROR_VALIDATION_FAILED_EXT";
case VK_ERROR_INVALID_SHADER_NV:
return "VK_ERROR_INVALID_SHADER_NV";
default:
return "UNKNOWN_VK_RESULT";
}
}
void LogVulkanResult(int level, const char* func_name, VkResult res, const char* msg, ...)
{
std::va_list ap;
va_start(ap, msg);
std::string real_msg = StringUtil::StdStringFromFormatV(msg, ap);
va_end(ap);
Log::Writef("Vulkan", func_name, static_cast<LOGLEVEL>(level), "(%s) %s (%d: %s)", func_name, real_msg.c_str(),
static_cast<int>(res), VkResultToString(res));
}
} // namespace Util
} // namespace Vulkan
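A minimal sketch of the shader helpers above; the GLSL source and the include path are placeholders.

// Sketch only, not part of this commit.
#include "common/vulkan/util.h"    // assumed include path

static constexpr char FILL_FRAGMENT_SHADER[] = R"(
#version 450 core
layout(location = 0) out vec4 o_col;
void main() { o_col = vec4(1.0, 1.0, 1.0, 1.0); }
)";

VkShaderModule CreateFillFragmentShader()
{
  // Returns VK_NULL_HANDLE if compilation or module creation fails.
  return Vulkan::Util::CompileAndCreateFragmentShader(FILL_FRAGMENT_SHADER);
}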

src/common/vulkan/util.h Normal file
@ -0,0 +1,80 @@
// Copyright 2016 Dolphin Emulator Project
// Copyright 2020 DuckStation Emulator Project
// Licensed under GPLv2+
// Refer to the LICENSE file included.
#pragma once
#include "../types.h"
#include "vulkan_loader.h"
#include <algorithm>
#include <string_view>
namespace Vulkan {
namespace Util {
inline constexpr u32 MakeRGBA8Color(float r, float g, float b, float a)
{
return (static_cast<u32>(std::clamp(static_cast<int>(r * 255.0f), 0, 255)) << 0) |
(static_cast<u32>(std::clamp(static_cast<int>(g * 255.0f), 0, 255)) << 8) |
(static_cast<u32>(std::clamp(static_cast<int>(b * 255.0f), 0, 255)) << 16) |
(static_cast<u32>(std::clamp(static_cast<int>(a * 255.0f), 0, 255)) << 24);
}
bool IsDepthFormat(VkFormat format);
bool IsCompressedFormat(VkFormat format);
VkFormat GetLinearFormat(VkFormat format);
u32 GetTexelSize(VkFormat format);
u32 GetBlockSize(VkFormat format);
// Clamps a VkRect2D to the specified dimensions.
VkRect2D ClampRect2D(const VkRect2D& rect, u32 width, u32 height);
// Map {SRC,DST}_COLOR to {SRC,DST}_ALPHA
VkBlendFactor GetAlphaBlendFactor(VkBlendFactor factor);
// Safe destroy helpers
void SafeDestroyFramebuffer(VkFramebuffer& fb);
void SafeDestroyPipeline(VkPipeline& p);
void SafeDestroyPipelineLayout(VkPipelineLayout& pl);
void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl);
void SafeDestroyBufferView(VkBufferView& bv);
void SafeDestroySampler(VkSampler& samp);
void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds);
void SetViewport(VkCommandBuffer command_buffer, int x, int y, int width, int height, float min_depth = 0.0f,
float max_depth = 1.0f);
void SetScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height);
// Combines viewport and scissor updates
void SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height, float min_depth = 0.0f,
float max_depth = 1.0f);
// Wrapper for creating a barrier on a buffer
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask);
// Create a shader module from the specified SPIR-V.
VkShaderModule CreateShaderModule(const u32* spv, size_t spv_word_count);
// Compile a vertex shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateVertexShader(std::string_view source_code);
// Compile a geometry shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateGeometryShader(std::string_view source_code);
// Compile a fragment shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateFragmentShader(std::string_view source_code);
// Compile a compute shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateComputeShader(std::string_view source_code);
const char* VkResultToString(VkResult res);
void LogVulkanResult(int level, const char* func_name, VkResult res, const char* msg, ...);
#define LOG_VULKAN_ERROR(res, ...) ::Vulkan::Util::LogVulkanResult(1, __func__, res, __VA_ARGS__)
} // namespace Util
} // namespace Vulkan
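As a usage sketch for BufferMemoryBarrier(): making a transfer write to a vertex buffer visible to vertex attribute reads in subsequent draws. The command buffer and buffer handle are assumed, as is the include path.

// Sketch only, not part of this commit.
#include "common/vulkan/util.h"    // assumed include path

void FlushVertexUpload(VkCommandBuffer cmdbuf, VkBuffer vertex_buffer)
{
  Vulkan::Util::BufferMemoryBarrier(cmdbuf, vertex_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
                                    VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, 0, VK_WHOLE_SIZE,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
}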