Mirror of https://github.com/RetroDECK/Duckstation.git (synced 2024-11-21 21:35:38 +00:00)
MetalDevice: Implement pipeline cache
This commit is contained in:
parent
191957547a
commit
bd99688b9d
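At a glance, the commit replaces the old optional-blob cache hooks with an explicit per-backend cache lifecycle. The declarations below are condensed from the gpu_device.h hunk further down; this summary is editorial, not part of the diff itself.

// New GPUDevice pipeline-cache hooks (see the gpu_device.h hunk below).
// Backends override the subset they support; the base implementations of
// ReadPipelineCache()/GetPipelineCacheData() simply return false.
virtual bool OpenPipelineCache(const std::string& path, Error* error);
virtual bool CreatePipelineCache(const std::string& path, Error* error);
virtual bool ReadPipelineCache(DynamicHeapArray<u8> data, Error* error);
virtual bool GetPipelineCacheData(DynamicHeapArray<u8>* data, Error* error);
virtual bool ClosePipelineCache(const std::string& path, Error* error);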
@ -291,51 +291,45 @@ void D3D12Device::GetPipelineCacheHeader(PIPELINE_CACHE_HEADER* hdr)
hdr->unused = 0;
}

bool D3D12Device::ReadPipelineCache(std::optional<DynamicHeapArray<u8>> data)
bool D3D12Device::ReadPipelineCache(DynamicHeapArray<u8> data, Error* error)
{
PIPELINE_CACHE_HEADER expected_header;
GetPipelineCacheHeader(&expected_header);
if (data.has_value() && (data->size() < sizeof(PIPELINE_CACHE_HEADER) ||
std::memcmp(data->data(), &expected_header, sizeof(PIPELINE_CACHE_HEADER)) != 0))
if ((data.size() < sizeof(PIPELINE_CACHE_HEADER) ||
std::memcmp(data.data(), &expected_header, sizeof(PIPELINE_CACHE_HEADER)) != 0))
{
WARNING_LOG("Pipeline cache header does not match current device, ignoring.");
data.reset();
Error::SetStringView(error, "Pipeline cache header does not match current device.");
return false;
}

HRESULT hr =
m_device->CreatePipelineLibrary(data.has_value() ? (data->data() + sizeof(PIPELINE_CACHE_HEADER)) : nullptr,
data.has_value() ? (data->size() - sizeof(PIPELINE_CACHE_HEADER)) : 0,
const HRESULT hr =
m_device->CreatePipelineLibrary(&data[sizeof(PIPELINE_CACHE_HEADER)], data.size() - sizeof(PIPELINE_CACHE_HEADER),
IID_PPV_ARGS(m_pipeline_library.ReleaseAndGetAddressOf()));
if (SUCCEEDED(hr))
{
if (data.has_value())
s_pipeline_cache_data = std::move(data.value());

return true;
}

// Try without the cache data.
if (data.has_value())
{
WARNING_LOG("CreatePipelineLibrary() failed, trying without cache data. Error: {}",
Error::CreateHResult(hr).GetDescription());

hr = m_device->CreatePipelineLibrary(nullptr, 0, IID_PPV_ARGS(m_pipeline_library.ReleaseAndGetAddressOf()));
if (SUCCEEDED(hr))
return true;
}

if (FAILED(hr))
{
WARNING_LOG("CreatePipelineLibrary() failed, pipeline caching will not be available. Error: {}",
Error::CreateHResult(hr).GetDescription());
Error::SetHResult(error, "CreatePipelineLibrary() failed: ", hr);
return false;
}

// Have to keep the buffer around, DX doesn't take a copy.
s_pipeline_cache_data = std::move(data);
return true;
}

bool D3D12Device::CreatePipelineCache(const std::string& path, Error* error)
{
const HRESULT hr =
m_device->CreatePipelineLibrary(nullptr, 0, IID_PPV_ARGS(m_pipeline_library.ReleaseAndGetAddressOf()));
if (FAILED(hr))
{
Error::SetHResult(error, "CreatePipelineLibrary() failed: ", hr);
return false;
}

return true;
}

bool D3D12Device::GetPipelineCacheData(DynamicHeapArray<u8>* data)
bool D3D12Device::GetPipelineCacheData(DynamicHeapArray<u8>* data, Error* error)
{
if (!m_pipeline_library)
return false;

@ -356,7 +350,7 @@ bool D3D12Device::GetPipelineCacheData(DynamicHeapArray<u8>* data)
const HRESULT hr = m_pipeline_library->Serialize(data->data() + sizeof(PIPELINE_CACHE_HEADER), size);
if (FAILED(hr))
{
ERROR_LOG("Serialize() failed with HRESULT {:08X}", static_cast<unsigned>(hr));
Error::SetHResult(error, "Serialize() failed: ", hr);
data->deallocate();
return false;
}
@ -187,8 +187,9 @@ protected:
FeatureMask disabled_features, Error* error) override;
void DestroyDevice() override;

bool ReadPipelineCache(std::optional<DynamicHeapArray<u8>> data) override;
bool GetPipelineCacheData(DynamicHeapArray<u8>* data) override;
bool ReadPipelineCache(DynamicHeapArray<u8> data, Error* error) override;
bool CreatePipelineCache(const std::string& path, Error* error) override;
bool GetPipelineCacheData(DynamicHeapArray<u8>* data, Error* error) override;

private:
enum DIRTY_FLAG : u32
@ -428,14 +428,29 @@ void GPUDevice::OpenShaderCache(std::string_view base_path, u32 version)
}

s_pipeline_cache_path = {};
s_pipeline_cache_size = 0;
s_pipeline_cache_hash = {};

if (m_features.pipeline_cache && !base_path.empty())
{
const std::string basename = GetShaderCacheBaseName("pipelines");
std::string filename = Path::Combine(base_path, TinyString::from_format("{}.bin", basename));
if (OpenPipelineCache(filename))
s_pipeline_cache_path = std::move(filename);
else
WARNING_LOG("Failed to read pipeline cache.");
Error error;
s_pipeline_cache_path =
Path::Combine(base_path, TinyString::from_format("{}.bin", GetShaderCacheBaseName("pipelines")));
if (FileSystem::FileExists(s_pipeline_cache_path.c_str()))
{
if (OpenPipelineCache(s_pipeline_cache_path, &error))
return;

WARNING_LOG("Failed to read pipeline cache '{}': {}", Path::GetFileName(s_pipeline_cache_path),
error.GetDescription());
}

if (!CreatePipelineCache(s_pipeline_cache_path, &error))
{
WARNING_LOG("Failed to create pipeline cache '{}': {}", Path::GetFileName(s_pipeline_cache_path),
error.GetDescription());
s_pipeline_cache_path = {};
}
}
}
@ -445,25 +460,11 @@ void GPUDevice::CloseShaderCache()

if (!s_pipeline_cache_path.empty())
{
DynamicHeapArray<u8> data;
if (GetPipelineCacheData(&data))
Error error;
if (!ClosePipelineCache(s_pipeline_cache_path, &error))
{
// Save disk writes if it hasn't changed, think of the poor SSDs.
if (s_pipeline_cache_size != data.size() || s_pipeline_cache_hash != SHA1Digest::GetDigest(data.cspan()))
{
Error error;
INFO_LOG("Compressing and writing {} bytes to '{}'", data.size(), Path::GetFileName(s_pipeline_cache_path));
if (!CompressHelpers::CompressToFile(CompressHelpers::CompressType::Zstandard, s_pipeline_cache_path.c_str(),
data.cspan(), -1, true, &error))
{
ERROR_LOG("Failed to write pipeline cache to '{}': {}", Path::GetFileName(s_pipeline_cache_path),
error.GetDescription());
}
}
else
{
INFO_LOG("Skipping updating pipeline cache '{}' due to no changes.", Path::GetFileName(s_pipeline_cache_path));
}
WARNING_LOG("Failed to close pipeline cache '{}': {}", Path::GetFileName(s_pipeline_cache_path),
error.GetDescription());
}

s_pipeline_cache_path = {};
@ -480,49 +481,56 @@ std::string GPUDevice::GetShaderCacheBaseName(std::string_view type) const
return fmt::format("{}_{}{}", lower_api_name, type, debug_suffix);
}

bool GPUDevice::OpenPipelineCache(const std::string& filename)
bool GPUDevice::OpenPipelineCache(const std::string& path, Error* error)
{
s_pipeline_cache_size = 0;
s_pipeline_cache_hash = {};
CompressHelpers::OptionalByteBuffer data =
CompressHelpers::DecompressFile(CompressHelpers::CompressType::Zstandard, path.c_str(), std::nullopt, error);
if (!data.has_value())
return false;

Error error;
CompressHelpers::OptionalByteBuffer data;
if (FileSystem::FileExists(filename.c_str()))
{
data =
CompressHelpers::DecompressFile(CompressHelpers::CompressType::Zstandard, filename.c_str(), std::nullopt, &error);
if (data.has_value())
{
s_pipeline_cache_size = data->size();
s_pipeline_cache_hash = SHA1Digest::GetDigest(data->cspan());
}
else
{
ERROR_LOG("Failed to load pipeline cache from '{}': {}", Path::GetFileName(filename), error.GetDescription());
}
}
const size_t cache_size = data->size();
const std::array<u8, SHA1Digest::DIGEST_SIZE> cache_hash = SHA1Digest::GetDigest(data->cspan());

INFO_LOG("Loading {} byte pipeline cache with hash {}", s_pipeline_cache_size,
SHA1Digest::DigestToString(s_pipeline_cache_hash));

if (ReadPipelineCache(std::move(data)))
{
return true;
}
else
{
s_pipeline_cache_size = 0;
s_pipeline_cache_hash = {};
if (!ReadPipelineCache(std::move(data.value()), error))
return false;
}

s_pipeline_cache_size = cache_size;
s_pipeline_cache_hash = cache_hash;
return true;
}

bool GPUDevice::ReadPipelineCache(std::optional<DynamicHeapArray<u8>> data)
bool GPUDevice::CreatePipelineCache(const std::string& path, Error* error)
{
return false;
}

bool GPUDevice::GetPipelineCacheData(DynamicHeapArray<u8>* data)
bool GPUDevice::ClosePipelineCache(const std::string& path, Error* error)
{
DynamicHeapArray<u8> data;
if (!GetPipelineCacheData(&data, error))
return false;

// Save disk writes if it hasn't changed, think of the poor SSDs.
if (s_pipeline_cache_size == data.size() && s_pipeline_cache_hash == SHA1Digest::GetDigest(data.cspan()))
{
INFO_LOG("Skipping updating pipeline cache '{}' due to no changes.", Path::GetFileName(path));
return true;
}

INFO_LOG("Compressing and writing {} bytes to '{}'", data.size(), Path::GetFileName(path));
return CompressHelpers::CompressToFile(CompressHelpers::CompressType::Zstandard, path.c_str(), data.cspan(), -1, true,
error);
}

bool GPUDevice::ReadPipelineCache(DynamicHeapArray<u8> data, Error* error)
{
return false;
}

bool GPUDevice::GetPipelineCacheData(DynamicHeapArray<u8>* data, Error* error)
{
return false;
}
@ -1705,7 +1713,7 @@ bool GPUDevice::TranslateVulkanSpvToLanguage(const std::span<const u8> spirv, GP

if (m_features.framebuffer_fetch &&
((sres = dyn_libs::spvc_compiler_options_set_uint(soptions, SPVC_COMPILER_OPTION_MSL_VERSION,
SPVC_MAKE_MSL_VERSION(2, 3, 0))) != SPVC_SUCCESS))
target_version)) != SPVC_SUCCESS))
{
Error::SetStringFmt(error, "spvc_compiler_options_set_uint(SPVC_COMPILER_OPTION_MSL_VERSION) failed: {}",
static_cast<int>(sres));
@ -750,9 +750,11 @@ protected:
virtual void DestroyDevice() = 0;

std::string GetShaderCacheBaseName(std::string_view type) const;
virtual bool OpenPipelineCache(const std::string& filename);
virtual bool ReadPipelineCache(std::optional<DynamicHeapArray<u8>> data);
virtual bool GetPipelineCacheData(DynamicHeapArray<u8>* data);
virtual bool OpenPipelineCache(const std::string& path, Error* error);
virtual bool CreatePipelineCache(const std::string& path, Error* error);
virtual bool ReadPipelineCache(DynamicHeapArray<u8> data, Error* error);
virtual bool GetPipelineCacheData(DynamicHeapArray<u8>* data, Error* error);
virtual bool ClosePipelineCache(const std::string& path, Error* error);

virtual std::unique_ptr<GPUShader> CreateShaderFromBinary(GPUShaderStage stage, std::span<const u8> data,
Error* error) = 0;
@ -288,6 +288,9 @@ protected:
bool CreateDevice(std::string_view adapter, std::optional<bool> exclusive_fullscreen_control,
FeatureMask disabled_features, Error* error) override;
void DestroyDevice() override;
bool OpenPipelineCache(const std::string& path, Error* error) override;
bool CreatePipelineCache(const std::string& path, Error* error) override;
bool ClosePipelineCache(const std::string& path, Error* error) override;

private:
static constexpr u32 VERTEX_BUFFER_SIZE = 8 * 1024 * 1024;

@ -372,6 +375,7 @@ private:
MetalStreamBuffer m_texture_upload_buffer;

id<MTLLibrary> m_shaders = nil;
id<MTLBinaryArchive> m_pipeline_archive = nil;
std::vector<std::pair<std::pair<GPUTexture::Format, GPUTexture::Format>, id<MTLComputePipelineState>>>
m_resolve_pipelines;
std::vector<std::pair<ClearPipelineConfig, id<MTLRenderPipelineState>>> m_clear_pipelines;

@ -400,6 +404,7 @@ private:
GSVector4i m_current_scissor = {};

bool m_vsync_enabled = false;
bool m_pipeline_cache_modified = false;

double m_accumulated_gpu_time = 0;
double m_last_gpu_time_end = 0;
@ -155,8 +155,7 @@ MetalDevice::MetalDevice() : m_current_viewport(0, 0, 1, 1), m_current_scissor(0

MetalDevice::~MetalDevice()
{
Assert(m_layer == nil);
Assert(m_device == nil);
Assert(m_pipeline_archive == nil && m_layer == nil && m_device == nil);
}

bool MetalDevice::HasSurface() const

@ -251,8 +250,9 @@ bool MetalDevice::CreateDevice(std::string_view adapter, std::optional<bool> exc

void MetalDevice::SetFeatures(FeatureMask disabled_features)
{
// Set version to Metal 2.3, that's all we're using. Use SPIRV-Cross version encoding.
m_render_api = RenderAPI::Metal;
m_render_api_version = 100; // TODO: Make this more meaningful.
m_render_api_version = 20300;
m_max_texture_size = GetMetalMaxTextureSize(m_device);
m_max_multisamples = GetMetalMaxMultisamples(m_device);

@ -277,8 +277,15 @@ void MetalDevice::SetFeatures(FeatureMask disabled_features)
m_features.explicit_present = false;
m_features.timed_present = true;
m_features.shader_cache = true;
m_features.pipeline_cache = false;
m_features.pipeline_cache = true;
m_features.prefer_unused_textures = true;

// Disable pipeline cache on Intel, apparently it's buggy.
if ([[m_device name] containsString:@"Intel"])
{
WARNING_LOG("Disabling Metal pipeline cache on Intel GPU.");
m_features.pipeline_cache = false;
}
}

bool MetalDevice::LoadShaders()
@ -313,6 +320,76 @@ bool MetalDevice::LoadShaders()
}
}

bool MetalDevice::OpenPipelineCache(const std::string& path, Error* error)
{
@autoreleasepool
{
MTLBinaryArchiveDescriptor* archiveDescriptor = [[[MTLBinaryArchiveDescriptor alloc] init] autorelease];
archiveDescriptor.url = [NSURL fileURLWithPath:StringViewToNSString(path)];

NSError* nserror = nil;
m_pipeline_archive = [m_device newBinaryArchiveWithDescriptor:archiveDescriptor error:&nserror];
if (m_pipeline_archive == nil)
{
NSErrorToErrorObject(error, "newBinaryArchiveWithDescriptor failed: ", nserror);
return false;
}

m_pipeline_cache_modified = false;
return true;
}
}

bool MetalDevice::CreatePipelineCache(const std::string& path, Error* error)
{
@autoreleasepool
{
MTLBinaryArchiveDescriptor* archiveDescriptor = [[[MTLBinaryArchiveDescriptor alloc] init] autorelease];
archiveDescriptor.url = nil;

NSError* nserror = nil;
m_pipeline_archive = [m_device newBinaryArchiveWithDescriptor:archiveDescriptor error:&nserror];
if (m_pipeline_archive == nil)
{
NSErrorToErrorObject(error, "newBinaryArchiveWithDescriptor failed: ", nserror);
return false;
}

m_pipeline_cache_modified = false;
return true;
}
}

bool MetalDevice::ClosePipelineCache(const std::string& path, Error* error)
{
if (!m_pipeline_archive)
return false;

const ScopedGuard closer = [this]() {
[m_pipeline_archive release];
m_pipeline_archive = nil;
};

if (!m_pipeline_cache_modified)
{
INFO_LOG("Not saving pipeline cache, it has not been modified.");
return true;
}

@autoreleasepool
{
NSURL* url = [NSURL fileURLWithPath:StringViewToNSString(path)];
NSError* nserror = nil;
if (![m_pipeline_archive serializeToURL:url error:&nserror])
{
NSErrorToErrorObject(error, "serializeToURL failed: ", nserror);
return false;
}

return true;
}
}

id<MTLFunction> MetalDevice::GetFunctionFromLibrary(id<MTLLibrary> library, NSString* name)
{
id<MTLFunction> function = [library newFunctionWithName:name];
@ -609,30 +686,13 @@ void MetalShader::SetDebugName(std::string_view name)
}
}

// TODO: Clean this up, somehow..
namespace EmuFolders {
extern std::string DataRoot;
}
static void DumpShader(u32 n, std::string_view suffix, std::string_view data)
{
if (data.empty())
return;

auto fp = FileSystem::OpenManagedCFile(
Path::Combine(EmuFolders::DataRoot, fmt::format("shader{}_{}.txt", suffix, n)).c_str(), "wb");
if (!fp)
return;

std::fwrite(data.data(), data.length(), 1, fp.get());
}

std::unique_ptr<GPUShader> MetalDevice::CreateShaderFromMSL(GPUShaderStage stage, std::string_view source,
std::string_view entry_point, Error* error)
{
@autoreleasepool
{
NSString* const ns_source = StringViewToNSString(source);
NSError* nserror = nullptr;
NSError* nserror = nil;
id<MTLLibrary> library = [m_device newLibraryWithSource:ns_source options:nil error:&nserror];
if (!library)
{
@ -897,13 +957,41 @@ std::unique_ptr<GPUPipeline> MetalDevice::CreatePipeline(const GPUPipeline::Grap
if (config.layout == GPUPipeline::Layout::SingleTextureBufferAndPushConstants)
desc.fragmentBuffers[1].mutability = MTLMutabilityImmutable;

NSError* nserror = nullptr;
id<MTLRenderPipelineState> pipeline = [m_device newRenderPipelineStateWithDescriptor:desc error:&nserror];
NSError* nserror = nil;

// Try cached first.
id<MTLRenderPipelineState> pipeline = nil;
if (m_pipeline_archive != nil)
{
desc.binaryArchives = [NSArray arrayWithObjects:m_pipeline_archive, nil];
pipeline = [m_device newRenderPipelineStateWithDescriptor:desc
options:MTLPipelineOptionFailOnBinaryArchiveMiss
reflection:nil
error:&nserror];
if (pipeline == nil)
{
// Add it to the cache.
if (![m_pipeline_archive addRenderPipelineFunctionsWithDescriptor:desc error:&nserror])
{
LogNSError(nserror, "Failed to add render pipeline to binary archive");
desc.binaryArchives = nil;
}
else
{
m_pipeline_cache_modified = true;
}
}
}

if (pipeline == nil)
{
LogNSError(nserror, "Failed to create render pipeline state");
NSErrorToErrorObject(error, "newRenderPipelineStateWithDescriptor failed: ", nserror);
return {};
pipeline = [m_device newRenderPipelineStateWithDescriptor:desc error:&nserror];
if (pipeline == nil)
{
LogNSError(nserror, "Failed to create render pipeline state");
NSErrorToErrorObject(error, "newRenderPipelineStateWithDescriptor failed: ", nserror);
return {};
}
}

return std::unique_ptr<GPUPipeline>(new MetalPipeline(pipeline, depth, cull_mode, primitive));
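The archive-backed compile path above boils down to a try-cached-then-record pattern: ask Metal to fail fast on an archive miss, record the descriptor into the archive on a miss, then compile normally. A condensed Objective-C++ sketch of that pattern, detached from DuckStation's types (the function name and parameters are illustrative, not part of the commit):

#import <Metal/Metal.h>

// Illustrative helper: compile a render pipeline, preferring the binary
// archive and recording a miss back into it so later runs hit the cache.
static id<MTLRenderPipelineState> CompileWithArchive(id<MTLDevice> device,
                                                     MTLRenderPipelineDescriptor* desc,
                                                     id<MTLBinaryArchive> archive,
                                                     NSError** error)
{
  // Attach the archive and ask Metal to fail instead of compiling on a miss.
  desc.binaryArchives = @[ archive ];
  id<MTLRenderPipelineState> pso =
    [device newRenderPipelineStateWithDescriptor:desc
                                         options:MTLPipelineOptionFailOnBinaryArchiveMiss
                                      reflection:nil
                                           error:error];
  if (pso != nil)
    return pso;

  // Miss: record this pipeline into the archive, then compile normally.
  [archive addRenderPipelineFunctionsWithDescriptor:desc error:error];
  return [device newRenderPipelineStateWithDescriptor:desc error:error];
}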
@ -529,7 +529,6 @@ void OpenGLDevice::DestroyDevice()
if (!m_gl_context)
return;

ClosePipelineCache();
DestroyBuffers();

m_gl_context->DoneCurrent();
@ -136,8 +136,9 @@ protected:
FeatureMask disabled_features, Error* error) override;
void DestroyDevice() override;

bool OpenPipelineCache(const std::string& filename) override;
bool GetPipelineCacheData(DynamicHeapArray<u8>* data) override;
bool OpenPipelineCache(const std::string& path, Error* error) override;
bool CreatePipelineCache(const std::string& path, Error* error) override;
bool ClosePipelineCache(const std::string& path, Error* error) override;

private:
static constexpr u8 NUM_TIMESTAMP_QUERIES = 3;

@ -172,7 +173,6 @@ private:
const GPUPipeline::GraphicsConfig& plconfig);
void AddToPipelineCache(OpenGLPipeline::ProgramCacheItem* it);
bool DiscardPipelineCache();
void ClosePipelineCache();

void ApplyRasterizationState(GPUPipeline::RasterizationState rs);
void ApplyDepthState(GPUPipeline::DepthState ds);

@ -224,7 +224,6 @@ private:
bool m_timestamp_query_started = false;

std::FILE* m_pipeline_disk_cache_file = nullptr;
std::string m_pipeline_disk_cache_filename;
u32 m_pipeline_disk_cache_data_end = 0;
bool m_pipeline_disk_cache_changed = false;
@ -753,49 +753,29 @@ void OpenGLDevice::SetPipeline(GPUPipeline* pipeline)
}
}

bool OpenGLDevice::OpenPipelineCache(const std::string& filename)
bool OpenGLDevice::OpenPipelineCache(const std::string& path, Error* error)
{
DebugAssert(!m_pipeline_disk_cache_file);

Error error;
m_pipeline_disk_cache_file = FileSystem::OpenCFile(filename.c_str(), "r+b", &error);
m_pipeline_disk_cache_filename = filename;

m_pipeline_disk_cache_file = FileSystem::OpenCFile(path.c_str(), "r+b", error);
if (!m_pipeline_disk_cache_file)
{
// Multiple instances running? Ignore.
if (errno == EACCES)
{
m_pipeline_disk_cache_filename = {};
return true;
}

// If it doesn't exist, we're going to create it.
if (errno != ENOENT)
{
WARNING_LOG("Failed to open shader cache: {}", error.GetDescription());
m_pipeline_disk_cache_filename = {};
return false;
}

WARNING_LOG("Disk cache does not exist, creating.");
return DiscardPipelineCache();
}
return false;

// Read footer.
const s64 size = FileSystem::FSize64(m_pipeline_disk_cache_file);
if (size < static_cast<s64>(sizeof(PipelineDiskCacheFooter)) ||
size >= static_cast<s64>(std::numeric_limits<u32>::max()))
{
return DiscardPipelineCache();
Error::SetStringView(error, "Invalid cache file size.");
return false;
}

PipelineDiskCacheFooter file_footer;
if (FileSystem::FSeek64(m_pipeline_disk_cache_file, size - sizeof(PipelineDiskCacheFooter), SEEK_SET) != 0 ||
std::fread(&file_footer, sizeof(file_footer), 1, m_pipeline_disk_cache_file) != 1)
{
ERROR_LOG("Failed to read disk cache footer.");
return DiscardPipelineCache();
Error::SetStringView(error, "Invalid cache file footer.");
return false;
}

PipelineDiskCacheFooter expected_footer;
@ -809,8 +789,8 @@ bool OpenGLDevice::OpenPipelineCache(const std::string& filename)
std::strncmp(file_footer.driver_version, expected_footer.driver_version, std::size(file_footer.driver_version)) !=
0)
{
ERROR_LOG("Disk cache does not match expected driver/version.");
return DiscardPipelineCache();
Error::SetStringView(error, "Cache does not match expected driver/version.");
return false;
}

m_pipeline_disk_cache_data_end = static_cast<u32>(size) - sizeof(PipelineDiskCacheFooter) -

@ -818,8 +798,8 @@ bool OpenGLDevice::OpenPipelineCache(const std::string& filename)
if (m_pipeline_disk_cache_data_end < 0 ||
FileSystem::FSeek64(m_pipeline_disk_cache_file, m_pipeline_disk_cache_data_end, SEEK_SET) != 0)
{
ERROR_LOG("Failed to seek to start of index entries.");
return DiscardPipelineCache();
Error::SetStringView(error, "Failed to seek to start of index entries.");
return false;
}

// Read entries.

@ -829,14 +809,16 @@ bool OpenGLDevice::OpenPipelineCache(const std::string& filename)
if (std::fread(&entry, sizeof(entry), 1, m_pipeline_disk_cache_file) != 1 ||
(static_cast<s64>(entry.offset) + static_cast<s64>(entry.compressed_size)) >= size)
{
ERROR_LOG("Failed to read disk cache entry.");
return DiscardPipelineCache();
Error::SetStringView(error, "Failed to read disk cache entry.");
m_program_cache.clear();
return false;
}

if (m_program_cache.find(entry.key) != m_program_cache.end())
{
ERROR_LOG("Duplicate program in disk cache.");
return DiscardPipelineCache();
Error::SetStringView(error, "Duplicate program in disk cache.");
m_program_cache.clear();
return false;
}

OpenGLPipeline::ProgramCacheItem pitem;

@ -853,10 +835,15 @@ bool OpenGLDevice::OpenPipelineCache(const std::string& filename)
return true;
}

bool OpenGLDevice::GetPipelineCacheData(DynamicHeapArray<u8>* data)
bool OpenGLDevice::CreatePipelineCache(const std::string& path, Error* error)
{
// Self-managed.
return false;
m_pipeline_disk_cache_file = FileSystem::OpenCFile(path.c_str(), "w+b", error);
if (!m_pipeline_disk_cache_file)
return false;

m_pipeline_disk_cache_data_end = 0;
m_pipeline_disk_cache_changed = true;
return true;
}

GLuint OpenGLDevice::CreateProgramFromPipelineCache(const OpenGLPipeline::ProgramCacheItem& it,
@ -976,42 +963,42 @@ bool OpenGLDevice::DiscardPipelineCache()
it = m_program_cache.erase(it);
}

if (m_pipeline_disk_cache_file)
std::fclose(m_pipeline_disk_cache_file);

Error error;
m_pipeline_disk_cache_data_end = 0;
m_pipeline_disk_cache_file = FileSystem::OpenCFile(m_pipeline_disk_cache_filename.c_str(), "w+b", &error);
if (!m_pipeline_disk_cache_file) [[unlikely]]
if (!m_pipeline_disk_cache_file)
{
ERROR_LOG("Failed to reopen pipeline cache: {}", error.GetDescription());
m_pipeline_disk_cache_filename = {};
// Probably shouldn't get here...
return false;
}

Error error;
if (!FileSystem::FTruncate64(m_pipeline_disk_cache_file, 0, &error))
{
ERROR_LOG("Failed to truncate pipeline cache: {}", error.GetDescription());
std::fclose(m_pipeline_disk_cache_file);
m_pipeline_disk_cache_file = nullptr;
return false;
}

m_pipeline_disk_cache_data_end = 0;
m_pipeline_disk_cache_changed = true;
return true;
}

void OpenGLDevice::ClosePipelineCache()
bool OpenGLDevice::ClosePipelineCache(const std::string& filename, Error* error)
{
const ScopedGuard file_closer = [this]() {
if (m_pipeline_disk_cache_file)
{
std::fclose(m_pipeline_disk_cache_file);
m_pipeline_disk_cache_file = nullptr;
}
};

if (!m_pipeline_disk_cache_changed)
{
VERBOSE_LOG("Not updating pipeline cache because it has not changed.");
return;
std::fclose(m_pipeline_disk_cache_file);
m_pipeline_disk_cache_file = nullptr;
return true;
}

if (FileSystem::FSeek64(m_pipeline_disk_cache_file, m_pipeline_disk_cache_data_end, SEEK_SET) != 0) [[unlikely]]
// Rewrite footer/index entries.
if (!FileSystem::FSeek64(m_pipeline_disk_cache_file, m_pipeline_disk_cache_data_end, SEEK_SET, error) != 0)
{
ERROR_LOG("Failed to seek to data end.");
return;
std::fclose(m_pipeline_disk_cache_file);
m_pipeline_disk_cache_file = nullptr;
return false;
}

u32 count = 0;

@ -1030,8 +1017,10 @@ void OpenGLDevice::ClosePipelineCache()

if (std::fwrite(&entry, sizeof(entry), 1, m_pipeline_disk_cache_file) != 1) [[unlikely]]
{
ERROR_LOG("Failed to write index entry.");
return;
Error::SetErrno(error, "fwrite() for entry failed: ", errno);
std::fclose(m_pipeline_disk_cache_file);
m_pipeline_disk_cache_file = nullptr;
return false;
}

count++;

@ -1042,5 +1031,14 @@ void OpenGLDevice::ClosePipelineCache()
footer.num_programs = count;

if (std::fwrite(&footer, sizeof(footer), 1, m_pipeline_disk_cache_file) != 1) [[unlikely]]
ERROR_LOG("Failed to write footer.");
{
Error::SetErrno(error, "fwrite() for footer failed: ", errno);
std::fclose(m_pipeline_disk_cache_file);
m_pipeline_disk_cache_file = nullptr;
}

if (std::fclose(m_pipeline_disk_cache_file) != 0)
Error::SetErrno(error, "fclose() failed: ", errno);
m_pipeline_disk_cache_file = nullptr;
return true;
}
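For orientation, this is the on-disk layout implied by the reads and writes in the OpenGL hunks above; any fields beyond the ones referenced in the diff are assumptions, and the real struct definitions live in opengl_device.h.

// Sketch of the self-managed GL pipeline cache file, inferred from this diff:
//
//   offset 0 .. m_pipeline_disk_cache_data_end     compressed program binaries
//   data_end .. (file size - sizeof(footer))       index entries, one per program
//                                                  (key, offset, compressed_size, ...)
//   last sizeof(PipelineDiskCacheFooter) bytes     footer (version, driver_version,
//                                                  num_programs, ...)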
@ -2098,37 +2098,37 @@ void VulkanDevice::DestroyDevice()
Vulkan::UnloadVulkanLibrary();
}

bool VulkanDevice::ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header)
bool VulkanDevice::ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header, Error* error)
{
if (header.header_length < sizeof(VK_PIPELINE_CACHE_HEADER))
{
ERROR_LOG("Pipeline cache failed validation: Invalid header length");
Error::SetStringView(error, "Invalid header length");
return false;
}

if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
{
ERROR_LOG("Pipeline cache failed validation: Invalid header version");
Error::SetStringView(error, "Invalid header version");
return false;
}

if (header.vendor_id != m_device_properties.vendorID)
{
ERROR_LOG("Pipeline cache failed validation: Incorrect vendor ID (file: 0x{:X}, device: 0x{:X})", header.vendor_id,
m_device_properties.vendorID);
Error::SetStringFmt(error, "Incorrect vendor ID (file: 0x{:X}, device: 0x{:X})", header.vendor_id,
m_device_properties.vendorID);
return false;
}

if (header.device_id != m_device_properties.deviceID)
{
ERROR_LOG("Pipeline cache failed validation: Incorrect device ID (file: 0x{:X}, device: 0x{:X})", header.device_id,
m_device_properties.deviceID);
Error::SetStringFmt(error, "Incorrect device ID (file: 0x{:X}, device: 0x{:X})", header.device_id,
m_device_properties.deviceID);
return false;
}

if (std::memcmp(header.uuid, m_device_properties.pipelineCacheUUID, VK_UUID_SIZE) != 0)
{
ERROR_LOG("Pipeline cache failed validation: Incorrect UUID");
Error::SetStringView(error, "Incorrect UUID");
return false;
}
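The fields validated above follow the header layout the Vulkan specification mandates for the start of vkGetPipelineCacheData() output. A sketch of how such a version-one header is commonly declared; DuckStation's actual VK_PIPELINE_CACHE_HEADER definition lives elsewhere and may differ in naming or packing.

#include <cstdint>
#include <vulkan/vulkan_core.h> // for VK_UUID_SIZE (16)

// Version-one pipeline cache header as laid out by the Vulkan spec.
struct PipelineCacheHeaderOne
{
  uint32_t header_length;     // total header size in bytes
  uint32_t header_version;    // VK_PIPELINE_CACHE_HEADER_VERSION_ONE
  uint32_t vendor_id;         // VkPhysicalDeviceProperties::vendorID
  uint32_t device_id;         // VkPhysicalDeviceProperties::deviceID
  uint8_t uuid[VK_UUID_SIZE]; // VkPhysicalDeviceProperties::pipelineCacheUUID
};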
@ -2144,35 +2144,46 @@ void VulkanDevice::FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header)
std::memcpy(header->uuid, m_device_properties.pipelineCacheUUID, VK_UUID_SIZE);
}

bool VulkanDevice::ReadPipelineCache(std::optional<DynamicHeapArray<u8>> data)
bool VulkanDevice::ReadPipelineCache(DynamicHeapArray<u8> data, Error* error)
{
if (data.has_value())
if (data.size() < sizeof(VK_PIPELINE_CACHE_HEADER))
{
if (data->size() < sizeof(VK_PIPELINE_CACHE_HEADER))
{
ERROR_LOG("Pipeline cache is too small, ignoring.");
data.reset();
}

VK_PIPELINE_CACHE_HEADER header;
std::memcpy(&header, data->data(), sizeof(header));
if (!ValidatePipelineCacheHeader(header))
data.reset();
Error::SetStringView(error, "Pipeline cache is too small.");
return false;
}

const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0,
data.has_value() ? data->size() : 0, data.has_value() ? data->data() : nullptr};
// alignment reasons...
VK_PIPELINE_CACHE_HEADER header;
std::memcpy(&header, data.data(), sizeof(header));
if (!ValidatePipelineCacheHeader(header, error))
return false;

const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, data.size(),
data.data()};
VkResult res = vkCreatePipelineCache(m_device, &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
Vulkan::SetErrorObject(error, "vkCreatePipelineCache() failed: ", res);
return false;
}

return true;
}

bool VulkanDevice::GetPipelineCacheData(DynamicHeapArray<u8>* data)
bool VulkanDevice::CreatePipelineCache(const std::string& path, Error* error)
{
const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, 0, nullptr};
VkResult res = vkCreatePipelineCache(m_device, &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
Vulkan::SetErrorObject(error, "vkCreatePipelineCache() failed: ", res);
return false;
}

return true;
}

bool VulkanDevice::GetPipelineCacheData(DynamicHeapArray<u8>* data, Error* error)
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return false;

@ -2181,7 +2192,7 @@ bool VulkanDevice::GetPipelineCacheData(DynamicHeapArray<u8>* data)
VkResult res = vkGetPipelineCacheData(m_device, m_pipeline_cache, &data_size, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() failed: ");
Vulkan::SetErrorObject(error, "vkGetPipelineCacheData() failed: ", res);
return false;
}

@ -2189,7 +2200,7 @@ bool VulkanDevice::GetPipelineCacheData(DynamicHeapArray<u8>* data)
res = vkGetPipelineCacheData(m_device, m_pipeline_cache, &data_size, data->data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() (2) failed: ");
Vulkan::SetErrorObject(error, "vkGetPipelineCacheData() (2) failed: ", res);
return false;
}
@ -234,8 +234,9 @@ protected:
FeatureMask disabled_features, Error* error) override;
void DestroyDevice() override;

bool ReadPipelineCache(std::optional<DynamicHeapArray<u8>> data) override;
bool GetPipelineCacheData(DynamicHeapArray<u8>* data) override;
bool ReadPipelineCache(DynamicHeapArray<u8> data, Error* error) override;
bool CreatePipelineCache(const std::string& path, Error* error) override;
bool GetPipelineCacheData(DynamicHeapArray<u8>* data, Error* error) override;

private:
enum DIRTY_FLAG : u32

@ -305,7 +306,7 @@ private:
static VkInstance CreateVulkanInstance(const WindowInfo& wi, OptionalExtensions* oe, bool enable_debug_utils,
bool enable_validation_layer);

bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header);
bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header, Error* error);
void FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header);

// Enable/disable debug message runtime.