Mirror of https://github.com/RetroDECK/Duckstation.git
Common: Persistent-mapped stream buffer implementation
commit 26c22f003f · parent ff8cef4da3
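Note (not part of the commit message): a minimal sketch of how a renderer drives the interface this commit introduces. Only StreamBuffer::Create(), Map(), Unmap(), GetGLTarget()/GetGLBufferId() and the space_aligned field appear in the diff below; the buffer size, the vertex layout, and the pointer/index_aligned field names are assumptions made for illustration, and the sketch assumes stream_buffer.h plus a GL loader are already included.

struct TestVertex
{
  float x, y, z, w;
};

void UploadAndDrawExampleBatch()
{
  // 4 MiB streaming vertex buffer; the size is an arbitrary example value.
  std::unique_ptr<GL::StreamBuffer> vbo = GL::StreamBuffer::Create(GL_ARRAY_BUFFER, 4 * 1024 * 1024);
  glBindBuffer(vbo->GetGLTarget(), vbo->GetGLBufferId());

  // Reserve space for one triangle; alignment = vertex stride, so the aligned
  // index returned by Map() is directly usable as a base vertex.
  const auto res = vbo->Map(sizeof(TestVertex), 3 * sizeof(TestVertex));
  TestVertex* out = static_cast<TestVertex*>(res.pointer); // field name assumed
  out[0] = {0.0f, 0.0f, 0.0f, 1.0f};
  out[1] = {1.0f, 0.0f, 0.0f, 1.0f};
  out[2] = {0.0f, 1.0f, 0.0f, 1.0f};
  vbo->Unmap(3 * sizeof(TestVertex));

  // With a persistent mapping the batch can start anywhere in the buffer,
  // so the draw begins at the mapped index rather than at vertex 0.
  glDrawArrays(GL_TRIANGLES, res.index_aligned, 3); // field name assumed
}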
@@ -1,9 +1,12 @@
 #include "stream_buffer.h"
+#include "YBaseLib/Assert.h"
+#include <array>
+#include <cstdio>
 
 namespace GL {
 
 StreamBuffer::StreamBuffer(GLenum target, GLuint buffer_id, u32 size)
-  : m_target(target), m_buffer_id(buffer_id), m_size(size), m_cpu_buffer(size)
+  : m_target(target), m_buffer_id(buffer_id), m_size(size)
 {
 }
 
@@ -22,37 +25,274 @@ void StreamBuffer::Unbind()
   glBindBuffer(m_target, 0);
 }
 
-StreamBuffer::MappingResult StreamBuffer::Map(u32 alignment, u32 min_size)
-{
-  return MappingResult{static_cast<void*>(m_cpu_buffer.data()), 0, 0, m_size / alignment};
-}
-
-void StreamBuffer::Unmap(u32 used_size)
-{
-  if (used_size == 0)
-    return;
-
-  glBindBuffer(m_target, m_buffer_id);
-  glBufferSubData(m_target, 0, used_size, m_cpu_buffer.data());
-}
-
+namespace detail {
+
+// Uses glBufferSubData() to update. Preferred for drivers which don't support {ARB,EXT}_buffer_storage.
+class BufferSubDataStreamBuffer final : public StreamBuffer
+{
+public:
+  ~BufferSubDataStreamBuffer() override = default;
+
+  MappingResult Map(u32 alignment, u32 min_size) override
+  {
+    return MappingResult{static_cast<void*>(m_cpu_buffer.data()), 0, 0, m_size / alignment};
+  }
+
+  void Unmap(u32 used_size) override
+  {
+    if (used_size == 0)
+      return;
+
+    glBindBuffer(m_target, m_buffer_id);
+    glBufferSubData(m_target, 0, used_size, m_cpu_buffer.data());
+  }
+
+  static std::unique_ptr<StreamBuffer> Create(GLenum target, u32 size)
+  {
+    glGetError();
+
+    GLuint buffer_id;
+    glGenBuffers(1, &buffer_id);
+    glBindBuffer(target, buffer_id);
+    glBufferData(target, size, nullptr, GL_STREAM_DRAW);
+
+    GLenum err = glGetError();
+    if (err != GL_NO_ERROR)
+    {
+      glDeleteBuffers(1, &buffer_id);
+      return {};
+    }
+
+    return std::unique_ptr<StreamBuffer>(new BufferSubDataStreamBuffer(target, buffer_id, size));
+  }
+
+private:
+  BufferSubDataStreamBuffer(GLenum target, GLuint buffer_id, u32 size)
+    : StreamBuffer(target, buffer_id, size), m_cpu_buffer(size)
+  {
+  }
+
+  std::vector<u8> m_cpu_buffer;
+};
+
+// Uses BufferData() to orphan the buffer after every update. Used on Mali where BufferSubData forces a sync.
+class BufferDataStreamBuffer final : public StreamBuffer
+{
+public:
+  ~BufferDataStreamBuffer() override = default;
+
+  MappingResult Map(u32 alignment, u32 min_size) override
+  {
+    return MappingResult{static_cast<void*>(m_cpu_buffer.data()), 0, 0, m_size / alignment};
+  }
+
+  void Unmap(u32 used_size) override
+  {
+    if (used_size == 0)
+      return;
+
+    glBindBuffer(m_target, m_buffer_id);
+    glBufferData(m_target, used_size, m_cpu_buffer.data(), GL_STREAM_DRAW);
+  }
+
+  static std::unique_ptr<StreamBuffer> Create(GLenum target, u32 size)
+  {
+    glGetError();
+
+    GLuint buffer_id;
+    glGenBuffers(1, &buffer_id);
+    glBindBuffer(target, buffer_id);
+    glBufferData(target, size, nullptr, GL_STREAM_DRAW);
+
+    GLenum err = glGetError();
+    if (err != GL_NO_ERROR)
+    {
+      glDeleteBuffers(1, &buffer_id);
+      return {};
+    }
+
+    return std::unique_ptr<StreamBuffer>(new BufferDataStreamBuffer(target, buffer_id, size));
+  }
+
+private:
+  BufferDataStreamBuffer(GLenum target, GLuint buffer_id, u32 size)
+    : StreamBuffer(target, buffer_id, size), m_cpu_buffer(size)
+  {
+  }
+
+  std::vector<u8> m_cpu_buffer;
+};
+
+// Base class for implementations which require syncing.
+class SyncingStreamBuffer : public StreamBuffer
+{
+public:
+  enum : u32
+  {
+    NUM_SYNC_POINTS = 16
+  };
+
+  virtual ~SyncingStreamBuffer() override
+  {
+    for (u32 i = m_available_block_index; i <= m_used_block_index; i++)
+    {
+      DebugAssert(m_sync_objects[i]);
+      glDeleteSync(m_sync_objects[i]);
+    }
+  }
+
+protected:
+  SyncingStreamBuffer(GLenum target, GLuint buffer_id, u32 size)
+    : StreamBuffer(target, buffer_id, size), m_bytes_per_block((size + (NUM_SYNC_POINTS)-1) / NUM_SYNC_POINTS)
+  {
+  }
+
+  u32 GetSyncIndexForOffset(u32 offset) { return offset / m_bytes_per_block; }
+
+  void AddSyncsForOffset(u32 offset)
+  {
+    const u32 end = GetSyncIndexForOffset(offset);
+    for (; m_used_block_index < end; m_used_block_index++)
+    {
+      DebugAssert(!m_sync_objects[m_used_block_index]);
+      m_sync_objects[m_used_block_index] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+    }
+  }
+
+  void WaitForSync(GLsync& sync)
+  {
+    glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
+    glDeleteSync(sync);
+    sync = nullptr;
+  }
+
+  void EnsureSyncsWaitedForOffset(u32 offset)
+  {
+    const u32 end = std::min<u32>(GetSyncIndexForOffset(offset) + 1, NUM_SYNC_POINTS);
+    for (; m_available_block_index < end; m_available_block_index++)
+    {
+      DebugAssert(m_sync_objects[m_available_block_index]);
+      WaitForSync(m_sync_objects[m_available_block_index]);
+    }
+  }
+
+  void AllocateSpace(u32 size)
+  {
+    // add sync objects for writes since the last allocation
+    AddSyncsForOffset(m_position);
+
+    // wait for sync objects for the space we want to use
+    EnsureSyncsWaitedForOffset(m_position + size);
+
+    // wrap-around?
+    if ((m_position + size) > m_size)
+    {
+      // current position ... buffer end
+      AddSyncsForOffset(m_size);
+
+      // rewind, and try again
+      m_position = 0;
+
+      // wait for the sync at the start of the buffer
+      WaitForSync(m_sync_objects[0]);
+      m_available_block_index = 1;
+
+      // and however much more we need to satisfy the allocation
+      EnsureSyncsWaitedForOffset(size);
+      m_used_block_index = 0;
+    }
+  }
+
+  u32 m_position = 0;
+  u32 m_used_block_index = 0;
+  u32 m_available_block_index = NUM_SYNC_POINTS;
+  u32 m_bytes_per_block;
+  std::array<GLsync, NUM_SYNC_POINTS> m_sync_objects{};
+};
+
+class BufferStorageStreamBuffer : public SyncingStreamBuffer
+{
+public:
+  ~BufferStorageStreamBuffer() override
+  {
+    glBindBuffer(m_target, m_buffer_id);
+    glUnmapBuffer(m_target);
+  }
+
+  MappingResult Map(u32 alignment, u32 min_size) override
+  {
+    if (m_position > 0)
+      m_position = Common::AlignUp(m_position, alignment);
+
+    AllocateSpace(min_size);
+    DebugAssert((m_position + min_size) <= (m_available_block_index * m_bytes_per_block));
+
+    const u32 free_space_in_block = ((m_available_block_index * m_bytes_per_block) - m_position);
+    return MappingResult{static_cast<void*>(m_mapped_ptr + m_position), m_position, m_position / alignment,
+                         free_space_in_block / alignment};
+  }
+
+  void Unmap(u32 used_size) override
+  {
+    DebugAssert((m_position + used_size) <= m_size);
+    m_position += used_size;
+  }
+
+  static std::unique_ptr<StreamBuffer> Create(GLenum target, u32 size)
+  {
+    glGetError();
+
+    GLuint buffer_id;
+    glGenBuffers(1, &buffer_id);
+    glBindBuffer(target, buffer_id);
+    if (GLAD_GL_VERSION_4_4 || GLAD_GL_ARB_buffer_storage)
+      glBufferStorage(target, size, nullptr, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
+    else if (GLAD_GL_EXT_buffer_storage)
+      glBufferStorageEXT(target, size, nullptr, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
+
+    GLenum err = glGetError();
+    if (err != GL_NO_ERROR)
+    {
+      glDeleteBuffers(1, &buffer_id);
+      return {};
+    }
+
+    u8* mapped_ptr = static_cast<u8*>(
+      glMapBufferRange(target, 0, size, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT));
+    Assert(mapped_ptr);
+
+    return std::unique_ptr<StreamBuffer>(new BufferStorageStreamBuffer(target, buffer_id, size, mapped_ptr));
+  }
+
+private:
+  BufferStorageStreamBuffer(GLenum target, GLuint buffer_id, u32 size, u8* mapped_ptr)
+    : SyncingStreamBuffer(target, buffer_id, size), m_mapped_ptr(mapped_ptr)
+  {
+  }
+
+  u8* m_mapped_ptr;
+};
+
+} // namespace detail
+
 std::unique_ptr<StreamBuffer> StreamBuffer::Create(GLenum target, u32 size)
 {
-  glGetError();
-
-  GLuint buffer_id;
-  glGenBuffers(1, &buffer_id);
-  glBindBuffer(target, buffer_id);
-  glBufferData(target, size, nullptr, GL_STREAM_DRAW);
-
-  GLenum err = glGetError();
-  if (err != GL_NO_ERROR)
+  std::unique_ptr<StreamBuffer> buf;
+  if (GLAD_GL_VERSION_4_4 || GLAD_GL_ARB_buffer_storage || GLAD_GL_EXT_buffer_storage)
   {
-    glDeleteBuffers(1, &buffer_id);
-    return {};
+    buf = detail::BufferStorageStreamBuffer::Create(target, size);
+    if (buf)
+      return buf;
   }
 
-  return std::unique_ptr<StreamBuffer>(new StreamBuffer(target, buffer_id, size));
+  const char* vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
+  if (std::strcmp(vendor, "ARM") == 0 || std::strcmp(vendor, "Qualcomm") == 0)
+  {
+    // Mali and Adreno drivers can't do sub-buffer tracking...
+    return detail::BufferDataStreamBuffer::Create(target, size);
+  }
+
+  return detail::BufferSubDataStreamBuffer::Create(target, size);
 }
 
 } // namespace GL
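A quick check of the sync-block arithmetic in SyncingStreamBuffer above, using an illustrative 1 MiB buffer (the size is an assumption; NUM_SYNC_POINTS = 16 and the rounding expression come from the diff, and u32 stands in for the project's alias):

#include <cstdint>
using u32 = std::uint32_t; // stand-in for the project's u32 alias

constexpr u32 kSize = 1024 * 1024;   // assumed buffer size, purely illustrative
constexpr u32 kNumSyncPoints = 16;   // NUM_SYNC_POINTS from the diff above
constexpr u32 kBytesPerBlock = (kSize + kNumSyncPoints - 1) / kNumSyncPoints;
static_assert(kBytesPerBlock == 65536, "16 blocks of 64 KiB each");
// A write that leaves m_position at byte 200000 is in block 200000 / 65536 = 3, so
// AddSyncsForOffset() places fences on blocks 0..2; on wrap-around those fences are
// waited on before the CPU reuses that region of the persistent mapping.
static_assert(200000 / kBytesPerBlock == 3, "offset 200000 falls inside block 3");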
@@ -6,11 +6,10 @@
 #include <vector>
 
 namespace GL {
-// TODO: Persistent mapping-based implementation
 class StreamBuffer
 {
 public:
-  ~StreamBuffer();
+  virtual ~StreamBuffer();
 
   ALWAYS_INLINE GLuint GetGLBufferId() const { return m_buffer_id; }
   ALWAYS_INLINE GLenum GetGLTarget() const { return m_target; }
@@ -27,18 +26,16 @@ public:
     u32 space_aligned; // remaining space / alignment
   };
 
-  MappingResult Map(u32 alignment, u32 min_size);
-  void Unmap(u32 used_size);
+  virtual MappingResult Map(u32 alignment, u32 min_size) = 0;
+  virtual void Unmap(u32 used_size) = 0;
 
   static std::unique_ptr<StreamBuffer> Create(GLenum target, u32 size);
 
-private:
+protected:
   StreamBuffer(GLenum target, GLuint buffer_id, u32 size);
 
   GLenum m_target;
   GLuint m_buffer_id;
   u32 m_size;
-
-  std::vector<u8> m_cpu_buffer;
 };
 } // namespace GL
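With Map()/Unmap() now pure virtual and the constructor protected, every backing strategy lives in a subclass reached through StreamBuffer::Create(). A purely illustrative skeleton of that shape (this class does not exist in the commit; the real implementations also hide their constructors behind a static Create()):

class ExampleStreamBuffer final : public GL::StreamBuffer
{
public:
  ExampleStreamBuffer(GLenum target, GLuint buffer_id, u32 size)
    : GL::StreamBuffer(target, buffer_id, size), m_scratch(size)
  {
  }

  MappingResult Map(u32 alignment, u32 min_size) override
  {
    // Hand out a CPU-side scratch area; a real implementation decides here
    // whether to return a pointer into a persistent GL mapping instead.
    return MappingResult{static_cast<void*>(m_scratch.data()), 0, 0, m_size / alignment};
  }

  void Unmap(u32 used_size) override
  {
    // Upload or fence m_scratch[0..used_size) as the chosen strategy requires.
  }

private:
  std::vector<u8> m_scratch;
};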
@@ -756,14 +756,14 @@ void GPU_HW_OpenGL::FlushRender()
   if (m_batch.NeedsTwoPassRendering())
   {
     SetDrawState(BatchRenderMode::OnlyTransparent);
-    glDrawArrays(gl_primitives[static_cast<u8>(m_batch.primitive)], 0, vertex_count);
+    glDrawArrays(gl_primitives[static_cast<u8>(m_batch.primitive)], m_batch_base_vertex, vertex_count);
     SetDrawState(BatchRenderMode::OnlyOpaque);
-    glDrawArrays(gl_primitives[static_cast<u8>(m_batch.primitive)], 0, vertex_count);
+    glDrawArrays(gl_primitives[static_cast<u8>(m_batch.primitive)], m_batch_base_vertex, vertex_count);
   }
   else
   {
     SetDrawState(m_batch.GetRenderMode());
-    glDrawArrays(gl_primitives[static_cast<u8>(m_batch.primitive)], 0, vertex_count);
+    glDrawArrays(gl_primitives[static_cast<u8>(m_batch.primitive)], m_batch_base_vertex, vertex_count);
   }
 }
 
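The renderer half of the change follows from the new buffer: a batch's vertices now start at whatever offset Map() handed out, so the draws in FlushRender() above index from m_batch_base_vertex instead of 0. How that base vertex is obtained is not part of this hunk; a hedged sketch, assuming the renderer maps its vertex stream with the vertex stride as alignment and that the MappingResult fields are named pointer/index_aligned (the names GPU_HW_OpenGL_Example, m_vertex_stream_buffer, BatchVertex and m_batch_start_vertex_ptr are likewise illustrative):

void GPU_HW_OpenGL_Example::BeginBatch(u32 max_vertices)
{
  const auto res = m_vertex_stream_buffer->Map(sizeof(BatchVertex), max_vertices * sizeof(BatchVertex));
  m_batch_start_vertex_ptr = static_cast<BatchVertex*>(res.pointer); // field name assumed
  m_batch_base_vertex = res.index_aligned;                           // field name assumed
  // Vertices are written through m_batch_start_vertex_ptr, Unmap() is called with
  // the number of bytes written, and FlushRender() then draws starting at
  // m_batch_base_vertex instead of 0.
}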