Mirror of https://github.com/RetroDECK/Duckstation.git, synced 2024-11-25 15:15:40 +00:00
Common: Remove ThreadPool class
No longer needed.
This commit is contained in: parent 5b00ffb6cb, commit 6f66460ec0
Deleted file: CMake cross-compilation toolchain for aarch64.

@@ -1,14 +0,0 @@
# Source: https://github.com/stenzek/duckstation/issues/626#issuecomment-660718306

# Target system
SET(CMAKE_SYSTEM_NAME Linux)
SET(CMAKE_SYSTEM_PROCESSOR aarch64)
SET(CMAKE_SYSTEM_VERSION 1)
set(CMAKE_CROSSCOMPILING TRUE)

# Cross compiler
SET(CMAKE_C_COMPILER aarch64-linux-gnu-gcc)
SET(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++)
set(CMAKE_LIBRARY_ARCHITECTURE aarch64-linux-gnu)

set(THREADS_PTHREAD_ARG "0" CACHE STRING "Result from TRY_RUN" FORCE)
Deleted file: CMake cross-compilation toolchain for armv7l.

@@ -1,14 +0,0 @@
# Source: https://github.com/stenzek/duckstation/issues/626#issuecomment-660718306

# Target system
SET(CMAKE_SYSTEM_NAME Linux)
SET(CMAKE_SYSTEM_PROCESSOR armv7l)
SET(CMAKE_SYSTEM_VERSION 1)
set(CMAKE_CROSSCOMPILING TRUE)

# Cross compiler
SET(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
SET(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)
set(CMAKE_LIBRARY_ARCHITECTURE arm-linux-gnueabihf)

set(THREADS_PTHREAD_ARG "0" CACHE STRING "Result from TRY_RUN" FORCE)
Changed: the common library's CMake source list.

@@ -49,8 +49,6 @@ add_library(common
   small_string.h
   string_util.cpp
   string_util.h
-  thirdparty/thread_pool.cpp
-  thirdparty/thread_pool.h
   threading.cpp
   threading.h
   timer.cpp
src/common/thirdparty/thread_pool.cpp (vendored): 111 lines deleted.
@@ -1,111 +0,0 @@
// From https://raw.githubusercontent.com/cbraley/threadpool/master/src/thread_pool.cc

#include "thread_pool.h"

#include <cassert>

namespace cb {

// static
unsigned int ThreadPool::GetNumLogicalCores() {
  // TODO(cbraley): Apparently this is broken in some older stdlib
  // implementations?
  const unsigned int dflt = std::thread::hardware_concurrency();
  if (dflt == 0) {
    // TODO(cbraley): Return some error code instead.
    return 16;
  } else {
    return dflt;
  }
}

ThreadPool::~ThreadPool() {
  // TODO(cbraley): The current thread could help out to drain the work_ queue
  // faster - for example, if there is work that hasn't yet been scheduled this
  // thread could "pitch in" to help finish faster.

  {
    std::lock_guard<std::mutex> scoped_lock(mu_);
    exit_ = true;
  }
  condvar_.notify_all();  // Tell *all* workers we are ready.

  for (std::thread& thread : workers_) {
    thread.join();
  }
}

void ThreadPool::Wait() {
  std::unique_lock<std::mutex> lock(mu_);
  if (!work_.empty()) {
    work_done_condvar_.wait(lock, [this] { return work_.empty(); });
  }
}

ThreadPool::ThreadPool(int num_workers)
    : num_workers_(num_workers), exit_(false) {
  assert(num_workers_ > 0);
  // TODO(cbraley): Handle thread construction exceptions.
  workers_.reserve(num_workers_);
  for (int i = 0; i < num_workers_; ++i) {
    workers_.emplace_back(&ThreadPool::ThreadLoop, this);
  }
}

void ThreadPool::Schedule(std::function<void(void)> func) {
  ScheduleAndGetFuture(std::move(func));  // We ignore the returned std::future.
}

void ThreadPool::ThreadLoop() {
  // Wait until the ThreadPool sends us work.
  while (true) {
    WorkItem work_item;

    int prev_work_size = -1;
    {
      std::unique_lock<std::mutex> lock(mu_);
      condvar_.wait(lock, [this] { return exit_ || (!work_.empty()); });
      // ...after the wait(), we hold the lock.

      // If all the work is done and exit_ is true, break out of the loop.
      if (exit_ && work_.empty()) {
        break;
      }

      // Pop the work off of the queue - we are careful to execute the
      // work_item.func callback only after we have released the lock.
      prev_work_size = work_.size();
      work_item = std::move(work_.front());
      work_.pop();
    }

    // We are careful to do the work without the lock held!
    // TODO(cbraley): Handle exceptions properly.
    work_item.func();  // Do work.

    if (work_done_callback_) {
      work_done_callback_(prev_work_size - 1);
    }

    // Notify a condvar is all work is done.
    {
      std::unique_lock<std::mutex> lock(mu_);
      if (work_.empty() && prev_work_size == 1) {
        work_done_condvar_.notify_all();
      }
    }
  }
}

int ThreadPool::OutstandingWorkSize() const {
  std::lock_guard<std::mutex> scoped_lock(mu_);
  return work_.size();
}

int ThreadPool::NumWorkers() const { return num_workers_; }

void ThreadPool::SetWorkDoneCallback(std::function<void(int)> func) {
  work_done_callback_ = std::move(func);
}

}  // namespace cb
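For context on the API implemented above, here is a minimal usage sketch: queue work with Schedule(), then block in Wait() until the queue has drained. It is illustrative only and not taken from the DuckStation sources; SimulateWork and the worker count are assumptions made for the example.

// Illustrative sketch only - not part of this commit. Shows the basic
// fire-and-forget pattern the implementation above supports.
// SimulateWork is a hypothetical placeholder for real work.
#include <chrono>
#include <thread>

#include "thread_pool.h"

static void SimulateWork(int id) {
  // Pretend each task takes a little while.
  std::this_thread::sleep_for(std::chrono::milliseconds(10 * id));
}

int main() {
  // One worker per logical core, as reported by the pool itself.
  cb::ThreadPool pool(static_cast<int>(cb::ThreadPool::GetNumLogicalCores()));

  for (int i = 0; i < 8; ++i)
    pool.Schedule([i]() { SimulateWork(i); });

  // Returns once the work queue is empty; the destructor additionally joins
  // every worker thread before the pool is destroyed.
  pool.Wait();
  return 0;
}

Wait() relies on work_done_condvar_, which ThreadLoop() notifies once the queue runs dry, while the destructor instead sets exit_, wakes every worker, and joins them.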
src/common/thirdparty/thread_pool.h (vendored): 234 lines deleted.
@@ -1,234 +0,0 @@
// From https://raw.githubusercontent.com/cbraley/threadpool/master/src/thread_pool.h

#ifndef SRC_THREAD_POOL_H_
#define SRC_THREAD_POOL_H_

// A simple thread pool class.
// Usage examples:
//
// {
//   ThreadPool pool(16);  // 16 worker threads.
//   for (int i = 0; i < 100; ++i) {
//     pool.Schedule([i]() {
//       DoSlowExpensiveOperation(i);
//     });
//   }
//
//   // `pool` goes out of scope here - the code will block in the ~ThreadPool
//   // destructor until all work is complete.
// }
//
// // TODO(cbraley): Add examples with std::future.

#include <condition_variable>
#include <functional>
#include <future>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// We want to use std::invoke if C++17 is available, and fallback to "hand
// crafted" code if std::invoke isn't available.
#if __cplusplus >= 201703L
#define INVOKE_MACRO(CALLABLE, ARGS_TYPE, ARGS) std::invoke(CALLABLE, std::forward<ARGS_TYPE>(ARGS)...)
#elif __cplusplus >= 201103L
// Update this with http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4169.html.
#define INVOKE_MACRO(CALLABLE, ARGS_TYPE, ARGS) CALLABLE(std::forward<ARGS_TYPE>(ARGS)...)
#else
#error ("C++ version is too old! C++98 is not supported.")
#endif

namespace cb {

class ThreadPool {
 public:
  // Create a thread pool with `num_workers` dedicated worker threads.
  explicit ThreadPool(int num_workers);

  // Default construction is disallowed.
  ThreadPool() = delete;

  // Get the number of logical cores on the CPU. This is implemented using
  // std::thread::hardware_concurrency().
  // https://en.cppreference.com/w/cpp/thread/thread/hardware_concurrency
  static unsigned int GetNumLogicalCores();

  // The `ThreadPool` destructor blocks until all outstanding work is complete.
  ~ThreadPool();

  // No copying, assigning, or std::move-ing.
  ThreadPool& operator=(const ThreadPool&) = delete;
  ThreadPool(const ThreadPool&) = delete;
  ThreadPool(ThreadPool&&) = delete;
  ThreadPool& operator=(ThreadPool&&) = delete;

  // Add the function `func` to the thread pool. `func` will be executed at some
  // point in the future on an arbitrary thread.
  void Schedule(std::function<void(void)> func);

  // Add `func` to the thread pool, and return a std::future that can be used to
  // access the function's return value.
  //
  // *** Usage example ***
  //   Don't be alarmed by this function's tricky looking signature - this is
  //   very easy to use. Here's an example:
  //
  //   int ComputeSum(std::vector<int>& values) {
  //     int sum = 0;
  //     for (const int& v : values) {
  //       sum += v;
  //     }
  //     return sum;
  //   }
  //
  //   ThreadPool pool = ...;
  //   std::vector<int> numbers = ...;
  //
  //   std::future<int> sum_future = ScheduleAndGetFuture(
  //     []() {
  //       return ComputeSum(numbers);
  //     });
  //
  //   // Do other work...
  //
  //   std::cout << "The sum is " << sum_future.get() << std::endl;
  //
  // *** Details ***
  //   Given a callable `func` that returns a value of type `RetT`, this
  //   function returns a std::future<RetT> that can be used to access
  //   `func`'s results.
  template <typename FuncT, typename... ArgsT>
  auto ScheduleAndGetFuture(FuncT&& func, ArgsT&&... args)
      -> std::future<decltype(INVOKE_MACRO(func, ArgsT, args))>;

  // Wait for all outstanding work to be completed.
  void Wait();

  // Return the number of outstanding functions to be executed.
  int OutstandingWorkSize() const;

  // Return the number of threads in the pool.
  int NumWorkers() const;

  void SetWorkDoneCallback(std::function<void(int)> func);

 private:
  void ThreadLoop();

  // Number of worker threads - fixed at construction time.
  int num_workers_;

  // The destructor sets `exit_` to true and then notifies all workers. `exit_`
  // causes each thread to break out of their work loop.
  bool exit_;

  mutable std::mutex mu_;

  // Work queue. Guarded by `mu_`.
  struct WorkItem {
    std::function<void(void)> func;
  };
  std::queue<WorkItem> work_;

  // Condition variable used to notify worker threads that new work is
  // available.
  std::condition_variable condvar_;

  // Worker threads.
  std::vector<std::thread> workers_;

  // Condition variable used to notify that all work is complete - the work
  // queue has "run dry".
  std::condition_variable work_done_condvar_;

  // Whenever a work item is complete, we call this callback. If this is empty,
  // nothing is done.
  std::function<void(int)> work_done_callback_;
};

namespace impl {

// This helper class simply returns a std::function that executes:
//   ReturnT x = func();
//   promise->set_value(x);
// However, this is tricky in the case where T == void. The code above won't
// compile if ReturnT == void, and neither will
//   promise->set_value(func());
// To workaround this, we use a template specialization for the case where
// ReturnT is void. If the "regular void" proposal is accepted, this could be
// simpler:
//   http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0146r1.html.

// The non-specialized `FuncWrapper` implementation handles callables that
// return a non-void value.
template <typename ReturnT>
struct FuncWrapper {
  template <typename FuncT, typename... ArgsT>
  std::function<void()> GetWrapped(
      FuncT&& func, std::shared_ptr<std::promise<ReturnT>> promise,
      ArgsT&&... args) {
    // TODO(cbraley): Capturing by value is inefficient. It would be more
    // efficient to move-capture everything, but we can't do this until C++14
    // generalized lambda capture is available. Can we use std::bind instead to
    // make this more efficient and still use C++11?
    return [promise, func, args...]() mutable {
      promise->set_value(INVOKE_MACRO(func, ArgsT, args));
    };
  }
};

template <typename FuncT, typename... ArgsT>
void InvokeVoidRet(FuncT&& func, std::shared_ptr<std::promise<void>> promise,
                   ArgsT&&... args) {
  INVOKE_MACRO(func, ArgsT, args);
  promise->set_value();
}

// This `FuncWrapper` specialization handles callables that return void.
template <>
struct FuncWrapper<void> {
  template <typename FuncT, typename... ArgsT>
  std::function<void()> GetWrapped(FuncT&& func,
                                   std::shared_ptr<std::promise<void>> promise,
                                   ArgsT&&... args) {
    return [promise, func, args...]() mutable {
      INVOKE_MACRO(func, ArgsT, args);
      promise->set_value();
    };
  }
};

}  // namespace impl

template <typename FuncT, typename... ArgsT>
auto ThreadPool::ScheduleAndGetFuture(FuncT&& func, ArgsT&&... args)
    -> std::future<decltype(INVOKE_MACRO(func, ArgsT, args))> {
  using ReturnT = decltype(INVOKE_MACRO(func, ArgsT, args));

  // We are only allocating this std::promise in a shared_ptr because
  // std::promise is non-copyable.
  std::shared_ptr<std::promise<ReturnT>> promise =
      std::make_shared<std::promise<ReturnT>>();
  std::future<ReturnT> ret_future = promise->get_future();

  impl::FuncWrapper<ReturnT> func_wrapper;
  std::function<void()> wrapped_func = func_wrapper.GetWrapped(
      std::move(func), std::move(promise), std::forward<ArgsT>(args)...);

  // Acquire the lock, and then push the WorkItem onto the queue.
  {
    std::lock_guard<std::mutex> scoped_lock(mu_);
    WorkItem work;
    work.func = std::move(wrapped_func);
    work_.emplace(std::move(work));
  }
  condvar_.notify_one();  // Tell one worker we are ready.
  return ret_future;
}

}  // namespace cb

#undef INVOKE_MACRO

#endif  // SRC_THREAD_POOL_H_
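The INVOKE_MACRO fallback and the FuncWrapper<void> specialization above exist because promise->set_value(func()) does not compile when func returns void. Below is a minimal sketch of both ScheduleAndGetFuture() paths, assuming the removed header were still available; ComputeSum and the lambdas are hypothetical examples, not DuckStation code.

// Illustrative sketch only - not part of this commit. Exercises the two paths
// the FuncWrapper machinery distinguishes: a callable returning a value
// (FuncWrapper<int>) and one returning void (FuncWrapper<void>, since
// std::promise<void>::set_value() takes no argument).
// ComputeSum is a hypothetical helper.
#include <future>
#include <iostream>
#include <numeric>
#include <vector>

#include "thread_pool.h"

static int ComputeSum(const std::vector<int>& values) {
  return std::accumulate(values.begin(), values.end(), 0);
}

int main() {
  cb::ThreadPool pool(4);
  const std::vector<int> numbers = {1, 2, 3, 4, 5};

  // Non-void return: the result comes back through a std::future<int>.
  std::future<int> sum =
      pool.ScheduleAndGetFuture([&numbers]() { return ComputeSum(numbers); });

  // Void return: the future only signals completion.
  std::future<void> done =
      pool.ScheduleAndGetFuture([]() { std::cout << "side effect\n"; });

  done.wait();
  std::cout << "The sum is " << sum.get() << std::endl;  // prints 15
  return 0;
}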