#pragma once

#include <atomic>
#include <memory>
#include <vector>
#include <queue>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <functional>
#include <future>
#include <stdexcept>     // std::runtime_error (ThreadPool::enqueue)
#include <type_traits>   // std::invoke_result

namespace EngineManager {

// Lock-free queue for passing work between threads. push() may be called from
// multiple producer threads concurrently (tail_ is swapped with an atomic
// exchange); try_pop() assumes a single consumer thread.
template<typename T>
class LockFreeQueue {
private:
    struct Node {
        std::atomic<T*> data{nullptr};
        std::atomic<Node*> next{nullptr};
    };

    std::atomic<Node*> head_{nullptr};  // dummy node, advanced by the consumer
    std::atomic<Node*> tail_{nullptr};  // last node, advanced by producers

public:
    LockFreeQueue() {
        // Start with a dummy node so head_ and tail_ are never null.
        Node* dummy = new Node;
        head_.store(dummy);
        tail_.store(dummy);
    }

    ~LockFreeQueue() {
        // Free any unconsumed payloads along with the nodes themselves.
        while (Node* const old_head = head_.load()) {
            head_.store(old_head->next.load());
            delete old_head->data.load();
            delete old_head;
        }
    }

    void push(T item) {
        Node* new_node = new Node;
        T* data = new T(std::move(item));
        new_node->data.store(data);

        // Publish: swap the new node into tail_, then link the previous tail to it.
        Node* prev_tail = tail_.exchange(new_node);
        prev_tail->next.store(new_node);
    }

    bool try_pop(T& result) {
        Node* head = head_.load();
        Node* next = head->next.load();

        // Empty, or a producer has swapped tail_ but not linked its node yet.
        if (next == nullptr) return false;

        // Take ownership of the payload and clear the slot, so the destructor
        // cannot double-delete it once this node becomes the dummy.
        T* data = next->data.exchange(nullptr);
        if (data == nullptr) return false;

        result = std::move(*data);
        delete data;

        // next becomes the new dummy node; retire the old one.
        head_.store(next);
        delete head;
        return true;
    }

    bool empty() const {
        Node* head = head_.load();
        return head->next.load() == nullptr;
    }
};
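
// Usage sketch (illustrative): one producer thread pushes while the consumer
// drains with try_pop(). The variable names are hypothetical; only the
// LockFreeQueue members defined above are assumed.
//
//   EngineManager::LockFreeQueue<int> queue;
//
//   std::thread producer([&queue] {
//       for (int i = 0; i < 100; ++i) queue.push(i);
//   });
//
//   int value = 0;
//   int received = 0;
//   while (received < 100) {
//       if (queue.try_pop(value)) ++received;   // spin until every item arrives
//   }
//   producer.join();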

// Fixed-size thread pool with a single shared task queue protected by a mutex
// (workers do not steal from per-thread queues).
class ThreadPool {
private:
    std::vector<std::thread> workers_;
    std::queue<std::function<void()>> tasks_;
    mutable std::mutex queue_mutex_;          // mutable so const methods can lock it
    std::condition_variable condition_;
    std::atomic<bool> stop_{false};

public:
    explicit ThreadPool(size_t num_threads = std::thread::hardware_concurrency()) {
        if (num_threads == 0) num_threads = 1;  // hardware_concurrency() may report 0

        for (size_t i = 0; i < num_threads; ++i) {
            workers_.emplace_back([this] {
                for (;;) {
                    std::function<void()> task;

                    {
                        std::unique_lock<std::mutex> lock(queue_mutex_);
                        condition_.wait(lock, [this] { return stop_ || !tasks_.empty(); });

                        // Exit only once stopped and the queue is fully drained.
                        if (stop_ && tasks_.empty()) return;

                        task = std::move(tasks_.front());
                        tasks_.pop();
                    }

                    task();
                }
            });
        }
    }

    template<class F, class... Args>
    auto enqueue(F&& f, Args&&... args) -> std::future<typename std::invoke_result<F, Args...>::type> {
        using return_type = typename std::invoke_result<F, Args...>::type;

        // Wrap the callable in a packaged_task so the caller receives a future.
        auto task = std::make_shared<std::packaged_task<return_type()>>(
            std::bind(std::forward<F>(f), std::forward<Args>(args)...)
        );

        std::future<return_type> res = task->get_future();

        {
            std::unique_lock<std::mutex> lock(queue_mutex_);
            if (stop_) {
                throw std::runtime_error("ThreadPool: enqueue on stopped pool");
            }

            tasks_.emplace([task]() { (*task)(); });
        }

        condition_.notify_one();
        return res;
    }

    size_t pending_tasks() const {
        std::lock_guard<std::mutex> lock(queue_mutex_);
        return tasks_.size();
    }

    ~ThreadPool() {
        {
            // Set the stop flag while holding the lock so a worker cannot miss
            // the wake-up between checking the predicate and blocking in wait().
            std::unique_lock<std::mutex> lock(queue_mutex_);
            stop_ = true;
        }
        condition_.notify_all();

        for (std::thread& worker : workers_) {
            worker.join();
        }
    }
};
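
// Usage sketch (illustrative): enqueue() returns a std::future for the task's
// result. The pool size and lambdas here are hypothetical examples.
//
//   EngineManager::ThreadPool pool(4);
//
//   std::future<int> sum = pool.enqueue([](int a, int b) { return a + b; }, 2, 3);
//   pool.enqueue([] { /* fire-and-forget side effect */ });
//
//   int result = sum.get();   // blocks until a worker has executed the task (5)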

// Process-wide facade: a lazily created singleton that owns a CPU pool, an
// I/O pool, and an event queue drained by process_events().
class AsyncEngine {
private:
    // Inline static members (C++17) keep this header self-contained; no
    // out-of-line definitions are needed in a .cpp file.
    inline static std::unique_ptr<AsyncEngine> instance_;
    inline static std::once_flag initialized_;

    std::unique_ptr<ThreadPool> main_pool_;   // CPU-bound work
    std::unique_ptr<ThreadPool> io_pool_;     // blocking / I/O-bound work
    LockFreeQueue<std::function<void()>> event_queue_;

    AsyncEngine() {
        unsigned int hw_threads = std::thread::hardware_concurrency();
        size_t main_threads = hw_threads > 4 ? hw_threads / 2 : 2;
        size_t io_threads = hw_threads > 8 ? 4 : 2;

        main_pool_ = std::make_unique<ThreadPool>(main_threads);
        io_pool_ = std::make_unique<ThreadPool>(io_threads);
    }

public:
    static AsyncEngine& instance() {
        std::call_once(initialized_, []() {
            instance_ = std::unique_ptr<AsyncEngine>(new AsyncEngine());
        });
        return *instance_;
    }

    // Submit CPU-bound work to the main pool.
    template<class F, class... Args>
    auto execute(F&& f, Args&&... args) -> std::future<typename std::invoke_result<F, Args...>::type> {
        return main_pool_->enqueue(std::forward<F>(f), std::forward<Args>(args)...);
    }

    // Submit blocking or I/O-bound work to the I/O pool.
    template<class F, class... Args>
    auto execute_io(F&& f, Args&&... args) -> std::future<typename std::invoke_result<F, Args...>::type> {
        return io_pool_->enqueue(std::forward<F>(f), std::forward<Args>(args)...);
    }

    void push_event(std::function<void()> event) {
        event_queue_.push(std::move(event));
    }

    // Drain and run queued events; call from a single consumer thread
    // (see the single-consumer note on LockFreeQueue::try_pop).
    void process_events() {
        std::function<void()> event;
        while (event_queue_.try_pop(event)) {
            event();
        }
    }

    size_t pending_main_tasks() const { return main_pool_->pending_tasks(); }
    size_t pending_io_tasks() const { return io_pool_->pending_tasks(); }

    static void shutdown() {
        instance_.reset();  // joins both pools via their destructors
    }
};
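
// Usage sketch (illustrative): a frame-loop style caller. The lambdas and
// local names below are hypothetical placeholders, not part of this header.
//
//   auto& engine = EngineManager::AsyncEngine::instance();
//
//   auto physics = engine.execute([] { return 42; });            // CPU pool
//   auto loaded  = engine.execute_io([] { return std::string("level.dat"); });
//
//   engine.push_event([] { /* apply a state change */ });
//   engine.process_events();                                      // run queued events
//
//   int result = physics.get();
//   std::string path = loaded.get();
//
//   EngineManager::AsyncEngine::shutdown();                       // destroys the pools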

} // namespace EngineManager