#pragma once

#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <stdexcept>
#include <thread>
#include <type_traits>
#include <vector>

namespace EngineManager {

// Lock-free queue for cross-thread communication. push() is safe from
// multiple producer threads (the tail is claimed with an atomic exchange);
// try_pop() must only be called from a single consumer thread.
template <typename T>
class LockFreeQueue {
private:
    struct Node {
        std::atomic<T*> data{nullptr};
        std::atomic<Node*> next{nullptr};
    };

    std::atomic<Node*> head_{nullptr};
    std::atomic<Node*> tail_{nullptr};

public:
    LockFreeQueue() {
        Node* dummy = new Node;  // dummy node so head always has a predecessor
        head_.store(dummy);
        tail_.store(dummy);
    }

    ~LockFreeQueue() {
        while (Node* const old_head = head_.load()) {
            head_.store(old_head->next.load());
            delete old_head->data.load();
            delete old_head;
        }
    }

    LockFreeQueue(const LockFreeQueue&) = delete;
    LockFreeQueue& operator=(const LockFreeQueue&) = delete;

    void push(T item) {
        Node* new_node = new Node;
        T* data = new T(std::move(item));
        new_node->data.store(data);
        // Claim the tail first, then link the previous tail to the new node.
        Node* prev_tail = tail_.exchange(new_node);
        prev_tail->next.store(new_node);
    }

    bool try_pop(T& result) {
        Node* head = head_.load();
        Node* next = head->next.load();
        if (next == nullptr) return false;  // queue is (or appears) empty
        T* data = next->data.load();
        if (data == nullptr) return false;  // producer has not finished linking
        result = std::move(*data);
        delete data;
        // next becomes the new dummy node; clear its payload pointer so the
        // destructor does not double-delete it.
        next->data.store(nullptr);
        head_.store(next);
        delete head;
        return true;
    }

    bool empty() const {
        Node* head = head_.load();
        return head->next.load() == nullptr;
    }
};
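// Illustrative sketch (not part of the original engine API): a minimal
// single-producer / single-consumer round trip through LockFreeQueue,
// respecting the queue's one-consumer constraint. The function name
// lock_free_queue_example is hypothetical.
inline void lock_free_queue_example() {
    LockFreeQueue<int> queue;
    std::thread producer([&queue] {
        for (int i = 0; i < 100; ++i) {
            queue.push(i);  // push is safe from any number of producers
        }
    });
    int value = 0;
    int received = 0;
    while (received < 100) {
        if (queue.try_pop(value)) {  // only this one thread may pop
            ++received;
        }
    }
    producer.join();
}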
// Fixed-size thread pool draining one shared task queue (no work
// stealing; every worker pulls from the same queue under a mutex).
class ThreadPool {
private:
    std::vector<std::thread> workers_;
    std::queue<std::function<void()>> tasks_;
    mutable std::mutex queue_mutex_;  // mutable so pending_tasks() stays const
    std::condition_variable condition_;
    std::atomic<bool> stop_{false};

public:
    explicit ThreadPool(size_t num_threads = std::thread::hardware_concurrency()) {
        for (size_t i = 0; i < num_threads; ++i) {
            workers_.emplace_back([this] {
                for (;;) {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(queue_mutex_);
                        condition_.wait(lock,
                                        [this] { return stop_ || !tasks_.empty(); });
                        if (stop_ && tasks_.empty()) return;
                        task = std::move(tasks_.front());
                        tasks_.pop();
                    }
                    task();
                }
            });
        }
    }

    template <typename F, typename... Args>
    auto enqueue(F&& f, Args&&... args)
        -> std::future<typename std::invoke_result<F, Args...>::type> {
        using return_type = typename std::invoke_result<F, Args...>::type;

        auto task = std::make_shared<std::packaged_task<return_type()>>(
            std::bind(std::forward<F>(f), std::forward<Args>(args)...));

        std::future<return_type> res = task->get_future();
        {
            std::unique_lock<std::mutex> lock(queue_mutex_);
            if (stop_) {
                throw std::runtime_error("ThreadPool: enqueue on stopped pool");
            }
            tasks_.emplace([task]() { (*task)(); });
        }
        condition_.notify_one();
        return res;
    }

    size_t pending_tasks() const {
        std::lock_guard<std::mutex> lock(queue_mutex_);
        return tasks_.size();
    }

    ~ThreadPool() {
        {
            // Set the flag under the mutex so a worker sitting between its
            // predicate check and its wait cannot miss the notification.
            std::lock_guard<std::mutex> lock(queue_mutex_);
            stop_ = true;
        }
        condition_.notify_all();
        for (std::thread& worker : workers_) {
            worker.join();
        }
    }
};

// Process-wide singleton owning one pool for CPU-bound work, one for
// blocking IO, and a queue of deferred events drained by a single consumer.
class AsyncEngine {
private:
    inline static std::unique_ptr<AsyncEngine> instance_;
    inline static std::once_flag initialized_;

    std::unique_ptr<ThreadPool> main_pool_;
    std::unique_ptr<ThreadPool> io_pool_;
    LockFreeQueue<std::function<void()>> event_queue_;

    AsyncEngine() {
        unsigned int hw_threads = std::thread::hardware_concurrency();
        size_t main_threads = hw_threads > 4 ? hw_threads / 2 : 2;
        size_t io_threads = hw_threads > 8 ? 4 : 2;
        main_pool_ = std::make_unique<ThreadPool>(main_threads);
        io_pool_ = std::make_unique<ThreadPool>(io_threads);
    }

public:
    static AsyncEngine& instance() {
        std::call_once(initialized_, []() {
            instance_ = std::unique_ptr<AsyncEngine>(new AsyncEngine());
        });
        return *instance_;
    }

    // Run CPU-bound work on the main pool.
    template <typename F, typename... Args>
    auto execute(F&& f, Args&&... args)
        -> std::future<typename std::invoke_result<F, Args...>::type> {
        return main_pool_->enqueue(std::forward<F>(f), std::forward<Args>(args)...);
    }

    // Run blocking/IO-bound work on the smaller IO pool.
    template <typename F, typename... Args>
    auto execute_io(F&& f, Args&&... args)
        -> std::future<typename std::invoke_result<F, Args...>::type> {
        return io_pool_->enqueue(std::forward<F>(f), std::forward<Args>(args)...);
    }

    void push_event(std::function<void()> event) {
        event_queue_.push(std::move(event));
    }

    // Drain pending events on the calling thread; per LockFreeQueue's
    // constraint, only one thread should call this at a time.
    void process_events() {
        std::function<void()> event;
        while (event_queue_.try_pop(event)) {
            event();
        }
    }

    size_t pending_main_tasks() const { return main_pool_->pending_tasks(); }
    size_t pending_io_tasks() const { return io_pool_->pending_tasks(); }

    static void shutdown() { instance_.reset(); }  // joins both pools
};

}  // namespace EngineManager
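// Illustrative sketch (not part of the original file): typical use of
// AsyncEngine from a frame loop. The function name async_engine_example
// and the sample workload are hypothetical stand-ins.
inline void async_engine_example() {
    auto& engine = EngineManager::AsyncEngine::instance();

    // Offload CPU-bound work and keep a future for the result.
    std::future<int> sum = engine.execute([] { return 2 + 2; });

    // Queue an event to run later on whichever thread drains the queue.
    engine.push_event([] { /* e.g. apply a finished asset to the scene */ });

    engine.process_events();  // drain pending events (single consumer)
    int result = sum.get();   // block until the pooled task finishes
    (void)result;

    EngineManager::AsyncEngine::shutdown();  // destroys the engine, joining both pools
}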