#pragma once

#include <condition_variable>
#include <cstddef>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <stdexcept>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>

/// A fixed-size thread pool that runs queued tasks on worker threads.
///
/// Thread-safety: enqueue() and pending_tasks() may be called concurrently
/// from any thread. The destructor stops accepting work, lets already-queued
/// tasks drain, and joins all workers.
class ThreadPool {
private:
    std::vector<std::thread> workers_;         // worker threads; joined in the destructor
    std::queue<std::function<void()>> tasks_;  // pending tasks; guarded by queue_mutex_
    mutable std::mutex queue_mutex_;           // mutable so const pending_tasks() can lock it
    std::condition_variable condition_;
    bool stop_{false};                         // guarded by queue_mutex_ (see destructor note)

public:
    /// Spawn `num_threads` workers (defaults to the hardware concurrency).
    explicit ThreadPool(size_t num_threads = std::thread::hardware_concurrency()) {
        // hardware_concurrency() may return 0 ("unknown"); a zero-worker pool
        // would deadlock every enqueue()d future, so fall back to one thread.
        if (num_threads == 0) {
            num_threads = 1;
        }
        for (size_t i = 0; i < num_threads; ++i) {
            workers_.emplace_back([this] {
                for (;;) {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(queue_mutex_);
                        condition_.wait(lock,
                                        [this] { return stop_ || !tasks_.empty(); });
                        // On shutdown, drain remaining tasks before exiting.
                        if (stop_ && tasks_.empty()) {
                            return;
                        }
                        task = std::move(tasks_.front());
                        tasks_.pop();
                    }
                    task();  // run outside the lock so tasks don't serialize
                }
            });
        }
    }

    /// Enqueue a callable for asynchronous execution.
    /// @param f     callable to run on a worker thread
    /// @param args  arguments bound to the callable
    /// @return a future yielding the callable's result (or its exception)
    /// @throws std::runtime_error if the pool is already stopping
    template <class F, class... Args>
    auto enqueue(F&& f, Args&&... args)
        -> std::future<typename std::invoke_result<F, Args...>::type> {
        using return_type = typename std::invoke_result<F, Args...>::type;

        // packaged_task propagates the result/exception through the future;
        // shared_ptr because std::function requires a copyable callable.
        auto task = std::make_shared<std::packaged_task<return_type()>>(
            std::bind(std::forward<F>(f), std::forward<Args>(args)...));

        std::future<return_type> res = task->get_future();
        {
            std::unique_lock<std::mutex> lock(queue_mutex_);
            if (stop_) {
                throw std::runtime_error("enqueue on stopped ThreadPool");
            }
            tasks_.emplace([task]() { (*task)(); });
        }
        condition_.notify_one();  // notify without the lock: avoids a pointless wakeup-block
        return res;
    }

    /// Number of tasks queued but not yet picked up by a worker.
    size_t pending_tasks() const {
        std::lock_guard<std::mutex> lock(queue_mutex_);
        return tasks_.size();
    }

    /// Signal shutdown, wake all workers, and join them.
    /// Tasks already in the queue still run to completion.
    ~ThreadPool() {
        {
            // stop_ must be written while holding the mutex: a worker that has
            // just evaluated the wait predicate (and found it false) would
            // otherwise miss the notify_all and block forever.
            std::lock_guard<std::mutex> lock(queue_mutex_);
            stop_ = true;
        }
        condition_.notify_all();
        for (std::thread& worker : workers_) {
            worker.join();
        }
    }
};