This post introduces how to use two thread pools from Boost, as well as how to implement a thread pool in C++11.
Header: boost/asio/thread_pool.hpp
Convenience header: boost/asio.hpp
void my_task()
{
...
}
...
// Launch the pool with four threads.
boost::asio::thread_pool pool(4);
// Submit a function to the pool.
boost::asio::post(pool, my_task);
// Submit a lambda object to the pool.
boost::asio::post(pool,
[]()
{
...
});
// Wait for all tasks in the pool to complete.
pool.join();
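For reference, here is a minimal, self-contained version of the same pattern; it assumes Boost 1.66 or later, which is when boost::asio::thread_pool and boost::asio::post were introduced:

#include <boost/asio/thread_pool.hpp>
#include <boost/asio/post.hpp>
#include <iostream>

void my_task()
{
    std::cout << "my_task running\n";
}

int main()
{
    // launch the pool with four threads
    boost::asio::thread_pool pool(4);

    // submit a free function and a lambda
    boost::asio::post(pool, my_task);
    boost::asio::post(pool, []
    {
        std::cout << "lambda task running\n";
    });

    // wait for all tasks in the pool to complete
    pool.join();
    return 0;
}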
The second pool is the threadpool library (http://threadpool.sourceforge.net/). It does not need to be compiled; just include its header in your project.
#include <iostream>
#include <boost/bind.hpp>
#include "threadpool.hpp"
using namespace std;
using namespace boost::threadpool;
// Some example tasks
void first_task()
{
cout << "first task is running\n" ;
}
void second_task()
{
cout << "second task is running\n" ;
}
void task_with_parameter(int value)
{
cout << "task_with_parameter(" << value << ")\n";
}
int main(int argc,char *argv[])
{
// Create fifo thread pool container with two threads.
pool tp(2);
// Add some tasks to the pool.
tp.schedule(&first_task);
tp.schedule(&second_task);
tp.schedule(boost::bind(task_with_parameter, 4));
// Wait until all tasks are finished.
tp.wait();
// Now all tasks are finished!
return(0);
}
With asynchronous calls, the return value can generally be obtained in two ways: synchronously or asynchronously.
Obtaining the return value synchronously:
int task_int_23()
{
cout<<"task_int_23()\n";
return 23;
}
future<int> res = schedule(tp, &task_int_23);
res.wait();
cout << "get res value:" << res.get() << endl;
Whether the author never intended results to be delivered through an asynchronous callback, or I simply did not look carefully enough, I could not find such a mechanism, so I wrote a small callback functor to obtain the return value asynchronously.
// R is the return type of the task function
template<typename R>
class callback_task
{
typedef boost::function<void(R)> callback;
typedef boost::function<R()> function;
private:
callback c_;
function f_;
public:
// F: the task function  C: the result callback
template<typename F, typename C>
callback_task(F f,C c)
{
f_ = f;
c_ = c;
}
void operator()()
{
c_(f_());
}
};
With this functor, retrieving the result asynchronously through a callback becomes straightforward.
// result callback for task_int_23
void callback(int k)
{
    cout << "get callback value:" << k << endl;
}
...
tp.schedule(callback_task<int>(&task_int_23, &callback));
The next example drops the threadpool library and instead builds a thread pool on top of boost::asio::io_service, using boost::packaged_task and boost::shared_future to retrieve results.
#include <iostream>
#include <vector>
#include <unistd.h>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <boost/make_shared.hpp>
#include <boost/thread/future.hpp>
namespace asio = boost::asio;
int sleep_print(int seconds) {
std::cout << "goint to sleep (" << seconds << ")" << std::endl;
sleep(seconds);
std::cout << "wake up (" << seconds << ")" << std::endl;
return 0;
}
typedef boost::packaged_task<int> task_t;
typedef boost::shared_ptr<task_t> ptask_t;
void push_job(int seconds, boost::asio::io_service& io_service, std::vector<boost::shared_future<int> >& pending_data) {
    ptask_t task = boost::make_shared<task_t>(boost::bind(&sleep_print, seconds));
    boost::shared_future<int> fut(task->get_future());
pending_data.push_back(fut);
io_service.post(boost::bind(&task_t::operator(), task));
}
int main() {
boost::asio::io_service io_service;
boost::thread_group threads;
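// the work object keeps io_service::run() from returning while the task
// queue is empty, so the worker threads stay alive waiting for posted jobs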
boost::asio::io_service::work work(io_service);
for (int i = 0; i < boost::thread::hardware_concurrency() ; ++i)
{
threads.create_thread(boost::bind(&boost::asio::io_service::run,
&io_service));
}
std::vector<boost::shared_future<int> > pending_data; // vector of futures
sleep_print(2);
push_job(3, io_service, pending_data);
push_job(4, io_service, pending_data);
// boost::thread task(boost::move(pt)); // launch task on a thread
boost::wait_for_all(pending_data.begin(), pending_data.end());
push_job(3, io_service, pending_data);
push_job(4, io_service, pending_data);
push_job(5, io_service, pending_data);
boost::wait_for_all(pending_data.begin(), pending_data.end());
// stop the io_service and join the workers before it is destroyed
io_service.stop();
threads.join_all();
return 0;
}
The same io_service technique can be wrapped up in a reusable class:
template<int NWorkers = 0>
class work_queue
{
public:
work_queue()
{
work_ctrl_ = new boost::asio::io_service::work(io_service_);
int workers = boost::thread::hardware_concurrency();
if(NWorkers > 0)
workers = NWorkers;
for (int i = 0; i < workers; ++i)
{
threads_.create_thread(boost::bind(&asio::io_service::run, &io_service_));
}
}
virtual ~work_queue()
{
delete work_ctrl_;
}
template<typename TTask>
void add_task(TTask task)
{
// c++11
// io_service_.dispatch(std::move(task));
io_service_.dispatch(task);
}
private:
boost::asio::io_service io_service_;
boost::thread_group threads_;
boost::asio::io_service::work *work_ctrl_;
};
// application class
class myapp : work_queue<0>
{
public:
void add_result(vector< vector<double> > kernel2d)
{
    boost::lock_guard<boost::mutex> lock(mutex_);
task_count_++;
result_.push_back(kernel2d);
if(task_count_== 3)
{
cout << "all tasks are completed, waiting ctrl-c to display the results..." << endl;
}
}
int operator()(const std::vector< application::application_ctrl::string_type >& args,
application::application_ctrl& ctrl)
{
// your application logic here!
task_count_ = 0;
// our tasks
add_task(gaussian_blur<3>( boost::bind( &myapp::add_result, this, _1 )));
add_task(gaussian_blur<6>( boost::bind( &myapp::add_result, this, _1 )));
add_task(gaussian_blur<9>( boost::bind( &myapp::add_result, this, _1 )));
ctrl.wait_for_termination_request();
return 0;
}
int stop()
{
std::cout << "Result..." << std::endl;
for(int i = 0; i < result_.size(); ++i)
{
cout << i << " : -----------------------" << std::endl;
vector< vector<double> > & kernel2d = result_[i];
for (int row = 0; row < kernel2d.size(); row++)
{
for (int col = 0; col < kernel2d[row].size(); col++)
{
cout << setprecision(5) << fixed << kernel2d[row][col] << " ";
}
cout << endl;
}
}
return 1;
}
private:
boost::mutex mutex_;
vector< vector< vector<double> > > result_;
int task_count_;
}; // myapp
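The myapp class comes from the CodeProject article listed in the references and relies on the Boost.Application framework plus a gaussian_blur functor defined there. On its own, the work_queue template can be exercised with something much simpler; the following driver is only an illustrative sketch (the final sleep stands in for real completion tracking):

#include <iostream>
#include <boost/bind.hpp>
#include <boost/thread/thread.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>

void say(int i)
{
    std::cout << "task " << i << " running\n";
}

int main()
{
    work_queue<2> wq;                        // two worker threads

    for (int i = 0; i < 4; ++i)
        wq.add_task(boost::bind(&say, i));   // dispatch four independent jobs

    // work_queue offers no way to wait for its tasks, so this sketch just
    // sleeps; a real program would track completion, as myapp::add_result does
    boost::this_thread::sleep(boost::posix_time::seconds(1));
    return 0;
}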
The main steps of a C++11 implementation are as follows:
Decide how many worker threads the pool should run:
int threads = thread::hardware_concurrency();
Each thread runs an infinite loop that waits for new tasks to arrive and executes them:
vector<thread> pool;
for (int i = 0; i < threads; i++)
{
pool.push_back(thread(Infinite_loop_function));
}
The infinite-loop function:
while(true)
{
{
unique_lock<mutex> lock(queue_mutex);
condition.wait(lock, []{ return !Queue.empty(); });
Task = Queue.front();
Queue.pop();
}
Task();
}
Adding tasks to the task queue, for example (the shared objects used by these fragments are sketched after this snippet):
void enqueue(function<void()> new_task)
{
{
unique_lock<mutex> lock(queue_mutex);
Queue.push(new_task);
}
condition.notify_one();
}
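The loop and enqueue fragments above refer to a few shared objects that are never declared; a plausible set of declarations, with names chosen to match the fragments, would be:

#include <queue>
#include <mutex>
#include <functional>
#include <condition_variable>
using namespace std;

queue<function<void()>> Queue;    // pending tasks
mutex queue_mutex;                // protects Queue
condition_variable condition;     // signals that a new task has arrived
function<void()> Task;            // in the worker loop this would normally be a local variable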
Putting these pieces together yields a complete C++11 thread pool class:
class ThreadPool {
public:
ThreadPool(size_t threads) : stop(false)
{
for(size_t i = 0; i < threads; ++i)
    workers.emplace_back(
        [this]
        {
            for(;;)
            {
                std::function<void()> task;
{
std::unique_lock<std::mutex> lock(this->queue_mutex);
this->condition.wait(lock,
[this]{ return this->stop || !this->tasks.empty(); });
if(this->stop && this->tasks.empty())
return;
task = std::move(this->tasks.front());
this->tasks.pop();
}
task();
}
}
);
}
// add new work item to the pool
void enqueue(std::function<void()> task)
{
{
std::unique_lock<std::mutex> lock(queue_mutex);
// don't allow enqueueing after stopping the pool
if(stop)
throw std::runtime_error("enqueue on stopped ThreadPool");
tasks.emplace(task);
}
condition.notify_one();
}
~ThreadPool()
{
{
std::unique_lock<std::mutex> lock(queue_mutex);
stop = true;
}
condition.notify_all();
for(std::thread &worker: workers)
worker.join();
}
private:
std::vector< std::thread > workers;
// the task queue
std::queue< std::function<void()> > tasks;
// synchronization
std::mutex queue_mutex;
std::condition_variable condition;
bool stop;
};
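A minimal driver for this class might look as follows (a sketch only; the class itself additionally needs <vector>, <queue>, <thread>, <mutex>, <condition_variable>, <functional> and <stdexcept>):

#include <iostream>

int main()
{
    ThreadPool pool(4);            // four worker threads

    for (int i = 0; i < 8; ++i)
    {
        pool.enqueue([i]
        {
            std::cout << "task " << i << " done\n";
        });
    }

    // the destructor sets stop, wakes every worker and joins them,
    // so all queued tasks finish before main returns
    return 0;
}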
References
https://www.codeproject.com/Articles/664709/Creating-a-Work-Queue-Thread-Pool-Application-Usin
https://ce39906.github.io/2018/03/29/C-Thread-Pool-%E4%BD%BF%E7%94%A8%E8%A7%A3%E6%9E%90/
https://www.jianshu.com/p/f7b7083738c3