I have addressed the critique for this post and resubmitted it for iterative review; C++ multithread pool class.
Class for creating thread pools with the latest C++ standard. Currently C++17 and C++2a.
- It currently only accepts and executes parameterless routines.
- Multiple functions may be enqueued via variadic template or stl vector.
- The number of threads created in the pool is relative to the hardware concurrency by means of a user multiplier. If the hardware concurrency cannot be determined it will default to four and the number of threads created will be two, four, or eight depending on the multiplier used.
Please review code correctness, best practices, design and implementation.
Please assume the namespace Mercer is to be used as a cross-platform library.
This code is also available on GitHub, where the repository now holds the current iteration.
mercer.h
//File mercer.h
//Author Michael Mercer
//Copyleft CC BY-SA
//Description Header for universal declarations used in namespace Mercer
#ifndef MERCER_H_0000
#define MERCER_H_0000
/*#####################----- Mercer -----###################*/
/* universal declarations */
namespace Mercer
{
//Generic two-state result code returned by library operations.
//Backed by bool, so failure==false and success==true; usable in
//boolean contexts via static_cast<bool>(result).
enum struct execution: bool {failure, success};
}
/* */
/*#####################----- Mercer -----###################*/
#endif //MERCER_H_0000
multithread.h
//File multithread.h
//Author Michael Mercer
//Copyleft CC BY-SA
//Description Header for multithread class
#ifndef MULTITHREAD_H_0000
#define MULTITHREAD_H_0000
/*#####################----- multithread -----###################*/
/* class for multithread interface */
/* GCC Bug 84330 - [6/7 Regression] [concepts] ICE with broken constraint
#ifndef __cpp_concepts
static_assert(false, "Concepts TS NOT found");
#endif
#include <type_traits>
template<typename dataType>
concept bool Function()
{
return std::is_function<dataType>::value;
}
*/
#include <deque>
#include <queue>
#include <mutex>
#include <vector>
#include <memory>
#include <thread>
#include <functional>
#include <condition_variable>
#include "mercer.h"
namespace Mercer
{
//Thread pool front end (pimpl idiom; all state lives in `implementation`).
//Worker threads consume parameterless routines from a shared queue.
//Once join() or detach() has closed the pool, no further routines may be
//enqueued (enqueue returns execution::failure).
class multithread
{
    class implementation;
    std::unique_ptr<implementation> data;
public:
    //Pool size relative to std::thread::hardware_concurrency()
    //(or 4 if that cannot be determined): half of it, equal, or double.
    enum struct concurrency: int {half, full, twofold};
    multithread(concurrency quantity);
    multithread(); //concurrency::full
    ~multithread(); //joins: blocks until queued work completes
    //NOTE(review): the const rvalue-reference parameters below cannot be
    //moved from; plain rvalue references would allow real consumption.
    //Kept as-is here so the declarations match the definitions.
    execution enqueue(const std::vector<std::function<void()>>&&);
    //consumes std::vector iff execution::success
    execution enqueue(const std::function<void()>&&);
    //Variadic overload: enqueues the routines in order, stopping at the
    //first failure. Returns execution::success only if EVERY routine was
    //accepted.
    template<typename ... dataType>
    execution enqueue(const std::function<void()>&& proximate ,
                      const std::function<void()>&& penproximate,
                      dataType ... parameters )
    {
        if(execution::success!=
           enqueue(std::forward<const std::function<void()>>(proximate)))
            return execution::failure;
        //BUG FIX: the result of this recursive call was previously
        //discarded, so a failure while enqueuing any routine after the
        //first still reported execution::success.
        return enqueue(std::forward<const std::function<void()>>(penproximate),
                       std::forward<dataType>(parameters)...);
    }
    execution join();   //close pool, wait for workers
    execution detach(); //close pool, abandon workers
    bool thrown() const noexcept;
    std::exception_ptr getNextException() const;
    //If thrown()==true, will never throw
    //If get final exception, thrown() will reset to false
};
}//namespace Mercer
/* */
/*#####################----- multithread -----###################*/
#endif //MULTITHREAD_H_0000
multithread.cpp
//File multithread.cpp
//Author Michael Mercer
//Copyleft CC BY-SA
//Description Source for multithread class
/*#####################----- multithread -----###################*/
/* class for multithread interface */
#include "multithread.h"

#include <atomic>
using Mercer::multithread;
using Mercer::execution;
using function = std::function<void()>;
//Shared state and worker threads of the pool.
//Lifetime note: if the pool is detached, the workers may outlive this
//object; callers must ensure routines finish first - TODO confirm intent.
struct multithread::implementation
{
    enum struct close: bool {detach, join};
    //Exceptions escaping enqueued routines, in completion order.
    //Workers push while holding `door`; NOTE(review): the readers in
    //thrown()/getNextException() do not lock - a dedicated mutex (or
    //locking `door` there too) would make retrieval race-free.
    std::queue<std::exception_ptr> exceptions;
    //True while the pool accepts new work. Atomic because enqueue() and
    //close() access it without holding `door` (a plain bool was a data
    //race, and the compiler could legally cache it in the workers).
    std::atomic<bool>        open ;
    std::deque <function>    line ; //pending routines, guarded by `door`
    std::mutex               door ;
    std::condition_variable  guard;
    std::vector<std::thread> pool ;
    //Spawns the worker threads. All other members are initialized first
    //(declaration order), so the workers may touch them immediately.
    implementation(concurrency quantity) :
        open(true),
        line(),
        door(),
        guard(),
        pool(std::invoke( [&]
        {
            std::vector<std::thread> temp;
            unsigned threads = std::thread::hardware_concurrency();
            if(threads==0)
                threads=4; //hardware_concurrency() may be indeterminate
            switch(quantity)
            {
                case concurrency::half   : threads /= 2; break;
                case concurrency::full   :               break;
                case concurrency::twofold: threads *= 2; break;
            }
            temp.reserve(threads);
            for(auto i=threads; i>0; i--)
                temp.emplace_back( [&]
                {
                    //Worker loop: sleep until work arrives or the pool
                    //closes; drain remaining work before exiting.
                    function next;
                    bool perpetual = true;
                    while(perpetual)
                    {
                        std::unique_lock lock(door);
                        guard.wait(lock, [&]
                        {
                            return !line.empty() || !open;
                        } );
                        if(!line.empty())
                        {
                            next = std::move(line.front()); //was a forward-as-move
                            line.pop_front();
                            if(!open && line.empty())
                                perpetual = false;
                            lock.unlock();
                            guard.notify_one(); //pass remaining work on
                            try
                            {
                                next();
                            }
                            catch(...)
                            {
                                //BUG FIX: this emplace previously ran with
                                //`door` released, racing with concurrent
                                //workers pushing into `exceptions`.
                                std::lock_guard relock(door);
                                exceptions.emplace(
                                    std::current_exception() );
                            }
                        }
                        else if(!open)
                            perpetual = false;
                    }
                }
                );
            return temp;
        }) )
    {}
    //Close the pool once: refuse new work, wake every worker, then join
    //or detach them all. Returns failure if the pool was already closed.
    template<close closeType>
    execution close()
    {
        //exchange makes close-once atomic (the old test-then-set raced
        //if two threads closed concurrently)
        if(open.exchange(false)==false)
            return execution::failure;
        guard.notify_all();
        for (auto&& thread : pool)
            if (thread.joinable())
                switch(closeType)
                {
                    case close::join  : thread.join()  ; break;
                    case close::detach: thread.detach(); break;
                }
        pool.clear();
        pool.shrink_to_fit();
        return execution::success;
    }
};
//Construct a pool whose size is scaled by the given multiplier.
multithread::multithread(concurrency quantity):
    data(std::make_unique<implementation>(quantity))
{}
//Default construction delegates: one worker per hardware thread.
multithread::multithread():
    multithread(concurrency::full)
{}
//Permanently close the pool and block until all queued routines finish.
//Returns execution::failure if the pool was already closed.
execution multithread::join()
{
return data->close<implementation::close::join>();
}
//Permanently close the pool and detach the workers; queued routines keep
//running in the background. Returns execution::failure if already closed.
//NOTE(review): detached workers reference the implementation object -
//destroying the pool while they run looks like UB; confirm intent.
execution multithread::detach()
{
return data->close<implementation::close::detach>();
}
//Blocks until all queued routines complete. If the pool was already
//closed (join() or detach() called), this join() is a harmless no-op
//returning execution::failure, which is intentionally ignored here.
multithread::~multithread()
{
join();
}
//Queue a single routine for execution.
//Returns execution::failure if the pool has been closed.
execution multithread::enqueue(const function&& item)
{
    if(data->open==false)
        return execution::failure;
    {
        //BUG FIX: `std::scoped_lock(data->door);` constructed an UNNAMED
        //temporary that was destroyed (unlocking) at the end of that
        //statement, so the queue was mutated without the mutex held.
        std::scoped_lock lock(data->door);
        //NOTE(review): `item` is const, so this copies despite the
        //forward; a plain rvalue-reference parameter would allow a move.
        data->line.emplace_back(std::forward<const function>(item));
    }
    //Notify outside the lock so a woken worker can acquire it at once.
    data->guard.notify_all();
    return execution::success;
}
//Queue a batch of routines in order.
//Returns execution::failure if the pool has been closed.
execution multithread::enqueue(const std::vector<function>&& adjunct)
{
    if(data->open==false)
        return execution::failure;
    {
        //BUG FIX: the original `std::scoped_lock(data->door);` was an
        //unnamed temporary that unlocked immediately; the insert below
        //ran unsynchronized against the worker threads.
        std::scoped_lock lock(data->door);
        //NOTE(review): `adjunct` is const, so make_move_iterator degrades
        //to copying; dropping the const would let the vector truly be
        //consumed as the header comment promises.
        data->line.insert(data->line.end(),
                          make_move_iterator(adjunct.begin()) ,
                          make_move_iterator(adjunct.end() ));
    }
    data->guard.notify_all();
    return execution::success;
}
//True if at least one enqueued routine exited via an exception that has
//not yet been retrieved with getNextException().
//NOTE(review): reads the exception queue without locking while workers
//may be pushing - see implementation::exceptions.
bool multithread::thrown() const noexcept
{
    //idiomatic form of the original `empty() ? false : true`
    return !data->exceptions.empty();
}
//Pop and return the oldest stored exception.
//Throws std::out_of_range if no exception is pending (thrown()==false);
//per the header contract it never throws while thrown()==true.
std::exception_ptr multithread::getNextException() const
{
    if(!thrown())
        throw std::out_of_range("Thrown() is false, no exception to get");
    //std::forward on the lvalue front() was a disguised move; say so.
    auto temp = std::move(data->exceptions.front());
    data->exceptions.pop();
    return temp;
}
/* */
/*#####################----- multithread -----###################*/
1 Answer 1
multithread::enqueue
parameter type issues
This section is trying to address all overloads of this member function, as these issues affect all of them.
Why take parameters of the form const X&&
(where X
is either std::function<void()>
or std::vector<std::function<void()>>
)?
Going from this comment //consumes std::vector iff execution::success
, my best guess is that they were intended to be plain rvalue references (without the const
), since you simply cannot "consume" (i.e. modify) a const X
.
This also means going through all the trouble of using a
move_iterator
on the const std::vector<std::function<void()>>&&
overload would be for nought, as it would have to make a copy.
So, let's just drop the const
.
Now, there still seems to be some confusion about std::forward
. std::forward
is intended to be used on so called forwarding references (reference of type T&&
, where T
is deduced locally), where it passes them on as the function received them (i.e. it moves rvalue references, but not lvalue references).
Since we know that the parameters in calls to std::forward
are actual rvalue references (*), we can simply call std::move
instead.
(*) Well, there's the exception in the template overload:
dataType... parameters
.First off,
dataType
will never be deduced to be any kind of reference; instead, copies will be created and passed on.std::forward
masked these copies by callingstd::move
on them (since it didn't get instantiated with a reference type). This would make the actual relevant parameters (proximate
and possiblypenproximate
) rvalue references, as required, and move-construct the newparameters
.This can be fixed by changing
dataType... parameters
todataType&&... parameters
. Now, sinceparameters
are forward references, we actually want to callstd::forward<dataType>(parameters)...
. Now they get passed through bystd::forward
without any copies being made.Additionally, calling
enqueue
with anything but astd::function<void>&&
now causes a compile time error, as the compiler cannot match a lvalue reference to a rvalue reference parameter. (This could optionally be explicitly asserted up front with some template metaprogramming in order to give a cleaner error message.)
Concurrency issues
While unlikely to ever happen on an x86 CPU, technically accesses to implementation::open
are possible data races (there's no synchronization around reads or writes).
Also, since the value of open
ostensibly doesn't get changed inside the lambda passed to the threads, the optimizer could cache that value in a register (unlikely, but allowed), so changes to implementation::open
might not be visible to the worker threads at all!
A simple solution to both of these issues would be changing implementation::open
to std::atomic<bool>
.
General stuff
A lot of functions have the following pattern:
XXX return_value = default_value; if(condition) { /* do_stuff */ } else { return_value = other_value; } return return_value;
This can be simplified to:
if(!condition) return other_value; /* do_stuff */ return default_value;
The function called to initialize
implementation::pool
and the worker thread functions could be refactored into their own functions. This would increase readability by quite a lot.
-
1\$\begingroup\$ Currently pressed for time, might add more later. \$\endgroup\$hoffmale– hoffmale2018年08月27日 09:52:55 +00:00Commented Aug 27, 2018 at 9:52
-
//Description Header for universal declarations used in namespace Mercer
. If you are going to have comments (a code smell for now writing readable code). Then they should be meaningful. Maintaining comments is just as important as maintaining the code. Garbage/Useless comments are not maintained and they will fall out of sync with the code. When code and comments are out of sync hell is unleashed. Is the code correct and comments wrong or vice versa. How can I tell. The answer: Only write comments that provide real information. \$\endgroup\$GCC Bug 84330 - [6/7 Regression] [concepts]
. Would have been nice with a link so we can go look at the bug report. \$\endgroup\$NAMED
functions. \$\endgroup\$