Mirror of https://github.com/Milxnor/Project-Reboot-3.0.git, synced 2026-01-14 03:02:22 +01:00
update spdlog (it still dont work), fix 6.3(1)
Changed file: vendor/spdlog/details/thread_pool-inl.h (vendored), 142 lines changed
@@ -4,26 +4,26 @@
 #pragma once
 
 #ifndef SPDLOG_HEADER_ONLY
-#    include <spdlog/details/thread_pool.h>
+    #include <spdlog/details/thread_pool.h>
 #endif
 
-#include <spdlog/common.h>
 #include <cassert>
+#include <spdlog/common.h>
 
 namespace spdlog {
 namespace details {
 
-SPDLOG_INLINE thread_pool::thread_pool(
-    size_t q_max_items, size_t threads_n, std::function<void()> on_thread_start, std::function<void()> on_thread_stop)
-    : q_(q_max_items)
-{
-    if (threads_n == 0 || threads_n > 1000)
-    {
-        throw_spdlog_ex("spdlog::thread_pool(): invalid threads_n param (valid "
-                        "range is 1-1000)");
+SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items,
+                                       size_t threads_n,
+                                       std::function<void()> on_thread_start,
+                                       std::function<void()> on_thread_stop)
+    : q_(q_max_items) {
+    if (threads_n == 0 || threads_n > 1000) {
+        throw_spdlog_ex(
+            "spdlog::thread_pool(): invalid threads_n param (valid "
+            "range is 1-1000)");
     }
-    for (size_t i = 0; i < threads_n; i++)
-    {
+    for (size_t i = 0; i < threads_n; i++) {
         threads_.emplace_back([this, on_thread_start, on_thread_stop] {
             on_thread_start();
             this->thread_pool::worker_loop_();
@@ -32,105 +32,101 @@ SPDLOG_INLINE thread_pool::thread_pool(
     }
 }
 
-SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items, size_t threads_n, std::function<void()> on_thread_start)
-    : thread_pool(q_max_items, threads_n, on_thread_start, [] {})
-{}
+SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items,
+                                       size_t threads_n,
+                                       std::function<void()> on_thread_start)
+    : thread_pool(q_max_items, threads_n, on_thread_start, [] {}) {}
 
 SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items, size_t threads_n)
     : thread_pool(
-          q_max_items, threads_n, [] {}, [] {})
-{}
+          q_max_items, threads_n, [] {}, [] {}) {}
 
 // message all threads to terminate gracefully join them
-SPDLOG_INLINE thread_pool::~thread_pool()
-{
-    SPDLOG_TRY
-    {
-        for (size_t i = 0; i < threads_.size(); i++)
-        {
+SPDLOG_INLINE thread_pool::~thread_pool() {
+    SPDLOG_TRY {
+        for (size_t i = 0; i < threads_.size(); i++) {
             post_async_msg_(async_msg(async_msg_type::terminate), async_overflow_policy::block);
         }
 
-        for (auto &t : threads_)
-        {
+        for (auto &t : threads_) {
             t.join();
         }
     }
     SPDLOG_CATCH_STD
 }
 
-void SPDLOG_INLINE thread_pool::post_log(async_logger_ptr &&worker_ptr, const details::log_msg &msg, async_overflow_policy overflow_policy)
-{
+void SPDLOG_INLINE thread_pool::post_log(async_logger_ptr &&worker_ptr,
+                                         const details::log_msg &msg,
+                                         async_overflow_policy overflow_policy) {
     async_msg async_m(std::move(worker_ptr), async_msg_type::log, msg);
     post_async_msg_(std::move(async_m), overflow_policy);
 }
 
-void SPDLOG_INLINE thread_pool::post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy)
-{
-    post_async_msg_(async_msg(std::move(worker_ptr), async_msg_type::flush), overflow_policy);
+std::future<void> SPDLOG_INLINE thread_pool::post_flush(async_logger_ptr &&worker_ptr,
+                                                        async_overflow_policy overflow_policy) {
+    std::promise<void> promise;
+    std::future<void> future = promise.get_future();
+    post_async_msg_(async_msg(std::move(worker_ptr), async_msg_type::flush, std::move(promise)),
+                    overflow_policy);
+    return future;
 }
 
-size_t SPDLOG_INLINE thread_pool::overrun_counter()
-{
-    return q_.overrun_counter();
-}
+size_t SPDLOG_INLINE thread_pool::overrun_counter() { return q_.overrun_counter(); }
 
-size_t SPDLOG_INLINE thread_pool::queue_size()
-{
-    return q_.size();
-}
+void SPDLOG_INLINE thread_pool::reset_overrun_counter() { q_.reset_overrun_counter(); }
 
-void SPDLOG_INLINE thread_pool::post_async_msg_(async_msg &&new_msg, async_overflow_policy overflow_policy)
-{
-    if (overflow_policy == async_overflow_policy::block)
-    {
+size_t SPDLOG_INLINE thread_pool::discard_counter() { return q_.discard_counter(); }
+
+void SPDLOG_INLINE thread_pool::reset_discard_counter() { q_.reset_discard_counter(); }
+
+size_t SPDLOG_INLINE thread_pool::queue_size() { return q_.size(); }
+
+void SPDLOG_INLINE thread_pool::post_async_msg_(async_msg &&new_msg,
+                                                async_overflow_policy overflow_policy) {
+    if (overflow_policy == async_overflow_policy::block) {
         q_.enqueue(std::move(new_msg));
-    }
-    else
-    {
+    } else if (overflow_policy == async_overflow_policy::overrun_oldest) {
         q_.enqueue_nowait(std::move(new_msg));
+    } else {
+        assert(overflow_policy == async_overflow_policy::discard_new);
+        q_.enqueue_if_have_room(std::move(new_msg));
     }
 }
 
-void SPDLOG_INLINE thread_pool::worker_loop_()
-{
-    while (process_next_msg_()) {}
+void SPDLOG_INLINE thread_pool::worker_loop_() {
+    while (process_next_msg_()) {
+    }
 }
 
 // process next message in the queue
 // return true if this thread should still be active (while no terminate msg
 // was received)
-bool SPDLOG_INLINE thread_pool::process_next_msg_()
-{
+bool SPDLOG_INLINE thread_pool::process_next_msg_() {
     async_msg incoming_async_msg;
-    bool dequeued = q_.dequeue_for(incoming_async_msg, std::chrono::seconds(10));
-    if (!dequeued)
-    {
-        return true;
-    }
+    q_.dequeue(incoming_async_msg);
 
-    switch (incoming_async_msg.msg_type)
-    {
-    case async_msg_type::log: {
-        incoming_async_msg.worker_ptr->backend_sink_it_(incoming_async_msg);
-        return true;
-    }
-    case async_msg_type::flush: {
-        incoming_async_msg.worker_ptr->backend_flush_();
-        return true;
-    }
+    switch (incoming_async_msg.msg_type) {
+        case async_msg_type::log: {
+            incoming_async_msg.worker_ptr->backend_sink_it_(incoming_async_msg);
+            return true;
+        }
+        case async_msg_type::flush: {
+            incoming_async_msg.worker_ptr->backend_flush_();
+            incoming_async_msg.flush_promise.set_value();
+            return true;
+        }
 
-    case async_msg_type::terminate: {
-        return false;
-    }
+        case async_msg_type::terminate: {
+            return false;
+        }
 
-    default: {
-        assert(false);
-    }
+        default: {
+            assert(false);
+        }
     }
 
     return true;
 }
 
-} // namespace details
-} // namespace spdlog
+}  // namespace details
+}  // namespace spdlog
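For context on what the refreshed thread_pool changes at the call site: post_async_msg_() now distinguishes all three async_overflow_policy values (block, overrun_oldest, and the new discard_new), and that policy is chosen when the async logger is created. Below is a minimal sketch of wiring this up through spdlog's public async API; the logger name "reboot_log" and the file name "async.txt" are placeholders, not something this commit adds.

#include <spdlog/async.h>
#include <spdlog/sinks/basic_file_sink.h>

#include <memory>

int main() {
    // One background worker thread and a ring buffer of 8192 queued messages.
    spdlog::init_thread_pool(8192, 1);

    auto sink = std::make_shared<spdlog::sinks::basic_file_sink_mt>("async.txt", true);

    // The policy chosen here is what post_async_msg_() branches on:
    //   block          -> q_.enqueue()              (producer waits when the queue is full)
    //   overrun_oldest -> q_.enqueue_nowait()       (oldest queued message is overwritten)
    //   discard_new    -> q_.enqueue_if_have_room() (new message is dropped when the queue is full)
    auto logger = std::make_shared<spdlog::async_logger>(
        "reboot_log", sink, spdlog::thread_pool(),
        spdlog::async_overflow_policy::overrun_oldest);
    spdlog::register_logger(logger);

    logger->info("hello from the worker thread");
    logger->flush();     // enqueued as an async_msg_type::flush message
    spdlog::shutdown();  // posts terminate messages and joins the pool threads
}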
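The other behavioral change in this hunk is post_flush(): it now returns a std::future<void> that the worker thread completes via flush_promise.set_value() once backend_flush_() has run, instead of returning void. A minimal sketch of what that future enables, driving spdlog::details::thread_pool directly to mirror the signature shown in the diff (illustration only; the file name and logger name are placeholders, and normal code would go through logger->flush() rather than the details API):

#include <spdlog/async_logger.h>
#include <spdlog/details/thread_pool.h>
#include <spdlog/sinks/basic_file_sink.h>

#include <future>
#include <memory>

int main() {
    // Dedicated pool: room for 1024 queued messages, one worker thread.
    auto tp = std::make_shared<spdlog::details::thread_pool>(1024, 1);

    auto sink = std::make_shared<spdlog::sinks::basic_file_sink_mt>("flush_demo.txt", true);
    auto logger = std::make_shared<spdlog::async_logger>(
        "flush_demo", sink, tp, spdlog::async_overflow_policy::block);

    logger->info("written on the worker thread");

    // New signature: the returned future becomes ready only after the worker
    // has executed backend_flush_() and called flush_promise.set_value().
    std::future<void> flushed = tp->post_flush(
        std::shared_ptr<spdlog::async_logger>(logger),
        spdlog::async_overflow_policy::block);
    flushed.wait();  // the file sink has been flushed once this returns
}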