citra-emu/citra

threadsafe_queue: Add WaitIfEmpty and use it in logging

B3n30 2018-09-09 13:08:57 +02:00
parent 874a95cea7
commit 9b49a79a72
3 changed files with 27 additions and 13 deletions

View File

@@ -38,9 +38,7 @@ public:
     const Impl& operator=(Impl const&) = delete;

     void PushEntry(Entry e) {
-        std::lock_guard<std::mutex> lock(message_mutex);
         message_queue.Push(std::move(e));
-        message_cv.notify_one();
     }

     void AddBackend(std::unique_ptr<Backend> backend) {
@@ -83,14 +81,13 @@ private:
                 backend->Write(e);
             }
         };
-        while (true) {
-            std::unique_lock<std::mutex> lock(message_mutex);
-            message_cv.wait(lock, [&] { return !running || message_queue.Pop(entry); });
-            if (!running) {
+        while (message_queue.PopWait(entry)) {
+            if (entry.final_entry) {
                 break;
             }
             write_logs(entry);
         }
         // Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a case
         // where a system is repeatedly spamming logs even on close.
         constexpr int MAX_LOGS_TO_WRITE = 100;
@@ -102,14 +99,13 @@ private:
     }

     ~Impl() {
-        running = false;
-        message_cv.notify_one();
+        Entry entry;
+        entry.final_entry = true;
+        message_queue.Push(entry);
         backend_thread.join();
     }

-    std::atomic_bool running{true};
-    std::mutex message_mutex, writing_mutex;
-    std::condition_variable message_cv;
+    std::mutex writing_mutex;
     std::thread backend_thread;
     std::vector<std::unique_ptr<Backend>> backends;
     Common::MPSCQueue<Log::Entry> message_queue;
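
In short, the backend thread no longer polls an atomic running flag under Impl's own mutex and condition variable; it blocks inside the queue and exits when it pops an entry whose final_entry flag is set, a poison-pill pushed by ~Impl. A minimal self-contained sketch of that pattern, with hypothetical Message and BlockingQueue types standing in for Log::Entry and Common::MPSCQueue:

#include <condition_variable>
#include <mutex>
#include <queue>

// Hypothetical stand-ins for Log::Entry and Common::MPSCQueue.
struct Message {
    bool final_entry = false; // poison-pill marker, as in this commit
};

class BlockingQueue {
public:
    void Push(Message m) {
        {
            std::lock_guard<std::mutex> lock(mutex);
            queue.push(std::move(m));
        }
        cv.notify_one();
    }

    // Blocks until an element is available, like the new PopWait.
    Message PopWait() {
        std::unique_lock<std::mutex> lock(mutex);
        cv.wait(lock, [this] { return !queue.empty(); });
        Message m = std::move(queue.front());
        queue.pop();
        return m;
    }

private:
    std::queue<Message> queue;
    std::mutex mutex;
    std::condition_variable cv;
};

// Consumer loop in the style of the reworked backend thread: no shared
// 'running' flag; the destructor pushes a final_entry message instead.
void ConsumerLoop(BlockingQueue& q) {
    for (;;) {
        Message m = q.PopWait();
        if (m.final_entry)
            break; // sentinel observed: shut down
        // ... write the entry to all backends ...
    }
}

Because the sentinel travels through the same FIFO as the log entries, every entry pushed before destruction is popped ahead of it, which a bare notify on a running flag does not guarantee.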

View File

@@ -28,6 +28,7 @@ struct Entry {
     unsigned int line_num;
     std::string function;
     std::string message;
+    bool final_entry = false;

     Entry() = default;
     Entry(Entry&& o) = default;
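
Since final_entry defaults to false, every existing call site keeps producing ordinary log records; only the shutdown path flips it. A hypothetical illustration (Log::Entry as declared above):

Log::Entry sentinel;
sentinel.final_entry = true; // the backend loop breaks when it pops this entry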

View File

@@ -9,6 +9,7 @@
 #include <algorithm>
 #include <atomic>
+#include <condition_variable>
 #include <cstddef>
 #include <mutex>
 #include "common/common_types.h"
@@ -41,7 +42,7 @@ public:
     template <typename Arg>
     void Push(Arg&& t) {
         // create the element, add it to the queue
-        write_ptr->current = std::forward<Arg>(t);
+        write_ptr->current = std::move(t);
         // set the next pointer to a new element ptr
         // then advance the write pointer
         ElementPtr* new_ptr = new ElementPtr();
@@ -49,6 +50,7 @@ public:
         write_ptr = new_ptr;
         if (NeedSize)
             size++;
+        cv.notify_one();
     }

     void Pop() {
@@ -66,10 +68,11 @@ public:
         if (Empty())
             return false;

-        ElementPtr* tmpptr = read_ptr;
         if (NeedSize)
             size--;

+        ElementPtr* tmpptr = read_ptr;
         read_ptr = tmpptr->next.load(std::memory_order_acquire);
         t = std::move(tmpptr->current);
         tmpptr->next.store(nullptr);
@@ -77,6 +80,14 @@ public:
         return true;
     }

+    bool PopWait(T& t) {
+        if (Empty()) {
+            std::unique_lock<std::mutex> lock(cv_mutex);
+            cv.wait(lock, [this]() { return !Empty(); });
+        }
+        return Pop(t);
+    }
+
     // not thread-safe
     void Clear() {
         size.store(0);
@@ -104,6 +115,8 @@ private:
     ElementPtr* write_ptr;
     ElementPtr* read_ptr;
     std::atomic<u32> size;
+    std::mutex cv_mutex;
+    std::condition_variable cv;
 };

 // a simple thread-safe,
@@ -138,6 +151,10 @@ public:
         return spsc_queue.Pop(t);
     }

+    bool PopWait(T& t) {
+        return spsc_queue.PopWait(t);
+    }
+
     // not thread-safe
     void Clear() {
         spsc_queue.Clear();
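
Putting the pieces together, a hedged usage sketch of the new blocking API; the include path, the int payload, and the negative-value sentinel are illustrative assumptions, not part of this commit:

#include <thread>
#include "common/threadsafe_queue.h" // assumed location of the header above

Common::MPSCQueue<int> queue; // many producers, one consumer

void Consumer() {
    int value;
    while (queue.PopWait(value)) { // blocks while the queue is empty
        if (value < 0)
            break; // sentinel value, mirroring final_entry in the logger
        // ... process value ...
    }
}

int main() {
    std::thread consumer(Consumer);
    queue.Push(1);
    queue.Push(2);
    queue.Push(-1); // wakes the consumer and tells it to stop
    consumer.join();
}

Pop stays non-blocking and returns false on an empty queue, so existing callers are unaffected; only consumers that opt into PopWait sleep on the new condition variable.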