mirror of https://github.com/polybar/polybar.git, synced 2024-11-11 13:50:56 -05:00
concurrency_util: Mutex wrapper

parent f80d8ebf5b
commit 334c454eec

2 changed files with 61 additions and 52 deletions
@@ -1,6 +1,7 @@
#pragma once

#include <atomic>
#include <chrono>
#include <map>
#include <mutex>
#include <thread>
@@ -10,6 +11,8 @@
POLYBAR_NS

namespace chrono = std::chrono;
using namespace std::chrono_literals;
namespace this_thread = std::this_thread;

using std::atomic;
@@ -17,57 +20,50 @@ using std::map;
 using std::mutex;
 using std::thread;
 
-namespace concurrency_util {
-namespace locking_strategy {
-struct no_backoff {
-  bool operator()() {
-    return true;
-  }
-};
-
-struct yield_backoff {
-  bool operator()() {
-    this_thread::yield();
-    return false;
-  }
-};
-}
-
-class spin_lock : public non_copyable_mixin<spin_lock> {
- public:
-  /**
-   * Construct spin_lock
-   */
-  spin_lock() = default;
-
-  /**
-   * Lock using custom strategy
-   */
-  template <typename Backoff>
-  void lock(Backoff backoff) noexcept {
-    while (m_locked.test_and_set(std::memory_order_acquire)) {
-      backoff();
-    }
-  }
-
-  /**
-   * Lock using default strategy
-   */
-  void lock() noexcept {
-    lock(locking_strategy::no_backoff{});
-  }
-
-  /**
-   * Unlock
-   */
-  void unlock() noexcept {
-    m_locked.clear(std::memory_order_release);
-  }
-
- protected:
-  std::atomic_flag m_locked{false};
+class spin_lock : public non_copyable_mixin<spin_lock> {
+ public:
+  struct no_backoff_strategy {
+    bool operator()();
+  };
+  struct yield_backoff_strategy {
+    bool operator()();
+  };
+
+ public:
+  explicit spin_lock() = default;
+
+  template <typename Backoff>
+  void lock(Backoff backoff) noexcept {
+    while (m_locked.test_and_set(std::memory_order_acquire)) {
+      backoff();
+    }
+  }
+
+  void lock() noexcept;
+  void unlock() noexcept;
+
+ protected:
+  std::atomic_flag m_locked{false};
+};
+
+template <typename T>
+class mutex_wrapper : public T {
+ public:
+  template <typename... Args>
+  explicit mutex_wrapper(Args&&... args) : T(forward<Args>(args)...) {}
+
+  void lock() const noexcept {
+    m_mtx.lock();
+  }
+  void unlock() const noexcept {
+    m_mtx.unlock();
+  };
+
+ private:
+  mutable mutex m_mtx;
+};
 
 namespace concurrency_util {
   size_t thread_id(const thread::id id);
 }
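The header-side change is the core of the commit: mutex_wrapper<T> derives from T and adds lock()/unlock() backed by a private mutex, so the wrapped object itself satisfies the BasicLockable requirements of std::lock_guard and can be handed to it directly. Below is a minimal, self-contained sketch of that usage pattern; the wrapper is restated with std:: qualifications so the snippet compiles on its own, and the counters map and bump lambda are illustrative names that are not part of the commit.

#include <map>
#include <mutex>
#include <string>
#include <thread>
#include <utility>

// Illustrative stand-in for the mutex_wrapper in the diff above, restated
// with std:: qualifications so this snippet is self-contained.
template <typename T>
class mutex_wrapper : public T {
 public:
  template <typename... Args>
  explicit mutex_wrapper(Args&&... args) : T(std::forward<Args>(args)...) {}
  // lock()/unlock() make the wrapper satisfy BasicLockable; the mutex is
  // mutable so a const wrapper can still be locked.
  void lock() const noexcept { m_mtx.lock(); }
  void unlock() const noexcept { m_mtx.unlock(); }

 private:
  mutable std::mutex m_mtx;
};

int main() {
  // The wrapped map is both the container and its own lock.
  mutex_wrapper<std::map<std::string, int>> counters;

  auto bump = [&](const std::string& key) {
    // std::lock_guard only needs lock()/unlock(), so the wrapper can be
    // passed as its own mutex, just like lock_guard<decltype(ids)> above.
    std::lock_guard<decltype(counters)> guard(counters);
    ++counters[key];
  };

  std::thread t1([&] { for (int i = 0; i < 1000; i++) bump("hits"); });
  std::thread t2([&] { for (int i = 0; i < 1000; i++) bump("hits"); });
  t1.join();
  t2.join();

  return counters["hits"] == 2000 ? 0 : 1;
}

The apparent design choice is that the lock travels with the data it protects, so call sites no longer need a separate static mutex next to each guarded container.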
@@ -2,12 +2,25 @@
 POLYBAR_NS
 
+bool spin_lock::no_backoff_strategy::operator()() {
+  return true;
+}
+bool spin_lock::yield_backoff_strategy::operator()() {
+  this_thread::yield();
+  return false;
+}
+void spin_lock::lock() noexcept {
+  lock(no_backoff_strategy{});
+}
+void spin_lock::unlock() noexcept {
+  m_locked.clear(std::memory_order_release);
+}
+
 namespace concurrency_util {
   size_t thread_id(const thread::id id) {
     static size_t idx{1_z};
-    static mutex mtx;
-    static map<thread::id, size_t> ids;
-    std::lock_guard<mutex> lock(mtx);
+    static mutex_wrapper<map<thread::id, size_t>> ids;
+    std::lock_guard<decltype(ids)> lock(ids);
     if (ids.find(id) == ids.end()) {
       ids[id] = idx++;
     }
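On the source-file side the backoff strategies and the default lock()/unlock() move out of line, but the locking scheme itself is unchanged: an std::atomic_flag spin lock whose lock() retries test_and_set with a pluggable backoff policy (no_backoff_strategy keeps busy-spinning, yield_backoff_strategy gives up the time slice between attempts). A standalone sketch of the same pattern follows, under illustrative names rather than the polybar symbols.

#include <atomic>
#include <thread>
#include <vector>

// Standalone version of the test_and_set/clear spin lock with a pluggable
// backoff policy, mirroring the structure of the code in the diff above.
class spin_lock {
 public:
  struct no_backoff {
    bool operator()() { return true; }  // keep busy-spinning
  };
  struct yield_backoff {
    bool operator()() {
      std::this_thread::yield();  // yield the time slice while contended
      return false;
    }
  };

  template <typename Backoff>
  void lock(Backoff backoff) noexcept {
    // Spin until the flag was previously clear; acquire here pairs with the
    // release in unlock() so writes by the previous owner become visible.
    while (m_locked.test_and_set(std::memory_order_acquire)) {
      backoff();
    }
  }
  void lock() noexcept { lock(no_backoff{}); }
  void unlock() noexcept { m_locked.clear(std::memory_order_release); }

 private:
  std::atomic_flag m_locked = ATOMIC_FLAG_INIT;
};

int main() {
  spin_lock lock;
  long counter = 0;

  std::vector<std::thread> workers;
  for (int t = 0; t < 4; t++) {
    workers.emplace_back([&] {
      for (int i = 0; i < 10000; i++) {
        lock.lock(spin_lock::yield_backoff{});  // yield between attempts
        ++counter;
        lock.unlock();
      }
    });
  }
  for (auto& w : workers) {
    w.join();
  }
  return counter == 40000 ? 0 : 1;
}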