forked from max0x7ba/atomic_queue
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathspinlock.h
More file actions
122 lines (89 loc) · 3.38 KB
/
spinlock.h
File metadata and controls
122 lines (89 loc) · 3.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
/* -*- mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
#ifndef ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED
#define ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED
// Copyright (c) 2019 Maxim Egorushkin. MIT License. See the full licence in file LICENSE.
#include "defs.h"
#include <atomic>
#include <cstdlib>
#include <mutex>
#include <pthread.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace atomic_queue {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Thin RAII wrapper over a POSIX spinlock (pthread_spinlock_t).
// Any pthread error is treated as fatal: these calls cannot report failure
// through the noexcept lock/unlock interface, so we abort instead of
// silently continuing with a broken lock.
class Spinlock {
    pthread_spinlock_t s_;

public:
    using scoped_lock = std::lock_guard<Spinlock>;

    // Initialize a process-private spinlock (pshared == 0); abort on failure
    // since a constructor has no way to return an error code here.
    Spinlock() noexcept {
        if(::pthread_spin_init(&s_, 0))
            std::abort();
    }

    // Owns the underlying pthread spinlock: copying or moving would lead to
    // double-destroy (and copying a held lock is meaningless), so forbid both.
    // Note: the implicitly-defaulted copy operations the original relied on
    // are deprecated in the presence of a user-declared destructor.
    Spinlock(Spinlock const&) = delete;
    Spinlock& operator=(Spinlock const&) = delete;

    ~Spinlock() noexcept {
        ::pthread_spin_destroy(&s_);
    }

    void lock() noexcept {
        if(::pthread_spin_lock(&s_))
            std::abort();
    }

    void unlock() noexcept {
        if(::pthread_spin_unlock(&s_))
            std::abort();
    }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class FairSpinlock {
alignas(CACHE_LINE_SIZE) std::atomic<unsigned> ticket_{0};
alignas(CACHE_LINE_SIZE) std::atomic<unsigned> next_{0};
public:
using scoped_lock = std::lock_guard<FairSpinlock>;
void lock() noexcept {
auto ticket = ticket_.fetch_add(1, std::memory_order_relaxed);
while(next_.load(std::memory_order_acquire) != ticket)
spin_loop_pause();
}
void unlock() noexcept {
next_.fetch_add(1, std::memory_order_release);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Test-and-test-and-set spinlock: no fairness guarantee among waiters.
class UnfairSpinlock {
    std::atomic<unsigned> lock_{0};

public:
    using scoped_lock = std::lock_guard<UnfairSpinlock>;

    void lock() noexcept {
        while(true) {
            // Poll with a plain relaxed load first so waiters spin on a
            // shared cache line instead of hammering it with RMW exchanges;
            // only attempt the acquiring exchange when the lock looks free.
            bool const looks_free = !lock_.load(std::memory_order_relaxed);
            if(looks_free && !lock_.exchange(1, std::memory_order_acquire))
                break;
            spin_loop_pause();
        }
    }

    void unlock() noexcept {
        lock_.store(0, std::memory_order_release);
    }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Spinlock using x86 Hardware Lock Elision (TSX) when the toolchain supports
// it, falling back to a plain CAS spinlock otherwise.
class SpinlockHle {
    int lock_ = 0;

    // BUG FIX: the original tested "#ifdef __gcc__", a macro no compiler
    // defines (GCC defines __GNUC__), so the HLE flags were always 0 and
    // elision was silently never enabled. Test for the HLE flag macros
    // themselves, which GCC/Clang define on targets that support them.
#if defined(__ATOMIC_HLE_ACQUIRE) && defined(__ATOMIC_HLE_RELEASE)
    static constexpr int HLE_ACQUIRE = __ATOMIC_HLE_ACQUIRE;
    static constexpr int HLE_RELEASE = __ATOMIC_HLE_RELEASE;
#else
    static constexpr int HLE_ACQUIRE = 0;
    static constexpr int HLE_RELEASE = 0;
#endif

public:
    // BUG FIX: was std::lock_guard<Spinlock>, which cannot be constructed
    // from a SpinlockHle& and thus made the alias unusable.
    using scoped_lock = std::lock_guard<SpinlockHle>;

    // Spin until the 0 -> 1 transition succeeds; expected must be reset to 0
    // after each failed compare-exchange because it is updated in place.
    void lock() noexcept {
        for(int expected = 0;
            !__atomic_compare_exchange_n(&lock_, &expected, 1, false, __ATOMIC_ACQUIRE | HLE_ACQUIRE, __ATOMIC_RELAXED);
            expected = 0)
            spin_loop_pause();
    }

    void unlock() noexcept {
        __atomic_store_n(&lock_, 0, __ATOMIC_RELEASE | HLE_RELEASE);
    }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace atomic_queue
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#endif // ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED