// NOTE(review): this file is a fragmentary line-extract of a larger source
// (each line carries its original line number); the code is annotated as-is
// and is not compilable in this form.
//
// ThreadSanitizer (TSan) mutex annotation interface. The guard defaults
// TSAN_ANNOTATIONS to 0 (disabled). The __tsan_mutex_* declarations mirror
// TSan's dynamic mutex annotation API so an instrumented build can model the
// hand-rolled locks below. The closing #endif is not visible in this fragment.
33 #ifndef TSAN_ANNOTATIONS
34 #define TSAN_ANNOTATIONS 0
// Flag telling TSan the mutex is statically initialized (no create/destroy
// annotation pair is ever issued for it).
39 const unsigned __tsan_mutex_linker_init = 1 << 0;
// Bracketing calls around a mutex acquire at `addr`.
40 void __tsan_mutex_pre_lock(
void *addr,
unsigned flags);
41 void __tsan_mutex_post_lock(
void *addr,
unsigned flags,
int recursion);
// Pre-unlock returns a recursion count per TSan's interface; the call site
// below discards it with a (void) cast.
42 int __tsan_mutex_pre_unlock(
void *addr,
unsigned flags);
43 void __tsan_mutex_post_unlock(
void *addr,
unsigned flags);
// Bracketing calls around a condition-variable style signal on `addr`.
44 void __tsan_mutex_pre_signal(
void *addr,
unsigned flags);
45 void __tsan_mutex_post_signal(
void *addr,
unsigned flags);
// Namespace holding the runtime's synchronization primitives.
53 namespace Synchronization {
// Bodies of the if_tsan_* wrapper helpers (their signatures are not visible
// in this fragment). They forward to the TSan entry points above, passing
// __tsan_mutex_linker_init for mutexes (statically initialized) and flags=0
// for condition-variable signals.
59 __tsan_mutex_pre_lock(mutex, __tsan_mutex_linker_init);
// recursion = 1: these locks are non-recursive.
63 __tsan_mutex_post_lock(mutex, __tsan_mutex_linker_init, 1);
// The returned recursion count is deliberately discarded.
67 (void)__tsan_mutex_pre_unlock(mutex, __tsan_mutex_linker_init);
70 __tsan_mutex_post_unlock(mutex, __tsan_mutex_linker_init);
73 __tsan_mutex_pre_signal(cond, 0);
76 __tsan_mutex_post_signal(cond, 0);
// Fragment of a spin-budget helper: while spins remain it decrements the
// budget (decrement not visible here) and reports whether the caller should
// keep spinning instead of parking — TODO confirm against the full file.
101 if (spin_count > 0) {
104 return spin_count > 0;
// Low-order flag bits packed into a lock word; the remaining high bits of a
// word_lock's state hold the wait-queue head pointer (see lock_full below).
// NOTE(review): queue_lock_bit and parked_bit share the value 0x02 —
// presumably they belong to different lock types (word_lock vs. the
// parked-bit mutex further down); confirm against the full file.
113 static constexpr
uint8_t lock_bit = 0x01;
114 static constexpr
uint8_t queue_lock_bit = 0x02;
115 static constexpr
uint8_t parked_bit = 0x02;
// Fast-path lock/unlock fragments for word_lock (TSan-annotated).
// Lock: one weak acquire-CAS attempts to set the lock bit; on failure the
// slow path lock_full() runs (call not visible in this fragment).
149 if_tsan_pre_lock(
this);
154 if (!atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
158 if_tsan_post_lock(
this);
162 if_tsan_pre_unlock(
this);
// Unlock: the slow path is needed only when no other thread currently holds
// the queue lock AND the queue is non-empty (pointer bits outside the two
// flag bits are set) — i.e. there is a waiter to hand off to.
167 bool no_thread_queuing = (val & queue_lock_bit) == 0;
169 bool some_queued = (val & ~(
uintptr_t)(queue_lock_bit | lock_bit)) != 0;
170 if (no_thread_queuing && some_queued) {
174 if_tsan_post_unlock(
this);
// Slow-path lock. Spins while the spin budget allows, then parks the calling
// thread on an intrusive queue node whose address is stored in the upper
// (non-flag) bits of `state`. Many lines of the original body are missing
// from this fragment; comments cover only what is visible.
178 WEAK void word_lock::lock_full() {
179 spin_control spinner;
181 atomic_load_relaxed(&state, &expected);
// Uncontended: try to take the lock bit directly.
184 if (!(expected & lock_bit)) {
187 if (atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
// Queue already non-empty and spin budget remaining: re-read and retry
// rather than joining the queue.
193 if (((expected & ~(
uintptr_t)(queue_lock_bit | lock_bit)) != 0) && spinner.should_spin()) {
195 atomic_load_relaxed(&state, &expected);
// Prepare this thread's stack-allocated queue node for parking.
199 word_lock_queue_data node;
201 node.parker.prepare_park();
// The current queue head lives in the pointer bits of the state word.
204 word_lock_queue_data *head = (word_lock_queue_data *)(expected & ~(
uintptr_t)(queue_lock_bit | lock_bit));
205 if (head ==
nullptr) {
// Publish the new queue state with a release CAS; on failure re-read
// `state` and loop (loop structure not fully visible here).
217 if (atomic_cas_weak_release_relaxed(&state, &expected, &desired)) {
220 atomic_load_relaxed(&state, &expected);
// Slow-path unlock: takes the queue-lock bit, walks the intrusive wait queue
// to find its tail, dequeues one waiter, and unparks it. Many lines of the
// original body are missing from this fragment.
225 WEAK void word_lock::unlock_full() {
227 atomic_load_relaxed(&state, &expected);
// Nothing to do if another thread already owns the queue lock, or if no
// waiter is queued.
232 bool thread_queuing = (expected & queue_lock_bit);
234 bool none_queued = (expected & ~(
uintptr_t)(queue_lock_bit | lock_bit)) == 0;
235 if (thread_queuing || none_queued) {
// Acquire the queue lock bit before touching the list.
239 uintptr_t desired = expected | queue_lock_bit;
240 if (atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
// Walk from the head to locate the tail, threading prev links and caching
// the discovered tail on the head node as we go (lazy tail threading in
// the style of WTF::WordLock — TODO confirm against full file).
246 word_lock_queue_data *head = (word_lock_queue_data *)(expected & ~(
uintptr_t)(queue_lock_bit | lock_bit));
247 word_lock_queue_data *current = head;
248 word_lock_queue_data *tail = current->tail;
249 while (tail ==
nullptr) {
250 word_lock_queue_data *next = current->next;
252 next->prev = current;
254 tail = current->tail;
// If the lock bit is still set, another thread holds the lock; just drop
// the queue lock bit and let that holder perform the handoff later.
260 if (expected & lock_bit) {
262 if (atomic_cas_weak_relacq_relaxed(&state, &expected, &desired)) {
// CAS failure path: re-synchronize before re-reading the queue.
265 atomic_thread_fence_acquire();
// Dequeue the tail. If it was the only waiter, the queue pointer must be
// cleared entirely via CAS (retrying until it sticks or state changes).
269 word_lock_queue_data *new_tail = tail->prev;
270 if (new_tail ==
nullptr) {
271 bool continue_outer =
false;
272 while (!continue_outer) {
274 if (atomic_cas_weak_relacq_relaxed(&state, &expected, &desired)) {
// Someone enqueued concurrently: restart the outer loop.
277 if ((expected & ~(
uintptr_t)(queue_lock_bit | lock_bit)) == 0) {
280 atomic_thread_fence_acquire();
281 continue_outer =
true;
285 if (continue_outer) {
// Otherwise shorten the queue and release only the queue-lock bit.
289 head->tail = new_tail;
290 atomic_and_fetch_release(&state, ~(
uintptr_t)queue_lock_bit);
// Wake the dequeued waiter (three-phase unpark protocol).
296 tail->parker.unpark_start();
297 tail->parker.unpark();
298 tail->parker.unpark_finish();
// Debug helper: walks every hash bucket's waiter list and prints each
// sleeping record's bucket index, node address, and the address it sleeps on.
// (Outer loop over buckets is not visible in this fragment.)
331 WEAK void dump_hash() {
335 while (head !=
nullptr) {
336 print(
nullptr) <<
"Bucket index " << i <<
" addr " << (
void *)head->
sleep_address <<
"\n";
// Fragment of bucket-pair lookup/locking: hashes both addresses, then orders
// the lock/unlock operations by hash (and, for identical-looking cases, by
// bucket address) so that two buckets are always processed in a consistent
// order — presumably for deadlock avoidance; TODO confirm against full file.
378 uintptr_t hash_from = addr_hash(addr_from);
// Same bucket for both addresses: only one lock is needed.
386 if (hash_from == hash_to) {
390 }
else if (hash_from < hash_to) {
410 if (&buckets.
from == &buckets.
to) {
412 }
else if (&buckets.
from > &buckets.
to) {
// Fragment of an unpark-one routine: scans a bucket's singly-linked waiter
// list for a record sleeping on `addr`, unlinks it, determines whether more
// waiters on the same address remain, wakes the unlinked waiter, and returns
// 1 if more waiters remain, else 0.
461 if (bucket.
head !=
nullptr) {
484 while (data !=
nullptr) {
487 if (cur_addr == addr) {
// Unlink the matching waiter via the indirect location pointer.
489 *data_location = next;
491 bool more_waiters =
false;
// If we removed the bucket's tail, a rescan is needed; either way, check
// whether any remaining record sleeps on the same address.
493 if (bucket.
tail == data) {
497 while (data2 !=
nullptr && !more_waiters) {
500 more_waiters = (cur_addr2 == addr);
// Wake the dequeued waiter (unpark() call between start/finish is not
// visible in this fragment).
507 data->
parker.unpark_start();
510 data->
parker.unpark_finish();
513 return more_waiters ? 1 : 0;
// No match: advance the indirect pointer to the next list node.
515 data_location = &data->
next;
// Fragment of an unpark/requeue routine: waiters sleeping on `addr_from` are
// unlinked; (apparently) one is chosen for direct wakeup (`wakeup`) and the
// rest are collected on a local chain that is spliced onto the destination
// bucket (`buckets.to`) — TODO confirm selection logic against full file.
545 while (data !=
nullptr) {
550 if (cur_addr == addr_from) {
551 *data_location = next;
// Append the unlinked waiter to the local requeue chain.
560 if (requeue ==
nullptr) {
563 requeue_tail->
next = data;
572 data_location = &data->
next;
// Splice the requeue chain onto the destination bucket's list, updating
// head (if empty) and tail.
578 if (requeue !=
nullptr) {
579 requeue_tail->
next =
nullptr;
580 if (buckets.
to.
head ==
nullptr) {
581 buckets.
to.
head = requeue;
585 buckets.
to.
tail = requeue_tail;
// Wake at most one thread directly.
590 if (wakeup !=
nullptr) {
592 wakeup->
parker.unpark_start();
595 wakeup->
parker.unpark_finish();
// Report whether a thread was woken and the caller requested unpark-one.
600 return wakeup !=
nullptr && action.
unpark_one;
// Fragments of the parked-bit mutex protocol (state word = lock_bit |
// parked_bit; no queue pointer — waiters live in the parking-lot hash).
// True when the lock is held AND at least one thread is parked on it.
614 return result == (lock_bit | parked_bit);
// Wake-up handoff: leave the parked bit set only if waiters remain.
619 uintptr_t return_state = more_waiters ? parked_bit : 0;
620 atomic_store_release(
lock_state, &return_state);
// Lock slow path: take the lock bit when free...
632 atomic_load_relaxed(&state, &expected);
635 if (!(expected & lock_bit)) {
637 if (atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
648 atomic_load_relaxed(&state, &expected);
// ...otherwise ensure the parked bit is set before parking this thread,
// retrying on CAS failure.
653 if ((expected & parked_bit) == 0) {
654 uintptr_t desired = expected | parked_bit;
655 if (!atomic_cas_weak_relaxed_relaxed(&state, &expected, &desired)) {
668 atomic_load_relaxed(&state, &expected);
// Release / transition paths (strong CAS used on the release transition).
677 if (atomic_cas_strong_release_relaxed(&state, &expected, &desired)) {
690 if (!atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
699 if (!atomic_cas_weak_release_relaxed(&state, &expected, &desired)) {
// Mark-parked fragment: if the lock bit is clear a CAS path runs (not fully
// visible), else the parked bit is OR'd in unconditionally.
706 atomic_load_relaxed(&state, &val);
708 if (!(val & lock_bit)) {
713 if (atomic_cas_weak_relaxed_relaxed(&state, &val, &desired)) {
720 atomic_or_fetch_relaxed(&state, parked_bit);
// Remaining fragments: condition-variable style signal/broadcast/wait plus a
// lazily-allocated array check at the end.
// NOTE(review): the '#if 0' below is retained verbatim from the original;
// its TODO predates this review.
739 #if 0 // TODO: figure out why this was here.
773 if (action.unpark_one && some_requeued) {
// Signal-annotated method (presumably signal()): brackets a relaxed read of
// the state word with TSan pre/post signal annotations. Two post-signal
// calls suggest early-out and normal paths — confirm against full file.
822 if_tsan_pre_signal(
this);
825 atomic_load_relaxed(&state, &val);
827 if_tsan_post_signal(
this);
832 if_tsan_post_signal(
this);
// Second signal-annotated method (presumably broadcast()): same bracketing.
836 if_tsan_pre_signal(
this);
838 atomic_load_relaxed(&state, &val);
840 if_tsan_post_signal(
this);
845 if_tsan_post_signal(
this);
// Wait path: TSan-annotated reacquisition of the associated mutex, reading
// its state word through a uintptr_t view.
854 if_tsan_pre_lock(mutex);
858 atomic_load_relaxed((
uintptr_t *)mutex, &val);
861 if_tsan_post_lock(mutex);
// Delegation to the underlying condvar/mutex pair.
903 fast_cond->
wait(fast_mutex);
// Null checks on a lazily-allocated nested array structure.
916 if (array ==
nullptr) {
922 if (array->
array ==
nullptr) {