#ifndef TSAN_ANNOTATIONS
#define TSAN_ANNOTATIONS 0
#endif

// Interface to ThreadSanitizer's mutex annotation hooks.
const unsigned __tsan_mutex_linker_init = 1 << 0;
void __tsan_mutex_pre_lock(void *addr, unsigned flags);
void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion);
int __tsan_mutex_pre_unlock(void *addr, unsigned flags);
void __tsan_mutex_post_unlock(void *addr, unsigned flags);
void __tsan_mutex_pre_signal(void *addr, unsigned flags);
void __tsan_mutex_post_signal(void *addr, unsigned flags);
namespace Synchronization {
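// The if_tsan_pre_lock / if_tsan_post_lock / if_tsan_pre_signal / ... helpers
// used throughout this file wrap the __tsan_mutex_* hooks declared above; when
// TSAN_ANNOTATIONS is 0 they presumably compile to nothing. The calls below
// are their TSAN-enabled bodies.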
    __tsan_mutex_pre_lock(mutex, __tsan_mutex_linker_init);
    __tsan_mutex_post_lock(mutex, __tsan_mutex_linker_init, 1);
    (void)__tsan_mutex_pre_unlock(mutex, __tsan_mutex_linker_init);
    __tsan_mutex_post_unlock(mutex, __tsan_mutex_linker_init);
    __tsan_mutex_pre_signal(cond, 0);
    __tsan_mutex_post_signal(cond, 0);
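// spin_control bounds how long a thread busy-waits: each call to should_spin()
// uses up part of a fixed spin budget and reports whether any budget remains,
// so callers spin briefly before falling back to parking.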
    if (spin_count > 0) {
        spin_count--;
    }
    return spin_count > 0;
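// Bit layout of the lock state words: lock_bit marks the lock as held;
// queue_lock_bit (word_lock) guards the waiter queue embedded in the state
// word; parked_bit (fast mutex / condvar) records that at least one thread is
// parked in the hash table further below.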
static constexpr uint8_t lock_bit = 0x01;
static constexpr uint8_t queue_lock_bit = 0x02;
static constexpr uint8_t parked_bit = 0x02;
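// Note that queue_lock_bit and parked_bit share the value 0x02. That appears
// intentional: queue_lock_bit is only used on word_lock::state, while
// parked_bit is only used on the fast mutex and condition variable state
// words, so the two flags never meet in the same word.
//
// word_lock is a minimal internal lock: the uncontended path is a single CAS
// on lock_bit, and contended lock/unlock fall through to lock_full() and
// unlock_full() below, which keep an intrusive queue of waiting threads in the
// upper bits of the state word.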
    // word_lock::lock() fast path: try to grab the lock bit with one CAS,
    // otherwise take the queueing slow path.
    if_tsan_pre_lock(this);

    if (!atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
        lock_full();
    }

    if_tsan_post_lock(this);

    // word_lock::unlock() fast path: clear the lock bit, then decide whether a
    // wakeup is needed. If another thread is already walking the queue it will
    // hand things off itself; otherwise wake someone only if waiters exist.
    if_tsan_pre_unlock(this);

    bool no_thread_queuing = (val & queue_lock_bit) == 0;
    bool some_queued = (val & ~(uintptr_t)(queue_lock_bit | lock_bit)) != 0;
    if (no_thread_queuing && some_queued) {
        unlock_full();
    }

    if_tsan_post_unlock(this);
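// lock_full(): the contended path. Spin for a while and, if the lock is still
// held, push a word_lock_queue_data node for this thread onto the queue stored
// in the state word, then park until unlock_full() performs a wakeup.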
WEAK void word_lock::lock_full() {
    spin_control spinner;
    uintptr_t expected;
    atomic_load_relaxed(&state, &expected);

    // If the lock bit is clear, try to take the lock directly.
    if (!(expected & lock_bit)) {
        uintptr_t desired = expected | lock_bit;
        if (atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
            return;
        }
    }

    // While other threads are queued and the spin budget lasts, keep spinning
    // and re-reading the state word.
    if (((expected & ~(uintptr_t)(queue_lock_bit | lock_bit)) != 0) && spinner.should_spin()) {
        atomic_load_relaxed(&state, &expected);
    }

    // Otherwise queue this thread: prepare a stack-allocated node and splice it
    // into the list whose head lives in the state word.
    word_lock_queue_data node;
    node.parker.prepare_park();

    word_lock_queue_data *head = (word_lock_queue_data *)(expected & ~(uintptr_t)(queue_lock_bit | lock_bit));
    if (head == nullptr) {
        // This thread becomes the only waiter.
    }

    if (atomic_cas_weak_release_relaxed(&state, &expected, &desired)) {
        // Queue node published: park here until unlock_full() wakes this thread.
    }
    atomic_load_relaxed(&state, &expected);
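// unlock_full(): wake one waiter. Take queue_lock_bit so only one thread walks
// the queue at a time, find the tail (filling in prev pointers along the way),
// detach it, and unpark the thread sleeping on that node.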
WEAK void word_lock::unlock_full() {
    uintptr_t expected;
    atomic_load_relaxed(&state, &expected);

    // Nothing to do if another thread is already walking the queue, or if
    // nobody is queued at all.
    bool thread_queuing = (expected & queue_lock_bit);
    bool none_queued = (expected & ~(uintptr_t)(queue_lock_bit | lock_bit)) == 0;
    if (thread_queuing || none_queued) {
        return;
    }

    // Take the queue lock so only one thread walks the queue at a time.
    uintptr_t desired = expected | queue_lock_bit;
    if (atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {

        // Walk from the head to find the tail, filling in prev pointers as we
        // go so later unlocks can find the tail quickly.
        word_lock_queue_data *head = (word_lock_queue_data *)(expected & ~(uintptr_t)(queue_lock_bit | lock_bit));
        word_lock_queue_data *current = head;
        word_lock_queue_data *tail = current->tail;
        while (tail == nullptr) {
            word_lock_queue_data *next = current->next;
            next->prev = current;
            current = next;
            tail = current->tail;
        }

        // If the lock was re-acquired in the meantime, release the queue lock
        // and let the new owner do the wakeup when it unlocks.
        if (expected & lock_bit) {
            if (atomic_cas_weak_relacq_relaxed(&state, &expected, &desired)) {
                return;
            }
            atomic_thread_fence_acquire();
        }

        // Detach the tail from the queue.
        word_lock_queue_data *new_tail = tail->prev;
        if (new_tail == nullptr) {
            // The queue is about to become empty: clear the queue pointer and
            // queue_lock_bit together, retrying until the CAS sticks.
            bool continue_outer = false;
            while (!continue_outer) {
                if (atomic_cas_weak_relacq_relaxed(&state, &expected, &desired)) {
                    break;
                }
                if ((expected & ~(uintptr_t)(queue_lock_bit | lock_bit)) == 0) {
                    continue;
                }
                atomic_thread_fence_acquire();
                continue_outer = true;
            }
            if (continue_outer) {
                // A new waiter appeared; the (elided) outer loop starts over.
            }
        } else {
            head->tail = new_tail;
            atomic_and_fetch_release(&state, ~(uintptr_t)queue_lock_bit);
        }

        // Wake the thread parked on the tail node.
        tail->parker.unpark_start();
        tail->parker.unpark();
        tail->parker.unpark_finish();
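// Everything above is the word_lock. What follows is the parking lot itself: a
// fixed-size hash table (HASH_TABLE_SIZE buckets) keyed by the address a
// thread sleeps on, each bucket holding a linked list of queue_data records
// for parked threads. dump_hash() is a debugging aid that prints that table.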
WEAK void dump_hash() {
    // For each bucket index i, walk the list from its head and print every
    // waiter sleeping there.
    while (head != nullptr) {
        print(nullptr) << "Bucket index " << i << " addr "
                       << (void *)head->sleep_address << "\n";
        head = head->next;
    }
    uintptr_t hash_from = addr_hash(addr_from);
    uintptr_t hash_to = addr_hash(addr_to);

    // lock_bucket_pair() must hold two buckets at once (the bucket waiters
    // sleep on and the one they are moved to), so it always locks the
    // lower-hash bucket first to avoid deadlock.
    if (hash_from == hash_to) {
        // Both addresses hash to the same bucket; lock it once.
    } else if (hash_from < hash_to) {
        // Lock buckets[hash_from] first, then buckets[hash_to].
    }

    // unlock_bucket_pair() reverses that order by comparing the bucket
    // addresses, which works because both point into the same bucket array.
    if (&buckets.from == &buckets.to) {
        // Single bucket; unlock it once.
    } else if (&buckets.from > &buckets.to) {
        // buckets.from has the larger hash and was locked second, so unlock it first.
    }
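// park() adds the calling thread to the bucket for its address and sleeps;
// unpark_one() searches that bucket for one thread sleeping on the address,
// unlinks it, notes whether more waiters on the same address remain, and wakes
// it.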
    // park(): append the calling thread's queue_data to this bucket's list.
    if (bucket.head != nullptr) {
        // Non-empty bucket: link the new node after the existing tail.
    }
    // unpark_one(): walk the bucket looking for a waiter parked on this address.
    while (data != nullptr) {
        if (cur_addr == addr) {
            // Unlink this waiter from the bucket's list.
            *data_location = next;

            // Work out whether any other thread is still parked on the same
            // address; the answer is passed along to the woken thread (see the
            // parked-bit handling in mutex_parking_control::unpark below).
            bool more_waiters = false;
            if (bucket.tail == data) {
                // Removed the last node; nothing after it can match.
            }
            while (data2 != nullptr && !more_waiters) {
                more_waiters = (cur_addr2 == addr);
                data2 = data2->next;
            }

            // Wake the unlinked waiter.
            data->parker.unpark_start();
            data->parker.unpark_finish();

            return more_waiters ? 1 : 0;
        }
        // Not a match: advance to the next entry.
        data_location = &data->next;
        data = data->next;
    }
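// unpark_requeue(addr_from, addr_to, ...) moves waiters from one address to
// another in a single pass: at most one thread is chosen to be woken
// immediately, and the rest are spliced onto the destination bucket. The
// condition-variable broadcast path uses this to hand waiters over to the
// mutex rather than waking them all at once.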
    // Walk the source bucket, collecting every waiter parked on addr_from.
    while (data != nullptr) {
        if (cur_addr == addr_from) {
            // Unlink the waiter from the source bucket.
            *data_location = next;

            // The first matching waiter may be chosen for an immediate wakeup;
            // the rest are appended to a local requeue list.
            if (requeue == nullptr) {
                requeue = data;
            } else {
                requeue_tail->next = data;
            }
            requeue_tail = data;
        } else {
            data_location = &data->next;
        }
    }

    // Splice the collected waiters onto the destination bucket.
    if (requeue != nullptr) {
        requeue_tail->next = nullptr;
        if (buckets.to.head == nullptr) {
            buckets.to.head = requeue;
        }
        buckets.to.tail = requeue_tail;
    }

    // If one waiter was chosen for an immediate wakeup, unpark it now.
    if (wakeup != nullptr) {
        wakeup->parker.unpark_start();
        wakeup->parker.unpark_finish();
    }

    return wakeup != nullptr && action.unpark_one;
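// The fast mutex below keeps all of its state in one word: 0 = unlocked,
// lock_bit = locked with no waiters, lock_bit | parked_bit = locked with at
// least one thread parked in the hash table. mutex_parking_control mediates
// between that word and the parking lot: validate() re-checks the word before
// a thread actually sleeps, and unpark() rewrites it when a waiter is woken.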
    // mutex_parking_control::validate(): only park if the mutex is still held
    // and the parked bit is still set.
    return result == (lock_bit | parked_bit);

    // mutex_parking_control::unpark(): release the lock, keeping the parked
    // bit set only if other waiters remain.
    uintptr_t return_state = more_waiters ? parked_bit : 0;
    atomic_store_release(lock_state, &return_state);
    // fast_mutex::lock_full(): the contended lock path.
    atomic_load_relaxed(&state, &expected);
    if (!(expected & lock_bit)) {
        // The lock looks free: try to take it directly.
        if (atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
    atomic_load_relaxed(&state, &expected);

    // Before parking, make sure parked_bit is set so the unlocker knows to
    // wake someone.
    if ((expected & parked_bit) == 0) {
        uintptr_t desired = expected | parked_bit;
        if (!atomic_cas_weak_relaxed_relaxed(&state, &expected, &desired)) {
    atomic_load_relaxed(&state, &expected);

    // fast_mutex::unlock_full(): try a plain release; otherwise unpark a waiter.
    if (atomic_cas_strong_release_relaxed(&state, &expected, &desired)) {

    // fast_mutex::lock() / unlock() fast paths: a single CAS, with the *_full()
    // slow paths on failure.
    if (!atomic_cas_weak_acquire_relaxed(&state, &expected, &desired)) {
    if (!atomic_cas_weak_release_relaxed(&state, &expected, &desired)) {

    // make_parked_if_locked(): set parked_bit only while the lock is still held.
    atomic_load_relaxed(&state, &val);
    if (!(val & lock_bit)) {
    if (atomic_cas_weak_relaxed_relaxed(&state, &val, &desired)) {

    // make_parked(): unconditionally set parked_bit.
    atomic_or_fetch_relaxed(&state, parked_bit);
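// The condition variable (fast_cond) appears to record, in its state word, the
// fast_mutex it is used with; a state of zero means no thread has ever waited.
// signal() wakes at most one waiter, while broadcast() wakes one and requeues
// the rest onto the mutex (see the requeue_callback() fragment below).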
    // broadcast requeue_callback(): if one thread was woken and others were
    // moved onto the mutex, mark the mutex as having parked waiters.
    if (action.unpark_one && some_requeued) {
    // fast_cond::signal(): wake at most one waiter.
    if_tsan_pre_signal(this);

    uintptr_t val;
    atomic_load_relaxed(&state, &val);
    if (val == 0) {
        // No thread has ever waited on this condvar; nothing to signal.
        if_tsan_post_signal(this);
        return;
    }

    if_tsan_post_signal(this);

    // fast_cond::broadcast(): wake one waiter, requeue the rest onto the mutex.
    if_tsan_pre_signal(this);

    atomic_load_relaxed(&state, &val);
    if (val == 0) {
        if_tsan_post_signal(this);
        return;
    }

    if_tsan_post_signal(this);
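// fast_cond::wait() parks the calling thread on the condvar address; when the
// waiter is woken it normally has to re-acquire the mutex, but a requeued
// waiter may be handed the mutex directly, and the TSAN pre/post lock calls
// below appear to account for that handoff. The public halide_cond_wait() is a
// thin cast-and-forward onto this.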
    // fast_cond::wait(): TSAN bookkeeping around holding the mutex again after
    // waking.
    if_tsan_pre_lock(mutex);

    uintptr_t val;
    atomic_load_relaxed((uintptr_t *)mutex, &val);

    if_tsan_post_lock(mutex);

    // halide_cond_wait(): forward to the internal condvar implementation.
    fast_cond->wait(fast_mutex);
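// halide_mutex_array_create() allocates a halide_mutex_array plus its backing
// array of zero-initialized halide_mutex entries; the checks below handle the
// two possible allocation failures.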
    // Bail out if either allocation failed.
    if (array == nullptr) {
        return nullptr;
    }
    if (array->array == nullptr) {
        halide_free(nullptr, array);
        return nullptr;
    }
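// Usage sketch (not from this file): the typical pattern for the public entry
// points implemented above, assuming the documented convention that
// halide_mutex and halide_cond are zero-initialized.
//
//     halide_mutex mutex = {0};
//     halide_cond cond = {0};
//     bool ready = false;
//
//     // Waiting thread:
//     halide_mutex_lock(&mutex);
//     while (!ready) {
//         halide_cond_wait(&cond, &mutex);
//     }
//     halide_mutex_unlock(&mutex);
//
//     // Signaling thread:
//     halide_mutex_lock(&mutex);
//     ready = true;
//     halide_cond_signal(&cond);
//     halide_mutex_unlock(&mutex);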