/*
 * QEMU thread support: mutexes, recursive mutexes, condition variables,
 * semaphores, events, spinlocks and lock-guarded counters (QemuLockCnt).
 *
 * Lock/wait entry points are macros that pass __FILE__/__LINE__ of the
 * call site down to the *_impl functions, and (outside Coverity builds)
 * dispatch through replaceable function pointers so a profiler can
 * interpose itself; see the QSP include below and the "__raw" bypass
 * macros.
 */
#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

/* QemuMutex/QemuRecMutex etc. are defined by the platform header. */
#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

/* Values for the 'mode' argument of qemu_thread_create(). */
#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

void qemu_rec_mutex_init(QemuRecMutex *mutex);
void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);

/* Function-pointer types matching the *_impl signatures above. */
typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                      const char *f, int l);

/*
 * Indirection pointers used by the macros below; presumably repointed by
 * the QSP profiler to interposing implementations — see qsp.h.
 */
extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;

/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m)                                              \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
        qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
        qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
        qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms)                                   \
        qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
/*
 * Statement-expression wrappers: load the current function pointer with
 * qatomic_read, then call it with the caller's file/line.
 */
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                              \
            QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_lock(m) ({                                             \
            QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_trylock(m) ({                            \
            QemuRecMutexTrylockFunc _f;                         \
            _f = qatomic_read(&qemu_rec_mutex_trylock_func);    \
            _f(m, __FILE__, __LINE__);                          \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func);   \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_cond_timedwait(c, m, ms) ({                                       \
            QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
            _f(c, m, ms, __FILE__, __LINE__);                                  \
        })
#endif

#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

#define qemu_rec_mutex_unlock(mutex) \
        qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)

/*
 * Real (out-of-line-capable) function definitions for the macros above.
 * Parenthesizing the name suppresses expansion of the function-like
 * macro at the definition, while the body still expands it; this gives
 * code that needs an actual function (e.g. to take its address) one
 * that reports this header's file/line.
 */
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
 * and pthread_cond_broadcast can be called except while the same mutex is
 * held as in the corresponding pthread_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

/* Returns true if timeout has not expired, and false otherwise */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    return qemu_cond_timedwait(cond, mutex, ms);
}

void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);

void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus,
                             unsigned long nbits);
int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus,
                             unsigned long *nbits);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
G_NORETURN void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);

struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list.  It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);

#ifdef CONFIG_TSAN
#include <sanitizer/tsan_interface.h>
#endif

/* Simple user-space spinlock; 0 = unlocked, non-zero = locked. */
struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    qatomic_set(&spin->value, 0);
#ifdef CONFIG_TSAN
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}

/* const parameter because the only purpose here is the TSAN annotation */
static inline void qemu_spin_destroy(const QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_destroy((void *)spin, __tsan_mutex_not_static);
#endif
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    /*
     * Try to grab the lock with an atomic exchange; on failure, spin
     * on a plain read until the lock looks free before retrying the
     * exchange.
     */
    while (unlikely(qatomic_xchg(&spin->value, 1))) {
        while (qatomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}

/*
 * Returns the previous lock value: false means the lock was acquired,
 * true means it was already held and was NOT acquired.  Note this is
 * the opposite polarity of a typical trylock's success flag.
 */
static inline bool qemu_spin_trylock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
#endif
    bool busy = qatomic_xchg(&spin->value, true);
#ifdef CONFIG_TSAN
    unsigned flags = __tsan_mutex_try_lock;
    flags |= busy ? __tsan_mutex_try_lock_failed : 0;
    __tsan_mutex_post_lock(spin, flags, 0);
#endif
    return busy;
}

/* Unsynchronized peek at the lock state; can change at any time. */
static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return qatomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    /* Release store: makes the critical section visible before unlock. */
    qatomic_store_release(&spin->value, 0);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}

/*
 * Count plus, on non-Linux hosts, a mutex.
 * NOTE(review): Linux builds apparently synchronize without the mutex
 * (presumably via futex on 'count') — confirm in the lockcnt
 * implementation.
 */
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockcnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockcnt
 * @lockcnt: the lockcnt to destruct
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1.  If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks.  This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count.  If the new count is zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a LockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time.  Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);

#endif