1 #ifndef QEMU_THREAD_H 2 #define QEMU_THREAD_H 3 4 #include "qemu/processor.h" 5 #include "qemu/atomic.h" 6 7 typedef struct QemuCond QemuCond; 8 typedef struct QemuSemaphore QemuSemaphore; 9 typedef struct QemuEvent QemuEvent; 10 typedef struct QemuLockCnt QemuLockCnt; 11 typedef struct QemuThread QemuThread; 12 13 #ifdef _WIN32 14 #include "qemu/thread-win32.h" 15 #else 16 #include "qemu/thread-posix.h" 17 #endif 18 19 /* include QSP header once QemuMutex, QemuCond etc. are defined */ 20 #include "qemu/qsp.h" 21 22 #define QEMU_THREAD_JOINABLE 0 23 #define QEMU_THREAD_DETACHED 1 24 25 void qemu_mutex_init(QemuMutex *mutex); 26 void qemu_mutex_destroy(QemuMutex *mutex); 27 int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line); 28 void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line); 29 void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line); 30 31 typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l); 32 typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l); 33 typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l); 34 typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l); 35 typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f, 36 int l); 37 38 extern QemuMutexLockFunc qemu_bql_mutex_lock_func; 39 extern QemuMutexLockFunc qemu_mutex_lock_func; 40 extern QemuMutexTrylockFunc qemu_mutex_trylock_func; 41 extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func; 42 extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func; 43 extern QemuCondWaitFunc qemu_cond_wait_func; 44 45 /* convenience macros to bypass the profiler */ 46 #define qemu_mutex_lock__raw(m) \ 47 qemu_mutex_lock_impl(m, __FILE__, __LINE__) 48 #define qemu_mutex_trylock__raw(m) \ 49 qemu_mutex_trylock_impl(m, __FILE__, __LINE__) 50 51 #ifdef __COVERITY__ 52 /* 53 * Coverity is severely confused by the indirect function calls, 54 * 
hide them. 55 */ 56 #define qemu_mutex_lock(m) \ 57 qemu_mutex_lock_impl(m, __FILE__, __LINE__); 58 #define qemu_mutex_trylock(m) \ 59 qemu_mutex_trylock_impl(m, __FILE__, __LINE__); 60 #define qemu_rec_mutex_lock(m) \ 61 qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__); 62 #define qemu_rec_mutex_trylock(m) \ 63 qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__); 64 #define qemu_cond_wait(c, m) \ 65 qemu_cond_wait_impl(c, m, __FILE__, __LINE__); 66 #else 67 #define qemu_mutex_lock(m) ({ \ 68 QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func); \ 69 _f(m, __FILE__, __LINE__); \ 70 }) 71 72 #define qemu_mutex_trylock(m) ({ \ 73 QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \ 74 _f(m, __FILE__, __LINE__); \ 75 }) 76 77 #define qemu_rec_mutex_lock(m) ({ \ 78 QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \ 79 _f(m, __FILE__, __LINE__); \ 80 }) 81 82 #define qemu_rec_mutex_trylock(m) ({ \ 83 QemuRecMutexTrylockFunc _f; \ 84 _f = atomic_read(&qemu_rec_mutex_trylock_func); \ 85 _f(m, __FILE__, __LINE__); \ 86 }) 87 88 #define qemu_cond_wait(c, m) ({ \ 89 QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func); \ 90 _f(c, m, __FILE__, __LINE__); \ 91 }) 92 #endif 93 94 #define qemu_mutex_unlock(mutex) \ 95 qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__) 96 97 static inline void (qemu_mutex_lock)(QemuMutex *mutex) 98 { 99 qemu_mutex_lock(mutex); 100 } 101 102 static inline int (qemu_mutex_trylock)(QemuMutex *mutex) 103 { 104 return qemu_mutex_trylock(mutex); 105 } 106 107 static inline void (qemu_mutex_unlock)(QemuMutex *mutex) 108 { 109 qemu_mutex_unlock(mutex); 110 } 111 112 static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex) 113 { 114 qemu_rec_mutex_lock(mutex); 115 } 116 117 static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex) 118 { 119 return qemu_rec_mutex_trylock(mutex); 120 } 121 122 /* Prototypes for other functions are in thread-posix.h/thread-win32.h. 
 */
void qemu_rec_mutex_init(QemuRecMutex *mutex);

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
 * and pthread_cond_broadcast can be called except while the same mutex is
 * held as in the corresponding pthread_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);

/*
 * Parenthesized name suppresses the qemu_cond_wait() macro defined above,
 * giving an addressable function with the same behavior.
 */
static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
/* NOTE(review): return convention on timeout is defined by the
 * thread-posix/thread-win32 implementation — confirm there. */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);

void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);

struct Notifier;
void qemu_thread_atexit_add(struct Notifier *notifier);
void qemu_thread_atexit_remove(struct Notifier *notifier);

/* Simple test-and-set spinlock: value 0 = unlocked, nonzero = locked. */
struct QemuSpin {
    int value;
};

/* __sync_lock_release stores 0, i.e. the lock starts out unlocked. */
static inline void qemu_spin_init(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        /*
         * Contended: spin with plain loads until the lock looks free,
         * then retry the atomic test-and-set.  This avoids hammering
         * the cache line with atomic read-modify-write operations.
         */
        while (atomic_read(&spin->value)) {
            cpu_relax();
        }
    }
}

/*
 * Returns the previous lock value: false means the lock was free and has
 * now been acquired; true means it was already held and nothing happened.
 * NOTE: this is the inverse of the common "trylock returns true on
 * success" convention — check call sites accordingly.
 */
static inline bool qemu_spin_trylock(QemuSpin *spin)
{
    return __sync_lock_test_and_set(&spin->value, true);
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return atomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}

struct QemuLockCnt {
#ifndef CONFIG_LINUX
    /* Without Linux futexes, a mutex guards the counter instead. */
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockcnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockcnt
 * @lockcnt: the lockcnt to destruct
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1.  If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks.
 * This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * NOTE(review): unlike qemu_lockcnt_dec_and_lock, this appears to only
 * decrement, never taking the mutex — confirm in the implementation.
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count.  If the new count is zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a LockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time.  Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);

#endif