/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    int err;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    err = pthread_mutex_init(&mutex->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}
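
/*
 * Editor's note: a minimal usage sketch of the wrappers above, showing the
 * standard "lock, re-check the predicate in a loop, wait" condition-variable
 * pattern.  The names done_lock, done_cond, done, wait_for_done and
 * signal_done are hypothetical, invented for this example.
 *
 *     static QemuMutex done_lock;
 *     static QemuCond done_cond;
 *     static bool done;
 *
 *     static void wait_for_done(void)
 *     {
 *         qemu_mutex_lock(&done_lock);
 *         while (!done) {
 *             // Atomically drops done_lock while sleeping and re-acquires
 *             // it before returning, so the predicate check is race-free.
 *             qemu_cond_wait(&done_cond, &done_lock);
 *         }
 *         qemu_mutex_unlock(&done_lock);
 *     }
 *
 *     static void signal_done(void)
 *     {
 *         qemu_mutex_lock(&done_lock);
 *         done = true;
 *         qemu_cond_signal(&done_cond);
 *         qemu_mutex_unlock(&done_lock);
 *     }
 */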

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    /* pthread functions return a positive error code, not -1 with errno,
     * so check for nonzero rather than negative return values.
     */
    rc = pthread_cond_destroy(&sem->cond);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
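
/*
 * Editor's note: a usage sketch for the semaphore wrappers, assuming a
 * hypothetical "request_sem" carrying posts from a producer to a consumer.
 * qemu_sem_timedwait() returns 0 once a post has been consumed and -1 on
 * timeout; the ms argument is relative, and compute_abs_deadline() above
 * converts it to the absolute deadline the underlying waits expect.
 *
 *     QemuSemaphore request_sem;
 *
 *     qemu_sem_init(&request_sem, 0);      // zero initial count
 *
 *     // producer:
 *     qemu_sem_post(&request_sem);         // count++, wake one waiter
 *
 *     // consumer:
 *     if (qemu_sem_timedwait(&request_sem, 100) < 0) {
 *         // timed out: nothing was posted within 100 ms
 *     }
 *
 *     qemu_sem_destroy(&request_sem);
 */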

#ifdef __linux__
#include "qemu/futex.h"
#else
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by qemu_futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        qemu_futex_wait(ev, EV_BUSY);
    }
}
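
/*
 * Editor's note: a sketch of how the QemuEvent primitive above is meant to
 * be used.  An event is a level-triggered latch, not a counter: multiple
 * qemu_event_set() calls before a wait collapse into one.  "io_done" is a
 * hypothetical name for this example.
 *
 *     static QemuEvent io_done;
 *
 *     qemu_event_init(&io_done, false);    // start in the EV_FREE state
 *
 *     // waiter:
 *     qemu_event_wait(&io_done);           // blocks until the event is set
 *
 *     // setter:
 *     qemu_event_set(&io_done);            // wakes every waiter
 *
 * qemu_event_reset() moves a set event back to free so it can be reused;
 * per the transition table above, resetting a free or busy event is a no-op.
 */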

static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}

/* Attempt to set the thread's name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    pthread_setname_np(thread->thread, name);
#endif
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    /* Leave signal handling to the iothread. */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err) {
        error_exit(err, __func__);
    }

    if (name_threads) {
        qemu_thread_set_name(thread, name);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_detach(thread->thread);
        if (err) {
            error_exit(err, __func__);
        }
    }
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}
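
/*
 * Editor's note: a sketch of creating and joining a thread with the
 * wrappers above.  "worker_fn" is a hypothetical start routine; the name
 * string only becomes visible to debuggers once qemu_thread_naming(true)
 * has been called and the host supports pthread_setname_np().
 *
 *     static void *worker_fn(void *opaque)
 *     {
 *         return opaque;                   // returned value reaches join
 *     }
 *
 *     QemuThread worker;
 *     void *ret;
 *
 *     qemu_thread_create(&worker, "worker", worker_fn, NULL,
 *                        QEMU_THREAD_JOINABLE);
 *     ret = qemu_thread_join(&worker);     // reaps the thread
 *
 * A thread created with QEMU_THREAD_DETACHED is reaped automatically and
 * must not be joined.
 */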