xref: /openbmc/qemu/util/qemu-thread-posix.c (revision e4ec5ad4)
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
#include "qemu-thread-common.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}
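
/*
 * Worked example (editor's note, not part of the original file): with
 * ms = 1500 and gettimeofday() returning tv_sec = 100, tv_usec = 900000,
 * the code above yields
 *
 *     ts->tv_nsec = 900000 * 1000 + 500 * 1000000 = 1400000000
 *     ts->tv_sec  = 100 + 1500 / 1000             = 101
 *
 * and the overflow branch then normalizes this to tv_sec = 102,
 * tv_nsec = 400000000, i.e. an absolute CLOCK_REALTIME deadline 1.5s
 * in the future, which is what pthread_cond_timedwait() expects by default.
 */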

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err)
        error_exit(err, __func__);
    qemu_mutex_post_init(mutex);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    mutex->initialized = false;
    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    qemu_mutex_pre_lock(mutex, file, line);
    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
    qemu_mutex_post_lock(mutex, file, line);
}

int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_trylock(&mutex->lock);
    if (err == 0) {
        qemu_mutex_post_lock(mutex, file, line);
        return 0;
    }
    if (err != EBUSY) {
        error_exit(err, __func__);
    }
    return -EBUSY;
}

void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}
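
/*
 * Minimal usage sketch (editor's addition, not part of the original file).
 * Callers normally use the qemu_mutex_lock()/qemu_mutex_unlock() wrappers
 * from include/qemu/thread.h, which forward __FILE__ and __LINE__ to the
 * *_impl functions above:
 *
 *     static QemuMutex counter_lock;   // hypothetical example state
 *     static int counter;
 *
 *     void counter_inc(void)
 *     {
 *         qemu_mutex_lock(&counter_lock);
 *         counter++;
 *         qemu_mutex_unlock(&counter_lock);
 *     }
 *
 * counter_lock must have been initialized once with
 * qemu_mutex_init(&counter_lock) before first use.
 */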

void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    int err;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    err = pthread_mutex_init(&mutex->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}
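
/*
 * Illustrative sketch (editor's addition): the only difference from a plain
 * QemuMutex is PTHREAD_MUTEX_RECURSIVE, so the same thread may nest
 * acquisitions, e.g. (assuming the qemu_rec_mutex_lock()/unlock() wrappers
 * declared in include/qemu/thread.h):
 *
 *     qemu_rec_mutex_lock(&rmutex);
 *     qemu_rec_mutex_lock(&rmutex);    // same thread, does not deadlock
 *     qemu_rec_mutex_unlock(&rmutex);
 *     qemu_rec_mutex_unlock(&rmutex);  // lock actually released only here
 *
 * Each lock must still be balanced by an unlock from the same thread.
 */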

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    cond->initialized = false;
    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(cond->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    qemu_mutex_post_lock(mutex, file, line);
    if (err)
        error_exit(err, __func__);
}

bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line)
{
    int err;
    struct timespec ts;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    compute_abs_deadline(&ts, ms);
    err = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
    trace_qemu_mutex_locked(mutex, file, line);
    if (err && err != ETIMEDOUT) {
        error_exit(err, __func__);
    }
    return err != ETIMEDOUT;
}
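
/*
 * Minimal usage sketch (editor's addition, not part of the original file),
 * assuming the qemu_cond_wait()/qemu_cond_timedwait() wrappers from
 * include/qemu/thread.h that forward __FILE__/__LINE__ to the *_impl
 * functions above. As with raw pthreads, the predicate is rechecked in a
 * loop because wakeups may be spurious; lock, cond and data_ready are
 * hypothetical names:
 *
 *     qemu_mutex_lock(&lock);
 *     while (!data_ready) {
 *         if (!qemu_cond_timedwait(&cond, &lock, 10)) {
 *             break;                  // this wait timed out after ~10 ms
 *         }
 *     }
 *     qemu_mutex_unlock(&lock);
 *
 * qemu_cond_timedwait() returns false only on timeout; any other
 * pthread_cond_timedwait() failure aborts via error_exit().
 */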

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
    sem->initialized = false;
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_cond_destroy(&sem->cond);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
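
/*
 * Minimal producer/consumer sketch (editor's addition, not part of the
 * original file):
 *
 *     QemuSemaphore items;
 *     qemu_sem_init(&items, 0);          // start with nothing available
 *
 *     // producer
 *     enqueue(work);
 *     qemu_sem_post(&items);
 *
 *     // consumer
 *     if (qemu_sem_timedwait(&items, 100) == 0) {   // wait up to ~100 ms
 *         process(dequeue());
 *     } else {
 *         // timed out, qemu_sem_timedwait() returned -1
 *     }
 *
 * enqueue()/dequeue()/process() are hypothetical helpers; the queue itself
 * still needs its own locking, e.g. a QemuMutex.
 */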

#ifdef __linux__
#include "qemu/futex.h"
#else
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by qemu_futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1
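
/*
 * Illustrative interleaving (editor's addition, not part of the original
 * file), showing how the functions below use these values:
 *
 *     waiter: qemu_event_wait()  reads EV_FREE
 *     waiter: cmpxchg EV_FREE -> EV_BUSY succeeds, calls qemu_futex_wait()
 *     setter: qemu_event_set()   xchg to EV_SET, sees old value EV_BUSY
 *     setter: qemu_futex_wake()  wakes the waiter
 *     waiter: returns; a later qemu_event_reset() ORs in EV_FREE,
 *             turning EV_SET (0) back into EV_FREE (1) while leaving a
 *             concurrent EV_BUSY (-1, all bits set) untouched
 */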

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        qemu_futex_wait(ev, EV_BUSY);
    }
}
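
/*
 * Minimal usage sketch (editor's addition, not part of the original file).
 * A QemuEvent behaves as a level-triggered, manually reset flag:
 *
 *     static QemuEvent done_ev;       // hypothetical example state
 *
 *     qemu_event_init(&done_ev, false);
 *
 *     // waiter                          // worker
 *     qemu_event_wait(&done_ev);         do_work();
 *     consume_results();                 qemu_event_set(&done_ev);
 *     qemu_event_reset(&done_ev);        // ready for the next round
 *
 * do_work()/consume_results() are placeholders. The reset must happen
 * before the next qemu_event_set() the waiter wants to observe.
 */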

static __thread NotifierList thread_exit;

/*
 * Note that in this implementation you can register a thread-exit
 * notifier for the main thread, but it will never be called.
 * This is OK because main thread exit can only happen when the
 * entire process is exiting, and the API allows notifiers to not
 * be called on process exit.
 */
void qemu_thread_atexit_add(Notifier *notifier)
{
    notifier_list_add(&thread_exit, notifier);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}
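
/*
 * Minimal usage sketch (editor's addition, not part of the original file),
 * using the Notifier type from include/qemu/notify.h; my_cleanup and
 * my_exit_notifier are hypothetical names:
 *
 *     static void my_cleanup(Notifier *n, void *unused)
 *     {
 *         // release per-thread resources here
 *     }
 *
 *     static __thread Notifier my_exit_notifier;
 *
 *     my_exit_notifier.notify = my_cleanup;
 *     qemu_thread_atexit_add(&my_exit_notifier);
 *
 * The notifier fires from qemu_thread_atexit_notify() below when the
 * registering (non-main) thread exits or is cancelled.
 */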

static void qemu_thread_atexit_notify(void *arg)
{
    /*
     * Called when a non-main thread exits (via qemu_thread_exit()
     * or by returning from its start routine).
     */
    notifier_list_notify(&thread_exit, NULL);
}

typedef struct {
    void *(*start_routine)(void *);
    void *arg;
    char *name;
} QemuThreadArgs;

static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;
    void *r;

#ifdef CONFIG_THREAD_SETNAME_BYTHREAD
    /* Attempt to set the thread's name; note that this is for debug, so
     * we're not going to fail if we can't set it.
     */
    if (name_threads && qemu_thread_args->name) {
# if defined(CONFIG_PTHREAD_SETNAME_NP_W_TID)
        pthread_setname_np(pthread_self(), qemu_thread_args->name);
# elif defined(CONFIG_PTHREAD_SETNAME_NP_WO_TID)
        pthread_setname_np(qemu_thread_args->name);
# endif
    }
#endif
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);
    pthread_cleanup_push(qemu_thread_atexit_notify, NULL);
    r = start_routine(arg);
    pthread_cleanup_pop(1);
    return r;
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;
    QemuThreadArgs *qemu_thread_args;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    /* Blocking the signals can result in undefined behaviour. */
    sigdelset(&set, SIGSEGV);
    sigdelset(&set, SIGFPE);
    sigdelset(&set, SIGILL);
    /* TODO avoid SIGBUS loss on macOS */
    pthread_sigmask(SIG_SETMASK, &set, &oldset);

    qemu_thread_args = g_new0(QemuThreadArgs, 1);
    qemu_thread_args->name = g_strdup(name);
    qemu_thread_args->start_routine = start_routine;
    qemu_thread_args->arg = arg;

    err = pthread_create(&thread->thread, &attr,
                         qemu_thread_start, qemu_thread_args);

    if (err)
        error_exit(err, __func__);

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}
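
/*
 * Minimal usage sketch (editor's addition, not part of the original file);
 * worker() and the "my-worker" name are hypothetical:
 *
 *     static void *worker(void *opaque)
 *     {
 *         // ... do work on opaque ...
 *         return NULL;
 *     }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, "my-worker", worker, NULL, QEMU_THREAD_JOINABLE);
 *     // ... later, from another thread ...
 *     qemu_thread_join(&t);
 *
 * QEMU_THREAD_DETACHED threads clean up after themselves and must not be
 * passed to qemu_thread_join().
 */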

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}