/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#ifdef __linux__
#include <sys/syscall.h>
#include <linux/futex.h>
#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

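/*
 * Recursive mutex: initialized with PTHREAD_MUTEX_RECURSIVE, so the owning
 * thread may relock it without deadlocking and must unlock it once per
 * successful lock.
 */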
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    int err;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    err = pthread_mutex_init(&mutex->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err)
        error_exit(err, __func__);
}

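/*
 * Semaphores: on hosts where unnamed POSIX semaphores are not usable
 * (e.g. sem_init() does not support them on macOS), emulate the semaphore
 * with a counter protected by a mutex and a condition variable.
 */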
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    /* pthread_* functions return a positive error number, not -1 */
    rc = pthread_cond_destroy(&sem->cond);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

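/*
 * Convert a relative timeout in milliseconds into an absolute CLOCK_REALTIME
 * deadline for pthread_cond_timedwait()/sem_timedwait().  For example,
 * ms = 1500 adds 1 second and 500000000 nanoseconds to the current time,
 * then normalizes tv_nsec back into [0, 1000000000).
 */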
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

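/*
 * QemuEvent: on Linux the event value itself is used as a futex word.
 * FUTEX_WAIT puts the caller to sleep only while the word still equals the
 * expected value, and FUTEX_WAKE wakes up to n sleeping waiters.  On other
 * hosts the same interface is emulated with the mutex/condvar pair embedded
 * in QemuEvent.
 */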
#ifdef __linux__
#define futex(...)              syscall(__NR_futex, __VA_ARGS__)

static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    while (futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
        switch (errno) {
        case EWOULDBLOCK:
            return;
        case EINTR:
            break; /* get out of switch and retry */
        default:
            abort();
        }
    }
}
#else
static inline void futex_wake(QemuEvent *ev, int n)
{
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

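/*
 * Illustrative usage sketch (not part of this file): a signalling thread
 * calls qemu_event_set() once its work is done, a waiting thread blocks in
 * qemu_event_wait() until that happens, and qemu_event_reset() rearms the
 * event for the next round:
 *
 *     QemuEvent done;
 *     qemu_event_init(&done, false);
 *
 *     // waiter                          // signaller
 *     qemu_event_wait(&done);            qemu_event_set(&done);
 *     qemu_event_reset(&done);
 */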
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}

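/*
 * Thread-exit notifiers: each thread keeps its own NotifierList, stored as a
 * pointer-sized value in a TLS slot.  The key's destructor,
 * qemu_thread_atexit_run(), runs the list when the thread exits.
 */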
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}


/* Attempt to set the thread's name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    pthread_setname_np(thread->thread, name);
#endif
}

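/*
 * Illustrative usage sketch (not part of this file); "worker_fn" and "arg"
 * are hypothetical placeholders:
 *
 *     QemuThread th;
 *     qemu_thread_create(&th, "worker", worker_fn, arg, QEMU_THREAD_JOINABLE);
 *     ...
 *     void *ret = qemu_thread_join(&th);
 */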
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err)
        error_exit(err, __func__);

    if (name_threads) {
        qemu_thread_set_name(thread, name);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}