xref: /openbmc/qemu/util/qemu-thread-posix.c (revision 21f5826a)
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#ifdef __linux__
#include <sys/syscall.h>
#include <linux/futex.h>
#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

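/*
 * Common failure path for the wrappers below: report which wrapper failed
 * and the textual errno, then abort so the bug is caught close to its origin.
 */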
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

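/*
 * PTHREAD_MUTEX_ERRORCHECK makes relocking a mutex already held by the
 * caller, or unlocking a mutex the caller does not own, fail with an error
 * code instead of deadlocking or silently succeeding; error_exit() then
 * turns such misuse into an immediate abort.
 */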
void qemu_mutex_init(QemuMutex *mutex)
{
    int err;
    pthread_mutexattr_t mutexattr;

    pthread_mutexattr_init(&mutexattr);
    pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK);
    err = pthread_mutex_init(&mutex->lock, &mutexattr);
    pthread_mutexattr_destroy(&mutexattr);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

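/*
 * Unlike the wrappers above, trylock does not abort on failure: it returns
 * 0 when the lock was acquired and an error code (typically EBUSY) when it
 * was not, so callers can test the result.
 */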
int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

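/*
 * As with pthread_cond_wait(), the mutex must be held on entry, is released
 * while sleeping and reacquired before returning, and spurious wakeups are
 * possible, so callers must re-check their predicate in a loop.  A minimal
 * usage sketch (hypothetical names, not part of this file):
 *
 *     qemu_mutex_lock(&lock);
 *     while (!data_ready) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     consume();
 *     qemu_mutex_unlock(&lock);
 */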
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err)
        error_exit(err, __func__);
}

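/*
 * QemuSemaphore: on hosts without usable unnamed POSIX semaphores (sem_init
 * is not supported on macOS, for example), emulate a counting semaphore with
 * a mutex/condition-variable pair and an explicit counter; everywhere else,
 * wrap sem_t directly.
 */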
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_cond_destroy(&sem->cond);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

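/*
 * Turn a relative timeout in milliseconds into the absolute CLOCK_REALTIME
 * deadline expected by sem_timedwait()/pthread_cond_timedwait(), carrying
 * any nanosecond overflow into the seconds field.
 */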
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

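/*
 * Wait for the semaphore with a timeout of @ms milliseconds.  Returns 0 if
 * the semaphore was acquired and -1 on timeout; ms <= 0 degenerates into a
 * non-blocking poll.  Waits interrupted by signals are restarted.
 */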
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

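/*
 * QemuEvent: on Linux the event is a bare integer driven directly with the
 * futex(2) system call; on other hosts the same wake/wait interface is
 * emulated with a mutex and condition variable protecting ev->value.
 */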
#ifdef __linux__
#define futex(...)              syscall(__NR_futex, __VA_ARGS__)

static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
}
#else
static inline void futex_wake(QemuEvent *ev, int n)
{
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

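/*
 * Illustrative use of the event API (a sketch with hypothetical names, not
 * part of this file): the waiter resets the event before kicking off the
 * work, then blocks in qemu_event_wait(); the worker calls qemu_event_set()
 * when it is done.
 *
 *     qemu_event_reset(&done_ev);
 *     schedule_work();
 *     qemu_event_wait(&done_ev);   // returns once the worker has
 *                                  // called qemu_event_set(&done_ev)
 */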
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}

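/*
 * Per-thread exit notifiers: a TLS key whose destructor runs when a thread
 * exits, invoking every notifier registered by that thread.  The union lets
 * the pointer-sized NotifierList head travel through the void * slot that
 * pthread_getspecific()/pthread_setspecific() provide.
 */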
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}


/* Attempt to set the thread's name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    pthread_setname_np(thread->thread, name);
#endif
}

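/*
 * Threads are created with every signal blocked: the creator masks all
 * signals around pthread_create() so the child starts with a full signal
 * mask (new threads inherit the creator's mask), then restores its own
 * mask, leaving signal handling to the iothread.
 */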
void qemu_thread_create(QemuThread *thread, const char *name,
                       void *(*start_routine)(void*),
                       void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err)
        error_exit(err, __func__);

    if (name_threads) {
        qemu_thread_set_name(thread, name);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}
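
/*
 * A minimal usage sketch for the thread wrappers (hypothetical names, not
 * part of this file): create a joinable worker and wait for its result.
 *
 *     static void *worker(void *opaque)
 *     {
 *         return opaque;
 *     }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, "worker", worker, NULL, QEMU_THREAD_JOINABLE);
 *     void *ret = qemu_thread_join(&t);
 */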