xref: /openbmc/qemu/util/qemu-thread-posix.c (revision 2266d443)
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
#include "qemu-thread-common.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err)
        error_exit(err, __func__);
    qemu_mutex_post_init(mutex);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    mutex->initialized = false;
    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    qemu_mutex_pre_lock(mutex, file, line);
    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
    qemu_mutex_post_lock(mutex, file, line);
}

int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_trylock(&mutex->lock);
    if (err == 0) {
        qemu_mutex_post_lock(mutex, file, line);
        return 0;
    }
    if (err != EBUSY) {
        error_exit(err, __func__);
    }
    return -EBUSY;
}

void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

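/*
 * Illustrative sketch (not part of this file): the *_impl functions above are
 * normally reached through the qemu_mutex_lock()/qemu_mutex_unlock() wrappers
 * in "qemu/thread.h", which supply __FILE__/__LINE__ for the tracing hooks.
 * A typical critical section looks like this; the lock and counter are
 * hypothetical example state:
 *
 *     static QemuMutex counter_lock;
 *     static int counter;
 *
 *     qemu_mutex_init(&counter_lock);
 *     ...
 *     qemu_mutex_lock(&counter_lock);
 *     counter++;
 *     qemu_mutex_unlock(&counter_lock);
 */
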
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    int err;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    err = pthread_mutex_init(&mutex->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}

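/*
 * Illustrative note (an assumption, not from this file): a QemuRecMutex may
 * be re-taken by the thread that already holds it, so re-entrant code such as
 * the sketch below does not deadlock.  qemu_rec_mutex_lock()/unlock() are
 * assumed to be the wrappers declared alongside this API in "qemu/thread.h".
 *
 *     static QemuRecMutex rmutex;          // hypothetical example state
 *
 *     qemu_rec_mutex_init(&rmutex);
 *     qemu_rec_mutex_lock(&rmutex);
 *     qemu_rec_mutex_lock(&rmutex);        // same thread: nests, does not block
 *     qemu_rec_mutex_unlock(&rmutex);
 *     qemu_rec_mutex_unlock(&rmutex);      // released once unlocks balance the locks
 */
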
void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    cond->initialized = false;
    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(cond->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    qemu_mutex_post_lock(mutex, file, line);
    if (err)
        error_exit(err, __func__);
}

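/*
 * Illustrative sketch (not part of this file): as with plain pthreads, the
 * predicate must be re-checked in a loop because qemu_cond_wait() can wake
 * spuriously and the state may change again before the waiter reacquires the
 * mutex.  The lock, flag and cond names below are hypothetical:
 *
 *     // waiting side:
 *     qemu_mutex_lock(&lock);
 *     while (!flag) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     qemu_mutex_unlock(&lock);
 *
 *     // signalling side:
 *     qemu_mutex_lock(&lock);
 *     flag = true;
 *     qemu_cond_signal(&cond);
 *     qemu_mutex_unlock(&lock);
 */
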
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}

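/*
 * Design note (added commentary): when the host lacks sem_timedwait()
 * (CONFIG_SEM_TIMEDWAIT unset, e.g. on hosts such as macOS), the semaphore is
 * emulated with a mutex, a condition variable and a plain counter.
 * qemu_sem_post() then increments the counter and signals the condition, and
 * the wait functions block on the condition until the counter is non-zero.
 */
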
void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
    sem->initialized = false;
#ifndef CONFIG_SEM_TIMEDWAIT
    /* pthread_*() report failure via a non-zero error number, not -1/errno */
    rc = pthread_cond_destroy(&sem->cond);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

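/*
 * Worked example (added commentary): with ms = 1500 and a current time of
 * tv_sec = 10, tv_usec = 800000, the code above computes
 *
 *     tv_nsec = 800000 * 1000 + 500 * 1000000 = 1300000000
 *     tv_sec  = 10 + 1500 / 1000             = 11
 *
 * and the carry step then normalises this to tv_sec = 12, tv_nsec =
 * 300000000.  The deadline is based on gettimeofday(), i.e. the realtime
 * clock that sem_timedwait() and pthread_cond_timedwait() use by default.
 */
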
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

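/*
 * Illustrative sketch (not part of this file): a producer/consumer pair on
 * top of the semaphore API above; the names are hypothetical.
 *
 *     QemuSemaphore items;
 *     qemu_sem_init(&items, 0);
 *
 *     // producer thread:
 *     qemu_sem_post(&items);                      // publish one item
 *
 *     // consumer thread: either block until an item is available ...
 *     qemu_sem_wait(&items);
 *     // ... or give up after 100 ms (returns 0 on success, -1 on timeout)
 *     if (qemu_sem_timedwait(&items, 100) < 0) {
 *         // timed out
 *     }
 */
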
#ifdef __linux__
#include "qemu/futex.h"
#else
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

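/*
 * Added note: on Linux the qemu_futex_wait()/qemu_futex_wake() used below
 * come from "qemu/futex.h" and are thin wrappers around the futex(2) system
 * call; the mutex/condvar pair above only emulates that interface on hosts
 * without futexes.
 */
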
/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by qemu_futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        qemu_futex_wait(ev, EV_BUSY);
    }
}

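/*
 * Illustrative sketch (not part of this file): a one-shot hand-off using the
 * event; the names are hypothetical.  The waiter drives free->busy, the
 * setter drives busy->set (waking the waiter), and reset re-arms the event
 * for the next round.
 *
 *     QemuEvent done;
 *     qemu_event_init(&done, false);      // starts out EV_FREE
 *
 *     // worker thread, when its work is finished:
 *     qemu_event_set(&done);
 *
 *     // waiting thread:
 *     qemu_event_wait(&done);             // returns once the event is set
 *     qemu_event_reset(&done);            // re-arm before reusing it
 */
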
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}

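/*
 * Illustrative sketch (not part of this file): per-thread cleanup through the
 * notifier list above; the callback and its registration are hypothetical.
 * The notifier must be registered from the thread whose exit it should hook,
 * and must stay valid until that thread exits (hence thread-local storage).
 *
 *     static void my_cleanup(Notifier *n, void *unused)
 *     {
 *         // runs in the exiting thread, from the TLS destructor
 *     }
 *
 *     static __thread Notifier my_exit_notifier;
 *
 *     my_exit_notifier.notify = my_cleanup;
 *     qemu_thread_atexit_add(&my_exit_notifier);
 */
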
typedef struct {
    void *(*start_routine)(void *);
    void *arg;
    char *name;
} QemuThreadArgs;

static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;

#ifdef CONFIG_PTHREAD_SETNAME_NP
    /* Attempt to set the thread's name; note that this is for debug, so
     * we're not going to fail if we can't set it.
     */
    if (name_threads && qemu_thread_args->name) {
        pthread_setname_np(pthread_self(), qemu_thread_args->name);
    }
#endif
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);
    return start_routine(arg);
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;
    QemuThreadArgs *qemu_thread_args;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);

    qemu_thread_args = g_new0(QemuThreadArgs, 1);
    qemu_thread_args->name = g_strdup(name);
    qemu_thread_args->start_routine = start_routine;
    qemu_thread_args->arg = arg;

    err = pthread_create(&thread->thread, &attr,
                         qemu_thread_start, qemu_thread_args);

    if (err)
        error_exit(err, __func__);

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

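/*
 * Illustrative sketch (not part of this file): creating a named, joinable
 * thread and collecting its result; worker() and its argument are
 * hypothetical.
 *
 *     static void *worker(void *opaque)
 *     {
 *         return opaque;
 *     }
 *
 *     QemuThread thread;
 *     qemu_thread_create(&thread, "my-worker", worker, NULL,
 *                        QEMU_THREAD_JOINABLE);
 *     void *ret = qemu_thread_join(&thread);
 *
 * With QEMU_THREAD_DETACHED the thread cannot be joined and cleans up after
 * itself when its start routine returns.
 */
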
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}