xref: /openbmc/qemu/util/qemu-thread-posix.c (revision 01c22f2c)
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#ifdef __linux__
#include <sys/syscall.h>
#include <linux/futex.h>
#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

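/*
 * Mutexes are created with PTHREAD_MUTEX_ERRORCHECK so that misuse is
 * caught at runtime: relocking a mutex the caller already holds fails
 * with EDEADLK, and unlocking a mutex the caller does not own fails
 * with EPERM.  Either error ends up in error_exit() above.
 */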
void qemu_mutex_init(QemuMutex *mutex)
{
    int err;
    pthread_mutexattr_t mutexattr;

    pthread_mutexattr_init(&mutexattr);
    pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK);
    err = pthread_mutex_init(&mutex->lock, &mutexattr);
    pthread_mutexattr_destroy(&mutexattr);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

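/*
 * The caller must hold @mutex.  It is released atomically while the
 * thread sleeps on @cond and re-acquired before this returns, so the
 * usual "recheck the predicate in a loop" idiom applies.
 */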
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err)
        error_exit(err, __func__);
}

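/*
 * Semaphores: on hosts where unnamed POSIX semaphores are missing or
 * unreliable (the #if below covers Darwin and NetBSD), QemuSemaphore is
 * emulated with a mutex, a condition variable and a counter; everywhere
 * else it wraps a plain sem_t.
 */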
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_cond_destroy(&sem->cond);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

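/*
 * Turn a timeout of @ms milliseconds, relative to now, into the absolute
 * CLOCK_REALTIME timespec that sem_timedwait()/pthread_cond_timedwait()
 * expect, normalizing tv_nsec into the [0, 1e9) range.
 */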
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

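/*
 * Decrement the semaphore, waiting at most @ms milliseconds.  Returns 0
 * once the semaphore has been taken and -1 on timeout.  A zero or
 * negative timeout only polls: sem_trywait() is cheaper than an
 * already-expired sem_timedwait().
 */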
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

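/*
 * Block until the semaphore can be decremented.  sem_wait() may be
 * interrupted by a signal, so EINTR is retried rather than treated as
 * an error.
 */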
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

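/*
 * QemuEvent is built directly on the futex(2) system call on Linux.
 * futex_wake() wakes up to @n threads sleeping on ev->value, and
 * futex_wait() puts the caller to sleep only if ev->value still equals
 * @val, which closes the race between checking the value and blocking.
 * Other hosts emulate the same two primitives with a mutex/condvar pair.
 */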
#ifdef __linux__
#define futex(...)              syscall(__NR_futex, __VA_ARGS__)

static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
}
#else
static inline void futex_wake(QemuEvent *ev, int n)
{
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

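/*
 * The three states: EV_SET means the event is signalled, EV_FREE means
 * it is not signalled and nobody is waiting, EV_BUSY means it is not
 * signalled and at least one waiter may be blocked in futex_wait().
 */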
#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

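/*
 * Signal the event.  The unconditional xchg moves it to EV_SET; if the
 * previous state was EV_BUSY there may be sleepers, so wake them all.
 */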
void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

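/*
 * Wait for the event to become set.  If it is currently EV_FREE, the
 * waiter first advertises itself by moving the event to EV_BUSY with a
 * compare-and-swap, then sleeps in futex_wait() as long as the value is
 * still EV_BUSY.
 */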
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}

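/*
 * Create a new thread.  Every signal is blocked across pthread_create()
 * so the child starts with a fully blocked signal mask and signal
 * handling stays with the iothread; the caller's mask is restored
 * afterwards.  When thread naming is enabled and glibc is recent enough
 * (>= 2.12), the name is attached with pthread_setname_np().
 */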
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err)
        error_exit(err, __func__);

#if defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 12))
    if (name_threads) {
        pthread_setname_np(thread->thread, name);
    }
#endif

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

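/*
 * Wait for a joinable thread (QEMU_THREAD_JOINABLE) to finish and hand
 * back the value it returned or passed to qemu_thread_exit().
 */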
void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}