/* xref: /openbmc/qemu/util/qemu-thread-win32.c (revision 91bfcdb0) */
/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
#include <process.h>
#include <assert.h>
#include <limits.h>

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    /* But note we don't actually name them on Windows yet */
    name_threads = enable;

    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
}

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    mutex->owner = 0;
    InitializeCriticalSection(&mutex->lock);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    assert(mutex->owner == 0);
    DeleteCriticalSection(&mutex->lock);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);

    /* Win32 CRITICAL_SECTIONs are recursive.  Assert that we're not
     * using them as such.
     */
    assert(mutex->owner == 0);
    mutex->owner = GetCurrentThreadId();
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    int owned;

    owned = TryEnterCriticalSection(&mutex->lock);
    if (owned) {
        assert(mutex->owner == 0);
        mutex->owner = GetCurrentThreadId();
    }
    return !owned;
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    assert(mutex->owner == GetCurrentThreadId());
    mutex->owner = 0;
    LeaveCriticalSection(&mutex->lock);
}
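
/* A minimal usage sketch, compiled out; the example_* names are
 * hypothetical and not part of QEMU.  The owner field maintained above
 * exists purely to catch misuse: taking the same QemuMutex twice from
 * one thread trips the assertion in qemu_mutex_lock, even though the
 * underlying CRITICAL_SECTION would happily recurse.
 */
#if 0
static QemuMutex example_lock;   /* initialized once with qemu_mutex_init */
static int example_counter;

static void example_increment(void)
{
    qemu_mutex_lock(&example_lock);
    example_counter++;           /* protected by example_lock */
    qemu_mutex_unlock(&example_lock);
}
#endif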

void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));

    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
    if (!cond->sema) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = CreateEvent(NULL,    /* security */
                                       FALSE,   /* auto-reset */
                                       FALSE,   /* not signaled */
                                       NULL);   /* name */
    if (!cond->continue_event) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_cond_destroy(QemuCond *cond)
{
    BOOL result;
    result = CloseHandle(cond->continue_event);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = 0;
    result = CloseHandle(cond->sema);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }
    cond->sema = 0;
}

void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters.  cond->waiters is
     * incremented by qemu_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only while a signaling thread (qemu_cond_signal or
     * qemu_cond_broadcast) holds the mutex.  So it cannot be
     * decremented concurrently with this particular access.
     */
    cond->target = cond->waiters - 1;
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_cond_broadcast(QemuCond *cond)
{
    BOOL result;
    /*
     * As in qemu_cond_signal, access to cond->waiters and
     * cond->target is locked via the external mutex.
     */
    if (cond->waiters == 0) {
        return;
    }

    cond->target = 0;
    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }

    /*
     * At this point all waiters continue.  Each one takes its
     * slice of the semaphore.  Now it's our turn to wait: since
     * the external mutex is held, no thread can leave qemu_cond_wait
     * yet.  For this reason, we can be sure that no thread gets
     * a chance to eat *more* than one slice.  OTOH, it means
     * that the last waiter must send us a wake-up.
     */
    WaitForSingleObject(cond->continue_event, INFINITE);
}
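
/* Worked example of the rendezvous protocol (the thread counts are
 * hypothetical): suppose three threads are blocked in qemu_cond_wait,
 * so cond->waiters == 3.  qemu_cond_broadcast sets cond->target = 0 and
 * releases three semaphore slices.  Each waiter wakes, consumes one
 * slice and decrements cond->waiters; the decrements return 2, 1 and 0
 * in some order, and only the waiter that reaches cond->target == 0
 * signals continue_event, releasing the broadcaster.  qemu_cond_signal
 * runs the same protocol with target = waiters - 1 and a single slice,
 * so exactly one waiter matches.
 */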

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendezvous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers a thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}
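
/* A minimal usage sketch, compiled out; the example_* names are
 * hypothetical.  This is the standard monitor pattern: the predicate is
 * re-checked in a loop, and both signaling and waiting happen with the
 * external mutex held, as the waiter-accounting comments above require.
 */
#if 0
static QemuMutex example_lock;
static QemuCond example_cond;
static bool example_ready;

static void example_consumer(void)
{
    qemu_mutex_lock(&example_lock);
    while (!example_ready) {
        qemu_cond_wait(&example_cond, &example_lock);
    }
    /* ... consume the state guarded by example_lock ... */
    qemu_mutex_unlock(&example_lock);
}

static void example_producer(void)
{
    qemu_mutex_lock(&example_lock);
    example_ready = true;
    qemu_cond_signal(&example_cond);
    qemu_mutex_unlock(&example_lock);
}
#endif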

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    /* Counting semaphore with the given initial count.  */
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
    if (!sem->sema) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}
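
/* Illustrative sketch, compiled out; example_poll_work is hypothetical.
 * qemu_sem_timedwait returns 0 when a post was consumed and -1 on
 * timeout, so bounded waits read naturally:
 */
#if 0
static bool example_poll_work(QemuSemaphore *sem)
{
    /* Wait up to 100 ms for a qemu_sem_post from another thread. */
    return qemu_sem_timedwait(sem, 100) == 0;
}
#endif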

/* Wrap a Win32 manual-reset event with a fast userspace path.  The idea
 * is to reset the Win32 event lazily, as part of a test-reset-test-wait
 * sequence.  Such a sequence is, indeed, how QemuEvents are used by
 * RCU and other subsystems!
 *
 * Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by SetEvent to wake
 *   the waiters (the Win32 counterpart of the POSIX futex_wake)
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy (and is faster than cmpxchg).
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1
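
/* Worked example of why these values make the OR in qemu_event_reset
 * safe: EV_BUSY is -1, i.e. all bits set, and EV_FREE is 1, so
 * value |= EV_FREE takes EV_SET (0) to EV_FREE (1) but leaves both
 * EV_FREE (1 | 1 == 1) and EV_BUSY (-1 | 1 == -1) unchanged.  A racing
 * reset therefore can never clobber a concurrent free->busy transition,
 * which is exactly the busy->free prohibition described above.
 */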

void qemu_event_init(QemuEvent *ev, bool init)
{
    /* Manual reset.  */
    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
    CloseHandle(ev->event);
}

void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            SetEvent(ev->event);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /* If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /* qemu_event_set is not yet going to call SetEvent, but we are
             * going to do another check for EV_SET below when setting EV_BUSY.
             * At that point it is safe to call WaitForSingleObject.
             */
            ResetEvent(ev->event);

            /* Tell qemu_event_set that there are waiters.  No need to retry
             * because there cannot be a concurrent busy->free transition.
             * After the CAS, the event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                value = EV_SET;
            } else {
                value = EV_BUSY;
            }
        }
        if (value == EV_BUSY) {
            WaitForSingleObject(ev->event, INFINITE);
        }
    }
}
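
/* A minimal sketch of the test-reset-test-wait sequence described at
 * the top of this block, compiled out; the example_* names are
 * hypothetical, and atomic_mb_read/atomic_mb_set are the qemu/atomic.h
 * helpers already used in this file.  The second check is what makes
 * the lazy ResetEvent safe: a qemu_event_set that lands between the
 * reset and the wait is never missed.
 */
#if 0
static QemuEvent example_ev;
static bool example_done;

static void example_wait_for_done(void)
{
    while (!atomic_mb_read(&example_done)) {
        qemu_event_reset(&example_ev);
        if (atomic_mb_read(&example_done)) {
            break;
        }
        qemu_event_wait(&example_ev);
    }
}

static void example_finish(void)
{
    atomic_mb_set(&example_done, true);
    qemu_event_set(&example_ev);
}
#endif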

struct QemuThreadData {
    /* Passed to win32_start_routine.  */
    void             *(*start_routine)(void *);
    void             *arg;
    short             mode;
    NotifierList      exit;

    /* Only used for joinable threads.  */
    bool              exited;
    void             *ret;
    CRITICAL_SECTION  cs;
};

static bool atexit_registered;
static NotifierList main_thread_exit;

static __thread QemuThreadData *qemu_thread_data;

static void run_main_thread_exit(void)
{
    notifier_list_notify(&main_thread_exit, NULL);
}

void qemu_thread_atexit_add(Notifier *notifier)
{
    if (!qemu_thread_data) {
        if (!atexit_registered) {
            atexit_registered = true;
            atexit(run_main_thread_exit);
        }
        notifier_list_add(&main_thread_exit, notifier);
    } else {
        notifier_list_add(&qemu_thread_data->exit, notifier);
    }
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}
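
/* Illustrative sketch, compiled out; the example_* names are
 * hypothetical.  A subsystem with per-thread state registers a notifier
 * so the state is torn down when the thread exits; for the main thread
 * (which has no qemu_thread_data) the notifier runs from the atexit()
 * handler installed above.
 */
#if 0
static __thread Notifier example_exit_notifier;

static void example_thread_cleanup(Notifier *n, void *data)
{
    /* ... release this thread's resources ... */
}

static void example_thread_setup(void)
{
    example_exit_notifier.notify = example_thread_cleanup;
    qemu_thread_atexit_add(&example_exit_notifier);
}
#endif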

static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    qemu_thread_data = data;
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    notifier_list_notify(&data->exit, NULL);
    if (data->mode == QEMU_THREAD_JOINABLE) {
        data->ret = arg;
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    } else {
        g_free(data);
    }
    _endthreadex(0);
}

void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non-racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;
    notifier_list_init(&data->exit);

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = data;
}
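
/* A minimal caller, compiled out; the example_* names are hypothetical.
 * QEMU_THREAD_JOINABLE keeps the QemuThreadData (and its critical
 * section) alive until qemu_thread_join collects the return value;
 * QEMU_THREAD_DETACHED threads free it themselves in qemu_thread_exit.
 * The name argument is currently ignored on Windows (see
 * qemu_thread_naming above).
 */
#if 0
static void *example_worker(void *opaque)
{
    return opaque;                      /* returned via qemu_thread_join */
}

static void example_spawn_and_join(void)
{
    QemuThread thread;
    void *ret;

    qemu_thread_create(&thread, "example", example_worker,
                       (void *)0x2a, QEMU_THREAD_JOINABLE);
    ret = qemu_thread_join(&thread);    /* ret == (void *)0x2a */
}
#endif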

void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}

HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE,
                            thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}
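
/* Illustrative sketch, compiled out.  The handle is opened with
 * SYNCHRONIZE | THREAD_SUSPEND_RESUME, so a caller can wait on it (as
 * qemu_thread_join does above) or, with care, suspend and resume the
 * thread; either way the caller owns the handle and must CloseHandle
 * it.  Suspending an arbitrary thread is only safe when the caller
 * knows it cannot hold locks the current thread needs.
 */
#if 0
static void example_pause_briefly(QemuThread *thread)
{
    HANDLE handle = qemu_thread_get_handle(thread);

    if (handle) {
        SuspendThread(handle);
        /* ... inspect the stopped thread ... */
        ResumeThread(handle);
        CloseHandle(handle);
    }
}
#endif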

bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}