/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "qemu/thread.h"
#include <process.h>
#include <assert.h>
#include <limits.h>

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    mutex->owner = 0;
    InitializeCriticalSection(&mutex->lock);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    assert(mutex->owner == 0);
    DeleteCriticalSection(&mutex->lock);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);

    /* Win32 CRITICAL_SECTIONs are recursive.  Assert that we're not
     * using them as such.
     */
    assert(mutex->owner == 0);
    mutex->owner = GetCurrentThreadId();
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    int owned;

    owned = TryEnterCriticalSection(&mutex->lock);
    if (owned) {
        assert(mutex->owner == 0);
        mutex->owner = GetCurrentThreadId();
    }
    return !owned;
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    assert(mutex->owner == GetCurrentThreadId());
    mutex->owner = 0;
    LeaveCriticalSection(&mutex->lock);
}
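
/*
 * Illustrative sketch, not part of the original file: qemu_mutex_trylock()
 * above follows the pthread convention of returning 0 on success and
 * nonzero on failure, and qemu_mutex_lock() asserts mutex->owner == 0, so
 * these mutexes must not be taken recursively even though the underlying
 * CRITICAL_SECTION would allow it.  The names "lock" and "counter" below
 * are made up for the example:
 *
 *     static QemuMutex lock;    // assume qemu_mutex_init(&lock) ran earlier
 *     static int counter;
 *
 *     if (qemu_mutex_trylock(&lock) == 0) {
 *         counter++;                     // acquired without blocking
 *         qemu_mutex_unlock(&lock);
 *     } else {
 *         qemu_mutex_lock(&lock);        // contended: block instead
 *         counter++;
 *         qemu_mutex_unlock(&lock);
 *     }
 */
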
void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));

    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
    if (!cond->sema) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = CreateEvent(NULL,    /* security */
                                       FALSE,   /* auto-reset */
                                       FALSE,   /* not signaled */
                                       NULL);   /* name */
    if (!cond->continue_event) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_cond_destroy(QemuCond *cond)
{
    BOOL result;
    result = CloseHandle(cond->continue_event);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = 0;
    result = CloseHandle(cond->sema);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }
    cond->sema = 0;
}

void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters.  cond->waiters is
     * incremented by qemu_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only if another thread is executing qemu_cond_broadcast and
     * has the mutex.  So, it also cannot be decremented concurrently
     * with this particular access.
     */
    cond->target = cond->waiters - 1;
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_cond_broadcast(QemuCond *cond)
{
    BOOL result;
    /*
     * As in qemu_cond_signal, access to cond->waiters and
     * cond->target is locked via the external mutex.
     */
    if (cond->waiters == 0) {
        return;
    }

    cond->target = 0;
    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }

    /*
     * At this point all waiters continue.  Each one takes its
     * slice of the semaphore.  Now it's our turn to wait: Since
     * the external mutex is held, no thread can leave cond_wait,
     * yet.  For this reason, we can be sure that no thread gets
     * a chance to eat *more* than one slice.  OTOH, it means
     * that the last waiter must send us a wake-up.
     */
    WaitForSingleObject(cond->continue_event, INFINITE);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendez-vous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    /* Counting semaphore: initial count "init", effectively no upper bound.  */
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}
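
/*
 * Illustrative sketch, not part of the original file: the condition
 * variable above relies on the usual POSIX-style protocol, i.e.
 * qemu_cond_signal()/qemu_cond_broadcast() are called with the external
 * mutex held, because cond->waiters and cond->target are only protected
 * by that mutex.  The names "lock", "cond" and "ready" are made up for
 * the example:
 *
 *     // consumer
 *     qemu_mutex_lock(&lock);
 *     while (!ready) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     qemu_mutex_unlock(&lock);
 *
 *     // producer
 *     qemu_mutex_lock(&lock);
 *     ready = true;
 *     qemu_cond_signal(&cond);       // or qemu_cond_broadcast(&cond)
 *     qemu_mutex_unlock(&lock);
 *
 * The semaphore wrappers are simpler: qemu_sem_timedwait() returns 0 when
 * the semaphore was decremented and -1 on timeout, much like POSIX
 * sem_timedwait().
 */
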
struct QemuThreadData {
    /* Passed to win32_start_routine.  */
    void *(*start_routine)(void *);
    void *arg;
    short mode;

    /* Only used for joinable threads.  */
    bool exited;
    void *ret;
    CRITICAL_SECTION cs;
};

static __thread QemuThreadData *qemu_thread_data;

static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    if (data->mode == QEMU_THREAD_DETACHED) {
        g_free(data);
        data = NULL;
    }
    qemu_thread_data = data;
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    if (data) {
        assert(data->mode != QEMU_THREAD_DETACHED);
        data->ret = arg;
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    }
    _endthreadex(0);
}

void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (!data) {
        return NULL;
    }
    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    assert(data->mode != QEMU_THREAD_DETACHED);
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}

void qemu_thread_create(QemuThread *thread,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = (mode == QEMU_THREAD_DETACHED) ? NULL : data;
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}

HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (!data) {
        return NULL;
    }

    assert(data->mode != QEMU_THREAD_DETACHED);
    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE,
                            thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}
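
/*
 * Illustrative sketch, not part of the original file: creating and reaping
 * a joinable thread with this API.  The worker() function is made up for
 * the example, and QEMU_THREAD_JOINABLE is assumed to be the mode constant
 * defined in "qemu/thread.h" alongside QEMU_THREAD_DETACHED:
 *
 *     static void *worker(void *opaque)
 *     {
 *         // ... do work ...
 *         return opaque;             // becomes the qemu_thread_join() result
 *     }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, worker, NULL, QEMU_THREAD_JOINABLE);
 *     void *ret = qemu_thread_join(&t);
 *
 * For QEMU_THREAD_DETACHED threads, qemu_thread_create() leaves
 * thread->data NULL, so qemu_thread_join() and qemu_thread_get_handle()
 * return NULL and win32_start_routine() frees the QemuThreadData itself.
 */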