/*
 * os-win32.c
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010-2016 Red Hat, Inc.
 *
 * QEMU library functions for win32 which are shared between QEMU and
 * the QEMU tools.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The implementation of g_poll (functions poll_rest, g_poll) at the end of
 * this file is based on code from GNOME glib-2 and uses a different license,
 * see the license comment there.
 */
#include "qemu/osdep.h"
#include <windows.h>
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"

/* this must come after including "trace.h" */
#include <shlobj.h>

void *qemu_oom_check(void *ptr)
{
    if (ptr == NULL) {
        fprintf(stderr, "Failed to allocate memory: %lu\n", GetLastError());
        abort();
    }
    return ptr;
}

void *qemu_try_memalign(size_t alignment, size_t size)
{
    void *ptr;

    if (!size) {
        abort();
    }
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
    trace_qemu_memalign(alignment, size, ptr);
    return ptr;
}

void *qemu_memalign(size_t alignment, size_t size)
{
    return qemu_oom_check(qemu_try_memalign(alignment, size));
}

static int get_allocation_granularity(void)
{
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    return system_info.dwAllocationGranularity;
}

void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared)
{
    void *ptr;

    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
    trace_qemu_anon_ram_alloc(size, ptr);

    if (ptr && align) {
        *align = MAX(get_allocation_granularity(), getpagesize());
    }
    return ptr;
}

void qemu_vfree(void *ptr)
{
    trace_qemu_vfree(ptr);
    if (ptr) {
        VirtualFree(ptr, 0, MEM_RELEASE);
    }
}

void qemu_anon_ram_free(void *ptr, size_t size)
{
    trace_qemu_anon_ram_free(ptr, size);
    if (ptr) {
        VirtualFree(ptr, 0, MEM_RELEASE);
    }
}
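
/*
 * Usage sketch (illustrative only, not part of the build): buffers from
 * qemu_memalign()/qemu_try_memalign() are backed by VirtualAlloc() rather
 * than the C heap, so they must be released with qemu_vfree(), not free():
 *
 *     void *buf = qemu_memalign(4096, len);
 *     ...
 *     qemu_vfree(buf);
 *
 * The same pairing holds for qemu_anon_ram_alloc()/qemu_anon_ram_free().
 * The alignment argument is not applied explicitly; allocations from
 * VirtualAlloc() are assumed to be aligned to the system allocation
 * granularity (typically 64 KiB), which covers the alignments requested
 * by callers.
 */
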
#ifndef CONFIG_LOCALTIME_R
/* FIXME: add proper locking */
struct tm *gmtime_r(const time_t *timep, struct tm *result)
{
    struct tm *p = gmtime(timep);
    memset(result, 0, sizeof(*result));
    if (p) {
        *result = *p;
        p = result;
    }
    return p;
}

/* FIXME: add proper locking */
struct tm *localtime_r(const time_t *timep, struct tm *result)
{
    struct tm *p = localtime(timep);
    memset(result, 0, sizeof(*result));
    if (p) {
        *result = *p;
        p = result;
    }
    return p;
}
#endif /* CONFIG_LOCALTIME_R */

void qemu_set_block(int fd)
{
    unsigned long opt = 0;
    WSAEventSelect(fd, NULL, 0);
    ioctlsocket(fd, FIONBIO, &opt);
}

void qemu_set_nonblock(int fd)
{
    unsigned long opt = 1;
    ioctlsocket(fd, FIONBIO, &opt);
    qemu_fd_register(fd);
}

int socket_set_fast_reuse(int fd)
{
    /* Enabling the reuse of an endpoint that was used by a socket still in
     * TIME_WAIT state is usually performed by setting SO_REUSEADDR. On Windows
     * fast reuse is the default and SO_REUSEADDR does strange things. So we
     * don't have to do anything here. More info can be found at:
     * http://msdn.microsoft.com/en-us/library/windows/desktop/ms740621.aspx */
    return 0;
}


static int socket_error(void)
{
    switch (WSAGetLastError()) {
    case 0:
        return 0;
    case WSAEINTR:
        return EINTR;
    case WSAEINVAL:
        return EINVAL;
    case WSA_INVALID_HANDLE:
        return EBADF;
    case WSA_NOT_ENOUGH_MEMORY:
        return ENOMEM;
    case WSA_INVALID_PARAMETER:
        return EINVAL;
    case WSAENAMETOOLONG:
        return ENAMETOOLONG;
    case WSAENOTEMPTY:
        return ENOTEMPTY;
    case WSAEWOULDBLOCK:
        /* not using EWOULDBLOCK as we don't want code to have
         * to check both EWOULDBLOCK and EAGAIN */
        return EAGAIN;
    case WSAEINPROGRESS:
        return EINPROGRESS;
    case WSAEALREADY:
        return EALREADY;
    case WSAENOTSOCK:
        return ENOTSOCK;
    case WSAEDESTADDRREQ:
        return EDESTADDRREQ;
    case WSAEMSGSIZE:
        return EMSGSIZE;
    case WSAEPROTOTYPE:
        return EPROTOTYPE;
    case WSAENOPROTOOPT:
        return ENOPROTOOPT;
    case WSAEPROTONOSUPPORT:
        return EPROTONOSUPPORT;
    case WSAEOPNOTSUPP:
        return EOPNOTSUPP;
    case WSAEAFNOSUPPORT:
        return EAFNOSUPPORT;
    case WSAEADDRINUSE:
        return EADDRINUSE;
    case WSAEADDRNOTAVAIL:
        return EADDRNOTAVAIL;
    case WSAENETDOWN:
        return ENETDOWN;
    case WSAENETUNREACH:
        return ENETUNREACH;
    case WSAENETRESET:
        return ENETRESET;
    case WSAECONNABORTED:
        return ECONNABORTED;
    case WSAECONNRESET:
        return ECONNRESET;
    case WSAENOBUFS:
        return ENOBUFS;
    case WSAEISCONN:
        return EISCONN;
    case WSAENOTCONN:
        return ENOTCONN;
    case WSAETIMEDOUT:
        return ETIMEDOUT;
    case WSAECONNREFUSED:
        return ECONNREFUSED;
    case WSAELOOP:
        return ELOOP;
    case WSAEHOSTUNREACH:
        return EHOSTUNREACH;
    default:
        return EIO;
    }
}

int inet_aton(const char *cp, struct in_addr *ia)
{
    uint32_t addr = inet_addr(cp);
    if (addr == 0xffffffff) {
        return 0;
    }
    ia->s_addr = addr;
    return 1;
}

void qemu_set_cloexec(int fd)
{
}

/* Offset between 1/1/1601 and 1/1/1970 in 100 nanosec units */
#define _W32_FT_OFFSET (116444736000000000ULL)
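
/*
 * How the conversion below works: GetSystemTimeAsFileTime() yields the
 * number of 100 ns intervals since 1601-01-01 (UTC).  Subtracting
 * _W32_FT_OFFSET rebases that count to the Unix epoch, and dividing by
 * 10^7 gives whole seconds.  The microsecond part can be taken directly
 * from (ns100 / 10) % 10^6 because _W32_FT_OFFSET is an exact multiple
 * of 10^7 and therefore never changes the sub-second remainder.
 */
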
int qemu_gettimeofday(qemu_timeval *tp)
{
    union {
        unsigned long long ns100; /* time since 1 Jan 1601 in 100ns units */
        FILETIME ft;
    } _now;

    if (tp) {
        GetSystemTimeAsFileTime(&_now.ft);
        tp->tv_usec = (long)((_now.ns100 / 10ULL) % 1000000ULL);
        tp->tv_sec = (long)((_now.ns100 - _W32_FT_OFFSET) / 10000000ULL);
    }
    /* Always return 0 as per Open Group Base Specifications Issue 6.
       Do not set errno on error. */
    return 0;
}

int qemu_get_thread_id(void)
{
    return GetCurrentThreadId();
}

char *
qemu_get_local_state_pathname(const char *relative_pathname)
{
    HRESULT result;
    char base_path[MAX_PATH + 1] = "";

    result = SHGetFolderPath(NULL, CSIDL_COMMON_APPDATA, NULL,
                             /* SHGFP_TYPE_CURRENT */ 0, base_path);
    if (result != S_OK) {
        /* misconfigured environment */
        g_critical("CSIDL_COMMON_APPDATA unavailable: %ld", (long)result);
        abort();
    }
    return g_strdup_printf("%s" G_DIR_SEPARATOR_S "%s", base_path,
                           relative_pathname);
}

void qemu_set_tty_echo(int fd, bool echo)
{
    HANDLE handle = (HANDLE)_get_osfhandle(fd);
    DWORD dwMode = 0;

    if (handle == INVALID_HANDLE_VALUE) {
        return;
    }

    GetConsoleMode(handle, &dwMode);

    if (echo) {
        SetConsoleMode(handle, dwMode | ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT);
    } else {
        SetConsoleMode(handle,
                       dwMode & ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT));
    }
}

static char exec_dir[PATH_MAX];

void qemu_init_exec_dir(const char *argv0)
{
    char *p;
    char buf[MAX_PATH];
    DWORD len;

    len = GetModuleFileName(NULL, buf, sizeof(buf) - 1);
    if (len == 0) {
        return;
    }

    buf[len] = 0;
    p = buf + len - 1;
    while (p != buf && *p != '\\') {
        p--;
    }
    *p = 0;
    if (access(buf, R_OK) == 0) {
        pstrcpy(exec_dir, sizeof(exec_dir), buf);
    }
}

char *qemu_get_exec_dir(void)
{
    return g_strdup(exec_dir);
}

#if !GLIB_CHECK_VERSION(2, 50, 0)
/*
 * The original implementation of g_poll from glib has a problem on Windows
 * when using timeouts < 10 ms.
 *
 * Whenever g_poll is called with timeout < 10 ms, it does a quick poll instead
 * of wait. This causes significant performance degradation of QEMU.
 *
 * The following code is a copy of the original code from glib/gpoll.c
 * (glib commit 20f4d1820b8d4d0fc4447188e33efffd6d4a88d8 from 2014-02-19).
 * Some debug code was removed and the code was reformatted.
 * All other code modifications are marked with 'QEMU'.
 */

/*
 * gpoll.c: poll(2) abstraction
 * Copyright 1998 Owen Taylor
 * Copyright 2008 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
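
/*
 * Overview of poll_rest() below: it performs a single Win32 wait on the
 * collected handles (plus the thread's message queue when poll_msgs is
 * set), maps the signalled object back to the matching GPollFDs by
 * filling in their revents, and, when called with a zero timeout,
 * recurses to pick up any further objects that are already ready.  It
 * returns -1 on a failed wait, 0 on timeout or an alertable I/O
 * completion, and otherwise a positive count of ready objects.
 */
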
static int poll_rest(gboolean poll_msgs, HANDLE *handles, gint nhandles,
                     GPollFD *fds, guint nfds, gint timeout)
{
    DWORD ready;
    GPollFD *f;
    int recursed_result;

    if (poll_msgs) {
        /* Wait for either messages or handles
         * -> Use MsgWaitForMultipleObjectsEx
         */
        ready = MsgWaitForMultipleObjectsEx(nhandles, handles, timeout,
                                            QS_ALLINPUT, MWMO_ALERTABLE);

        if (ready == WAIT_FAILED) {
            gchar *emsg = g_win32_error_message(GetLastError());
            g_warning("MsgWaitForMultipleObjectsEx failed: %s", emsg);
            g_free(emsg);
        }
    } else if (nhandles == 0) {
        /* No handles to wait for, just the timeout */
        if (timeout == INFINITE) {
            ready = WAIT_FAILED;
        } else {
            SleepEx(timeout, TRUE);
            ready = WAIT_TIMEOUT;
        }
    } else {
        /* Wait for just handles
         * -> Use WaitForMultipleObjectsEx
         */
        ready =
            WaitForMultipleObjectsEx(nhandles, handles, FALSE, timeout, TRUE);
        if (ready == WAIT_FAILED) {
            gchar *emsg = g_win32_error_message(GetLastError());
            g_warning("WaitForMultipleObjectsEx failed: %s", emsg);
            g_free(emsg);
        }
    }

    if (ready == WAIT_FAILED) {
        return -1;
    } else if (ready == WAIT_TIMEOUT || ready == WAIT_IO_COMPLETION) {
        return 0;
    } else if (poll_msgs && ready == WAIT_OBJECT_0 + nhandles) {
        for (f = fds; f < &fds[nfds]; ++f) {
            if (f->fd == G_WIN32_MSG_HANDLE && f->events & G_IO_IN) {
                f->revents |= G_IO_IN;
            }
        }

        /* If we have a timeout, or no handles to poll, be satisfied
         * with just noticing we have messages waiting.
         */
        if (timeout != 0 || nhandles == 0) {
            return 1;
        }

        /* If no timeout and handles to poll, recurse to poll them,
         * too.
         */
        recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
        return (recursed_result == -1) ? -1 : 1 + recursed_result;
    } else if (/* QEMU: removed the following unneeded statement which causes
                * a compiler warning: ready >= WAIT_OBJECT_0 && */
               ready < WAIT_OBJECT_0 + nhandles) {
        for (f = fds; f < &fds[nfds]; ++f) {
            if ((HANDLE) f->fd == handles[ready - WAIT_OBJECT_0]) {
                f->revents = f->events;
            }
        }

        /* If no timeout and polling several handles, recurse to poll
         * the rest of them.
         */
        if (timeout == 0 && nhandles > 1) {
            /* Remove the handle that fired */
            int i;
            for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) {
                handles[i - 1] = handles[i];
            }
            nhandles--;
            recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
            return (recursed_result == -1) ? -1 : 1 + recursed_result;
        }
        return 1;
    }

    return 0;
}
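
/*
 * Note on the strategy of g_poll() below (not part of the glib original):
 * the unique HANDLEs from the GPollFD array are collected first (Windows
 * limits a single wait to MAXIMUM_WAIT_OBJECTS, i.e. 64 handles); when
 * several objects are polled, a zero-timeout probe runs before blocking
 * with the caller's timeout.  QEMU's modification is to honour timeouts
 * below 10 ms instead of returning after the zero-timeout probe alone.
 */
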
gint g_poll(GPollFD *fds, guint nfds, gint timeout)
{
    HANDLE handles[MAXIMUM_WAIT_OBJECTS];
    gboolean poll_msgs = FALSE;
    GPollFD *f;
    gint nhandles = 0;
    int retval;

    for (f = fds; f < &fds[nfds]; ++f) {
        if (f->fd == G_WIN32_MSG_HANDLE && (f->events & G_IO_IN)) {
            poll_msgs = TRUE;
        } else if (f->fd > 0) {
            /* Don't add the same handle several times into the array, as
             * docs say that is not allowed, even if it actually does seem
             * to work.
             */
            gint i;

            for (i = 0; i < nhandles; i++) {
                if (handles[i] == (HANDLE) f->fd) {
                    break;
                }
            }

            if (i == nhandles) {
                if (nhandles == MAXIMUM_WAIT_OBJECTS) {
                    g_warning("Too many handles to wait for!\n");
                    break;
                } else {
                    handles[nhandles++] = (HANDLE) f->fd;
                }
            }
        }
    }

    for (f = fds; f < &fds[nfds]; ++f) {
        f->revents = 0;
    }

    if (timeout == -1) {
        timeout = INFINITE;
    }

    /* Polling for several things? */
    if (nhandles > 1 || (nhandles > 0 && poll_msgs)) {
        /* First check if one or several of them are immediately
         * available
         */
        retval = poll_rest(poll_msgs, handles, nhandles, fds, nfds, 0);

        /* If not, and we have a significant timeout, poll again with
         * timeout then. Note that this will return indication for only
         * one event, or only for messages. We ignore timeouts less than
         * ten milliseconds as they are mostly pointless on Windows, the
         * MsgWaitForMultipleObjectsEx() call will timeout right away
         * anyway.
         *
         * Modification for QEMU: replaced timeout >= 10 by timeout > 0.
         */
        if (retval == 0 && (timeout == INFINITE || timeout > 0)) {
            retval = poll_rest(poll_msgs, handles, nhandles,
                               fds, nfds, timeout);
        }
    } else {
        /* Just polling for one thing, so no need to check first if
         * available immediately
         */
        retval = poll_rest(poll_msgs, handles, nhandles, fds, nfds, timeout);
    }

    if (retval == -1) {
        for (f = fds; f < &fds[nfds]; ++f) {
            f->revents = 0;
        }
    }

    return retval;
}
#endif

int getpagesize(void)
{
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    return system_info.dwPageSize;
}

void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus,
                     Error **errp)
{
    int i;
    size_t pagesize = getpagesize();

    memory = (memory + pagesize - 1) & -pagesize;
    for (i = 0; i < memory / pagesize; i++) {
        memset(area + pagesize * i, 0, 1);
    }
}

uint64_t qemu_get_pmem_size(const char *filename, Error **errp)
{
    error_setg(errp, "pmem support not available");
    return 0;
}

char *qemu_get_pid_name(pid_t pid)
{
    /* XXX Implement me */
    abort();
}


pid_t qemu_fork(Error **errp)
{
    errno = ENOSYS;
    error_setg_errno(errp, errno,
                     "cannot fork child process");
    return -1;
}
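
/*
 * The wrappers below all follow one pattern: call the Winsock function and,
 * on failure, translate WSAGetLastError() into a POSIX errno value through
 * socket_error() so callers can handle errors portably.  Each wrapper is
 * preceded by #undef so the call inside it reaches the real Winsock symbol
 * rather than the QEMU redirection macro of the same name.  Illustrative
 * caller-side sketch (hypothetical, not from this file):
 *
 *     if (qemu_connect_wrap(fd, addr, addrlen) < 0) {
 *         error_setg_errno(errp, errno, "connect failed");
 *     }
 */
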
#undef connect
int qemu_connect_wrap(int sockfd, const struct sockaddr *addr,
                      socklen_t addrlen)
{
    int ret;
    ret = connect(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef listen
int qemu_listen_wrap(int sockfd, int backlog)
{
    int ret;
    ret = listen(sockfd, backlog);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef bind
int qemu_bind_wrap(int sockfd, const struct sockaddr *addr,
                   socklen_t addrlen)
{
    int ret;
    ret = bind(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef socket
int qemu_socket_wrap(int domain, int type, int protocol)
{
    int ret;
    ret = socket(domain, type, protocol);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef accept
int qemu_accept_wrap(int sockfd, struct sockaddr *addr,
                     socklen_t *addrlen)
{
    int ret;
    ret = accept(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef shutdown
int qemu_shutdown_wrap(int sockfd, int how)
{
    int ret;
    ret = shutdown(sockfd, how);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef ioctlsocket
int qemu_ioctlsocket_wrap(int fd, int req, void *val)
{
    int ret;
    ret = ioctlsocket(fd, req, val);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef closesocket
int qemu_closesocket_wrap(int fd)
{
    int ret;
    ret = closesocket(fd);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef getsockopt
int qemu_getsockopt_wrap(int sockfd, int level, int optname,
                         void *optval, socklen_t *optlen)
{
    int ret;
    ret = getsockopt(sockfd, level, optname, optval, optlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef setsockopt
int qemu_setsockopt_wrap(int sockfd, int level, int optname,
                         const void *optval, socklen_t optlen)
{
    int ret;
    ret = setsockopt(sockfd, level, optname, optval, optlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef getpeername
int qemu_getpeername_wrap(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen)
{
    int ret;
    ret = getpeername(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef getsockname
int qemu_getsockname_wrap(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen)
{
    int ret;
    ret = getsockname(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef send
ssize_t qemu_send_wrap(int sockfd, const void *buf, size_t len, int flags)
{
    int ret;
    ret = send(sockfd, buf, len, flags);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef sendto
ssize_t qemu_sendto_wrap(int sockfd, const void *buf, size_t len, int flags,
                         const struct sockaddr *addr, socklen_t addrlen)
{
    int ret;
    ret = sendto(sockfd, buf, len, flags, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef recv
ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags)
{
    int ret;
    ret = recv(sockfd, buf, len, flags);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef recvfrom
ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags,
                           struct sockaddr *addr, socklen_t *addrlen)
{
    int ret;
    ret = recvfrom(sockfd, buf, len, flags, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}
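
/*
 * Note on qemu_write_pidfile() below: the file is opened with OPEN_ALWAYS
 * and FILE_SHARE_READ, so other writers are only rejected while the handle
 * is open; the handle is closed as soon as the PID has been written, so no
 * lock is held for the lifetime of the process.  Illustrative caller sketch
 * (hypothetical):
 *
 *     Error *err = NULL;
 *     if (!qemu_write_pidfile("qemu.pid", &err)) {
 *         error_report_err(err);
 *     }
 */
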
"Failed to write PID file"); 807 return false; 808 } 809 return true; 810 } 811