/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <zlib.h>
#include "qemu/madvise.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "migration.h"
#include "migration-stats.h"
#include "qemu-file.h"
#include "trace.h"
#include "options.h"
#include "qapi/error.h"
#include "rdma.h"
#include "io/channel-file.h"

#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)

struct QEMUFile {
    QIOChannel *ioc;
    bool is_writable;

    int buf_index;
    int buf_size; /* 0 when writing */
    uint8_t buf[IO_BUF_SIZE];

    DECLARE_BITMAP(may_free, MAX_IOV_SIZE);
    struct iovec iov[MAX_IOV_SIZE];
    unsigned int iovcnt;

    int last_error;
    Error *last_error_obj;
};

/*
 * Stop a file from being read/written - not all backing files can do this;
 * typically only sockets can.
 *
 * TODO: convert to propagate Error objects instead of squashing
 * to a fixed errno value
 */
int qemu_file_shutdown(QEMUFile *f)
{
    Error *err = NULL;

    /*
     * We must set the qemufile error before the real shutdown(), otherwise
     * there can be a race window where we thought IO all went through
     * (because last_error==NULL) but actually IO has already stopped.
     *
     * Without the correct ordering, the race can happen like this:
     *
     *      page receiver                     other thread
     *      -------------                     ------------
     *      qemu_get_buffer()
     *                                        do shutdown()
     *        returns 0 (buffer all zero)
     *        (we didn't check this retcode)
     *      try to detect IO error
     *        last_error==NULL, IO okay
     *      install ALL-ZERO page
     *                                        set last_error
     *      --> guest crash!
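     *
     * Setting last_error first closes that window: by the time shutdown()
     * can cause a zero-length read, any thread that checks the error state
     * will already see the failure and discard the bogus all-zero data.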
     */
    if (!f->last_error) {
        qemu_file_set_error(f, -EIO);
    }

    if (!qio_channel_has_feature(f->ioc,
                                 QIO_CHANNEL_FEATURE_SHUTDOWN)) {
        return -ENOSYS;
    }

    if (qio_channel_shutdown(f->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, &err) < 0) {
        error_report_err(err);
        return -EIO;
    }

    return 0;
}

static QEMUFile *qemu_file_new_impl(QIOChannel *ioc, bool is_writable)
{
    QEMUFile *f;

    f = g_new0(QEMUFile, 1);

    object_ref(ioc);
    f->ioc = ioc;
    f->is_writable = is_writable;

    return f;
}

/*
 * Result: QEMUFile* for a 'return path' for comms in the opposite direction
 *         NULL if not available
 */
QEMUFile *qemu_file_get_return_path(QEMUFile *f)
{
    return qemu_file_new_impl(f->ioc, !f->is_writable);
}

QEMUFile *qemu_file_new_output(QIOChannel *ioc)
{
    return qemu_file_new_impl(ioc, true);
}

QEMUFile *qemu_file_new_input(QIOChannel *ioc)
{
    return qemu_file_new_impl(ioc, false);
}

/*
 * Get last error for stream f with optional Error*
 *
 * Return negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 *
 * If errp is specified, a verbose error message will be copied over.
 */
int qemu_file_get_error_obj(QEMUFile *f, Error **errp)
{
    if (!f->last_error) {
        return 0;
    }

    /* There is an error */
    if (errp) {
        if (f->last_error_obj) {
            *errp = error_copy(f->last_error_obj);
        } else {
            error_setg_errno(errp, -f->last_error, "Channel error");
        }
    }

    return f->last_error;
}

/*
 * Get last error for either stream f1 or f2 with optional Error*.
 * The error returned (non-zero) can be either from f1 or f2.
 *
 * If either QEMUFile* is NULL, the check on that file is skipped.
 *
 * When there is no error on either qemufile, zero is returned.
 */
int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp)
{
    int ret = 0;

    if (f1) {
        ret = qemu_file_get_error_obj(f1, errp);
        /* If an error has already been detected, return it */
        if (ret) {
            return ret;
        }
    }

    if (f2) {
        ret = qemu_file_get_error_obj(f2, errp);
    }

    return ret;
}

/*
 * Set the last error for stream f with optional Error*
 */
void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err)
{
    if (f->last_error == 0 && ret) {
        f->last_error = ret;
        error_propagate(&f->last_error_obj, err);
    } else if (err) {
        error_report_err(err);
    }
}

/*
 * Get last error for stream f
 *
 * Return negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 */
int qemu_file_get_error(QEMUFile *f)
{
    return f->last_error;
}

/*
 * Set the last error for stream f
 */
void qemu_file_set_error(QEMUFile *f, int ret)
{
    qemu_file_set_error_obj(f, ret, NULL);
}

static bool qemu_file_is_writable(QEMUFile *f)
{
    return f->is_writable;
}

static void qemu_iovec_release_ram(QEMUFile *f)
{
    struct iovec iov;
    unsigned long idx;

    /*
     * Find and release all the contiguous memory ranges marked as may_free.
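     *
     * These ranges were queued with qemu_put_buffer_async(..., may_free=true);
     * qemu_fflush() calls this only after attempting to write them out, so it
     * is safe to hand the pages back to the kernel with MADV_DONTNEED.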
     */
    idx = find_next_bit(f->may_free, f->iovcnt, 0);
    if (idx >= f->iovcnt) {
        return;
    }
    iov = f->iov[idx];

    /*
     * Within the loop, madvise() is called for each contiguous range of iovs
     * and the running iov is then reinitialized.  At the end, madvise() is
     * called once more for the last range.
     */
    while ((idx = find_next_bit(f->may_free, f->iovcnt, idx + 1)) < f->iovcnt) {
        /* check for adjacent buffers and coalesce them */
        if (iov.iov_base + iov.iov_len == f->iov[idx].iov_base) {
            iov.iov_len += f->iov[idx].iov_len;
            continue;
        }
        if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
            error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                         iov.iov_base, iov.iov_len, strerror(errno));
        }
        iov = f->iov[idx];
    }
    if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
        error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                     iov.iov_base, iov.iov_len, strerror(errno));
    }
    memset(f->may_free, 0, sizeof(f->may_free));
}

bool qemu_file_is_seekable(QEMUFile *f)
{
    return qio_channel_has_feature(f->ioc, QIO_CHANNEL_FEATURE_SEEKABLE);
}

/**
 * Flushes QEMUFile buffer
 *
 * This will flush all pending data. If data was only partially flushed, it
 * will set an error state.
 */
int qemu_fflush(QEMUFile *f)
{
    if (!qemu_file_is_writable(f)) {
        return f->last_error;
    }

    if (f->last_error) {
        return f->last_error;
    }
    if (f->iovcnt > 0) {
        Error *local_error = NULL;
        if (qio_channel_writev_all(f->ioc,
                                   f->iov, f->iovcnt,
                                   &local_error) < 0) {
            qemu_file_set_error_obj(f, -EIO, local_error);
        } else {
            uint64_t size = iov_size(f->iov, f->iovcnt);
            stat64_add(&mig_stats.qemu_file_transferred, size);
        }

        qemu_iovec_release_ram(f);
    }

    f->buf_index = 0;
    f->iovcnt = 0;
    return f->last_error;
}

/*
 * Attempt to fill the buffer from the underlying file
 * Returns the number of bytes read, or negative value for an error.
 *
 * Note that it can return a partially full buffer even when there is no
 * error and no EOF, if the underlying file descriptor gives a short read;
 * that can happen even on a blocking fd.
 */
static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
{
    int len;
    int pending;
    Error *local_error = NULL;

    assert(!qemu_file_is_writable(f));

    pending = f->buf_size - f->buf_index;
    if (pending > 0) {
        memmove(f->buf, f->buf + f->buf_index, pending);
    }
    f->buf_index = 0;
    f->buf_size = pending;

    if (qemu_file_get_error(f)) {
        return 0;
    }

    do {
        len = qio_channel_read(f->ioc,
                               (char *)f->buf + pending,
                               IO_BUF_SIZE - pending,
                               &local_error);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(f->ioc, G_IO_IN);
            } else {
                qio_channel_wait(f->ioc, G_IO_IN);
            }
        } else if (len < 0) {
            len = -EIO;
        }
    } while (len == QIO_CHANNEL_ERR_BLOCK);

    if (len > 0) {
        f->buf_size += len;
    } else if (len == 0) {
        qemu_file_set_error_obj(f, -EIO, local_error);
    } else {
        qemu_file_set_error_obj(f, len, local_error);
    }

    return len;
}

/** Closes the file
 *
 * Returns negative error value if any error happened on previous operations or
 * while closing the file. Returns 0 or positive number on success.
 *
 * The meaning of return value on success depends on the specific backend
 * being used.
 */
int qemu_fclose(QEMUFile *f)
{
    int ret = qemu_fflush(f);
    int ret2 = qio_channel_close(f->ioc, NULL);
    if (ret >= 0) {
        ret = ret2;
    }
    g_clear_pointer(&f->ioc, object_unref);
    error_free(f->last_error_obj);
    g_free(f);
    trace_qemu_file_fclose();
    return ret;
}

/*
 * Add buf to iovec. Do flush if iovec is full.
 *
 * Return values:
 * 1 iovec is full and flushed
 * 0 iovec is not flushed
 *
 */
static int add_to_iovec(QEMUFile *f, const uint8_t *buf, size_t size,
                        bool may_free)
{
    /* check for adjacent buffer and coalesce them */
    if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
        f->iov[f->iovcnt - 1].iov_len &&
        may_free == test_bit(f->iovcnt - 1, f->may_free))
    {
        f->iov[f->iovcnt - 1].iov_len += size;
    } else {
        if (f->iovcnt >= MAX_IOV_SIZE) {
            /* Should only happen if a previous fflush failed */
            assert(qemu_file_get_error(f) || !qemu_file_is_writable(f));
            return 1;
        }
        if (may_free) {
            set_bit(f->iovcnt, f->may_free);
        }
        f->iov[f->iovcnt].iov_base = (uint8_t *)buf;
        f->iov[f->iovcnt++].iov_len = size;
    }

    if (f->iovcnt >= MAX_IOV_SIZE) {
        qemu_fflush(f);
        return 1;
    }

    return 0;
}

static void add_buf_to_iovec(QEMUFile *f, size_t len)
{
    if (!add_to_iovec(f, f->buf + f->buf_index, len, false)) {
        f->buf_index += len;
        if (f->buf_index == IO_BUF_SIZE) {
            qemu_fflush(f);
        }
    }
}

void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size,
                           bool may_free)
{
    if (f->last_error) {
        return;
    }

    add_to_iovec(f, buf, size, may_free);
}

void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size)
{
    size_t l;

    if (f->last_error) {
        return;
    }

    while (size > 0) {
        l = IO_BUF_SIZE - f->buf_index;
        if (l > size) {
            l = size;
        }
        memcpy(f->buf + f->buf_index, buf, l);
        add_buf_to_iovec(f, l);
        if (qemu_file_get_error(f)) {
            break;
        }
        buf += l;
        size -= l;
    }
}

void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                        off_t pos)
{
    Error *err = NULL;
    size_t ret;

    if (f->last_error) {
        return;
    }

    qemu_fflush(f);
    ret = qio_channel_pwrite(f->ioc, (char *)buf, buflen, pos, &err);

    if (err) {
        qemu_file_set_error_obj(f, -EIO, err);
        return;
    }

    if ((ssize_t)ret == QIO_CHANNEL_ERR_BLOCK) {
        qemu_file_set_error_obj(f, -EAGAIN, NULL);
        return;
    }

    if (ret != buflen) {
        error_setg(&err, "Partial write of size %zu, expected %zu", ret,
                   buflen);
        qemu_file_set_error_obj(f, -EIO, err);
        return;
    }

    stat64_add(&mig_stats.qemu_file_transferred, buflen);

    return;
}


size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                          off_t pos)
{
    Error *err = NULL;
    size_t ret;

    if (f->last_error) {
        return 0;
    }

    ret = qio_channel_pread(f->ioc, (char *)buf, buflen, pos, &err);

    if ((ssize_t)ret == -1 || err) {
        qemu_file_set_error_obj(f, -EIO, err);
        return 0;
    }

    if ((ssize_t)ret == QIO_CHANNEL_ERR_BLOCK) {
        qemu_file_set_error_obj(f, -EAGAIN, NULL);
        return 0;
    }

    if (ret != buflen) {
        error_setg(&err, "Partial read of size %zu, expected %zu",
                   ret, buflen);
        qemu_file_set_error_obj(f, -EIO, err);
        return 0;
    }

    return ret;
}

void qemu_set_offset(QEMUFile *f, off_t off, int whence)
{
    Error *err = NULL;
    off_t ret;

    if (qemu_file_is_writable(f)) {
        qemu_fflush(f);
    } else {
        /* Drop any cached buffers; this will trigger a re-fill later */
        f->buf_index = 0;
        f->buf_size = 0;
    }

    ret = qio_channel_io_seek(f->ioc, off, whence, &err);
    if (ret == (off_t)-1) {
        qemu_file_set_error_obj(f, -EIO, err);
    }
}

off_t qemu_get_offset(QEMUFile *f)
{
    Error *err = NULL;
    off_t ret;

    qemu_fflush(f);

    ret = qio_channel_io_seek(f->ioc, 0, SEEK_CUR, &err);
    if (ret == (off_t)-1) {
        qemu_file_set_error_obj(f, -EIO, err);
    }
    return ret;
}


void qemu_put_byte(QEMUFile *f, int v)
{
    if (f->last_error) {
        return;
    }

    f->buf[f->buf_index] = v;
    add_buf_to_iovec(f, 1);
}

void qemu_file_skip(QEMUFile *f, int size)
{
    if (f->buf_index + size <= f->buf_size) {
        f->buf_index += size;
    }
}

/*
 * Read 'size' bytes from file (at 'offset') without moving the
 * pointer and set 'buf' to point to that data.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 */
size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset)
{
    ssize_t pending;
    size_t index;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);
    assert(size <= IO_BUF_SIZE - offset);

    /* The 1st byte to read from */
    index = f->buf_index + offset;
    /* The number of available bytes starting at index */
    pending = f->buf_size - index;

    /*
     * qemu_fill_buffer might return just a few bytes, even when there isn't
     * an error, so loop collecting them until we get enough.
     */
    while (pending < size) {
        int received = qemu_fill_buffer(f);

        if (received <= 0) {
            break;
        }

        index = f->buf_index + offset;
        pending = f->buf_size - index;
    }

    if (pending <= 0) {
        return 0;
    }
    if (size > pending) {
        size = pending;
    }

    *buf = f->buf + index;
    return size;
}

/*
 * Read 'size' bytes of data from the file into buf.
 * 'size' can be larger than the internal buffer.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 */
size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
{
    size_t pending = size;
    size_t done = 0;

    while (pending > 0) {
        size_t res;
        uint8_t *src;

        res = qemu_peek_buffer(f, &src, MIN(pending, IO_BUF_SIZE), 0);
        if (res == 0) {
            return done;
        }
        memcpy(buf, src, res);
        qemu_file_skip(f, res);
        buf += res;
        pending -= res;
        done += res;
    }
    return done;
}

/*
 * Read 'size' bytes of data from the file.
 * 'size' can be larger than the internal buffer.
 *
 * The data:
 *   may be held on an internal buffer (in which case *buf is updated
 *     to point to it) that is valid until the next qemu_file operation.
 * OR
 *   will be copied to the *buf that was passed in.
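 *
 * An illustrative caller (the names 'data', 'consume' and 'len' are
 * placeholders, not taken from the original source):
 *
 *   uint8_t *data = g_malloc(len);
 *   uint8_t *ptr = data;
 *   if (qemu_get_buffer_in_place(f, &ptr, len) == len) {
 *       consume(ptr);
 *   }
 *   g_free(data);
 *
 * After the call 'ptr' may point into the internal buffer, so only the
 * original 'data' pointer may be passed to g_free().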
 *
 * The code tries to avoid the copy if possible.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 *
 * Note: Since **buf may get changed, the caller should take care to
 *       keep a pointer to the original buffer if it needs to deallocate it.
 */
size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size)
{
    if (size < IO_BUF_SIZE) {
        size_t res;
        uint8_t *src = NULL;

        res = qemu_peek_buffer(f, &src, size, 0);

        if (res == size) {
            qemu_file_skip(f, res);
            *buf = src;
            return res;
        }
    }

    return qemu_get_buffer(f, *buf, size);
}

/*
 * Peeks a single byte from the buffer; this isn't guaranteed to work if
 * offset leaves a gap after the previous read/peeked data.
 */
int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset)
{
    int index = f->buf_index + offset;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);

    if (index >= f->buf_size) {
        qemu_fill_buffer(f);
        index = f->buf_index + offset;
        if (index >= f->buf_size) {
            return 0;
        }
    }
    return f->buf[index];
}

int coroutine_mixed_fn qemu_get_byte(QEMUFile *f)
{
    int result;

    result = qemu_peek_byte(f, 0);
    qemu_file_skip(f, 1);
    return result;
}

uint64_t qemu_file_transferred(QEMUFile *f)
{
    uint64_t ret = stat64_get(&mig_stats.qemu_file_transferred);
    int i;

    g_assert(qemu_file_is_writable(f));

    for (i = 0; i < f->iovcnt; i++) {
        ret += f->iov[i].iov_len;
    }

    return ret;
}

void qemu_put_be16(QEMUFile *f, unsigned int v)
{
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);
}

void qemu_put_be32(QEMUFile *f, unsigned int v)
{
    qemu_put_byte(f, v >> 24);
    qemu_put_byte(f, v >> 16);
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);
}

void qemu_put_be64(QEMUFile *f, uint64_t v)
{
    qemu_put_be32(f, v >> 32);
    qemu_put_be32(f, v);
}

unsigned int qemu_get_be16(QEMUFile *f)
{
    unsigned int v;
    v = qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);
    return v;
}

unsigned int qemu_get_be32(QEMUFile *f)
{
    unsigned int v;
    v = (unsigned int)qemu_get_byte(f) << 24;
    v |= qemu_get_byte(f) << 16;
    v |= qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);
    return v;
}

uint64_t qemu_get_be64(QEMUFile *f)
{
    uint64_t v;
    v = (uint64_t)qemu_get_be32(f) << 32;
    v |= qemu_get_be32(f);
    return v;
}

/*
 * Get a string whose length is determined by a single preceding byte
 * A preallocated 256 byte buffer must be passed in.
 * Returns: len on success and a 0 terminated string in the buffer
 *          else 0
 *          (Note a 0 length string will return 0 either way)
 */
size_t coroutine_fn qemu_get_counted_string(QEMUFile *f, char buf[256])
{
    size_t len = qemu_get_byte(f);
    size_t res = qemu_get_buffer(f, (uint8_t *)buf, len);

    buf[res] = 0;

    return res == len ? res : 0;
}

/*
 * Put a string with one preceding byte containing its length. The length of
 * the string should be less than 256.
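 *
 * Illustrative round trip with qemu_get_counted_string() (the string "ram"
 * is just an example):
 *
 *   qemu_put_counted_string(f, "ram");
 *   ...
 *   char name[256];
 *   if (!qemu_get_counted_string(f, name)) {
 *       error_report("failed to read counted string");
 *   }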
 */
void qemu_put_counted_string(QEMUFile *f, const char *str)
{
    size_t len = strlen(str);

    assert(len < 256);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (const uint8_t *)str, len);
}

/*
 * Set the blocking state of the QEMUFile.
 * Note: On some transports the OS only keeps a single blocking state for
 *       both directions, and thus changing the blocking on the main
 *       QEMUFile can also affect the return path.
 */
void qemu_file_set_blocking(QEMUFile *f, bool block)
{
    qio_channel_set_blocking(f->ioc, block, NULL);
}

/*
 * qemu_file_get_ioc:
 *
 * Get the ioc object for the file, without incrementing
 * the reference count.
 *
 * Returns: the ioc object
 */
QIOChannel *qemu_file_get_ioc(QEMUFile *file)
{
    return file->ioc;
}

/*
 * Read size bytes from QEMUFile f and write them to fd.
 */
int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size)
{
    while (size) {
        size_t pending = f->buf_size - f->buf_index;
        ssize_t rc;

        if (!pending) {
            rc = qemu_fill_buffer(f);
            if (rc < 0) {
                return rc;
            }
            if (rc == 0) {
                return -EIO;
            }
            continue;
        }

        rc = write(fd, f->buf + f->buf_index, MIN(pending, size));
        if (rc < 0) {
            return -errno;
        }
        if (rc == 0) {
            return -EIO;
        }
        f->buf_index += rc;
        size -= rc;
    }

    return 0;
}