/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <zlib.h>
#include "qemu/madvise.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "migration.h"
#include "qemu-file.h"
#include "trace.h"
#include "qapi/error.h"

#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)

struct QEMUFile {
    const QEMUFileOps *ops;
    const QEMUFileHooks *hooks;
    void *opaque;

    int64_t bytes_xfer;
    int64_t xfer_limit;

    int64_t pos; /* start of buffer when writing, end of buffer
                    when reading */
    int buf_index;
    int buf_size; /* 0 when writing */
    uint8_t buf[IO_BUF_SIZE];

    DECLARE_BITMAP(may_free, MAX_IOV_SIZE);
    struct iovec iov[MAX_IOV_SIZE];
    unsigned int iovcnt;

    int last_error;
    Error *last_error_obj;
    /* has the file been shut down */
    bool shutdown;
    /* Whether opaque points to a QIOChannel */
    bool has_ioc;
};

/*
 * Stop a file from being read/written - not all backing files can do this,
 * typically only sockets can.
 */
int qemu_file_shutdown(QEMUFile *f)
{
    int ret;

    f->shutdown = true;
    if (!f->ops->shut_down) {
        return -ENOSYS;
    }
    ret = f->ops->shut_down(f->opaque, true, true, NULL);

    if (!f->last_error) {
        qemu_file_set_error(f, -EIO);
    }
    return ret;
}

/*
 * Result: QEMUFile* for a 'return path' for comms in the opposite direction
 *         NULL if not available
 */
QEMUFile *qemu_file_get_return_path(QEMUFile *f)
{
    if (!f->ops->get_return_path) {
        return NULL;
    }
    return f->ops->get_return_path(f->opaque);
}

bool qemu_file_mode_is_not_valid(const char *mode)
{
    if (mode == NULL ||
        (mode[0] != 'r' && mode[0] != 'w') ||
        mode[1] != 'b' || mode[2] != 0) {
        fprintf(stderr, "qemu_fopen: Argument validity check failed\n");
        return true;
    }

    return false;
}

QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops, bool has_ioc)
{
    QEMUFile *f;

    f = g_new0(QEMUFile, 1);

    f->opaque = opaque;
    f->ops = ops;
    f->has_ioc = has_ioc;
    return f;
}

void qemu_file_set_hooks(QEMUFile *f, const QEMUFileHooks *hooks)
{
    f->hooks = hooks;
}

/*
 * Get last error for stream f with optional Error*
 *
 * Return negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 * Optionally, it returns Error* in errp, but it may be NULL even if the
 * return value is not 0.
 */
int qemu_file_get_error_obj(QEMUFile *f, Error **errp)
{
    if (errp) {
        *errp = f->last_error_obj ? error_copy(f->last_error_obj) : NULL;
    }
    return f->last_error;
}

/*
 * Set the last error for stream f with optional Error*
 */
void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err)
{
    if (f->last_error == 0 && ret) {
        f->last_error = ret;
        error_propagate(&f->last_error_obj, err);
    } else if (err) {
        error_report_err(err);
    }
}

/*
 * Get last error for stream f
 *
 * Return negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 */
int qemu_file_get_error(QEMUFile *f)
{
    return qemu_file_get_error_obj(f, NULL);
}

/*
 * Set the last error for stream f
 */
void qemu_file_set_error(QEMUFile *f, int ret)
{
    qemu_file_set_error_obj(f, ret, NULL);
}

bool qemu_file_is_writable(QEMUFile *f)
{
    return f->ops->writev_buffer;
}

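/*
 * Illustrative sketch of the error-handling pattern this API assumes: the
 * qemu_put_*()/qemu_get_*() helpers below do not report failures directly,
 * so callers issue a batch of accesses and then poll the sticky error state.
 * The device fields written here are hypothetical, not part of this API:
 *
 *     qemu_put_be32(f, dev->feature_bits);
 *     qemu_put_be64(f, dev->some_counter);
 *     if (qemu_file_get_error(f)) {
 *         return qemu_file_get_error(f);   (first error is kept, later ones are dropped)
 *     }
 */
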
static void qemu_iovec_release_ram(QEMUFile *f)
{
    struct iovec iov;
    unsigned long idx;

    /* Find and release all the contiguous memory ranges marked as may_free. */
    idx = find_next_bit(f->may_free, f->iovcnt, 0);
    if (idx >= f->iovcnt) {
        return;
    }
    iov = f->iov[idx];

    /* Inside the loop, madvise() is called for each contiguous range and the
     * iov is then reinitialised; at the end, madvise() is called for the
     * last iov.
     */
    while ((idx = find_next_bit(f->may_free, f->iovcnt, idx + 1)) < f->iovcnt) {
        /* check for adjacent buffer and coalesce them */
        if (iov.iov_base + iov.iov_len == f->iov[idx].iov_base) {
            iov.iov_len += f->iov[idx].iov_len;
            continue;
        }
        if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
            error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                         iov.iov_base, iov.iov_len, strerror(errno));
        }
        iov = f->iov[idx];
    }
    if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
        error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                     iov.iov_base, iov.iov_len, strerror(errno));
    }
    memset(f->may_free, 0, sizeof(f->may_free));
}

/**
 * Flushes QEMUFile buffer
 *
 * This will flush all pending data. If data was only partially flushed, it
 * will set an error state.
 */
void qemu_fflush(QEMUFile *f)
{
    ssize_t ret = 0;
    ssize_t expect = 0;
    Error *local_error = NULL;

    if (!qemu_file_is_writable(f)) {
        return;
    }

    if (f->shutdown) {
        return;
    }
    if (f->iovcnt > 0) {
        expect = iov_size(f->iov, f->iovcnt);
        ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt, f->pos,
                                    &local_error);

        qemu_iovec_release_ram(f);
    }

    if (ret >= 0) {
        f->pos += ret;
    }
    /* We expect the QEMUFile write impl to send the full
     * data set we requested, so sanity check that.
     */
    if (ret != expect) {
        qemu_file_set_error_obj(f, ret < 0 ? ret : -EIO, local_error);
    }
    f->buf_index = 0;
    f->iovcnt = 0;
}

void ram_control_before_iterate(QEMUFile *f, uint64_t flags)
{
    int ret = 0;

    if (f->hooks && f->hooks->before_ram_iterate) {
        ret = f->hooks->before_ram_iterate(f, f->opaque, flags, NULL);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
        }
    }
}

void ram_control_after_iterate(QEMUFile *f, uint64_t flags)
{
    int ret = 0;

    if (f->hooks && f->hooks->after_ram_iterate) {
        ret = f->hooks->after_ram_iterate(f, f->opaque, flags, NULL);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
        }
    }
}

void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data)
{
    int ret = -EINVAL;

    if (f->hooks && f->hooks->hook_ram_load) {
        ret = f->hooks->hook_ram_load(f, f->opaque, flags, data);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
        }
    } else {
        /*
         * This hook was specifically requested by the source sending a flag
         * that expects there to be a hook on the destination.
         */
        if (flags == RAM_CONTROL_HOOK) {
            qemu_file_set_error(f, ret);
        }
    }
}

size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
                             ram_addr_t offset, size_t size,
                             uint64_t *bytes_sent)
{
    if (f->hooks && f->hooks->save_page) {
        int ret = f->hooks->save_page(f, f->opaque, block_offset,
                                      offset, size, bytes_sent);
        if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
            f->bytes_xfer += size;
        }

        if (ret != RAM_SAVE_CONTROL_DELAYED &&
            ret != RAM_SAVE_CONTROL_NOT_SUPP) {
            if (bytes_sent && *bytes_sent > 0) {
                qemu_update_position(f, *bytes_sent);
            } else if (ret < 0) {
                qemu_file_set_error(f, ret);
            }
        }

        return ret;
    }

    return RAM_SAVE_CONTROL_NOT_SUPP;
}

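/*
 * Illustrative sketch of how a RAM saver is expected to bracket one round of
 * page sending with the hooks above; RAM_CONTROL_ROUND is one of the flag
 * values defined alongside this API, and the loop body is elided:
 *
 *     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
 *     ... send dirty pages, possibly offloading via ram_control_save_page() ...
 *     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
 */
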
/*
 * Attempt to fill the buffer from the underlying file
 * Returns the number of bytes read, or negative value for an error.
 *
 * Note that it can return a partially full buffer even when there is no error
 * and no EOF, if the underlying file descriptor gives a short read, and that
 * can happen even on a blocking fd.
 */
static ssize_t qemu_fill_buffer(QEMUFile *f)
{
    int len;
    int pending;
    Error *local_error = NULL;

    assert(!qemu_file_is_writable(f));

    pending = f->buf_size - f->buf_index;
    if (pending > 0) {
        memmove(f->buf, f->buf + f->buf_index, pending);
    }
    f->buf_index = 0;
    f->buf_size = pending;

    if (f->shutdown) {
        return 0;
    }

    len = f->ops->get_buffer(f->opaque, f->buf + pending, f->pos,
                             IO_BUF_SIZE - pending, &local_error);
    if (len > 0) {
        f->buf_size += len;
        f->pos += len;
    } else if (len == 0) {
        qemu_file_set_error_obj(f, -EIO, local_error);
    } else if (len != -EAGAIN) {
        qemu_file_set_error_obj(f, len, local_error);
    } else {
        error_free(local_error);
    }

    return len;
}

void qemu_update_position(QEMUFile *f, size_t size)
{
    f->pos += size;
}

/** Closes the file
 *
 * Returns negative error value if any error happened on previous operations or
 * while closing the file. Returns 0 or positive number on success.
 *
 * The meaning of return value on success depends on the specific backend
 * being used.
 */
int qemu_fclose(QEMUFile *f)
{
    int ret;
    qemu_fflush(f);
    ret = qemu_file_get_error(f);

    if (f->ops->close) {
        int ret2 = f->ops->close(f->opaque, NULL);
        if (ret >= 0) {
            ret = ret2;
        }
    }
    /* If any error was spotted before closing, we should report it
     * instead of the close() return value.
     */
    if (f->last_error) {
        ret = f->last_error;
    }
    error_free(f->last_error_obj);
    g_free(f);
    trace_qemu_file_fclose();
    return ret;
}

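/*
 * Illustrative sketch of a writer's teardown path, assuming 'f' was opened
 * for writing: qemu_fclose() flushes internally, so an explicit qemu_fflush()
 * is only useful when the caller wants to look at the error state before
 * closing:
 *
 *     qemu_fflush(f);
 *     if (qemu_file_get_error(f)) {
 *         ... report the failure; the close below will return it too ...
 *     }
 *     ret = qemu_fclose(f);
 */
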
/*
 * Add buf to iovec. Do flush if iovec is full.
 *
 * Return values:
 * 1 iovec is full and flushed
 * 0 iovec is not flushed
 *
 */
static int add_to_iovec(QEMUFile *f, const uint8_t *buf, size_t size,
                        bool may_free)
{
    /* check for adjacent buffer and coalesce them */
    if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
        f->iov[f->iovcnt - 1].iov_len &&
        may_free == test_bit(f->iovcnt - 1, f->may_free))
    {
        f->iov[f->iovcnt - 1].iov_len += size;
    } else {
        if (f->iovcnt >= MAX_IOV_SIZE) {
            /* Should only happen if a previous fflush failed */
            assert(f->shutdown || !qemu_file_is_writable(f));
            return 1;
        }
        if (may_free) {
            set_bit(f->iovcnt, f->may_free);
        }
        f->iov[f->iovcnt].iov_base = (uint8_t *)buf;
        f->iov[f->iovcnt++].iov_len = size;
    }

    if (f->iovcnt >= MAX_IOV_SIZE) {
        qemu_fflush(f);
        return 1;
    }

    return 0;
}

static void add_buf_to_iovec(QEMUFile *f, size_t len)
{
    if (!add_to_iovec(f, f->buf + f->buf_index, len, false)) {
        f->buf_index += len;
        if (f->buf_index == IO_BUF_SIZE) {
            qemu_fflush(f);
        }
    }
}

void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size,
                           bool may_free)
{
    if (f->last_error) {
        return;
    }

    f->bytes_xfer += size;
    add_to_iovec(f, buf, size, may_free);
}

void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size)
{
    size_t l;

    if (f->last_error) {
        return;
    }

    while (size > 0) {
        l = IO_BUF_SIZE - f->buf_index;
        if (l > size) {
            l = size;
        }
        memcpy(f->buf + f->buf_index, buf, l);
        f->bytes_xfer += l;
        add_buf_to_iovec(f, l);
        if (qemu_file_get_error(f)) {
            break;
        }
        buf += l;
        size -= l;
    }
}

void qemu_put_byte(QEMUFile *f, int v)
{
    if (f->last_error) {
        return;
    }

    f->buf[f->buf_index] = v;
    f->bytes_xfer++;
    add_buf_to_iovec(f, 1);
}

void qemu_file_skip(QEMUFile *f, int size)
{
    if (f->buf_index + size <= f->buf_size) {
        f->buf_index += size;
    }
}

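/*
 * Illustrative sketch of the write path, assuming a QEMUFile opened for
 * writing; 'header' and 'page' are hypothetical caller buffers.  Small
 * values are copied into the internal buffer, while qemu_put_buffer_async()
 * queues the buffer by reference, so it must stay valid until the next
 * flush:
 *
 *     qemu_put_byte(f, 0x01);
 *     qemu_put_buffer(f, header, sizeof(header));
 *     qemu_put_buffer_async(f, page, page_size, false);
 *     qemu_fflush(f);
 */
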
/*
 * Read 'size' bytes from file (at 'offset') without moving the
 * pointer and set 'buf' to point to that data.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 */
size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset)
{
    ssize_t pending;
    size_t index;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);
    assert(size <= IO_BUF_SIZE - offset);

    /* The 1st byte to read from */
    index = f->buf_index + offset;
    /* The number of available bytes starting at index */
    pending = f->buf_size - index;

    /*
     * qemu_fill_buffer might return just a few bytes, even when there isn't
     * an error, so loop collecting them until we get enough.
     */
    while (pending < size) {
        int received = qemu_fill_buffer(f);

        if (received <= 0) {
            break;
        }

        index = f->buf_index + offset;
        pending = f->buf_size - index;
    }

    if (pending <= 0) {
        return 0;
    }
    if (size > pending) {
        size = pending;
    }

    *buf = f->buf + index;
    return size;
}

/*
 * Read 'size' bytes of data from the file into buf.
 * 'size' can be larger than the internal buffer.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 */
size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
{
    size_t pending = size;
    size_t done = 0;

    while (pending > 0) {
        size_t res;
        uint8_t *src;

        res = qemu_peek_buffer(f, &src, MIN(pending, IO_BUF_SIZE), 0);
        if (res == 0) {
            return done;
        }
        memcpy(buf, src, res);
        qemu_file_skip(f, res);
        buf += res;
        pending -= res;
        done += res;
    }
    return done;
}

/*
 * Read 'size' bytes of data from the file.
 * 'size' can be larger than the internal buffer.
 *
 * The data:
 *   may be held on an internal buffer (in which case *buf is updated
 *     to point to it) that is valid until the next qemu_file operation.
 * OR
 *   will be copied to the *buf that was passed in.
 *
 * The code tries to avoid the copy if possible.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 *
 * Note: Since **buf may get changed, the caller should take care to
 *       keep a pointer to the original buffer if it needs to deallocate it.
 */
size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size)
{
    if (size < IO_BUF_SIZE) {
        size_t res;
        uint8_t *src = NULL;

        res = qemu_peek_buffer(f, &src, size, 0);

        if (res == size) {
            qemu_file_skip(f, res);
            *buf = src;
            return res;
        }
    }

    return qemu_get_buffer(f, *buf, size);
}

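/*
 * Illustrative sketch of the in-place read described above; 'scratch' is a
 * hypothetical caller-owned buffer.  The pointer handed back may be the
 * internal buffer rather than 'scratch', so the original allocation has to
 * be remembered separately for freeing, and the returned data is only safe
 * to use until the next qemu_file operation:
 *
 *     uint8_t *scratch = g_malloc(len);
 *     uint8_t *data = scratch;
 *     size_t got = qemu_get_buffer_in_place(f, &data, len);
 *     if (got != len) {
 *         ... short read, check qemu_file_get_error(f) ...
 *     }
 *     ... consume data[0 .. len - 1] ...
 *     g_free(scratch);
 */
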
/*
 * Peeks a single byte from the buffer; this isn't guaranteed to work if
 * offset leaves a gap after the previous read/peeked data.
 */
int qemu_peek_byte(QEMUFile *f, int offset)
{
    int index = f->buf_index + offset;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);

    if (index >= f->buf_size) {
        qemu_fill_buffer(f);
        index = f->buf_index + offset;
        if (index >= f->buf_size) {
            return 0;
        }
    }
    return f->buf[index];
}

int qemu_get_byte(QEMUFile *f)
{
    int result;

    result = qemu_peek_byte(f, 0);
    qemu_file_skip(f, 1);
    return result;
}

int64_t qemu_ftell_fast(QEMUFile *f)
{
    int64_t ret = f->pos;
    int i;

    for (i = 0; i < f->iovcnt; i++) {
        ret += f->iov[i].iov_len;
    }

    return ret;
}

int64_t qemu_ftell(QEMUFile *f)
{
    qemu_fflush(f);
    return f->pos;
}

int qemu_file_rate_limit(QEMUFile *f)
{
    if (f->shutdown) {
        return 1;
    }
    if (qemu_file_get_error(f)) {
        return 1;
    }
    if (f->xfer_limit > 0 && f->bytes_xfer > f->xfer_limit) {
        return 1;
    }
    return 0;
}

int64_t qemu_file_get_rate_limit(QEMUFile *f)
{
    return f->xfer_limit;
}

void qemu_file_set_rate_limit(QEMUFile *f, int64_t limit)
{
    f->xfer_limit = limit;
}

void qemu_file_reset_rate_limit(QEMUFile *f)
{
    f->bytes_xfer = 0;
}

void qemu_file_update_transfer(QEMUFile *f, int64_t len)
{
    f->bytes_xfer += len;
}

void qemu_put_be16(QEMUFile *f, unsigned int v)
{
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);
}

void qemu_put_be32(QEMUFile *f, unsigned int v)
{
    qemu_put_byte(f, v >> 24);
    qemu_put_byte(f, v >> 16);
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);
}

void qemu_put_be64(QEMUFile *f, uint64_t v)
{
    qemu_put_be32(f, v >> 32);
    qemu_put_be32(f, v);
}

unsigned int qemu_get_be16(QEMUFile *f)
{
    unsigned int v;
    v = qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);
    return v;
}

unsigned int qemu_get_be32(QEMUFile *f)
{
    unsigned int v;
    v = (unsigned int)qemu_get_byte(f) << 24;
    v |= qemu_get_byte(f) << 16;
    v |= qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);
    return v;
}

uint64_t qemu_get_be64(QEMUFile *f)
{
    uint64_t v;
    v = (uint64_t)qemu_get_be32(f) << 32;
    v |= qemu_get_be32(f);
    return v;
}

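/*
 * Illustrative sketch of the wire format produced by the helpers above: all
 * multi-byte integers are stored big-endian, so a length-prefixed blob
 * written on the source as
 *
 *     qemu_put_be32(f, len);
 *     qemu_put_buffer(f, blob, len);
 *
 * is read back symmetrically on the destination with
 *
 *     uint32_t len = qemu_get_be32(f);
 *     qemu_get_buffer(f, blob, len);
 *
 * ('blob' and 'len' are hypothetical names, not part of this API.)
 */
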
/* return the size after compression, or negative value on error */
static int qemu_compress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
                              const uint8_t *source, size_t source_len)
{
    int err;

    err = deflateReset(stream);
    if (err != Z_OK) {
        return -1;
    }

    stream->avail_in = source_len;
    stream->next_in = (uint8_t *)source;
    stream->avail_out = dest_len;
    stream->next_out = dest;

    err = deflate(stream, Z_FINISH);
    if (err != Z_STREAM_END) {
        return -1;
    }

    return stream->next_out - dest;
}

/* Compress 'size' bytes of data starting at 'p' and store the compressed
 * data in the buffer of f.
 *
 * Since the file is a dummy file with empty_ops, return -1 if f has no space
 * to save the compressed data.
 */
ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream,
                                  const uint8_t *p, size_t size)
{
    ssize_t blen = IO_BUF_SIZE - f->buf_index - sizeof(int32_t);

    if (blen < compressBound(size)) {
        return -1;
    }

    blen = qemu_compress_data(stream, f->buf + f->buf_index + sizeof(int32_t),
                              blen, p, size);
    if (blen < 0) {
        return -1;
    }

    qemu_put_be32(f, blen);
    add_buf_to_iovec(f, blen);
    return blen + sizeof(int32_t);
}

/* Copy the data in the buffer of f_src to the buffer of f_des, and
 * then reset the buf_index of f_src to 0.
 */
int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src)
{
    int len = 0;

    if (f_src->buf_index > 0) {
        len = f_src->buf_index;
        qemu_put_buffer(f_des, f_src->buf, f_src->buf_index);
        f_src->buf_index = 0;
        f_src->iovcnt = 0;
    }
    return len;
}

/*
 * Get a string whose length is determined by a single preceding byte
 * A preallocated 256 byte buffer must be passed in.
 * Returns: len on success and a 0 terminated string in the buffer
 *          else 0
 * (Note a 0 length string will return 0 either way)
 */
size_t qemu_get_counted_string(QEMUFile *f, char buf[256])
{
    size_t len = qemu_get_byte(f);
    size_t res = qemu_get_buffer(f, (uint8_t *)buf, len);

    buf[res] = 0;

    return res == len ? res : 0;
}

/*
 * Put a string with one preceding byte containing its length. The length of
 * the string should be less than 256.
 */
void qemu_put_counted_string(QEMUFile *f, const char *str)
{
    size_t len = strlen(str);

    assert(len < 256);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (const uint8_t *)str, len);
}

/*
 * Set the blocking state of the QEMUFile.
 * Note: On some transports the OS only keeps a single blocking state for
 *       both directions, and thus changing the blocking on the main
 *       QEMUFile can also affect the return path.
 */
void qemu_file_set_blocking(QEMUFile *f, bool block)
{
    if (f->ops->set_blocking) {
        f->ops->set_blocking(f->opaque, block, NULL);
    }
}

/*
 * Return the ioc object if it's a migration channel.  Note: it can return NULL
 * for callers passing in a non-migration qemufile.  E.g. see qemu_fopen_bdrv()
 * and its usage in e.g. load_snapshot().  So we need to check against NULL
 * before using it.  Without the check, migration_incoming_state_destroy()
 * could fail for load_snapshot().
 */
QIOChannel *qemu_file_get_ioc(QEMUFile *file)
{
    return file->has_ioc ? QIO_CHANNEL(file->opaque) : NULL;
}
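
/*
 * Illustrative sketch of the NULL check requested above, e.g. on an incoming
 * migration teardown path ('mis' is the caller's incoming-state structure,
 * not part of this file):
 *
 *     QIOChannel *ioc = qemu_file_get_ioc(mis->from_src_file);
 *     if (ioc) {
 *         ... it is safe to operate on the channel here ...
 *     }
 */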