/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/madvise.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "migration.h"
#include "migration-stats.h"
#include "qemu-file.h"
#include "trace.h"
#include "options.h"
#include "qapi/error.h"
#include "rdma.h"
#include "io/channel-file.h"

#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)
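
/*
 * Overview (illustrative summary, not an exhaustive contract):
 *
 * A QEMUFile wraps a QIOChannel and adds migration-oriented buffering.
 * Writers stage data in an internal buffer plus an iovec array and push
 * it to the channel with qemu_fflush(); readers refill the internal
 * buffer from the channel via qemu_fill_buffer().
 *
 * Simplified usage sketch (error handling omitted; each side is assumed
 * to hold its own already-connected QIOChannel "ioc"):
 *
 *     Sending side:
 *         QEMUFile *out = qemu_file_new_output(ioc);
 *         qemu_put_be32(out, 42);
 *         qemu_put_buffer(out, data, len);
 *         qemu_fflush(out);
 *
 *     Receiving side:
 *         QEMUFile *in = qemu_file_new_input(ioc);
 *         uint32_t v = qemu_get_be32(in);
 *         qemu_get_buffer(in, data, len);
 */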

struct QEMUFile {
    QIOChannel *ioc;
    bool is_writable;

    int buf_index;
    int buf_size; /* 0 when writing */
    uint8_t buf[IO_BUF_SIZE];

    DECLARE_BITMAP(may_free, MAX_IOV_SIZE);
    struct iovec iov[MAX_IOV_SIZE];
    unsigned int iovcnt;

    int last_error;
    Error *last_error_obj;
};

/*
 * Stop a file from being read/written - not all backing files can do this;
 * typically only sockets can.
 *
 * TODO: convert to propagate Error objects instead of squashing
 * to a fixed errno value
 */
int qemu_file_shutdown(QEMUFile *f)
{
    Error *err = NULL;

    /*
     * We must set the qemufile error before the real shutdown(), otherwise
     * there can be a race window where we thought IO all went through
     * (because last_error==NULL) but actually IO has already stopped.
     *
     * Without the correct ordering, the race can happen like this:
     *
     *      page receiver                             other thread
     *      -------------                             ------------
     *      qemu_get_buffer()
     *                                                do shutdown()
     *        returns 0 (buffer all zero)
     *        (we didn't check this retcode)
     *      try to detect IO error
     *        last_error==NULL, IO okay
     *      install ALL-ZERO page
     *                                                set last_error
     *      --> guest crash!
     */
    if (!f->last_error) {
        qemu_file_set_error(f, -EIO);
    }

    if (!qio_channel_has_feature(f->ioc,
                                 QIO_CHANNEL_FEATURE_SHUTDOWN)) {
        return -ENOSYS;
    }

    if (qio_channel_shutdown(f->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, &err) < 0) {
        error_report_err(err);
        return -EIO;
    }

    return 0;
}

static QEMUFile *qemu_file_new_impl(QIOChannel *ioc, bool is_writable)
{
    QEMUFile *f;

    f = g_new0(QEMUFile, 1);

    object_ref(ioc);
    f->ioc = ioc;
    f->is_writable = is_writable;

    return f;
}

/*
 * Result: QEMUFile* for a 'return path' for comms in the opposite direction
 *         NULL if not available
 */
QEMUFile *qemu_file_get_return_path(QEMUFile *f)
{
    return qemu_file_new_impl(f->ioc, !f->is_writable);
}

QEMUFile *qemu_file_new_output(QIOChannel *ioc)
{
    return qemu_file_new_impl(ioc, true);
}

QEMUFile *qemu_file_new_input(QIOChannel *ioc)
{
    return qemu_file_new_impl(ioc, false);
}

/*
 * Get the last error for stream f, with an optional Error*
 *
 * Return a negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 *
 * If errp is specified, a verbose error message will be copied over.
 */
int qemu_file_get_error_obj(QEMUFile *f, Error **errp)
{
    if (!f->last_error) {
        return 0;
    }

    /* There is an error */
    if (errp) {
        if (f->last_error_obj) {
            *errp = error_copy(f->last_error_obj);
        } else {
            error_setg_errno(errp, -f->last_error, "Channel error");
        }
    }

    return f->last_error;
}

/*
 * Get the last error for either stream f1 or f2, with an optional Error*.
 * The error returned (non-zero) can be either from f1 or f2.
 *
 * If either QEMUFile pointer is NULL, the check on that file is skipped.
 *
 * When neither file has an error, zero is returned.
 */
int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp)
{
    int ret = 0;

    if (f1) {
        ret = qemu_file_get_error_obj(f1, errp);
        /* If an error has already been detected, return */
        if (ret) {
            return ret;
        }
    }

    if (f2) {
        ret = qemu_file_get_error_obj(f2, errp);
    }

    return ret;
}

/*
 * Set the last error for stream f, with an optional Error*
 */
void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err)
{
    if (f->last_error == 0 && ret) {
        f->last_error = ret;
        error_propagate(&f->last_error_obj, err);
    } else if (err) {
        error_report_err(err);
    }
}

/*
 * Get the last error for stream f
 *
 * Return a negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 */
int qemu_file_get_error(QEMUFile *f)
{
    return f->last_error;
}

/*
 * Set the last error for stream f
 */
void qemu_file_set_error(QEMUFile *f, int ret)
{
    qemu_file_set_error_obj(f, ret, NULL);
}

static bool qemu_file_is_writable(QEMUFile *f)
{
    return f->is_writable;
}

static void qemu_iovec_release_ram(QEMUFile *f)
{
    struct iovec iov;
    unsigned long idx;

    /* Find and release all the contiguous memory ranges marked as may_free. */
    idx = find_next_bit(f->may_free, f->iovcnt, 0);
    if (idx >= f->iovcnt) {
        return;
    }
    iov = f->iov[idx];

    /*
     * Inside the loop, madvise() is only called once a contiguous range has
     * ended, after which iov is reinitialised to start the next range.  The
     * final madvise() after the loop releases the last range.
     */
    while ((idx = find_next_bit(f->may_free, f->iovcnt, idx + 1)) < f->iovcnt) {
        /* check for adjacent buffer and coalesce them */
        if (iov.iov_base + iov.iov_len == f->iov[idx].iov_base) {
            iov.iov_len += f->iov[idx].iov_len;
            continue;
        }
        if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
            error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                         iov.iov_base, iov.iov_len, strerror(errno));
        }
        iov = f->iov[idx];
    }
    if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
        error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                     iov.iov_base, iov.iov_len, strerror(errno));
    }
    memset(f->may_free, 0, sizeof(f->may_free));
}

bool qemu_file_is_seekable(QEMUFile *f)
{
    return qio_channel_has_feature(f->ioc, QIO_CHANNEL_FEATURE_SEEKABLE);
}

/**
 * Flushes the QEMUFile buffer
 *
 * This will flush all pending data. If data was only partially flushed, it
 * will set an error state.
 */
int qemu_fflush(QEMUFile *f)
{
    if (!qemu_file_is_writable(f)) {
        return f->last_error;
    }

    if (f->last_error) {
        return f->last_error;
    }
    if (f->iovcnt > 0) {
        Error *local_error = NULL;
        if (qio_channel_writev_all(f->ioc,
                                   f->iov, f->iovcnt,
                                   &local_error) < 0) {
            qemu_file_set_error_obj(f, -EIO, local_error);
        } else {
            uint64_t size = iov_size(f->iov, f->iovcnt);
            stat64_add(&mig_stats.qemu_file_transferred, size);
        }

        qemu_iovec_release_ram(f);
    }

    f->buf_index = 0;
    f->iovcnt = 0;
    return f->last_error;
}

/*
 * Attempt to fill the buffer from the underlying file
 * Returns the number of bytes read, or a negative value on error.
 *
 * Note that it can return a partially full buffer even when there is no error
 * and no EOF, if the underlying file descriptor gives a short read; that can
 * happen even on a blocking fd.
 */
static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
{
    int len;
    int pending;
    Error *local_error = NULL;

    assert(!qemu_file_is_writable(f));

    pending = f->buf_size - f->buf_index;
    if (pending > 0) {
        memmove(f->buf, f->buf + f->buf_index, pending);
    }
    f->buf_index = 0;
    f->buf_size = pending;

    if (qemu_file_get_error(f)) {
        return 0;
    }

    do {
        len = qio_channel_read(f->ioc,
                               (char *)f->buf + pending,
                               IO_BUF_SIZE - pending,
                               &local_error);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(f->ioc, G_IO_IN);
            } else {
                qio_channel_wait(f->ioc, G_IO_IN);
            }
        } else if (len < 0) {
            len = -EIO;
        }
    } while (len == QIO_CHANNEL_ERR_BLOCK);

    if (len > 0) {
        f->buf_size += len;
    } else if (len == 0) {
        qemu_file_set_error_obj(f, -EIO, local_error);
    } else {
        qemu_file_set_error_obj(f, len, local_error);
    }

    return len;
}

/** Closes the file
 *
 * Returns a negative error value if any error happened on previous
 * operations or while closing the file. Returns 0 or a positive number
 * on success.
 *
 * The meaning of the return value on success depends on the specific
 * backend being used.
 */
int qemu_fclose(QEMUFile *f)
{
    int ret = qemu_fflush(f);
    int ret2 = qio_channel_close(f->ioc, NULL);
    if (ret >= 0) {
        ret = ret2;
    }
    g_clear_pointer(&f->ioc, object_unref);
    error_free(f->last_error_obj);
    g_free(f);
    trace_qemu_file_fclose();
    return ret;
}

/*
 * Add buf to the iovec. Flush if the iovec is full.
 *
 * Return values:
 * 1 iovec is full and flushed
 * 0 iovec is not flushed
 */
static int add_to_iovec(QEMUFile *f, const uint8_t *buf, size_t size,
                        bool may_free)
{
    /* check for adjacent buffer and coalesce them */
    if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
        f->iov[f->iovcnt - 1].iov_len &&
        may_free == test_bit(f->iovcnt - 1, f->may_free))
    {
        f->iov[f->iovcnt - 1].iov_len += size;
    } else {
        if (f->iovcnt >= MAX_IOV_SIZE) {
            /* Should only happen if a previous fflush failed */
            assert(qemu_file_get_error(f) || !qemu_file_is_writable(f));
            return 1;
        }
        if (may_free) {
            set_bit(f->iovcnt, f->may_free);
        }
        f->iov[f->iovcnt].iov_base = (uint8_t *)buf;
        f->iov[f->iovcnt++].iov_len = size;
    }

    if (f->iovcnt >= MAX_IOV_SIZE) {
        qemu_fflush(f);
        return 1;
    }

    return 0;
}

static void add_buf_to_iovec(QEMUFile *f, size_t len)
{
    if (!add_to_iovec(f, f->buf + f->buf_index, len, false)) {
        f->buf_index += len;
        if (f->buf_index == IO_BUF_SIZE) {
            qemu_fflush(f);
        }
    }
}

void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size,
                           bool may_free)
{
    if (f->last_error) {
        return;
    }

    add_to_iovec(f, buf, size, may_free);
}

void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size)
{
    size_t l;

    if (f->last_error) {
        return;
    }

    while (size > 0) {
        l = IO_BUF_SIZE - f->buf_index;
        if (l > size) {
            l = size;
        }
        memcpy(f->buf + f->buf_index, buf, l);
        add_buf_to_iovec(f, l);
        if (qemu_file_get_error(f)) {
            break;
        }
        buf += l;
        size -= l;
    }
}
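
/*
 * Positioned variants: qemu_put_buffer_at() and qemu_get_buffer_at() below
 * bypass the QEMUFile buffering.  The write side flushes any pending
 * buffered data first, then both sides go straight to the channel with
 * qio_channel_pwrite()/qio_channel_pread() at the absolute offset 'pos'.
 */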

void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                        off_t pos)
{
    Error *err = NULL;
    size_t ret;

    if (f->last_error) {
        return;
    }

    qemu_fflush(f);
    ret = qio_channel_pwrite(f->ioc, (char *)buf, buflen, pos, &err);

    if (err) {
        qemu_file_set_error_obj(f, -EIO, err);
        return;
    }

    if ((ssize_t)ret == QIO_CHANNEL_ERR_BLOCK) {
        qemu_file_set_error_obj(f, -EAGAIN, NULL);
        return;
    }

    if (ret != buflen) {
        error_setg(&err, "Partial write of size %zu, expected %zu", ret,
                   buflen);
        qemu_file_set_error_obj(f, -EIO, err);
        return;
    }

    stat64_add(&mig_stats.qemu_file_transferred, buflen);

    return;
}


size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                          off_t pos)
{
    Error *err = NULL;
    size_t ret;

    if (f->last_error) {
        return 0;
    }

    ret = qio_channel_pread(f->ioc, (char *)buf, buflen, pos, &err);

    if ((ssize_t)ret == -1 || err) {
        qemu_file_set_error_obj(f, -EIO, err);
        return 0;
    }

    if ((ssize_t)ret == QIO_CHANNEL_ERR_BLOCK) {
        qemu_file_set_error_obj(f, -EAGAIN, NULL);
        return 0;
    }

    if (ret != buflen) {
        error_setg(&err, "Partial read of size %zu, expected %zu",
                   ret, buflen);
        qemu_file_set_error_obj(f, -EIO, err);
        return 0;
    }

    return ret;
}

void qemu_set_offset(QEMUFile *f, off_t off, int whence)
{
    Error *err = NULL;
    off_t ret;

    if (qemu_file_is_writable(f)) {
        qemu_fflush(f);
    } else {
        /* Drop any cached buffers; a re-fill will be triggered later */
        f->buf_index = 0;
        f->buf_size = 0;
    }

    ret = qio_channel_io_seek(f->ioc, off, whence, &err);
    if (ret == (off_t)-1) {
        qemu_file_set_error_obj(f, -EIO, err);
    }
}

off_t qemu_get_offset(QEMUFile *f)
{
    Error *err = NULL;
    off_t ret;

    qemu_fflush(f);

    ret = qio_channel_io_seek(f->ioc, 0, SEEK_CUR, &err);
    if (ret == (off_t)-1) {
        qemu_file_set_error_obj(f, -EIO, err);
    }
    return ret;
}


void qemu_put_byte(QEMUFile *f, int v)
{
    if (f->last_error) {
        return;
    }

    f->buf[f->buf_index] = v;
    add_buf_to_iovec(f, 1);
}

void qemu_file_skip(QEMUFile *f, int size)
{
    if (f->buf_index + size <= f->buf_size) {
        f->buf_index += size;
    }
}

/*
 * Read 'size' bytes from the file (at 'offset') without moving the
 * pointer, and set 'buf' to point to that data.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fds, which
 * all current QEMUFiles are)
 */
size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset)
{
    ssize_t pending;
    size_t index;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);
    assert(size <= IO_BUF_SIZE - offset);

    /* The 1st byte to read from */
    index = f->buf_index + offset;
    /* The number of available bytes starting at index */
    pending = f->buf_size - index;

    /*
     * qemu_fill_buffer might return just a few bytes, even when there isn't
     * an error, so loop collecting them until we get enough.
     */
    while (pending < size) {
        int received = qemu_fill_buffer(f);

        if (received <= 0) {
            break;
        }

        index = f->buf_index + offset;
        pending = f->buf_size - index;
    }

    if (pending <= 0) {
        return 0;
    }
    if (size > pending) {
        size = pending;
    }

    *buf = f->buf + index;
    return size;
}

/*
 * Read 'size' bytes of data from the file into buf.
 * 'size' can be larger than the internal buffer.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fds, which
 * all current QEMUFiles are)
 */
size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
{
    size_t pending = size;
    size_t done = 0;

    while (pending > 0) {
        size_t res;
        uint8_t *src;

        res = qemu_peek_buffer(f, &src, MIN(pending, IO_BUF_SIZE), 0);
        if (res == 0) {
            return done;
        }
        memcpy(buf, src, res);
        qemu_file_skip(f, res);
        buf += res;
        pending -= res;
        done += res;
    }
    return done;
}

/*
 * Read 'size' bytes of data from the file.
 * 'size' can be larger than the internal buffer.
 *
 * The data:
 *   may be held on an internal buffer (in which case *buf is updated
 *     to point to it) that is valid until the next qemu_file operation.
 * OR
 *   will be copied to the *buf that was passed in.
 *
 * The code tries to avoid the copy if possible.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fds, which
 * all current QEMUFiles are)
 *
 * Note: Since **buf may get changed, the caller should take care to
 *       keep a pointer to the original buffer if it needs to deallocate it.
 */
size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size)
{
    if (size < IO_BUF_SIZE) {
        size_t res;
        uint8_t *src = NULL;

        res = qemu_peek_buffer(f, &src, size, 0);

        if (res == size) {
            qemu_file_skip(f, res);
            *buf = src;
            return res;
        }
    }

    return qemu_get_buffer(f, *buf, size);
}

/*
 * Peeks a single byte from the buffer; this isn't guaranteed to work if
 * offset leaves a gap after the previous read/peeked data.
 */
int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset)
{
    int index = f->buf_index + offset;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);

    if (index >= f->buf_size) {
        qemu_fill_buffer(f);
        index = f->buf_index + offset;
        if (index >= f->buf_size) {
            return 0;
        }
    }
    return f->buf[index];
}

int coroutine_mixed_fn qemu_get_byte(QEMUFile *f)
{
    int result;

    result = qemu_peek_byte(f, 0);
    qemu_file_skip(f, 1);
    return result;
}

uint64_t qemu_file_transferred(QEMUFile *f)
{
    uint64_t ret = stat64_get(&mig_stats.qemu_file_transferred);
    int i;

    g_assert(qemu_file_is_writable(f));

    for (i = 0; i < f->iovcnt; i++) {
        ret += f->iov[i].iov_len;
    }

    return ret;
}

void qemu_put_be16(QEMUFile *f, unsigned int v)
{
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);
}

void qemu_put_be32(QEMUFile *f, unsigned int v)
{
    qemu_put_byte(f, v >> 24);
    qemu_put_byte(f, v >> 16);
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);
}

void qemu_put_be64(QEMUFile *f, uint64_t v)
{
    qemu_put_be32(f, v >> 32);
    qemu_put_be32(f, v);
}

unsigned int qemu_get_be16(QEMUFile *f)
{
    unsigned int v;
    v = qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);
    return v;
}

unsigned int qemu_get_be32(QEMUFile *f)
{
    unsigned int v;
    v = (unsigned int)qemu_get_byte(f) << 24;
    v |= qemu_get_byte(f) << 16;
    v |= qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);
    return v;
}

uint64_t qemu_get_be64(QEMUFile *f)
{
    uint64_t v;
    v = (uint64_t)qemu_get_be32(f) << 32;
    v |= qemu_get_be32(f);
    return v;
}
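
/*
 * Illustrative note on the helpers above (a worked example, not an
 * additional API guarantee): the "be" helpers emit and consume big-endian
 * (network order) values one byte at a time, so e.g.
 *
 *     qemu_put_be32(f, 0x11223344);
 *
 * places the bytes 0x11 0x22 0x33 0x44 on the stream in that order, and
 * qemu_get_be32() on the destination reassembles 0x11223344 regardless of
 * host endianness.
 */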

/*
 * Get a string whose length is determined by a single preceding byte
 * A preallocated 256 byte buffer must be passed in.
 * Returns: len on success and a 0 terminated string in the buffer
 *          else 0
 * (Note a 0 length string will return 0 either way)
 */
size_t coroutine_fn qemu_get_counted_string(QEMUFile *f, char buf[256])
{
    size_t len = qemu_get_byte(f);
    size_t res = qemu_get_buffer(f, (uint8_t *)buf, len);

    buf[res] = 0;

    return res == len ? res : 0;
}

/*
 * Put a string with one preceding byte containing its length. The length of
 * the string should be less than 256.
 */
void qemu_put_counted_string(QEMUFile *f, const char *str)
{
    size_t len = strlen(str);

    assert(len < 256);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (const uint8_t *)str, len);
}

/*
 * Set the blocking state of the QEMUFile.
 * Note: On some transports the OS only keeps a single blocking state for
 *       both directions, and thus changing the blocking on the main
 *       QEMUFile can also affect the return path.
 */
void qemu_file_set_blocking(QEMUFile *f, bool block)
{
    qio_channel_set_blocking(f->ioc, block, NULL);
}

/*
 * qemu_file_get_ioc:
 *
 * Get the ioc object for the file, without incrementing
 * the reference count.
 *
 * Returns: the ioc object
 */
QIOChannel *qemu_file_get_ioc(QEMUFile *file)
{
    return file->ioc;
}

/*
 * Read size bytes from QEMUFile f and write them to fd.
 */
int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size)
{
    while (size) {
        size_t pending = f->buf_size - f->buf_index;
        ssize_t rc;

        if (!pending) {
            rc = qemu_fill_buffer(f);
            if (rc < 0) {
                return rc;
            }
            if (rc == 0) {
                return -EIO;
            }
            continue;
        }

        rc = write(fd, f->buf + f->buf_index, MIN(pending, size));
        if (rc < 0) {
            return -errno;
        }
        if (rc == 0) {
            return -EIO;
        }
        f->buf_index += rc;
        size -= rc;
    }

    return 0;
}