1 /* 2 * Block driver for RAW files (posix) 3 * 4 * Copyright (c) 2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "qemu-common.h" 27 #include "qapi/error.h" 28 #include "qemu/cutils.h" 29 #include "qemu/error-report.h" 30 #include "block/block_int.h" 31 #include "qemu/module.h" 32 #include "qemu/option.h" 33 #include "trace.h" 34 #include "block/thread-pool.h" 35 #include "qemu/iov.h" 36 #include "block/raw-aio.h" 37 #include "qapi/qmp/qdict.h" 38 #include "qapi/qmp/qstring.h" 39 40 #include "scsi/pr-manager.h" 41 #include "scsi/constants.h" 42 43 #if defined(__APPLE__) && (__MACH__) 44 #include <paths.h> 45 #include <sys/param.h> 46 #include <IOKit/IOKitLib.h> 47 #include <IOKit/IOBSD.h> 48 #include <IOKit/storage/IOMediaBSDClient.h> 49 #include <IOKit/storage/IOMedia.h> 50 #include <IOKit/storage/IOCDMedia.h> 51 //#include <IOKit/storage/IOCDTypes.h> 52 #include <IOKit/storage/IODVDMedia.h> 53 #include <CoreFoundation/CoreFoundation.h> 54 #endif 55 56 #ifdef __sun__ 57 #define _POSIX_PTHREAD_SEMANTICS 1 58 #include <sys/dkio.h> 59 #endif 60 #ifdef __linux__ 61 #include <sys/ioctl.h> 62 #include <sys/param.h> 63 #include <sys/syscall.h> 64 #include <linux/cdrom.h> 65 #include <linux/fd.h> 66 #include <linux/fs.h> 67 #include <linux/hdreg.h> 68 #include <scsi/sg.h> 69 #ifdef __s390__ 70 #include <asm/dasd.h> 71 #endif 72 #ifndef FS_NOCOW_FL 73 #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ 74 #endif 75 #endif 76 #if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE) 77 #include <linux/falloc.h> 78 #endif 79 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 80 #include <sys/disk.h> 81 #include <sys/cdio.h> 82 #endif 83 84 #ifdef __OpenBSD__ 85 #include <sys/ioctl.h> 86 #include <sys/disklabel.h> 87 #include <sys/dkio.h> 88 #endif 89 90 #ifdef __NetBSD__ 91 #include <sys/ioctl.h> 92 #include <sys/disklabel.h> 93 #include <sys/dkio.h> 94 #include <sys/disk.h> 95 #endif 96 97 #ifdef __DragonFly__ 98 #include <sys/ioctl.h> 99 #include <sys/diskslice.h> 100 #endif 101 102 #ifdef CONFIG_XFS 103 #include <xfs/xfs.h> 104 #endif 105 106 #include "trace.h" 107 108 /* OS X does not have O_DSYNC */ 109 #ifndef O_DSYNC 110 #ifdef O_SYNC 111 #define O_DSYNC O_SYNC 112 #elif defined(O_FSYNC) 113 #define O_DSYNC O_FSYNC 114 #endif 115 #endif 116 117 /* Approximate O_DIRECT with O_DSYNC if O_DIRECT isn't available */ 118 #ifndef O_DIRECT 119 #define O_DIRECT 
O_DSYNC 120 #endif 121 122 #define FTYPE_FILE 0 123 #define FTYPE_CD 1 124 125 #define MAX_BLOCKSIZE 4096 126 127 /* Posix file locking bytes. Libvirt takes byte 0, we start from higher bytes, 128 * leaving a few more bytes for its future use. */ 129 #define RAW_LOCK_PERM_BASE 100 130 #define RAW_LOCK_SHARED_BASE 200 131 132 typedef struct BDRVRawState { 133 int fd; 134 bool use_lock; 135 int type; 136 int open_flags; 137 size_t buf_align; 138 139 /* The current permissions. */ 140 uint64_t perm; 141 uint64_t shared_perm; 142 143 /* The perms bits whose corresponding bytes are already locked in 144 * s->fd. */ 145 uint64_t locked_perm; 146 uint64_t locked_shared_perm; 147 148 int perm_change_fd; 149 int perm_change_flags; 150 BDRVReopenState *reopen_state; 151 152 #ifdef CONFIG_XFS 153 bool is_xfs:1; 154 #endif 155 bool has_discard:1; 156 bool has_write_zeroes:1; 157 bool discard_zeroes:1; 158 bool use_linux_aio:1; 159 bool use_linux_io_uring:1; 160 bool page_cache_inconsistent:1; 161 bool has_fallocate; 162 bool needs_alignment; 163 bool drop_cache; 164 bool check_cache_dropped; 165 struct { 166 uint64_t discard_nb_ok; 167 uint64_t discard_nb_failed; 168 uint64_t discard_bytes_ok; 169 } stats; 170 171 PRManager *pr_mgr; 172 } BDRVRawState; 173 174 typedef struct BDRVRawReopenState { 175 int fd; 176 int open_flags; 177 bool drop_cache; 178 bool check_cache_dropped; 179 } BDRVRawReopenState; 180 181 static int fd_open(BlockDriverState *bs); 182 static int64_t raw_getlength(BlockDriverState *bs); 183 184 typedef struct RawPosixAIOData { 185 BlockDriverState *bs; 186 int aio_type; 187 int aio_fildes; 188 189 off_t aio_offset; 190 uint64_t aio_nbytes; 191 192 union { 193 struct { 194 struct iovec *iov; 195 int niov; 196 } io; 197 struct { 198 uint64_t cmd; 199 void *buf; 200 } ioctl; 201 struct { 202 int aio_fd2; 203 off_t aio_offset2; 204 } copy_range; 205 struct { 206 PreallocMode prealloc; 207 Error **errp; 208 } truncate; 209 }; 210 } RawPosixAIOData; 211 212 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 213 static int cdrom_reopen(BlockDriverState *bs); 214 #endif 215 216 #if defined(__NetBSD__) 217 static int raw_normalize_devicepath(const char **filename, Error **errp) 218 { 219 static char namebuf[PATH_MAX]; 220 const char *dp, *fname; 221 struct stat sb; 222 223 fname = *filename; 224 dp = strrchr(fname, '/'); 225 if (lstat(fname, &sb) < 0) { 226 error_setg_file_open(errp, errno, fname); 227 return -errno; 228 } 229 230 if (!S_ISBLK(sb.st_mode)) { 231 return 0; 232 } 233 234 if (dp == NULL) { 235 snprintf(namebuf, PATH_MAX, "r%s", fname); 236 } else { 237 snprintf(namebuf, PATH_MAX, "%.*s/r%s", 238 (int)(dp - fname), fname, dp + 1); 239 } 240 *filename = namebuf; 241 warn_report("%s is a block device, using %s", fname, *filename); 242 243 return 0; 244 } 245 #else 246 static int raw_normalize_devicepath(const char **filename, Error **errp) 247 { 248 return 0; 249 } 250 #endif 251 252 /* 253 * Get logical block size via ioctl. On success store it in @sector_size_p. 
 */
static int probe_logical_blocksize(int fd, unsigned int *sector_size_p)
{
    unsigned int sector_size;
    bool success = false;
    int i;

    errno = ENOTSUP;
    static const unsigned long ioctl_list[] = {
#ifdef BLKSSZGET
        BLKSSZGET,
#endif
#ifdef DKIOCGETBLOCKSIZE
        DKIOCGETBLOCKSIZE,
#endif
#ifdef DIOCGSECTORSIZE
        DIOCGSECTORSIZE,
#endif
    };

    /* Try a few ioctls to get the right size */
    for (i = 0; i < (int)ARRAY_SIZE(ioctl_list); i++) {
        if (ioctl(fd, ioctl_list[i], &sector_size) >= 0) {
            *sector_size_p = sector_size;
            success = true;
        }
    }

    return success ? 0 : -errno;
}

/**
 * Get physical block size of @fd.
 * On success, store it in @blk_size and return 0.
 * On failure, return -errno.
 */
static int probe_physical_blocksize(int fd, unsigned int *blk_size)
{
#ifdef BLKPBSZGET
    if (ioctl(fd, BLKPBSZGET, blk_size) < 0) {
        return -errno;
    }
    return 0;
#else
    return -ENOTSUP;
#endif
}

/* Check if read is allowed with given memory buffer and length.
 *
 * This function is used to check O_DIRECT memory buffer and request alignment.
 */
static bool raw_is_io_aligned(int fd, void *buf, size_t len)
{
    ssize_t ret = pread(fd, buf, len, 0);

    if (ret >= 0) {
        return true;
    }

#ifdef __linux__
    /* The Linux kernel returns EINVAL for misaligned O_DIRECT reads. Ignore
     * other errors (e.g. real I/O error), which could happen on a failed
     * drive, since we only care about probing alignment.
     */
    if (errno != EINVAL) {
        return true;
    }
#endif

    return false;
}

static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    char *buf;
    size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
    size_t alignments[] = {1, 512, 1024, 2048, 4096};

    /* For SCSI generic devices the alignment is not really used.
       With buffered I/O, we don't have any restrictions. */
    if (bdrv_is_sg(bs) || !s->needs_alignment) {
        bs->bl.request_alignment = 1;
        s->buf_align = 1;
        return;
    }

    bs->bl.request_alignment = 0;
    s->buf_align = 0;
    /* Let's try to use the logical blocksize for the alignment. */
    if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) {
        bs->bl.request_alignment = 0;
    }
#ifdef CONFIG_XFS
    if (s->is_xfs) {
        struct dioattr da;
        if (xfsctl(NULL, fd, XFS_IOC_DIOINFO, &da) >= 0) {
            bs->bl.request_alignment = da.d_miniosz;
            /* The kernel returns wrong information for d_mem */
            /* s->buf_align = da.d_mem; */
        }
    }
#endif

    /*
     * If we could not get the sizes so far, we can only guess them. First try
     * to detect request alignment, since it is more likely to succeed. Then
     * try to detect buf_align, which cannot be detected in some cases (e.g.
     * Gluster). If buf_align cannot be detected, we fallback to the value of
     * request_alignment.
     */

    if (!bs->bl.request_alignment) {
        int i;
        size_t align;
        buf = qemu_memalign(max_align, max_align);
        for (i = 0; i < ARRAY_SIZE(alignments); i++) {
            align = alignments[i];
            if (raw_is_io_aligned(fd, buf, align)) {
                /* Fallback to safe value. */
                bs->bl.request_alignment = (align != 1) ?
align : max_align; 376 break; 377 } 378 } 379 qemu_vfree(buf); 380 } 381 382 if (!s->buf_align) { 383 int i; 384 size_t align; 385 buf = qemu_memalign(max_align, 2 * max_align); 386 for (i = 0; i < ARRAY_SIZE(alignments); i++) { 387 align = alignments[i]; 388 if (raw_is_io_aligned(fd, buf + align, max_align)) { 389 /* Fallback to request_alignment. */ 390 s->buf_align = (align != 1) ? align : bs->bl.request_alignment; 391 break; 392 } 393 } 394 qemu_vfree(buf); 395 } 396 397 if (!s->buf_align || !bs->bl.request_alignment) { 398 error_setg(errp, "Could not find working O_DIRECT alignment"); 399 error_append_hint(errp, "Try cache.direct=off\n"); 400 } 401 } 402 403 static void raw_parse_flags(int bdrv_flags, int *open_flags, bool has_writers) 404 { 405 bool read_write = false; 406 assert(open_flags != NULL); 407 408 *open_flags |= O_BINARY; 409 *open_flags &= ~O_ACCMODE; 410 411 if (bdrv_flags & BDRV_O_AUTO_RDONLY) { 412 read_write = has_writers; 413 } else if (bdrv_flags & BDRV_O_RDWR) { 414 read_write = true; 415 } 416 417 if (read_write) { 418 *open_flags |= O_RDWR; 419 } else { 420 *open_flags |= O_RDONLY; 421 } 422 423 /* Use O_DSYNC for write-through caching, no flags for write-back caching, 424 * and O_DIRECT for no caching. */ 425 if ((bdrv_flags & BDRV_O_NOCACHE)) { 426 *open_flags |= O_DIRECT; 427 } 428 } 429 430 static void raw_parse_filename(const char *filename, QDict *options, 431 Error **errp) 432 { 433 bdrv_parse_filename_strip_prefix(filename, "file:", options); 434 } 435 436 static QemuOptsList raw_runtime_opts = { 437 .name = "raw", 438 .head = QTAILQ_HEAD_INITIALIZER(raw_runtime_opts.head), 439 .desc = { 440 { 441 .name = "filename", 442 .type = QEMU_OPT_STRING, 443 .help = "File name of the image", 444 }, 445 { 446 .name = "aio", 447 .type = QEMU_OPT_STRING, 448 .help = "host AIO implementation (threads, native, io_uring)", 449 }, 450 { 451 .name = "locking", 452 .type = QEMU_OPT_STRING, 453 .help = "file locking mode (on/off/auto, default: auto)", 454 }, 455 { 456 .name = "pr-manager", 457 .type = QEMU_OPT_STRING, 458 .help = "id of persistent reservation manager object (default: none)", 459 }, 460 #if defined(__linux__) 461 { 462 .name = "drop-cache", 463 .type = QEMU_OPT_BOOL, 464 .help = "invalidate page cache during live migration (default: on)", 465 }, 466 #endif 467 { 468 .name = "x-check-cache-dropped", 469 .type = QEMU_OPT_BOOL, 470 .help = "check that page cache was dropped on live migration (default: off)" 471 }, 472 { /* end of list */ } 473 }, 474 }; 475 476 static const char *const mutable_opts[] = { "x-check-cache-dropped", NULL }; 477 478 static int raw_open_common(BlockDriverState *bs, QDict *options, 479 int bdrv_flags, int open_flags, 480 bool device, Error **errp) 481 { 482 BDRVRawState *s = bs->opaque; 483 QemuOpts *opts; 484 Error *local_err = NULL; 485 const char *filename = NULL; 486 const char *str; 487 BlockdevAioOptions aio, aio_default; 488 int fd, ret; 489 struct stat st; 490 OnOffAuto locking; 491 492 opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort); 493 qemu_opts_absorb_qdict(opts, options, &local_err); 494 if (local_err) { 495 error_propagate(errp, local_err); 496 ret = -EINVAL; 497 goto fail; 498 } 499 500 filename = qemu_opt_get(opts, "filename"); 501 502 ret = raw_normalize_devicepath(&filename, errp); 503 if (ret != 0) { 504 goto fail; 505 } 506 507 if (bdrv_flags & BDRV_O_NATIVE_AIO) { 508 aio_default = BLOCKDEV_AIO_OPTIONS_NATIVE; 509 #ifdef CONFIG_LINUX_IO_URING 510 } else if (bdrv_flags & BDRV_O_IO_URING) { 511 
aio_default = BLOCKDEV_AIO_OPTIONS_IO_URING; 512 #endif 513 } else { 514 aio_default = BLOCKDEV_AIO_OPTIONS_THREADS; 515 } 516 517 aio = qapi_enum_parse(&BlockdevAioOptions_lookup, 518 qemu_opt_get(opts, "aio"), 519 aio_default, &local_err); 520 if (local_err) { 521 error_propagate(errp, local_err); 522 ret = -EINVAL; 523 goto fail; 524 } 525 526 s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE); 527 #ifdef CONFIG_LINUX_IO_URING 528 s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING); 529 #endif 530 531 locking = qapi_enum_parse(&OnOffAuto_lookup, 532 qemu_opt_get(opts, "locking"), 533 ON_OFF_AUTO_AUTO, &local_err); 534 if (local_err) { 535 error_propagate(errp, local_err); 536 ret = -EINVAL; 537 goto fail; 538 } 539 switch (locking) { 540 case ON_OFF_AUTO_ON: 541 s->use_lock = true; 542 if (!qemu_has_ofd_lock()) { 543 warn_report("File lock requested but OFD locking syscall is " 544 "unavailable, falling back to POSIX file locks"); 545 error_printf("Due to the implementation, locks can be lost " 546 "unexpectedly.\n"); 547 } 548 break; 549 case ON_OFF_AUTO_OFF: 550 s->use_lock = false; 551 break; 552 case ON_OFF_AUTO_AUTO: 553 s->use_lock = qemu_has_ofd_lock(); 554 break; 555 default: 556 abort(); 557 } 558 559 str = qemu_opt_get(opts, "pr-manager"); 560 if (str) { 561 s->pr_mgr = pr_manager_lookup(str, &local_err); 562 if (local_err) { 563 error_propagate(errp, local_err); 564 ret = -EINVAL; 565 goto fail; 566 } 567 } 568 569 s->drop_cache = qemu_opt_get_bool(opts, "drop-cache", true); 570 s->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped", 571 false); 572 573 s->open_flags = open_flags; 574 raw_parse_flags(bdrv_flags, &s->open_flags, false); 575 576 s->fd = -1; 577 fd = qemu_open(filename, s->open_flags, 0644); 578 ret = fd < 0 ? 
-errno : 0; 579 580 if (ret < 0) { 581 error_setg_file_open(errp, -ret, filename); 582 if (ret == -EROFS) { 583 ret = -EACCES; 584 } 585 goto fail; 586 } 587 s->fd = fd; 588 589 s->perm = 0; 590 s->shared_perm = BLK_PERM_ALL; 591 592 #ifdef CONFIG_LINUX_AIO 593 /* Currently Linux does AIO only for files opened with O_DIRECT */ 594 if (s->use_linux_aio) { 595 if (!(s->open_flags & O_DIRECT)) { 596 error_setg(errp, "aio=native was specified, but it requires " 597 "cache.direct=on, which was not specified."); 598 ret = -EINVAL; 599 goto fail; 600 } 601 if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) { 602 error_prepend(errp, "Unable to use native AIO: "); 603 goto fail; 604 } 605 } 606 #else 607 if (s->use_linux_aio) { 608 error_setg(errp, "aio=native was specified, but is not supported " 609 "in this build."); 610 ret = -EINVAL; 611 goto fail; 612 } 613 #endif /* !defined(CONFIG_LINUX_AIO) */ 614 615 #ifdef CONFIG_LINUX_IO_URING 616 if (s->use_linux_io_uring) { 617 if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) { 618 error_prepend(errp, "Unable to use io_uring: "); 619 goto fail; 620 } 621 } 622 #else 623 if (s->use_linux_io_uring) { 624 error_setg(errp, "aio=io_uring was specified, but is not supported " 625 "in this build."); 626 ret = -EINVAL; 627 goto fail; 628 } 629 #endif /* !defined(CONFIG_LINUX_IO_URING) */ 630 631 s->has_discard = true; 632 s->has_write_zeroes = true; 633 if ((bs->open_flags & BDRV_O_NOCACHE) != 0) { 634 s->needs_alignment = true; 635 } 636 637 if (fstat(s->fd, &st) < 0) { 638 ret = -errno; 639 error_setg_errno(errp, errno, "Could not stat file"); 640 goto fail; 641 } 642 643 if (!device) { 644 if (S_ISBLK(st.st_mode)) { 645 warn_report("Opening a block device as a file using the '%s' " 646 "driver is deprecated", bs->drv->format_name); 647 } else if (S_ISCHR(st.st_mode)) { 648 warn_report("Opening a character device as a file using the '%s' " 649 "driver is deprecated", bs->drv->format_name); 650 } else if (!S_ISREG(st.st_mode)) { 651 error_setg(errp, "A regular file was expected by the '%s' driver, " 652 "but something else was given", bs->drv->format_name); 653 ret = -EINVAL; 654 goto fail; 655 } else { 656 s->discard_zeroes = true; 657 s->has_fallocate = true; 658 } 659 } else { 660 if (!(S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) { 661 error_setg(errp, "'%s' driver expects either " 662 "a character or block device", bs->drv->format_name); 663 ret = -EINVAL; 664 goto fail; 665 } 666 } 667 668 if (S_ISBLK(st.st_mode)) { 669 #ifdef BLKDISCARDZEROES 670 unsigned int arg; 671 if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) { 672 s->discard_zeroes = true; 673 } 674 #endif 675 #ifdef __linux__ 676 /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do 677 * not rely on the contents of discarded blocks unless using O_DIRECT. 678 * Same for BLKZEROOUT. 679 */ 680 if (!(bs->open_flags & BDRV_O_NOCACHE)) { 681 s->discard_zeroes = false; 682 s->has_write_zeroes = false; 683 } 684 #endif 685 } 686 #ifdef __FreeBSD__ 687 if (S_ISCHR(st.st_mode)) { 688 /* 689 * The file is a char device (disk), which on FreeBSD isn't behind 690 * a pager, so force all requests to be aligned. This is needed 691 * so QEMU makes sure all IO operations on the device are aligned 692 * to sector size, or else FreeBSD will reject them with EINVAL. 
693 */ 694 s->needs_alignment = true; 695 } 696 #endif 697 698 #ifdef CONFIG_XFS 699 if (platform_test_xfs_fd(s->fd)) { 700 s->is_xfs = true; 701 } 702 #endif 703 704 bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK; 705 if (S_ISREG(st.st_mode)) { 706 /* When extending regular files, we get zeros from the OS */ 707 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE; 708 } 709 ret = 0; 710 fail: 711 if (filename && (bdrv_flags & BDRV_O_TEMPORARY)) { 712 unlink(filename); 713 } 714 qemu_opts_del(opts); 715 return ret; 716 } 717 718 static int raw_open(BlockDriverState *bs, QDict *options, int flags, 719 Error **errp) 720 { 721 BDRVRawState *s = bs->opaque; 722 723 s->type = FTYPE_FILE; 724 return raw_open_common(bs, options, flags, 0, false, errp); 725 } 726 727 typedef enum { 728 RAW_PL_PREPARE, 729 RAW_PL_COMMIT, 730 RAW_PL_ABORT, 731 } RawPermLockOp; 732 733 #define PERM_FOREACH(i) \ 734 for ((i) = 0; (1ULL << (i)) <= BLK_PERM_ALL; i++) 735 736 /* Lock bytes indicated by @perm_lock_bits and @shared_perm_lock_bits in the 737 * file; if @unlock == true, also unlock the unneeded bytes. 738 * @shared_perm_lock_bits is the mask of all permissions that are NOT shared. 739 */ 740 static int raw_apply_lock_bytes(BDRVRawState *s, int fd, 741 uint64_t perm_lock_bits, 742 uint64_t shared_perm_lock_bits, 743 bool unlock, Error **errp) 744 { 745 int ret; 746 int i; 747 uint64_t locked_perm, locked_shared_perm; 748 749 if (s) { 750 locked_perm = s->locked_perm; 751 locked_shared_perm = s->locked_shared_perm; 752 } else { 753 /* 754 * We don't have the previous bits, just lock/unlock for each of the 755 * requested bits. 756 */ 757 if (unlock) { 758 locked_perm = BLK_PERM_ALL; 759 locked_shared_perm = BLK_PERM_ALL; 760 } else { 761 locked_perm = 0; 762 locked_shared_perm = 0; 763 } 764 } 765 766 PERM_FOREACH(i) { 767 int off = RAW_LOCK_PERM_BASE + i; 768 uint64_t bit = (1ULL << i); 769 if ((perm_lock_bits & bit) && !(locked_perm & bit)) { 770 ret = qemu_lock_fd(fd, off, 1, false); 771 if (ret) { 772 error_setg(errp, "Failed to lock byte %d", off); 773 return ret; 774 } else if (s) { 775 s->locked_perm |= bit; 776 } 777 } else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) { 778 ret = qemu_unlock_fd(fd, off, 1); 779 if (ret) { 780 error_setg(errp, "Failed to unlock byte %d", off); 781 return ret; 782 } else if (s) { 783 s->locked_perm &= ~bit; 784 } 785 } 786 } 787 PERM_FOREACH(i) { 788 int off = RAW_LOCK_SHARED_BASE + i; 789 uint64_t bit = (1ULL << i); 790 if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) { 791 ret = qemu_lock_fd(fd, off, 1, false); 792 if (ret) { 793 error_setg(errp, "Failed to lock byte %d", off); 794 return ret; 795 } else if (s) { 796 s->locked_shared_perm |= bit; 797 } 798 } else if (unlock && (locked_shared_perm & bit) && 799 !(shared_perm_lock_bits & bit)) { 800 ret = qemu_unlock_fd(fd, off, 1); 801 if (ret) { 802 error_setg(errp, "Failed to unlock byte %d", off); 803 return ret; 804 } else if (s) { 805 s->locked_shared_perm &= ~bit; 806 } 807 } 808 } 809 return 0; 810 } 811 812 /* Check "unshared" bytes implied by @perm and ~@shared_perm in the file. 
*/ 813 static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm, 814 Error **errp) 815 { 816 int ret; 817 int i; 818 819 PERM_FOREACH(i) { 820 int off = RAW_LOCK_SHARED_BASE + i; 821 uint64_t p = 1ULL << i; 822 if (perm & p) { 823 ret = qemu_lock_fd_test(fd, off, 1, true); 824 if (ret) { 825 char *perm_name = bdrv_perm_names(p); 826 error_setg(errp, 827 "Failed to get \"%s\" lock", 828 perm_name); 829 g_free(perm_name); 830 return ret; 831 } 832 } 833 } 834 PERM_FOREACH(i) { 835 int off = RAW_LOCK_PERM_BASE + i; 836 uint64_t p = 1ULL << i; 837 if (!(shared_perm & p)) { 838 ret = qemu_lock_fd_test(fd, off, 1, true); 839 if (ret) { 840 char *perm_name = bdrv_perm_names(p); 841 error_setg(errp, 842 "Failed to get shared \"%s\" lock", 843 perm_name); 844 g_free(perm_name); 845 return ret; 846 } 847 } 848 } 849 return 0; 850 } 851 852 static int raw_handle_perm_lock(BlockDriverState *bs, 853 RawPermLockOp op, 854 uint64_t new_perm, uint64_t new_shared, 855 Error **errp) 856 { 857 BDRVRawState *s = bs->opaque; 858 int ret = 0; 859 Error *local_err = NULL; 860 861 if (!s->use_lock) { 862 return 0; 863 } 864 865 if (bdrv_get_flags(bs) & BDRV_O_INACTIVE) { 866 return 0; 867 } 868 869 switch (op) { 870 case RAW_PL_PREPARE: 871 if ((s->perm | new_perm) == s->perm && 872 (s->shared_perm & new_shared) == s->shared_perm) 873 { 874 /* 875 * We are going to unlock bytes, it should not fail. If it fail due 876 * to some fs-dependent permission-unrelated reasons (which occurs 877 * sometimes on NFS and leads to abort in bdrv_replace_child) we 878 * can't prevent such errors by any check here. And we ignore them 879 * anyway in ABORT and COMMIT. 880 */ 881 return 0; 882 } 883 ret = raw_apply_lock_bytes(s, s->fd, s->perm | new_perm, 884 ~s->shared_perm | ~new_shared, 885 false, errp); 886 if (!ret) { 887 ret = raw_check_lock_bytes(s->fd, new_perm, new_shared, errp); 888 if (!ret) { 889 return 0; 890 } 891 error_append_hint(errp, 892 "Is another process using the image [%s]?\n", 893 bs->filename); 894 } 895 /* fall through to unlock bytes. */ 896 case RAW_PL_ABORT: 897 raw_apply_lock_bytes(s, s->fd, s->perm, ~s->shared_perm, 898 true, &local_err); 899 if (local_err) { 900 /* Theoretically the above call only unlocks bytes and it cannot 901 * fail. Something weird happened, report it. 902 */ 903 warn_report_err(local_err); 904 } 905 break; 906 case RAW_PL_COMMIT: 907 raw_apply_lock_bytes(s, s->fd, new_perm, ~new_shared, 908 true, &local_err); 909 if (local_err) { 910 /* Theoretically the above call only unlocks bytes and it cannot 911 * fail. Something weird happened, report it. 912 */ 913 warn_report_err(local_err); 914 } 915 break; 916 } 917 return ret; 918 } 919 920 static int raw_reconfigure_getfd(BlockDriverState *bs, int flags, 921 int *open_flags, uint64_t perm, bool force_dup, 922 Error **errp) 923 { 924 BDRVRawState *s = bs->opaque; 925 int fd = -1; 926 int ret; 927 bool has_writers = perm & 928 (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_RESIZE); 929 int fcntl_flags = O_APPEND | O_NONBLOCK; 930 #ifdef O_NOATIME 931 fcntl_flags |= O_NOATIME; 932 #endif 933 934 *open_flags = 0; 935 if (s->type == FTYPE_CD) { 936 *open_flags |= O_NONBLOCK; 937 } 938 939 raw_parse_flags(flags, open_flags, has_writers); 940 941 #ifdef O_ASYNC 942 /* Not all operating systems have O_ASYNC, and those that don't 943 * will not let us track the state into rs->open_flags (typically 944 * you achieve the same effect with an ioctl, for example I_SETSIG 945 * on Solaris). 
But we do not use O_ASYNC, so that's fine. 946 */ 947 assert((s->open_flags & O_ASYNC) == 0); 948 #endif 949 950 if (!force_dup && *open_flags == s->open_flags) { 951 /* We're lucky, the existing fd is fine */ 952 return s->fd; 953 } 954 955 if ((*open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) { 956 /* dup the original fd */ 957 fd = qemu_dup(s->fd); 958 if (fd >= 0) { 959 ret = fcntl_setfl(fd, *open_flags); 960 if (ret) { 961 qemu_close(fd); 962 fd = -1; 963 } 964 } 965 } 966 967 /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */ 968 if (fd == -1) { 969 const char *normalized_filename = bs->filename; 970 ret = raw_normalize_devicepath(&normalized_filename, errp); 971 if (ret >= 0) { 972 assert(!(*open_flags & O_CREAT)); 973 fd = qemu_open(normalized_filename, *open_flags); 974 if (fd == -1) { 975 error_setg_errno(errp, errno, "Could not reopen file"); 976 return -1; 977 } 978 } 979 } 980 981 return fd; 982 } 983 984 static int raw_reopen_prepare(BDRVReopenState *state, 985 BlockReopenQueue *queue, Error **errp) 986 { 987 BDRVRawState *s; 988 BDRVRawReopenState *rs; 989 QemuOpts *opts; 990 int ret; 991 Error *local_err = NULL; 992 993 assert(state != NULL); 994 assert(state->bs != NULL); 995 996 s = state->bs->opaque; 997 998 state->opaque = g_new0(BDRVRawReopenState, 1); 999 rs = state->opaque; 1000 1001 /* Handle options changes */ 1002 opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort); 1003 qemu_opts_absorb_qdict(opts, state->options, &local_err); 1004 if (local_err) { 1005 error_propagate(errp, local_err); 1006 ret = -EINVAL; 1007 goto out; 1008 } 1009 1010 rs->drop_cache = qemu_opt_get_bool_del(opts, "drop-cache", true); 1011 rs->check_cache_dropped = 1012 qemu_opt_get_bool_del(opts, "x-check-cache-dropped", false); 1013 1014 /* This driver's reopen function doesn't currently allow changing 1015 * other options, so let's put them back in the original QDict and 1016 * bdrv_reopen_prepare() will detect changes and complain. */ 1017 qemu_opts_to_qdict(opts, state->options); 1018 1019 rs->fd = raw_reconfigure_getfd(state->bs, state->flags, &rs->open_flags, 1020 state->perm, true, &local_err); 1021 if (local_err) { 1022 error_propagate(errp, local_err); 1023 ret = -1; 1024 goto out; 1025 } 1026 1027 /* Fail already reopen_prepare() if we can't get a working O_DIRECT 1028 * alignment with the new fd. 
*/ 1029 if (rs->fd != -1) { 1030 raw_probe_alignment(state->bs, rs->fd, &local_err); 1031 if (local_err) { 1032 error_propagate(errp, local_err); 1033 ret = -EINVAL; 1034 goto out_fd; 1035 } 1036 } 1037 1038 s->reopen_state = state; 1039 ret = 0; 1040 out_fd: 1041 if (ret < 0) { 1042 qemu_close(rs->fd); 1043 rs->fd = -1; 1044 } 1045 out: 1046 qemu_opts_del(opts); 1047 return ret; 1048 } 1049 1050 static void raw_reopen_commit(BDRVReopenState *state) 1051 { 1052 BDRVRawReopenState *rs = state->opaque; 1053 BDRVRawState *s = state->bs->opaque; 1054 1055 s->drop_cache = rs->drop_cache; 1056 s->check_cache_dropped = rs->check_cache_dropped; 1057 s->open_flags = rs->open_flags; 1058 1059 qemu_close(s->fd); 1060 s->fd = rs->fd; 1061 1062 g_free(state->opaque); 1063 state->opaque = NULL; 1064 1065 assert(s->reopen_state == state); 1066 s->reopen_state = NULL; 1067 } 1068 1069 1070 static void raw_reopen_abort(BDRVReopenState *state) 1071 { 1072 BDRVRawReopenState *rs = state->opaque; 1073 BDRVRawState *s = state->bs->opaque; 1074 1075 /* nothing to do if NULL, we didn't get far enough */ 1076 if (rs == NULL) { 1077 return; 1078 } 1079 1080 if (rs->fd >= 0) { 1081 qemu_close(rs->fd); 1082 rs->fd = -1; 1083 } 1084 g_free(state->opaque); 1085 state->opaque = NULL; 1086 1087 assert(s->reopen_state == state); 1088 s->reopen_state = NULL; 1089 } 1090 1091 static int sg_get_max_transfer_length(int fd) 1092 { 1093 #ifdef BLKSECTGET 1094 int max_bytes = 0; 1095 1096 if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) { 1097 return max_bytes; 1098 } else { 1099 return -errno; 1100 } 1101 #else 1102 return -ENOSYS; 1103 #endif 1104 } 1105 1106 static int sg_get_max_segments(int fd) 1107 { 1108 #ifdef CONFIG_LINUX 1109 char buf[32]; 1110 const char *end; 1111 char *sysfspath = NULL; 1112 int ret; 1113 int sysfd = -1; 1114 long max_segments; 1115 struct stat st; 1116 1117 if (fstat(fd, &st)) { 1118 ret = -errno; 1119 goto out; 1120 } 1121 1122 sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments", 1123 major(st.st_rdev), minor(st.st_rdev)); 1124 sysfd = open(sysfspath, O_RDONLY); 1125 if (sysfd == -1) { 1126 ret = -errno; 1127 goto out; 1128 } 1129 do { 1130 ret = read(sysfd, buf, sizeof(buf) - 1); 1131 } while (ret == -1 && errno == EINTR); 1132 if (ret < 0) { 1133 ret = -errno; 1134 goto out; 1135 } else if (ret == 0) { 1136 ret = -EIO; 1137 goto out; 1138 } 1139 buf[ret] = 0; 1140 /* The file is ended with '\n', pass 'end' to accept that. 
*/ 1141 ret = qemu_strtol(buf, &end, 10, &max_segments); 1142 if (ret == 0 && end && *end == '\n') { 1143 ret = max_segments; 1144 } 1145 1146 out: 1147 if (sysfd != -1) { 1148 close(sysfd); 1149 } 1150 g_free(sysfspath); 1151 return ret; 1152 #else 1153 return -ENOTSUP; 1154 #endif 1155 } 1156 1157 static void raw_refresh_limits(BlockDriverState *bs, Error **errp) 1158 { 1159 BDRVRawState *s = bs->opaque; 1160 1161 if (bs->sg) { 1162 int ret = sg_get_max_transfer_length(s->fd); 1163 1164 if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) { 1165 bs->bl.max_transfer = pow2floor(ret); 1166 } 1167 1168 ret = sg_get_max_segments(s->fd); 1169 if (ret > 0) { 1170 bs->bl.max_transfer = MIN(bs->bl.max_transfer, 1171 ret * qemu_real_host_page_size); 1172 } 1173 } 1174 1175 raw_probe_alignment(bs, s->fd, errp); 1176 bs->bl.min_mem_alignment = s->buf_align; 1177 bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size); 1178 } 1179 1180 static int check_for_dasd(int fd) 1181 { 1182 #ifdef BIODASDINFO2 1183 struct dasd_information2_t info = {0}; 1184 1185 return ioctl(fd, BIODASDINFO2, &info); 1186 #else 1187 return -1; 1188 #endif 1189 } 1190 1191 /** 1192 * Try to get @bs's logical and physical block size. 1193 * On success, store them in @bsz and return zero. 1194 * On failure, return negative errno. 1195 */ 1196 static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz) 1197 { 1198 BDRVRawState *s = bs->opaque; 1199 int ret; 1200 1201 /* If DASD, get blocksizes */ 1202 if (check_for_dasd(s->fd) < 0) { 1203 return -ENOTSUP; 1204 } 1205 ret = probe_logical_blocksize(s->fd, &bsz->log); 1206 if (ret < 0) { 1207 return ret; 1208 } 1209 return probe_physical_blocksize(s->fd, &bsz->phys); 1210 } 1211 1212 /** 1213 * Try to get @bs's geometry: cyls, heads, sectors. 1214 * On success, store them in @geo and return 0. 1215 * On failure return -errno. 1216 * (Allows block driver to assign default geometry values that guest sees) 1217 */ 1218 #ifdef __linux__ 1219 static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo) 1220 { 1221 BDRVRawState *s = bs->opaque; 1222 struct hd_geometry ioctl_geo = {0}; 1223 1224 /* If DASD, get its geometry */ 1225 if (check_for_dasd(s->fd) < 0) { 1226 return -ENOTSUP; 1227 } 1228 if (ioctl(s->fd, HDIO_GETGEO, &ioctl_geo) < 0) { 1229 return -errno; 1230 } 1231 /* HDIO_GETGEO may return success even though geo contains zeros 1232 (e.g. 
certain multipath setups) */ 1233 if (!ioctl_geo.heads || !ioctl_geo.sectors || !ioctl_geo.cylinders) { 1234 return -ENOTSUP; 1235 } 1236 /* Do not return a geometry for partition */ 1237 if (ioctl_geo.start != 0) { 1238 return -ENOTSUP; 1239 } 1240 geo->heads = ioctl_geo.heads; 1241 geo->sectors = ioctl_geo.sectors; 1242 geo->cylinders = ioctl_geo.cylinders; 1243 1244 return 0; 1245 } 1246 #else /* __linux__ */ 1247 static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo) 1248 { 1249 return -ENOTSUP; 1250 } 1251 #endif 1252 1253 #if defined(__linux__) 1254 static int handle_aiocb_ioctl(void *opaque) 1255 { 1256 RawPosixAIOData *aiocb = opaque; 1257 int ret; 1258 1259 ret = ioctl(aiocb->aio_fildes, aiocb->ioctl.cmd, aiocb->ioctl.buf); 1260 if (ret == -1) { 1261 return -errno; 1262 } 1263 1264 return 0; 1265 } 1266 #endif /* linux */ 1267 1268 static int handle_aiocb_flush(void *opaque) 1269 { 1270 RawPosixAIOData *aiocb = opaque; 1271 BDRVRawState *s = aiocb->bs->opaque; 1272 int ret; 1273 1274 if (s->page_cache_inconsistent) { 1275 return -EIO; 1276 } 1277 1278 ret = qemu_fdatasync(aiocb->aio_fildes); 1279 if (ret == -1) { 1280 /* There is no clear definition of the semantics of a failing fsync(), 1281 * so we may have to assume the worst. The sad truth is that this 1282 * assumption is correct for Linux. Some pages are now probably marked 1283 * clean in the page cache even though they are inconsistent with the 1284 * on-disk contents. The next fdatasync() call would succeed, but no 1285 * further writeback attempt will be made. We can't get back to a state 1286 * in which we know what is on disk (we would have to rewrite 1287 * everything that was touched since the last fdatasync() at least), so 1288 * make bdrv_flush() fail permanently. Given that the behaviour isn't 1289 * really defined, I have little hope that other OSes are doing better. 1290 * 1291 * Obviously, this doesn't affect O_DIRECT, which bypasses the page 1292 * cache. */ 1293 if ((s->open_flags & O_DIRECT) == 0) { 1294 s->page_cache_inconsistent = true; 1295 } 1296 return -errno; 1297 } 1298 return 0; 1299 } 1300 1301 #ifdef CONFIG_PREADV 1302 1303 static bool preadv_present = true; 1304 1305 static ssize_t 1306 qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset) 1307 { 1308 return preadv(fd, iov, nr_iov, offset); 1309 } 1310 1311 static ssize_t 1312 qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset) 1313 { 1314 return pwritev(fd, iov, nr_iov, offset); 1315 } 1316 1317 #else 1318 1319 static bool preadv_present = false; 1320 1321 static ssize_t 1322 qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset) 1323 { 1324 return -ENOSYS; 1325 } 1326 1327 static ssize_t 1328 qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset) 1329 { 1330 return -ENOSYS; 1331 } 1332 1333 #endif 1334 1335 static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb) 1336 { 1337 ssize_t len; 1338 1339 do { 1340 if (aiocb->aio_type & QEMU_AIO_WRITE) 1341 len = qemu_pwritev(aiocb->aio_fildes, 1342 aiocb->io.iov, 1343 aiocb->io.niov, 1344 aiocb->aio_offset); 1345 else 1346 len = qemu_preadv(aiocb->aio_fildes, 1347 aiocb->io.iov, 1348 aiocb->io.niov, 1349 aiocb->aio_offset); 1350 } while (len == -1 && errno == EINTR); 1351 1352 if (len == -1) { 1353 return -errno; 1354 } 1355 return len; 1356 } 1357 1358 /* 1359 * Read/writes the data to/from a given linear buffer. 1360 * 1361 * Returns the number of bytes handles or -errno in case of an error. 
Short 1362 * reads are only returned if the end of the file is reached. 1363 */ 1364 static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf) 1365 { 1366 ssize_t offset = 0; 1367 ssize_t len; 1368 1369 while (offset < aiocb->aio_nbytes) { 1370 if (aiocb->aio_type & QEMU_AIO_WRITE) { 1371 len = pwrite(aiocb->aio_fildes, 1372 (const char *)buf + offset, 1373 aiocb->aio_nbytes - offset, 1374 aiocb->aio_offset + offset); 1375 } else { 1376 len = pread(aiocb->aio_fildes, 1377 buf + offset, 1378 aiocb->aio_nbytes - offset, 1379 aiocb->aio_offset + offset); 1380 } 1381 if (len == -1 && errno == EINTR) { 1382 continue; 1383 } else if (len == -1 && errno == EINVAL && 1384 (aiocb->bs->open_flags & BDRV_O_NOCACHE) && 1385 !(aiocb->aio_type & QEMU_AIO_WRITE) && 1386 offset > 0) { 1387 /* O_DIRECT pread() may fail with EINVAL when offset is unaligned 1388 * after a short read. Assume that O_DIRECT short reads only occur 1389 * at EOF. Therefore this is a short read, not an I/O error. 1390 */ 1391 break; 1392 } else if (len == -1) { 1393 offset = -errno; 1394 break; 1395 } else if (len == 0) { 1396 break; 1397 } 1398 offset += len; 1399 } 1400 1401 return offset; 1402 } 1403 1404 static int handle_aiocb_rw(void *opaque) 1405 { 1406 RawPosixAIOData *aiocb = opaque; 1407 ssize_t nbytes; 1408 char *buf; 1409 1410 if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) { 1411 /* 1412 * If there is just a single buffer, and it is properly aligned 1413 * we can just use plain pread/pwrite without any problems. 1414 */ 1415 if (aiocb->io.niov == 1) { 1416 nbytes = handle_aiocb_rw_linear(aiocb, aiocb->io.iov->iov_base); 1417 goto out; 1418 } 1419 /* 1420 * We have more than one iovec, and all are properly aligned. 1421 * 1422 * Try preadv/pwritev first and fall back to linearizing the 1423 * buffer if it's not supported. 1424 */ 1425 if (preadv_present) { 1426 nbytes = handle_aiocb_rw_vector(aiocb); 1427 if (nbytes == aiocb->aio_nbytes || 1428 (nbytes < 0 && nbytes != -ENOSYS)) { 1429 goto out; 1430 } 1431 preadv_present = false; 1432 } 1433 1434 /* 1435 * XXX(hch): short read/write. no easy way to handle the reminder 1436 * using these interfaces. For now retry using plain 1437 * pread/pwrite? 1438 */ 1439 } 1440 1441 /* 1442 * Ok, we have to do it the hard way, copy all segments into 1443 * a single aligned buffer. 
1444 */ 1445 buf = qemu_try_blockalign(aiocb->bs, aiocb->aio_nbytes); 1446 if (buf == NULL) { 1447 nbytes = -ENOMEM; 1448 goto out; 1449 } 1450 1451 if (aiocb->aio_type & QEMU_AIO_WRITE) { 1452 char *p = buf; 1453 int i; 1454 1455 for (i = 0; i < aiocb->io.niov; ++i) { 1456 memcpy(p, aiocb->io.iov[i].iov_base, aiocb->io.iov[i].iov_len); 1457 p += aiocb->io.iov[i].iov_len; 1458 } 1459 assert(p - buf == aiocb->aio_nbytes); 1460 } 1461 1462 nbytes = handle_aiocb_rw_linear(aiocb, buf); 1463 if (!(aiocb->aio_type & QEMU_AIO_WRITE)) { 1464 char *p = buf; 1465 size_t count = aiocb->aio_nbytes, copy; 1466 int i; 1467 1468 for (i = 0; i < aiocb->io.niov && count; ++i) { 1469 copy = count; 1470 if (copy > aiocb->io.iov[i].iov_len) { 1471 copy = aiocb->io.iov[i].iov_len; 1472 } 1473 memcpy(aiocb->io.iov[i].iov_base, p, copy); 1474 assert(count >= copy); 1475 p += copy; 1476 count -= copy; 1477 } 1478 assert(count == 0); 1479 } 1480 qemu_vfree(buf); 1481 1482 out: 1483 if (nbytes == aiocb->aio_nbytes) { 1484 return 0; 1485 } else if (nbytes >= 0 && nbytes < aiocb->aio_nbytes) { 1486 if (aiocb->aio_type & QEMU_AIO_WRITE) { 1487 return -EINVAL; 1488 } else { 1489 iov_memset(aiocb->io.iov, aiocb->io.niov, nbytes, 1490 0, aiocb->aio_nbytes - nbytes); 1491 return 0; 1492 } 1493 } else { 1494 assert(nbytes < 0); 1495 return nbytes; 1496 } 1497 } 1498 1499 static int translate_err(int err) 1500 { 1501 if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP || 1502 err == -ENOTTY) { 1503 err = -ENOTSUP; 1504 } 1505 return err; 1506 } 1507 1508 #ifdef CONFIG_FALLOCATE 1509 static int do_fallocate(int fd, int mode, off_t offset, off_t len) 1510 { 1511 do { 1512 if (fallocate(fd, mode, offset, len) == 0) { 1513 return 0; 1514 } 1515 } while (errno == EINTR); 1516 return translate_err(-errno); 1517 } 1518 #endif 1519 1520 static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb) 1521 { 1522 int ret = -ENOTSUP; 1523 BDRVRawState *s = aiocb->bs->opaque; 1524 1525 if (!s->has_write_zeroes) { 1526 return -ENOTSUP; 1527 } 1528 1529 #ifdef BLKZEROOUT 1530 /* The BLKZEROOUT implementation in the kernel doesn't set 1531 * BLKDEV_ZERO_NOFALLBACK, so we can't call this if we have to avoid slow 1532 * fallbacks. */ 1533 if (!(aiocb->aio_type & QEMU_AIO_NO_FALLBACK)) { 1534 do { 1535 uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes }; 1536 if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) { 1537 return 0; 1538 } 1539 } while (errno == EINTR); 1540 1541 ret = translate_err(-errno); 1542 if (ret == -ENOTSUP) { 1543 s->has_write_zeroes = false; 1544 } 1545 } 1546 #endif 1547 1548 return ret; 1549 } 1550 1551 static int handle_aiocb_write_zeroes(void *opaque) 1552 { 1553 RawPosixAIOData *aiocb = opaque; 1554 #ifdef CONFIG_FALLOCATE 1555 BDRVRawState *s = aiocb->bs->opaque; 1556 int64_t len; 1557 #endif 1558 1559 if (aiocb->aio_type & QEMU_AIO_BLKDEV) { 1560 return handle_aiocb_write_zeroes_block(aiocb); 1561 } 1562 1563 #ifdef CONFIG_FALLOCATE_ZERO_RANGE 1564 if (s->has_write_zeroes) { 1565 int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE, 1566 aiocb->aio_offset, aiocb->aio_nbytes); 1567 if (ret == -EINVAL) { 1568 /* 1569 * Allow falling back to pwrite for file systems that 1570 * do not support fallocate() for an unaligned byte range. 
1571 */ 1572 return -ENOTSUP; 1573 } 1574 if (ret == 0 || ret != -ENOTSUP) { 1575 return ret; 1576 } 1577 s->has_write_zeroes = false; 1578 } 1579 #endif 1580 1581 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 1582 if (s->has_discard && s->has_fallocate) { 1583 int ret = do_fallocate(s->fd, 1584 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 1585 aiocb->aio_offset, aiocb->aio_nbytes); 1586 if (ret == 0) { 1587 ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); 1588 if (ret == 0 || ret != -ENOTSUP) { 1589 return ret; 1590 } 1591 s->has_fallocate = false; 1592 } else if (ret != -ENOTSUP) { 1593 return ret; 1594 } else { 1595 s->has_discard = false; 1596 } 1597 } 1598 #endif 1599 1600 #ifdef CONFIG_FALLOCATE 1601 /* Last resort: we are trying to extend the file with zeroed data. This 1602 * can be done via fallocate(fd, 0) */ 1603 len = bdrv_getlength(aiocb->bs); 1604 if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) { 1605 int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); 1606 if (ret == 0 || ret != -ENOTSUP) { 1607 return ret; 1608 } 1609 s->has_fallocate = false; 1610 } 1611 #endif 1612 1613 return -ENOTSUP; 1614 } 1615 1616 static int handle_aiocb_write_zeroes_unmap(void *opaque) 1617 { 1618 RawPosixAIOData *aiocb = opaque; 1619 BDRVRawState *s G_GNUC_UNUSED = aiocb->bs->opaque; 1620 int ret; 1621 1622 /* First try to write zeros and unmap at the same time */ 1623 1624 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 1625 ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 1626 aiocb->aio_offset, aiocb->aio_nbytes); 1627 if (ret != -ENOTSUP) { 1628 return ret; 1629 } 1630 #endif 1631 1632 /* If we couldn't manage to unmap while guaranteed that the area reads as 1633 * all-zero afterwards, just write zeroes without unmapping */ 1634 ret = handle_aiocb_write_zeroes(aiocb); 1635 return ret; 1636 } 1637 1638 #ifndef HAVE_COPY_FILE_RANGE 1639 static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd, 1640 off_t *out_off, size_t len, unsigned int flags) 1641 { 1642 #ifdef __NR_copy_file_range 1643 return syscall(__NR_copy_file_range, in_fd, in_off, out_fd, 1644 out_off, len, flags); 1645 #else 1646 errno = ENOSYS; 1647 return -1; 1648 #endif 1649 } 1650 #endif 1651 1652 static int handle_aiocb_copy_range(void *opaque) 1653 { 1654 RawPosixAIOData *aiocb = opaque; 1655 uint64_t bytes = aiocb->aio_nbytes; 1656 off_t in_off = aiocb->aio_offset; 1657 off_t out_off = aiocb->copy_range.aio_offset2; 1658 1659 while (bytes) { 1660 ssize_t ret = copy_file_range(aiocb->aio_fildes, &in_off, 1661 aiocb->copy_range.aio_fd2, &out_off, 1662 bytes, 0); 1663 trace_file_copy_file_range(aiocb->bs, aiocb->aio_fildes, in_off, 1664 aiocb->copy_range.aio_fd2, out_off, bytes, 1665 0, ret); 1666 if (ret == 0) { 1667 /* No progress (e.g. when beyond EOF), let the caller fall back to 1668 * buffer I/O. 
*/ 1669 return -ENOSPC; 1670 } 1671 if (ret < 0) { 1672 switch (errno) { 1673 case ENOSYS: 1674 return -ENOTSUP; 1675 case EINTR: 1676 continue; 1677 default: 1678 return -errno; 1679 } 1680 } 1681 bytes -= ret; 1682 } 1683 return 0; 1684 } 1685 1686 static int handle_aiocb_discard(void *opaque) 1687 { 1688 RawPosixAIOData *aiocb = opaque; 1689 int ret = -EOPNOTSUPP; 1690 BDRVRawState *s = aiocb->bs->opaque; 1691 1692 if (!s->has_discard) { 1693 return -ENOTSUP; 1694 } 1695 1696 if (aiocb->aio_type & QEMU_AIO_BLKDEV) { 1697 #ifdef BLKDISCARD 1698 do { 1699 uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes }; 1700 if (ioctl(aiocb->aio_fildes, BLKDISCARD, range) == 0) { 1701 return 0; 1702 } 1703 } while (errno == EINTR); 1704 1705 ret = -errno; 1706 #endif 1707 } else { 1708 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 1709 ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 1710 aiocb->aio_offset, aiocb->aio_nbytes); 1711 #endif 1712 } 1713 1714 ret = translate_err(ret); 1715 if (ret == -ENOTSUP) { 1716 s->has_discard = false; 1717 } 1718 return ret; 1719 } 1720 1721 /* 1722 * Help alignment probing by allocating the first block. 1723 * 1724 * When reading with direct I/O from unallocated area on Gluster backed by XFS, 1725 * reading succeeds regardless of request length. In this case we fallback to 1726 * safe alignment which is not optimal. Allocating the first block avoids this 1727 * fallback. 1728 * 1729 * fd may be opened with O_DIRECT, but we don't know the buffer alignment or 1730 * request alignment, so we use safe values. 1731 * 1732 * Returns: 0 on success, -errno on failure. Since this is an optimization, 1733 * caller may ignore failures. 1734 */ 1735 static int allocate_first_block(int fd, size_t max_size) 1736 { 1737 size_t write_size = (max_size < MAX_BLOCKSIZE) 1738 ? BDRV_SECTOR_SIZE 1739 : MAX_BLOCKSIZE; 1740 size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size); 1741 void *buf; 1742 ssize_t n; 1743 int ret; 1744 1745 buf = qemu_memalign(max_align, write_size); 1746 memset(buf, 0, write_size); 1747 1748 do { 1749 n = pwrite(fd, buf, write_size, 0); 1750 } while (n == -1 && errno == EINTR); 1751 1752 ret = (n == -1) ? -errno : 0; 1753 1754 qemu_vfree(buf); 1755 return ret; 1756 } 1757 1758 static int handle_aiocb_truncate(void *opaque) 1759 { 1760 RawPosixAIOData *aiocb = opaque; 1761 int result = 0; 1762 int64_t current_length = 0; 1763 char *buf = NULL; 1764 struct stat st; 1765 int fd = aiocb->aio_fildes; 1766 int64_t offset = aiocb->aio_offset; 1767 PreallocMode prealloc = aiocb->truncate.prealloc; 1768 Error **errp = aiocb->truncate.errp; 1769 1770 if (fstat(fd, &st) < 0) { 1771 result = -errno; 1772 error_setg_errno(errp, -result, "Could not stat file"); 1773 return result; 1774 } 1775 1776 current_length = st.st_size; 1777 if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { 1778 error_setg(errp, "Cannot use preallocation for shrinking files"); 1779 return -ENOTSUP; 1780 } 1781 1782 switch (prealloc) { 1783 #ifdef CONFIG_POSIX_FALLOCATE 1784 case PREALLOC_MODE_FALLOC: 1785 /* 1786 * Truncating before posix_fallocate() makes it about twice slower on 1787 * file systems that do not support fallocate(), trying to check if a 1788 * block is allocated before allocating it, so don't do that here. 1789 */ 1790 if (offset != current_length) { 1791 result = -posix_fallocate(fd, current_length, 1792 offset - current_length); 1793 if (result != 0) { 1794 /* posix_fallocate() doesn't set errno. 
*/ 1795 error_setg_errno(errp, -result, 1796 "Could not preallocate new data"); 1797 } else if (current_length == 0) { 1798 /* 1799 * posix_fallocate() uses fallocate() if the filesystem 1800 * supports it, or fallback to manually writing zeroes. If 1801 * fallocate() was used, unaligned reads from the fallocated 1802 * area in raw_probe_alignment() will succeed, hence we need to 1803 * allocate the first block. 1804 * 1805 * Optimize future alignment probing; ignore failures. 1806 */ 1807 allocate_first_block(fd, offset); 1808 } 1809 } else { 1810 result = 0; 1811 } 1812 goto out; 1813 #endif 1814 case PREALLOC_MODE_FULL: 1815 { 1816 int64_t num = 0, left = offset - current_length; 1817 off_t seek_result; 1818 1819 /* 1820 * Knowing the final size from the beginning could allow the file 1821 * system driver to do less allocations and possibly avoid 1822 * fragmentation of the file. 1823 */ 1824 if (ftruncate(fd, offset) != 0) { 1825 result = -errno; 1826 error_setg_errno(errp, -result, "Could not resize file"); 1827 goto out; 1828 } 1829 1830 buf = g_malloc0(65536); 1831 1832 seek_result = lseek(fd, current_length, SEEK_SET); 1833 if (seek_result < 0) { 1834 result = -errno; 1835 error_setg_errno(errp, -result, 1836 "Failed to seek to the old end of file"); 1837 goto out; 1838 } 1839 1840 while (left > 0) { 1841 num = MIN(left, 65536); 1842 result = write(fd, buf, num); 1843 if (result < 0) { 1844 if (errno == EINTR) { 1845 continue; 1846 } 1847 result = -errno; 1848 error_setg_errno(errp, -result, 1849 "Could not write zeros for preallocation"); 1850 goto out; 1851 } 1852 left -= result; 1853 } 1854 if (result >= 0) { 1855 result = fsync(fd); 1856 if (result < 0) { 1857 result = -errno; 1858 error_setg_errno(errp, -result, 1859 "Could not flush file to disk"); 1860 goto out; 1861 } 1862 } 1863 goto out; 1864 } 1865 case PREALLOC_MODE_OFF: 1866 if (ftruncate(fd, offset) != 0) { 1867 result = -errno; 1868 error_setg_errno(errp, -result, "Could not resize file"); 1869 } else if (current_length == 0 && offset > current_length) { 1870 /* Optimize future alignment probing; ignore failures. */ 1871 allocate_first_block(fd, offset); 1872 } 1873 return result; 1874 default: 1875 result = -ENOTSUP; 1876 error_setg(errp, "Unsupported preallocation mode: %s", 1877 PreallocMode_str(prealloc)); 1878 return result; 1879 } 1880 1881 out: 1882 if (result < 0) { 1883 if (ftruncate(fd, current_length) < 0) { 1884 error_report("Failed to restore old file length: %s", 1885 strerror(errno)); 1886 } 1887 } 1888 1889 g_free(buf); 1890 return result; 1891 } 1892 1893 static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs, 1894 ThreadPoolFunc func, void *arg) 1895 { 1896 /* @bs can be NULL, bdrv_get_aio_context() returns the main context then */ 1897 ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); 1898 return thread_pool_submit_co(pool, func, arg); 1899 } 1900 1901 static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, 1902 uint64_t bytes, QEMUIOVector *qiov, int type) 1903 { 1904 BDRVRawState *s = bs->opaque; 1905 RawPosixAIOData acb; 1906 1907 if (fd_open(bs) < 0) 1908 return -EIO; 1909 1910 /* 1911 * When using O_DIRECT, the request must be aligned to be able to use 1912 * either libaio or io_uring interface. If not fail back to regular thread 1913 * pool read/write code which emulates this for us if we 1914 * set QEMU_AIO_MISALIGNED. 
1915 */ 1916 if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) { 1917 type |= QEMU_AIO_MISALIGNED; 1918 #ifdef CONFIG_LINUX_IO_URING 1919 } else if (s->use_linux_io_uring) { 1920 LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); 1921 assert(qiov->size == bytes); 1922 return luring_co_submit(bs, aio, s->fd, offset, qiov, type); 1923 #endif 1924 #ifdef CONFIG_LINUX_AIO 1925 } else if (s->use_linux_aio) { 1926 LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); 1927 assert(qiov->size == bytes); 1928 return laio_co_submit(bs, aio, s->fd, offset, qiov, type); 1929 #endif 1930 } 1931 1932 acb = (RawPosixAIOData) { 1933 .bs = bs, 1934 .aio_fildes = s->fd, 1935 .aio_type = type, 1936 .aio_offset = offset, 1937 .aio_nbytes = bytes, 1938 .io = { 1939 .iov = qiov->iov, 1940 .niov = qiov->niov, 1941 }, 1942 }; 1943 1944 assert(qiov->size == bytes); 1945 return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb); 1946 } 1947 1948 static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, 1949 uint64_t bytes, QEMUIOVector *qiov, 1950 int flags) 1951 { 1952 return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ); 1953 } 1954 1955 static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, 1956 uint64_t bytes, QEMUIOVector *qiov, 1957 int flags) 1958 { 1959 assert(flags == 0); 1960 return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE); 1961 } 1962 1963 static void raw_aio_plug(BlockDriverState *bs) 1964 { 1965 BDRVRawState __attribute__((unused)) *s = bs->opaque; 1966 #ifdef CONFIG_LINUX_AIO 1967 if (s->use_linux_aio) { 1968 LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); 1969 laio_io_plug(bs, aio); 1970 } 1971 #endif 1972 #ifdef CONFIG_LINUX_IO_URING 1973 if (s->use_linux_io_uring) { 1974 LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); 1975 luring_io_plug(bs, aio); 1976 } 1977 #endif 1978 } 1979 1980 static void raw_aio_unplug(BlockDriverState *bs) 1981 { 1982 BDRVRawState __attribute__((unused)) *s = bs->opaque; 1983 #ifdef CONFIG_LINUX_AIO 1984 if (s->use_linux_aio) { 1985 LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); 1986 laio_io_unplug(bs, aio); 1987 } 1988 #endif 1989 #ifdef CONFIG_LINUX_IO_URING 1990 if (s->use_linux_io_uring) { 1991 LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); 1992 luring_io_unplug(bs, aio); 1993 } 1994 #endif 1995 } 1996 1997 static int raw_co_flush_to_disk(BlockDriverState *bs) 1998 { 1999 BDRVRawState *s = bs->opaque; 2000 RawPosixAIOData acb; 2001 int ret; 2002 2003 ret = fd_open(bs); 2004 if (ret < 0) { 2005 return ret; 2006 } 2007 2008 acb = (RawPosixAIOData) { 2009 .bs = bs, 2010 .aio_fildes = s->fd, 2011 .aio_type = QEMU_AIO_FLUSH, 2012 }; 2013 2014 #ifdef CONFIG_LINUX_IO_URING 2015 if (s->use_linux_io_uring) { 2016 LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); 2017 return luring_co_submit(bs, aio, s->fd, 0, NULL, QEMU_AIO_FLUSH); 2018 } 2019 #endif 2020 return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb); 2021 } 2022 2023 static void raw_aio_attach_aio_context(BlockDriverState *bs, 2024 AioContext *new_context) 2025 { 2026 BDRVRawState __attribute__((unused)) *s = bs->opaque; 2027 #ifdef CONFIG_LINUX_AIO 2028 if (s->use_linux_aio) { 2029 Error *local_err = NULL; 2030 if (!aio_setup_linux_aio(new_context, &local_err)) { 2031 error_reportf_err(local_err, "Unable to use native AIO, " 2032 "falling back to thread pool: "); 2033 s->use_linux_aio = false; 2034 } 2035 } 2036 #endif 
2037 #ifdef CONFIG_LINUX_IO_URING 2038 if (s->use_linux_io_uring) { 2039 Error *local_err; 2040 if (!aio_setup_linux_io_uring(new_context, &local_err)) { 2041 error_reportf_err(local_err, "Unable to use linux io_uring, " 2042 "falling back to thread pool: "); 2043 s->use_linux_io_uring = false; 2044 } 2045 } 2046 #endif 2047 } 2048 2049 static void raw_close(BlockDriverState *bs) 2050 { 2051 BDRVRawState *s = bs->opaque; 2052 2053 if (s->fd >= 0) { 2054 qemu_close(s->fd); 2055 s->fd = -1; 2056 } 2057 } 2058 2059 /** 2060 * Truncates the given regular file @fd to @offset and, when growing, fills the 2061 * new space according to @prealloc. 2062 * 2063 * Returns: 0 on success, -errno on failure. 2064 */ 2065 static int coroutine_fn 2066 raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset, 2067 PreallocMode prealloc, Error **errp) 2068 { 2069 RawPosixAIOData acb; 2070 2071 acb = (RawPosixAIOData) { 2072 .bs = bs, 2073 .aio_fildes = fd, 2074 .aio_type = QEMU_AIO_TRUNCATE, 2075 .aio_offset = offset, 2076 .truncate = { 2077 .prealloc = prealloc, 2078 .errp = errp, 2079 }, 2080 }; 2081 2082 return raw_thread_pool_submit(bs, handle_aiocb_truncate, &acb); 2083 } 2084 2085 static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset, 2086 bool exact, PreallocMode prealloc, 2087 BdrvRequestFlags flags, Error **errp) 2088 { 2089 BDRVRawState *s = bs->opaque; 2090 struct stat st; 2091 int ret; 2092 2093 if (fstat(s->fd, &st)) { 2094 ret = -errno; 2095 error_setg_errno(errp, -ret, "Failed to fstat() the file"); 2096 return ret; 2097 } 2098 2099 if (S_ISREG(st.st_mode)) { 2100 /* Always resizes to the exact @offset */ 2101 return raw_regular_truncate(bs, s->fd, offset, prealloc, errp); 2102 } 2103 2104 if (prealloc != PREALLOC_MODE_OFF) { 2105 error_setg(errp, "Preallocation mode '%s' unsupported for this " 2106 "non-regular file", PreallocMode_str(prealloc)); 2107 return -ENOTSUP; 2108 } 2109 2110 if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 2111 int64_t cur_length = raw_getlength(bs); 2112 2113 if (offset != cur_length && exact) { 2114 error_setg(errp, "Cannot resize device files"); 2115 return -ENOTSUP; 2116 } else if (offset > cur_length) { 2117 error_setg(errp, "Cannot grow device files"); 2118 return -EINVAL; 2119 } 2120 } else { 2121 error_setg(errp, "Resizing this file is not supported"); 2122 return -ENOTSUP; 2123 } 2124 2125 return 0; 2126 } 2127 2128 #ifdef __OpenBSD__ 2129 static int64_t raw_getlength(BlockDriverState *bs) 2130 { 2131 BDRVRawState *s = bs->opaque; 2132 int fd = s->fd; 2133 struct stat st; 2134 2135 if (fstat(fd, &st)) 2136 return -errno; 2137 if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 2138 struct disklabel dl; 2139 2140 if (ioctl(fd, DIOCGDINFO, &dl)) 2141 return -errno; 2142 return (uint64_t)dl.d_secsize * 2143 dl.d_partitions[DISKPART(st.st_rdev)].p_size; 2144 } else 2145 return st.st_size; 2146 } 2147 #elif defined(__NetBSD__) 2148 static int64_t raw_getlength(BlockDriverState *bs) 2149 { 2150 BDRVRawState *s = bs->opaque; 2151 int fd = s->fd; 2152 struct stat st; 2153 2154 if (fstat(fd, &st)) 2155 return -errno; 2156 if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 2157 struct dkwedge_info dkw; 2158 2159 if (ioctl(fd, DIOCGWEDGEINFO, &dkw) != -1) { 2160 return dkw.dkw_size * 512; 2161 } else { 2162 struct disklabel dl; 2163 2164 if (ioctl(fd, DIOCGDINFO, &dl)) 2165 return -errno; 2166 return (uint64_t)dl.d_secsize * 2167 dl.d_partitions[DISKPART(st.st_rdev)].p_size; 2168 } 2169 } else 2170 return st.st_size; 2171 } 2172 #elif 
defined(__sun__) 2173 static int64_t raw_getlength(BlockDriverState *bs) 2174 { 2175 BDRVRawState *s = bs->opaque; 2176 struct dk_minfo minfo; 2177 int ret; 2178 int64_t size; 2179 2180 ret = fd_open(bs); 2181 if (ret < 0) { 2182 return ret; 2183 } 2184 2185 /* 2186 * Use the DKIOCGMEDIAINFO ioctl to read the size. 2187 */ 2188 ret = ioctl(s->fd, DKIOCGMEDIAINFO, &minfo); 2189 if (ret != -1) { 2190 return minfo.dki_lbsize * minfo.dki_capacity; 2191 } 2192 2193 /* 2194 * There are reports that lseek on some devices fails, but 2195 * irc discussion said that contingency on contingency was overkill. 2196 */ 2197 size = lseek(s->fd, 0, SEEK_END); 2198 if (size < 0) { 2199 return -errno; 2200 } 2201 return size; 2202 } 2203 #elif defined(CONFIG_BSD) 2204 static int64_t raw_getlength(BlockDriverState *bs) 2205 { 2206 BDRVRawState *s = bs->opaque; 2207 int fd = s->fd; 2208 int64_t size; 2209 struct stat sb; 2210 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 2211 int reopened = 0; 2212 #endif 2213 int ret; 2214 2215 ret = fd_open(bs); 2216 if (ret < 0) 2217 return ret; 2218 2219 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 2220 again: 2221 #endif 2222 if (!fstat(fd, &sb) && (S_IFCHR & sb.st_mode)) { 2223 #ifdef DIOCGMEDIASIZE 2224 if (ioctl(fd, DIOCGMEDIASIZE, (off_t *)&size)) 2225 #elif defined(DIOCGPART) 2226 { 2227 struct partinfo pi; 2228 if (ioctl(fd, DIOCGPART, &pi) == 0) 2229 size = pi.media_size; 2230 else 2231 size = 0; 2232 } 2233 if (size == 0) 2234 #endif 2235 #if defined(__APPLE__) && defined(__MACH__) 2236 { 2237 uint64_t sectors = 0; 2238 uint32_t sector_size = 0; 2239 2240 if (ioctl(fd, DKIOCGETBLOCKCOUNT, &sectors) == 0 2241 && ioctl(fd, DKIOCGETBLOCKSIZE, &sector_size) == 0) { 2242 size = sectors * sector_size; 2243 } else { 2244 size = lseek(fd, 0LL, SEEK_END); 2245 if (size < 0) { 2246 return -errno; 2247 } 2248 } 2249 } 2250 #else 2251 size = lseek(fd, 0LL, SEEK_END); 2252 if (size < 0) { 2253 return -errno; 2254 } 2255 #endif 2256 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 2257 switch(s->type) { 2258 case FTYPE_CD: 2259 /* XXX FreeBSD acd returns UINT_MAX sectors for an empty drive */ 2260 if (size == 2048LL * (unsigned)-1) 2261 size = 0; 2262 /* XXX no disc? maybe we need to reopen...
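 * (cdrom_reopen() below does exactly that, guarded by @reopened so the
 * reopen is only attempted once)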
*/ 2263 if (size <= 0 && !reopened && cdrom_reopen(bs) >= 0) { 2264 reopened = 1; 2265 goto again; 2266 } 2267 } 2268 #endif 2269 } else { 2270 size = lseek(fd, 0, SEEK_END); 2271 if (size < 0) { 2272 return -errno; 2273 } 2274 } 2275 return size; 2276 } 2277 #else 2278 static int64_t raw_getlength(BlockDriverState *bs) 2279 { 2280 BDRVRawState *s = bs->opaque; 2281 int ret; 2282 int64_t size; 2283 2284 ret = fd_open(bs); 2285 if (ret < 0) { 2286 return ret; 2287 } 2288 2289 size = lseek(s->fd, 0, SEEK_END); 2290 if (size < 0) { 2291 return -errno; 2292 } 2293 return size; 2294 } 2295 #endif 2296 2297 static int64_t raw_get_allocated_file_size(BlockDriverState *bs) 2298 { 2299 struct stat st; 2300 BDRVRawState *s = bs->opaque; 2301 2302 if (fstat(s->fd, &st) < 0) { 2303 return -errno; 2304 } 2305 return (int64_t)st.st_blocks * 512; 2306 } 2307 2308 static int coroutine_fn 2309 raw_co_create(BlockdevCreateOptions *options, Error **errp) 2310 { 2311 BlockdevCreateOptionsFile *file_opts; 2312 Error *local_err = NULL; 2313 int fd; 2314 uint64_t perm, shared; 2315 int result = 0; 2316 2317 /* Validate options and set default values */ 2318 assert(options->driver == BLOCKDEV_DRIVER_FILE); 2319 file_opts = &options->u.file; 2320 2321 if (!file_opts->has_nocow) { 2322 file_opts->nocow = false; 2323 } 2324 if (!file_opts->has_preallocation) { 2325 file_opts->preallocation = PREALLOC_MODE_OFF; 2326 } 2327 2328 /* Create file */ 2329 fd = qemu_open(file_opts->filename, O_RDWR | O_CREAT | O_BINARY, 0644); 2330 if (fd < 0) { 2331 result = -errno; 2332 error_setg_errno(errp, -result, "Could not create file"); 2333 goto out; 2334 } 2335 2336 /* Take permissions: We want to discard everything, so we need 2337 * BLK_PERM_WRITE; and truncation to the desired size requires 2338 * BLK_PERM_RESIZE. 2339 * On the other hand, we cannot share the RESIZE permission 2340 * because we promise that after this function, the file has the 2341 * size given in the options. If someone else were to resize it 2342 * concurrently, we could not guarantee that. 2343 * Note that after this function, we can no longer guarantee that 2344 * the file is not touched by a third party, so it may be resized 2345 * then. */ 2346 perm = BLK_PERM_WRITE | BLK_PERM_RESIZE; 2347 shared = BLK_PERM_ALL & ~BLK_PERM_RESIZE; 2348 2349 /* Step one: Take locks */ 2350 result = raw_apply_lock_bytes(NULL, fd, perm, ~shared, false, errp); 2351 if (result < 0) { 2352 goto out_close; 2353 } 2354 2355 /* Step two: Check that nobody else has taken conflicting locks */ 2356 result = raw_check_lock_bytes(fd, perm, shared, errp); 2357 if (result < 0) { 2358 error_append_hint(errp, 2359 "Is another process using the image [%s]?\n", 2360 file_opts->filename); 2361 goto out_unlock; 2362 } 2363 2364 /* Clear the file by truncating it to 0 */ 2365 result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, errp); 2366 if (result < 0) { 2367 goto out_unlock; 2368 } 2369 2370 if (file_opts->nocow) { 2371 #ifdef __linux__ 2372 /* Set NOCOW flag to solve performance issue on fs like btrfs. 2373 * This is an optimisation. The FS_IOC_SETFLAGS ioctl return value 2374 * will be ignored since any failure of this operation should not 2375 * block the left work. 
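 * On btrfs this corresponds to "chattr +C"; the flag typically only
 * takes effect while the file is still empty, which is why it is set
 * right after the truncate-to-zero above and before the file is grown
 * to its final size.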
2376 */ 2377 int attr; 2378 if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) { 2379 attr |= FS_NOCOW_FL; 2380 ioctl(fd, FS_IOC_SETFLAGS, &attr); 2381 } 2382 #endif 2383 } 2384 2385 /* Resize and potentially preallocate the file to the desired 2386 * final size */ 2387 result = raw_regular_truncate(NULL, fd, file_opts->size, 2388 file_opts->preallocation, errp); 2389 if (result < 0) { 2390 goto out_unlock; 2391 } 2392 2393 out_unlock: 2394 raw_apply_lock_bytes(NULL, fd, 0, 0, true, &local_err); 2395 if (local_err) { 2396 /* The above call should not fail, and if it does, that does 2397 * not mean the whole creation operation has failed. So 2398 * report it the user for their convenience, but do not report 2399 * it to the caller. */ 2400 warn_report_err(local_err); 2401 } 2402 2403 out_close: 2404 if (qemu_close(fd) != 0 && result == 0) { 2405 result = -errno; 2406 error_setg_errno(errp, -result, "Could not close the new file"); 2407 } 2408 out: 2409 return result; 2410 } 2411 2412 static int coroutine_fn raw_co_create_opts(BlockDriver *drv, 2413 const char *filename, 2414 QemuOpts *opts, 2415 Error **errp) 2416 { 2417 BlockdevCreateOptions options; 2418 int64_t total_size = 0; 2419 bool nocow = false; 2420 PreallocMode prealloc; 2421 char *buf = NULL; 2422 Error *local_err = NULL; 2423 2424 /* Skip file: protocol prefix */ 2425 strstart(filename, "file:", &filename); 2426 2427 /* Read out options */ 2428 total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 2429 BDRV_SECTOR_SIZE); 2430 nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false); 2431 buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 2432 prealloc = qapi_enum_parse(&PreallocMode_lookup, buf, 2433 PREALLOC_MODE_OFF, &local_err); 2434 g_free(buf); 2435 if (local_err) { 2436 error_propagate(errp, local_err); 2437 return -EINVAL; 2438 } 2439 2440 options = (BlockdevCreateOptions) { 2441 .driver = BLOCKDEV_DRIVER_FILE, 2442 .u.file = { 2443 .filename = (char *) filename, 2444 .size = total_size, 2445 .has_preallocation = true, 2446 .preallocation = prealloc, 2447 .has_nocow = true, 2448 .nocow = nocow, 2449 }, 2450 }; 2451 return raw_co_create(&options, errp); 2452 } 2453 2454 static int coroutine_fn raw_co_delete_file(BlockDriverState *bs, 2455 Error **errp) 2456 { 2457 struct stat st; 2458 int ret; 2459 2460 if (!(stat(bs->filename, &st) == 0) || !S_ISREG(st.st_mode)) { 2461 error_setg_errno(errp, ENOENT, "%s is not a regular file", 2462 bs->filename); 2463 return -ENOENT; 2464 } 2465 2466 ret = unlink(bs->filename); 2467 if (ret < 0) { 2468 ret = -errno; 2469 error_setg_errno(errp, -ret, "Error when deleting file %s", 2470 bs->filename); 2471 } 2472 2473 return ret; 2474 } 2475 2476 /* 2477 * Find allocation range in @bs around offset @start. 2478 * May change underlying file descriptor's file offset. 2479 * If @start is not in a hole, store @start in @data, and the 2480 * beginning of the next hole in @hole, and return 0. 2481 * If @start is in a non-trailing hole, store @start in @hole and the 2482 * beginning of the next non-hole in @data, and return 0. 2483 * If @start is in a trailing hole or beyond EOF, return -ENXIO. 2484 * If we can't find out, return a negative errno other than -ENXIO. 2485 */ 2486 static int find_allocation(BlockDriverState *bs, off_t start, 2487 off_t *data, off_t *hole) 2488 { 2489 #if defined SEEK_HOLE && defined SEEK_DATA 2490 BDRVRawState *s = bs->opaque; 2491 off_t offs; 2492 2493 /* 2494 * SEEK_DATA cases: 2495 * D1. offs == start: start is in data 2496 * D2. 
offs > start: start is in a hole, next data at offs 2497 * D3. offs < 0, errno = ENXIO: either start is in a trailing hole 2498 * or start is beyond EOF 2499 * If the latter happens, the file has been truncated behind 2500 * our back since we opened it. All bets are off then. 2501 * Treating like a trailing hole is simplest. 2502 * D4. offs < 0, errno != ENXIO: we learned nothing 2503 */ 2504 offs = lseek(s->fd, start, SEEK_DATA); 2505 if (offs < 0) { 2506 return -errno; /* D3 or D4 */ 2507 } 2508 2509 if (offs < start) { 2510 /* This is not a valid return by lseek(). We are safe to just return 2511 * -EIO in this case, and we'll treat it like D4. */ 2512 return -EIO; 2513 } 2514 2515 if (offs > start) { 2516 /* D2: in hole, next data at offs */ 2517 *hole = start; 2518 *data = offs; 2519 return 0; 2520 } 2521 2522 /* D1: in data, end not yet known */ 2523 2524 /* 2525 * SEEK_HOLE cases: 2526 * H1. offs == start: start is in a hole 2527 * If this happens here, a hole has been dug behind our back 2528 * since the previous lseek(). 2529 * H2. offs > start: either start is in data, next hole at offs, 2530 * or start is in trailing hole, EOF at offs 2531 * Linux treats trailing holes like any other hole: offs == 2532 * start. Solaris seeks to EOF instead: offs > start (blech). 2533 * If that happens here, a hole has been dug behind our back 2534 * since the previous lseek(). 2535 * H3. offs < 0, errno = ENXIO: start is beyond EOF 2536 * If this happens, the file has been truncated behind our 2537 * back since we opened it. Treat it like a trailing hole. 2538 * H4. offs < 0, errno != ENXIO: we learned nothing 2539 * Pretend we know nothing at all, i.e. "forget" about D1. 2540 */ 2541 offs = lseek(s->fd, start, SEEK_HOLE); 2542 if (offs < 0) { 2543 return -errno; /* D1 and (H3 or H4) */ 2544 } 2545 2546 if (offs < start) { 2547 /* This is not a valid return by lseek(). We are safe to just return 2548 * -EIO in this case, and we'll treat it like H4. */ 2549 return -EIO; 2550 } 2551 2552 if (offs > start) { 2553 /* 2554 * D1 and H2: either in data, next hole at offs, or it was in 2555 * data but is now in a trailing hole. In the latter case, 2556 * all bets are off. Treating it as if it there was data all 2557 * the way to EOF is safe, so simply do that. 2558 */ 2559 *data = start; 2560 *hole = offs; 2561 return 0; 2562 } 2563 2564 /* D1 and H1 */ 2565 return -EBUSY; 2566 #else 2567 return -ENOTSUP; 2568 #endif 2569 } 2570 2571 /* 2572 * Returns the allocation status of the specified offset. 2573 * 2574 * The block layer guarantees 'offset' and 'bytes' are within bounds. 2575 * 2576 * 'pnum' is set to the number of bytes (including and immediately following 2577 * the specified offset) that are known to be in the same 2578 * allocated/unallocated state. 2579 * 2580 * 'bytes' is the max value 'pnum' should be set to. 
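 *
 * The return value is a combination of BDRV_BLOCK_* flags: data extents
 * are reported as BDRV_BLOCK_DATA, holes found via SEEK_HOLE/SEEK_DATA
 * as BDRV_BLOCK_ZERO, and BDRV_BLOCK_OFFSET_VALID indicates that '*map'
 * and '*file' describe where the data lives (for this driver always @bs
 * itself at the same offset).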
2581 */ 2582 static int coroutine_fn raw_co_block_status(BlockDriverState *bs, 2583 bool want_zero, 2584 int64_t offset, 2585 int64_t bytes, int64_t *pnum, 2586 int64_t *map, 2587 BlockDriverState **file) 2588 { 2589 off_t data = 0, hole = 0; 2590 int ret; 2591 2592 assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment)); 2593 2594 ret = fd_open(bs); 2595 if (ret < 0) { 2596 return ret; 2597 } 2598 2599 if (!want_zero) { 2600 *pnum = bytes; 2601 *map = offset; 2602 *file = bs; 2603 return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; 2604 } 2605 2606 ret = find_allocation(bs, offset, &data, &hole); 2607 if (ret == -ENXIO) { 2608 /* Trailing hole */ 2609 *pnum = bytes; 2610 ret = BDRV_BLOCK_ZERO; 2611 } else if (ret < 0) { 2612 /* No info available, so pretend there are no holes */ 2613 *pnum = bytes; 2614 ret = BDRV_BLOCK_DATA; 2615 } else if (data == offset) { 2616 /* On a data extent, compute bytes to the end of the extent, 2617 * possibly including a partial sector at EOF. */ 2618 *pnum = MIN(bytes, hole - offset); 2619 2620 /* 2621 * We are not allowed to return partial sectors, though, so 2622 * round up if necessary. 2623 */ 2624 if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) { 2625 int64_t file_length = raw_getlength(bs); 2626 if (file_length > 0) { 2627 /* Ignore errors, this is just a safeguard */ 2628 assert(hole == file_length); 2629 } 2630 *pnum = ROUND_UP(*pnum, bs->bl.request_alignment); 2631 } 2632 2633 ret = BDRV_BLOCK_DATA; 2634 } else { 2635 /* On a hole, compute bytes to the beginning of the next extent. */ 2636 assert(hole == offset); 2637 *pnum = MIN(bytes, data - offset); 2638 ret = BDRV_BLOCK_ZERO; 2639 } 2640 *map = offset; 2641 *file = bs; 2642 return ret | BDRV_BLOCK_OFFSET_VALID; 2643 } 2644 2645 #if defined(__linux__) 2646 /* Verify that the file is not in the page cache */ 2647 static void check_cache_dropped(BlockDriverState *bs, Error **errp) 2648 { 2649 const size_t window_size = 128 * 1024 * 1024; 2650 BDRVRawState *s = bs->opaque; 2651 void *window = NULL; 2652 size_t length = 0; 2653 unsigned char *vec; 2654 size_t page_size; 2655 off_t offset; 2656 off_t end; 2657 2658 /* mincore(2) page status information requires 1 byte per page */ 2659 page_size = sysconf(_SC_PAGESIZE); 2660 vec = g_malloc(DIV_ROUND_UP(window_size, page_size)); 2661 2662 end = raw_getlength(bs); 2663 2664 for (offset = 0; offset < end; offset += window_size) { 2665 void *new_window; 2666 size_t new_length; 2667 size_t vec_end; 2668 size_t i; 2669 int ret; 2670 2671 /* Unmap previous window if size has changed */ 2672 new_length = MIN(end - offset, window_size); 2673 if (new_length != length) { 2674 munmap(window, length); 2675 window = NULL; 2676 length = 0; 2677 } 2678 2679 new_window = mmap(window, new_length, PROT_NONE, MAP_PRIVATE, 2680 s->fd, offset); 2681 if (new_window == MAP_FAILED) { 2682 error_setg_errno(errp, errno, "mmap failed"); 2683 break; 2684 } 2685 2686 window = new_window; 2687 length = new_length; 2688 2689 ret = mincore(window, length, vec); 2690 if (ret < 0) { 2691 error_setg_errno(errp, errno, "mincore failed"); 2692 break; 2693 } 2694 2695 vec_end = DIV_ROUND_UP(length, page_size); 2696 for (i = 0; i < vec_end; i++) { 2697 if (vec[i] & 0x1) { 2698 break; 2699 } 2700 } 2701 if (i < vec_end) { 2702 error_setg(errp, "page cache still in use!"); 2703 break; 2704 } 2705 } 2706 2707 if (window) { 2708 munmap(window, length); 2709 } 2710 2711 g_free(vec); 2712 } 2713 #endif /* __linux__ */ 2714 2715 static void coroutine_fn 
raw_co_invalidate_cache(BlockDriverState *bs, 2716 Error **errp) 2717 { 2718 BDRVRawState *s = bs->opaque; 2719 int ret; 2720 2721 ret = fd_open(bs); 2722 if (ret < 0) { 2723 error_setg_errno(errp, -ret, "The file descriptor is not open"); 2724 return; 2725 } 2726 2727 if (!s->drop_cache) { 2728 return; 2729 } 2730 2731 if (s->open_flags & O_DIRECT) { 2732 return; /* No host kernel page cache */ 2733 } 2734 2735 #if defined(__linux__) 2736 /* This sets the scene for the next syscall... */ 2737 ret = bdrv_co_flush(bs); 2738 if (ret < 0) { 2739 error_setg_errno(errp, -ret, "flush failed"); 2740 return; 2741 } 2742 2743 /* Linux does not invalidate pages that are dirty, locked, or mmapped by a 2744 * process. These limitations are okay because we just fsynced the file, 2745 * we don't use mmap, and the file should not be in use by other processes. 2746 */ 2747 ret = posix_fadvise(s->fd, 0, 0, POSIX_FADV_DONTNEED); 2748 if (ret != 0) { /* the return value is a positive errno */ 2749 error_setg_errno(errp, ret, "fadvise failed"); 2750 return; 2751 } 2752 2753 if (s->check_cache_dropped) { 2754 check_cache_dropped(bs, errp); 2755 } 2756 #else /* __linux__ */ 2757 /* Do nothing. Live migration to a remote host with cache.direct=off is 2758 * unsupported on other host operating systems. Cache consistency issues 2759 * may occur but no error is reported here, partly because that's the 2760 * historical behavior and partly because it's hard to differentiate valid 2761 * configurations that should not cause errors. 2762 */ 2763 #endif /* !__linux__ */ 2764 } 2765 2766 static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret) 2767 { 2768 if (ret) { 2769 s->stats.discard_nb_failed++; 2770 } else { 2771 s->stats.discard_nb_ok++; 2772 s->stats.discard_bytes_ok += nbytes; 2773 } 2774 } 2775 2776 static coroutine_fn int 2777 raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev) 2778 { 2779 BDRVRawState *s = bs->opaque; 2780 RawPosixAIOData acb; 2781 int ret; 2782 2783 acb = (RawPosixAIOData) { 2784 .bs = bs, 2785 .aio_fildes = s->fd, 2786 .aio_type = QEMU_AIO_DISCARD, 2787 .aio_offset = offset, 2788 .aio_nbytes = bytes, 2789 }; 2790 2791 if (blkdev) { 2792 acb.aio_type |= QEMU_AIO_BLKDEV; 2793 } 2794 2795 ret = raw_thread_pool_submit(bs, handle_aiocb_discard, &acb); 2796 raw_account_discard(s, bytes, ret); 2797 return ret; 2798 } 2799 2800 static coroutine_fn int 2801 raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) 2802 { 2803 return raw_do_pdiscard(bs, offset, bytes, false); 2804 } 2805 2806 static int coroutine_fn 2807 raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, 2808 BdrvRequestFlags flags, bool blkdev) 2809 { 2810 BDRVRawState *s = bs->opaque; 2811 RawPosixAIOData acb; 2812 ThreadPoolFunc *handler; 2813 2814 #ifdef CONFIG_FALLOCATE 2815 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 2816 BdrvTrackedRequest *req; 2817 uint64_t end; 2818 2819 /* 2820 * This is a workaround for a bug in the Linux XFS driver, 2821 * where writes submitted through the AIO interface will be 2822 * discarded if they happen beyond a concurrently running 2823 * fallocate() that increases the file length (i.e., both the 2824 * write and the fallocate() happen beyond the EOF). 2825 * 2826 * To work around it, we extend the tracked request for this 2827 * zero write until INT64_MAX (effectively infinity), and mark 2828 * it as serializing. 
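 * Marking the request serializing means that any overlapping request
 * issued while it is in flight has to wait for it to complete, so the
 * problematic write/fallocate() overlap can no longer happen.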
2829 * 2830 * We have to enable this workaround for all filesystems and 2831 * AIO modes (not just XFS with aio=native), because for 2832 * remote filesystems we do not know the host configuration. 2833 */ 2834 2835 req = bdrv_co_get_self_request(bs); 2836 assert(req); 2837 assert(req->type == BDRV_TRACKED_WRITE); 2838 assert(req->offset <= offset); 2839 assert(req->offset + req->bytes >= offset + bytes); 2840 2841 end = INT64_MAX & -(uint64_t)bs->bl.request_alignment; 2842 req->bytes = end - req->offset; 2843 req->overlap_bytes = req->bytes; 2844 2845 bdrv_mark_request_serialising(req, bs->bl.request_alignment); 2846 } 2847 #endif 2848 2849 acb = (RawPosixAIOData) { 2850 .bs = bs, 2851 .aio_fildes = s->fd, 2852 .aio_type = QEMU_AIO_WRITE_ZEROES, 2853 .aio_offset = offset, 2854 .aio_nbytes = bytes, 2855 }; 2856 2857 if (blkdev) { 2858 acb.aio_type |= QEMU_AIO_BLKDEV; 2859 } 2860 if (flags & BDRV_REQ_NO_FALLBACK) { 2861 acb.aio_type |= QEMU_AIO_NO_FALLBACK; 2862 } 2863 2864 if (flags & BDRV_REQ_MAY_UNMAP) { 2865 acb.aio_type |= QEMU_AIO_DISCARD; 2866 handler = handle_aiocb_write_zeroes_unmap; 2867 } else { 2868 handler = handle_aiocb_write_zeroes; 2869 } 2870 2871 return raw_thread_pool_submit(bs, handler, &acb); 2872 } 2873 2874 static int coroutine_fn raw_co_pwrite_zeroes( 2875 BlockDriverState *bs, int64_t offset, 2876 int bytes, BdrvRequestFlags flags) 2877 { 2878 return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false); 2879 } 2880 2881 static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 2882 { 2883 BDRVRawState *s = bs->opaque; 2884 2885 bdi->unallocated_blocks_are_zero = s->discard_zeroes; 2886 return 0; 2887 } 2888 2889 static BlockStatsSpecificFile get_blockstats_specific_file(BlockDriverState *bs) 2890 { 2891 BDRVRawState *s = bs->opaque; 2892 return (BlockStatsSpecificFile) { 2893 .discard_nb_ok = s->stats.discard_nb_ok, 2894 .discard_nb_failed = s->stats.discard_nb_failed, 2895 .discard_bytes_ok = s->stats.discard_bytes_ok, 2896 }; 2897 } 2898 2899 static BlockStatsSpecific *raw_get_specific_stats(BlockDriverState *bs) 2900 { 2901 BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1); 2902 2903 stats->driver = BLOCKDEV_DRIVER_FILE; 2904 stats->u.file = get_blockstats_specific_file(bs); 2905 2906 return stats; 2907 } 2908 2909 static BlockStatsSpecific *hdev_get_specific_stats(BlockDriverState *bs) 2910 { 2911 BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1); 2912 2913 stats->driver = BLOCKDEV_DRIVER_HOST_DEVICE; 2914 stats->u.host_device = get_blockstats_specific_file(bs); 2915 2916 return stats; 2917 } 2918 2919 static QemuOptsList raw_create_opts = { 2920 .name = "raw-create-opts", 2921 .head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head), 2922 .desc = { 2923 { 2924 .name = BLOCK_OPT_SIZE, 2925 .type = QEMU_OPT_SIZE, 2926 .help = "Virtual disk size" 2927 }, 2928 { 2929 .name = BLOCK_OPT_NOCOW, 2930 .type = QEMU_OPT_BOOL, 2931 .help = "Turn off copy-on-write (valid only on btrfs)" 2932 }, 2933 { 2934 .name = BLOCK_OPT_PREALLOC, 2935 .type = QEMU_OPT_STRING, 2936 .help = "Preallocation mode (allowed values: off" 2937 #ifdef CONFIG_POSIX_FALLOCATE 2938 ", falloc" 2939 #endif 2940 ", full)" 2941 }, 2942 { /* end of list */ } 2943 } 2944 }; 2945 2946 static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared, 2947 Error **errp) 2948 { 2949 BDRVRawState *s = bs->opaque; 2950 BDRVRawReopenState *rs = NULL; 2951 int open_flags; 2952 int ret; 2953 2954 if (s->perm_change_fd) { 2955 /* 2956 * In the context of reopen, this function may 
be called several times 2957 * (directly and recursively while change permissions of the parent). 2958 * This is even true for children that don't inherit from the original 2959 * reopen node, so s->reopen_state is not set. 2960 * 2961 * Ignore all but the first call. 2962 */ 2963 return 0; 2964 } 2965 2966 if (s->reopen_state) { 2967 /* We already have a new file descriptor to set permissions for */ 2968 assert(s->reopen_state->perm == perm); 2969 assert(s->reopen_state->shared_perm == shared); 2970 rs = s->reopen_state->opaque; 2971 s->perm_change_fd = rs->fd; 2972 s->perm_change_flags = rs->open_flags; 2973 } else { 2974 /* We may need a new fd if auto-read-only switches the mode */ 2975 ret = raw_reconfigure_getfd(bs, bs->open_flags, &open_flags, perm, 2976 false, errp); 2977 if (ret < 0) { 2978 return ret; 2979 } else if (ret != s->fd) { 2980 s->perm_change_fd = ret; 2981 s->perm_change_flags = open_flags; 2982 } 2983 } 2984 2985 /* Prepare permissions on old fd to avoid conflicts between old and new, 2986 * but keep everything locked that new will need. */ 2987 ret = raw_handle_perm_lock(bs, RAW_PL_PREPARE, perm, shared, errp); 2988 if (ret < 0) { 2989 goto fail; 2990 } 2991 2992 /* Copy locks to the new fd */ 2993 if (s->perm_change_fd) { 2994 ret = raw_apply_lock_bytes(NULL, s->perm_change_fd, perm, ~shared, 2995 false, errp); 2996 if (ret < 0) { 2997 raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL); 2998 goto fail; 2999 } 3000 } 3001 return 0; 3002 3003 fail: 3004 if (s->perm_change_fd && !s->reopen_state) { 3005 qemu_close(s->perm_change_fd); 3006 } 3007 s->perm_change_fd = 0; 3008 return ret; 3009 } 3010 3011 static void raw_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared) 3012 { 3013 BDRVRawState *s = bs->opaque; 3014 3015 /* For reopen, we have already switched to the new fd (.bdrv_set_perm is 3016 * called after .bdrv_reopen_commit) */ 3017 if (s->perm_change_fd && s->fd != s->perm_change_fd) { 3018 qemu_close(s->fd); 3019 s->fd = s->perm_change_fd; 3020 s->open_flags = s->perm_change_flags; 3021 } 3022 s->perm_change_fd = 0; 3023 3024 raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL); 3025 s->perm = perm; 3026 s->shared_perm = shared; 3027 } 3028 3029 static void raw_abort_perm_update(BlockDriverState *bs) 3030 { 3031 BDRVRawState *s = bs->opaque; 3032 3033 /* For reopen, .bdrv_reopen_abort is called afterwards and will close 3034 * the file descriptor. 
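 * Only close the file descriptor here if raw_check_perm() opened it
 * itself, i.e. outside of a reopen sequence.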
*/ 3035 if (s->perm_change_fd && !s->reopen_state) { 3036 qemu_close(s->perm_change_fd); 3037 } 3038 s->perm_change_fd = 0; 3039 3040 raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL); 3041 } 3042 3043 static int coroutine_fn raw_co_copy_range_from( 3044 BlockDriverState *bs, BdrvChild *src, uint64_t src_offset, 3045 BdrvChild *dst, uint64_t dst_offset, uint64_t bytes, 3046 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) 3047 { 3048 return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 3049 read_flags, write_flags); 3050 } 3051 3052 static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs, 3053 BdrvChild *src, 3054 uint64_t src_offset, 3055 BdrvChild *dst, 3056 uint64_t dst_offset, 3057 uint64_t bytes, 3058 BdrvRequestFlags read_flags, 3059 BdrvRequestFlags write_flags) 3060 { 3061 RawPosixAIOData acb; 3062 BDRVRawState *s = bs->opaque; 3063 BDRVRawState *src_s; 3064 3065 assert(dst->bs == bs); 3066 if (src->bs->drv->bdrv_co_copy_range_to != raw_co_copy_range_to) { 3067 return -ENOTSUP; 3068 } 3069 3070 src_s = src->bs->opaque; 3071 if (fd_open(src->bs) < 0 || fd_open(dst->bs) < 0) { 3072 return -EIO; 3073 } 3074 3075 acb = (RawPosixAIOData) { 3076 .bs = bs, 3077 .aio_type = QEMU_AIO_COPY_RANGE, 3078 .aio_fildes = src_s->fd, 3079 .aio_offset = src_offset, 3080 .aio_nbytes = bytes, 3081 .copy_range = { 3082 .aio_fd2 = s->fd, 3083 .aio_offset2 = dst_offset, 3084 }, 3085 }; 3086 3087 return raw_thread_pool_submit(bs, handle_aiocb_copy_range, &acb); 3088 } 3089 3090 BlockDriver bdrv_file = { 3091 .format_name = "file", 3092 .protocol_name = "file", 3093 .instance_size = sizeof(BDRVRawState), 3094 .bdrv_needs_filename = true, 3095 .bdrv_probe = NULL, /* no probe for protocols */ 3096 .bdrv_parse_filename = raw_parse_filename, 3097 .bdrv_file_open = raw_open, 3098 .bdrv_reopen_prepare = raw_reopen_prepare, 3099 .bdrv_reopen_commit = raw_reopen_commit, 3100 .bdrv_reopen_abort = raw_reopen_abort, 3101 .bdrv_close = raw_close, 3102 .bdrv_co_create = raw_co_create, 3103 .bdrv_co_create_opts = raw_co_create_opts, 3104 .bdrv_has_zero_init = bdrv_has_zero_init_1, 3105 .bdrv_has_zero_init_truncate = bdrv_has_zero_init_1, 3106 .bdrv_co_block_status = raw_co_block_status, 3107 .bdrv_co_invalidate_cache = raw_co_invalidate_cache, 3108 .bdrv_co_pwrite_zeroes = raw_co_pwrite_zeroes, 3109 .bdrv_co_delete_file = raw_co_delete_file, 3110 3111 .bdrv_co_preadv = raw_co_preadv, 3112 .bdrv_co_pwritev = raw_co_pwritev, 3113 .bdrv_co_flush_to_disk = raw_co_flush_to_disk, 3114 .bdrv_co_pdiscard = raw_co_pdiscard, 3115 .bdrv_co_copy_range_from = raw_co_copy_range_from, 3116 .bdrv_co_copy_range_to = raw_co_copy_range_to, 3117 .bdrv_refresh_limits = raw_refresh_limits, 3118 .bdrv_io_plug = raw_aio_plug, 3119 .bdrv_io_unplug = raw_aio_unplug, 3120 .bdrv_attach_aio_context = raw_aio_attach_aio_context, 3121 3122 .bdrv_co_truncate = raw_co_truncate, 3123 .bdrv_getlength = raw_getlength, 3124 .bdrv_get_info = raw_get_info, 3125 .bdrv_get_allocated_file_size 3126 = raw_get_allocated_file_size, 3127 .bdrv_get_specific_stats = raw_get_specific_stats, 3128 .bdrv_check_perm = raw_check_perm, 3129 .bdrv_set_perm = raw_set_perm, 3130 .bdrv_abort_perm_update = raw_abort_perm_update, 3131 .create_opts = &raw_create_opts, 3132 .mutable_opts = mutable_opts, 3133 }; 3134 3135 /***********************************************/ 3136 /* host device */ 3137 3138 #if defined(__APPLE__) && defined(__MACH__) 3139 static kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath, 3140 CFIndex 
maxPathSize, int flags); 3141 static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator) 3142 { 3143 kern_return_t kernResult = KERN_FAILURE; 3144 mach_port_t masterPort; 3145 CFMutableDictionaryRef classesToMatch; 3146 const char *matching_array[] = {kIODVDMediaClass, kIOCDMediaClass}; 3147 char *mediaType = NULL; 3148 3149 kernResult = IOMasterPort( MACH_PORT_NULL, &masterPort ); 3150 if ( KERN_SUCCESS != kernResult ) { 3151 printf( "IOMasterPort returned %d\n", kernResult ); 3152 } 3153 3154 int index; 3155 for (index = 0; index < ARRAY_SIZE(matching_array); index++) { 3156 classesToMatch = IOServiceMatching(matching_array[index]); 3157 if (classesToMatch == NULL) { 3158 error_report("IOServiceMatching returned NULL for %s", 3159 matching_array[index]); 3160 continue; 3161 } 3162 CFDictionarySetValue(classesToMatch, CFSTR(kIOMediaEjectableKey), 3163 kCFBooleanTrue); 3164 kernResult = IOServiceGetMatchingServices(masterPort, classesToMatch, 3165 mediaIterator); 3166 if (kernResult != KERN_SUCCESS) { 3167 error_report("Note: IOServiceGetMatchingServices returned %d", 3168 kernResult); 3169 continue; 3170 } 3171 3172 /* If a match was found, leave the loop */ 3173 if (*mediaIterator != 0) { 3174 trace_file_FindEjectableOpticalMedia(matching_array[index]); 3175 mediaType = g_strdup(matching_array[index]); 3176 break; 3177 } 3178 } 3179 return mediaType; 3180 } 3181 3182 kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath, 3183 CFIndex maxPathSize, int flags) 3184 { 3185 io_object_t nextMedia; 3186 kern_return_t kernResult = KERN_FAILURE; 3187 *bsdPath = '\0'; 3188 nextMedia = IOIteratorNext( mediaIterator ); 3189 if ( nextMedia ) 3190 { 3191 CFTypeRef bsdPathAsCFString; 3192 bsdPathAsCFString = IORegistryEntryCreateCFProperty( nextMedia, CFSTR( kIOBSDNameKey ), kCFAllocatorDefault, 0 ); 3193 if ( bsdPathAsCFString ) { 3194 size_t devPathLength; 3195 strcpy( bsdPath, _PATH_DEV ); 3196 if (flags & BDRV_O_NOCACHE) { 3197 strcat(bsdPath, "r"); 3198 } 3199 devPathLength = strlen( bsdPath ); 3200 if ( CFStringGetCString( bsdPathAsCFString, bsdPath + devPathLength, maxPathSize - devPathLength, kCFStringEncodingASCII ) ) { 3201 kernResult = KERN_SUCCESS; 3202 } 3203 CFRelease( bsdPathAsCFString ); 3204 } 3205 IOObjectRelease( nextMedia ); 3206 } 3207 3208 return kernResult; 3209 } 3210 3211 /* Sets up a real cdrom for use in QEMU */ 3212 static bool setup_cdrom(char *bsd_path, Error **errp) 3213 { 3214 int index, num_of_test_partitions = 2, fd; 3215 char test_partition[MAXPATHLEN]; 3216 bool partition_found = false; 3217 3218 /* look for a working partition */ 3219 for (index = 0; index < num_of_test_partitions; index++) { 3220 snprintf(test_partition, sizeof(test_partition), "%ss%d", bsd_path, 3221 index); 3222 fd = qemu_open(test_partition, O_RDONLY | O_BINARY | O_LARGEFILE); 3223 if (fd >= 0) { 3224 partition_found = true; 3225 qemu_close(fd); 3226 break; 3227 } 3228 } 3229 3230 /* if a working partition on the device was not found */ 3231 if (partition_found == false) { 3232 error_setg(errp, "Failed to find a working partition on disc"); 3233 } else { 3234 trace_file_setup_cdrom(test_partition); 3235 pstrcpy(bsd_path, MAXPATHLEN, test_partition); 3236 } 3237 return partition_found; 3238 } 3239 3240 /* Prints directions on mounting and unmounting a device */ 3241 static void print_unmounting_directions(const char *file_name) 3242 { 3243 error_report("If device %s is mounted on the desktop, unmount" 3244 " it first before using it in QEMU", file_name); 3245 
error_report("Command to unmount device: diskutil unmountDisk %s", 3246 file_name); 3247 error_report("Command to mount device: diskutil mountDisk %s", file_name); 3248 } 3249 3250 #endif /* defined(__APPLE__) && defined(__MACH__) */ 3251 3252 static int hdev_probe_device(const char *filename) 3253 { 3254 struct stat st; 3255 3256 /* allow a dedicated CD-ROM driver to match with a higher priority */ 3257 if (strstart(filename, "/dev/cdrom", NULL)) 3258 return 50; 3259 3260 if (stat(filename, &st) >= 0 && 3261 (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) { 3262 return 100; 3263 } 3264 3265 return 0; 3266 } 3267 3268 static int check_hdev_writable(BDRVRawState *s) 3269 { 3270 #if defined(BLKROGET) 3271 /* Linux block devices can be configured "read-only" using blockdev(8). 3272 * This is independent of device node permissions and therefore open(2) 3273 * with O_RDWR succeeds. Actual writes fail with EPERM. 3274 * 3275 * bdrv_open() is supposed to fail if the disk is read-only. Explicitly 3276 * check for read-only block devices so that Linux block devices behave 3277 * properly. 3278 */ 3279 struct stat st; 3280 int readonly = 0; 3281 3282 if (fstat(s->fd, &st)) { 3283 return -errno; 3284 } 3285 3286 if (!S_ISBLK(st.st_mode)) { 3287 return 0; 3288 } 3289 3290 if (ioctl(s->fd, BLKROGET, &readonly) < 0) { 3291 return -errno; 3292 } 3293 3294 if (readonly) { 3295 return -EACCES; 3296 } 3297 #endif /* defined(BLKROGET) */ 3298 return 0; 3299 } 3300 3301 static void hdev_parse_filename(const char *filename, QDict *options, 3302 Error **errp) 3303 { 3304 bdrv_parse_filename_strip_prefix(filename, "host_device:", options); 3305 } 3306 3307 static bool hdev_is_sg(BlockDriverState *bs) 3308 { 3309 3310 #if defined(__linux__) 3311 3312 BDRVRawState *s = bs->opaque; 3313 struct stat st; 3314 struct sg_scsi_id scsiid; 3315 int sg_version; 3316 int ret; 3317 3318 if (stat(bs->filename, &st) < 0 || !S_ISCHR(st.st_mode)) { 3319 return false; 3320 } 3321 3322 ret = ioctl(s->fd, SG_GET_VERSION_NUM, &sg_version); 3323 if (ret < 0) { 3324 return false; 3325 } 3326 3327 ret = ioctl(s->fd, SG_GET_SCSI_ID, &scsiid); 3328 if (ret >= 0) { 3329 trace_file_hdev_is_sg(scsiid.scsi_type, sg_version); 3330 return true; 3331 } 3332 3333 #endif 3334 3335 return false; 3336 } 3337 3338 static int hdev_open(BlockDriverState *bs, QDict *options, int flags, 3339 Error **errp) 3340 { 3341 BDRVRawState *s = bs->opaque; 3342 Error *local_err = NULL; 3343 int ret; 3344 3345 #if defined(__APPLE__) && defined(__MACH__) 3346 /* 3347 * Caution: while qdict_get_str() is fine, getting non-string types 3348 * would require more care. When @options come from -blockdev or 3349 * blockdev_add, its members are typed according to the QAPI 3350 * schema, but when they come from -drive, they're all QString. 
3351 */ 3352 const char *filename = qdict_get_str(options, "filename"); 3353 char bsd_path[MAXPATHLEN] = ""; 3354 bool error_occurred = false; 3355 3356 /* If using a real cdrom */ 3357 if (strcmp(filename, "/dev/cdrom") == 0) { 3358 char *mediaType = NULL; 3359 kern_return_t ret_val; 3360 io_iterator_t mediaIterator = 0; 3361 3362 mediaType = FindEjectableOpticalMedia(&mediaIterator); 3363 if (mediaType == NULL) { 3364 error_setg(errp, "Please make sure your CD/DVD is in the optical" 3365 " drive"); 3366 error_occurred = true; 3367 goto hdev_open_Mac_error; 3368 } 3369 3370 ret_val = GetBSDPath(mediaIterator, bsd_path, sizeof(bsd_path), flags); 3371 if (ret_val != KERN_SUCCESS) { 3372 error_setg(errp, "Could not get BSD path for optical drive"); 3373 error_occurred = true; 3374 goto hdev_open_Mac_error; 3375 } 3376 3377 /* If a real optical drive was not found */ 3378 if (bsd_path[0] == '\0') { 3379 error_setg(errp, "Failed to obtain bsd path for optical drive"); 3380 error_occurred = true; 3381 goto hdev_open_Mac_error; 3382 } 3383 3384 /* If using a cdrom disc and finding a partition on the disc failed */ 3385 if (strncmp(mediaType, kIOCDMediaClass, 9) == 0 && 3386 setup_cdrom(bsd_path, errp) == false) { 3387 print_unmounting_directions(bsd_path); 3388 error_occurred = true; 3389 goto hdev_open_Mac_error; 3390 } 3391 3392 qdict_put_str(options, "filename", bsd_path); 3393 3394 hdev_open_Mac_error: 3395 g_free(mediaType); 3396 if (mediaIterator) { 3397 IOObjectRelease(mediaIterator); 3398 } 3399 if (error_occurred) { 3400 return -ENOENT; 3401 } 3402 } 3403 #endif /* defined(__APPLE__) && defined(__MACH__) */ 3404 3405 s->type = FTYPE_FILE; 3406 3407 ret = raw_open_common(bs, options, flags, 0, true, &local_err); 3408 if (ret < 0) { 3409 error_propagate(errp, local_err); 3410 #if defined(__APPLE__) && defined(__MACH__) 3411 if (*bsd_path) { 3412 filename = bsd_path; 3413 } 3414 /* if a physical device experienced an error while being opened */ 3415 if (strncmp(filename, "/dev/", 5) == 0) { 3416 print_unmounting_directions(filename); 3417 } 3418 #endif /* defined(__APPLE__) && defined(__MACH__) */ 3419 return ret; 3420 } 3421 3422 /* Since this does ioctl the device must be already opened */ 3423 bs->sg = hdev_is_sg(bs); 3424 3425 if (flags & BDRV_O_RDWR) { 3426 ret = check_hdev_writable(s); 3427 if (ret < 0) { 3428 raw_close(bs); 3429 error_setg_errno(errp, -ret, "The device is not writable"); 3430 return ret; 3431 } 3432 } 3433 3434 return ret; 3435 } 3436 3437 #if defined(__linux__) 3438 static int coroutine_fn 3439 hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 3440 { 3441 BDRVRawState *s = bs->opaque; 3442 RawPosixAIOData acb; 3443 int ret; 3444 3445 ret = fd_open(bs); 3446 if (ret < 0) { 3447 return ret; 3448 } 3449 3450 if (req == SG_IO && s->pr_mgr) { 3451 struct sg_io_hdr *io_hdr = buf; 3452 if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT || 3453 io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) { 3454 return pr_manager_execute(s->pr_mgr, bdrv_get_aio_context(bs), 3455 s->fd, io_hdr); 3456 } 3457 } 3458 3459 acb = (RawPosixAIOData) { 3460 .bs = bs, 3461 .aio_type = QEMU_AIO_IOCTL, 3462 .aio_fildes = s->fd, 3463 .aio_offset = 0, 3464 .ioctl = { 3465 .buf = buf, 3466 .cmd = req, 3467 }, 3468 }; 3469 3470 return raw_thread_pool_submit(bs, handle_aiocb_ioctl, &acb); 3471 } 3472 #endif /* linux */ 3473 3474 static int fd_open(BlockDriverState *bs) 3475 { 3476 BDRVRawState *s = bs->opaque; 3477 3478 /* this is just to ensure s->fd is sane (its called by io ops) */ 3479 if 
(s->fd >= 0) 3480 return 0; 3481 return -EIO; 3482 } 3483 3484 static coroutine_fn int 3485 hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) 3486 { 3487 BDRVRawState *s = bs->opaque; 3488 int ret; 3489 3490 ret = fd_open(bs); 3491 if (ret < 0) { 3492 raw_account_discard(s, bytes, ret); 3493 return ret; 3494 } 3495 return raw_do_pdiscard(bs, offset, bytes, true); 3496 } 3497 3498 static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs, 3499 int64_t offset, int bytes, BdrvRequestFlags flags) 3500 { 3501 int rc; 3502 3503 rc = fd_open(bs); 3504 if (rc < 0) { 3505 return rc; 3506 } 3507 3508 return raw_do_pwrite_zeroes(bs, offset, bytes, flags, true); 3509 } 3510 3511 static BlockDriver bdrv_host_device = { 3512 .format_name = "host_device", 3513 .protocol_name = "host_device", 3514 .instance_size = sizeof(BDRVRawState), 3515 .bdrv_needs_filename = true, 3516 .bdrv_probe_device = hdev_probe_device, 3517 .bdrv_parse_filename = hdev_parse_filename, 3518 .bdrv_file_open = hdev_open, 3519 .bdrv_close = raw_close, 3520 .bdrv_reopen_prepare = raw_reopen_prepare, 3521 .bdrv_reopen_commit = raw_reopen_commit, 3522 .bdrv_reopen_abort = raw_reopen_abort, 3523 .bdrv_co_create_opts = bdrv_co_create_opts_simple, 3524 .create_opts = &bdrv_create_opts_simple, 3525 .mutable_opts = mutable_opts, 3526 .bdrv_co_invalidate_cache = raw_co_invalidate_cache, 3527 .bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes, 3528 3529 .bdrv_co_preadv = raw_co_preadv, 3530 .bdrv_co_pwritev = raw_co_pwritev, 3531 .bdrv_co_flush_to_disk = raw_co_flush_to_disk, 3532 .bdrv_co_pdiscard = hdev_co_pdiscard, 3533 .bdrv_co_copy_range_from = raw_co_copy_range_from, 3534 .bdrv_co_copy_range_to = raw_co_copy_range_to, 3535 .bdrv_refresh_limits = raw_refresh_limits, 3536 .bdrv_io_plug = raw_aio_plug, 3537 .bdrv_io_unplug = raw_aio_unplug, 3538 .bdrv_attach_aio_context = raw_aio_attach_aio_context, 3539 3540 .bdrv_co_truncate = raw_co_truncate, 3541 .bdrv_getlength = raw_getlength, 3542 .bdrv_get_info = raw_get_info, 3543 .bdrv_get_allocated_file_size 3544 = raw_get_allocated_file_size, 3545 .bdrv_get_specific_stats = hdev_get_specific_stats, 3546 .bdrv_check_perm = raw_check_perm, 3547 .bdrv_set_perm = raw_set_perm, 3548 .bdrv_abort_perm_update = raw_abort_perm_update, 3549 .bdrv_probe_blocksizes = hdev_probe_blocksizes, 3550 .bdrv_probe_geometry = hdev_probe_geometry, 3551 3552 /* generic scsi device */ 3553 #ifdef __linux__ 3554 .bdrv_co_ioctl = hdev_co_ioctl, 3555 #endif 3556 }; 3557 3558 #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 3559 static void cdrom_parse_filename(const char *filename, QDict *options, 3560 Error **errp) 3561 { 3562 bdrv_parse_filename_strip_prefix(filename, "host_cdrom:", options); 3563 } 3564 #endif 3565 3566 #ifdef __linux__ 3567 static int cdrom_open(BlockDriverState *bs, QDict *options, int flags, 3568 Error **errp) 3569 { 3570 BDRVRawState *s = bs->opaque; 3571 3572 s->type = FTYPE_CD; 3573 3574 /* open will not fail even if no CD is inserted, so add O_NONBLOCK */ 3575 return raw_open_common(bs, options, flags, O_NONBLOCK, true, errp); 3576 } 3577 3578 static int cdrom_probe_device(const char *filename) 3579 { 3580 int fd, ret; 3581 int prio = 0; 3582 struct stat st; 3583 3584 fd = qemu_open(filename, O_RDONLY | O_NONBLOCK); 3585 if (fd < 0) { 3586 goto out; 3587 } 3588 ret = fstat(fd, &st); 3589 if (ret == -1 || !S_ISBLK(st.st_mode)) { 3590 goto outc; 3591 } 3592 3593 /* Attempt to detect via a CDROM specific ioctl */ 3594 ret = ioctl(fd, 
CDROM_DRIVE_STATUS, CDSL_CURRENT); 3595 if (ret >= 0) 3596 prio = 100; 3597 3598 outc: 3599 qemu_close(fd); 3600 out: 3601 return prio; 3602 } 3603 3604 static bool cdrom_is_inserted(BlockDriverState *bs) 3605 { 3606 BDRVRawState *s = bs->opaque; 3607 int ret; 3608 3609 ret = ioctl(s->fd, CDROM_DRIVE_STATUS, CDSL_CURRENT); 3610 return ret == CDS_DISC_OK; 3611 } 3612 3613 static void cdrom_eject(BlockDriverState *bs, bool eject_flag) 3614 { 3615 BDRVRawState *s = bs->opaque; 3616 3617 if (eject_flag) { 3618 if (ioctl(s->fd, CDROMEJECT, NULL) < 0) 3619 perror("CDROMEJECT"); 3620 } else { 3621 if (ioctl(s->fd, CDROMCLOSETRAY, NULL) < 0) 3622 perror("CDROMEJECT"); 3623 } 3624 } 3625 3626 static void cdrom_lock_medium(BlockDriverState *bs, bool locked) 3627 { 3628 BDRVRawState *s = bs->opaque; 3629 3630 if (ioctl(s->fd, CDROM_LOCKDOOR, locked) < 0) { 3631 /* 3632 * Note: an error can happen if the distribution automatically 3633 * mounts the CD-ROM 3634 */ 3635 /* perror("CDROM_LOCKDOOR"); */ 3636 } 3637 } 3638 3639 static BlockDriver bdrv_host_cdrom = { 3640 .format_name = "host_cdrom", 3641 .protocol_name = "host_cdrom", 3642 .instance_size = sizeof(BDRVRawState), 3643 .bdrv_needs_filename = true, 3644 .bdrv_probe_device = cdrom_probe_device, 3645 .bdrv_parse_filename = cdrom_parse_filename, 3646 .bdrv_file_open = cdrom_open, 3647 .bdrv_close = raw_close, 3648 .bdrv_reopen_prepare = raw_reopen_prepare, 3649 .bdrv_reopen_commit = raw_reopen_commit, 3650 .bdrv_reopen_abort = raw_reopen_abort, 3651 .bdrv_co_create_opts = bdrv_co_create_opts_simple, 3652 .create_opts = &bdrv_create_opts_simple, 3653 .mutable_opts = mutable_opts, 3654 .bdrv_co_invalidate_cache = raw_co_invalidate_cache, 3655 3656 .bdrv_co_preadv = raw_co_preadv, 3657 .bdrv_co_pwritev = raw_co_pwritev, 3658 .bdrv_co_flush_to_disk = raw_co_flush_to_disk, 3659 .bdrv_refresh_limits = raw_refresh_limits, 3660 .bdrv_io_plug = raw_aio_plug, 3661 .bdrv_io_unplug = raw_aio_unplug, 3662 .bdrv_attach_aio_context = raw_aio_attach_aio_context, 3663 3664 .bdrv_co_truncate = raw_co_truncate, 3665 .bdrv_getlength = raw_getlength, 3666 .has_variable_length = true, 3667 .bdrv_get_allocated_file_size 3668 = raw_get_allocated_file_size, 3669 3670 /* removable device support */ 3671 .bdrv_is_inserted = cdrom_is_inserted, 3672 .bdrv_eject = cdrom_eject, 3673 .bdrv_lock_medium = cdrom_lock_medium, 3674 3675 /* generic scsi device */ 3676 .bdrv_co_ioctl = hdev_co_ioctl, 3677 }; 3678 #endif /* __linux__ */ 3679 3680 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 3681 static int cdrom_open(BlockDriverState *bs, QDict *options, int flags, 3682 Error **errp) 3683 { 3684 BDRVRawState *s = bs->opaque; 3685 Error *local_err = NULL; 3686 int ret; 3687 3688 s->type = FTYPE_CD; 3689 3690 ret = raw_open_common(bs, options, flags, 0, true, &local_err); 3691 if (ret) { 3692 error_propagate(errp, local_err); 3693 return ret; 3694 } 3695 3696 /* make sure the door isn't locked at this time */ 3697 ioctl(s->fd, CDIOCALLOW); 3698 return 0; 3699 } 3700 3701 static int cdrom_probe_device(const char *filename) 3702 { 3703 if (strstart(filename, "/dev/cd", NULL) || 3704 strstart(filename, "/dev/acd", NULL)) 3705 return 100; 3706 return 0; 3707 } 3708 3709 static int cdrom_reopen(BlockDriverState *bs) 3710 { 3711 BDRVRawState *s = bs->opaque; 3712 int fd; 3713 3714 /* 3715 * Force reread of possibly changed/newly loaded disc, 3716 * FreeBSD seems to not notice sometimes... 
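 * Closing and reopening the device node makes the kernel probe the
 * medium again, so a newly loaded disc is seen with its correct size.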
3717 */ 3718 if (s->fd >= 0) 3719 qemu_close(s->fd); 3720 fd = qemu_open(bs->filename, s->open_flags, 0644); 3721 if (fd < 0) { 3722 s->fd = -1; 3723 return -EIO; 3724 } 3725 s->fd = fd; 3726 3727 /* make sure the door isn't locked at this time */ 3728 ioctl(s->fd, CDIOCALLOW); 3729 return 0; 3730 } 3731 3732 static bool cdrom_is_inserted(BlockDriverState *bs) 3733 { 3734 return raw_getlength(bs) > 0; 3735 } 3736 3737 static void cdrom_eject(BlockDriverState *bs, bool eject_flag) 3738 { 3739 BDRVRawState *s = bs->opaque; 3740 3741 if (s->fd < 0) 3742 return; 3743 3744 (void) ioctl(s->fd, CDIOCALLOW); 3745 3746 if (eject_flag) { 3747 if (ioctl(s->fd, CDIOCEJECT) < 0) 3748 perror("CDIOCEJECT"); 3749 } else { 3750 if (ioctl(s->fd, CDIOCCLOSE) < 0) 3751 perror("CDIOCCLOSE"); 3752 } 3753 3754 cdrom_reopen(bs); 3755 } 3756 3757 static void cdrom_lock_medium(BlockDriverState *bs, bool locked) 3758 { 3759 BDRVRawState *s = bs->opaque; 3760 3761 if (s->fd < 0) 3762 return; 3763 if (ioctl(s->fd, (locked ? CDIOCPREVENT : CDIOCALLOW)) < 0) { 3764 /* 3765 * Note: an error can happen if the distribution automatically 3766 * mounts the CD-ROM 3767 */ 3768 /* perror("CDROM_LOCKDOOR"); */ 3769 } 3770 } 3771 3772 static BlockDriver bdrv_host_cdrom = { 3773 .format_name = "host_cdrom", 3774 .protocol_name = "host_cdrom", 3775 .instance_size = sizeof(BDRVRawState), 3776 .bdrv_needs_filename = true, 3777 .bdrv_probe_device = cdrom_probe_device, 3778 .bdrv_parse_filename = cdrom_parse_filename, 3779 .bdrv_file_open = cdrom_open, 3780 .bdrv_close = raw_close, 3781 .bdrv_reopen_prepare = raw_reopen_prepare, 3782 .bdrv_reopen_commit = raw_reopen_commit, 3783 .bdrv_reopen_abort = raw_reopen_abort, 3784 .bdrv_co_create_opts = bdrv_co_create_opts_simple, 3785 .create_opts = &bdrv_create_opts_simple, 3786 .mutable_opts = mutable_opts, 3787 3788 .bdrv_co_preadv = raw_co_preadv, 3789 .bdrv_co_pwritev = raw_co_pwritev, 3790 .bdrv_co_flush_to_disk = raw_co_flush_to_disk, 3791 .bdrv_refresh_limits = raw_refresh_limits, 3792 .bdrv_io_plug = raw_aio_plug, 3793 .bdrv_io_unplug = raw_aio_unplug, 3794 .bdrv_attach_aio_context = raw_aio_attach_aio_context, 3795 3796 .bdrv_co_truncate = raw_co_truncate, 3797 .bdrv_getlength = raw_getlength, 3798 .has_variable_length = true, 3799 .bdrv_get_allocated_file_size 3800 = raw_get_allocated_file_size, 3801 3802 /* removable device support */ 3803 .bdrv_is_inserted = cdrom_is_inserted, 3804 .bdrv_eject = cdrom_eject, 3805 .bdrv_lock_medium = cdrom_lock_medium, 3806 }; 3807 #endif /* __FreeBSD__ */ 3808 3809 static void bdrv_file_init(void) 3810 { 3811 /* 3812 * Register all the drivers. Note that order is important, the driver 3813 * registered last will get probed first. 3814 */ 3815 bdrv_register(&bdrv_file); 3816 bdrv_register(&bdrv_host_device); 3817 #ifdef __linux__ 3818 bdrv_register(&bdrv_host_cdrom); 3819 #endif 3820 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 3821 bdrv_register(&bdrv_host_cdrom); 3822 #endif 3823 } 3824 3825 block_init(bdrv_file_init); 3826
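
/*
 * Illustrative usage (not exercised by this file): the protocol drivers
 * registered above can be selected explicitly with -blockdev, e.g.
 *
 *   -blockdev driver=file,node-name=disk0,filename=test.img
 *   -blockdev driver=host_device,node-name=dev0,filename=/dev/sdb
 *   -blockdev driver=host_cdrom,node-name=cd0,filename=/dev/cdrom
 *
 * or implicitly via the "file:", "host_device:" and "host_cdrom:"
 * filename prefixes and the probe callbacks registered above.
 */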