1 /* 2 * Block driver for RAW files (posix) 3 * 4 * Copyright (c) 2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "qapi/error.h" 27 #include "qemu/cutils.h" 28 #include "qemu/error-report.h" 29 #include "block/block_int.h" 30 #include "qemu/module.h" 31 #include "qemu/option.h" 32 #include "trace.h" 33 #include "block/thread-pool.h" 34 #include "qemu/iov.h" 35 #include "block/raw-aio.h" 36 #include "qapi/qmp/qdict.h" 37 #include "qapi/qmp/qstring.h" 38 39 #include "scsi/pr-manager.h" 40 #include "scsi/constants.h" 41 42 #if defined(__APPLE__) && (__MACH__) 43 #include <paths.h> 44 #include <sys/param.h> 45 #include <IOKit/IOKitLib.h> 46 #include <IOKit/IOBSD.h> 47 #include <IOKit/storage/IOMediaBSDClient.h> 48 #include <IOKit/storage/IOMedia.h> 49 #include <IOKit/storage/IOCDMedia.h> 50 //#include <IOKit/storage/IOCDTypes.h> 51 #include <IOKit/storage/IODVDMedia.h> 52 #include <CoreFoundation/CoreFoundation.h> 53 #endif 54 55 #ifdef __sun__ 56 #define _POSIX_PTHREAD_SEMANTICS 1 57 #include <sys/dkio.h> 58 #endif 59 #ifdef __linux__ 60 #include <sys/ioctl.h> 61 #include <sys/param.h> 62 #include <sys/syscall.h> 63 #include <linux/cdrom.h> 64 #include <linux/fd.h> 65 #include <linux/fs.h> 66 #include <linux/hdreg.h> 67 #include <scsi/sg.h> 68 #ifdef __s390__ 69 #include <asm/dasd.h> 70 #endif 71 #ifndef FS_NOCOW_FL 72 #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ 73 #endif 74 #endif 75 #if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE) 76 #include <linux/falloc.h> 77 #endif 78 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 79 #include <sys/disk.h> 80 #include <sys/cdio.h> 81 #endif 82 83 #ifdef __OpenBSD__ 84 #include <sys/ioctl.h> 85 #include <sys/disklabel.h> 86 #include <sys/dkio.h> 87 #endif 88 89 #ifdef __NetBSD__ 90 #include <sys/ioctl.h> 91 #include <sys/disklabel.h> 92 #include <sys/dkio.h> 93 #include <sys/disk.h> 94 #endif 95 96 #ifdef __DragonFly__ 97 #include <sys/ioctl.h> 98 #include <sys/diskslice.h> 99 #endif 100 101 #ifdef CONFIG_XFS 102 #include <xfs/xfs.h> 103 #endif 104 105 //#define DEBUG_BLOCK 106 107 #ifdef DEBUG_BLOCK 108 # define DEBUG_BLOCK_PRINT 1 109 #else 110 # define DEBUG_BLOCK_PRINT 0 111 #endif 112 #define DPRINTF(fmt, ...) 
\ 113 do { \ 114 if (DEBUG_BLOCK_PRINT) { \ 115 printf(fmt, ## __VA_ARGS__); \ 116 } \ 117 } while (0) 118 119 /* OS X does not have O_DSYNC */ 120 #ifndef O_DSYNC 121 #ifdef O_SYNC 122 #define O_DSYNC O_SYNC 123 #elif defined(O_FSYNC) 124 #define O_DSYNC O_FSYNC 125 #endif 126 #endif 127 128 /* Approximate O_DIRECT with O_DSYNC if O_DIRECT isn't available */ 129 #ifndef O_DIRECT 130 #define O_DIRECT O_DSYNC 131 #endif 132 133 #define FTYPE_FILE 0 134 #define FTYPE_CD 1 135 136 #define MAX_BLOCKSIZE 4096 137 138 /* Posix file locking bytes. Libvirt takes byte 0, we start from higher bytes, 139 * leaving a few more bytes for its future use. */ 140 #define RAW_LOCK_PERM_BASE 100 141 #define RAW_LOCK_SHARED_BASE 200 142 143 typedef struct BDRVRawState { 144 int fd; 145 int lock_fd; 146 bool use_lock; 147 int type; 148 int open_flags; 149 size_t buf_align; 150 151 /* The current permissions. */ 152 uint64_t perm; 153 uint64_t shared_perm; 154 155 #ifdef CONFIG_XFS 156 bool is_xfs:1; 157 #endif 158 bool has_discard:1; 159 bool has_write_zeroes:1; 160 bool discard_zeroes:1; 161 bool use_linux_aio:1; 162 bool page_cache_inconsistent:1; 163 bool has_fallocate; 164 bool needs_alignment; 165 bool check_cache_dropped; 166 167 PRManager *pr_mgr; 168 } BDRVRawState; 169 170 typedef struct BDRVRawReopenState { 171 int fd; 172 int open_flags; 173 bool check_cache_dropped; 174 } BDRVRawReopenState; 175 176 static int fd_open(BlockDriverState *bs); 177 static int64_t raw_getlength(BlockDriverState *bs); 178 179 typedef struct RawPosixAIOData { 180 BlockDriverState *bs; 181 int aio_fildes; 182 union { 183 struct iovec *aio_iov; 184 void *aio_ioctl_buf; 185 }; 186 int aio_niov; 187 uint64_t aio_nbytes; 188 #define aio_ioctl_cmd aio_nbytes /* for QEMU_AIO_IOCTL */ 189 off_t aio_offset; 190 int aio_type; 191 int aio_fd2; 192 off_t aio_offset2; 193 } RawPosixAIOData; 194 195 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 196 static int cdrom_reopen(BlockDriverState *bs); 197 #endif 198 199 #if defined(__NetBSD__) 200 static int raw_normalize_devicepath(const char **filename) 201 { 202 static char namebuf[PATH_MAX]; 203 const char *dp, *fname; 204 struct stat sb; 205 206 fname = *filename; 207 dp = strrchr(fname, '/'); 208 if (lstat(fname, &sb) < 0) { 209 fprintf(stderr, "%s: stat failed: %s\n", 210 fname, strerror(errno)); 211 return -errno; 212 } 213 214 if (!S_ISBLK(sb.st_mode)) { 215 return 0; 216 } 217 218 if (dp == NULL) { 219 snprintf(namebuf, PATH_MAX, "r%s", fname); 220 } else { 221 snprintf(namebuf, PATH_MAX, "%.*s/r%s", 222 (int)(dp - fname), fname, dp + 1); 223 } 224 fprintf(stderr, "%s is a block device", fname); 225 *filename = namebuf; 226 fprintf(stderr, ", using %s\n", *filename); 227 228 return 0; 229 } 230 #else 231 static int raw_normalize_devicepath(const char **filename) 232 { 233 return 0; 234 } 235 #endif 236 237 /* 238 * Get logical block size via ioctl. On success store it in @sector_size_p. 
239 */ 240 static int probe_logical_blocksize(int fd, unsigned int *sector_size_p) 241 { 242 unsigned int sector_size; 243 bool success = false; 244 int i; 245 246 errno = ENOTSUP; 247 static const unsigned long ioctl_list[] = { 248 #ifdef BLKSSZGET 249 BLKSSZGET, 250 #endif 251 #ifdef DKIOCGETBLOCKSIZE 252 DKIOCGETBLOCKSIZE, 253 #endif 254 #ifdef DIOCGSECTORSIZE 255 DIOCGSECTORSIZE, 256 #endif 257 }; 258 259 /* Try a few ioctls to get the right size */ 260 for (i = 0; i < (int)ARRAY_SIZE(ioctl_list); i++) { 261 if (ioctl(fd, ioctl_list[i], §or_size) >= 0) { 262 *sector_size_p = sector_size; 263 success = true; 264 } 265 } 266 267 return success ? 0 : -errno; 268 } 269 270 /** 271 * Get physical block size of @fd. 272 * On success, store it in @blk_size and return 0. 273 * On failure, return -errno. 274 */ 275 static int probe_physical_blocksize(int fd, unsigned int *blk_size) 276 { 277 #ifdef BLKPBSZGET 278 if (ioctl(fd, BLKPBSZGET, blk_size) < 0) { 279 return -errno; 280 } 281 return 0; 282 #else 283 return -ENOTSUP; 284 #endif 285 } 286 287 /* Check if read is allowed with given memory buffer and length. 288 * 289 * This function is used to check O_DIRECT memory buffer and request alignment. 290 */ 291 static bool raw_is_io_aligned(int fd, void *buf, size_t len) 292 { 293 ssize_t ret = pread(fd, buf, len, 0); 294 295 if (ret >= 0) { 296 return true; 297 } 298 299 #ifdef __linux__ 300 /* The Linux kernel returns EINVAL for misaligned O_DIRECT reads. Ignore 301 * other errors (e.g. real I/O error), which could happen on a failed 302 * drive, since we only care about probing alignment. 303 */ 304 if (errno != EINVAL) { 305 return true; 306 } 307 #endif 308 309 return false; 310 } 311 312 static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp) 313 { 314 BDRVRawState *s = bs->opaque; 315 char *buf; 316 size_t max_align = MAX(MAX_BLOCKSIZE, getpagesize()); 317 318 /* For SCSI generic devices the alignment is not really used. 319 With buffered I/O, we don't have any restrictions. */ 320 if (bdrv_is_sg(bs) || !s->needs_alignment) { 321 bs->bl.request_alignment = 1; 322 s->buf_align = 1; 323 return; 324 } 325 326 bs->bl.request_alignment = 0; 327 s->buf_align = 0; 328 /* Let's try to use the logical blocksize for the alignment. 
*/ 329 if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) { 330 bs->bl.request_alignment = 0; 331 } 332 #ifdef CONFIG_XFS 333 if (s->is_xfs) { 334 struct dioattr da; 335 if (xfsctl(NULL, fd, XFS_IOC_DIOINFO, &da) >= 0) { 336 bs->bl.request_alignment = da.d_miniosz; 337 /* The kernel returns wrong information for d_mem */ 338 /* s->buf_align = da.d_mem; */ 339 } 340 } 341 #endif 342 343 /* If we could not get the sizes so far, we can only guess them */ 344 if (!s->buf_align) { 345 size_t align; 346 buf = qemu_memalign(max_align, 2 * max_align); 347 for (align = 512; align <= max_align; align <<= 1) { 348 if (raw_is_io_aligned(fd, buf + align, max_align)) { 349 s->buf_align = align; 350 break; 351 } 352 } 353 qemu_vfree(buf); 354 } 355 356 if (!bs->bl.request_alignment) { 357 size_t align; 358 buf = qemu_memalign(s->buf_align, max_align); 359 for (align = 512; align <= max_align; align <<= 1) { 360 if (raw_is_io_aligned(fd, buf, align)) { 361 bs->bl.request_alignment = align; 362 break; 363 } 364 } 365 qemu_vfree(buf); 366 } 367 368 if (!s->buf_align || !bs->bl.request_alignment) { 369 error_setg(errp, "Could not find working O_DIRECT alignment"); 370 error_append_hint(errp, "Try cache.direct=off\n"); 371 } 372 } 373 374 static void raw_parse_flags(int bdrv_flags, int *open_flags) 375 { 376 assert(open_flags != NULL); 377 378 *open_flags |= O_BINARY; 379 *open_flags &= ~O_ACCMODE; 380 if (bdrv_flags & BDRV_O_RDWR) { 381 *open_flags |= O_RDWR; 382 } else { 383 *open_flags |= O_RDONLY; 384 } 385 386 /* Use O_DSYNC for write-through caching, no flags for write-back caching, 387 * and O_DIRECT for no caching. */ 388 if ((bdrv_flags & BDRV_O_NOCACHE)) { 389 *open_flags |= O_DIRECT; 390 } 391 } 392 393 static void raw_parse_filename(const char *filename, QDict *options, 394 Error **errp) 395 { 396 bdrv_parse_filename_strip_prefix(filename, "file:", options); 397 } 398 399 static QemuOptsList raw_runtime_opts = { 400 .name = "raw", 401 .head = QTAILQ_HEAD_INITIALIZER(raw_runtime_opts.head), 402 .desc = { 403 { 404 .name = "filename", 405 .type = QEMU_OPT_STRING, 406 .help = "File name of the image", 407 }, 408 { 409 .name = "aio", 410 .type = QEMU_OPT_STRING, 411 .help = "host AIO implementation (threads, native)", 412 }, 413 { 414 .name = "locking", 415 .type = QEMU_OPT_STRING, 416 .help = "file locking mode (on/off/auto, default: auto)", 417 }, 418 { 419 .name = "pr-manager", 420 .type = QEMU_OPT_STRING, 421 .help = "id of persistent reservation manager object (default: none)", 422 }, 423 { 424 .name = "x-check-cache-dropped", 425 .type = QEMU_OPT_BOOL, 426 .help = "check that page cache was dropped on live migration (default: off)" 427 }, 428 { /* end of list */ } 429 }, 430 }; 431 432 static int raw_open_common(BlockDriverState *bs, QDict *options, 433 int bdrv_flags, int open_flags, Error **errp) 434 { 435 BDRVRawState *s = bs->opaque; 436 QemuOpts *opts; 437 Error *local_err = NULL; 438 const char *filename = NULL; 439 const char *str; 440 BlockdevAioOptions aio, aio_default; 441 int fd, ret; 442 struct stat st; 443 OnOffAuto locking; 444 445 opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort); 446 qemu_opts_absorb_qdict(opts, options, &local_err); 447 if (local_err) { 448 error_propagate(errp, local_err); 449 ret = -EINVAL; 450 goto fail; 451 } 452 453 filename = qemu_opt_get(opts, "filename"); 454 455 ret = raw_normalize_devicepath(&filename); 456 if (ret != 0) { 457 error_setg_errno(errp, -ret, "Could not normalize device path"); 458 goto fail; 459 } 460 461 
aio_default = (bdrv_flags & BDRV_O_NATIVE_AIO) 462 ? BLOCKDEV_AIO_OPTIONS_NATIVE 463 : BLOCKDEV_AIO_OPTIONS_THREADS; 464 aio = qapi_enum_parse(&BlockdevAioOptions_lookup, 465 qemu_opt_get(opts, "aio"), 466 aio_default, &local_err); 467 if (local_err) { 468 error_propagate(errp, local_err); 469 ret = -EINVAL; 470 goto fail; 471 } 472 s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE); 473 474 locking = qapi_enum_parse(&OnOffAuto_lookup, 475 qemu_opt_get(opts, "locking"), 476 ON_OFF_AUTO_AUTO, &local_err); 477 if (local_err) { 478 error_propagate(errp, local_err); 479 ret = -EINVAL; 480 goto fail; 481 } 482 switch (locking) { 483 case ON_OFF_AUTO_ON: 484 s->use_lock = true; 485 if (!qemu_has_ofd_lock()) { 486 fprintf(stderr, 487 "File lock requested but OFD locking syscall is " 488 "unavailable, falling back to POSIX file locks.\n" 489 "Due to the implementation, locks can be lost " 490 "unexpectedly.\n"); 491 } 492 break; 493 case ON_OFF_AUTO_OFF: 494 s->use_lock = false; 495 break; 496 case ON_OFF_AUTO_AUTO: 497 s->use_lock = qemu_has_ofd_lock(); 498 break; 499 default: 500 abort(); 501 } 502 503 str = qemu_opt_get(opts, "pr-manager"); 504 if (str) { 505 s->pr_mgr = pr_manager_lookup(str, &local_err); 506 if (local_err) { 507 error_propagate(errp, local_err); 508 ret = -EINVAL; 509 goto fail; 510 } 511 } 512 513 s->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped", 514 false); 515 516 s->open_flags = open_flags; 517 raw_parse_flags(bdrv_flags, &s->open_flags); 518 519 s->fd = -1; 520 fd = qemu_open(filename, s->open_flags, 0644); 521 if (fd < 0) { 522 ret = -errno; 523 error_setg_errno(errp, errno, "Could not open '%s'", filename); 524 if (ret == -EROFS) { 525 ret = -EACCES; 526 } 527 goto fail; 528 } 529 s->fd = fd; 530 531 s->lock_fd = -1; 532 if (s->use_lock) { 533 fd = qemu_open(filename, s->open_flags); 534 if (fd < 0) { 535 ret = -errno; 536 error_setg_errno(errp, errno, "Could not open '%s' for locking", 537 filename); 538 qemu_close(s->fd); 539 goto fail; 540 } 541 s->lock_fd = fd; 542 } 543 s->perm = 0; 544 s->shared_perm = BLK_PERM_ALL; 545 546 #ifdef CONFIG_LINUX_AIO 547 /* Currently Linux does AIO only for files opened with O_DIRECT */ 548 if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) { 549 error_setg(errp, "aio=native was specified, but it requires " 550 "cache.direct=on, which was not specified."); 551 ret = -EINVAL; 552 goto fail; 553 } 554 #else 555 if (s->use_linux_aio) { 556 error_setg(errp, "aio=native was specified, but is not supported " 557 "in this build."); 558 ret = -EINVAL; 559 goto fail; 560 } 561 #endif /* !defined(CONFIG_LINUX_AIO) */ 562 563 s->has_discard = true; 564 s->has_write_zeroes = true; 565 if ((bs->open_flags & BDRV_O_NOCACHE) != 0) { 566 s->needs_alignment = true; 567 } 568 569 if (fstat(s->fd, &st) < 0) { 570 ret = -errno; 571 error_setg_errno(errp, errno, "Could not stat file"); 572 goto fail; 573 } 574 if (S_ISREG(st.st_mode)) { 575 s->discard_zeroes = true; 576 s->has_fallocate = true; 577 } 578 if (S_ISBLK(st.st_mode)) { 579 #ifdef BLKDISCARDZEROES 580 unsigned int arg; 581 if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) { 582 s->discard_zeroes = true; 583 } 584 #endif 585 #ifdef __linux__ 586 /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do 587 * not rely on the contents of discarded blocks unless using O_DIRECT. 588 * Same for BLKZEROOUT. 
589 */ 590 if (!(bs->open_flags & BDRV_O_NOCACHE)) { 591 s->discard_zeroes = false; 592 s->has_write_zeroes = false; 593 } 594 #endif 595 } 596 #ifdef __FreeBSD__ 597 if (S_ISCHR(st.st_mode)) { 598 /* 599 * The file is a char device (disk), which on FreeBSD isn't behind 600 * a pager, so force all requests to be aligned. This is needed 601 * so QEMU makes sure all IO operations on the device are aligned 602 * to sector size, or else FreeBSD will reject them with EINVAL. 603 */ 604 s->needs_alignment = true; 605 } 606 #endif 607 608 #ifdef CONFIG_XFS 609 if (platform_test_xfs_fd(s->fd)) { 610 s->is_xfs = true; 611 } 612 #endif 613 614 bs->supported_zero_flags = s->discard_zeroes ? BDRV_REQ_MAY_UNMAP : 0; 615 ret = 0; 616 fail: 617 if (filename && (bdrv_flags & BDRV_O_TEMPORARY)) { 618 unlink(filename); 619 } 620 qemu_opts_del(opts); 621 return ret; 622 } 623 624 static int raw_open(BlockDriverState *bs, QDict *options, int flags, 625 Error **errp) 626 { 627 BDRVRawState *s = bs->opaque; 628 629 s->type = FTYPE_FILE; 630 return raw_open_common(bs, options, flags, 0, errp); 631 } 632 633 typedef enum { 634 RAW_PL_PREPARE, 635 RAW_PL_COMMIT, 636 RAW_PL_ABORT, 637 } RawPermLockOp; 638 639 #define PERM_FOREACH(i) \ 640 for ((i) = 0; (1ULL << (i)) <= BLK_PERM_ALL; i++) 641 642 /* Lock bytes indicated by @perm_lock_bits and @shared_perm_lock_bits in the 643 * file; if @unlock == true, also unlock the unneeded bytes. 644 * @shared_perm_lock_bits is the mask of all permissions that are NOT shared. 645 */ 646 static int raw_apply_lock_bytes(BDRVRawState *s, 647 uint64_t perm_lock_bits, 648 uint64_t shared_perm_lock_bits, 649 bool unlock, Error **errp) 650 { 651 int ret; 652 int i; 653 654 PERM_FOREACH(i) { 655 int off = RAW_LOCK_PERM_BASE + i; 656 if (perm_lock_bits & (1ULL << i)) { 657 ret = qemu_lock_fd(s->lock_fd, off, 1, false); 658 if (ret) { 659 error_setg(errp, "Failed to lock byte %d", off); 660 return ret; 661 } 662 } else if (unlock) { 663 ret = qemu_unlock_fd(s->lock_fd, off, 1); 664 if (ret) { 665 error_setg(errp, "Failed to unlock byte %d", off); 666 return ret; 667 } 668 } 669 } 670 PERM_FOREACH(i) { 671 int off = RAW_LOCK_SHARED_BASE + i; 672 if (shared_perm_lock_bits & (1ULL << i)) { 673 ret = qemu_lock_fd(s->lock_fd, off, 1, false); 674 if (ret) { 675 error_setg(errp, "Failed to lock byte %d", off); 676 return ret; 677 } 678 } else if (unlock) { 679 ret = qemu_unlock_fd(s->lock_fd, off, 1); 680 if (ret) { 681 error_setg(errp, "Failed to unlock byte %d", off); 682 return ret; 683 } 684 } 685 } 686 return 0; 687 } 688 689 /* Check "unshared" bytes implied by @perm and ~@shared_perm in the file. 
*/ 690 static int raw_check_lock_bytes(BDRVRawState *s, 691 uint64_t perm, uint64_t shared_perm, 692 Error **errp) 693 { 694 int ret; 695 int i; 696 697 PERM_FOREACH(i) { 698 int off = RAW_LOCK_SHARED_BASE + i; 699 uint64_t p = 1ULL << i; 700 if (perm & p) { 701 ret = qemu_lock_fd_test(s->lock_fd, off, 1, true); 702 if (ret) { 703 char *perm_name = bdrv_perm_names(p); 704 error_setg(errp, 705 "Failed to get \"%s\" lock", 706 perm_name); 707 g_free(perm_name); 708 error_append_hint(errp, 709 "Is another process using the image?\n"); 710 return ret; 711 } 712 } 713 } 714 PERM_FOREACH(i) { 715 int off = RAW_LOCK_PERM_BASE + i; 716 uint64_t p = 1ULL << i; 717 if (!(shared_perm & p)) { 718 ret = qemu_lock_fd_test(s->lock_fd, off, 1, true); 719 if (ret) { 720 char *perm_name = bdrv_perm_names(p); 721 error_setg(errp, 722 "Failed to get shared \"%s\" lock", 723 perm_name); 724 g_free(perm_name); 725 error_append_hint(errp, 726 "Is another process using the image?\n"); 727 return ret; 728 } 729 } 730 } 731 return 0; 732 } 733 734 static int raw_handle_perm_lock(BlockDriverState *bs, 735 RawPermLockOp op, 736 uint64_t new_perm, uint64_t new_shared, 737 Error **errp) 738 { 739 BDRVRawState *s = bs->opaque; 740 int ret = 0; 741 Error *local_err = NULL; 742 743 if (!s->use_lock) { 744 return 0; 745 } 746 747 if (bdrv_get_flags(bs) & BDRV_O_INACTIVE) { 748 return 0; 749 } 750 751 assert(s->lock_fd > 0); 752 753 switch (op) { 754 case RAW_PL_PREPARE: 755 ret = raw_apply_lock_bytes(s, s->perm | new_perm, 756 ~s->shared_perm | ~new_shared, 757 false, errp); 758 if (!ret) { 759 ret = raw_check_lock_bytes(s, new_perm, new_shared, errp); 760 if (!ret) { 761 return 0; 762 } 763 } 764 op = RAW_PL_ABORT; 765 /* fall through to unlock bytes. */ 766 case RAW_PL_ABORT: 767 raw_apply_lock_bytes(s, s->perm, ~s->shared_perm, true, &local_err); 768 if (local_err) { 769 /* Theoretically the above call only unlocks bytes and it cannot 770 * fail. Something weird happened, report it. 771 */ 772 error_report_err(local_err); 773 } 774 break; 775 case RAW_PL_COMMIT: 776 raw_apply_lock_bytes(s, new_perm, ~new_shared, true, &local_err); 777 if (local_err) { 778 /* Theoretically the above call only unlocks bytes and it cannot 779 * fail. Something weird happened, report it. 
780 */ 781 error_report_err(local_err); 782 } 783 break; 784 } 785 return ret; 786 } 787 788 static int raw_reopen_prepare(BDRVReopenState *state, 789 BlockReopenQueue *queue, Error **errp) 790 { 791 BDRVRawState *s; 792 BDRVRawReopenState *rs; 793 QemuOpts *opts; 794 int ret = 0; 795 Error *local_err = NULL; 796 797 assert(state != NULL); 798 assert(state->bs != NULL); 799 800 s = state->bs->opaque; 801 802 state->opaque = g_new0(BDRVRawReopenState, 1); 803 rs = state->opaque; 804 rs->fd = -1; 805 806 /* Handle options changes */ 807 opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort); 808 qemu_opts_absorb_qdict(opts, state->options, &local_err); 809 if (local_err) { 810 error_propagate(errp, local_err); 811 ret = -EINVAL; 812 goto out; 813 } 814 815 rs->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped", 816 s->check_cache_dropped); 817 818 if (s->type == FTYPE_CD) { 819 rs->open_flags |= O_NONBLOCK; 820 } 821 822 raw_parse_flags(state->flags, &rs->open_flags); 823 824 int fcntl_flags = O_APPEND | O_NONBLOCK; 825 #ifdef O_NOATIME 826 fcntl_flags |= O_NOATIME; 827 #endif 828 829 #ifdef O_ASYNC 830 /* Not all operating systems have O_ASYNC, and those that don't 831 * will not let us track the state into rs->open_flags (typically 832 * you achieve the same effect with an ioctl, for example I_SETSIG 833 * on Solaris). But we do not use O_ASYNC, so that's fine. 834 */ 835 assert((s->open_flags & O_ASYNC) == 0); 836 #endif 837 838 if ((rs->open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) { 839 /* dup the original fd */ 840 rs->fd = qemu_dup(s->fd); 841 if (rs->fd >= 0) { 842 ret = fcntl_setfl(rs->fd, rs->open_flags); 843 if (ret) { 844 qemu_close(rs->fd); 845 rs->fd = -1; 846 } 847 } 848 } 849 850 /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */ 851 if (rs->fd == -1) { 852 const char *normalized_filename = state->bs->filename; 853 ret = raw_normalize_devicepath(&normalized_filename); 854 if (ret < 0) { 855 error_setg_errno(errp, -ret, "Could not normalize device path"); 856 } else { 857 assert(!(rs->open_flags & O_CREAT)); 858 rs->fd = qemu_open(normalized_filename, rs->open_flags); 859 if (rs->fd == -1) { 860 error_setg_errno(errp, errno, "Could not reopen file"); 861 ret = -1; 862 } 863 } 864 } 865 866 /* Fail already reopen_prepare() if we can't get a working O_DIRECT 867 * alignment with the new fd. 
*/ 868 if (rs->fd != -1) { 869 raw_probe_alignment(state->bs, rs->fd, &local_err); 870 if (local_err) { 871 qemu_close(rs->fd); 872 rs->fd = -1; 873 error_propagate(errp, local_err); 874 ret = -EINVAL; 875 } 876 } 877 878 out: 879 qemu_opts_del(opts); 880 return ret; 881 } 882 883 static void raw_reopen_commit(BDRVReopenState *state) 884 { 885 BDRVRawReopenState *rs = state->opaque; 886 BDRVRawState *s = state->bs->opaque; 887 888 s->check_cache_dropped = rs->check_cache_dropped; 889 s->open_flags = rs->open_flags; 890 891 qemu_close(s->fd); 892 s->fd = rs->fd; 893 894 g_free(state->opaque); 895 state->opaque = NULL; 896 } 897 898 899 static void raw_reopen_abort(BDRVReopenState *state) 900 { 901 BDRVRawReopenState *rs = state->opaque; 902 903 /* nothing to do if NULL, we didn't get far enough */ 904 if (rs == NULL) { 905 return; 906 } 907 908 if (rs->fd >= 0) { 909 qemu_close(rs->fd); 910 rs->fd = -1; 911 } 912 g_free(state->opaque); 913 state->opaque = NULL; 914 } 915 916 static int hdev_get_max_transfer_length(BlockDriverState *bs, int fd) 917 { 918 #ifdef BLKSECTGET 919 int max_bytes = 0; 920 short max_sectors = 0; 921 if (bs->sg && ioctl(fd, BLKSECTGET, &max_bytes) == 0) { 922 return max_bytes; 923 } else if (!bs->sg && ioctl(fd, BLKSECTGET, &max_sectors) == 0) { 924 return max_sectors << BDRV_SECTOR_BITS; 925 } else { 926 return -errno; 927 } 928 #else 929 return -ENOSYS; 930 #endif 931 } 932 933 static int hdev_get_max_segments(const struct stat *st) 934 { 935 #ifdef CONFIG_LINUX 936 char buf[32]; 937 const char *end; 938 char *sysfspath; 939 int ret; 940 int fd = -1; 941 long max_segments; 942 943 sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments", 944 major(st->st_rdev), minor(st->st_rdev)); 945 fd = open(sysfspath, O_RDONLY); 946 if (fd == -1) { 947 ret = -errno; 948 goto out; 949 } 950 do { 951 ret = read(fd, buf, sizeof(buf) - 1); 952 } while (ret == -1 && errno == EINTR); 953 if (ret < 0) { 954 ret = -errno; 955 goto out; 956 } else if (ret == 0) { 957 ret = -EIO; 958 goto out; 959 } 960 buf[ret] = 0; 961 /* The file is ended with '\n', pass 'end' to accept that. */ 962 ret = qemu_strtol(buf, &end, 10, &max_segments); 963 if (ret == 0 && end && *end == '\n') { 964 ret = max_segments; 965 } 966 967 out: 968 if (fd != -1) { 969 close(fd); 970 } 971 g_free(sysfspath); 972 return ret; 973 #else 974 return -ENOTSUP; 975 #endif 976 } 977 978 static void raw_refresh_limits(BlockDriverState *bs, Error **errp) 979 { 980 BDRVRawState *s = bs->opaque; 981 struct stat st; 982 983 if (!fstat(s->fd, &st)) { 984 if (S_ISBLK(st.st_mode) || S_ISCHR(st.st_mode)) { 985 int ret = hdev_get_max_transfer_length(bs, s->fd); 986 if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) { 987 bs->bl.max_transfer = pow2floor(ret); 988 } 989 ret = hdev_get_max_segments(&st); 990 if (ret > 0) { 991 bs->bl.max_transfer = MIN(bs->bl.max_transfer, 992 ret * getpagesize()); 993 } 994 } 995 } 996 997 raw_probe_alignment(bs, s->fd, errp); 998 bs->bl.min_mem_alignment = s->buf_align; 999 bs->bl.opt_mem_alignment = MAX(s->buf_align, getpagesize()); 1000 } 1001 1002 static int check_for_dasd(int fd) 1003 { 1004 #ifdef BIODASDINFO2 1005 struct dasd_information2_t info = {0}; 1006 1007 return ioctl(fd, BIODASDINFO2, &info); 1008 #else 1009 return -1; 1010 #endif 1011 } 1012 1013 /** 1014 * Try to get @bs's logical and physical block size. 1015 * On success, store them in @bsz and return zero. 1016 * On failure, return negative errno. 
1017 */ 1018 static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz) 1019 { 1020 BDRVRawState *s = bs->opaque; 1021 int ret; 1022 1023 /* If DASD, get blocksizes */ 1024 if (check_for_dasd(s->fd) < 0) { 1025 return -ENOTSUP; 1026 } 1027 ret = probe_logical_blocksize(s->fd, &bsz->log); 1028 if (ret < 0) { 1029 return ret; 1030 } 1031 return probe_physical_blocksize(s->fd, &bsz->phys); 1032 } 1033 1034 /** 1035 * Try to get @bs's geometry: cyls, heads, sectors. 1036 * On success, store them in @geo and return 0. 1037 * On failure return -errno. 1038 * (Allows block driver to assign default geometry values that guest sees) 1039 */ 1040 #ifdef __linux__ 1041 static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo) 1042 { 1043 BDRVRawState *s = bs->opaque; 1044 struct hd_geometry ioctl_geo = {0}; 1045 1046 /* If DASD, get its geometry */ 1047 if (check_for_dasd(s->fd) < 0) { 1048 return -ENOTSUP; 1049 } 1050 if (ioctl(s->fd, HDIO_GETGEO, &ioctl_geo) < 0) { 1051 return -errno; 1052 } 1053 /* HDIO_GETGEO may return success even though geo contains zeros 1054 (e.g. certain multipath setups) */ 1055 if (!ioctl_geo.heads || !ioctl_geo.sectors || !ioctl_geo.cylinders) { 1056 return -ENOTSUP; 1057 } 1058 /* Do not return a geometry for partition */ 1059 if (ioctl_geo.start != 0) { 1060 return -ENOTSUP; 1061 } 1062 geo->heads = ioctl_geo.heads; 1063 geo->sectors = ioctl_geo.sectors; 1064 geo->cylinders = ioctl_geo.cylinders; 1065 1066 return 0; 1067 } 1068 #else /* __linux__ */ 1069 static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo) 1070 { 1071 return -ENOTSUP; 1072 } 1073 #endif 1074 1075 static ssize_t handle_aiocb_ioctl(RawPosixAIOData *aiocb) 1076 { 1077 int ret; 1078 1079 ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf); 1080 if (ret == -1) { 1081 return -errno; 1082 } 1083 1084 return 0; 1085 } 1086 1087 static ssize_t handle_aiocb_flush(RawPosixAIOData *aiocb) 1088 { 1089 BDRVRawState *s = aiocb->bs->opaque; 1090 int ret; 1091 1092 if (s->page_cache_inconsistent) { 1093 return -EIO; 1094 } 1095 1096 ret = qemu_fdatasync(aiocb->aio_fildes); 1097 if (ret == -1) { 1098 /* There is no clear definition of the semantics of a failing fsync(), 1099 * so we may have to assume the worst. The sad truth is that this 1100 * assumption is correct for Linux. Some pages are now probably marked 1101 * clean in the page cache even though they are inconsistent with the 1102 * on-disk contents. The next fdatasync() call would succeed, but no 1103 * further writeback attempt will be made. We can't get back to a state 1104 * in which we know what is on disk (we would have to rewrite 1105 * everything that was touched since the last fdatasync() at least), so 1106 * make bdrv_flush() fail permanently. Given that the behaviour isn't 1107 * really defined, I have little hope that other OSes are doing better. 1108 * 1109 * Obviously, this doesn't affect O_DIRECT, which bypasses the page 1110 * cache. 
*/ 1111 if ((s->open_flags & O_DIRECT) == 0) { 1112 s->page_cache_inconsistent = true; 1113 } 1114 return -errno; 1115 } 1116 return 0; 1117 } 1118 1119 #ifdef CONFIG_PREADV 1120 1121 static bool preadv_present = true; 1122 1123 static ssize_t 1124 qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset) 1125 { 1126 return preadv(fd, iov, nr_iov, offset); 1127 } 1128 1129 static ssize_t 1130 qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset) 1131 { 1132 return pwritev(fd, iov, nr_iov, offset); 1133 } 1134 1135 #else 1136 1137 static bool preadv_present = false; 1138 1139 static ssize_t 1140 qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset) 1141 { 1142 return -ENOSYS; 1143 } 1144 1145 static ssize_t 1146 qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset) 1147 { 1148 return -ENOSYS; 1149 } 1150 1151 #endif 1152 1153 static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb) 1154 { 1155 ssize_t len; 1156 1157 do { 1158 if (aiocb->aio_type & QEMU_AIO_WRITE) 1159 len = qemu_pwritev(aiocb->aio_fildes, 1160 aiocb->aio_iov, 1161 aiocb->aio_niov, 1162 aiocb->aio_offset); 1163 else 1164 len = qemu_preadv(aiocb->aio_fildes, 1165 aiocb->aio_iov, 1166 aiocb->aio_niov, 1167 aiocb->aio_offset); 1168 } while (len == -1 && errno == EINTR); 1169 1170 if (len == -1) { 1171 return -errno; 1172 } 1173 return len; 1174 } 1175 1176 /* 1177 * Read/writes the data to/from a given linear buffer. 1178 * 1179 * Returns the number of bytes handles or -errno in case of an error. Short 1180 * reads are only returned if the end of the file is reached. 1181 */ 1182 static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf) 1183 { 1184 ssize_t offset = 0; 1185 ssize_t len; 1186 1187 while (offset < aiocb->aio_nbytes) { 1188 if (aiocb->aio_type & QEMU_AIO_WRITE) { 1189 len = pwrite(aiocb->aio_fildes, 1190 (const char *)buf + offset, 1191 aiocb->aio_nbytes - offset, 1192 aiocb->aio_offset + offset); 1193 } else { 1194 len = pread(aiocb->aio_fildes, 1195 buf + offset, 1196 aiocb->aio_nbytes - offset, 1197 aiocb->aio_offset + offset); 1198 } 1199 if (len == -1 && errno == EINTR) { 1200 continue; 1201 } else if (len == -1 && errno == EINVAL && 1202 (aiocb->bs->open_flags & BDRV_O_NOCACHE) && 1203 !(aiocb->aio_type & QEMU_AIO_WRITE) && 1204 offset > 0) { 1205 /* O_DIRECT pread() may fail with EINVAL when offset is unaligned 1206 * after a short read. Assume that O_DIRECT short reads only occur 1207 * at EOF. Therefore this is a short read, not an I/O error. 1208 */ 1209 break; 1210 } else if (len == -1) { 1211 offset = -errno; 1212 break; 1213 } else if (len == 0) { 1214 break; 1215 } 1216 offset += len; 1217 } 1218 1219 return offset; 1220 } 1221 1222 static ssize_t handle_aiocb_rw(RawPosixAIOData *aiocb) 1223 { 1224 ssize_t nbytes; 1225 char *buf; 1226 1227 if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) { 1228 /* 1229 * If there is just a single buffer, and it is properly aligned 1230 * we can just use plain pread/pwrite without any problems. 1231 */ 1232 if (aiocb->aio_niov == 1) { 1233 return handle_aiocb_rw_linear(aiocb, aiocb->aio_iov->iov_base); 1234 } 1235 /* 1236 * We have more than one iovec, and all are properly aligned. 1237 * 1238 * Try preadv/pwritev first and fall back to linearizing the 1239 * buffer if it's not supported. 
1240 */ 1241 if (preadv_present) { 1242 nbytes = handle_aiocb_rw_vector(aiocb); 1243 if (nbytes == aiocb->aio_nbytes || 1244 (nbytes < 0 && nbytes != -ENOSYS)) { 1245 return nbytes; 1246 } 1247 preadv_present = false; 1248 } 1249 1250 /* 1251 * XXX(hch): short read/write. no easy way to handle the reminder 1252 * using these interfaces. For now retry using plain 1253 * pread/pwrite? 1254 */ 1255 } 1256 1257 /* 1258 * Ok, we have to do it the hard way, copy all segments into 1259 * a single aligned buffer. 1260 */ 1261 buf = qemu_try_blockalign(aiocb->bs, aiocb->aio_nbytes); 1262 if (buf == NULL) { 1263 return -ENOMEM; 1264 } 1265 1266 if (aiocb->aio_type & QEMU_AIO_WRITE) { 1267 char *p = buf; 1268 int i; 1269 1270 for (i = 0; i < aiocb->aio_niov; ++i) { 1271 memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len); 1272 p += aiocb->aio_iov[i].iov_len; 1273 } 1274 assert(p - buf == aiocb->aio_nbytes); 1275 } 1276 1277 nbytes = handle_aiocb_rw_linear(aiocb, buf); 1278 if (!(aiocb->aio_type & QEMU_AIO_WRITE)) { 1279 char *p = buf; 1280 size_t count = aiocb->aio_nbytes, copy; 1281 int i; 1282 1283 for (i = 0; i < aiocb->aio_niov && count; ++i) { 1284 copy = count; 1285 if (copy > aiocb->aio_iov[i].iov_len) { 1286 copy = aiocb->aio_iov[i].iov_len; 1287 } 1288 memcpy(aiocb->aio_iov[i].iov_base, p, copy); 1289 assert(count >= copy); 1290 p += copy; 1291 count -= copy; 1292 } 1293 assert(count == 0); 1294 } 1295 qemu_vfree(buf); 1296 1297 return nbytes; 1298 } 1299 1300 #ifdef CONFIG_XFS 1301 static int xfs_write_zeroes(BDRVRawState *s, int64_t offset, uint64_t bytes) 1302 { 1303 struct xfs_flock64 fl; 1304 int err; 1305 1306 memset(&fl, 0, sizeof(fl)); 1307 fl.l_whence = SEEK_SET; 1308 fl.l_start = offset; 1309 fl.l_len = bytes; 1310 1311 if (xfsctl(NULL, s->fd, XFS_IOC_ZERO_RANGE, &fl) < 0) { 1312 err = errno; 1313 DPRINTF("cannot write zero range (%s)\n", strerror(errno)); 1314 return -err; 1315 } 1316 1317 return 0; 1318 } 1319 1320 static int xfs_discard(BDRVRawState *s, int64_t offset, uint64_t bytes) 1321 { 1322 struct xfs_flock64 fl; 1323 int err; 1324 1325 memset(&fl, 0, sizeof(fl)); 1326 fl.l_whence = SEEK_SET; 1327 fl.l_start = offset; 1328 fl.l_len = bytes; 1329 1330 if (xfsctl(NULL, s->fd, XFS_IOC_UNRESVSP64, &fl) < 0) { 1331 err = errno; 1332 DPRINTF("cannot punch hole (%s)\n", strerror(errno)); 1333 return -err; 1334 } 1335 1336 return 0; 1337 } 1338 #endif 1339 1340 static int translate_err(int err) 1341 { 1342 if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP || 1343 err == -ENOTTY) { 1344 err = -ENOTSUP; 1345 } 1346 return err; 1347 } 1348 1349 #ifdef CONFIG_FALLOCATE 1350 static int do_fallocate(int fd, int mode, off_t offset, off_t len) 1351 { 1352 do { 1353 if (fallocate(fd, mode, offset, len) == 0) { 1354 return 0; 1355 } 1356 } while (errno == EINTR); 1357 return translate_err(-errno); 1358 } 1359 #endif 1360 1361 static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb) 1362 { 1363 int ret = -ENOTSUP; 1364 BDRVRawState *s = aiocb->bs->opaque; 1365 1366 if (!s->has_write_zeroes) { 1367 return -ENOTSUP; 1368 } 1369 1370 #ifdef BLKZEROOUT 1371 do { 1372 uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes }; 1373 if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) { 1374 return 0; 1375 } 1376 } while (errno == EINTR); 1377 1378 ret = translate_err(-errno); 1379 #endif 1380 1381 if (ret == -ENOTSUP) { 1382 s->has_write_zeroes = false; 1383 } 1384 return ret; 1385 } 1386 1387 static ssize_t handle_aiocb_write_zeroes(RawPosixAIOData *aiocb) 
1388 { 1389 #if defined(CONFIG_FALLOCATE) || defined(CONFIG_XFS) 1390 BDRVRawState *s = aiocb->bs->opaque; 1391 #endif 1392 #ifdef CONFIG_FALLOCATE 1393 int64_t len; 1394 #endif 1395 1396 if (aiocb->aio_type & QEMU_AIO_BLKDEV) { 1397 return handle_aiocb_write_zeroes_block(aiocb); 1398 } 1399 1400 #ifdef CONFIG_XFS 1401 if (s->is_xfs) { 1402 return xfs_write_zeroes(s, aiocb->aio_offset, aiocb->aio_nbytes); 1403 } 1404 #endif 1405 1406 #ifdef CONFIG_FALLOCATE_ZERO_RANGE 1407 if (s->has_write_zeroes) { 1408 int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE, 1409 aiocb->aio_offset, aiocb->aio_nbytes); 1410 if (ret == 0 || ret != -ENOTSUP) { 1411 return ret; 1412 } 1413 s->has_write_zeroes = false; 1414 } 1415 #endif 1416 1417 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 1418 if (s->has_discard && s->has_fallocate) { 1419 int ret = do_fallocate(s->fd, 1420 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 1421 aiocb->aio_offset, aiocb->aio_nbytes); 1422 if (ret == 0) { 1423 ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); 1424 if (ret == 0 || ret != -ENOTSUP) { 1425 return ret; 1426 } 1427 s->has_fallocate = false; 1428 } else if (ret != -ENOTSUP) { 1429 return ret; 1430 } else { 1431 s->has_discard = false; 1432 } 1433 } 1434 #endif 1435 1436 #ifdef CONFIG_FALLOCATE 1437 /* Last resort: we are trying to extend the file with zeroed data. This 1438 * can be done via fallocate(fd, 0) */ 1439 len = bdrv_getlength(aiocb->bs); 1440 if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) { 1441 int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); 1442 if (ret == 0 || ret != -ENOTSUP) { 1443 return ret; 1444 } 1445 s->has_fallocate = false; 1446 } 1447 #endif 1448 1449 return -ENOTSUP; 1450 } 1451 1452 #ifndef HAVE_COPY_FILE_RANGE 1453 static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd, 1454 off_t *out_off, size_t len, unsigned int flags) 1455 { 1456 #ifdef __NR_copy_file_range 1457 return syscall(__NR_copy_file_range, in_fd, in_off, out_fd, 1458 out_off, len, flags); 1459 #else 1460 errno = ENOSYS; 1461 return -1; 1462 #endif 1463 } 1464 #endif 1465 1466 static ssize_t handle_aiocb_copy_range(RawPosixAIOData *aiocb) 1467 { 1468 uint64_t bytes = aiocb->aio_nbytes; 1469 off_t in_off = aiocb->aio_offset; 1470 off_t out_off = aiocb->aio_offset2; 1471 1472 while (bytes) { 1473 ssize_t ret = copy_file_range(aiocb->aio_fildes, &in_off, 1474 aiocb->aio_fd2, &out_off, 1475 bytes, 0); 1476 if (ret == -EINTR) { 1477 continue; 1478 } 1479 if (ret < 0) { 1480 if (errno == ENOSYS) { 1481 return -ENOTSUP; 1482 } else { 1483 return -errno; 1484 } 1485 } 1486 if (!ret) { 1487 /* No progress (e.g. when beyond EOF), fall back to buffer I/O. 
*/ 1488 return -ENOTSUP; 1489 } 1490 bytes -= ret; 1491 } 1492 return 0; 1493 } 1494 1495 static ssize_t handle_aiocb_discard(RawPosixAIOData *aiocb) 1496 { 1497 int ret = -EOPNOTSUPP; 1498 BDRVRawState *s = aiocb->bs->opaque; 1499 1500 if (!s->has_discard) { 1501 return -ENOTSUP; 1502 } 1503 1504 if (aiocb->aio_type & QEMU_AIO_BLKDEV) { 1505 #ifdef BLKDISCARD 1506 do { 1507 uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes }; 1508 if (ioctl(aiocb->aio_fildes, BLKDISCARD, range) == 0) { 1509 return 0; 1510 } 1511 } while (errno == EINTR); 1512 1513 ret = -errno; 1514 #endif 1515 } else { 1516 #ifdef CONFIG_XFS 1517 if (s->is_xfs) { 1518 return xfs_discard(s, aiocb->aio_offset, aiocb->aio_nbytes); 1519 } 1520 #endif 1521 1522 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 1523 ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 1524 aiocb->aio_offset, aiocb->aio_nbytes); 1525 #endif 1526 } 1527 1528 ret = translate_err(ret); 1529 if (ret == -ENOTSUP) { 1530 s->has_discard = false; 1531 } 1532 return ret; 1533 } 1534 1535 static int aio_worker(void *arg) 1536 { 1537 RawPosixAIOData *aiocb = arg; 1538 ssize_t ret = 0; 1539 1540 switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) { 1541 case QEMU_AIO_READ: 1542 ret = handle_aiocb_rw(aiocb); 1543 if (ret >= 0 && ret < aiocb->aio_nbytes) { 1544 iov_memset(aiocb->aio_iov, aiocb->aio_niov, ret, 1545 0, aiocb->aio_nbytes - ret); 1546 1547 ret = aiocb->aio_nbytes; 1548 } 1549 if (ret == aiocb->aio_nbytes) { 1550 ret = 0; 1551 } else if (ret >= 0 && ret < aiocb->aio_nbytes) { 1552 ret = -EINVAL; 1553 } 1554 break; 1555 case QEMU_AIO_WRITE: 1556 ret = handle_aiocb_rw(aiocb); 1557 if (ret == aiocb->aio_nbytes) { 1558 ret = 0; 1559 } else if (ret >= 0 && ret < aiocb->aio_nbytes) { 1560 ret = -EINVAL; 1561 } 1562 break; 1563 case QEMU_AIO_FLUSH: 1564 ret = handle_aiocb_flush(aiocb); 1565 break; 1566 case QEMU_AIO_IOCTL: 1567 ret = handle_aiocb_ioctl(aiocb); 1568 break; 1569 case QEMU_AIO_DISCARD: 1570 ret = handle_aiocb_discard(aiocb); 1571 break; 1572 case QEMU_AIO_WRITE_ZEROES: 1573 ret = handle_aiocb_write_zeroes(aiocb); 1574 break; 1575 case QEMU_AIO_COPY_RANGE: 1576 ret = handle_aiocb_copy_range(aiocb); 1577 break; 1578 default: 1579 fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type); 1580 ret = -EINVAL; 1581 break; 1582 } 1583 1584 g_free(aiocb); 1585 return ret; 1586 } 1587 1588 static int paio_submit_co_full(BlockDriverState *bs, int fd, 1589 int64_t offset, int fd2, int64_t offset2, 1590 QEMUIOVector *qiov, 1591 int bytes, int type) 1592 { 1593 RawPosixAIOData *acb = g_new(RawPosixAIOData, 1); 1594 ThreadPool *pool; 1595 1596 acb->bs = bs; 1597 acb->aio_type = type; 1598 acb->aio_fildes = fd; 1599 acb->aio_fd2 = fd2; 1600 acb->aio_offset2 = offset2; 1601 1602 acb->aio_nbytes = bytes; 1603 acb->aio_offset = offset; 1604 1605 if (qiov) { 1606 acb->aio_iov = qiov->iov; 1607 acb->aio_niov = qiov->niov; 1608 assert(qiov->size == bytes); 1609 } 1610 1611 trace_paio_submit_co(offset, bytes, type); 1612 pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); 1613 return thread_pool_submit_co(pool, aio_worker, acb); 1614 } 1615 1616 static inline int paio_submit_co(BlockDriverState *bs, int fd, 1617 int64_t offset, QEMUIOVector *qiov, 1618 int bytes, int type) 1619 { 1620 return paio_submit_co_full(bs, fd, offset, -1, 0, qiov, bytes, type); 1621 } 1622 1623 static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd, 1624 int64_t offset, QEMUIOVector *qiov, int bytes, 1625 BlockCompletionFunc *cb, void *opaque, int type) 1626 { 1627 
RawPosixAIOData *acb = g_new(RawPosixAIOData, 1); 1628 ThreadPool *pool; 1629 1630 acb->bs = bs; 1631 acb->aio_type = type; 1632 acb->aio_fildes = fd; 1633 1634 acb->aio_nbytes = bytes; 1635 acb->aio_offset = offset; 1636 1637 if (qiov) { 1638 acb->aio_iov = qiov->iov; 1639 acb->aio_niov = qiov->niov; 1640 assert(qiov->size == acb->aio_nbytes); 1641 } 1642 1643 trace_paio_submit(acb, opaque, offset, bytes, type); 1644 pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); 1645 return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque); 1646 } 1647 1648 static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, 1649 uint64_t bytes, QEMUIOVector *qiov, int type) 1650 { 1651 BDRVRawState *s = bs->opaque; 1652 1653 if (fd_open(bs) < 0) 1654 return -EIO; 1655 1656 /* 1657 * Check if the underlying device requires requests to be aligned, 1658 * and if the request we are trying to submit is aligned or not. 1659 * If this is the case tell the low-level driver that it needs 1660 * to copy the buffer. 1661 */ 1662 if (s->needs_alignment) { 1663 if (!bdrv_qiov_is_aligned(bs, qiov)) { 1664 type |= QEMU_AIO_MISALIGNED; 1665 #ifdef CONFIG_LINUX_AIO 1666 } else if (s->use_linux_aio) { 1667 LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); 1668 assert(qiov->size == bytes); 1669 return laio_co_submit(bs, aio, s->fd, offset, qiov, type); 1670 #endif 1671 } 1672 } 1673 1674 return paio_submit_co(bs, s->fd, offset, qiov, bytes, type); 1675 } 1676 1677 static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, 1678 uint64_t bytes, QEMUIOVector *qiov, 1679 int flags) 1680 { 1681 return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ); 1682 } 1683 1684 static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, 1685 uint64_t bytes, QEMUIOVector *qiov, 1686 int flags) 1687 { 1688 assert(flags == 0); 1689 return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE); 1690 } 1691 1692 static void raw_aio_plug(BlockDriverState *bs) 1693 { 1694 #ifdef CONFIG_LINUX_AIO 1695 BDRVRawState *s = bs->opaque; 1696 if (s->use_linux_aio) { 1697 LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); 1698 laio_io_plug(bs, aio); 1699 } 1700 #endif 1701 } 1702 1703 static void raw_aio_unplug(BlockDriverState *bs) 1704 { 1705 #ifdef CONFIG_LINUX_AIO 1706 BDRVRawState *s = bs->opaque; 1707 if (s->use_linux_aio) { 1708 LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); 1709 laio_io_unplug(bs, aio); 1710 } 1711 #endif 1712 } 1713 1714 static BlockAIOCB *raw_aio_flush(BlockDriverState *bs, 1715 BlockCompletionFunc *cb, void *opaque) 1716 { 1717 BDRVRawState *s = bs->opaque; 1718 1719 if (fd_open(bs) < 0) 1720 return NULL; 1721 1722 return paio_submit(bs, s->fd, 0, NULL, 0, cb, opaque, QEMU_AIO_FLUSH); 1723 } 1724 1725 static void raw_close(BlockDriverState *bs) 1726 { 1727 BDRVRawState *s = bs->opaque; 1728 1729 if (s->fd >= 0) { 1730 qemu_close(s->fd); 1731 s->fd = -1; 1732 } 1733 if (s->lock_fd >= 0) { 1734 qemu_close(s->lock_fd); 1735 s->lock_fd = -1; 1736 } 1737 } 1738 1739 /** 1740 * Truncates the given regular file @fd to @offset and, when growing, fills the 1741 * new space according to @prealloc. 1742 * 1743 * Returns: 0 on success, -errno on failure. 
1744 */ 1745 static int raw_regular_truncate(int fd, int64_t offset, PreallocMode prealloc, 1746 Error **errp) 1747 { 1748 int result = 0; 1749 int64_t current_length = 0; 1750 char *buf = NULL; 1751 struct stat st; 1752 1753 if (fstat(fd, &st) < 0) { 1754 result = -errno; 1755 error_setg_errno(errp, -result, "Could not stat file"); 1756 return result; 1757 } 1758 1759 current_length = st.st_size; 1760 if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { 1761 error_setg(errp, "Cannot use preallocation for shrinking files"); 1762 return -ENOTSUP; 1763 } 1764 1765 switch (prealloc) { 1766 #ifdef CONFIG_POSIX_FALLOCATE 1767 case PREALLOC_MODE_FALLOC: 1768 /* 1769 * Truncating before posix_fallocate() makes it about twice slower on 1770 * file systems that do not support fallocate(), trying to check if a 1771 * block is allocated before allocating it, so don't do that here. 1772 */ 1773 if (offset != current_length) { 1774 result = -posix_fallocate(fd, current_length, offset - current_length); 1775 if (result != 0) { 1776 /* posix_fallocate() doesn't set errno. */ 1777 error_setg_errno(errp, -result, 1778 "Could not preallocate new data"); 1779 } 1780 } else { 1781 result = 0; 1782 } 1783 goto out; 1784 #endif 1785 case PREALLOC_MODE_FULL: 1786 { 1787 int64_t num = 0, left = offset - current_length; 1788 off_t seek_result; 1789 1790 /* 1791 * Knowing the final size from the beginning could allow the file 1792 * system driver to do less allocations and possibly avoid 1793 * fragmentation of the file. 1794 */ 1795 if (ftruncate(fd, offset) != 0) { 1796 result = -errno; 1797 error_setg_errno(errp, -result, "Could not resize file"); 1798 goto out; 1799 } 1800 1801 buf = g_malloc0(65536); 1802 1803 seek_result = lseek(fd, current_length, SEEK_SET); 1804 if (seek_result < 0) { 1805 result = -errno; 1806 error_setg_errno(errp, -result, 1807 "Failed to seek to the old end of file"); 1808 goto out; 1809 } 1810 1811 while (left > 0) { 1812 num = MIN(left, 65536); 1813 result = write(fd, buf, num); 1814 if (result < 0) { 1815 result = -errno; 1816 error_setg_errno(errp, -result, 1817 "Could not write zeros for preallocation"); 1818 goto out; 1819 } 1820 left -= result; 1821 } 1822 if (result >= 0) { 1823 result = fsync(fd); 1824 if (result < 0) { 1825 result = -errno; 1826 error_setg_errno(errp, -result, 1827 "Could not flush file to disk"); 1828 goto out; 1829 } 1830 } 1831 goto out; 1832 } 1833 case PREALLOC_MODE_OFF: 1834 if (ftruncate(fd, offset) != 0) { 1835 result = -errno; 1836 error_setg_errno(errp, -result, "Could not resize file"); 1837 } 1838 return result; 1839 default: 1840 result = -ENOTSUP; 1841 error_setg(errp, "Unsupported preallocation mode: %s", 1842 PreallocMode_str(prealloc)); 1843 return result; 1844 } 1845 1846 out: 1847 if (result < 0) { 1848 if (ftruncate(fd, current_length) < 0) { 1849 error_report("Failed to restore old file length: %s", 1850 strerror(errno)); 1851 } 1852 } 1853 1854 g_free(buf); 1855 return result; 1856 } 1857 1858 static int raw_truncate(BlockDriverState *bs, int64_t offset, 1859 PreallocMode prealloc, Error **errp) 1860 { 1861 BDRVRawState *s = bs->opaque; 1862 struct stat st; 1863 int ret; 1864 1865 if (fstat(s->fd, &st)) { 1866 ret = -errno; 1867 error_setg_errno(errp, -ret, "Failed to fstat() the file"); 1868 return ret; 1869 } 1870 1871 if (S_ISREG(st.st_mode)) { 1872 return raw_regular_truncate(s->fd, offset, prealloc, errp); 1873 } 1874 1875 if (prealloc != PREALLOC_MODE_OFF) { 1876 error_setg(errp, "Preallocation mode '%s' unsupported for 
this " 1877 "non-regular file", PreallocMode_str(prealloc)); 1878 return -ENOTSUP; 1879 } 1880 1881 if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 1882 if (offset > raw_getlength(bs)) { 1883 error_setg(errp, "Cannot grow device files"); 1884 return -EINVAL; 1885 } 1886 } else { 1887 error_setg(errp, "Resizing this file is not supported"); 1888 return -ENOTSUP; 1889 } 1890 1891 return 0; 1892 } 1893 1894 #ifdef __OpenBSD__ 1895 static int64_t raw_getlength(BlockDriverState *bs) 1896 { 1897 BDRVRawState *s = bs->opaque; 1898 int fd = s->fd; 1899 struct stat st; 1900 1901 if (fstat(fd, &st)) 1902 return -errno; 1903 if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 1904 struct disklabel dl; 1905 1906 if (ioctl(fd, DIOCGDINFO, &dl)) 1907 return -errno; 1908 return (uint64_t)dl.d_secsize * 1909 dl.d_partitions[DISKPART(st.st_rdev)].p_size; 1910 } else 1911 return st.st_size; 1912 } 1913 #elif defined(__NetBSD__) 1914 static int64_t raw_getlength(BlockDriverState *bs) 1915 { 1916 BDRVRawState *s = bs->opaque; 1917 int fd = s->fd; 1918 struct stat st; 1919 1920 if (fstat(fd, &st)) 1921 return -errno; 1922 if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 1923 struct dkwedge_info dkw; 1924 1925 if (ioctl(fd, DIOCGWEDGEINFO, &dkw) != -1) { 1926 return dkw.dkw_size * 512; 1927 } else { 1928 struct disklabel dl; 1929 1930 if (ioctl(fd, DIOCGDINFO, &dl)) 1931 return -errno; 1932 return (uint64_t)dl.d_secsize * 1933 dl.d_partitions[DISKPART(st.st_rdev)].p_size; 1934 } 1935 } else 1936 return st.st_size; 1937 } 1938 #elif defined(__sun__) 1939 static int64_t raw_getlength(BlockDriverState *bs) 1940 { 1941 BDRVRawState *s = bs->opaque; 1942 struct dk_minfo minfo; 1943 int ret; 1944 int64_t size; 1945 1946 ret = fd_open(bs); 1947 if (ret < 0) { 1948 return ret; 1949 } 1950 1951 /* 1952 * Use the DKIOCGMEDIAINFO ioctl to read the size. 1953 */ 1954 ret = ioctl(s->fd, DKIOCGMEDIAINFO, &minfo); 1955 if (ret != -1) { 1956 return minfo.dki_lbsize * minfo.dki_capacity; 1957 } 1958 1959 /* 1960 * There are reports that lseek on some devices fails, but 1961 * irc discussion said that contingency on contingency was overkill. 
1962 */ 1963 size = lseek(s->fd, 0, SEEK_END); 1964 if (size < 0) { 1965 return -errno; 1966 } 1967 return size; 1968 } 1969 #elif defined(CONFIG_BSD) 1970 static int64_t raw_getlength(BlockDriverState *bs) 1971 { 1972 BDRVRawState *s = bs->opaque; 1973 int fd = s->fd; 1974 int64_t size; 1975 struct stat sb; 1976 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 1977 int reopened = 0; 1978 #endif 1979 int ret; 1980 1981 ret = fd_open(bs); 1982 if (ret < 0) 1983 return ret; 1984 1985 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 1986 again: 1987 #endif 1988 if (!fstat(fd, &sb) && (S_IFCHR & sb.st_mode)) { 1989 #ifdef DIOCGMEDIASIZE 1990 if (ioctl(fd, DIOCGMEDIASIZE, (off_t *)&size)) 1991 #elif defined(DIOCGPART) 1992 { 1993 struct partinfo pi; 1994 if (ioctl(fd, DIOCGPART, &pi) == 0) 1995 size = pi.media_size; 1996 else 1997 size = 0; 1998 } 1999 if (size == 0) 2000 #endif 2001 #if defined(__APPLE__) && defined(__MACH__) 2002 { 2003 uint64_t sectors = 0; 2004 uint32_t sector_size = 0; 2005 2006 if (ioctl(fd, DKIOCGETBLOCKCOUNT, §ors) == 0 2007 && ioctl(fd, DKIOCGETBLOCKSIZE, §or_size) == 0) { 2008 size = sectors * sector_size; 2009 } else { 2010 size = lseek(fd, 0LL, SEEK_END); 2011 if (size < 0) { 2012 return -errno; 2013 } 2014 } 2015 } 2016 #else 2017 size = lseek(fd, 0LL, SEEK_END); 2018 if (size < 0) { 2019 return -errno; 2020 } 2021 #endif 2022 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 2023 switch(s->type) { 2024 case FTYPE_CD: 2025 /* XXX FreeBSD acd returns UINT_MAX sectors for an empty drive */ 2026 if (size == 2048LL * (unsigned)-1) 2027 size = 0; 2028 /* XXX no disc? maybe we need to reopen... */ 2029 if (size <= 0 && !reopened && cdrom_reopen(bs) >= 0) { 2030 reopened = 1; 2031 goto again; 2032 } 2033 } 2034 #endif 2035 } else { 2036 size = lseek(fd, 0, SEEK_END); 2037 if (size < 0) { 2038 return -errno; 2039 } 2040 } 2041 return size; 2042 } 2043 #else 2044 static int64_t raw_getlength(BlockDriverState *bs) 2045 { 2046 BDRVRawState *s = bs->opaque; 2047 int ret; 2048 int64_t size; 2049 2050 ret = fd_open(bs); 2051 if (ret < 0) { 2052 return ret; 2053 } 2054 2055 size = lseek(s->fd, 0, SEEK_END); 2056 if (size < 0) { 2057 return -errno; 2058 } 2059 return size; 2060 } 2061 #endif 2062 2063 static int64_t raw_get_allocated_file_size(BlockDriverState *bs) 2064 { 2065 struct stat st; 2066 BDRVRawState *s = bs->opaque; 2067 2068 if (fstat(s->fd, &st) < 0) { 2069 return -errno; 2070 } 2071 return (int64_t)st.st_blocks * 512; 2072 } 2073 2074 static int raw_co_create(BlockdevCreateOptions *options, Error **errp) 2075 { 2076 BlockdevCreateOptionsFile *file_opts; 2077 int fd; 2078 int result = 0; 2079 2080 /* Validate options and set default values */ 2081 assert(options->driver == BLOCKDEV_DRIVER_FILE); 2082 file_opts = &options->u.file; 2083 2084 if (!file_opts->has_nocow) { 2085 file_opts->nocow = false; 2086 } 2087 if (!file_opts->has_preallocation) { 2088 file_opts->preallocation = PREALLOC_MODE_OFF; 2089 } 2090 2091 /* Create file */ 2092 fd = qemu_open(file_opts->filename, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 2093 0644); 2094 if (fd < 0) { 2095 result = -errno; 2096 error_setg_errno(errp, -result, "Could not create file"); 2097 goto out; 2098 } 2099 2100 if (file_opts->nocow) { 2101 #ifdef __linux__ 2102 /* Set NOCOW flag to solve performance issue on fs like btrfs. 2103 * This is an optimisation. The FS_IOC_SETFLAGS ioctl return value 2104 * will be ignored since any failure of this operation should not 2105 * block the left work. 
2106 */ 2107 int attr; 2108 if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) { 2109 attr |= FS_NOCOW_FL; 2110 ioctl(fd, FS_IOC_SETFLAGS, &attr); 2111 } 2112 #endif 2113 } 2114 2115 result = raw_regular_truncate(fd, file_opts->size, file_opts->preallocation, 2116 errp); 2117 if (result < 0) { 2118 goto out_close; 2119 } 2120 2121 out_close: 2122 if (qemu_close(fd) != 0 && result == 0) { 2123 result = -errno; 2124 error_setg_errno(errp, -result, "Could not close the new file"); 2125 } 2126 out: 2127 return result; 2128 } 2129 2130 static int coroutine_fn raw_co_create_opts(const char *filename, QemuOpts *opts, 2131 Error **errp) 2132 { 2133 BlockdevCreateOptions options; 2134 int64_t total_size = 0; 2135 bool nocow = false; 2136 PreallocMode prealloc; 2137 char *buf = NULL; 2138 Error *local_err = NULL; 2139 2140 /* Skip file: protocol prefix */ 2141 strstart(filename, "file:", &filename); 2142 2143 /* Read out options */ 2144 total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 2145 BDRV_SECTOR_SIZE); 2146 nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false); 2147 buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 2148 prealloc = qapi_enum_parse(&PreallocMode_lookup, buf, 2149 PREALLOC_MODE_OFF, &local_err); 2150 g_free(buf); 2151 if (local_err) { 2152 error_propagate(errp, local_err); 2153 return -EINVAL; 2154 } 2155 2156 options = (BlockdevCreateOptions) { 2157 .driver = BLOCKDEV_DRIVER_FILE, 2158 .u.file = { 2159 .filename = (char *) filename, 2160 .size = total_size, 2161 .has_preallocation = true, 2162 .preallocation = prealloc, 2163 .has_nocow = true, 2164 .nocow = nocow, 2165 }, 2166 }; 2167 return raw_co_create(&options, errp); 2168 } 2169 2170 /* 2171 * Find allocation range in @bs around offset @start. 2172 * May change underlying file descriptor's file offset. 2173 * If @start is not in a hole, store @start in @data, and the 2174 * beginning of the next hole in @hole, and return 0. 2175 * If @start is in a non-trailing hole, store @start in @hole and the 2176 * beginning of the next non-hole in @data, and return 0. 2177 * If @start is in a trailing hole or beyond EOF, return -ENXIO. 2178 * If we can't find out, return a negative errno other than -ENXIO. 2179 */ 2180 static int find_allocation(BlockDriverState *bs, off_t start, 2181 off_t *data, off_t *hole) 2182 { 2183 #if defined SEEK_HOLE && defined SEEK_DATA 2184 BDRVRawState *s = bs->opaque; 2185 off_t offs; 2186 2187 /* 2188 * SEEK_DATA cases: 2189 * D1. offs == start: start is in data 2190 * D2. offs > start: start is in a hole, next data at offs 2191 * D3. offs < 0, errno = ENXIO: either start is in a trailing hole 2192 * or start is beyond EOF 2193 * If the latter happens, the file has been truncated behind 2194 * our back since we opened it. All bets are off then. 2195 * Treating like a trailing hole is simplest. 2196 * D4. offs < 0, errno != ENXIO: we learned nothing 2197 */ 2198 offs = lseek(s->fd, start, SEEK_DATA); 2199 if (offs < 0) { 2200 return -errno; /* D3 or D4 */ 2201 } 2202 2203 if (offs < start) { 2204 /* This is not a valid return by lseek(). We are safe to just return 2205 * -EIO in this case, and we'll treat it like D4. */ 2206 return -EIO; 2207 } 2208 2209 if (offs > start) { 2210 /* D2: in hole, next data at offs */ 2211 *hole = start; 2212 *data = offs; 2213 return 0; 2214 } 2215 2216 /* D1: in data, end not yet known */ 2217 2218 /* 2219 * SEEK_HOLE cases: 2220 * H1. 
offs == start: start is in a hole
     *     If this happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H2. offs > start: either start is in data, next hole at offs,
     *     or start is in trailing hole, EOF at offs
     *     Linux treats trailing holes like any other hole: offs ==
     *     start.  Solaris seeks to EOF instead: offs > start (blech).
     *     If that happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H3. offs < 0, errno = ENXIO: start is beyond EOF
     *     If this happens, the file has been truncated behind our
     *     back since we opened it.  Treat it like a trailing hole.
     * H4. offs < 0, errno != ENXIO: we learned nothing
     *     Pretend we know nothing at all, i.e. "forget" about D1.
     *     (A standalone sketch of this SEEK_DATA/SEEK_HOLE probing is
     *     appended at the end of this file as an illustration.)
     */
    offs = lseek(s->fd, start, SEEK_HOLE);
    if (offs < 0) {
        return -errno;          /* D1 and (H3 or H4) */
    }

    if (offs < start) {
        /* This is not a valid return by lseek().  We are safe to just return
         * -EIO in this case, and we'll treat it like H4. */
        return -EIO;
    }

    if (offs > start) {
        /*
         * D1 and H2: either in data, next hole at offs, or it was in
         * data but is now in a trailing hole.  In the latter case,
         * all bets are off.  Treating it as if there was data all
         * the way to EOF is safe, so simply do that.
         */
        *data = start;
        *hole = offs;
        return 0;
    }

    /* D1 and H1 */
    return -EBUSY;
#else
    return -ENOTSUP;
#endif
}

/*
 * Returns the allocation status of the specified offset.
 *
 * The block layer guarantees 'offset' and 'bytes' are within bounds.
 *
 * 'pnum' is set to the number of bytes (including and immediately following
 * the specified offset) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'bytes' is the max value 'pnum' should be set to.
 */
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
                                            bool want_zero,
                                            int64_t offset,
                                            int64_t bytes, int64_t *pnum,
                                            int64_t *map,
                                            BlockDriverState **file)
{
    off_t data = 0, hole = 0;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    if (!want_zero) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    ret = find_allocation(bs, offset, &data, &hole);
    if (ret == -ENXIO) {
        /* Trailing hole */
        *pnum = bytes;
        ret = BDRV_BLOCK_ZERO;
    } else if (ret < 0) {
        /* No info available, so pretend there are no holes */
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA;
    } else if (data == offset) {
        /* On a data extent, compute bytes to the end of the extent,
         * possibly including a partial sector at EOF. */
        *pnum = MIN(bytes, hole - offset);
        ret = BDRV_BLOCK_DATA;
    } else {
        /* On a hole, compute bytes to the beginning of the next extent.
*/ 2314 assert(hole == offset); 2315 *pnum = MIN(bytes, data - offset); 2316 ret = BDRV_BLOCK_ZERO; 2317 } 2318 *map = offset; 2319 *file = bs; 2320 return ret | BDRV_BLOCK_OFFSET_VALID; 2321 } 2322 2323 #if defined(__linux__) 2324 /* Verify that the file is not in the page cache */ 2325 static void check_cache_dropped(BlockDriverState *bs, Error **errp) 2326 { 2327 const size_t window_size = 128 * 1024 * 1024; 2328 BDRVRawState *s = bs->opaque; 2329 void *window = NULL; 2330 size_t length = 0; 2331 unsigned char *vec; 2332 size_t page_size; 2333 off_t offset; 2334 off_t end; 2335 2336 /* mincore(2) page status information requires 1 byte per page */ 2337 page_size = sysconf(_SC_PAGESIZE); 2338 vec = g_malloc(DIV_ROUND_UP(window_size, page_size)); 2339 2340 end = raw_getlength(bs); 2341 2342 for (offset = 0; offset < end; offset += window_size) { 2343 void *new_window; 2344 size_t new_length; 2345 size_t vec_end; 2346 size_t i; 2347 int ret; 2348 2349 /* Unmap previous window if size has changed */ 2350 new_length = MIN(end - offset, window_size); 2351 if (new_length != length) { 2352 munmap(window, length); 2353 window = NULL; 2354 length = 0; 2355 } 2356 2357 new_window = mmap(window, new_length, PROT_NONE, MAP_PRIVATE, 2358 s->fd, offset); 2359 if (new_window == MAP_FAILED) { 2360 error_setg_errno(errp, errno, "mmap failed"); 2361 break; 2362 } 2363 2364 window = new_window; 2365 length = new_length; 2366 2367 ret = mincore(window, length, vec); 2368 if (ret < 0) { 2369 error_setg_errno(errp, errno, "mincore failed"); 2370 break; 2371 } 2372 2373 vec_end = DIV_ROUND_UP(length, page_size); 2374 for (i = 0; i < vec_end; i++) { 2375 if (vec[i] & 0x1) { 2376 error_setg(errp, "page cache still in use!"); 2377 break; 2378 } 2379 } 2380 } 2381 2382 if (window) { 2383 munmap(window, length); 2384 } 2385 2386 g_free(vec); 2387 } 2388 #endif /* __linux__ */ 2389 2390 static void coroutine_fn raw_co_invalidate_cache(BlockDriverState *bs, 2391 Error **errp) 2392 { 2393 BDRVRawState *s = bs->opaque; 2394 int ret; 2395 2396 ret = fd_open(bs); 2397 if (ret < 0) { 2398 error_setg_errno(errp, -ret, "The file descriptor is not open"); 2399 return; 2400 } 2401 2402 if (s->open_flags & O_DIRECT) { 2403 return; /* No host kernel page cache */ 2404 } 2405 2406 #if defined(__linux__) 2407 /* This sets the scene for the next syscall... */ 2408 ret = bdrv_co_flush(bs); 2409 if (ret < 0) { 2410 error_setg_errno(errp, -ret, "flush failed"); 2411 return; 2412 } 2413 2414 /* Linux does not invalidate pages that are dirty, locked, or mmapped by a 2415 * process. These limitations are okay because we just fsynced the file, 2416 * we don't use mmap, and the file should not be in use by other processes. 2417 */ 2418 ret = posix_fadvise(s->fd, 0, 0, POSIX_FADV_DONTNEED); 2419 if (ret != 0) { /* the return value is a positive errno */ 2420 error_setg_errno(errp, ret, "fadvise failed"); 2421 return; 2422 } 2423 2424 if (s->check_cache_dropped) { 2425 check_cache_dropped(bs, errp); 2426 } 2427 #else /* __linux__ */ 2428 /* Do nothing. Live migration to a remote host with cache.direct=off is 2429 * unsupported on other host operating systems. Cache consistency issues 2430 * may occur but no error is reported here, partly because that's the 2431 * historical behavior and partly because it's hard to differentiate valid 2432 * configurations that should not cause errors. 
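     * (cache.direct=off is the default cache mode, e.g. a plain
     * "-blockdev driver=file,node-name=disk0,filename=disk.img" or
     * "-drive file=disk.img,cache=writeback"; the node name and file name
     * here are made-up examples.)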
     */
#endif /* !__linux__ */
}

static coroutine_fn BlockAIOCB *raw_aio_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes,
    BlockCompletionFunc *cb, void *opaque)
{
    BDRVRawState *s = bs->opaque;

    return paio_submit(bs, s->fd, offset, NULL, bytes,
                       cb, opaque, QEMU_AIO_DISCARD);
}

static int coroutine_fn raw_co_pwrite_zeroes(
    BlockDriverState *bs, int64_t offset,
    int bytes, BdrvRequestFlags flags)
{
    BDRVRawState *s = bs->opaque;

    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        return paio_submit_co(bs, s->fd, offset, NULL, bytes,
                              QEMU_AIO_WRITE_ZEROES);
    } else if (s->discard_zeroes) {
        return paio_submit_co(bs, s->fd, offset, NULL, bytes,
                              QEMU_AIO_DISCARD);
    }
    return -ENOTSUP;
}

static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVRawState *s = bs->opaque;

    bdi->unallocated_blocks_are_zero = s->discard_zeroes;
    return 0;
}

static QemuOptsList raw_create_opts = {
    .name = "raw-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_NOCOW,
            .type = QEMU_OPT_BOOL,
            .help = "Turn off copy-on-write (valid only on btrfs)"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, falloc, full)"
        },
        { /* end of list */ }
    }
};

static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared,
                          Error **errp)
{
    return raw_handle_perm_lock(bs, RAW_PL_PREPARE, perm, shared, errp);
}

static void raw_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared)
{
    BDRVRawState *s = bs->opaque;
    raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL);
    s->perm = perm;
    s->shared_perm = shared;
}

static void raw_abort_perm_update(BlockDriverState *bs)
{
    raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
}

static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs,
                                               BdrvChild *src, uint64_t src_offset,
                                               BdrvChild *dst, uint64_t dst_offset,
                                               uint64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, flags);
}

static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
                                             BdrvChild *src, uint64_t src_offset,
                                             BdrvChild *dst, uint64_t dst_offset,
                                             uint64_t bytes, BdrvRequestFlags flags)
{
    BDRVRawState *s = bs->opaque;
    BDRVRawState *src_s;

    assert(dst->bs == bs);
    if (src->bs->drv->bdrv_co_copy_range_to != raw_co_copy_range_to) {
        return -ENOTSUP;
    }

    src_s = src->bs->opaque;
    /* Both the source and the destination file descriptors must be usable */
    if (fd_open(src->bs) < 0 || fd_open(dst->bs) < 0) {
        return -EIO;
    }
    return paio_submit_co_full(bs, src_s->fd, src_offset, s->fd, dst_offset,
                               NULL, bytes, QEMU_AIO_COPY_RANGE);
}

BlockDriver bdrv_file = {
    .format_name = "file",
    .protocol_name = "file",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe = NULL, /* no probe for protocols */
    .bdrv_parse_filename = raw_parse_filename,
    .bdrv_file_open = raw_open,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit = raw_reopen_commit,
.bdrv_reopen_abort = raw_reopen_abort, 2553 .bdrv_close = raw_close, 2554 .bdrv_co_create = raw_co_create, 2555 .bdrv_co_create_opts = raw_co_create_opts, 2556 .bdrv_has_zero_init = bdrv_has_zero_init_1, 2557 .bdrv_co_block_status = raw_co_block_status, 2558 .bdrv_co_invalidate_cache = raw_co_invalidate_cache, 2559 .bdrv_co_pwrite_zeroes = raw_co_pwrite_zeroes, 2560 2561 .bdrv_co_preadv = raw_co_preadv, 2562 .bdrv_co_pwritev = raw_co_pwritev, 2563 .bdrv_aio_flush = raw_aio_flush, 2564 .bdrv_aio_pdiscard = raw_aio_pdiscard, 2565 .bdrv_co_copy_range_from = raw_co_copy_range_from, 2566 .bdrv_co_copy_range_to = raw_co_copy_range_to, 2567 .bdrv_refresh_limits = raw_refresh_limits, 2568 .bdrv_io_plug = raw_aio_plug, 2569 .bdrv_io_unplug = raw_aio_unplug, 2570 2571 .bdrv_truncate = raw_truncate, 2572 .bdrv_getlength = raw_getlength, 2573 .bdrv_get_info = raw_get_info, 2574 .bdrv_get_allocated_file_size 2575 = raw_get_allocated_file_size, 2576 .bdrv_check_perm = raw_check_perm, 2577 .bdrv_set_perm = raw_set_perm, 2578 .bdrv_abort_perm_update = raw_abort_perm_update, 2579 .create_opts = &raw_create_opts, 2580 }; 2581 2582 /***********************************************/ 2583 /* host device */ 2584 2585 #if defined(__APPLE__) && defined(__MACH__) 2586 static kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath, 2587 CFIndex maxPathSize, int flags); 2588 static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator) 2589 { 2590 kern_return_t kernResult = KERN_FAILURE; 2591 mach_port_t masterPort; 2592 CFMutableDictionaryRef classesToMatch; 2593 const char *matching_array[] = {kIODVDMediaClass, kIOCDMediaClass}; 2594 char *mediaType = NULL; 2595 2596 kernResult = IOMasterPort( MACH_PORT_NULL, &masterPort ); 2597 if ( KERN_SUCCESS != kernResult ) { 2598 printf( "IOMasterPort returned %d\n", kernResult ); 2599 } 2600 2601 int index; 2602 for (index = 0; index < ARRAY_SIZE(matching_array); index++) { 2603 classesToMatch = IOServiceMatching(matching_array[index]); 2604 if (classesToMatch == NULL) { 2605 error_report("IOServiceMatching returned NULL for %s", 2606 matching_array[index]); 2607 continue; 2608 } 2609 CFDictionarySetValue(classesToMatch, CFSTR(kIOMediaEjectableKey), 2610 kCFBooleanTrue); 2611 kernResult = IOServiceGetMatchingServices(masterPort, classesToMatch, 2612 mediaIterator); 2613 if (kernResult != KERN_SUCCESS) { 2614 error_report("Note: IOServiceGetMatchingServices returned %d", 2615 kernResult); 2616 continue; 2617 } 2618 2619 /* If a match was found, leave the loop */ 2620 if (*mediaIterator != 0) { 2621 DPRINTF("Matching using %s\n", matching_array[index]); 2622 mediaType = g_strdup(matching_array[index]); 2623 break; 2624 } 2625 } 2626 return mediaType; 2627 } 2628 2629 kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath, 2630 CFIndex maxPathSize, int flags) 2631 { 2632 io_object_t nextMedia; 2633 kern_return_t kernResult = KERN_FAILURE; 2634 *bsdPath = '\0'; 2635 nextMedia = IOIteratorNext( mediaIterator ); 2636 if ( nextMedia ) 2637 { 2638 CFTypeRef bsdPathAsCFString; 2639 bsdPathAsCFString = IORegistryEntryCreateCFProperty( nextMedia, CFSTR( kIOBSDNameKey ), kCFAllocatorDefault, 0 ); 2640 if ( bsdPathAsCFString ) { 2641 size_t devPathLength; 2642 strcpy( bsdPath, _PATH_DEV ); 2643 if (flags & BDRV_O_NOCACHE) { 2644 strcat(bsdPath, "r"); 2645 } 2646 devPathLength = strlen( bsdPath ); 2647 if ( CFStringGetCString( bsdPathAsCFString, bsdPath + devPathLength, maxPathSize - devPathLength, kCFStringEncodingASCII ) ) { 2648 kernResult = 
KERN_SUCCESS; 2649 } 2650 CFRelease( bsdPathAsCFString ); 2651 } 2652 IOObjectRelease( nextMedia ); 2653 } 2654 2655 return kernResult; 2656 } 2657 2658 /* Sets up a real cdrom for use in QEMU */ 2659 static bool setup_cdrom(char *bsd_path, Error **errp) 2660 { 2661 int index, num_of_test_partitions = 2, fd; 2662 char test_partition[MAXPATHLEN]; 2663 bool partition_found = false; 2664 2665 /* look for a working partition */ 2666 for (index = 0; index < num_of_test_partitions; index++) { 2667 snprintf(test_partition, sizeof(test_partition), "%ss%d", bsd_path, 2668 index); 2669 fd = qemu_open(test_partition, O_RDONLY | O_BINARY | O_LARGEFILE); 2670 if (fd >= 0) { 2671 partition_found = true; 2672 qemu_close(fd); 2673 break; 2674 } 2675 } 2676 2677 /* if a working partition on the device was not found */ 2678 if (partition_found == false) { 2679 error_setg(errp, "Failed to find a working partition on disc"); 2680 } else { 2681 DPRINTF("Using %s as optical disc\n", test_partition); 2682 pstrcpy(bsd_path, MAXPATHLEN, test_partition); 2683 } 2684 return partition_found; 2685 } 2686 2687 /* Prints directions on mounting and unmounting a device */ 2688 static void print_unmounting_directions(const char *file_name) 2689 { 2690 error_report("If device %s is mounted on the desktop, unmount" 2691 " it first before using it in QEMU", file_name); 2692 error_report("Command to unmount device: diskutil unmountDisk %s", 2693 file_name); 2694 error_report("Command to mount device: diskutil mountDisk %s", file_name); 2695 } 2696 2697 #endif /* defined(__APPLE__) && defined(__MACH__) */ 2698 2699 static int hdev_probe_device(const char *filename) 2700 { 2701 struct stat st; 2702 2703 /* allow a dedicated CD-ROM driver to match with a higher priority */ 2704 if (strstart(filename, "/dev/cdrom", NULL)) 2705 return 50; 2706 2707 if (stat(filename, &st) >= 0 && 2708 (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) { 2709 return 100; 2710 } 2711 2712 return 0; 2713 } 2714 2715 static int check_hdev_writable(BDRVRawState *s) 2716 { 2717 #if defined(BLKROGET) 2718 /* Linux block devices can be configured "read-only" using blockdev(8). 2719 * This is independent of device node permissions and therefore open(2) 2720 * with O_RDWR succeeds. Actual writes fail with EPERM. 2721 * 2722 * bdrv_open() is supposed to fail if the disk is read-only. Explicitly 2723 * check for read-only block devices so that Linux block devices behave 2724 * properly. 
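     * (For example, a device marked read-only with 'blockdev --setro /dev/sdX'
     * still opens with O_RDWR; it is the BLKROGET ioctl below that reveals the
     * read-only setting. The device name is only illustrative.)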
2725 */ 2726 struct stat st; 2727 int readonly = 0; 2728 2729 if (fstat(s->fd, &st)) { 2730 return -errno; 2731 } 2732 2733 if (!S_ISBLK(st.st_mode)) { 2734 return 0; 2735 } 2736 2737 if (ioctl(s->fd, BLKROGET, &readonly) < 0) { 2738 return -errno; 2739 } 2740 2741 if (readonly) { 2742 return -EACCES; 2743 } 2744 #endif /* defined(BLKROGET) */ 2745 return 0; 2746 } 2747 2748 static void hdev_parse_filename(const char *filename, QDict *options, 2749 Error **errp) 2750 { 2751 bdrv_parse_filename_strip_prefix(filename, "host_device:", options); 2752 } 2753 2754 static bool hdev_is_sg(BlockDriverState *bs) 2755 { 2756 2757 #if defined(__linux__) 2758 2759 BDRVRawState *s = bs->opaque; 2760 struct stat st; 2761 struct sg_scsi_id scsiid; 2762 int sg_version; 2763 int ret; 2764 2765 if (stat(bs->filename, &st) < 0 || !S_ISCHR(st.st_mode)) { 2766 return false; 2767 } 2768 2769 ret = ioctl(s->fd, SG_GET_VERSION_NUM, &sg_version); 2770 if (ret < 0) { 2771 return false; 2772 } 2773 2774 ret = ioctl(s->fd, SG_GET_SCSI_ID, &scsiid); 2775 if (ret >= 0) { 2776 DPRINTF("SG device found: type=%d, version=%d\n", 2777 scsiid.scsi_type, sg_version); 2778 return true; 2779 } 2780 2781 #endif 2782 2783 return false; 2784 } 2785 2786 static int hdev_open(BlockDriverState *bs, QDict *options, int flags, 2787 Error **errp) 2788 { 2789 BDRVRawState *s = bs->opaque; 2790 Error *local_err = NULL; 2791 int ret; 2792 2793 #if defined(__APPLE__) && defined(__MACH__) 2794 /* 2795 * Caution: while qdict_get_str() is fine, getting non-string types 2796 * would require more care. When @options come from -blockdev or 2797 * blockdev_add, its members are typed according to the QAPI 2798 * schema, but when they come from -drive, they're all QString. 2799 */ 2800 const char *filename = qdict_get_str(options, "filename"); 2801 char bsd_path[MAXPATHLEN] = ""; 2802 bool error_occurred = false; 2803 2804 /* If using a real cdrom */ 2805 if (strcmp(filename, "/dev/cdrom") == 0) { 2806 char *mediaType = NULL; 2807 kern_return_t ret_val; 2808 io_iterator_t mediaIterator = 0; 2809 2810 mediaType = FindEjectableOpticalMedia(&mediaIterator); 2811 if (mediaType == NULL) { 2812 error_setg(errp, "Please make sure your CD/DVD is in the optical" 2813 " drive"); 2814 error_occurred = true; 2815 goto hdev_open_Mac_error; 2816 } 2817 2818 ret_val = GetBSDPath(mediaIterator, bsd_path, sizeof(bsd_path), flags); 2819 if (ret_val != KERN_SUCCESS) { 2820 error_setg(errp, "Could not get BSD path for optical drive"); 2821 error_occurred = true; 2822 goto hdev_open_Mac_error; 2823 } 2824 2825 /* If a real optical drive was not found */ 2826 if (bsd_path[0] == '\0') { 2827 error_setg(errp, "Failed to obtain bsd path for optical drive"); 2828 error_occurred = true; 2829 goto hdev_open_Mac_error; 2830 } 2831 2832 /* If using a cdrom disc and finding a partition on the disc failed */ 2833 if (strncmp(mediaType, kIOCDMediaClass, 9) == 0 && 2834 setup_cdrom(bsd_path, errp) == false) { 2835 print_unmounting_directions(bsd_path); 2836 error_occurred = true; 2837 goto hdev_open_Mac_error; 2838 } 2839 2840 qdict_put_str(options, "filename", bsd_path); 2841 2842 hdev_open_Mac_error: 2843 g_free(mediaType); 2844 if (mediaIterator) { 2845 IOObjectRelease(mediaIterator); 2846 } 2847 if (error_occurred) { 2848 return -ENOENT; 2849 } 2850 } 2851 #endif /* defined(__APPLE__) && defined(__MACH__) */ 2852 2853 s->type = FTYPE_FILE; 2854 2855 ret = raw_open_common(bs, options, flags, 0, &local_err); 2856 if (ret < 0) { 2857 error_propagate(errp, local_err); 2858 #if 
defined(__APPLE__) && defined(__MACH__)
        if (*bsd_path) {
            filename = bsd_path;
        }
        /* if a physical device experienced an error while being opened */
        if (strncmp(filename, "/dev/", 5) == 0) {
            print_unmounting_directions(filename);
        }
#endif /* defined(__APPLE__) && defined(__MACH__) */
        return ret;
    }

    /* Since this does ioctl the device must be already opened */
    bs->sg = hdev_is_sg(bs);

    if (flags & BDRV_O_RDWR) {
        ret = check_hdev_writable(s);
        if (ret < 0) {
            raw_close(bs);
            error_setg_errno(errp, -ret, "The device is not writable");
            return ret;
        }
    }

    return ret;
}

#if defined(__linux__)

static BlockAIOCB *hdev_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData *acb;
    ThreadPool *pool;

    if (fd_open(bs) < 0)
        return NULL;

    if (req == SG_IO && s->pr_mgr) {
        struct sg_io_hdr *io_hdr = buf;
        if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT ||
            io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) {
            return pr_manager_execute(s->pr_mgr, bdrv_get_aio_context(bs),
                                      s->fd, io_hdr, cb, opaque);
        }
    }

    acb = g_new(RawPosixAIOData, 1);
    acb->bs = bs;
    acb->aio_type = QEMU_AIO_IOCTL;
    acb->aio_fildes = s->fd;
    acb->aio_offset = 0;
    acb->aio_ioctl_buf = buf;
    acb->aio_ioctl_cmd = req;
    pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
    return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
}
#endif /* linux */

static int fd_open(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;

    /* this is just to ensure s->fd is sane (it's called by I/O ops) */
    if (s->fd >= 0)
        return 0;
    return -EIO;
}

static coroutine_fn BlockAIOCB *hdev_aio_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes,
    BlockCompletionFunc *cb, void *opaque)
{
    BDRVRawState *s = bs->opaque;

    if (fd_open(bs) < 0) {
        return NULL;
    }
    return paio_submit(bs, s->fd, offset, NULL, bytes,
                       cb, opaque, QEMU_AIO_DISCARD | QEMU_AIO_BLKDEV);
}

static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BDRVRawState *s = bs->opaque;
    int rc;

    rc = fd_open(bs);
    if (rc < 0) {
        return rc;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        return paio_submit_co(bs, s->fd, offset, NULL, bytes,
                              QEMU_AIO_WRITE_ZEROES | QEMU_AIO_BLKDEV);
    } else if (s->discard_zeroes) {
        return paio_submit_co(bs, s->fd, offset, NULL, bytes,
                              QEMU_AIO_DISCARD | QEMU_AIO_BLKDEV);
    }
    return -ENOTSUP;
}

static int coroutine_fn hdev_co_create_opts(const char *filename, QemuOpts *opts,
                                            Error **errp)
{
    int fd;
    int ret = 0;
    struct stat stat_buf;
    int64_t total_size = 0;
    bool has_prefix;

    /* This function is used by both the host_device and host_cdrom protocol
     * block drivers, so either of these prefixes may be given.
     * The return value has to be stored somewhere, otherwise this is an error
     * due to -Werror=unused-value.
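     * (Typical filenames would look like "host_device:/dev/sdb" or
     * "host_cdrom:/dev/cdrom"; both paths are only illustrative here.)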
*/ 2975 has_prefix = 2976 strstart(filename, "host_device:", &filename) || 2977 strstart(filename, "host_cdrom:" , &filename); 2978 2979 (void)has_prefix; 2980 2981 ret = raw_normalize_devicepath(&filename); 2982 if (ret < 0) { 2983 error_setg_errno(errp, -ret, "Could not normalize device path"); 2984 return ret; 2985 } 2986 2987 /* Read out options */ 2988 total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), 2989 BDRV_SECTOR_SIZE); 2990 2991 fd = qemu_open(filename, O_WRONLY | O_BINARY); 2992 if (fd < 0) { 2993 ret = -errno; 2994 error_setg_errno(errp, -ret, "Could not open device"); 2995 return ret; 2996 } 2997 2998 if (fstat(fd, &stat_buf) < 0) { 2999 ret = -errno; 3000 error_setg_errno(errp, -ret, "Could not stat device"); 3001 } else if (!S_ISBLK(stat_buf.st_mode) && !S_ISCHR(stat_buf.st_mode)) { 3002 error_setg(errp, 3003 "The given file is neither a block nor a character device"); 3004 ret = -ENODEV; 3005 } else if (lseek(fd, 0, SEEK_END) < total_size) { 3006 error_setg(errp, "Device is too small"); 3007 ret = -ENOSPC; 3008 } 3009 3010 if (!ret && total_size) { 3011 uint8_t buf[BDRV_SECTOR_SIZE] = { 0 }; 3012 int64_t zero_size = MIN(BDRV_SECTOR_SIZE, total_size); 3013 if (lseek(fd, 0, SEEK_SET) == -1) { 3014 ret = -errno; 3015 } else { 3016 ret = qemu_write_full(fd, buf, zero_size); 3017 ret = ret == zero_size ? 0 : -errno; 3018 } 3019 } 3020 qemu_close(fd); 3021 return ret; 3022 } 3023 3024 static BlockDriver bdrv_host_device = { 3025 .format_name = "host_device", 3026 .protocol_name = "host_device", 3027 .instance_size = sizeof(BDRVRawState), 3028 .bdrv_needs_filename = true, 3029 .bdrv_probe_device = hdev_probe_device, 3030 .bdrv_parse_filename = hdev_parse_filename, 3031 .bdrv_file_open = hdev_open, 3032 .bdrv_close = raw_close, 3033 .bdrv_reopen_prepare = raw_reopen_prepare, 3034 .bdrv_reopen_commit = raw_reopen_commit, 3035 .bdrv_reopen_abort = raw_reopen_abort, 3036 .bdrv_co_create_opts = hdev_co_create_opts, 3037 .create_opts = &raw_create_opts, 3038 .bdrv_co_invalidate_cache = raw_co_invalidate_cache, 3039 .bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes, 3040 3041 .bdrv_co_preadv = raw_co_preadv, 3042 .bdrv_co_pwritev = raw_co_pwritev, 3043 .bdrv_aio_flush = raw_aio_flush, 3044 .bdrv_aio_pdiscard = hdev_aio_pdiscard, 3045 .bdrv_co_copy_range_from = raw_co_copy_range_from, 3046 .bdrv_co_copy_range_to = raw_co_copy_range_to, 3047 .bdrv_refresh_limits = raw_refresh_limits, 3048 .bdrv_io_plug = raw_aio_plug, 3049 .bdrv_io_unplug = raw_aio_unplug, 3050 3051 .bdrv_truncate = raw_truncate, 3052 .bdrv_getlength = raw_getlength, 3053 .bdrv_get_info = raw_get_info, 3054 .bdrv_get_allocated_file_size 3055 = raw_get_allocated_file_size, 3056 .bdrv_check_perm = raw_check_perm, 3057 .bdrv_set_perm = raw_set_perm, 3058 .bdrv_abort_perm_update = raw_abort_perm_update, 3059 .bdrv_probe_blocksizes = hdev_probe_blocksizes, 3060 .bdrv_probe_geometry = hdev_probe_geometry, 3061 3062 /* generic scsi device */ 3063 #ifdef __linux__ 3064 .bdrv_aio_ioctl = hdev_aio_ioctl, 3065 #endif 3066 }; 3067 3068 #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 3069 static void cdrom_parse_filename(const char *filename, QDict *options, 3070 Error **errp) 3071 { 3072 bdrv_parse_filename_strip_prefix(filename, "host_cdrom:", options); 3073 } 3074 #endif 3075 3076 #ifdef __linux__ 3077 static int cdrom_open(BlockDriverState *bs, QDict *options, int flags, 3078 Error **errp) 3079 { 3080 BDRVRawState *s = bs->opaque; 3081 3082 s->type = FTYPE_CD; 3083 3084 /* open will 
not fail even if no CD is inserted, so add O_NONBLOCK */
    return raw_open_common(bs, options, flags, O_NONBLOCK, errp);
}

static int cdrom_probe_device(const char *filename)
{
    int fd, ret;
    int prio = 0;
    struct stat st;

    fd = qemu_open(filename, O_RDONLY | O_NONBLOCK);
    if (fd < 0) {
        goto out;
    }
    ret = fstat(fd, &st);
    if (ret == -1 || !S_ISBLK(st.st_mode)) {
        goto outc;
    }

    /* Attempt to detect via a CDROM specific ioctl */
    ret = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
    if (ret >= 0)
        prio = 100;

outc:
    qemu_close(fd);
out:
    return prio;
}

static bool cdrom_is_inserted(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    ret = ioctl(s->fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
    return ret == CDS_DISC_OK;
}

static void cdrom_eject(BlockDriverState *bs, bool eject_flag)
{
    BDRVRawState *s = bs->opaque;

    if (eject_flag) {
        if (ioctl(s->fd, CDROMEJECT, NULL) < 0)
            perror("CDROMEJECT");
    } else {
        if (ioctl(s->fd, CDROMCLOSETRAY, NULL) < 0)
            perror("CDROMCLOSETRAY");
    }
}

static void cdrom_lock_medium(BlockDriverState *bs, bool locked)
{
    BDRVRawState *s = bs->opaque;

    if (ioctl(s->fd, CDROM_LOCKDOOR, locked) < 0) {
        /*
         * Note: an error can happen if the distribution automatically
         * mounts the CD-ROM
         */
        /* perror("CDROM_LOCKDOOR"); */
    }
}

static BlockDriver bdrv_host_cdrom = {
    .format_name = "host_cdrom",
    .protocol_name = "host_cdrom",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe_device = cdrom_probe_device,
    .bdrv_parse_filename = cdrom_parse_filename,
    .bdrv_file_open = cdrom_open,
    .bdrv_close = raw_close,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit = raw_reopen_commit,
    .bdrv_reopen_abort = raw_reopen_abort,
    .bdrv_co_create_opts = hdev_co_create_opts,
    .create_opts = &raw_create_opts,
    .bdrv_co_invalidate_cache = raw_co_invalidate_cache,

    .bdrv_co_preadv = raw_co_preadv,
    .bdrv_co_pwritev = raw_co_pwritev,
    .bdrv_aio_flush = raw_aio_flush,
    .bdrv_refresh_limits = raw_refresh_limits,
    .bdrv_io_plug = raw_aio_plug,
    .bdrv_io_unplug = raw_aio_unplug,

    .bdrv_truncate = raw_truncate,
    .bdrv_getlength = raw_getlength,
    .has_variable_length = true,
    .bdrv_get_allocated_file_size = raw_get_allocated_file_size,

    /* removable device support */
    .bdrv_is_inserted = cdrom_is_inserted,
    .bdrv_eject = cdrom_eject,
    .bdrv_lock_medium = cdrom_lock_medium,

    /* generic scsi device */
    .bdrv_aio_ioctl = hdev_aio_ioctl,
};
#endif /* __linux__ */

#if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVRawState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    s->type = FTYPE_CD;

    ret = raw_open_common(bs, options, flags, 0, &local_err);
    if (ret) {
        error_propagate(errp, local_err);
        return ret;
    }

    /* make sure the door isn't locked at this time */
    ioctl(s->fd, CDIOCALLOW);
    return 0;
}

static int cdrom_probe_device(const char *filename)
{
    if (strstart(filename, "/dev/cd", NULL) ||
3213 strstart(filename, "/dev/acd", NULL)) 3214 return 100; 3215 return 0; 3216 } 3217 3218 static int cdrom_reopen(BlockDriverState *bs) 3219 { 3220 BDRVRawState *s = bs->opaque; 3221 int fd; 3222 3223 /* 3224 * Force reread of possibly changed/newly loaded disc, 3225 * FreeBSD seems to not notice sometimes... 3226 */ 3227 if (s->fd >= 0) 3228 qemu_close(s->fd); 3229 fd = qemu_open(bs->filename, s->open_flags, 0644); 3230 if (fd < 0) { 3231 s->fd = -1; 3232 return -EIO; 3233 } 3234 s->fd = fd; 3235 3236 /* make sure the door isn't locked at this time */ 3237 ioctl(s->fd, CDIOCALLOW); 3238 return 0; 3239 } 3240 3241 static bool cdrom_is_inserted(BlockDriverState *bs) 3242 { 3243 return raw_getlength(bs) > 0; 3244 } 3245 3246 static void cdrom_eject(BlockDriverState *bs, bool eject_flag) 3247 { 3248 BDRVRawState *s = bs->opaque; 3249 3250 if (s->fd < 0) 3251 return; 3252 3253 (void) ioctl(s->fd, CDIOCALLOW); 3254 3255 if (eject_flag) { 3256 if (ioctl(s->fd, CDIOCEJECT) < 0) 3257 perror("CDIOCEJECT"); 3258 } else { 3259 if (ioctl(s->fd, CDIOCCLOSE) < 0) 3260 perror("CDIOCCLOSE"); 3261 } 3262 3263 cdrom_reopen(bs); 3264 } 3265 3266 static void cdrom_lock_medium(BlockDriverState *bs, bool locked) 3267 { 3268 BDRVRawState *s = bs->opaque; 3269 3270 if (s->fd < 0) 3271 return; 3272 if (ioctl(s->fd, (locked ? CDIOCPREVENT : CDIOCALLOW)) < 0) { 3273 /* 3274 * Note: an error can happen if the distribution automatically 3275 * mounts the CD-ROM 3276 */ 3277 /* perror("CDROM_LOCKDOOR"); */ 3278 } 3279 } 3280 3281 static BlockDriver bdrv_host_cdrom = { 3282 .format_name = "host_cdrom", 3283 .protocol_name = "host_cdrom", 3284 .instance_size = sizeof(BDRVRawState), 3285 .bdrv_needs_filename = true, 3286 .bdrv_probe_device = cdrom_probe_device, 3287 .bdrv_parse_filename = cdrom_parse_filename, 3288 .bdrv_file_open = cdrom_open, 3289 .bdrv_close = raw_close, 3290 .bdrv_reopen_prepare = raw_reopen_prepare, 3291 .bdrv_reopen_commit = raw_reopen_commit, 3292 .bdrv_reopen_abort = raw_reopen_abort, 3293 .bdrv_co_create_opts = hdev_co_create_opts, 3294 .create_opts = &raw_create_opts, 3295 3296 .bdrv_co_preadv = raw_co_preadv, 3297 .bdrv_co_pwritev = raw_co_pwritev, 3298 .bdrv_aio_flush = raw_aio_flush, 3299 .bdrv_refresh_limits = raw_refresh_limits, 3300 .bdrv_io_plug = raw_aio_plug, 3301 .bdrv_io_unplug = raw_aio_unplug, 3302 3303 .bdrv_truncate = raw_truncate, 3304 .bdrv_getlength = raw_getlength, 3305 .has_variable_length = true, 3306 .bdrv_get_allocated_file_size 3307 = raw_get_allocated_file_size, 3308 3309 /* removable device support */ 3310 .bdrv_is_inserted = cdrom_is_inserted, 3311 .bdrv_eject = cdrom_eject, 3312 .bdrv_lock_medium = cdrom_lock_medium, 3313 }; 3314 #endif /* __FreeBSD__ */ 3315 3316 static void bdrv_file_init(void) 3317 { 3318 /* 3319 * Register all the drivers. Note that order is important, the driver 3320 * registered last will get probed first. 3321 */ 3322 bdrv_register(&bdrv_file); 3323 bdrv_register(&bdrv_host_device); 3324 #ifdef __linux__ 3325 bdrv_register(&bdrv_host_cdrom); 3326 #endif 3327 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 3328 bdrv_register(&bdrv_host_cdrom); 3329 #endif 3330 } 3331 3332 block_init(bdrv_file_init); 3333
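/*
 * Illustrative sketch only (not part of the driver, hence guarded by #if 0):
 * a minimal standalone program that walks a file with lseek(SEEK_DATA) and
 * lseek(SEEK_HOLE), the same probing that find_allocation() above relies on.
 * The default file name below is a made-up example.
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "test.img";   /* hypothetical */
    int fd = open(path, O_RDONLY);
    off_t pos = 0, data, hole;

    if (fd < 0) {
        perror("open");
        return 1;
    }
    for (;;) {
        /* First data byte at or after pos; fails with ENXIO in a trailing hole */
        data = lseek(fd, pos, SEEK_DATA);
        if (data < 0) {
            break;
        }
        /* First hole at or after data; EOF counts as a hole, so this succeeds */
        hole = lseek(fd, data, SEEK_HOLE);
        if (hole < 0) {
            break;
        }
        printf("data extent: %lld..%lld\n", (long long)data, (long long)hole);
        pos = hole;
    }
    close(fd);
    return 0;
}
#endif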