xref: /openbmc/qemu/block/file-posix.c (revision e0c0965f)
1 /*
2  * Block driver for RAW files (posix)
3  *
4  * Copyright (c) 2006 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "qemu-common.h"
27 #include "qapi/error.h"
28 #include "qemu/cutils.h"
29 #include "qemu/error-report.h"
30 #include "block/block_int.h"
31 #include "qemu/module.h"
32 #include "qemu/option.h"
33 #include "trace.h"
34 #include "block/thread-pool.h"
35 #include "qemu/iov.h"
36 #include "block/raw-aio.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qstring.h"
39 
40 #include "scsi/pr-manager.h"
41 #include "scsi/constants.h"
42 
43 #if defined(__APPLE__) && (__MACH__)
44 #include <paths.h>
45 #include <sys/param.h>
46 #include <IOKit/IOKitLib.h>
47 #include <IOKit/IOBSD.h>
48 #include <IOKit/storage/IOMediaBSDClient.h>
49 #include <IOKit/storage/IOMedia.h>
50 #include <IOKit/storage/IOCDMedia.h>
51 //#include <IOKit/storage/IOCDTypes.h>
52 #include <IOKit/storage/IODVDMedia.h>
53 #include <CoreFoundation/CoreFoundation.h>
54 #endif
55 
56 #ifdef __sun__
57 #define _POSIX_PTHREAD_SEMANTICS 1
58 #include <sys/dkio.h>
59 #endif
60 #ifdef __linux__
61 #include <sys/ioctl.h>
62 #include <sys/param.h>
63 #include <sys/syscall.h>
64 #include <linux/cdrom.h>
65 #include <linux/fd.h>
66 #include <linux/fs.h>
67 #include <linux/hdreg.h>
68 #include <scsi/sg.h>
69 #ifdef __s390__
70 #include <asm/dasd.h>
71 #endif
72 #ifndef FS_NOCOW_FL
73 #define FS_NOCOW_FL                     0x00800000 /* Do not cow file */
74 #endif
75 #endif
76 #if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE)
77 #include <linux/falloc.h>
78 #endif
79 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
80 #include <sys/disk.h>
81 #include <sys/cdio.h>
82 #endif
83 
84 #ifdef __OpenBSD__
85 #include <sys/ioctl.h>
86 #include <sys/disklabel.h>
87 #include <sys/dkio.h>
88 #endif
89 
90 #ifdef __NetBSD__
91 #include <sys/ioctl.h>
92 #include <sys/disklabel.h>
93 #include <sys/dkio.h>
94 #include <sys/disk.h>
95 #endif
96 
97 #ifdef __DragonFly__
98 #include <sys/ioctl.h>
99 #include <sys/diskslice.h>
100 #endif
101 
102 #ifdef CONFIG_XFS
103 #include <xfs/xfs.h>
104 #endif
105 
108 /* OS X does not have O_DSYNC */
109 #ifndef O_DSYNC
110 #ifdef O_SYNC
111 #define O_DSYNC O_SYNC
112 #elif defined(O_FSYNC)
113 #define O_DSYNC O_FSYNC
114 #endif
115 #endif
116 
117 /* Approximate O_DIRECT with O_DSYNC if O_DIRECT isn't available */
118 #ifndef O_DIRECT
119 #define O_DIRECT O_DSYNC
120 #endif
121 
122 #define FTYPE_FILE   0
123 #define FTYPE_CD     1
124 
125 #define MAX_BLOCKSIZE	4096
126 
127 /* POSIX file locking bytes. Libvirt takes byte 0; we start from higher bytes,
128  * leaving a few more bytes free for libvirt's future use. */
129 #define RAW_LOCK_PERM_BASE             100
130 #define RAW_LOCK_SHARED_BASE           200
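/*
 * Illustration: permission bit i is represented by two byte-range locks, one
 * at offset RAW_LOCK_PERM_BASE + i (the permission is in use) and one at
 * offset RAW_LOCK_SHARED_BASE + i (the permission is not shared).  For
 * example, if BLK_PERM_WRITE happens to be bit 1, a writer that does not
 * share WRITE holds locks on bytes 101 and 201.
 */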
131 
132 typedef struct BDRVRawState {
133     int fd;
134     bool use_lock;
135     int type;
136     int open_flags;
137     size_t buf_align;
138 
139     /* The current permissions. */
140     uint64_t perm;
141     uint64_t shared_perm;
142 
143     /* The perm bits whose corresponding bytes are already locked in
144      * s->fd. */
145     uint64_t locked_perm;
146     uint64_t locked_shared_perm;
147 
148     int perm_change_fd;
149     int perm_change_flags;
150     BDRVReopenState *reopen_state;
151 
152 #ifdef CONFIG_XFS
153     bool is_xfs:1;
154 #endif
155     bool has_discard:1;
156     bool has_write_zeroes:1;
157     bool discard_zeroes:1;
158     bool use_linux_aio:1;
159     bool page_cache_inconsistent:1;
160     bool has_fallocate;
161     bool needs_alignment;
162     bool drop_cache;
163     bool check_cache_dropped;
164     struct {
165         uint64_t discard_nb_ok;
166         uint64_t discard_nb_failed;
167         uint64_t discard_bytes_ok;
168     } stats;
169 
170     PRManager *pr_mgr;
171 } BDRVRawState;
172 
173 typedef struct BDRVRawReopenState {
174     int fd;
175     int open_flags;
176     bool drop_cache;
177     bool check_cache_dropped;
178 } BDRVRawReopenState;
179 
180 static int fd_open(BlockDriverState *bs);
181 static int64_t raw_getlength(BlockDriverState *bs);
182 
183 typedef struct RawPosixAIOData {
184     BlockDriverState *bs;
185     int aio_type;
186     int aio_fildes;
187 
188     off_t aio_offset;
189     uint64_t aio_nbytes;
190 
191     union {
192         struct {
193             struct iovec *iov;
194             int niov;
195         } io;
196         struct {
197             uint64_t cmd;
198             void *buf;
199         } ioctl;
200         struct {
201             int aio_fd2;
202             off_t aio_offset2;
203         } copy_range;
204         struct {
205             PreallocMode prealloc;
206             Error **errp;
207         } truncate;
208     };
209 } RawPosixAIOData;
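/*
 * A RawPosixAIOData describes one request as handed to the handle_aiocb_*()
 * helpers below: 'io' carries the iovec for reads/writes, 'ioctl' the
 * command and buffer for device ioctls, 'copy_range' the destination fd and
 * offset for copy offloading, and 'truncate' the preallocation mode and
 * error pointer for resize operations.
 */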
210 
211 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
212 static int cdrom_reopen(BlockDriverState *bs);
213 #endif
214 
215 #if defined(__NetBSD__)
216 static int raw_normalize_devicepath(const char **filename, Error **errp)
217 {
218     static char namebuf[PATH_MAX];
219     const char *dp, *fname;
220     struct stat sb;
221 
222     fname = *filename;
223     dp = strrchr(fname, '/');
224     if (lstat(fname, &sb) < 0) {
225         error_setg_file_open(errp, errno, fname);
226         return -errno;
227     }
228 
229     if (!S_ISBLK(sb.st_mode)) {
230         return 0;
231     }
232 
233     if (dp == NULL) {
234         snprintf(namebuf, PATH_MAX, "r%s", fname);
235     } else {
236         snprintf(namebuf, PATH_MAX, "%.*s/r%s",
237             (int)(dp - fname), fname, dp + 1);
238     }
239     *filename = namebuf;
240     warn_report("%s is a block device, using %s", fname, *filename);
241 
242     return 0;
243 }
244 #else
245 static int raw_normalize_devicepath(const char **filename, Error **errp)
246 {
247     return 0;
248 }
249 #endif
250 
251 /*
252  * Get logical block size via ioctl. On success store it in @sector_size_p.
253  */
254 static int probe_logical_blocksize(int fd, unsigned int *sector_size_p)
255 {
256     unsigned int sector_size;
257     bool success = false;
258     int i;
259 
260     errno = ENOTSUP;
261     static const unsigned long ioctl_list[] = {
262 #ifdef BLKSSZGET
263         BLKSSZGET,
264 #endif
265 #ifdef DKIOCGETBLOCKSIZE
266         DKIOCGETBLOCKSIZE,
267 #endif
268 #ifdef DIOCGSECTORSIZE
269         DIOCGSECTORSIZE,
270 #endif
271     };
272 
273     /* Try a few ioctls to get the right size */
274     for (i = 0; i < (int)ARRAY_SIZE(ioctl_list); i++) {
275         if (ioctl(fd, ioctl_list[i], &sector_size) >= 0) {
276             *sector_size_p = sector_size;
277             success = true;
278         }
279     }
280 
281     return success ? 0 : -errno;
282 }
283 
284 /**
285  * Get physical block size of @fd.
286  * On success, store it in @blk_size and return 0.
287  * On failure, return -errno.
288  */
289 static int probe_physical_blocksize(int fd, unsigned int *blk_size)
290 {
291 #ifdef BLKPBSZGET
292     if (ioctl(fd, BLKPBSZGET, blk_size) < 0) {
293         return -errno;
294     }
295     return 0;
296 #else
297     return -ENOTSUP;
298 #endif
299 }
300 
301 /* Check if a read with the given memory buffer and length is allowed.
302  *
303  * This function is used to probe O_DIRECT memory buffer and request alignment.
304  */
305 static bool raw_is_io_aligned(int fd, void *buf, size_t len)
306 {
307     ssize_t ret = pread(fd, buf, len, 0);
308 
309     if (ret >= 0) {
310         return true;
311     }
312 
313 #ifdef __linux__
314     /* The Linux kernel returns EINVAL for misaligned O_DIRECT reads.  Ignore
315      * other errors (e.g. real I/O error), which could happen on a failed
316      * drive, since we only care about probing alignment.
317      */
318     if (errno != EINVAL) {
319         return true;
320     }
321 #endif
322 
323     return false;
324 }
325 
326 static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
327 {
328     BDRVRawState *s = bs->opaque;
329     char *buf;
330     size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
331     size_t alignments[] = {1, 512, 1024, 2048, 4096};
332 
333     /* For SCSI generic devices the alignment is not really used.
334        With buffered I/O, we don't have any restrictions. */
335     if (bdrv_is_sg(bs) || !s->needs_alignment) {
336         bs->bl.request_alignment = 1;
337         s->buf_align = 1;
338         return;
339     }
340 
341     bs->bl.request_alignment = 0;
342     s->buf_align = 0;
343     /* Let's try to use the logical blocksize for the alignment. */
344     if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) {
345         bs->bl.request_alignment = 0;
346     }
347 #ifdef CONFIG_XFS
348     if (s->is_xfs) {
349         struct dioattr da;
350         if (xfsctl(NULL, fd, XFS_IOC_DIOINFO, &da) >= 0) {
351             bs->bl.request_alignment = da.d_miniosz;
352             /* The kernel returns wrong information for d_mem */
353             /* s->buf_align = da.d_mem; */
354         }
355     }
356 #endif
357 
358     /*
359      * If we could not get the sizes so far, we can only guess them. First try
360      * to detect request alignment, since it is more likely to succeed. Then
361      * try to detect buf_align, which cannot be detected in some cases (e.g.
362      * Gluster). If buf_align cannot be detected, we fall back to the value of
363      * request_alignment.
364      */
365 
366     if (!bs->bl.request_alignment) {
367         int i;
368         size_t align;
369         buf = qemu_memalign(max_align, max_align);
370         for (i = 0; i < ARRAY_SIZE(alignments); i++) {
371             align = alignments[i];
372             if (raw_is_io_aligned(fd, buf, align)) {
373                 /* Fallback to safe value. */
374                 bs->bl.request_alignment = (align != 1) ? align : max_align;
375                 break;
376             }
377         }
378         qemu_vfree(buf);
379     }
380 
381     if (!s->buf_align) {
382         int i;
383         size_t align;
384         buf = qemu_memalign(max_align, 2 * max_align);
385         for (i = 0; i < ARRAY_SIZE(alignments); i++) {
386             align = alignments[i];
387             if (raw_is_io_aligned(fd, buf + align, max_align)) {
388                 /* Fallback to request_alignment. */
389                 s->buf_align = (align != 1) ? align : bs->bl.request_alignment;
390                 break;
391             }
392         }
393         qemu_vfree(buf);
394     }
395 
396     if (!s->buf_align || !bs->bl.request_alignment) {
397         error_setg(errp, "Could not find working O_DIRECT alignment");
398         error_append_hint(errp, "Try cache.direct=off\n");
399     }
400 }
401 
402 static void raw_parse_flags(int bdrv_flags, int *open_flags, bool has_writers)
403 {
404     bool read_write = false;
405     assert(open_flags != NULL);
406 
407     *open_flags |= O_BINARY;
408     *open_flags &= ~O_ACCMODE;
409 
410     if (bdrv_flags & BDRV_O_AUTO_RDONLY) {
411         read_write = has_writers;
412     } else if (bdrv_flags & BDRV_O_RDWR) {
413         read_write = true;
414     }
415 
416     if (read_write) {
417         *open_flags |= O_RDWR;
418     } else {
419         *open_flags |= O_RDONLY;
420     }
421 
422     /* Use O_DSYNC for write-through caching, no flags for write-back caching,
423      * and O_DIRECT for no caching. */
424     if ((bdrv_flags & BDRV_O_NOCACHE)) {
425         *open_flags |= O_DIRECT;
426     }
427 }
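/*
 * For example, opening with BDRV_O_RDWR | BDRV_O_NOCACHE (cache.direct=on)
 * results in O_RDWR | O_DIRECT | O_BINARY here; only the O_DIRECT part of
 * the caching comment above is handled by this function.
 */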
428 
429 static void raw_parse_filename(const char *filename, QDict *options,
430                                Error **errp)
431 {
432     bdrv_parse_filename_strip_prefix(filename, "file:", options);
433 }
434 
435 static QemuOptsList raw_runtime_opts = {
436     .name = "raw",
437     .head = QTAILQ_HEAD_INITIALIZER(raw_runtime_opts.head),
438     .desc = {
439         {
440             .name = "filename",
441             .type = QEMU_OPT_STRING,
442             .help = "File name of the image",
443         },
444         {
445             .name = "aio",
446             .type = QEMU_OPT_STRING,
447             .help = "host AIO implementation (threads, native)",
448         },
449         {
450             .name = "locking",
451             .type = QEMU_OPT_STRING,
452             .help = "file locking mode (on/off/auto, default: auto)",
453         },
454         {
455             .name = "pr-manager",
456             .type = QEMU_OPT_STRING,
457             .help = "id of persistent reservation manager object (default: none)",
458         },
459 #if defined(__linux__)
460         {
461             .name = "drop-cache",
462             .type = QEMU_OPT_BOOL,
463             .help = "invalidate page cache during live migration (default: on)",
464         },
465 #endif
466         {
467             .name = "x-check-cache-dropped",
468             .type = QEMU_OPT_BOOL,
469             .help = "check that page cache was dropped on live migration (default: off)"
470         },
471         { /* end of list */ }
472     },
473 };
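/*
 * These runtime options correspond to -blockdev properties of the "file"
 * driver.  An illustrative (hypothetical) invocation:
 *
 *   -blockdev driver=file,node-name=disk0,filename=test.img,aio=native,locking=auto
 *
 * cache.direct=on (BDRV_O_NOCACHE) is a generic block layer option and is
 * not part of this list.
 */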
474 
475 static const char *const mutable_opts[] = { "x-check-cache-dropped", NULL };
476 
477 static int raw_open_common(BlockDriverState *bs, QDict *options,
478                            int bdrv_flags, int open_flags,
479                            bool device, Error **errp)
480 {
481     BDRVRawState *s = bs->opaque;
482     QemuOpts *opts;
483     Error *local_err = NULL;
484     const char *filename = NULL;
485     const char *str;
486     BlockdevAioOptions aio, aio_default;
487     int fd, ret;
488     struct stat st;
489     OnOffAuto locking;
490 
491     opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
492     qemu_opts_absorb_qdict(opts, options, &local_err);
493     if (local_err) {
494         error_propagate(errp, local_err);
495         ret = -EINVAL;
496         goto fail;
497     }
498 
499     filename = qemu_opt_get(opts, "filename");
500 
501     ret = raw_normalize_devicepath(&filename, errp);
502     if (ret != 0) {
503         goto fail;
504     }
505 
506     aio_default = (bdrv_flags & BDRV_O_NATIVE_AIO)
507                   ? BLOCKDEV_AIO_OPTIONS_NATIVE
508                   : BLOCKDEV_AIO_OPTIONS_THREADS;
509     aio = qapi_enum_parse(&BlockdevAioOptions_lookup,
510                           qemu_opt_get(opts, "aio"),
511                           aio_default, &local_err);
512     if (local_err) {
513         error_propagate(errp, local_err);
514         ret = -EINVAL;
515         goto fail;
516     }
517     s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE);
518 
519     locking = qapi_enum_parse(&OnOffAuto_lookup,
520                               qemu_opt_get(opts, "locking"),
521                               ON_OFF_AUTO_AUTO, &local_err);
522     if (local_err) {
523         error_propagate(errp, local_err);
524         ret = -EINVAL;
525         goto fail;
526     }
527     switch (locking) {
528     case ON_OFF_AUTO_ON:
529         s->use_lock = true;
530         if (!qemu_has_ofd_lock()) {
531             warn_report("File lock requested but OFD locking syscall is "
532                         "unavailable, falling back to POSIX file locks");
533             error_printf("Due to the implementation, locks can be lost "
534                          "unexpectedly.\n");
535         }
536         break;
537     case ON_OFF_AUTO_OFF:
538         s->use_lock = false;
539         break;
540     case ON_OFF_AUTO_AUTO:
541         s->use_lock = qemu_has_ofd_lock();
542         break;
543     default:
544         abort();
545     }
546 
547     str = qemu_opt_get(opts, "pr-manager");
548     if (str) {
549         s->pr_mgr = pr_manager_lookup(str, &local_err);
550         if (local_err) {
551             error_propagate(errp, local_err);
552             ret = -EINVAL;
553             goto fail;
554         }
555     }
556 
557     s->drop_cache = qemu_opt_get_bool(opts, "drop-cache", true);
558     s->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped",
559                                                false);
560 
561     s->open_flags = open_flags;
562     raw_parse_flags(bdrv_flags, &s->open_flags, false);
563 
564     s->fd = -1;
565     fd = qemu_open(filename, s->open_flags, 0644);
566     ret = fd < 0 ? -errno : 0;
567 
568     if (ret < 0) {
569         error_setg_file_open(errp, -ret, filename);
570         if (ret == -EROFS) {
571             ret = -EACCES;
572         }
573         goto fail;
574     }
575     s->fd = fd;
576 
577     s->perm = 0;
578     s->shared_perm = BLK_PERM_ALL;
579 
580 #ifdef CONFIG_LINUX_AIO
581      /* Currently Linux does AIO only for files opened with O_DIRECT */
582     if (s->use_linux_aio) {
583         if (!(s->open_flags & O_DIRECT)) {
584             error_setg(errp, "aio=native was specified, but it requires "
585                              "cache.direct=on, which was not specified.");
586             ret = -EINVAL;
587             goto fail;
588         }
589         if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) {
590             error_prepend(errp, "Unable to use native AIO: ");
591             goto fail;
592         }
593     }
594 #else
595     if (s->use_linux_aio) {
596         error_setg(errp, "aio=native was specified, but is not supported "
597                          "in this build.");
598         ret = -EINVAL;
599         goto fail;
600     }
601 #endif /* !defined(CONFIG_LINUX_AIO) */
602 
603     s->has_discard = true;
604     s->has_write_zeroes = true;
605     if ((bs->open_flags & BDRV_O_NOCACHE) != 0) {
606         s->needs_alignment = true;
607     }
608 
609     if (fstat(s->fd, &st) < 0) {
610         ret = -errno;
611         error_setg_errno(errp, errno, "Could not stat file");
612         goto fail;
613     }
614 
615     if (!device) {
616         if (S_ISBLK(st.st_mode)) {
617             warn_report("Opening a block device as a file using the '%s' "
618                         "driver is deprecated", bs->drv->format_name);
619         } else if (S_ISCHR(st.st_mode)) {
620             warn_report("Opening a character device as a file using the '%s' "
621                         "driver is deprecated", bs->drv->format_name);
622         } else if (!S_ISREG(st.st_mode)) {
623             error_setg(errp, "A regular file was expected by the '%s' driver, "
624                        "but something else was given", bs->drv->format_name);
625             ret = -EINVAL;
626             goto fail;
627         } else {
628             s->discard_zeroes = true;
629             s->has_fallocate = true;
630         }
631     } else {
632         if (!(S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
633             error_setg(errp, "'%s' driver expects either "
634                        "a character or block device", bs->drv->format_name);
635             ret = -EINVAL;
636             goto fail;
637         }
638     }
639 
640     if (S_ISBLK(st.st_mode)) {
641 #ifdef BLKDISCARDZEROES
642         unsigned int arg;
643         if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) {
644             s->discard_zeroes = true;
645         }
646 #endif
647 #ifdef __linux__
648         /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache.  Do
649          * not rely on the contents of discarded blocks unless using O_DIRECT.
650          * Same for BLKZEROOUT.
651          */
652         if (!(bs->open_flags & BDRV_O_NOCACHE)) {
653             s->discard_zeroes = false;
654             s->has_write_zeroes = false;
655         }
656 #endif
657     }
658 #ifdef __FreeBSD__
659     if (S_ISCHR(st.st_mode)) {
660         /*
661          * The file is a char device (disk), which on FreeBSD isn't behind
662          * a pager, so force all requests to be aligned. This is needed
663          * so QEMU makes sure all IO operations on the device are aligned
664          * to sector size, or else FreeBSD will reject them with EINVAL.
665          */
666         s->needs_alignment = true;
667     }
668 #endif
669 
670 #ifdef CONFIG_XFS
671     if (platform_test_xfs_fd(s->fd)) {
672         s->is_xfs = true;
673     }
674 #endif
675 
676     bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
677     ret = 0;
678 fail:
679     if (filename && (bdrv_flags & BDRV_O_TEMPORARY)) {
680         unlink(filename);
681     }
682     qemu_opts_del(opts);
683     return ret;
684 }
685 
686 static int raw_open(BlockDriverState *bs, QDict *options, int flags,
687                     Error **errp)
688 {
689     BDRVRawState *s = bs->opaque;
690 
691     s->type = FTYPE_FILE;
692     return raw_open_common(bs, options, flags, 0, false, errp);
693 }
694 
695 typedef enum {
696     RAW_PL_PREPARE,
697     RAW_PL_COMMIT,
698     RAW_PL_ABORT,
699 } RawPermLockOp;
700 
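/* Loop over each permission bit position, from 0 up to the highest bit set
 * in BLK_PERM_ALL. */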
701 #define PERM_FOREACH(i) \
702     for ((i) = 0; (1ULL << (i)) <= BLK_PERM_ALL; i++)
703 
704 /* Lock bytes indicated by @perm_lock_bits and @shared_perm_lock_bits in the
705  * file; if @unlock == true, also unlock the unneeded bytes.
706  * @shared_perm_lock_bits is the mask of all permissions that are NOT shared.
707  */
708 static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
709                                 uint64_t perm_lock_bits,
710                                 uint64_t shared_perm_lock_bits,
711                                 bool unlock, Error **errp)
712 {
713     int ret;
714     int i;
715     uint64_t locked_perm, locked_shared_perm;
716 
717     if (s) {
718         locked_perm = s->locked_perm;
719         locked_shared_perm = s->locked_shared_perm;
720     } else {
721         /*
722          * We don't have the previous bits, just lock/unlock for each of the
723          * requested bits.
724          */
725         if (unlock) {
726             locked_perm = BLK_PERM_ALL;
727             locked_shared_perm = BLK_PERM_ALL;
728         } else {
729             locked_perm = 0;
730             locked_shared_perm = 0;
731         }
732     }
733 
734     PERM_FOREACH(i) {
735         int off = RAW_LOCK_PERM_BASE + i;
736         uint64_t bit = (1ULL << i);
737         if ((perm_lock_bits & bit) && !(locked_perm & bit)) {
738             ret = qemu_lock_fd(fd, off, 1, false);
739             if (ret) {
740                 error_setg(errp, "Failed to lock byte %d", off);
741                 return ret;
742             } else if (s) {
743                 s->locked_perm |= bit;
744             }
745         } else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) {
746             ret = qemu_unlock_fd(fd, off, 1);
747             if (ret) {
748                 error_setg(errp, "Failed to unlock byte %d", off);
749                 return ret;
750             } else if (s) {
751                 s->locked_perm &= ~bit;
752             }
753         }
754     }
755     PERM_FOREACH(i) {
756         int off = RAW_LOCK_SHARED_BASE + i;
757         uint64_t bit = (1ULL << i);
758         if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) {
759             ret = qemu_lock_fd(fd, off, 1, false);
760             if (ret) {
761                 error_setg(errp, "Failed to lock byte %d", off);
762                 return ret;
763             } else if (s) {
764                 s->locked_shared_perm |= bit;
765             }
766         } else if (unlock && (locked_shared_perm & bit) &&
767                    !(shared_perm_lock_bits & bit)) {
768             ret = qemu_unlock_fd(fd, off, 1);
769             if (ret) {
770                 error_setg(errp, "Failed to unlock byte %d", off);
771                 return ret;
772             } else if (s) {
773                 s->locked_shared_perm &= ~bit;
774             }
775         }
776     }
777     return 0;
778 }
779 
780 /* Check "unshared" bytes implied by @perm and ~@shared_perm in the file. */
781 static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
782                                 Error **errp)
783 {
784     int ret;
785     int i;
786 
787     PERM_FOREACH(i) {
788         int off = RAW_LOCK_SHARED_BASE + i;
789         uint64_t p = 1ULL << i;
790         if (perm & p) {
791             ret = qemu_lock_fd_test(fd, off, 1, true);
792             if (ret) {
793                 char *perm_name = bdrv_perm_names(p);
794                 error_setg(errp,
795                            "Failed to get \"%s\" lock",
796                            perm_name);
797                 g_free(perm_name);
798                 return ret;
799             }
800         }
801     }
802     PERM_FOREACH(i) {
803         int off = RAW_LOCK_PERM_BASE + i;
804         uint64_t p = 1ULL << i;
805         if (!(shared_perm & p)) {
806             ret = qemu_lock_fd_test(fd, off, 1, true);
807             if (ret) {
808                 char *perm_name = bdrv_perm_names(p);
809                 error_setg(errp,
810                            "Failed to get shared \"%s\" lock",
811                            perm_name);
812                 g_free(perm_name);
813                 return ret;
814             }
815         }
816     }
817     return 0;
818 }
819 
820 static int raw_handle_perm_lock(BlockDriverState *bs,
821                                 RawPermLockOp op,
822                                 uint64_t new_perm, uint64_t new_shared,
823                                 Error **errp)
824 {
825     BDRVRawState *s = bs->opaque;
826     int ret = 0;
827     Error *local_err = NULL;
828 
829     if (!s->use_lock) {
830         return 0;
831     }
832 
833     if (bdrv_get_flags(bs) & BDRV_O_INACTIVE) {
834         return 0;
835     }
836 
837     switch (op) {
838     case RAW_PL_PREPARE:
839         if ((s->perm | new_perm) == s->perm &&
840             (s->shared_perm & new_shared) == s->shared_perm)
841         {
842             /*
843              * We are going to unlock bytes; that should not fail. If it fails
844              * due to some fs-dependent, permission-unrelated reason (which
845              * occurs sometimes on NFS and leads to an abort in
846              * bdrv_replace_child), we can't prevent such errors by any check
847              * here. And we ignore them anyway in ABORT and COMMIT.
848              */
849             return 0;
850         }
851         ret = raw_apply_lock_bytes(s, s->fd, s->perm | new_perm,
852                                    ~s->shared_perm | ~new_shared,
853                                    false, errp);
854         if (!ret) {
855             ret = raw_check_lock_bytes(s->fd, new_perm, new_shared, errp);
856             if (!ret) {
857                 return 0;
858             }
859             error_append_hint(errp,
860                               "Is another process using the image [%s]?\n",
861                               bs->filename);
862         }
863         op = RAW_PL_ABORT;
864         /* fall through to unlock bytes. */
865     case RAW_PL_ABORT:
866         raw_apply_lock_bytes(s, s->fd, s->perm, ~s->shared_perm,
867                              true, &local_err);
868         if (local_err) {
869             /* Theoretically the above call only unlocks bytes and it cannot
870              * fail. Something weird happened, report it.
871              */
872             warn_report_err(local_err);
873         }
874         break;
875     case RAW_PL_COMMIT:
876         raw_apply_lock_bytes(s, s->fd, new_perm, ~new_shared,
877                              true, &local_err);
878         if (local_err) {
879             /* Theoretically the above call only unlocks bytes and it cannot
880              * fail. Something weird happened, report it.
881              */
882             warn_report_err(local_err);
883         }
884         break;
885     }
886     return ret;
887 }
888 
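/*
 * Return a file descriptor that matches the requested @flags/@perm: reuse
 * s->fd if the effective open flags are already right (unless @force_dup),
 * otherwise try qemu_dup() plus fcntl_setfl() when only fcntl-adjustable
 * flags differ, and finally fall back to reopening the file by name.
 * Returns the fd on success, or -1 with @errp set.
 */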
889 static int raw_reconfigure_getfd(BlockDriverState *bs, int flags,
890                                  int *open_flags, uint64_t perm, bool force_dup,
891                                  Error **errp)
892 {
893     BDRVRawState *s = bs->opaque;
894     int fd = -1;
895     int ret;
896     bool has_writers = perm &
897         (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_RESIZE);
898     int fcntl_flags = O_APPEND | O_NONBLOCK;
899 #ifdef O_NOATIME
900     fcntl_flags |= O_NOATIME;
901 #endif
902 
903     *open_flags = 0;
904     if (s->type == FTYPE_CD) {
905         *open_flags |= O_NONBLOCK;
906     }
907 
908     raw_parse_flags(flags, open_flags, has_writers);
909 
910 #ifdef O_ASYNC
911     /* Not all operating systems have O_ASYNC, and those that don't
912      * will not let us track the state into rs->open_flags (typically
913      * you achieve the same effect with an ioctl, for example I_SETSIG
914      * on Solaris). But we do not use O_ASYNC, so that's fine.
915      */
916     assert((s->open_flags & O_ASYNC) == 0);
917 #endif
918 
919     if (!force_dup && *open_flags == s->open_flags) {
920         /* We're lucky, the existing fd is fine */
921         return s->fd;
922     }
923 
924     if ((*open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) {
925         /* dup the original fd */
926         fd = qemu_dup(s->fd);
927         if (fd >= 0) {
928             ret = fcntl_setfl(fd, *open_flags);
929             if (ret) {
930                 qemu_close(fd);
931                 fd = -1;
932             }
933         }
934     }
935 
936     /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */
937     if (fd == -1) {
938         const char *normalized_filename = bs->filename;
939         ret = raw_normalize_devicepath(&normalized_filename, errp);
940         if (ret >= 0) {
941             assert(!(*open_flags & O_CREAT));
942             fd = qemu_open(normalized_filename, *open_flags);
943             if (fd == -1) {
944                 error_setg_errno(errp, errno, "Could not reopen file");
945                 return -1;
946             }
947         }
948     }
949 
950     return fd;
951 }
952 
953 static int raw_reopen_prepare(BDRVReopenState *state,
954                               BlockReopenQueue *queue, Error **errp)
955 {
956     BDRVRawState *s;
957     BDRVRawReopenState *rs;
958     QemuOpts *opts;
959     int ret;
960     Error *local_err = NULL;
961 
962     assert(state != NULL);
963     assert(state->bs != NULL);
964 
965     s = state->bs->opaque;
966 
967     state->opaque = g_new0(BDRVRawReopenState, 1);
968     rs = state->opaque;
969 
970     /* Handle options changes */
971     opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
972     qemu_opts_absorb_qdict(opts, state->options, &local_err);
973     if (local_err) {
974         error_propagate(errp, local_err);
975         ret = -EINVAL;
976         goto out;
977     }
978 
979     rs->drop_cache = qemu_opt_get_bool_del(opts, "drop-cache", true);
980     rs->check_cache_dropped =
981         qemu_opt_get_bool_del(opts, "x-check-cache-dropped", false);
982 
983     /* This driver's reopen function doesn't currently allow changing
984      * other options, so put them back into the original QDict, where
985      * bdrv_reopen_prepare() will detect the changes and complain. */
986     qemu_opts_to_qdict(opts, state->options);
987 
988     rs->fd = raw_reconfigure_getfd(state->bs, state->flags, &rs->open_flags,
989                                    state->perm, true, &local_err);
990     if (local_err) {
991         error_propagate(errp, local_err);
992         ret = -1;
993         goto out;
994     }
995 
996     /* Fail reopen_prepare() already if we can't get a working O_DIRECT
997      * alignment with the new fd. */
998     if (rs->fd != -1) {
999         raw_probe_alignment(state->bs, rs->fd, &local_err);
1000         if (local_err) {
1001             error_propagate(errp, local_err);
1002             ret = -EINVAL;
1003             goto out_fd;
1004         }
1005     }
1006 
1007     s->reopen_state = state;
1008     ret = 0;
1009 out_fd:
1010     if (ret < 0) {
1011         qemu_close(rs->fd);
1012         rs->fd = -1;
1013     }
1014 out:
1015     qemu_opts_del(opts);
1016     return ret;
1017 }
1018 
1019 static void raw_reopen_commit(BDRVReopenState *state)
1020 {
1021     BDRVRawReopenState *rs = state->opaque;
1022     BDRVRawState *s = state->bs->opaque;
1023 
1024     s->drop_cache = rs->drop_cache;
1025     s->check_cache_dropped = rs->check_cache_dropped;
1026     s->open_flags = rs->open_flags;
1027 
1028     qemu_close(s->fd);
1029     s->fd = rs->fd;
1030 
1031     g_free(state->opaque);
1032     state->opaque = NULL;
1033 
1034     assert(s->reopen_state == state);
1035     s->reopen_state = NULL;
1036 }
1037 
1038 
1039 static void raw_reopen_abort(BDRVReopenState *state)
1040 {
1041     BDRVRawReopenState *rs = state->opaque;
1042     BDRVRawState *s = state->bs->opaque;
1043 
1044     /* Nothing to do if NULL; we didn't get far enough */
1045     if (rs == NULL) {
1046         return;
1047     }
1048 
1049     if (rs->fd >= 0) {
1050         qemu_close(rs->fd);
1051         rs->fd = -1;
1052     }
1053     g_free(state->opaque);
1054     state->opaque = NULL;
1055 
1056     assert(s->reopen_state == state);
1057     s->reopen_state = NULL;
1058 }
1059 
1060 static int sg_get_max_transfer_length(int fd)
1061 {
1062 #ifdef BLKSECTGET
1063     int max_bytes = 0;
1064 
1065     if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) {
1066         return max_bytes;
1067     } else {
1068         return -errno;
1069     }
1070 #else
1071     return -ENOSYS;
1072 #endif
1073 }
1074 
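/*
 * Read the maximum number of scatter/gather segments for the device backing
 * @fd from sysfs (e.g. /sys/dev/block/8:0/queue/max_segments for a device
 * with major:minor 8:0).  Returns that value, or a negative errno on failure.
 */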
1075 static int sg_get_max_segments(int fd)
1076 {
1077 #ifdef CONFIG_LINUX
1078     char buf[32];
1079     const char *end;
1080     char *sysfspath = NULL;
1081     int ret;
1082     int sysfd = -1;
1083     long max_segments;
1084     struct stat st;
1085 
1086     if (fstat(fd, &st)) {
1087         ret = -errno;
1088         goto out;
1089     }
1090 
1091     sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments",
1092                                 major(st.st_rdev), minor(st.st_rdev));
1093     sysfd = open(sysfspath, O_RDONLY);
1094     if (sysfd == -1) {
1095         ret = -errno;
1096         goto out;
1097     }
1098     do {
1099         ret = read(sysfd, buf, sizeof(buf) - 1);
1100     } while (ret == -1 && errno == EINTR);
1101     if (ret < 0) {
1102         ret = -errno;
1103         goto out;
1104     } else if (ret == 0) {
1105         ret = -EIO;
1106         goto out;
1107     }
1108     buf[ret] = 0;
1109     /* The file ends with '\n'; pass 'end' to accept that. */
1110     ret = qemu_strtol(buf, &end, 10, &max_segments);
1111     if (ret == 0 && end && *end == '\n') {
1112         ret = max_segments;
1113     }
1114 
1115 out:
1116     if (sysfd != -1) {
1117         close(sysfd);
1118     }
1119     g_free(sysfspath);
1120     return ret;
1121 #else
1122     return -ENOTSUP;
1123 #endif
1124 }
1125 
1126 static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
1127 {
1128     BDRVRawState *s = bs->opaque;
1129 
1130     if (bs->sg) {
1131         int ret = sg_get_max_transfer_length(s->fd);
1132 
1133         if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) {
1134             bs->bl.max_transfer = pow2floor(ret);
1135         }
1136 
1137         ret = sg_get_max_segments(s->fd);
1138         if (ret > 0) {
1139             bs->bl.max_transfer = MIN(bs->bl.max_transfer,
1140                                       ret * qemu_real_host_page_size);
1141         }
1142     }
1143 
1144     raw_probe_alignment(bs, s->fd, errp);
1145     bs->bl.min_mem_alignment = s->buf_align;
1146     bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size);
1147 }
1148 
1149 static int check_for_dasd(int fd)
1150 {
1151 #ifdef BIODASDINFO2
1152     struct dasd_information2_t info = {0};
1153 
1154     return ioctl(fd, BIODASDINFO2, &info);
1155 #else
1156     return -1;
1157 #endif
1158 }
1159 
1160 /**
1161  * Try to get @bs's logical and physical block size.
1162  * On success, store them in @bsz and return zero.
1163  * On failure, return negative errno.
1164  */
1165 static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
1166 {
1167     BDRVRawState *s = bs->opaque;
1168     int ret;
1169 
1170     /* If DASD, get blocksizes */
1171     if (check_for_dasd(s->fd) < 0) {
1172         return -ENOTSUP;
1173     }
1174     ret = probe_logical_blocksize(s->fd, &bsz->log);
1175     if (ret < 0) {
1176         return ret;
1177     }
1178     return probe_physical_blocksize(s->fd, &bsz->phys);
1179 }
1180 
1181 /**
1182  * Try to get @bs's geometry: cyls, heads, sectors.
1183  * On success, store them in @geo and return 0.
1184  * On failure return -errno.
1185  * (Lets the block driver assign default geometry values that the guest sees.)
1186  */
1187 #ifdef __linux__
1188 static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
1189 {
1190     BDRVRawState *s = bs->opaque;
1191     struct hd_geometry ioctl_geo = {0};
1192 
1193     /* If DASD, get its geometry */
1194     if (check_for_dasd(s->fd) < 0) {
1195         return -ENOTSUP;
1196     }
1197     if (ioctl(s->fd, HDIO_GETGEO, &ioctl_geo) < 0) {
1198         return -errno;
1199     }
1200     /* HDIO_GETGEO may return success even though geo contains zeros
1201        (e.g. certain multipath setups) */
1202     if (!ioctl_geo.heads || !ioctl_geo.sectors || !ioctl_geo.cylinders) {
1203         return -ENOTSUP;
1204     }
1205     /* Do not return a geometry for a partition */
1206     if (ioctl_geo.start != 0) {
1207         return -ENOTSUP;
1208     }
1209     geo->heads = ioctl_geo.heads;
1210     geo->sectors = ioctl_geo.sectors;
1211     geo->cylinders = ioctl_geo.cylinders;
1212 
1213     return 0;
1214 }
1215 #else /* __linux__ */
1216 static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
1217 {
1218     return -ENOTSUP;
1219 }
1220 #endif
1221 
1222 #if defined(__linux__)
1223 static int handle_aiocb_ioctl(void *opaque)
1224 {
1225     RawPosixAIOData *aiocb = opaque;
1226     int ret;
1227 
1228     ret = ioctl(aiocb->aio_fildes, aiocb->ioctl.cmd, aiocb->ioctl.buf);
1229     if (ret == -1) {
1230         return -errno;
1231     }
1232 
1233     return 0;
1234 }
1235 #endif /* linux */
1236 
1237 static int handle_aiocb_flush(void *opaque)
1238 {
1239     RawPosixAIOData *aiocb = opaque;
1240     BDRVRawState *s = aiocb->bs->opaque;
1241     int ret;
1242 
1243     if (s->page_cache_inconsistent) {
1244         return -EIO;
1245     }
1246 
1247     ret = qemu_fdatasync(aiocb->aio_fildes);
1248     if (ret == -1) {
1249         /* There is no clear definition of the semantics of a failing fsync(),
1250          * so we may have to assume the worst. The sad truth is that this
1251          * assumption is correct for Linux. Some pages are now probably marked
1252          * clean in the page cache even though they are inconsistent with the
1253          * on-disk contents. The next fdatasync() call would succeed, but no
1254          * further writeback attempt will be made. We can't get back to a state
1255          * in which we know what is on disk (we would have to rewrite
1256          * everything that was touched since the last fdatasync() at least), so
1257          * make bdrv_flush() fail permanently. Given that the behaviour isn't
1258          * really defined, I have little hope that other OSes are doing better.
1259          *
1260          * Obviously, this doesn't affect O_DIRECT, which bypasses the page
1261          * cache. */
1262         if ((s->open_flags & O_DIRECT) == 0) {
1263             s->page_cache_inconsistent = true;
1264         }
1265         return -errno;
1266     }
1267     return 0;
1268 }
1269 
1270 #ifdef CONFIG_PREADV
1271 
1272 static bool preadv_present = true;
1273 
1274 static ssize_t
1275 qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
1276 {
1277     return preadv(fd, iov, nr_iov, offset);
1278 }
1279 
1280 static ssize_t
1281 qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
1282 {
1283     return pwritev(fd, iov, nr_iov, offset);
1284 }
1285 
1286 #else
1287 
1288 static bool preadv_present = false;
1289 
1290 static ssize_t
1291 qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
1292 {
1293     return -ENOSYS;
1294 }
1295 
1296 static ssize_t
1297 qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
1298 {
1299     return -ENOSYS;
1300 }
1301 
1302 #endif
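/*
 * preadv_present starts out reflecting the build-time CONFIG_PREADV result;
 * handle_aiocb_rw() below clears it at runtime if the vectored call does not
 * work out (e.g. returns -ENOSYS) and then linearizes the request into a
 * single bounce buffer instead.
 */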
1303 
1304 static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb)
1305 {
1306     ssize_t len;
1307 
1308     do {
1309         if (aiocb->aio_type & QEMU_AIO_WRITE)
1310             len = qemu_pwritev(aiocb->aio_fildes,
1311                                aiocb->io.iov,
1312                                aiocb->io.niov,
1313                                aiocb->aio_offset);
1314          else
1315             len = qemu_preadv(aiocb->aio_fildes,
1316                               aiocb->io.iov,
1317                               aiocb->io.niov,
1318                               aiocb->aio_offset);
1319     } while (len == -1 && errno == EINTR);
1320 
1321     if (len == -1) {
1322         return -errno;
1323     }
1324     return len;
1325 }
1326 
1327 /*
1328  * Reads/writes the data to/from a given linear buffer.
1329  *
1330  * Returns the number of bytes handled or -errno in case of an error. Short
1331  * reads are only returned if the end of the file is reached.
1332  */
1333 static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf)
1334 {
1335     ssize_t offset = 0;
1336     ssize_t len;
1337 
1338     while (offset < aiocb->aio_nbytes) {
1339         if (aiocb->aio_type & QEMU_AIO_WRITE) {
1340             len = pwrite(aiocb->aio_fildes,
1341                          (const char *)buf + offset,
1342                          aiocb->aio_nbytes - offset,
1343                          aiocb->aio_offset + offset);
1344         } else {
1345             len = pread(aiocb->aio_fildes,
1346                         buf + offset,
1347                         aiocb->aio_nbytes - offset,
1348                         aiocb->aio_offset + offset);
1349         }
1350         if (len == -1 && errno == EINTR) {
1351             continue;
1352         } else if (len == -1 && errno == EINVAL &&
1353                    (aiocb->bs->open_flags & BDRV_O_NOCACHE) &&
1354                    !(aiocb->aio_type & QEMU_AIO_WRITE) &&
1355                    offset > 0) {
1356             /* O_DIRECT pread() may fail with EINVAL when offset is unaligned
1357              * after a short read.  Assume that O_DIRECT short reads only occur
1358              * at EOF.  Therefore this is a short read, not an I/O error.
1359              */
1360             break;
1361         } else if (len == -1) {
1362             offset = -errno;
1363             break;
1364         } else if (len == 0) {
1365             break;
1366         }
1367         offset += len;
1368     }
1369 
1370     return offset;
1371 }
1372 
1373 static int handle_aiocb_rw(void *opaque)
1374 {
1375     RawPosixAIOData *aiocb = opaque;
1376     ssize_t nbytes;
1377     char *buf;
1378 
1379     if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
1380         /*
1381          * If there is just a single buffer, and it is properly aligned
1382          * we can just use plain pread/pwrite without any problems.
1383          */
1384         if (aiocb->io.niov == 1) {
1385             nbytes = handle_aiocb_rw_linear(aiocb, aiocb->io.iov->iov_base);
1386             goto out;
1387         }
1388         /*
1389          * We have more than one iovec, and all are properly aligned.
1390          *
1391          * Try preadv/pwritev first and fall back to linearizing the
1392          * buffer if it's not supported.
1393          */
1394         if (preadv_present) {
1395             nbytes = handle_aiocb_rw_vector(aiocb);
1396             if (nbytes == aiocb->aio_nbytes ||
1397                 (nbytes < 0 && nbytes != -ENOSYS)) {
1398                 goto out;
1399             }
1400             preadv_present = false;
1401         }
1402 
1403         /*
1404          * XXX(hch): short read/write.  No easy way to handle the remainder
1405          * using these interfaces.  For now retry using plain
1406          * pread/pwrite?
1407          */
1408     }
1409 
1410     /*
1411      * Ok, we have to do it the hard way, copy all segments into
1412      * a single aligned buffer.
1413      */
1414     buf = qemu_try_blockalign(aiocb->bs, aiocb->aio_nbytes);
1415     if (buf == NULL) {
1416         nbytes = -ENOMEM;
1417         goto out;
1418     }
1419 
1420     if (aiocb->aio_type & QEMU_AIO_WRITE) {
1421         char *p = buf;
1422         int i;
1423 
1424         for (i = 0; i < aiocb->io.niov; ++i) {
1425             memcpy(p, aiocb->io.iov[i].iov_base, aiocb->io.iov[i].iov_len);
1426             p += aiocb->io.iov[i].iov_len;
1427         }
1428         assert(p - buf == aiocb->aio_nbytes);
1429     }
1430 
1431     nbytes = handle_aiocb_rw_linear(aiocb, buf);
1432     if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
1433         char *p = buf;
1434         size_t count = aiocb->aio_nbytes, copy;
1435         int i;
1436 
1437         for (i = 0; i < aiocb->io.niov && count; ++i) {
1438             copy = count;
1439             if (copy > aiocb->io.iov[i].iov_len) {
1440                 copy = aiocb->io.iov[i].iov_len;
1441             }
1442             memcpy(aiocb->io.iov[i].iov_base, p, copy);
1443             assert(count >= copy);
1444             p     += copy;
1445             count -= copy;
1446         }
1447         assert(count == 0);
1448     }
1449     qemu_vfree(buf);
1450 
1451 out:
1452     if (nbytes == aiocb->aio_nbytes) {
1453         return 0;
1454     } else if (nbytes >= 0 && nbytes < aiocb->aio_nbytes) {
1455         if (aiocb->aio_type & QEMU_AIO_WRITE) {
1456             return -EINVAL;
1457         } else {
1458             iov_memset(aiocb->io.iov, aiocb->io.niov, nbytes,
1459                       0, aiocb->aio_nbytes - nbytes);
1460             return 0;
1461         }
1462     } else {
1463         assert(nbytes < 0);
1464         return nbytes;
1465     }
1466 }
1467 
1468 static int translate_err(int err)
1469 {
1470     if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP ||
1471         err == -ENOTTY) {
1472         err = -ENOTSUP;
1473     }
1474     return err;
1475 }
1476 
1477 #ifdef CONFIG_FALLOCATE
1478 static int do_fallocate(int fd, int mode, off_t offset, off_t len)
1479 {
1480     do {
1481         if (fallocate(fd, mode, offset, len) == 0) {
1482             return 0;
1483         }
1484     } while (errno == EINTR);
1485     return translate_err(-errno);
1486 }
1487 #endif
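/*
 * do_fallocate() is used below with mode 0 (plain preallocation),
 * FALLOC_FL_ZERO_RANGE, and FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 * depending on which write-zeroes/discard strategy is attempted; unsupported
 * modes typically surface as -ENOTSUP via translate_err().
 */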
1488 
1489 static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb)
1490 {
1491     int ret = -ENOTSUP;
1492     BDRVRawState *s = aiocb->bs->opaque;
1493 
1494     if (!s->has_write_zeroes) {
1495         return -ENOTSUP;
1496     }
1497 
1498 #ifdef BLKZEROOUT
1499     /* The BLKZEROOUT implementation in the kernel doesn't set
1500      * BLKDEV_ZERO_NOFALLBACK, so we can't call this if we have to avoid slow
1501      * fallbacks. */
1502     if (!(aiocb->aio_type & QEMU_AIO_NO_FALLBACK)) {
1503         do {
1504             uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
1505             if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
1506                 return 0;
1507             }
1508         } while (errno == EINTR);
1509 
1510         ret = translate_err(-errno);
1511         if (ret == -ENOTSUP) {
1512             s->has_write_zeroes = false;
1513         }
1514     }
1515 #endif
1516 
1517     return ret;
1518 }
1519 
1520 static int handle_aiocb_write_zeroes(void *opaque)
1521 {
1522     RawPosixAIOData *aiocb = opaque;
1523 #ifdef CONFIG_FALLOCATE
1524     BDRVRawState *s = aiocb->bs->opaque;
1525     int64_t len;
1526 #endif
1527 
1528     if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
1529         return handle_aiocb_write_zeroes_block(aiocb);
1530     }
1531 
1532 #ifdef CONFIG_FALLOCATE_ZERO_RANGE
1533     if (s->has_write_zeroes) {
1534         int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE,
1535                                aiocb->aio_offset, aiocb->aio_nbytes);
1536         if (ret == -EINVAL) {
1537             /*
1538              * Allow falling back to pwrite for file systems that
1539              * do not support fallocate() for an unaligned byte range.
1540              */
1541             return -ENOTSUP;
1542         }
1543         if (ret == 0 || ret != -ENOTSUP) {
1544             return ret;
1545         }
1546         s->has_write_zeroes = false;
1547     }
1548 #endif
1549 
1550 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
1551     if (s->has_discard && s->has_fallocate) {
1552         int ret = do_fallocate(s->fd,
1553                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
1554                                aiocb->aio_offset, aiocb->aio_nbytes);
1555         if (ret == 0) {
1556             ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
1557             if (ret == 0 || ret != -ENOTSUP) {
1558                 return ret;
1559             }
1560             s->has_fallocate = false;
1561         } else if (ret != -ENOTSUP) {
1562             return ret;
1563         } else {
1564             s->has_discard = false;
1565         }
1566     }
1567 #endif
1568 
1569 #ifdef CONFIG_FALLOCATE
1570     /* Last resort: we are trying to extend the file with zeroed data. This
1571      * can be done via fallocate(fd, 0) */
1572     len = bdrv_getlength(aiocb->bs);
1573     if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) {
1574         int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
1575         if (ret == 0 || ret != -ENOTSUP) {
1576             return ret;
1577         }
1578         s->has_fallocate = false;
1579     }
1580 #endif
1581 
1582     return -ENOTSUP;
1583 }
1584 
1585 static int handle_aiocb_write_zeroes_unmap(void *opaque)
1586 {
1587     RawPosixAIOData *aiocb = opaque;
1588     BDRVRawState *s G_GNUC_UNUSED = aiocb->bs->opaque;
1589     int ret;
1590 
1591     /* First try to write zeros and unmap at the same time */
1592 
1593 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
1594     ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
1595                        aiocb->aio_offset, aiocb->aio_nbytes);
1596     if (ret != -ENOTSUP) {
1597         return ret;
1598     }
1599 #endif
1600 
1601     /* If we couldn't manage to unmap while guaranteeing that the area reads as
1602      * all-zero afterwards, just write zeroes without unmapping */
1603     ret = handle_aiocb_write_zeroes(aiocb);
1604     return ret;
1605 }
1606 
1607 #ifndef HAVE_COPY_FILE_RANGE
1608 static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd,
1609                              off_t *out_off, size_t len, unsigned int flags)
1610 {
1611 #ifdef __NR_copy_file_range
1612     return syscall(__NR_copy_file_range, in_fd, in_off, out_fd,
1613                    out_off, len, flags);
1614 #else
1615     errno = ENOSYS;
1616     return -1;
1617 #endif
1618 }
1619 #endif
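/*
 * With this fallback in place, handle_aiocb_copy_range() below builds on
 * hosts without a copy_file_range() wrapper; at runtime an ENOSYS result is
 * translated to -ENOTSUP, which callers can treat as "copy offloading not
 * supported".
 */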
1620 
1621 static int handle_aiocb_copy_range(void *opaque)
1622 {
1623     RawPosixAIOData *aiocb = opaque;
1624     uint64_t bytes = aiocb->aio_nbytes;
1625     off_t in_off = aiocb->aio_offset;
1626     off_t out_off = aiocb->copy_range.aio_offset2;
1627 
1628     while (bytes) {
1629         ssize_t ret = copy_file_range(aiocb->aio_fildes, &in_off,
1630                                       aiocb->copy_range.aio_fd2, &out_off,
1631                                       bytes, 0);
1632         trace_file_copy_file_range(aiocb->bs, aiocb->aio_fildes, in_off,
1633                                    aiocb->copy_range.aio_fd2, out_off, bytes,
1634                                    0, ret);
1635         if (ret == 0) {
1636             /* No progress (e.g. when beyond EOF), let the caller fall back to
1637              * buffered I/O. */
1638             return -ENOSPC;
1639         }
1640         if (ret < 0) {
1641             switch (errno) {
1642             case ENOSYS:
1643                 return -ENOTSUP;
1644             case EINTR:
1645                 continue;
1646             default:
1647                 return -errno;
1648             }
1649         }
1650         bytes -= ret;
1651     }
1652     return 0;
1653 }
1654 
1655 static int handle_aiocb_discard(void *opaque)
1656 {
1657     RawPosixAIOData *aiocb = opaque;
1658     int ret = -EOPNOTSUPP;
1659     BDRVRawState *s = aiocb->bs->opaque;
1660 
1661     if (!s->has_discard) {
1662         return -ENOTSUP;
1663     }
1664 
1665     if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
1666 #ifdef BLKDISCARD
1667         do {
1668             uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
1669             if (ioctl(aiocb->aio_fildes, BLKDISCARD, range) == 0) {
1670                 return 0;
1671             }
1672         } while (errno == EINTR);
1673 
1674         ret = -errno;
1675 #endif
1676     } else {
1677 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
1678         ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
1679                            aiocb->aio_offset, aiocb->aio_nbytes);
1680 #endif
1681     }
1682 
1683     ret = translate_err(ret);
1684     if (ret == -ENOTSUP) {
1685         s->has_discard = false;
1686     }
1687     return ret;
1688 }
1689 
1690 /*
1691  * Help alignment probing by allocating the first block.
1692  *
1693  * When reading with direct I/O from an unallocated area on Gluster backed by
1694  * XFS, reading succeeds regardless of request length. In this case we fall
1695  * back to a safe alignment, which is not optimal. Allocating the first block
1696  * avoids this fallback.
1697  *
1698  * fd may be opened with O_DIRECT, but we don't know the buffer alignment or
1699  * request alignment, so we use safe values.
1700  *
1701  * Returns: 0 on success, -errno on failure. Since this is an optimization,
1702  * caller may ignore failures.
1703  */
1704 static int allocate_first_block(int fd, size_t max_size)
1705 {
1706     size_t write_size = (max_size < MAX_BLOCKSIZE)
1707         ? BDRV_SECTOR_SIZE
1708         : MAX_BLOCKSIZE;
1709     size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
1710     void *buf;
1711     ssize_t n;
1712     int ret;
1713 
1714     buf = qemu_memalign(max_align, write_size);
1715     memset(buf, 0, write_size);
1716 
1717     do {
1718         n = pwrite(fd, buf, write_size, 0);
1719     } while (n == -1 && errno == EINTR);
1720 
1721     ret = (n == -1) ? -errno : 0;
1722 
1723     qemu_vfree(buf);
1724     return ret;
1725 }
1726 
1727 static int handle_aiocb_truncate(void *opaque)
1728 {
1729     RawPosixAIOData *aiocb = opaque;
1730     int result = 0;
1731     int64_t current_length = 0;
1732     char *buf = NULL;
1733     struct stat st;
1734     int fd = aiocb->aio_fildes;
1735     int64_t offset = aiocb->aio_offset;
1736     PreallocMode prealloc = aiocb->truncate.prealloc;
1737     Error **errp = aiocb->truncate.errp;
1738 
1739     if (fstat(fd, &st) < 0) {
1740         result = -errno;
1741         error_setg_errno(errp, -result, "Could not stat file");
1742         return result;
1743     }
1744 
1745     current_length = st.st_size;
1746     if (current_length > offset && prealloc != PREALLOC_MODE_OFF) {
1747         error_setg(errp, "Cannot use preallocation for shrinking files");
1748         return -ENOTSUP;
1749     }
1750 
1751     switch (prealloc) {
1752 #ifdef CONFIG_POSIX_FALLOCATE
1753     case PREALLOC_MODE_FALLOC:
1754         /*
1755          * Truncating before posix_fallocate() makes it about twice as slow on
1756          * file systems that do not support fallocate(), because they then
1757          * check whether blocks are already allocated, so don't do that here.
1758          */
1759         if (offset != current_length) {
1760             result = -posix_fallocate(fd, current_length,
1761                                       offset - current_length);
1762             if (result != 0) {
1763                 /* posix_fallocate() doesn't set errno. */
1764                 error_setg_errno(errp, -result,
1765                                  "Could not preallocate new data");
1766             } else if (current_length == 0) {
1767                 /*
1768                  * posix_fallocate() uses fallocate() if the filesystem
1769                  * supports it, or falls back to manually writing zeroes. If
1770                  * fallocate() was used, unaligned reads from the fallocated
1771                  * area in raw_probe_alignment() will succeed, hence we need to
1772                  * allocate the first block.
1773                  *
1774                  * Optimize future alignment probing; ignore failures.
1775                  */
1776                 allocate_first_block(fd, offset);
1777             }
1778         } else {
1779             result = 0;
1780         }
1781         goto out;
1782 #endif
1783     case PREALLOC_MODE_FULL:
1784     {
1785         int64_t num = 0, left = offset - current_length;
1786         off_t seek_result;
1787 
1788         /*
1789          * Knowing the final size from the beginning could allow the file
1790          * system driver to do fewer allocations and possibly avoid
1791          * fragmentation of the file.
1792          */
1793         if (ftruncate(fd, offset) != 0) {
1794             result = -errno;
1795             error_setg_errno(errp, -result, "Could not resize file");
1796             goto out;
1797         }
1798 
1799         buf = g_malloc0(65536);
1800 
1801         seek_result = lseek(fd, current_length, SEEK_SET);
1802         if (seek_result < 0) {
1803             result = -errno;
1804             error_setg_errno(errp, -result,
1805                              "Failed to seek to the old end of file");
1806             goto out;
1807         }
1808 
1809         while (left > 0) {
1810             num = MIN(left, 65536);
1811             result = write(fd, buf, num);
1812             if (result < 0) {
1813                 if (errno == EINTR) {
1814                     continue;
1815                 }
1816                 result = -errno;
1817                 error_setg_errno(errp, -result,
1818                                  "Could not write zeros for preallocation");
1819                 goto out;
1820             }
1821             left -= result;
1822         }
1823         if (result >= 0) {
1824             result = fsync(fd);
1825             if (result < 0) {
1826                 result = -errno;
1827                 error_setg_errno(errp, -result,
1828                                  "Could not flush file to disk");
1829                 goto out;
1830             }
1831         }
1832         goto out;
1833     }
1834     case PREALLOC_MODE_OFF:
1835         if (ftruncate(fd, offset) != 0) {
1836             result = -errno;
1837             error_setg_errno(errp, -result, "Could not resize file");
1838         } else if (current_length == 0 && offset > current_length) {
1839             /* Optimize future alignment probing; ignore failures. */
1840             allocate_first_block(fd, offset);
1841         }
1842         return result;
1843     default:
1844         result = -ENOTSUP;
1845         error_setg(errp, "Unsupported preallocation mode: %s",
1846                    PreallocMode_str(prealloc));
1847         return result;
1848     }
1849 
1850 out:
1851     if (result < 0) {
1852         if (ftruncate(fd, current_length) < 0) {
1853             error_report("Failed to restore old file length: %s",
1854                          strerror(errno));
1855         }
1856     }
1857 
1858     g_free(buf);
1859     return result;
1860 }
1861 
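/* Run @func(@arg) in the thread pool of @bs' AioContext and wait for completion. */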
1862 static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs,
1863                                                ThreadPoolFunc func, void *arg)
1864 {
1865     /* @bs can be NULL; bdrv_get_aio_context() then returns the main context */
1866     ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
1867     return thread_pool_submit_co(pool, func, arg);
1868 }
1869 
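/*
 * Common implementation of raw_co_preadv()/raw_co_pwritev(): submit the
 * request through Linux AIO when possible, otherwise dispatch
 * handle_aiocb_rw() to the thread pool.
 */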
1870 static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
1871                                    uint64_t bytes, QEMUIOVector *qiov, int type)
1872 {
1873     BDRVRawState *s = bs->opaque;
1874     RawPosixAIOData acb;
1875 
1876     if (fd_open(bs) < 0)
1877         return -EIO;
1878 
1879     /*
1880      * Check whether the underlying device requires requests to be aligned,
1881      * and whether the request we are trying to submit is aligned or not.
1882      * If the device needs alignment but the request is misaligned, tell the
1883      * low-level driver that it needs to copy the buffer.
1884      */
1885     if (s->needs_alignment) {
1886         if (!bdrv_qiov_is_aligned(bs, qiov)) {
1887             type |= QEMU_AIO_MISALIGNED;
1888 #ifdef CONFIG_LINUX_AIO
1889         } else if (s->use_linux_aio) {
1890             LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
1891             assert(qiov->size == bytes);
1892             return laio_co_submit(bs, aio, s->fd, offset, qiov, type);
1893 #endif
1894         }
1895     }
1896 
1897     acb = (RawPosixAIOData) {
1898         .bs             = bs,
1899         .aio_fildes     = s->fd,
1900         .aio_type       = type,
1901         .aio_offset     = offset,
1902         .aio_nbytes     = bytes,
1903         .io             = {
1904             .iov            = qiov->iov,
1905             .niov           = qiov->niov,
1906         },
1907     };
1908 
1909     assert(qiov->size == bytes);
1910     return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
1911 }
1912 
1913 static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset,
1914                                       uint64_t bytes, QEMUIOVector *qiov,
1915                                       int flags)
1916 {
1917     return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ);
1918 }
1919 
1920 static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset,
1921                                        uint64_t bytes, QEMUIOVector *qiov,
1922                                        int flags)
1923 {
1924     assert(flags == 0);
1925     return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE);
1926 }
1927 
1928 static void raw_aio_plug(BlockDriverState *bs)
1929 {
1930 #ifdef CONFIG_LINUX_AIO
1931     BDRVRawState *s = bs->opaque;
1932     if (s->use_linux_aio) {
1933         LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
1934         laio_io_plug(bs, aio);
1935     }
1936 #endif
1937 }
1938 
1939 static void raw_aio_unplug(BlockDriverState *bs)
1940 {
1941 #ifdef CONFIG_LINUX_AIO
1942     BDRVRawState *s = bs->opaque;
1943     if (s->use_linux_aio) {
1944         LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
1945         laio_io_unplug(bs, aio);
1946     }
1947 #endif
1948 }
1949 
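/* Flush cached data to the disk by running handle_aiocb_flush() in the thread pool. */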
1950 static int raw_co_flush_to_disk(BlockDriverState *bs)
1951 {
1952     BDRVRawState *s = bs->opaque;
1953     RawPosixAIOData acb;
1954     int ret;
1955 
1956     ret = fd_open(bs);
1957     if (ret < 0) {
1958         return ret;
1959     }
1960 
1961     acb = (RawPosixAIOData) {
1962         .bs             = bs,
1963         .aio_fildes     = s->fd,
1964         .aio_type       = QEMU_AIO_FLUSH,
1965     };
1966 
1967     return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb);
1968 }
1969 
1970 static void raw_aio_attach_aio_context(BlockDriverState *bs,
1971                                        AioContext *new_context)
1972 {
1973 #ifdef CONFIG_LINUX_AIO
1974     BDRVRawState *s = bs->opaque;
1975     if (s->use_linux_aio) {
1976         Error *local_err = NULL;
1977         if (!aio_setup_linux_aio(new_context, &local_err)) {
1978             error_reportf_err(local_err, "Unable to use native AIO, "
1979                                          "falling back to thread pool: ");
1980             s->use_linux_aio = false;
1981         }
1982     }
1983 #endif
1984 }
1985 
1986 static void raw_close(BlockDriverState *bs)
1987 {
1988     BDRVRawState *s = bs->opaque;
1989 
1990     if (s->fd >= 0) {
1991         qemu_close(s->fd);
1992         s->fd = -1;
1993     }
1994 }
1995 
1996 /**
1997  * Truncates the given regular file @fd to @offset and, when growing, fills the
1998  * new space according to @prealloc.
1999  *
2000  * Returns: 0 on success, -errno on failure.
2001  */
2002 static int coroutine_fn
2003 raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset,
2004                      PreallocMode prealloc, Error **errp)
2005 {
2006     RawPosixAIOData acb;
2007 
2008     acb = (RawPosixAIOData) {
2009         .bs             = bs,
2010         .aio_fildes     = fd,
2011         .aio_type       = QEMU_AIO_TRUNCATE,
2012         .aio_offset     = offset,
2013         .truncate       = {
2014             .prealloc       = prealloc,
2015             .errp           = errp,
2016         },
2017     };
2018 
2019     return raw_thread_pool_submit(bs, handle_aiocb_truncate, &acb);
2020 }
2021 
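/*
 * .bdrv_co_truncate implementation: regular files are resized with
 * raw_regular_truncate(); character and block devices cannot really be
 * resized, so the request only succeeds when it does not require growing
 * the device.
 */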
2022 static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
2023                                         bool exact, PreallocMode prealloc,
2024                                         Error **errp)
2025 {
2026     BDRVRawState *s = bs->opaque;
2027     struct stat st;
2028     int ret;
2029 
2030     if (fstat(s->fd, &st)) {
2031         ret = -errno;
2032         error_setg_errno(errp, -ret, "Failed to fstat() the file");
2033         return ret;
2034     }
2035 
2036     if (S_ISREG(st.st_mode)) {
2037         /* Always resizes to the exact @offset */
2038         return raw_regular_truncate(bs, s->fd, offset, prealloc, errp);
2039     }
2040 
2041     if (prealloc != PREALLOC_MODE_OFF) {
2042         error_setg(errp, "Preallocation mode '%s' unsupported for this "
2043                    "non-regular file", PreallocMode_str(prealloc));
2044         return -ENOTSUP;
2045     }
2046 
2047     if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
2048         int64_t cur_length = raw_getlength(bs);
2049 
2050         if (offset != cur_length && exact) {
2051             error_setg(errp, "Cannot resize device files");
2052             return -ENOTSUP;
2053         } else if (offset > cur_length) {
2054             error_setg(errp, "Cannot grow device files");
2055             return -EINVAL;
2056         }
2057     } else {
2058         error_setg(errp, "Resizing this file is not supported");
2059         return -ENOTSUP;
2060     }
2061 
2062     return 0;
2063 }
2064 
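/*
 * raw_getlength(): return the size of the image in bytes.  Host block and
 * character devices need OS-specific ioctls, hence the platform-specific
 * implementations below; the generic fallback simply lseek()s to the end
 * of the file.
 */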
2065 #ifdef __OpenBSD__
2066 static int64_t raw_getlength(BlockDriverState *bs)
2067 {
2068     BDRVRawState *s = bs->opaque;
2069     int fd = s->fd;
2070     struct stat st;
2071 
2072     if (fstat(fd, &st))
2073         return -errno;
2074     if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
2075         struct disklabel dl;
2076 
2077         if (ioctl(fd, DIOCGDINFO, &dl))
2078             return -errno;
2079         return (uint64_t)dl.d_secsize *
2080             dl.d_partitions[DISKPART(st.st_rdev)].p_size;
2081     } else
2082         return st.st_size;
2083 }
2084 #elif defined(__NetBSD__)
2085 static int64_t raw_getlength(BlockDriverState *bs)
2086 {
2087     BDRVRawState *s = bs->opaque;
2088     int fd = s->fd;
2089     struct stat st;
2090 
2091     if (fstat(fd, &st))
2092         return -errno;
2093     if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
2094         struct dkwedge_info dkw;
2095 
2096         if (ioctl(fd, DIOCGWEDGEINFO, &dkw) != -1) {
2097             return dkw.dkw_size * 512;
2098         } else {
2099             struct disklabel dl;
2100 
2101             if (ioctl(fd, DIOCGDINFO, &dl))
2102                 return -errno;
2103             return (uint64_t)dl.d_secsize *
2104                 dl.d_partitions[DISKPART(st.st_rdev)].p_size;
2105         }
2106     } else
2107         return st.st_size;
2108 }
2109 #elif defined(__sun__)
2110 static int64_t raw_getlength(BlockDriverState *bs)
2111 {
2112     BDRVRawState *s = bs->opaque;
2113     struct dk_minfo minfo;
2114     int ret;
2115     int64_t size;
2116 
2117     ret = fd_open(bs);
2118     if (ret < 0) {
2119         return ret;
2120     }
2121 
2122     /*
2123      * Use the DKIOCGMEDIAINFO ioctl to read the size.
2124      */
2125     ret = ioctl(s->fd, DKIOCGMEDIAINFO, &minfo);
2126     if (ret != -1) {
2127         return minfo.dki_lbsize * minfo.dki_capacity;
2128     }
2129 
2130     /*
2131      * There are reports that lseek on some devices fails, but an
2132      * IRC discussion concluded that contingency on contingency was overkill.
2133      */
2134     size = lseek(s->fd, 0, SEEK_END);
2135     if (size < 0) {
2136         return -errno;
2137     }
2138     return size;
2139 }
2140 #elif defined(CONFIG_BSD)
2141 static int64_t raw_getlength(BlockDriverState *bs)
2142 {
2143     BDRVRawState *s = bs->opaque;
2144     int fd = s->fd;
2145     int64_t size;
2146     struct stat sb;
2147 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
2148     int reopened = 0;
2149 #endif
2150     int ret;
2151 
2152     ret = fd_open(bs);
2153     if (ret < 0)
2154         return ret;
2155 
2156 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
2157 again:
2158 #endif
2159     if (!fstat(fd, &sb) && (S_IFCHR & sb.st_mode)) {
2160 #ifdef DIOCGMEDIASIZE
2161         if (ioctl(fd, DIOCGMEDIASIZE, (off_t *)&size))
2162 #elif defined(DIOCGPART)
2163         {
2164                 struct partinfo pi;
2165                 if (ioctl(fd, DIOCGPART, &pi) == 0)
2166                         size = pi.media_size;
2167                 else
2168                         size = 0;
2169         }
2170         if (size == 0)
2171 #endif
2172 #if defined(__APPLE__) && defined(__MACH__)
2173         {
2174             uint64_t sectors = 0;
2175             uint32_t sector_size = 0;
2176 
2177             if (ioctl(fd, DKIOCGETBLOCKCOUNT, &sectors) == 0
2178                && ioctl(fd, DKIOCGETBLOCKSIZE, &sector_size) == 0) {
2179                 size = sectors * sector_size;
2180             } else {
2181                 size = lseek(fd, 0LL, SEEK_END);
2182                 if (size < 0) {
2183                     return -errno;
2184                 }
2185             }
2186         }
2187 #else
2188         size = lseek(fd, 0LL, SEEK_END);
2189         if (size < 0) {
2190             return -errno;
2191         }
2192 #endif
2193 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
2194         switch(s->type) {
2195         case FTYPE_CD:
2196             /* XXX FreeBSD acd returns UINT_MAX sectors for an empty drive */
2197             if (size == 2048LL * (unsigned)-1)
2198                 size = 0;
2199             /* XXX no disc?  maybe we need to reopen... */
2200             if (size <= 0 && !reopened && cdrom_reopen(bs) >= 0) {
2201                 reopened = 1;
2202                 goto again;
2203             }
2204         }
2205 #endif
2206     } else {
2207         size = lseek(fd, 0, SEEK_END);
2208         if (size < 0) {
2209             return -errno;
2210         }
2211     }
2212     return size;
2213 }
2214 #else
2215 static int64_t raw_getlength(BlockDriverState *bs)
2216 {
2217     BDRVRawState *s = bs->opaque;
2218     int ret;
2219     int64_t size;
2220 
2221     ret = fd_open(bs);
2222     if (ret < 0) {
2223         return ret;
2224     }
2225 
2226     size = lseek(s->fd, 0, SEEK_END);
2227     if (size < 0) {
2228         return -errno;
2229     }
2230     return size;
2231 }
2232 #endif
2233 
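/* Space actually allocated on disk; st_blocks is counted in 512-byte units. */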
2234 static int64_t raw_get_allocated_file_size(BlockDriverState *bs)
2235 {
2236     struct stat st;
2237     BDRVRawState *s = bs->opaque;
2238 
2239     if (fstat(s->fd, &st) < 0) {
2240         return -errno;
2241     }
2242     return (int64_t)st.st_blocks * 512;
2243 }
2244 
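/*
 * Create a new raw image file: open/create it, take the image locks, clear it
 * by truncating to zero, optionally set the btrfs NOCOW flag, and finally
 * resize/preallocate it to the requested size.
 */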
2245 static int coroutine_fn
2246 raw_co_create(BlockdevCreateOptions *options, Error **errp)
2247 {
2248     BlockdevCreateOptionsFile *file_opts;
2249     Error *local_err = NULL;
2250     int fd;
2251     uint64_t perm, shared;
2252     int result = 0;
2253 
2254     /* Validate options and set default values */
2255     assert(options->driver == BLOCKDEV_DRIVER_FILE);
2256     file_opts = &options->u.file;
2257 
2258     if (!file_opts->has_nocow) {
2259         file_opts->nocow = false;
2260     }
2261     if (!file_opts->has_preallocation) {
2262         file_opts->preallocation = PREALLOC_MODE_OFF;
2263     }
2264 
2265     /* Create file */
2266     fd = qemu_open(file_opts->filename, O_RDWR | O_CREAT | O_BINARY, 0644);
2267     if (fd < 0) {
2268         result = -errno;
2269         error_setg_errno(errp, -result, "Could not create file");
2270         goto out;
2271     }
2272 
2273     /* Take permissions: We want to discard everything, so we need
2274      * BLK_PERM_WRITE; and truncation to the desired size requires
2275      * BLK_PERM_RESIZE.
2276      * On the other hand, we cannot share the RESIZE permission
2277      * because we promise that after this function, the file has the
2278      * size given in the options.  If someone else were to resize it
2279      * concurrently, we could not guarantee that.
2280      * Note that after this function, we can no longer guarantee that
2281      * the file is not touched by a third party, so it may be resized
2282      * then. */
2283     perm = BLK_PERM_WRITE | BLK_PERM_RESIZE;
2284     shared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;
2285 
2286     /* Step one: Take locks */
2287     result = raw_apply_lock_bytes(NULL, fd, perm, ~shared, false, errp);
2288     if (result < 0) {
2289         goto out_close;
2290     }
2291 
2292     /* Step two: Check that nobody else has taken conflicting locks */
2293     result = raw_check_lock_bytes(fd, perm, shared, errp);
2294     if (result < 0) {
2295         error_append_hint(errp,
2296                           "Is another process using the image [%s]?\n",
2297                           file_opts->filename);
2298         goto out_unlock;
2299     }
2300 
2301     /* Clear the file by truncating it to 0 */
2302     result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, errp);
2303     if (result < 0) {
2304         goto out_unlock;
2305     }
2306 
2307     if (file_opts->nocow) {
2308 #ifdef __linux__
2309         /* Set the NOCOW flag to solve performance issues on filesystems like
2310          * btrfs. This is an optimisation; the FS_IOC_SETFLAGS ioctl return
2311          * value is ignored since any failure of this operation should not
2312          * block the remaining work.
2313          */
2314         int attr;
2315         if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) {
2316             attr |= FS_NOCOW_FL;
2317             ioctl(fd, FS_IOC_SETFLAGS, &attr);
2318         }
2319 #endif
2320     }
2321 
2322     /* Resize and potentially preallocate the file to the desired
2323      * final size */
2324     result = raw_regular_truncate(NULL, fd, file_opts->size,
2325                                   file_opts->preallocation, errp);
2326     if (result < 0) {
2327         goto out_unlock;
2328     }
2329 
2330 out_unlock:
2331     raw_apply_lock_bytes(NULL, fd, 0, 0, true, &local_err);
2332     if (local_err) {
2333         /* The above call should not fail, and if it does, that does
2334          * not mean the whole creation operation has failed.  So
2335          * report it to the user for their convenience, but do not report
2336          * it to the caller. */
2337         warn_report_err(local_err);
2338     }
2339 
2340 out_close:
2341     if (qemu_close(fd) != 0 && result == 0) {
2342         result = -errno;
2343         error_setg_errno(errp, -result, "Could not close the new file");
2344     }
2345 out:
2346     return result;
2347 }
2348 
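/*
 * Legacy option parser: translate QemuOpts into BlockdevCreateOptions and call
 * raw_co_create().  This is the path taken by e.g. an (illustrative) command
 * line such as:
 *   qemu-img create -f raw -o preallocation=falloc,nocow=on disk.img 1G
 */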
2349 static int coroutine_fn raw_co_create_opts(const char *filename, QemuOpts *opts,
2350                                            Error **errp)
2351 {
2352     BlockdevCreateOptions options;
2353     int64_t total_size = 0;
2354     bool nocow = false;
2355     PreallocMode prealloc;
2356     char *buf = NULL;
2357     Error *local_err = NULL;
2358 
2359     /* Skip file: protocol prefix */
2360     strstart(filename, "file:", &filename);
2361 
2362     /* Read out options */
2363     total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
2364                           BDRV_SECTOR_SIZE);
2365     nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false);
2366     buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
2367     prealloc = qapi_enum_parse(&PreallocMode_lookup, buf,
2368                                PREALLOC_MODE_OFF, &local_err);
2369     g_free(buf);
2370     if (local_err) {
2371         error_propagate(errp, local_err);
2372         return -EINVAL;
2373     }
2374 
2375     options = (BlockdevCreateOptions) {
2376         .driver     = BLOCKDEV_DRIVER_FILE,
2377         .u.file     = {
2378             .filename           = (char *) filename,
2379             .size               = total_size,
2380             .has_preallocation  = true,
2381             .preallocation      = prealloc,
2382             .has_nocow          = true,
2383             .nocow              = nocow,
2384         },
2385     };
2386     return raw_co_create(&options, errp);
2387 }
2388 
2389 /*
2390  * Find allocation range in @bs around offset @start.
2391  * May change underlying file descriptor's file offset.
2392  * If @start is not in a hole, store @start in @data, and the
2393  * beginning of the next hole in @hole, and return 0.
2394  * If @start is in a non-trailing hole, store @start in @hole and the
2395  * beginning of the next non-hole in @data, and return 0.
2396  * If @start is in a trailing hole or beyond EOF, return -ENXIO.
2397  * If we can't find out, return a negative errno other than -ENXIO.
2398  */
2399 static int find_allocation(BlockDriverState *bs, off_t start,
2400                            off_t *data, off_t *hole)
2401 {
2402 #if defined SEEK_HOLE && defined SEEK_DATA
2403     BDRVRawState *s = bs->opaque;
2404     off_t offs;
2405 
2406     /*
2407      * SEEK_DATA cases:
2408      * D1. offs == start: start is in data
2409      * D2. offs > start: start is in a hole, next data at offs
2410      * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
2411      *                              or start is beyond EOF
2412      *     If the latter happens, the file has been truncated behind
2413      *     our back since we opened it.  All bets are off then.
2414      *     Treating like a trailing hole is simplest.
2415      * D4. offs < 0, errno != ENXIO: we learned nothing
2416      */
2417     offs = lseek(s->fd, start, SEEK_DATA);
2418     if (offs < 0) {
2419         return -errno;          /* D3 or D4 */
2420     }
2421 
2422     if (offs < start) {
2423         /* This is not a valid return by lseek().  We are safe to just return
2424          * -EIO in this case, and we'll treat it like D4. */
2425         return -EIO;
2426     }
2427 
2428     if (offs > start) {
2429         /* D2: in hole, next data at offs */
2430         *hole = start;
2431         *data = offs;
2432         return 0;
2433     }
2434 
2435     /* D1: in data, end not yet known */
2436 
2437     /*
2438      * SEEK_HOLE cases:
2439      * H1. offs == start: start is in a hole
2440      *     If this happens here, a hole has been dug behind our back
2441      *     since the previous lseek().
2442      * H2. offs > start: either start is in data, next hole at offs,
2443      *                   or start is in trailing hole, EOF at offs
2444      *     Linux treats trailing holes like any other hole: offs ==
2445      *     start.  Solaris seeks to EOF instead: offs > start (blech).
2446      *     If that happens here, a hole has been dug behind our back
2447      *     since the previous lseek().
2448      * H3. offs < 0, errno = ENXIO: start is beyond EOF
2449      *     If this happens, the file has been truncated behind our
2450      *     back since we opened it.  Treat it like a trailing hole.
2451      * H4. offs < 0, errno != ENXIO: we learned nothing
2452      *     Pretend we know nothing at all, i.e. "forget" about D1.
2453      */
2454     offs = lseek(s->fd, start, SEEK_HOLE);
2455     if (offs < 0) {
2456         return -errno;          /* D1 and (H3 or H4) */
2457     }
2458 
2459     if (offs < start) {
2460         /* This is not a valid return by lseek().  We are safe to just return
2461          * -EIO in this case, and we'll treat it like H4. */
2462         return -EIO;
2463     }
2464 
2465     if (offs > start) {
2466         /*
2467          * D1 and H2: either in data, next hole at offs, or it was in
2468          * data but is now in a trailing hole.  In the latter case,
2469          * all bets are off.  Treating it as if there were data all
2470          * the way to EOF is safe, so simply do that.
2471          */
2472         *data = start;
2473         *hole = offs;
2474         return 0;
2475     }
2476 
2477     /* D1 and H1 */
2478     return -EBUSY;
2479 #else
2480     return -ENOTSUP;
2481 #endif
2482 }
2483 
2484 /*
2485  * Returns the allocation status of the specified offset.
2486  *
2487  * The block layer guarantees 'offset' and 'bytes' are within bounds.
2488  *
2489  * 'pnum' is set to the number of bytes (including and immediately following
2490  * the specified offset) that are known to be in the same
2491  * allocated/unallocated state.
2492  *
2493  * 'bytes' is the max value 'pnum' should be set to.
2494  */
2495 static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
2496                                             bool want_zero,
2497                                             int64_t offset,
2498                                             int64_t bytes, int64_t *pnum,
2499                                             int64_t *map,
2500                                             BlockDriverState **file)
2501 {
2502     off_t data = 0, hole = 0;
2503     int ret;
2504 
2505     assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));
2506 
2507     ret = fd_open(bs);
2508     if (ret < 0) {
2509         return ret;
2510     }
2511 
2512     if (!want_zero) {
2513         *pnum = bytes;
2514         *map = offset;
2515         *file = bs;
2516         return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
2517     }
2518 
2519     ret = find_allocation(bs, offset, &data, &hole);
2520     if (ret == -ENXIO) {
2521         /* Trailing hole */
2522         *pnum = bytes;
2523         ret = BDRV_BLOCK_ZERO;
2524     } else if (ret < 0) {
2525         /* No info available, so pretend there are no holes */
2526         *pnum = bytes;
2527         ret = BDRV_BLOCK_DATA;
2528     } else if (data == offset) {
2529         /* On a data extent, compute bytes to the end of the extent,
2530          * possibly including a partial sector at EOF. */
2531         *pnum = MIN(bytes, hole - offset);
2532 
2533         /*
2534          * We are not allowed to return partial sectors, though, so
2535          * round up if necessary.
2536          */
2537         if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
2538             int64_t file_length = raw_getlength(bs);
2539             if (file_length > 0) {
2540                 /* Ignore errors, this is just a safeguard */
2541                 assert(hole == file_length);
2542             }
2543             *pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
2544         }
2545 
2546         ret = BDRV_BLOCK_DATA;
2547     } else {
2548         /* On a hole, compute bytes to the beginning of the next extent.  */
2549         assert(hole == offset);
2550         *pnum = MIN(bytes, data - offset);
2551         ret = BDRV_BLOCK_ZERO;
2552     }
2553     *map = offset;
2554     *file = bs;
2555     return ret | BDRV_BLOCK_OFFSET_VALID;
2556 }
2557 
2558 #if defined(__linux__)
2559 /* Verify that the file is not in the page cache */
2560 static void check_cache_dropped(BlockDriverState *bs, Error **errp)
2561 {
2562     const size_t window_size = 128 * 1024 * 1024;
2563     BDRVRawState *s = bs->opaque;
2564     void *window = NULL;
2565     size_t length = 0;
2566     unsigned char *vec;
2567     size_t page_size;
2568     off_t offset;
2569     off_t end;
2570 
2571     /* mincore(2) page status information requires 1 byte per page */
2572     page_size = sysconf(_SC_PAGESIZE);
2573     vec = g_malloc(DIV_ROUND_UP(window_size, page_size));
2574 
2575     end = raw_getlength(bs);
2576 
2577     for (offset = 0; offset < end; offset += window_size) {
2578         void *new_window;
2579         size_t new_length;
2580         size_t vec_end;
2581         size_t i;
2582         int ret;
2583 
2584         /* Unmap previous window if size has changed */
2585         new_length = MIN(end - offset, window_size);
2586         if (new_length != length) {
2587             munmap(window, length);
2588             window = NULL;
2589             length = 0;
2590         }
2591 
2592         new_window = mmap(window, new_length, PROT_NONE, MAP_PRIVATE,
2593                           s->fd, offset);
2594         if (new_window == MAP_FAILED) {
2595             error_setg_errno(errp, errno, "mmap failed");
2596             break;
2597         }
2598 
2599         window = new_window;
2600         length = new_length;
2601 
2602         ret = mincore(window, length, vec);
2603         if (ret < 0) {
2604             error_setg_errno(errp, errno, "mincore failed");
2605             break;
2606         }
2607 
2608         vec_end = DIV_ROUND_UP(length, page_size);
2609         for (i = 0; i < vec_end; i++) {
2610             if (vec[i] & 0x1) {
2611                 error_setg(errp, "page cache still in use!");
2612                 break;
2613             }
2614         }
2615     }
2616 
2617     if (window) {
2618         munmap(window, length);
2619     }
2620 
2621     g_free(vec);
2622 }
2623 #endif /* __linux__ */
2624 
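/*
 * Drop the host page cache for this file, typically after incoming migration,
 * so that stale data cached before the migration cannot be read back.
 */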
2625 static void coroutine_fn raw_co_invalidate_cache(BlockDriverState *bs,
2626                                                  Error **errp)
2627 {
2628     BDRVRawState *s = bs->opaque;
2629     int ret;
2630 
2631     ret = fd_open(bs);
2632     if (ret < 0) {
2633         error_setg_errno(errp, -ret, "The file descriptor is not open");
2634         return;
2635     }
2636 
2637     if (!s->drop_cache) {
2638         return;
2639     }
2640 
2641     if (s->open_flags & O_DIRECT) {
2642         return; /* No host kernel page cache */
2643     }
2644 
2645 #if defined(__linux__)
2646     /* This sets the scene for the next syscall... */
2647     ret = bdrv_co_flush(bs);
2648     if (ret < 0) {
2649         error_setg_errno(errp, -ret, "flush failed");
2650         return;
2651     }
2652 
2653     /* Linux does not invalidate pages that are dirty, locked, or mmapped by a
2654      * process.  These limitations are okay because we just fsynced the file,
2655      * we don't use mmap, and the file should not be in use by other processes.
2656      */
2657     ret = posix_fadvise(s->fd, 0, 0, POSIX_FADV_DONTNEED);
2658     if (ret != 0) { /* the return value is a positive errno */
2659         error_setg_errno(errp, ret, "fadvise failed");
2660         return;
2661     }
2662 
2663     if (s->check_cache_dropped) {
2664         check_cache_dropped(bs, errp);
2665     }
2666 #else /* __linux__ */
2667     /* Do nothing.  Live migration to a remote host with cache.direct=off is
2668      * unsupported on other host operating systems.  Cache consistency issues
2669      * may occur but no error is reported here, partly because that's the
2670      * historical behavior and partly because it's hard to differentiate valid
2671      * configurations that should not cause errors.
2672      */
2673 #endif /* !__linux__ */
2674 }
2675 
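/* Update the driver's discard statistics (reported by raw_get_specific_stats()). */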
2676 static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
2677 {
2678     if (ret) {
2679         s->stats.discard_nb_failed++;
2680     } else {
2681         s->stats.discard_nb_ok++;
2682         s->stats.discard_bytes_ok += nbytes;
2683     }
2684 }
2685 
2686 static coroutine_fn int
2687 raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev)
2688 {
2689     BDRVRawState *s = bs->opaque;
2690     RawPosixAIOData acb;
2691     int ret;
2692 
2693     acb = (RawPosixAIOData) {
2694         .bs             = bs,
2695         .aio_fildes     = s->fd,
2696         .aio_type       = QEMU_AIO_DISCARD,
2697         .aio_offset     = offset,
2698         .aio_nbytes     = bytes,
2699     };
2700 
2701     if (blkdev) {
2702         acb.aio_type |= QEMU_AIO_BLKDEV;
2703     }
2704 
2705     ret = raw_thread_pool_submit(bs, handle_aiocb_discard, &acb);
2706     raw_account_discard(s, bytes, ret);
2707     return ret;
2708 }
2709 
2710 static coroutine_fn int
2711 raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
2712 {
2713     return raw_do_pdiscard(bs, offset, bytes, false);
2714 }
2715 
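/*
 * Common write-zeroes path for regular files and host block devices; the range
 * may additionally be unmapped when BDRV_REQ_MAY_UNMAP is set.
 */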
2716 static int coroutine_fn
2717 raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
2718                      BdrvRequestFlags flags, bool blkdev)
2719 {
2720     BDRVRawState *s = bs->opaque;
2721     RawPosixAIOData acb;
2722     ThreadPoolFunc *handler;
2723 
2724 #ifdef CONFIG_FALLOCATE
2725     if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
2726         BdrvTrackedRequest *req;
2727         uint64_t end;
2728 
2729         /*
2730          * This is a workaround for a bug in the Linux XFS driver,
2731          * where writes submitted through the AIO interface will be
2732          * discarded if they happen beyond a concurrently running
2733          * fallocate() that increases the file length (i.e., both the
2734          * write and the fallocate() happen beyond the EOF).
2735          *
2736          * To work around it, we extend the tracked request for this
2737          * zero write until INT64_MAX (effectively infinity), and mark
2738          * it as serializing.
2739          *
2740          * We have to enable this workaround for all filesystems and
2741          * AIO modes (not just XFS with aio=native), because for
2742          * remote filesystems we do not know the host configuration.
2743          */
2744 
2745         req = bdrv_co_get_self_request(bs);
2746         assert(req);
2747         assert(req->type == BDRV_TRACKED_WRITE);
2748         assert(req->offset <= offset);
2749         assert(req->offset + req->bytes >= offset + bytes);
2750 
2751         end = INT64_MAX & -(uint64_t)bs->bl.request_alignment;
2752         req->bytes = end - req->offset;
2753         req->overlap_bytes = req->bytes;
2754 
2755         bdrv_mark_request_serialising(req, bs->bl.request_alignment);
2756         bdrv_wait_serialising_requests(req);
2757     }
2758 #endif
2759 
2760     acb = (RawPosixAIOData) {
2761         .bs             = bs,
2762         .aio_fildes     = s->fd,
2763         .aio_type       = QEMU_AIO_WRITE_ZEROES,
2764         .aio_offset     = offset,
2765         .aio_nbytes     = bytes,
2766     };
2767 
2768     if (blkdev) {
2769         acb.aio_type |= QEMU_AIO_BLKDEV;
2770     }
2771     if (flags & BDRV_REQ_NO_FALLBACK) {
2772         acb.aio_type |= QEMU_AIO_NO_FALLBACK;
2773     }
2774 
2775     if (flags & BDRV_REQ_MAY_UNMAP) {
2776         acb.aio_type |= QEMU_AIO_DISCARD;
2777         handler = handle_aiocb_write_zeroes_unmap;
2778     } else {
2779         handler = handle_aiocb_write_zeroes;
2780     }
2781 
2782     return raw_thread_pool_submit(bs, handler, &acb);
2783 }
2784 
2785 static int coroutine_fn raw_co_pwrite_zeroes(
2786     BlockDriverState *bs, int64_t offset,
2787     int bytes, BdrvRequestFlags flags)
2788 {
2789     return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false);
2790 }
2791 
2792 static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2793 {
2794     BDRVRawState *s = bs->opaque;
2795 
2796     bdi->unallocated_blocks_are_zero = s->discard_zeroes;
2797     return 0;
2798 }
2799 
2800 static BlockStatsSpecificFile get_blockstats_specific_file(BlockDriverState *bs)
2801 {
2802     BDRVRawState *s = bs->opaque;
2803     return (BlockStatsSpecificFile) {
2804         .discard_nb_ok = s->stats.discard_nb_ok,
2805         .discard_nb_failed = s->stats.discard_nb_failed,
2806         .discard_bytes_ok = s->stats.discard_bytes_ok,
2807     };
2808 }
2809 
2810 static BlockStatsSpecific *raw_get_specific_stats(BlockDriverState *bs)
2811 {
2812     BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
2813 
2814     stats->driver = BLOCKDEV_DRIVER_FILE;
2815     stats->u.file = get_blockstats_specific_file(bs);
2816 
2817     return stats;
2818 }
2819 
2820 static BlockStatsSpecific *hdev_get_specific_stats(BlockDriverState *bs)
2821 {
2822     BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
2823 
2824     stats->driver = BLOCKDEV_DRIVER_HOST_DEVICE;
2825     stats->u.host_device = get_blockstats_specific_file(bs);
2826 
2827     return stats;
2828 }
2829 
2830 static QemuOptsList raw_create_opts = {
2831     .name = "raw-create-opts",
2832     .head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head),
2833     .desc = {
2834         {
2835             .name = BLOCK_OPT_SIZE,
2836             .type = QEMU_OPT_SIZE,
2837             .help = "Virtual disk size"
2838         },
2839         {
2840             .name = BLOCK_OPT_NOCOW,
2841             .type = QEMU_OPT_BOOL,
2842             .help = "Turn off copy-on-write (valid only on btrfs)"
2843         },
2844         {
2845             .name = BLOCK_OPT_PREALLOC,
2846             .type = QEMU_OPT_STRING,
2847             .help = "Preallocation mode (allowed values: off"
2848 #ifdef CONFIG_POSIX_FALLOCATE
2849                     ", falloc"
2850 #endif
2851                     ", full)"
2852         },
2853         { /* end of list */ }
2854     }
2855 };
2856 
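/*
 * .bdrv_check_perm: prepare a permission change.  If a reopen is in progress,
 * reuse the file descriptor it has already prepared; otherwise get a new fd if
 * auto-read-only has to switch between read-only and read-write, and adjust
 * the file locks accordingly.
 */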
2857 static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared,
2858                           Error **errp)
2859 {
2860     BDRVRawState *s = bs->opaque;
2861     BDRVRawReopenState *rs = NULL;
2862     int open_flags;
2863     int ret;
2864 
2865     if (s->perm_change_fd) {
2866         /*
2867          * In the context of reopen, this function may be called several times
2868          * (directly and recursively while changing permissions of the parent).
2869          * This is even true for children that don't inherit from the original
2870          * reopen node, so s->reopen_state is not set.
2871          *
2872          * Ignore all but the first call.
2873          */
2874         return 0;
2875     }
2876 
2877     if (s->reopen_state) {
2878         /* We already have a new file descriptor to set permissions for */
2879         assert(s->reopen_state->perm == perm);
2880         assert(s->reopen_state->shared_perm == shared);
2881         rs = s->reopen_state->opaque;
2882         s->perm_change_fd = rs->fd;
2883         s->perm_change_flags = rs->open_flags;
2884     } else {
2885         /* We may need a new fd if auto-read-only switches the mode */
2886         ret = raw_reconfigure_getfd(bs, bs->open_flags, &open_flags, perm,
2887                                     false, errp);
2888         if (ret < 0) {
2889             return ret;
2890         } else if (ret != s->fd) {
2891             s->perm_change_fd = ret;
2892             s->perm_change_flags = open_flags;
2893         }
2894     }
2895 
2896     /* Prepare permissions on the old fd to avoid conflicts between old and new,
2897      * but keep everything locked that the new fd will need. */
2898     ret = raw_handle_perm_lock(bs, RAW_PL_PREPARE, perm, shared, errp);
2899     if (ret < 0) {
2900         goto fail;
2901     }
2902 
2903     /* Copy locks to the new fd */
2904     if (s->perm_change_fd) {
2905         ret = raw_apply_lock_bytes(NULL, s->perm_change_fd, perm, ~shared,
2906                                    false, errp);
2907         if (ret < 0) {
2908             raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
2909             goto fail;
2910         }
2911     }
2912     return 0;
2913 
2914 fail:
2915     if (s->perm_change_fd && !s->reopen_state) {
2916         qemu_close(s->perm_change_fd);
2917     }
2918     s->perm_change_fd = 0;
2919     return ret;
2920 }
2921 
2922 static void raw_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared)
2923 {
2924     BDRVRawState *s = bs->opaque;
2925 
2926     /* For reopen, we have already switched to the new fd (.bdrv_set_perm is
2927      * called after .bdrv_reopen_commit) */
2928     if (s->perm_change_fd && s->fd != s->perm_change_fd) {
2929         qemu_close(s->fd);
2930         s->fd = s->perm_change_fd;
2931         s->open_flags = s->perm_change_flags;
2932     }
2933     s->perm_change_fd = 0;
2934 
2935     raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL);
2936     s->perm = perm;
2937     s->shared_perm = shared;
2938 }
2939 
2940 static void raw_abort_perm_update(BlockDriverState *bs)
2941 {
2942     BDRVRawState *s = bs->opaque;
2943 
2944     /* For reopen, .bdrv_reopen_abort is called afterwards and will close
2945      * the file descriptor. */
2946     if (s->perm_change_fd && !s->reopen_state) {
2947         qemu_close(s->perm_change_fd);
2948     }
2949     s->perm_change_fd = 0;
2950 
2951     raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
2952 }
2953 
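/*
 * Copy offloading: the "from" side simply forwards the request to the
 * destination, and the "to" side performs the actual copy by running
 * handle_aiocb_copy_range() in the thread pool, provided both ends are
 * file-posix nodes.
 */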
2954 static int coroutine_fn raw_co_copy_range_from(
2955         BlockDriverState *bs, BdrvChild *src, uint64_t src_offset,
2956         BdrvChild *dst, uint64_t dst_offset, uint64_t bytes,
2957         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags)
2958 {
2959     return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
2960                                  read_flags, write_flags);
2961 }
2962 
2963 static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
2964                                              BdrvChild *src,
2965                                              uint64_t src_offset,
2966                                              BdrvChild *dst,
2967                                              uint64_t dst_offset,
2968                                              uint64_t bytes,
2969                                              BdrvRequestFlags read_flags,
2970                                              BdrvRequestFlags write_flags)
2971 {
2972     RawPosixAIOData acb;
2973     BDRVRawState *s = bs->opaque;
2974     BDRVRawState *src_s;
2975 
2976     assert(dst->bs == bs);
2977     if (src->bs->drv->bdrv_co_copy_range_to != raw_co_copy_range_to) {
2978         return -ENOTSUP;
2979     }
2980 
2981     src_s = src->bs->opaque;
2982     if (fd_open(src->bs) < 0 || fd_open(dst->bs) < 0) {
2983         return -EIO;
2984     }
2985 
2986     acb = (RawPosixAIOData) {
2987         .bs             = bs,
2988         .aio_type       = QEMU_AIO_COPY_RANGE,
2989         .aio_fildes     = src_s->fd,
2990         .aio_offset     = src_offset,
2991         .aio_nbytes     = bytes,
2992         .copy_range     = {
2993             .aio_fd2        = s->fd,
2994             .aio_offset2    = dst_offset,
2995         },
2996     };
2997 
2998     return raw_thread_pool_submit(bs, handle_aiocb_copy_range, &acb);
2999 }
3000 
3001 BlockDriver bdrv_file = {
3002     .format_name = "file",
3003     .protocol_name = "file",
3004     .instance_size = sizeof(BDRVRawState),
3005     .bdrv_needs_filename = true,
3006     .bdrv_probe = NULL, /* no probe for protocols */
3007     .bdrv_parse_filename = raw_parse_filename,
3008     .bdrv_file_open = raw_open,
3009     .bdrv_reopen_prepare = raw_reopen_prepare,
3010     .bdrv_reopen_commit = raw_reopen_commit,
3011     .bdrv_reopen_abort = raw_reopen_abort,
3012     .bdrv_close = raw_close,
3013     .bdrv_co_create = raw_co_create,
3014     .bdrv_co_create_opts = raw_co_create_opts,
3015     .bdrv_has_zero_init = bdrv_has_zero_init_1,
3016     .bdrv_has_zero_init_truncate = bdrv_has_zero_init_1,
3017     .bdrv_co_block_status = raw_co_block_status,
3018     .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
3019     .bdrv_co_pwrite_zeroes = raw_co_pwrite_zeroes,
3020 
3021     .bdrv_co_preadv         = raw_co_preadv,
3022     .bdrv_co_pwritev        = raw_co_pwritev,
3023     .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
3024     .bdrv_co_pdiscard       = raw_co_pdiscard,
3025     .bdrv_co_copy_range_from = raw_co_copy_range_from,
3026     .bdrv_co_copy_range_to  = raw_co_copy_range_to,
3027     .bdrv_refresh_limits = raw_refresh_limits,
3028     .bdrv_io_plug = raw_aio_plug,
3029     .bdrv_io_unplug = raw_aio_unplug,
3030     .bdrv_attach_aio_context = raw_aio_attach_aio_context,
3031 
3032     .bdrv_co_truncate = raw_co_truncate,
3033     .bdrv_getlength = raw_getlength,
3034     .bdrv_get_info = raw_get_info,
3035     .bdrv_get_allocated_file_size
3036                         = raw_get_allocated_file_size,
3037     .bdrv_get_specific_stats = raw_get_specific_stats,
3038     .bdrv_check_perm = raw_check_perm,
3039     .bdrv_set_perm   = raw_set_perm,
3040     .bdrv_abort_perm_update = raw_abort_perm_update,
3041     .create_opts = &raw_create_opts,
3042     .mutable_opts = mutable_opts,
3043 };
3044 
3045 /***********************************************/
3046 /* host device */
3047 
3048 #if defined(__APPLE__) && defined(__MACH__)
3049 static kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
3050                                 CFIndex maxPathSize, int flags);
3051 static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator)
3052 {
3053     kern_return_t kernResult = KERN_FAILURE;
3054     mach_port_t     masterPort;
3055     CFMutableDictionaryRef  classesToMatch;
3056     const char *matching_array[] = {kIODVDMediaClass, kIOCDMediaClass};
3057     char *mediaType = NULL;
3058 
3059     kernResult = IOMasterPort( MACH_PORT_NULL, &masterPort );
3060     if ( KERN_SUCCESS != kernResult ) {
3061         printf( "IOMasterPort returned %d\n", kernResult );
3062     }
3063 
3064     int index;
3065     for (index = 0; index < ARRAY_SIZE(matching_array); index++) {
3066         classesToMatch = IOServiceMatching(matching_array[index]);
3067         if (classesToMatch == NULL) {
3068             error_report("IOServiceMatching returned NULL for %s",
3069                          matching_array[index]);
3070             continue;
3071         }
3072         CFDictionarySetValue(classesToMatch, CFSTR(kIOMediaEjectableKey),
3073                              kCFBooleanTrue);
3074         kernResult = IOServiceGetMatchingServices(masterPort, classesToMatch,
3075                                                   mediaIterator);
3076         if (kernResult != KERN_SUCCESS) {
3077             error_report("Note: IOServiceGetMatchingServices returned %d",
3078                          kernResult);
3079             continue;
3080         }
3081 
3082         /* If a match was found, leave the loop */
3083         if (*mediaIterator != 0) {
3084             trace_file_FindEjectableOpticalMedia(matching_array[index]);
3085             mediaType = g_strdup(matching_array[index]);
3086             break;
3087         }
3088     }
3089     return mediaType;
3090 }
3091 
3092 kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
3093                          CFIndex maxPathSize, int flags)
3094 {
3095     io_object_t     nextMedia;
3096     kern_return_t   kernResult = KERN_FAILURE;
3097     *bsdPath = '\0';
3098     nextMedia = IOIteratorNext( mediaIterator );
3099     if ( nextMedia )
3100     {
3101         CFTypeRef   bsdPathAsCFString;
3102         bsdPathAsCFString = IORegistryEntryCreateCFProperty( nextMedia, CFSTR( kIOBSDNameKey ), kCFAllocatorDefault, 0 );
3103         if ( bsdPathAsCFString ) {
3104             size_t devPathLength;
3105             strcpy( bsdPath, _PATH_DEV );
3106             if (flags & BDRV_O_NOCACHE) {
3107                 strcat(bsdPath, "r");
3108             }
3109             devPathLength = strlen( bsdPath );
3110             if ( CFStringGetCString( bsdPathAsCFString, bsdPath + devPathLength, maxPathSize - devPathLength, kCFStringEncodingASCII ) ) {
3111                 kernResult = KERN_SUCCESS;
3112             }
3113             CFRelease( bsdPathAsCFString );
3114         }
3115         IOObjectRelease( nextMedia );
3116     }
3117 
3118     return kernResult;
3119 }
3120 
3121 /* Sets up a real cdrom for use in QEMU */
3122 static bool setup_cdrom(char *bsd_path, Error **errp)
3123 {
3124     int index, num_of_test_partitions = 2, fd;
3125     char test_partition[MAXPATHLEN];
3126     bool partition_found = false;
3127 
3128     /* look for a working partition */
3129     for (index = 0; index < num_of_test_partitions; index++) {
3130         snprintf(test_partition, sizeof(test_partition), "%ss%d", bsd_path,
3131                  index);
3132         fd = qemu_open(test_partition, O_RDONLY | O_BINARY | O_LARGEFILE);
3133         if (fd >= 0) {
3134             partition_found = true;
3135             qemu_close(fd);
3136             break;
3137         }
3138     }
3139 
3140     /* if a working partition on the device was not found */
3141     if (partition_found == false) {
3142         error_setg(errp, "Failed to find a working partition on disc");
3143     } else {
3144         trace_file_setup_cdrom(test_partition);
3145         pstrcpy(bsd_path, MAXPATHLEN, test_partition);
3146     }
3147     return partition_found;
3148 }
3149 
3150 /* Prints directions on mounting and unmounting a device */
3151 static void print_unmounting_directions(const char *file_name)
3152 {
3153     error_report("If device %s is mounted on the desktop, unmount"
3154                  " it first before using it in QEMU", file_name);
3155     error_report("Command to unmount device: diskutil unmountDisk %s",
3156                  file_name);
3157     error_report("Command to mount device: diskutil mountDisk %s", file_name);
3158 }
3159 
3160 #endif /* defined(__APPLE__) && defined(__MACH__) */
3161 
3162 static int hdev_probe_device(const char *filename)
3163 {
3164     struct stat st;
3165 
3166     /* allow a dedicated CD-ROM driver to match with a higher priority */
3167     if (strstart(filename, "/dev/cdrom", NULL))
3168         return 50;
3169 
3170     if (stat(filename, &st) >= 0 &&
3171             (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
3172         return 100;
3173     }
3174 
3175     return 0;
3176 }
3177 
3178 static int check_hdev_writable(BDRVRawState *s)
3179 {
3180 #if defined(BLKROGET)
3181     /* Linux block devices can be configured "read-only" using blockdev(8).
3182      * This is independent of device node permissions and therefore open(2)
3183      * with O_RDWR succeeds.  Actual writes fail with EPERM.
3184      *
3185      * bdrv_open() is supposed to fail if the disk is read-only.  Explicitly
3186      * check for read-only block devices so that Linux block devices behave
3187      * properly.
3188      */
3189     struct stat st;
3190     int readonly = 0;
3191 
3192     if (fstat(s->fd, &st)) {
3193         return -errno;
3194     }
3195 
3196     if (!S_ISBLK(st.st_mode)) {
3197         return 0;
3198     }
3199 
3200     if (ioctl(s->fd, BLKROGET, &readonly) < 0) {
3201         return -errno;
3202     }
3203 
3204     if (readonly) {
3205         return -EACCES;
3206     }
3207 #endif /* defined(BLKROGET) */
3208     return 0;
3209 }
3210 
3211 static void hdev_parse_filename(const char *filename, QDict *options,
3212                                 Error **errp)
3213 {
3214     bdrv_parse_filename_strip_prefix(filename, "host_device:", options);
3215 }
3216 
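/* Return true if the host device is a Linux SCSI generic (SG) character device. */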
3217 static bool hdev_is_sg(BlockDriverState *bs)
3218 {
3219 
3220 #if defined(__linux__)
3221 
3222     BDRVRawState *s = bs->opaque;
3223     struct stat st;
3224     struct sg_scsi_id scsiid;
3225     int sg_version;
3226     int ret;
3227 
3228     if (stat(bs->filename, &st) < 0 || !S_ISCHR(st.st_mode)) {
3229         return false;
3230     }
3231 
3232     ret = ioctl(s->fd, SG_GET_VERSION_NUM, &sg_version);
3233     if (ret < 0) {
3234         return false;
3235     }
3236 
3237     ret = ioctl(s->fd, SG_GET_SCSI_ID, &scsiid);
3238     if (ret >= 0) {
3239         trace_file_hdev_is_sg(scsiid.scsi_type, sg_version);
3240         return true;
3241     }
3242 
3243 #endif
3244 
3245     return false;
3246 }
3247 
3248 static int hdev_open(BlockDriverState *bs, QDict *options, int flags,
3249                      Error **errp)
3250 {
3251     BDRVRawState *s = bs->opaque;
3252     Error *local_err = NULL;
3253     int ret;
3254 
3255 #if defined(__APPLE__) && defined(__MACH__)
3256     /*
3257      * Caution: while qdict_get_str() is fine, getting non-string types
3258      * would require more care.  When @options come from -blockdev or
3259      * blockdev_add, its members are typed according to the QAPI
3260      * schema, but when they come from -drive, they're all QString.
3261      */
3262     const char *filename = qdict_get_str(options, "filename");
3263     char bsd_path[MAXPATHLEN] = "";
3264     bool error_occurred = false;
3265 
3266     /* If using a real cdrom */
3267     if (strcmp(filename, "/dev/cdrom") == 0) {
3268         char *mediaType = NULL;
3269         kern_return_t ret_val;
3270         io_iterator_t mediaIterator = 0;
3271 
3272         mediaType = FindEjectableOpticalMedia(&mediaIterator);
3273         if (mediaType == NULL) {
3274             error_setg(errp, "Please make sure your CD/DVD is in the optical"
3275                        " drive");
3276             error_occurred = true;
3277             goto hdev_open_Mac_error;
3278         }
3279 
3280         ret_val = GetBSDPath(mediaIterator, bsd_path, sizeof(bsd_path), flags);
3281         if (ret_val != KERN_SUCCESS) {
3282             error_setg(errp, "Could not get BSD path for optical drive");
3283             error_occurred = true;
3284             goto hdev_open_Mac_error;
3285         }
3286 
3287         /* If a real optical drive was not found */
3288         if (bsd_path[0] == '\0') {
3289             error_setg(errp, "Failed to obtain bsd path for optical drive");
3290             error_occurred = true;
3291             goto hdev_open_Mac_error;
3292         }
3293 
3294         /* If using a cdrom disc and finding a partition on the disc failed */
3295         if (strncmp(mediaType, kIOCDMediaClass, 9) == 0 &&
3296             setup_cdrom(bsd_path, errp) == false) {
3297             print_unmounting_directions(bsd_path);
3298             error_occurred = true;
3299             goto hdev_open_Mac_error;
3300         }
3301 
3302         qdict_put_str(options, "filename", bsd_path);
3303 
3304 hdev_open_Mac_error:
3305         g_free(mediaType);
3306         if (mediaIterator) {
3307             IOObjectRelease(mediaIterator);
3308         }
3309         if (error_occurred) {
3310             return -ENOENT;
3311         }
3312     }
3313 #endif /* defined(__APPLE__) && defined(__MACH__) */
3314 
3315     s->type = FTYPE_FILE;
3316 
3317     ret = raw_open_common(bs, options, flags, 0, true, &local_err);
3318     if (ret < 0) {
3319         error_propagate(errp, local_err);
3320 #if defined(__APPLE__) && defined(__MACH__)
3321         if (*bsd_path) {
3322             filename = bsd_path;
3323         }
3324         /* if a physical device experienced an error while being opened */
3325         if (strncmp(filename, "/dev/", 5) == 0) {
3326             print_unmounting_directions(filename);
3327         }
3328 #endif /* defined(__APPLE__) && defined(__MACH__) */
3329         return ret;
3330     }
3331 
3332     /* Since this does an ioctl, the device must already be opened */
3333     bs->sg = hdev_is_sg(bs);
3334 
3335     if (flags & BDRV_O_RDWR) {
3336         ret = check_hdev_writable(s);
3337         if (ret < 0) {
3338             raw_close(bs);
3339             error_setg_errno(errp, -ret, "The device is not writable");
3340             return ret;
3341         }
3342     }
3343 
3344     return ret;
3345 }
3346 
3347 #if defined(__linux__)
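/*
 * Forward an ioctl to the host device; SG_IO persistent reservation commands
 * are routed through the configured pr-manager instead of being issued
 * directly.
 */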
3348 static int coroutine_fn
3349 hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
3350 {
3351     BDRVRawState *s = bs->opaque;
3352     RawPosixAIOData acb;
3353     int ret;
3354 
3355     ret = fd_open(bs);
3356     if (ret < 0) {
3357         return ret;
3358     }
3359 
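    /*
     * SCSI PERSISTENT RESERVE IN/OUT commands sent via SG_IO are routed
     * through the configured persistent reservation manager instead of
     * being issued directly on the device fd.
     */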
3360     if (req == SG_IO && s->pr_mgr) {
3361         struct sg_io_hdr *io_hdr = buf;
3362         if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT ||
3363             io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) {
3364             return pr_manager_execute(s->pr_mgr, bdrv_get_aio_context(bs),
3365                                       s->fd, io_hdr);
3366         }
3367     }
3368 
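    /* Any other ioctl is packaged up and executed on the thread pool */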
3369     acb = (RawPosixAIOData) {
3370         .bs         = bs,
3371         .aio_type   = QEMU_AIO_IOCTL,
3372         .aio_fildes = s->fd,
3373         .aio_offset = 0,
3374         .ioctl      = {
3375             .buf        = buf,
3376             .cmd        = req,
3377         },
3378     };
3379 
3380     return raw_thread_pool_submit(bs, handle_aiocb_ioctl, &acb);
3381 }
3382 #endif /* linux */
3383 
3384 static int fd_open(BlockDriverState *bs)
3385 {
3386     BDRVRawState *s = bs->opaque;
3387 
3388     /* this is just to ensure s->fd is sane (it's called by I/O ops) */
3389     if (s->fd >= 0)
3390         return 0;
3391     return -EIO;
3392 }
3393 
3394 static coroutine_fn int
3395 hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
3396 {
3397     BDRVRawState *s = bs->opaque;
3398     int ret;
3399 
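    /* If the device fd is unusable, the failed discard is still recorded
     * in the node's discard statistics before returning. */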
3400     ret = fd_open(bs);
3401     if (ret < 0) {
3402         raw_account_discard(s, bytes, ret);
3403         return ret;
3404     }
3405     return raw_do_pdiscard(bs, offset, bytes, true);
3406 }
3407 
3408 static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
3409     int64_t offset, int bytes, BdrvRequestFlags flags)
3410 {
3411     int rc;
3412 
3413     rc = fd_open(bs);
3414     if (rc < 0) {
3415         return rc;
3416     }
3417 
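    /* The trailing 'true' marks this request as targeting a host block
     * device rather than a regular file. */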
3418     return raw_do_pwrite_zeroes(bs, offset, bytes, flags, true);
3419 }
3420 
3421 static int coroutine_fn hdev_co_create_opts(const char *filename, QemuOpts *opts,
3422                                             Error **errp)
3423 {
3424     int fd;
3425     int ret = 0;
3426     struct stat stat_buf;
3427     int64_t total_size = 0;
3428     bool has_prefix;
3429 
3430     /* This function is used by both the host_device and host_cdrom protocol
3431      * block drivers, so either of these prefixes may be given.
3432      * The return value has to be stored somewhere, otherwise this is an error
3433      * due to -Werror=unused-value. */
3434     has_prefix =
3435         strstart(filename, "host_device:", &filename) ||
3436         strstart(filename, "host_cdrom:" , &filename);
3437 
3438     (void)has_prefix;
3439 
3440     ret = raw_normalize_devicepath(&filename, errp);
3441     if (ret < 0) {
3442         return ret;
3443     }
3444 
3445     /* Read out options */
3446     total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
3447                           BDRV_SECTOR_SIZE);
3448 
3449     fd = qemu_open(filename, O_WRONLY | O_BINARY);
3450     if (fd < 0) {
3451         ret = -errno;
3452         error_setg_errno(errp, -ret, "Could not open device");
3453         return ret;
3454     }
3455 
3456     if (fstat(fd, &stat_buf) < 0) {
3457         ret = -errno;
3458         error_setg_errno(errp, -ret, "Could not stat device");
3459     } else if (!S_ISBLK(stat_buf.st_mode) && !S_ISCHR(stat_buf.st_mode)) {
3460         error_setg(errp,
3461                    "The given file is neither a block nor a character device");
3462         ret = -ENODEV;
3463     } else if (lseek(fd, 0, SEEK_END) < total_size) {
3464         error_setg(errp, "Device is too small");
3465         ret = -ENOSPC;
3466     }
3467 
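    /* Zero the first sector of the device, presumably to clobber any stale
     * format signature that could otherwise be picked up by a later probe. */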
3468     if (!ret && total_size) {
3469         uint8_t buf[BDRV_SECTOR_SIZE] = { 0 };
3470         int64_t zero_size = MIN(BDRV_SECTOR_SIZE, total_size);
3471         if (lseek(fd, 0, SEEK_SET) == -1) {
3472             ret = -errno;
3473         } else {
3474             ret = qemu_write_full(fd, buf, zero_size);
3475             ret = ret == zero_size ? 0 : -errno;
3476         }
3477     }
3478     qemu_close(fd);
3479     return ret;
3480 }
3481 
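/*
 * Driver table for the "host_device" protocol, which passes a host block or
 * character device node through to the block layer.  An illustrative
 * invocation (device path and node name are examples, not defaults):
 *
 *   qemu-system-x86_64 \
 *       -blockdev driver=host_device,node-name=disk0,filename=/dev/sdb \
 *       -device virtio-blk-pci,drive=disk0
 */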
3482 static BlockDriver bdrv_host_device = {
3483     .format_name        = "host_device",
3484     .protocol_name      = "host_device",
3485     .instance_size      = sizeof(BDRVRawState),
3486     .bdrv_needs_filename = true,
3487     .bdrv_probe_device  = hdev_probe_device,
3488     .bdrv_parse_filename = hdev_parse_filename,
3489     .bdrv_file_open     = hdev_open,
3490     .bdrv_close         = raw_close,
3491     .bdrv_reopen_prepare = raw_reopen_prepare,
3492     .bdrv_reopen_commit  = raw_reopen_commit,
3493     .bdrv_reopen_abort   = raw_reopen_abort,
3494     .bdrv_co_create_opts = hdev_co_create_opts,
3495     .create_opts         = &raw_create_opts,
3496     .mutable_opts        = mutable_opts,
3497     .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
3498     .bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes,
3499 
3500     .bdrv_co_preadv         = raw_co_preadv,
3501     .bdrv_co_pwritev        = raw_co_pwritev,
3502     .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
3503     .bdrv_co_pdiscard       = hdev_co_pdiscard,
3504     .bdrv_co_copy_range_from = raw_co_copy_range_from,
3505     .bdrv_co_copy_range_to  = raw_co_copy_range_to,
3506     .bdrv_refresh_limits = raw_refresh_limits,
3507     .bdrv_io_plug = raw_aio_plug,
3508     .bdrv_io_unplug = raw_aio_unplug,
3509     .bdrv_attach_aio_context = raw_aio_attach_aio_context,
3510 
3511     .bdrv_co_truncate       = raw_co_truncate,
3512     .bdrv_getlength         = raw_getlength,
3513     .bdrv_get_info = raw_get_info,
3514     .bdrv_get_allocated_file_size
3515                         = raw_get_allocated_file_size,
3516     .bdrv_get_specific_stats = hdev_get_specific_stats,
3517     .bdrv_check_perm = raw_check_perm,
3518     .bdrv_set_perm   = raw_set_perm,
3519     .bdrv_abort_perm_update = raw_abort_perm_update,
3520     .bdrv_probe_blocksizes = hdev_probe_blocksizes,
3521     .bdrv_probe_geometry = hdev_probe_geometry,
3522 
3523     /* generic scsi device */
3524 #ifdef __linux__
3525     .bdrv_co_ioctl          = hdev_co_ioctl,
3526 #endif
3527 };
3528 
3529 #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
3530 static void cdrom_parse_filename(const char *filename, QDict *options,
3531                                  Error **errp)
3532 {
3533     bdrv_parse_filename_strip_prefix(filename, "host_cdrom:", options);
3534 }
3535 #endif
3536 
3537 #ifdef __linux__
3538 static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
3539                       Error **errp)
3540 {
3541     BDRVRawState *s = bs->opaque;
3542 
3543     s->type = FTYPE_CD;
3544 
3545     /* Add O_NONBLOCK so that open() does not fail even if no CD is inserted */
3546     return raw_open_common(bs, options, flags, O_NONBLOCK, true, errp);
3547 }
3548 
3549 static int cdrom_probe_device(const char *filename)
3550 {
3551     int fd, ret;
3552     int prio = 0;
3553     struct stat st;
3554 
3555     fd = qemu_open(filename, O_RDONLY | O_NONBLOCK);
3556     if (fd < 0) {
3557         goto out;
3558     }
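    /* Only block devices qualify as host CD-ROM drives here */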
3559     ret = fstat(fd, &st);
3560     if (ret == -1 || !S_ISBLK(st.st_mode)) {
3561         goto outc;
3562     }
3563 
3564     /* Attempt to detect via a CD-ROM-specific ioctl */
3565     ret = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
3566     if (ret >= 0)
3567         prio = 100;
3568 
3569 outc:
3570     qemu_close(fd);
3571 out:
3572     return prio;
3573 }
3574 
3575 static bool cdrom_is_inserted(BlockDriverState *bs)
3576 {
3577     BDRVRawState *s = bs->opaque;
3578     int ret;
3579 
3580     ret = ioctl(s->fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
3581     return ret == CDS_DISC_OK;
3582 }
3583 
3584 static void cdrom_eject(BlockDriverState *bs, bool eject_flag)
3585 {
3586     BDRVRawState *s = bs->opaque;
3587 
3588     if (eject_flag) {
3589         if (ioctl(s->fd, CDROMEJECT, NULL) < 0)
3590             perror("CDROMEJECT");
3591     } else {
3592         if (ioctl(s->fd, CDROMCLOSETRAY, NULL) < 0)
3593             perror("CDROMCLOSETRAY");
3594     }
3595 }
3596 
3597 static void cdrom_lock_medium(BlockDriverState *bs, bool locked)
3598 {
3599     BDRVRawState *s = bs->opaque;
3600 
3601     if (ioctl(s->fd, CDROM_LOCKDOOR, locked) < 0) {
3602         /*
3603          * Note: an error can happen if the distribution automatically
3604          * mounts the CD-ROM
3605          */
3606         /* perror("CDROM_LOCKDOOR"); */
3607     }
3608 }
3609 
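/*
 * Driver table for the "host_cdrom" protocol on Linux.  The "host_cdrom:"
 * prefix is stripped by cdrom_parse_filename() above, so (paths illustrative)
 * either of the following should reach this driver:
 *
 *   -drive file=host_cdrom:/dev/cdrom,media=cdrom
 *   -cdrom /dev/cdrom
 */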
3610 static BlockDriver bdrv_host_cdrom = {
3611     .format_name        = "host_cdrom",
3612     .protocol_name      = "host_cdrom",
3613     .instance_size      = sizeof(BDRVRawState),
3614     .bdrv_needs_filename = true,
3615     .bdrv_probe_device   = cdrom_probe_device,
3616     .bdrv_parse_filename = cdrom_parse_filename,
3617     .bdrv_file_open     = cdrom_open,
3618     .bdrv_close         = raw_close,
3619     .bdrv_reopen_prepare = raw_reopen_prepare,
3620     .bdrv_reopen_commit  = raw_reopen_commit,
3621     .bdrv_reopen_abort   = raw_reopen_abort,
3622     .bdrv_co_create_opts = hdev_co_create_opts,
3623     .create_opts         = &raw_create_opts,
3624     .mutable_opts        = mutable_opts,
3625     .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
3626 
3628     .bdrv_co_preadv         = raw_co_preadv,
3629     .bdrv_co_pwritev        = raw_co_pwritev,
3630     .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
3631     .bdrv_refresh_limits = raw_refresh_limits,
3632     .bdrv_io_plug = raw_aio_plug,
3633     .bdrv_io_unplug = raw_aio_unplug,
3634     .bdrv_attach_aio_context = raw_aio_attach_aio_context,
3635 
3636     .bdrv_co_truncate    = raw_co_truncate,
3637     .bdrv_getlength      = raw_getlength,
3638     .has_variable_length = true,
3639     .bdrv_get_allocated_file_size
3640                         = raw_get_allocated_file_size,
3641 
3642     /* removable device support */
3643     .bdrv_is_inserted   = cdrom_is_inserted,
3644     .bdrv_eject         = cdrom_eject,
3645     .bdrv_lock_medium   = cdrom_lock_medium,
3646 
3647     /* generic scsi device */
3648     .bdrv_co_ioctl      = hdev_co_ioctl,
3649 };
3650 #endif /* __linux__ */
3651 
3652 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
3653 static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
3654                       Error **errp)
3655 {
3656     BDRVRawState *s = bs->opaque;
3657     Error *local_err = NULL;
3658     int ret;
3659 
3660     s->type = FTYPE_CD;
3661 
3662     ret = raw_open_common(bs, options, flags, 0, true, &local_err);
3663     if (ret) {
3664         error_propagate(errp, local_err);
3665         return ret;
3666     }
3667 
3668     /* make sure the door isn't locked at this time */
3669     ioctl(s->fd, CDIOCALLOW);
3670     return 0;
3671 }
3672 
3673 static int cdrom_probe_device(const char *filename)
3674 {
3675     if (strstart(filename, "/dev/cd", NULL) ||
3676             strstart(filename, "/dev/acd", NULL))
3677         return 100;
3678     return 0;
3679 }
3680 
3681 static int cdrom_reopen(BlockDriverState *bs)
3682 {
3683     BDRVRawState *s = bs->opaque;
3684     int fd;
3685 
3686     /*
3687      * Force reread of possibly changed/newly loaded disc,
3688      * FreeBSD seems to not notice sometimes...
3689      */
3690     if (s->fd >= 0)
3691         qemu_close(s->fd);
3692     fd = qemu_open(bs->filename, s->open_flags, 0644);
3693     if (fd < 0) {
3694         s->fd = -1;
3695         return -EIO;
3696     }
3697     s->fd = fd;
3698 
3699     /* make sure the door isn't locked at this time */
3700     ioctl(s->fd, CDIOCALLOW);
3701     return 0;
3702 }
3703 
3704 static bool cdrom_is_inserted(BlockDriverState *bs)
3705 {
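    /* Treat any non-zero device length as "medium present" */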
3706     return raw_getlength(bs) > 0;
3707 }
3708 
3709 static void cdrom_eject(BlockDriverState *bs, bool eject_flag)
3710 {
3711     BDRVRawState *s = bs->opaque;
3712 
3713     if (s->fd < 0)
3714         return;
3715 
3716     (void) ioctl(s->fd, CDIOCALLOW);
3717 
3718     if (eject_flag) {
3719         if (ioctl(s->fd, CDIOCEJECT) < 0)
3720             perror("CDIOCEJECT");
3721     } else {
3722         if (ioctl(s->fd, CDIOCCLOSE) < 0)
3723             perror("CDIOCCLOSE");
3724     }
3725 
3726     cdrom_reopen(bs);
3727 }
3728 
3729 static void cdrom_lock_medium(BlockDriverState *bs, bool locked)
3730 {
3731     BDRVRawState *s = bs->opaque;
3732 
3733     if (s->fd < 0)
3734         return;
3735     if (ioctl(s->fd, (locked ? CDIOCPREVENT : CDIOCALLOW)) < 0) {
3736         /*
3737          * Note: an error can happen if the distribution automatically
3738          * mounts the CD-ROM
3739          */
3740         /* perror("CDROM_LOCKDOOR"); */
3741     }
3742 }
3743 
3744 static BlockDriver bdrv_host_cdrom = {
3745     .format_name        = "host_cdrom",
3746     .protocol_name      = "host_cdrom",
3747     .instance_size      = sizeof(BDRVRawState),
3748     .bdrv_needs_filename = true,
3749     .bdrv_probe_device   = cdrom_probe_device,
3750     .bdrv_parse_filename = cdrom_parse_filename,
3751     .bdrv_file_open     = cdrom_open,
3752     .bdrv_close         = raw_close,
3753     .bdrv_reopen_prepare = raw_reopen_prepare,
3754     .bdrv_reopen_commit  = raw_reopen_commit,
3755     .bdrv_reopen_abort   = raw_reopen_abort,
3756     .bdrv_co_create_opts = hdev_co_create_opts,
3757     .create_opts        = &raw_create_opts,
3758     .mutable_opts       = mutable_opts,
3759 
3760     .bdrv_co_preadv         = raw_co_preadv,
3761     .bdrv_co_pwritev        = raw_co_pwritev,
3762     .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
3763     .bdrv_refresh_limits = raw_refresh_limits,
3764     .bdrv_io_plug = raw_aio_plug,
3765     .bdrv_io_unplug = raw_aio_unplug,
3766     .bdrv_attach_aio_context = raw_aio_attach_aio_context,
3767 
3768     .bdrv_co_truncate    = raw_co_truncate,
3769     .bdrv_getlength      = raw_getlength,
3770     .has_variable_length = true,
3771     .bdrv_get_allocated_file_size
3772                         = raw_get_allocated_file_size,
3773 
3774     /* removable device support */
3775     .bdrv_is_inserted   = cdrom_is_inserted,
3776     .bdrv_eject         = cdrom_eject,
3777     .bdrv_lock_medium   = cdrom_lock_medium,
3778 };
3779 #endif /* __FreeBSD__ */
3780 
3781 static void bdrv_file_init(void)
3782 {
3783     /*
3784      * Register all the drivers.  Note that order is important: the driver
3785      * registered last will get probed first.
3786      */
3787     bdrv_register(&bdrv_file);
3788     bdrv_register(&bdrv_host_device);
3789 #ifdef __linux__
3790     bdrv_register(&bdrv_host_cdrom);
3791 #endif
3792 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
3793     bdrv_register(&bdrv_host_cdrom);
3794 #endif
3795 }
3796 
3797 block_init(bdrv_file_init);
3798