1c1bb86cdSEric Blake /*
2c1bb86cdSEric Blake * Block driver for RAW files (posix)
3c1bb86cdSEric Blake *
4c1bb86cdSEric Blake * Copyright (c) 2006 Fabrice Bellard
5c1bb86cdSEric Blake *
6c1bb86cdSEric Blake * Permission is hereby granted, free of charge, to any person obtaining a copy
7c1bb86cdSEric Blake * of this software and associated documentation files (the "Software"), to deal
8c1bb86cdSEric Blake * in the Software without restriction, including without limitation the rights
9c1bb86cdSEric Blake * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10c1bb86cdSEric Blake * copies of the Software, and to permit persons to whom the Software is
11c1bb86cdSEric Blake * furnished to do so, subject to the following conditions:
12c1bb86cdSEric Blake *
13c1bb86cdSEric Blake * The above copyright notice and this permission notice shall be included in
14c1bb86cdSEric Blake * all copies or substantial portions of the Software.
15c1bb86cdSEric Blake *
16c1bb86cdSEric Blake * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17c1bb86cdSEric Blake * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18c1bb86cdSEric Blake * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19c1bb86cdSEric Blake * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20c1bb86cdSEric Blake * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21c1bb86cdSEric Blake * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22c1bb86cdSEric Blake * THE SOFTWARE.
23c1bb86cdSEric Blake */
24922a01a0SMarkus Armbruster
25c1bb86cdSEric Blake #include "qemu/osdep.h"
26c1bb86cdSEric Blake #include "qapi/error.h"
27c1bb86cdSEric Blake #include "qemu/cutils.h"
28c1bb86cdSEric Blake #include "qemu/error-report.h"
29e2c1c34fSMarkus Armbruster #include "block/block-io.h"
30c1bb86cdSEric Blake #include "block/block_int.h"
31c1bb86cdSEric Blake #include "qemu/module.h"
32922a01a0SMarkus Armbruster #include "qemu/option.h"
33ffa244c8SKevin Wolf #include "qemu/units.h"
345df022cfSPeter Maydell #include "qemu/memalign.h"
35c1bb86cdSEric Blake #include "trace.h"
36c1bb86cdSEric Blake #include "block/thread-pool.h"
37c1bb86cdSEric Blake #include "qemu/iov.h"
38c1bb86cdSEric Blake #include "block/raw-aio.h"
39452fcdbcSMarkus Armbruster #include "qapi/qmp/qdict.h"
40c1bb86cdSEric Blake #include "qapi/qmp/qstring.h"
41c1bb86cdSEric Blake
427c9e5276SPaolo Bonzini #include "scsi/pr-manager.h"
437c9e5276SPaolo Bonzini #include "scsi/constants.h"
447c9e5276SPaolo Bonzini
45c1bb86cdSEric Blake #if defined(__APPLE__) && (__MACH__)
4614176c8dSJoelle van Dyne #include <sys/ioctl.h>
4714176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE)
48c1bb86cdSEric Blake #include <paths.h>
49c1bb86cdSEric Blake #include <sys/param.h>
500dfc7af2SAkihiko Odaki #include <sys/mount.h>
51c1bb86cdSEric Blake #include <IOKit/IOKitLib.h>
52c1bb86cdSEric Blake #include <IOKit/IOBSD.h>
53c1bb86cdSEric Blake #include <IOKit/storage/IOMediaBSDClient.h>
54c1bb86cdSEric Blake #include <IOKit/storage/IOMedia.h>
55c1bb86cdSEric Blake #include <IOKit/storage/IOCDMedia.h>
56c1bb86cdSEric Blake //#include <IOKit/storage/IOCDTypes.h>
57c1bb86cdSEric Blake #include <IOKit/storage/IODVDMedia.h>
58c1bb86cdSEric Blake #include <CoreFoundation/CoreFoundation.h>
5914176c8dSJoelle van Dyne #endif /* defined(HAVE_HOST_BLOCK_DEVICE) */
60c1bb86cdSEric Blake #endif
61c1bb86cdSEric Blake
62c1bb86cdSEric Blake #ifdef __sun__
63c1bb86cdSEric Blake #define _POSIX_PTHREAD_SEMANTICS 1
64c1bb86cdSEric Blake #include <sys/dkio.h>
65c1bb86cdSEric Blake #endif
66c1bb86cdSEric Blake #ifdef __linux__
67c1bb86cdSEric Blake #include <sys/ioctl.h>
68c1bb86cdSEric Blake #include <sys/param.h>
691efad060SFam Zheng #include <sys/syscall.h>
705edc8557SKevin Wolf #include <sys/vfs.h>
716d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
726d43eaa3SSam Li #include <linux/blkzoned.h>
736d43eaa3SSam Li #endif
74c1bb86cdSEric Blake #include <linux/cdrom.h>
75c1bb86cdSEric Blake #include <linux/fd.h>
76c1bb86cdSEric Blake #include <linux/fs.h>
77c1bb86cdSEric Blake #include <linux/hdreg.h>
785edc8557SKevin Wolf #include <linux/magic.h>
79c1bb86cdSEric Blake #include <scsi/sg.h>
80c1bb86cdSEric Blake #ifdef __s390__
81c1bb86cdSEric Blake #include <asm/dasd.h>
82c1bb86cdSEric Blake #endif
83c1bb86cdSEric Blake #ifndef FS_NOCOW_FL
84c1bb86cdSEric Blake #define FS_NOCOW_FL 0x00800000 /* Do not cow file */
85c1bb86cdSEric Blake #endif
86c1bb86cdSEric Blake #endif
87c1bb86cdSEric Blake #if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE)
88c1bb86cdSEric Blake #include <linux/falloc.h>
89c1bb86cdSEric Blake #endif
90c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
91c1bb86cdSEric Blake #include <sys/disk.h>
92c1bb86cdSEric Blake #include <sys/cdio.h>
93c1bb86cdSEric Blake #endif
94c1bb86cdSEric Blake
95c1bb86cdSEric Blake #ifdef __OpenBSD__
96c1bb86cdSEric Blake #include <sys/ioctl.h>
97c1bb86cdSEric Blake #include <sys/disklabel.h>
98c1bb86cdSEric Blake #include <sys/dkio.h>
99c1bb86cdSEric Blake #endif
100c1bb86cdSEric Blake
101c1bb86cdSEric Blake #ifdef __NetBSD__
102c1bb86cdSEric Blake #include <sys/ioctl.h>
103c1bb86cdSEric Blake #include <sys/disklabel.h>
104c1bb86cdSEric Blake #include <sys/dkio.h>
105c1bb86cdSEric Blake #include <sys/disk.h>
106c1bb86cdSEric Blake #endif
107c1bb86cdSEric Blake
108c1bb86cdSEric Blake #ifdef __DragonFly__
109c1bb86cdSEric Blake #include <sys/ioctl.h>
110c1bb86cdSEric Blake #include <sys/diskslice.h>
111c1bb86cdSEric Blake #endif
112c1bb86cdSEric Blake
113c1bb86cdSEric Blake /* OS X does not have O_DSYNC */
114c1bb86cdSEric Blake #ifndef O_DSYNC
115c1bb86cdSEric Blake #ifdef O_SYNC
116c1bb86cdSEric Blake #define O_DSYNC O_SYNC
117c1bb86cdSEric Blake #elif defined(O_FSYNC)
118c1bb86cdSEric Blake #define O_DSYNC O_FSYNC
119c1bb86cdSEric Blake #endif
120c1bb86cdSEric Blake #endif
121c1bb86cdSEric Blake
122c1bb86cdSEric Blake /* Approximate O_DIRECT with O_DSYNC if O_DIRECT isn't available */
123c1bb86cdSEric Blake #ifndef O_DIRECT
124c1bb86cdSEric Blake #define O_DIRECT O_DSYNC
125c1bb86cdSEric Blake #endif
126c1bb86cdSEric Blake
127c1bb86cdSEric Blake #define FTYPE_FILE 0
128c1bb86cdSEric Blake #define FTYPE_CD 1
129c1bb86cdSEric Blake
130c1bb86cdSEric Blake #define MAX_BLOCKSIZE 4096
131c1bb86cdSEric Blake
132244a5668SFam Zheng /* Posix file locking bytes. Libvirt takes byte 0, we start from higher bytes,
133244a5668SFam Zheng * leaving a few more bytes for its future use. */
134244a5668SFam Zheng #define RAW_LOCK_PERM_BASE 100
135244a5668SFam Zheng #define RAW_LOCK_SHARED_BASE 200
136244a5668SFam Zheng
137c1bb86cdSEric Blake typedef struct BDRVRawState {
138c1bb86cdSEric Blake int fd;
139244a5668SFam Zheng bool use_lock;
140c1bb86cdSEric Blake int type;
141c1bb86cdSEric Blake int open_flags;
142c1bb86cdSEric Blake size_t buf_align;
143c1bb86cdSEric Blake
144244a5668SFam Zheng /* The current permissions. */
145244a5668SFam Zheng uint64_t perm;
146244a5668SFam Zheng uint64_t shared_perm;
147244a5668SFam Zheng
1482996ffadSFam Zheng /* The perms bits whose corresponding bytes are already locked in
149f2e3af29SFam Zheng * s->fd. */
1502996ffadSFam Zheng uint64_t locked_perm;
1512996ffadSFam Zheng uint64_t locked_shared_perm;
1522996ffadSFam Zheng
153684960d4SStefano Garzarella uint64_t aio_max_batch;
154684960d4SStefano Garzarella
1556ceabe6fSKevin Wolf int perm_change_fd;
156094e3639SMax Reitz int perm_change_flags;
157e0c9cf3aSKevin Wolf BDRVReopenState *reopen_state;
158e0c9cf3aSKevin Wolf
159c1bb86cdSEric Blake bool has_discard:1;
160c1bb86cdSEric Blake bool has_write_zeroes:1;
161c1bb86cdSEric Blake bool use_linux_aio:1;
16224687abfSPrasad Pandit bool has_laio_fdsync:1;
163c6447510SAarushi Mehta bool use_linux_io_uring:1;
164c7ddc882SDaniel P. Berrangé int page_cache_inconsistent; /* errno from fdatasync failure */
165c1bb86cdSEric Blake bool has_fallocate;
166c1bb86cdSEric Blake bool needs_alignment;
1675dbd0ce1SKevin Wolf bool force_alignment;
168f357fcd8SStefan Hajnoczi bool drop_cache;
16931be8a2aSStefan Hajnoczi bool check_cache_dropped;
1701c450366SAnton Nefedov struct {
1711c450366SAnton Nefedov uint64_t discard_nb_ok;
1721c450366SAnton Nefedov uint64_t discard_nb_failed;
1731c450366SAnton Nefedov uint64_t discard_bytes_ok;
1741c450366SAnton Nefedov } stats;
1757c9e5276SPaolo Bonzini
1767c9e5276SPaolo Bonzini PRManager *pr_mgr;
177c1bb86cdSEric Blake } BDRVRawState;
178c1bb86cdSEric Blake
179c1bb86cdSEric Blake typedef struct BDRVRawReopenState {
180c1bb86cdSEric Blake int open_flags;
181f357fcd8SStefan Hajnoczi bool drop_cache;
18231be8a2aSStefan Hajnoczi bool check_cache_dropped;
183c1bb86cdSEric Blake } BDRVRawReopenState;
184c1bb86cdSEric Blake
/*
 * Sanity check that the image file descriptor is open.
 *
 * Called by the I/O ops before touching s->fd; returns 0 when the fd is
 * valid, -EIO otherwise.
 */
static int fd_open(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;

    return (s->fd >= 0) ? 0 : -EIO;
}
19514176c8dSJoelle van Dyne
19636c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs);
197c1bb86cdSEric Blake
198c1bb86cdSEric Blake typedef struct RawPosixAIOData {
199c1bb86cdSEric Blake BlockDriverState *bs;
200c1bb86cdSEric Blake int aio_type;
201d57c44d0SKevin Wolf int aio_fildes;
202d57c44d0SKevin Wolf
203d57c44d0SKevin Wolf off_t aio_offset;
204d57c44d0SKevin Wolf uint64_t aio_nbytes;
205d57c44d0SKevin Wolf
20693f4e2ffSKevin Wolf union {
20793f4e2ffSKevin Wolf struct {
208d57c44d0SKevin Wolf struct iovec *iov;
209d57c44d0SKevin Wolf int niov;
210d57c44d0SKevin Wolf } io;
211d57c44d0SKevin Wolf struct {
212d57c44d0SKevin Wolf uint64_t cmd;
213d57c44d0SKevin Wolf void *buf;
214d57c44d0SKevin Wolf } ioctl;
215d57c44d0SKevin Wolf struct {
2161efad060SFam Zheng int aio_fd2;
2171efad060SFam Zheng off_t aio_offset2;
218d57c44d0SKevin Wolf } copy_range;
21993f4e2ffSKevin Wolf struct {
22093f4e2ffSKevin Wolf PreallocMode prealloc;
22193f4e2ffSKevin Wolf Error **errp;
222d57c44d0SKevin Wolf } truncate;
2236d43eaa3SSam Li struct {
2246d43eaa3SSam Li unsigned int *nr_zones;
2256d43eaa3SSam Li BlockZoneDescriptor *zones;
2266d43eaa3SSam Li } zone_report;
2276d43eaa3SSam Li struct {
2286d43eaa3SSam Li unsigned long op;
2296d43eaa3SSam Li } zone_mgmt;
23093f4e2ffSKevin Wolf };
231c1bb86cdSEric Blake } RawPosixAIOData;
232c1bb86cdSEric Blake
233c1bb86cdSEric Blake #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
234c1bb86cdSEric Blake static int cdrom_reopen(BlockDriverState *bs);
235c1bb86cdSEric Blake #endif
236c1bb86cdSEric Blake
237797e3e38SDavid Edmondson /*
238797e3e38SDavid Edmondson * Elide EAGAIN and EACCES details when failing to lock, as this
239797e3e38SDavid Edmondson * indicates that the specified file region is already locked by
240797e3e38SDavid Edmondson * another process, which is considered a common scenario.
241797e3e38SDavid Edmondson */
242797e3e38SDavid Edmondson #define raw_lock_error_setg_errno(errp, err, fmt, ...) \
243797e3e38SDavid Edmondson do { \
244797e3e38SDavid Edmondson if ((err) == EAGAIN || (err) == EACCES) { \
245797e3e38SDavid Edmondson error_setg((errp), (fmt), ## __VA_ARGS__); \
246797e3e38SDavid Edmondson } else { \
247797e3e38SDavid Edmondson error_setg_errno((errp), (err), (fmt), ## __VA_ARGS__); \
248797e3e38SDavid Edmondson } \
249797e3e38SDavid Edmondson } while (0)
250797e3e38SDavid Edmondson
251c1bb86cdSEric Blake #if defined(__NetBSD__)
raw_normalize_devicepath(const char ** filename,Error ** errp)252db0754dfSFam Zheng static int raw_normalize_devicepath(const char **filename, Error **errp)
253c1bb86cdSEric Blake {
254c1bb86cdSEric Blake static char namebuf[PATH_MAX];
255c1bb86cdSEric Blake const char *dp, *fname;
256c1bb86cdSEric Blake struct stat sb;
257c1bb86cdSEric Blake
258c1bb86cdSEric Blake fname = *filename;
259c1bb86cdSEric Blake dp = strrchr(fname, '/');
260c1bb86cdSEric Blake if (lstat(fname, &sb) < 0) {
261f6fc1e30SPaolo Bonzini error_setg_file_open(errp, errno, fname);
262c1bb86cdSEric Blake return -errno;
263c1bb86cdSEric Blake }
264c1bb86cdSEric Blake
265c1bb86cdSEric Blake if (!S_ISBLK(sb.st_mode)) {
266c1bb86cdSEric Blake return 0;
267c1bb86cdSEric Blake }
268c1bb86cdSEric Blake
269c1bb86cdSEric Blake if (dp == NULL) {
270c1bb86cdSEric Blake snprintf(namebuf, PATH_MAX, "r%s", fname);
271c1bb86cdSEric Blake } else {
272c1bb86cdSEric Blake snprintf(namebuf, PATH_MAX, "%.*s/r%s",
273c1bb86cdSEric Blake (int)(dp - fname), fname, dp + 1);
274c1bb86cdSEric Blake }
275c1bb86cdSEric Blake *filename = namebuf;
276db0754dfSFam Zheng warn_report("%s is a block device, using %s", fname, *filename);
277c1bb86cdSEric Blake
278c1bb86cdSEric Blake return 0;
279c1bb86cdSEric Blake }
280c1bb86cdSEric Blake #else
raw_normalize_devicepath(const char ** filename,Error ** errp)281db0754dfSFam Zheng static int raw_normalize_devicepath(const char **filename, Error **errp)
282c1bb86cdSEric Blake {
283c1bb86cdSEric Blake return 0;
284c1bb86cdSEric Blake }
285c1bb86cdSEric Blake #endif
286c1bb86cdSEric Blake
287c1bb86cdSEric Blake /*
288c1bb86cdSEric Blake * Get logical block size via ioctl. On success store it in @sector_size_p.
289c1bb86cdSEric Blake */
probe_logical_blocksize(int fd,unsigned int * sector_size_p)290c1bb86cdSEric Blake static int probe_logical_blocksize(int fd, unsigned int *sector_size_p)
291c1bb86cdSEric Blake {
292c1bb86cdSEric Blake unsigned int sector_size;
293c1bb86cdSEric Blake bool success = false;
294700f9ce0SPeter Maydell int i;
295c1bb86cdSEric Blake
296c1bb86cdSEric Blake errno = ENOTSUP;
297700f9ce0SPeter Maydell static const unsigned long ioctl_list[] = {
298c1bb86cdSEric Blake #ifdef BLKSSZGET
299700f9ce0SPeter Maydell BLKSSZGET,
300c1bb86cdSEric Blake #endif
301c1bb86cdSEric Blake #ifdef DKIOCGETBLOCKSIZE
302700f9ce0SPeter Maydell DKIOCGETBLOCKSIZE,
303c1bb86cdSEric Blake #endif
304c1bb86cdSEric Blake #ifdef DIOCGSECTORSIZE
305700f9ce0SPeter Maydell DIOCGSECTORSIZE,
306700f9ce0SPeter Maydell #endif
307700f9ce0SPeter Maydell };
308700f9ce0SPeter Maydell
309700f9ce0SPeter Maydell /* Try a few ioctls to get the right size */
310700f9ce0SPeter Maydell for (i = 0; i < (int)ARRAY_SIZE(ioctl_list); i++) {
311700f9ce0SPeter Maydell if (ioctl(fd, ioctl_list[i], §or_size) >= 0) {
312c1bb86cdSEric Blake *sector_size_p = sector_size;
313c1bb86cdSEric Blake success = true;
314c1bb86cdSEric Blake }
315700f9ce0SPeter Maydell }
316c1bb86cdSEric Blake
317c1bb86cdSEric Blake return success ? 0 : -errno;
318c1bb86cdSEric Blake }
319c1bb86cdSEric Blake
320c1bb86cdSEric Blake /**
321c1bb86cdSEric Blake * Get physical block size of @fd.
322c1bb86cdSEric Blake * On success, store it in @blk_size and return 0.
323c1bb86cdSEric Blake * On failure, return -errno.
324c1bb86cdSEric Blake */
probe_physical_blocksize(int fd,unsigned int * blk_size)325c1bb86cdSEric Blake static int probe_physical_blocksize(int fd, unsigned int *blk_size)
326c1bb86cdSEric Blake {
327c1bb86cdSEric Blake #ifdef BLKPBSZGET
328c1bb86cdSEric Blake if (ioctl(fd, BLKPBSZGET, blk_size) < 0) {
329c1bb86cdSEric Blake return -errno;
330c1bb86cdSEric Blake }
331c1bb86cdSEric Blake return 0;
332c1bb86cdSEric Blake #else
333c1bb86cdSEric Blake return -ENOTSUP;
334c1bb86cdSEric Blake #endif
335c1bb86cdSEric Blake }
336c1bb86cdSEric Blake
3375edc8557SKevin Wolf /*
3385edc8557SKevin Wolf * Returns true if no alignment restrictions are necessary even for files
3395edc8557SKevin Wolf * opened with O_DIRECT.
3405edc8557SKevin Wolf *
3415edc8557SKevin Wolf * raw_probe_alignment() probes the required alignment and assume that 1 means
3425edc8557SKevin Wolf * the probing failed, so it falls back to a safe default of 4k. This can be
3435edc8557SKevin Wolf * avoided if we know that byte alignment is okay for the file.
3445edc8557SKevin Wolf */
dio_byte_aligned(int fd)3455edc8557SKevin Wolf static bool dio_byte_aligned(int fd)
3465edc8557SKevin Wolf {
3475edc8557SKevin Wolf #ifdef __linux__
3485edc8557SKevin Wolf struct statfs buf;
3495edc8557SKevin Wolf int ret;
3505edc8557SKevin Wolf
3515edc8557SKevin Wolf ret = fstatfs(fd, &buf);
3525edc8557SKevin Wolf if (ret == 0 && buf.f_type == NFS_SUPER_MAGIC) {
3535edc8557SKevin Wolf return true;
3545edc8557SKevin Wolf }
3555edc8557SKevin Wolf #endif
3565edc8557SKevin Wolf return false;
3575edc8557SKevin Wolf }
3585edc8557SKevin Wolf
raw_needs_alignment(BlockDriverState * bs)3595dbd0ce1SKevin Wolf static bool raw_needs_alignment(BlockDriverState *bs)
3605dbd0ce1SKevin Wolf {
3615dbd0ce1SKevin Wolf BDRVRawState *s = bs->opaque;
3625dbd0ce1SKevin Wolf
3635dbd0ce1SKevin Wolf if ((bs->open_flags & BDRV_O_NOCACHE) != 0 && !dio_byte_aligned(s->fd)) {
3645dbd0ce1SKevin Wolf return true;
3655dbd0ce1SKevin Wolf }
3665dbd0ce1SKevin Wolf
3675dbd0ce1SKevin Wolf return s->force_alignment;
3685dbd0ce1SKevin Wolf }
3695dbd0ce1SKevin Wolf
370c1bb86cdSEric Blake /* Check if read is allowed with given memory buffer and length.
371c1bb86cdSEric Blake *
372c1bb86cdSEric Blake * This function is used to check O_DIRECT memory buffer and request alignment.
373c1bb86cdSEric Blake */
raw_is_io_aligned(int fd,void * buf,size_t len)374c1bb86cdSEric Blake static bool raw_is_io_aligned(int fd, void *buf, size_t len)
375c1bb86cdSEric Blake {
376c1bb86cdSEric Blake ssize_t ret = pread(fd, buf, len, 0);
377c1bb86cdSEric Blake
378c1bb86cdSEric Blake if (ret >= 0) {
379c1bb86cdSEric Blake return true;
380c1bb86cdSEric Blake }
381c1bb86cdSEric Blake
382c1bb86cdSEric Blake #ifdef __linux__
383c1bb86cdSEric Blake /* The Linux kernel returns EINVAL for misaligned O_DIRECT reads. Ignore
384c1bb86cdSEric Blake * other errors (e.g. real I/O error), which could happen on a failed
385c1bb86cdSEric Blake * drive, since we only care about probing alignment.
386c1bb86cdSEric Blake */
387c1bb86cdSEric Blake if (errno != EINVAL) {
388c1bb86cdSEric Blake return true;
389c1bb86cdSEric Blake }
390c1bb86cdSEric Blake #endif
391c1bb86cdSEric Blake
392c1bb86cdSEric Blake return false;
393c1bb86cdSEric Blake }
394c1bb86cdSEric Blake
/*
 * Determine bs->bl.request_alignment (minimum request size/offset
 * granularity) and s->buf_align (memory buffer alignment) for @fd.
 *
 * Probing order: logical block size ioctl, then (Linux) the XFS
 * XFS_IOC_DIOINFO ioctl, then empirical probing with O_DIRECT reads.
 * On total failure both values stay 0 and @errp is set.
 */
static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    char *buf;
    size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
    size_t alignments[] = {1, 512, 1024, 2048, 4096};

    /* For SCSI generic devices the alignment is not really used.
       With buffered I/O, we don't have any restrictions. */
    if (bdrv_is_sg(bs) || !s->needs_alignment) {
        bs->bl.request_alignment = 1;
        s->buf_align = 1;
        return;
    }

    /* 0 means "unknown" until one of the probes below succeeds. */
    bs->bl.request_alignment = 0;
    s->buf_align = 0;
    /* Let's try to use the logical blocksize for the alignment. */
    if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) {
        bs->bl.request_alignment = 0;
    }

#ifdef __linux__
    /*
     * The XFS ioctl definitions are shipped in extra packages that might
     * not always be available. Since we just need the XFS_IOC_DIOINFO ioctl
     * here, we simply use our own definition instead:
     */
    struct xfs_dioattr {
        uint32_t d_mem;
        uint32_t d_miniosz;
        uint32_t d_maxiosz;
    } da;
    /* _IOR('X', 30, ...) is XFS_IOC_DIOINFO; fails harmlessly on non-XFS. */
    if (ioctl(fd, _IOR('X', 30, struct xfs_dioattr), &da) >= 0) {
        bs->bl.request_alignment = da.d_miniosz;
        /* The kernel returns wrong information for d_mem */
        /* s->buf_align = da.d_mem; */
    }
#endif

    /*
     * If we could not get the sizes so far, we can only guess them. First try
     * to detect request alignment, since it is more likely to succeed. Then
     * try to detect buf_align, which cannot be detected in some cases (e.g.
     * Gluster). If buf_align cannot be detected, we fallback to the value of
     * request_alignment.
     */

    if (!bs->bl.request_alignment) {
        int i;
        size_t align;
        buf = qemu_memalign(max_align, max_align);
        /* Read with increasing lengths from a max-aligned buffer; the first
         * length that works is the request alignment. */
        for (i = 0; i < ARRAY_SIZE(alignments); i++) {
            align = alignments[i];
            if (raw_is_io_aligned(fd, buf, align)) {
                /* Fallback to safe value. */
                bs->bl.request_alignment = (align != 1) ? align : max_align;
                break;
            }
        }
        qemu_vfree(buf);
    }

    if (!s->buf_align) {
        int i;
        size_t align;
        buf = qemu_memalign(max_align, 2 * max_align);
        /* Read a max-aligned length from increasingly offset buffer
         * addresses; the first offset that works is the buffer alignment. */
        for (i = 0; i < ARRAY_SIZE(alignments); i++) {
            align = alignments[i];
            if (raw_is_io_aligned(fd, buf + align, max_align)) {
                /* Fallback to request_alignment. */
                s->buf_align = (align != 1) ? align : bs->bl.request_alignment;
                break;
            }
        }
        qemu_vfree(buf);
    }

    if (!s->buf_align || !bs->bl.request_alignment) {
        error_setg(errp, "Could not find working O_DIRECT alignment");
        error_append_hint(errp, "Try cache.direct=off\n");
    }
}
478c1bb86cdSEric Blake
/*
 * Check whether @fd is actually writable.
 *
 * Linux block devices can be configured "read-only" using blockdev(8).
 * This is independent of device node permissions and therefore open(2)
 * with O_RDWR succeeds. Actual writes fail with EPERM.
 *
 * bdrv_open() is supposed to fail if the disk is read-only. Explicitly
 * check for read-only block devices so that Linux block devices behave
 * properly.
 *
 * Returns 0 if writable, -EACCES if the device is read-only, other
 * -errno on stat/ioctl failure.
 */
static int check_hdev_writable(int fd)
{
#if defined(BLKROGET)
    struct stat st;
    int readonly = 0;

    if (fstat(fd, &st) != 0) {
        return -errno;
    }

    /* Only block devices carry the blockdev(8) read-only flag. */
    if (!S_ISBLK(st.st_mode)) {
        return 0;
    }

    if (ioctl(fd, BLKROGET, &readonly) < 0) {
        return -errno;
    }

    return readonly ? -EACCES : 0;
#else
    return 0;
#endif /* defined(BLKROGET) */
}
51120eaf1bfSKevin Wolf
raw_parse_flags(int bdrv_flags,int * open_flags,bool has_writers)51223dece19SKevin Wolf static void raw_parse_flags(int bdrv_flags, int *open_flags, bool has_writers)
513c1bb86cdSEric Blake {
51423dece19SKevin Wolf bool read_write = false;
515c1bb86cdSEric Blake assert(open_flags != NULL);
516c1bb86cdSEric Blake
517c1bb86cdSEric Blake *open_flags |= O_BINARY;
518c1bb86cdSEric Blake *open_flags &= ~O_ACCMODE;
51923dece19SKevin Wolf
52023dece19SKevin Wolf if (bdrv_flags & BDRV_O_AUTO_RDONLY) {
52123dece19SKevin Wolf read_write = has_writers;
52223dece19SKevin Wolf } else if (bdrv_flags & BDRV_O_RDWR) {
52323dece19SKevin Wolf read_write = true;
52423dece19SKevin Wolf }
52523dece19SKevin Wolf
52623dece19SKevin Wolf if (read_write) {
527c1bb86cdSEric Blake *open_flags |= O_RDWR;
528c1bb86cdSEric Blake } else {
529c1bb86cdSEric Blake *open_flags |= O_RDONLY;
530c1bb86cdSEric Blake }
531c1bb86cdSEric Blake
532c1bb86cdSEric Blake /* Use O_DSYNC for write-through caching, no flags for write-back caching,
533c1bb86cdSEric Blake * and O_DIRECT for no caching. */
534c1bb86cdSEric Blake if ((bdrv_flags & BDRV_O_NOCACHE)) {
535c1bb86cdSEric Blake *open_flags |= O_DIRECT;
536c1bb86cdSEric Blake }
537c1bb86cdSEric Blake }
538c1bb86cdSEric Blake
/* Strip the optional "file:" protocol prefix from @filename and store
 * the result as the "filename" option in @options. */
static void raw_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    bdrv_parse_filename_strip_prefix(filename, "file:", options);
}
544c1bb86cdSEric Blake
545c1bb86cdSEric Blake static QemuOptsList raw_runtime_opts = {
546c1bb86cdSEric Blake .name = "raw",
547c1bb86cdSEric Blake .head = QTAILQ_HEAD_INITIALIZER(raw_runtime_opts.head),
548c1bb86cdSEric Blake .desc = {
549c1bb86cdSEric Blake {
550c1bb86cdSEric Blake .name = "filename",
551c1bb86cdSEric Blake .type = QEMU_OPT_STRING,
552c1bb86cdSEric Blake .help = "File name of the image",
553c1bb86cdSEric Blake },
554c1bb86cdSEric Blake {
555c1bb86cdSEric Blake .name = "aio",
556c1bb86cdSEric Blake .type = QEMU_OPT_STRING,
557c6447510SAarushi Mehta .help = "host AIO implementation (threads, native, io_uring)",
558c1bb86cdSEric Blake },
55916b48d5dSFam Zheng {
560684960d4SStefano Garzarella .name = "aio-max-batch",
561684960d4SStefano Garzarella .type = QEMU_OPT_NUMBER,
562684960d4SStefano Garzarella .help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)",
563684960d4SStefano Garzarella },
564684960d4SStefano Garzarella {
56516b48d5dSFam Zheng .name = "locking",
56616b48d5dSFam Zheng .type = QEMU_OPT_STRING,
56716b48d5dSFam Zheng .help = "file locking mode (on/off/auto, default: auto)",
56816b48d5dSFam Zheng },
5697c9e5276SPaolo Bonzini {
5707c9e5276SPaolo Bonzini .name = "pr-manager",
5717c9e5276SPaolo Bonzini .type = QEMU_OPT_STRING,
5727c9e5276SPaolo Bonzini .help = "id of persistent reservation manager object (default: none)",
5737c9e5276SPaolo Bonzini },
574f357fcd8SStefan Hajnoczi #if defined(__linux__)
575f357fcd8SStefan Hajnoczi {
576f357fcd8SStefan Hajnoczi .name = "drop-cache",
577f357fcd8SStefan Hajnoczi .type = QEMU_OPT_BOOL,
578f357fcd8SStefan Hajnoczi .help = "invalidate page cache during live migration (default: on)",
579f357fcd8SStefan Hajnoczi },
580f357fcd8SStefan Hajnoczi #endif
58131be8a2aSStefan Hajnoczi {
58231be8a2aSStefan Hajnoczi .name = "x-check-cache-dropped",
58331be8a2aSStefan Hajnoczi .type = QEMU_OPT_BOOL,
58431be8a2aSStefan Hajnoczi .help = "check that page cache was dropped on live migration (default: off)"
58531be8a2aSStefan Hajnoczi },
586c1bb86cdSEric Blake { /* end of list */ }
587c1bb86cdSEric Blake },
588c1bb86cdSEric Blake };
589c1bb86cdSEric Blake
5908a2ce0bcSAlberto Garcia static const char *const mutable_opts[] = { "x-check-cache-dropped", NULL };
5918a2ce0bcSAlberto Garcia
raw_open_common(BlockDriverState * bs,QDict * options,int bdrv_flags,int open_flags,bool device,Error ** errp)592c1bb86cdSEric Blake static int raw_open_common(BlockDriverState *bs, QDict *options,
593230ff739SJohn Snow int bdrv_flags, int open_flags,
594230ff739SJohn Snow bool device, Error **errp)
595c1bb86cdSEric Blake {
596c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
597c1bb86cdSEric Blake QemuOpts *opts;
598c1bb86cdSEric Blake Error *local_err = NULL;
599c1bb86cdSEric Blake const char *filename = NULL;
6007c9e5276SPaolo Bonzini const char *str;
601c1bb86cdSEric Blake BlockdevAioOptions aio, aio_default;
602c1bb86cdSEric Blake int fd, ret;
603c1bb86cdSEric Blake struct stat st;
604244a5668SFam Zheng OnOffAuto locking;
605c1bb86cdSEric Blake
606c1bb86cdSEric Blake opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
607af175e85SMarkus Armbruster if (!qemu_opts_absorb_qdict(opts, options, errp)) {
608c1bb86cdSEric Blake ret = -EINVAL;
609c1bb86cdSEric Blake goto fail;
610c1bb86cdSEric Blake }
611c1bb86cdSEric Blake
612c1bb86cdSEric Blake filename = qemu_opt_get(opts, "filename");
613c1bb86cdSEric Blake
614db0754dfSFam Zheng ret = raw_normalize_devicepath(&filename, errp);
615c1bb86cdSEric Blake if (ret != 0) {
616c1bb86cdSEric Blake goto fail;
617c1bb86cdSEric Blake }
618c1bb86cdSEric Blake
619c6447510SAarushi Mehta if (bdrv_flags & BDRV_O_NATIVE_AIO) {
620c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_NATIVE;
621c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING
622c6447510SAarushi Mehta } else if (bdrv_flags & BDRV_O_IO_URING) {
623c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_IO_URING;
624c6447510SAarushi Mehta #endif
625c6447510SAarushi Mehta } else {
626c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_THREADS;
627c6447510SAarushi Mehta }
628c6447510SAarushi Mehta
629f7abe0ecSMarc-André Lureau aio = qapi_enum_parse(&BlockdevAioOptions_lookup,
630f7abe0ecSMarc-André Lureau qemu_opt_get(opts, "aio"),
63106c60b6cSMarkus Armbruster aio_default, &local_err);
632c1bb86cdSEric Blake if (local_err) {
633c1bb86cdSEric Blake error_propagate(errp, local_err);
634c1bb86cdSEric Blake ret = -EINVAL;
635c1bb86cdSEric Blake goto fail;
636c1bb86cdSEric Blake }
637c6447510SAarushi Mehta
638c1bb86cdSEric Blake s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE);
639c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING
640c6447510SAarushi Mehta s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING);
641c6447510SAarushi Mehta #endif
642c1bb86cdSEric Blake
643684960d4SStefano Garzarella s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0);
644684960d4SStefano Garzarella
645f7abe0ecSMarc-André Lureau locking = qapi_enum_parse(&OnOffAuto_lookup,
646f7abe0ecSMarc-André Lureau qemu_opt_get(opts, "locking"),
64706c60b6cSMarkus Armbruster ON_OFF_AUTO_AUTO, &local_err);
648244a5668SFam Zheng if (local_err) {
649244a5668SFam Zheng error_propagate(errp, local_err);
650244a5668SFam Zheng ret = -EINVAL;
651244a5668SFam Zheng goto fail;
652244a5668SFam Zheng }
653244a5668SFam Zheng switch (locking) {
654244a5668SFam Zheng case ON_OFF_AUTO_ON:
655244a5668SFam Zheng s->use_lock = true;
6562b218f5dSFam Zheng if (!qemu_has_ofd_lock()) {
657db0754dfSFam Zheng warn_report("File lock requested but OFD locking syscall is "
658db0754dfSFam Zheng "unavailable, falling back to POSIX file locks");
659db0754dfSFam Zheng error_printf("Due to the implementation, locks can be lost "
6602b218f5dSFam Zheng "unexpectedly.\n");
6612b218f5dSFam Zheng }
662244a5668SFam Zheng break;
663244a5668SFam Zheng case ON_OFF_AUTO_OFF:
664244a5668SFam Zheng s->use_lock = false;
665244a5668SFam Zheng break;
666244a5668SFam Zheng case ON_OFF_AUTO_AUTO:
6672b218f5dSFam Zheng s->use_lock = qemu_has_ofd_lock();
668244a5668SFam Zheng break;
669244a5668SFam Zheng default:
670244a5668SFam Zheng abort();
671244a5668SFam Zheng }
672244a5668SFam Zheng
6737c9e5276SPaolo Bonzini str = qemu_opt_get(opts, "pr-manager");
6747c9e5276SPaolo Bonzini if (str) {
6757c9e5276SPaolo Bonzini s->pr_mgr = pr_manager_lookup(str, &local_err);
6767c9e5276SPaolo Bonzini if (local_err) {
6777c9e5276SPaolo Bonzini error_propagate(errp, local_err);
6787c9e5276SPaolo Bonzini ret = -EINVAL;
6797c9e5276SPaolo Bonzini goto fail;
6807c9e5276SPaolo Bonzini }
6817c9e5276SPaolo Bonzini }
6827c9e5276SPaolo Bonzini
683f357fcd8SStefan Hajnoczi s->drop_cache = qemu_opt_get_bool(opts, "drop-cache", true);
68431be8a2aSStefan Hajnoczi s->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped",
68531be8a2aSStefan Hajnoczi false);
68631be8a2aSStefan Hajnoczi
687c1bb86cdSEric Blake s->open_flags = open_flags;
68823dece19SKevin Wolf raw_parse_flags(bdrv_flags, &s->open_flags, false);
689c1bb86cdSEric Blake
690c1bb86cdSEric Blake s->fd = -1;
691b18a24a9SDaniel P. Berrangé fd = qemu_open(filename, s->open_flags, errp);
69264107dc0SKevin Wolf ret = fd < 0 ? -errno : 0;
69364107dc0SKevin Wolf
69464107dc0SKevin Wolf if (ret < 0) {
695c1bb86cdSEric Blake if (ret == -EROFS) {
696c1bb86cdSEric Blake ret = -EACCES;
697c1bb86cdSEric Blake }
698c1bb86cdSEric Blake goto fail;
699c1bb86cdSEric Blake }
700c1bb86cdSEric Blake s->fd = fd;
701c1bb86cdSEric Blake
702bca5283bSKevin Wolf /* Check s->open_flags rather than bdrv_flags due to auto-read-only */
703bca5283bSKevin Wolf if (s->open_flags & O_RDWR) {
704bca5283bSKevin Wolf ret = check_hdev_writable(s->fd);
705bca5283bSKevin Wolf if (ret < 0) {
706bca5283bSKevin Wolf error_setg_errno(errp, -ret, "The device is not writable");
707bca5283bSKevin Wolf goto fail;
708bca5283bSKevin Wolf }
709bca5283bSKevin Wolf }
710bca5283bSKevin Wolf
711244a5668SFam Zheng s->perm = 0;
712244a5668SFam Zheng s->shared_perm = BLK_PERM_ALL;
713244a5668SFam Zheng
714c1bb86cdSEric Blake #ifdef CONFIG_LINUX_AIO
715c1bb86cdSEric Blake /* Currently Linux does AIO only for files opened with O_DIRECT */
716cd0c0db0SStefan Hajnoczi if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) {
717c1bb86cdSEric Blake error_setg(errp, "aio=native was specified, but it requires "
718c1bb86cdSEric Blake "cache.direct=on, which was not specified.");
719c1bb86cdSEric Blake ret = -EINVAL;
720c1bb86cdSEric Blake goto fail;
721c1bb86cdSEric Blake }
72224687abfSPrasad Pandit if (s->use_linux_aio) {
72324687abfSPrasad Pandit s->has_laio_fdsync = laio_has_fdsync(s->fd);
72424687abfSPrasad Pandit }
725c1bb86cdSEric Blake #else
726c1bb86cdSEric Blake if (s->use_linux_aio) {
727c1bb86cdSEric Blake error_setg(errp, "aio=native was specified, but is not supported "
728c1bb86cdSEric Blake "in this build.");
729c1bb86cdSEric Blake ret = -EINVAL;
730c1bb86cdSEric Blake goto fail;
731c1bb86cdSEric Blake }
732c1bb86cdSEric Blake #endif /* !defined(CONFIG_LINUX_AIO) */
733c1bb86cdSEric Blake
734cd0c0db0SStefan Hajnoczi #ifndef CONFIG_LINUX_IO_URING
735c6447510SAarushi Mehta if (s->use_linux_io_uring) {
736c6447510SAarushi Mehta error_setg(errp, "aio=io_uring was specified, but is not supported "
737c6447510SAarushi Mehta "in this build.");
738c6447510SAarushi Mehta ret = -EINVAL;
739c6447510SAarushi Mehta goto fail;
740c6447510SAarushi Mehta }
741c6447510SAarushi Mehta #endif /* !defined(CONFIG_LINUX_IO_URING) */
742c6447510SAarushi Mehta
743c1bb86cdSEric Blake s->has_discard = true;
744c1bb86cdSEric Blake s->has_write_zeroes = true;
745c1bb86cdSEric Blake
746c1bb86cdSEric Blake if (fstat(s->fd, &st) < 0) {
747c1bb86cdSEric Blake ret = -errno;
748c1bb86cdSEric Blake error_setg_errno(errp, errno, "Could not stat file");
749c1bb86cdSEric Blake goto fail;
750c1bb86cdSEric Blake }
751230ff739SJohn Snow
752230ff739SJohn Snow if (!device) {
7538d17adf3SDaniel P. Berrangé if (!S_ISREG(st.st_mode)) {
7548d17adf3SDaniel P. Berrangé error_setg(errp, "'%s' driver requires '%s' to be a regular file",
7558d17adf3SDaniel P. Berrangé bs->drv->format_name, bs->filename);
756230ff739SJohn Snow ret = -EINVAL;
757230ff739SJohn Snow goto fail;
758230ff739SJohn Snow } else {
759c1bb86cdSEric Blake s->has_fallocate = true;
760c1bb86cdSEric Blake }
761230ff739SJohn Snow } else {
762230ff739SJohn Snow if (!(S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
7638d17adf3SDaniel P. Berrangé error_setg(errp, "'%s' driver requires '%s' to be either "
7648d17adf3SDaniel P. Berrangé "a character or block device",
7658d17adf3SDaniel P. Berrangé bs->drv->format_name, bs->filename);
766230ff739SJohn Snow ret = -EINVAL;
767230ff739SJohn Snow goto fail;
768230ff739SJohn Snow }
769230ff739SJohn Snow }
770774c726cSSam Li #ifdef CONFIG_BLKZONED
771774c726cSSam Li /*
772774c726cSSam Li * The kernel page cache does not reliably work for writes to SWR zones
773774c726cSSam Li * of zoned block device because it can not guarantee the order of writes.
774774c726cSSam Li */
775774c726cSSam Li if ((bs->bl.zoned != BLK_Z_NONE) &&
776774c726cSSam Li (!(s->open_flags & O_DIRECT))) {
777774c726cSSam Li error_setg(errp, "The driver supports zoned devices, and it requires "
778774c726cSSam Li "cache.direct=on, which was not specified.");
779774c726cSSam Li return -EINVAL; /* No host kernel page cache */
780774c726cSSam Li }
781774c726cSSam Li #endif
782230ff739SJohn Snow
783c1bb86cdSEric Blake if (S_ISBLK(st.st_mode)) {
784c1bb86cdSEric Blake #ifdef __linux__
785c1bb86cdSEric Blake /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do
786c1bb86cdSEric Blake * not rely on the contents of discarded blocks unless using O_DIRECT.
787c1bb86cdSEric Blake * Same for BLKZEROOUT.
788c1bb86cdSEric Blake */
789c1bb86cdSEric Blake if (!(bs->open_flags & BDRV_O_NOCACHE)) {
790c1bb86cdSEric Blake s->has_write_zeroes = false;
791c1bb86cdSEric Blake }
792c1bb86cdSEric Blake #endif
793c1bb86cdSEric Blake }
794c1bb86cdSEric Blake #ifdef __FreeBSD__
795c1bb86cdSEric Blake if (S_ISCHR(st.st_mode)) {
796c1bb86cdSEric Blake /*
797c1bb86cdSEric Blake * The file is a char device (disk), which on FreeBSD isn't behind
798c1bb86cdSEric Blake * a pager, so force all requests to be aligned. This is needed
799c1bb86cdSEric Blake * so QEMU makes sure all IO operations on the device are aligned
800c1bb86cdSEric Blake * to sector size, or else FreeBSD will reject them with EINVAL.
801c1bb86cdSEric Blake */
8025dbd0ce1SKevin Wolf s->force_alignment = true;
803c1bb86cdSEric Blake }
804c1bb86cdSEric Blake #endif
8055dbd0ce1SKevin Wolf s->needs_alignment = raw_needs_alignment(bs);
806c1bb86cdSEric Blake
807738301e1SKevin Wolf bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
8082f0c6e7aSKevin Wolf if (S_ISREG(st.st_mode)) {
8092f0c6e7aSKevin Wolf /* When extending regular files, we get zeros from the OS */
8102f0c6e7aSKevin Wolf bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
8112f0c6e7aSKevin Wolf }
812c1bb86cdSEric Blake ret = 0;
813c1bb86cdSEric Blake fail:
814a8c5cf27SKevin Wolf if (ret < 0 && s->fd != -1) {
815a8c5cf27SKevin Wolf qemu_close(s->fd);
816a8c5cf27SKevin Wolf }
817c1bb86cdSEric Blake if (filename && (bdrv_flags & BDRV_O_TEMPORARY)) {
818c1bb86cdSEric Blake unlink(filename);
819c1bb86cdSEric Blake }
820c1bb86cdSEric Blake qemu_opts_del(opts);
821c1bb86cdSEric Blake return ret;
822c1bb86cdSEric Blake }
823c1bb86cdSEric Blake
raw_open(BlockDriverState * bs,QDict * options,int flags,Error ** errp)824c1bb86cdSEric Blake static int raw_open(BlockDriverState *bs, QDict *options, int flags,
825c1bb86cdSEric Blake Error **errp)
826c1bb86cdSEric Blake {
827c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
828c1bb86cdSEric Blake
829c1bb86cdSEric Blake s->type = FTYPE_FILE;
830230ff739SJohn Snow return raw_open_common(bs, options, flags, 0, false, errp);
831c1bb86cdSEric Blake }
832c1bb86cdSEric Blake
/*
 * Phases of a two-phase permission-lock transition, driven by
 * raw_handle_perm_lock().
 */
typedef enum {
    RAW_PL_PREPARE, /* acquire any newly needed byte locks; may fail */
    RAW_PL_COMMIT,  /* new permissions accepted: drop locks no longer needed */
    RAW_PL_ABORT,   /* transition failed: restore locks for the old permissions */
} RawPermLockOp;
838244a5668SFam Zheng
/* Iterate @i over every bit position covered by the BLK_PERM_* mask. */
#define PERM_FOREACH(i) \
    for ((i) = 0; (1ULL << (i)) <= BLK_PERM_ALL; i++)
841244a5668SFam Zheng
842244a5668SFam Zheng /* Lock bytes indicated by @perm_lock_bits and @shared_perm_lock_bits in the
843244a5668SFam Zheng * file; if @unlock == true, also unlock the unneeded bytes.
844244a5668SFam Zheng * @shared_perm_lock_bits is the mask of all permissions that are NOT shared.
845244a5668SFam Zheng */
raw_apply_lock_bytes(BDRVRawState * s,int fd,uint64_t perm_lock_bits,uint64_t shared_perm_lock_bits,bool unlock,Error ** errp)8462996ffadSFam Zheng static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
847244a5668SFam Zheng uint64_t perm_lock_bits,
848244a5668SFam Zheng uint64_t shared_perm_lock_bits,
849244a5668SFam Zheng bool unlock, Error **errp)
850244a5668SFam Zheng {
851244a5668SFam Zheng int ret;
852244a5668SFam Zheng int i;
8532996ffadSFam Zheng uint64_t locked_perm, locked_shared_perm;
8542996ffadSFam Zheng
8552996ffadSFam Zheng if (s) {
8562996ffadSFam Zheng locked_perm = s->locked_perm;
8572996ffadSFam Zheng locked_shared_perm = s->locked_shared_perm;
8582996ffadSFam Zheng } else {
8592996ffadSFam Zheng /*
8602996ffadSFam Zheng * We don't have the previous bits, just lock/unlock for each of the
8612996ffadSFam Zheng * requested bits.
8622996ffadSFam Zheng */
8632996ffadSFam Zheng if (unlock) {
8642996ffadSFam Zheng locked_perm = BLK_PERM_ALL;
8652996ffadSFam Zheng locked_shared_perm = BLK_PERM_ALL;
8662996ffadSFam Zheng } else {
8672996ffadSFam Zheng locked_perm = 0;
8682996ffadSFam Zheng locked_shared_perm = 0;
8692996ffadSFam Zheng }
8702996ffadSFam Zheng }
871244a5668SFam Zheng
872244a5668SFam Zheng PERM_FOREACH(i) {
873244a5668SFam Zheng int off = RAW_LOCK_PERM_BASE + i;
8742996ffadSFam Zheng uint64_t bit = (1ULL << i);
8752996ffadSFam Zheng if ((perm_lock_bits & bit) && !(locked_perm & bit)) {
876d0a96155SMax Reitz ret = qemu_lock_fd(fd, off, 1, false);
877244a5668SFam Zheng if (ret) {
878797e3e38SDavid Edmondson raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
879797e3e38SDavid Edmondson off);
880244a5668SFam Zheng return ret;
8812996ffadSFam Zheng } else if (s) {
8822996ffadSFam Zheng s->locked_perm |= bit;
883244a5668SFam Zheng }
8842996ffadSFam Zheng } else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) {
885d0a96155SMax Reitz ret = qemu_unlock_fd(fd, off, 1);
886244a5668SFam Zheng if (ret) {
887797e3e38SDavid Edmondson error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
888244a5668SFam Zheng return ret;
8892996ffadSFam Zheng } else if (s) {
8902996ffadSFam Zheng s->locked_perm &= ~bit;
891244a5668SFam Zheng }
892244a5668SFam Zheng }
893244a5668SFam Zheng }
894244a5668SFam Zheng PERM_FOREACH(i) {
895244a5668SFam Zheng int off = RAW_LOCK_SHARED_BASE + i;
8962996ffadSFam Zheng uint64_t bit = (1ULL << i);
8972996ffadSFam Zheng if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) {
898d0a96155SMax Reitz ret = qemu_lock_fd(fd, off, 1, false);
899244a5668SFam Zheng if (ret) {
900797e3e38SDavid Edmondson raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
901797e3e38SDavid Edmondson off);
902244a5668SFam Zheng return ret;
9032996ffadSFam Zheng } else if (s) {
9042996ffadSFam Zheng s->locked_shared_perm |= bit;
905244a5668SFam Zheng }
9062996ffadSFam Zheng } else if (unlock && (locked_shared_perm & bit) &&
9072996ffadSFam Zheng !(shared_perm_lock_bits & bit)) {
908d0a96155SMax Reitz ret = qemu_unlock_fd(fd, off, 1);
909244a5668SFam Zheng if (ret) {
910797e3e38SDavid Edmondson error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
911244a5668SFam Zheng return ret;
9122996ffadSFam Zheng } else if (s) {
9132996ffadSFam Zheng s->locked_shared_perm &= ~bit;
914244a5668SFam Zheng }
915244a5668SFam Zheng }
916244a5668SFam Zheng }
917244a5668SFam Zheng return 0;
918244a5668SFam Zheng }
919244a5668SFam Zheng
920244a5668SFam Zheng /* Check "unshared" bytes implied by @perm and ~@shared_perm in the file. */
raw_check_lock_bytes(int fd,uint64_t perm,uint64_t shared_perm,Error ** errp)921d0a96155SMax Reitz static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
922244a5668SFam Zheng Error **errp)
923244a5668SFam Zheng {
924244a5668SFam Zheng int ret;
925244a5668SFam Zheng int i;
926244a5668SFam Zheng
927244a5668SFam Zheng PERM_FOREACH(i) {
928244a5668SFam Zheng int off = RAW_LOCK_SHARED_BASE + i;
929244a5668SFam Zheng uint64_t p = 1ULL << i;
930244a5668SFam Zheng if (perm & p) {
931d0a96155SMax Reitz ret = qemu_lock_fd_test(fd, off, 1, true);
932244a5668SFam Zheng if (ret) {
933244a5668SFam Zheng char *perm_name = bdrv_perm_names(p);
934797e3e38SDavid Edmondson
935797e3e38SDavid Edmondson raw_lock_error_setg_errno(errp, -ret,
936244a5668SFam Zheng "Failed to get \"%s\" lock",
937244a5668SFam Zheng perm_name);
938244a5668SFam Zheng g_free(perm_name);
939244a5668SFam Zheng return ret;
940244a5668SFam Zheng }
941244a5668SFam Zheng }
942244a5668SFam Zheng }
943244a5668SFam Zheng PERM_FOREACH(i) {
944244a5668SFam Zheng int off = RAW_LOCK_PERM_BASE + i;
945244a5668SFam Zheng uint64_t p = 1ULL << i;
946244a5668SFam Zheng if (!(shared_perm & p)) {
947d0a96155SMax Reitz ret = qemu_lock_fd_test(fd, off, 1, true);
948244a5668SFam Zheng if (ret) {
949244a5668SFam Zheng char *perm_name = bdrv_perm_names(p);
950797e3e38SDavid Edmondson
951797e3e38SDavid Edmondson raw_lock_error_setg_errno(errp, -ret,
952244a5668SFam Zheng "Failed to get shared \"%s\" lock",
953244a5668SFam Zheng perm_name);
954244a5668SFam Zheng g_free(perm_name);
955244a5668SFam Zheng return ret;
956244a5668SFam Zheng }
957244a5668SFam Zheng }
958244a5668SFam Zheng }
959244a5668SFam Zheng return 0;
960244a5668SFam Zheng }
961244a5668SFam Zheng
/*
 * Drive one phase of a permission change (see RawPermLockOp) by taking,
 * checking, or releasing the corresponding byte-range locks on s->fd.
 *
 * @new_perm / @new_shared are the permissions being transitioned to.
 * Returns 0 on success or a negative errno (with @errp set) if PREPARE
 * fails; COMMIT and ABORT only warn on unexpected unlock errors.
 */
static int raw_handle_perm_lock(BlockDriverState *bs,
                                RawPermLockOp op,
                                uint64_t new_perm, uint64_t new_shared,
                                Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret = 0;
    Error *local_err = NULL;

    if (!s->use_lock) {
        return 0;
    }

    /* Inactive nodes hold no locks, so there is nothing to transition. */
    if (bdrv_get_flags(bs) & BDRV_O_INACTIVE) {
        return 0;
    }

    switch (op) {
    case RAW_PL_PREPARE:
        if ((s->perm | new_perm) == s->perm &&
            (s->shared_perm & new_shared) == s->shared_perm)
        {
            /*
             * We are going to unlock bytes, it should not fail. If it fail due
             * to some fs-dependent permission-unrelated reasons (which occurs
             * sometimes on NFS and leads to abort in bdrv_replace_child) we
             * can't prevent such errors by any check here. And we ignore them
             * anyway in ABORT and COMMIT.
             */
            return 0;
        }
        /*
         * Lock for the union of old and new permissions (and of old and new
         * unshared bits), so the old state stays protected while the new one
         * is validated.
         */
        ret = raw_apply_lock_bytes(s, s->fd, s->perm | new_perm,
                                   ~s->shared_perm | ~new_shared,
                                   false, errp);
        if (!ret) {
            ret = raw_check_lock_bytes(s->fd, new_perm, new_shared, errp);
            if (!ret) {
                return 0;
            }
            error_append_hint(errp,
                              "Is another process using the image [%s]?\n",
                              bs->filename);
        }
        /* fall through to unlock bytes. */
    case RAW_PL_ABORT:
        /* Restore the lock set matching the old (still current) permissions. */
        raw_apply_lock_bytes(s, s->fd, s->perm, ~s->shared_perm,
                             true, &local_err);
        if (local_err) {
            /* Theoretically the above call only unlocks bytes and it cannot
             * fail. Something weird happened, report it.
             */
            warn_report_err(local_err);
        }
        break;
    case RAW_PL_COMMIT:
        /* Keep only the locks required by the new permissions. */
        raw_apply_lock_bytes(s, s->fd, new_perm, ~new_shared,
                             true, &local_err);
        if (local_err) {
            /* Theoretically the above call only unlocks bytes and it cannot
             * fail. Something weird happened, report it.
             */
            warn_report_err(local_err);
        }
        break;
    }
    return ret;
}
1029244a5668SFam Zheng
/*
 * Add @flag to the file status flags of @fd (read-modify-write of F_GETFL).
 * Returns 0 on success, -errno on failure.
 */
static int fcntl_setfl(int fd, int flag)
{
    int cur = fcntl(fd, F_GETFL);

    if (cur == -1) {
        return -errno;
    }
    return fcntl(fd, F_SETFL, cur | flag) == -1 ? -errno : 0;
}
1044ad24b679SMarc-André Lureau
/*
 * Return a file descriptor whose open flags match @flags/@perm, preferring
 * the cheapest path: reuse s->fd unchanged, dup + fcntl() when only
 * fcntl-changeable flags differ, otherwise reopen the file by name.
 *
 * The computed flags are stored into *@open_flags.  On success the returned
 * fd is either s->fd itself (unchanged flags) or a new fd owned by the
 * caller; on failure -1 is returned with @errp set.
 */
static int raw_reconfigure_getfd(BlockDriverState *bs, int flags,
                                 int *open_flags, uint64_t perm, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int fd = -1;
    int ret;
    bool has_writers = perm &
        (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_RESIZE);
    /* Flags that fcntl(F_SETFL) can change on an already-open fd. */
    int fcntl_flags = O_APPEND | O_NONBLOCK;
#ifdef O_NOATIME
    fcntl_flags |= O_NOATIME;
#endif

    *open_flags = 0;
    if (s->type == FTYPE_CD) {
        *open_flags |= O_NONBLOCK;
    }

    raw_parse_flags(flags, open_flags, has_writers);

#ifdef O_ASYNC
    /* Not all operating systems have O_ASYNC, and those that don't
     * will not let us track the state into rs->open_flags (typically
     * you achieve the same effect with an ioctl, for example I_SETSIG
     * on Solaris). But we do not use O_ASYNC, so that's fine.
     */
    assert((s->open_flags & O_ASYNC) == 0);
#endif

    if (*open_flags == s->open_flags) {
        /* We're lucky, the existing fd is fine */
        return s->fd;
    }

    if ((*open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) {
        /* dup the original fd */
        fd = qemu_dup(s->fd);
        if (fd >= 0) {
            ret = fcntl_setfl(fd, *open_flags);
            if (ret) {
                qemu_close(fd);
                fd = -1;
            }
        }
    }

    /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */
    if (fd == -1) {
        const char *normalized_filename = bs->filename;
        ret = raw_normalize_devicepath(&normalized_filename, errp);
        if (ret >= 0) {
            fd = qemu_open(normalized_filename, *open_flags, errp);
            if (fd == -1) {
                return -1;
            }
        }
        /* NOTE(review): if normalization fails, errp is set there and the
         * writable check below is skipped because fd is still -1. */
    }

    if (fd != -1 && (*open_flags & O_RDWR)) {
        ret = check_hdev_writable(fd);
        if (ret < 0) {
            qemu_close(fd);
            error_setg_errno(errp, -ret, "The device is not writable");
            return -1;
        }
    }

    return fd;
}
11145cec2870SKevin Wolf
raw_reopen_prepare(BDRVReopenState * state,BlockReopenQueue * queue,Error ** errp)1115c1bb86cdSEric Blake static int raw_reopen_prepare(BDRVReopenState *state,
1116c1bb86cdSEric Blake BlockReopenQueue *queue, Error **errp)
1117c1bb86cdSEric Blake {
1118c1bb86cdSEric Blake BDRVRawState *s;
1119c1bb86cdSEric Blake BDRVRawReopenState *rs;
112031be8a2aSStefan Hajnoczi QemuOpts *opts;
1121a6aeca0cSKevin Wolf int ret;
1122c1bb86cdSEric Blake
1123c1bb86cdSEric Blake assert(state != NULL);
1124c1bb86cdSEric Blake assert(state->bs != NULL);
1125c1bb86cdSEric Blake
1126c1bb86cdSEric Blake s = state->bs->opaque;
1127c1bb86cdSEric Blake
1128c1bb86cdSEric Blake state->opaque = g_new0(BDRVRawReopenState, 1);
1129c1bb86cdSEric Blake rs = state->opaque;
113031be8a2aSStefan Hajnoczi
113131be8a2aSStefan Hajnoczi /* Handle options changes */
113231be8a2aSStefan Hajnoczi opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
1133af175e85SMarkus Armbruster if (!qemu_opts_absorb_qdict(opts, state->options, errp)) {
113431be8a2aSStefan Hajnoczi ret = -EINVAL;
113531be8a2aSStefan Hajnoczi goto out;
113631be8a2aSStefan Hajnoczi }
113731be8a2aSStefan Hajnoczi
1138f357fcd8SStefan Hajnoczi rs->drop_cache = qemu_opt_get_bool_del(opts, "drop-cache", true);
11398d324575SAlberto Garcia rs->check_cache_dropped =
11408d324575SAlberto Garcia qemu_opt_get_bool_del(opts, "x-check-cache-dropped", false);
11418d324575SAlberto Garcia
11428d324575SAlberto Garcia /* This driver's reopen function doesn't currently allow changing
11438d324575SAlberto Garcia * other options, so let's put them back in the original QDict and
11448d324575SAlberto Garcia * bdrv_reopen_prepare() will detect changes and complain. */
11458d324575SAlberto Garcia qemu_opts_to_qdict(opts, state->options);
1146c1bb86cdSEric Blake
114772373e40SVladimir Sementsov-Ogievskiy /*
114872373e40SVladimir Sementsov-Ogievskiy * As part of reopen prepare we also want to create new fd by
114972373e40SVladimir Sementsov-Ogievskiy * raw_reconfigure_getfd(). But it wants updated "perm", when in
115072373e40SVladimir Sementsov-Ogievskiy * bdrv_reopen_multiple() .bdrv_reopen_prepare() callback called prior to
11513202d8e4SMichael Tokarev * permission update. Happily, permission update is always a part
11523202d8e4SMichael Tokarev * (a separate stage) of bdrv_reopen_multiple() so we can rely on this
11533202d8e4SMichael Tokarev * fact and reconfigure fd in raw_check_perm().
115472373e40SVladimir Sementsov-Ogievskiy */
1155c1bb86cdSEric Blake
1156e0c9cf3aSKevin Wolf s->reopen_state = state;
1157a6aeca0cSKevin Wolf ret = 0;
115872373e40SVladimir Sementsov-Ogievskiy
115931be8a2aSStefan Hajnoczi out:
116031be8a2aSStefan Hajnoczi qemu_opts_del(opts);
1161c1bb86cdSEric Blake return ret;
1162c1bb86cdSEric Blake }
1163c1bb86cdSEric Blake
/* Commit a prepared reopen: adopt the staged settings and drop the state. */
static void raw_reopen_commit(BDRVReopenState *state)
{
    BDRVRawReopenState *rs = state->opaque;
    BDRVRawState *s = state->bs->opaque;

    /* Adopt everything staged by raw_reopen_prepare(). */
    s->drop_cache = rs->drop_cache;
    s->check_cache_dropped = rs->check_cache_dropped;
    s->open_flags = rs->open_flags;

    g_free(rs);
    state->opaque = NULL;

    assert(s->reopen_state == state);
    s->reopen_state = NULL;
}
1178c1bb86cdSEric Blake
1179c1bb86cdSEric Blake
/* Abort a reopen: discard the staged state without touching s. */
static void raw_reopen_abort(BDRVReopenState *state)
{
    BDRVRawReopenState *rs = state->opaque;
    BDRVRawState *s = state->bs->opaque;

    if (!rs) {
        /* Prepare never got far enough to allocate anything. */
        return;
    }

    g_free(rs);
    state->opaque = NULL;

    assert(s->reopen_state == state);
    s->reopen_state = NULL;
}
1196c1bb86cdSEric Blake
/*
 * Query the host device's maximum hardware transfer size in bytes via
 * BLKSECTGET.  Block nodes report a 512-byte-sector count; SG character
 * devices report bytes directly.  Returns -errno on ioctl failure and
 * -ENOSYS where BLKSECTGET does not exist.
 */
static int hdev_get_max_hw_transfer(int fd, struct stat *st)
{
#ifdef BLKSECTGET
    if (S_ISBLK(st->st_mode)) {
        unsigned short sectors = 0;

        if (ioctl(fd, BLKSECTGET, &sectors) == 0) {
            return sectors * 512;
        }
    } else {
        int bytes = 0;

        if (ioctl(fd, BLKSECTGET, &bytes) == 0) {
            return bytes;
        }
    }
    return -errno;
#else
    return -ENOSYS;
#endif
}
1216c1bb86cdSEric Blake
/*
 * Get a sysfs attribute value as character string.
 *
 * Reads /sys/dev/block/<major>:<minor>/queue/<attribute> for the block
 * device described by @st into a newly allocated, NUL-terminated *@val
 * (caller frees).  A trailing newline, if present, is stripped.
 *
 * Returns 0 on success, -ENOTSUP if @st is not a block device, -ENOENT if
 * the attribute file cannot be read.
 */
#ifdef CONFIG_LINUX
static int get_sysfs_str_val(struct stat *st, const char *attribute,
                             char **val) {
    g_autofree char *sysfspath = NULL;
    size_t len;

    if (!S_ISBLK(st->st_mode)) {
        return -ENOTSUP;
    }

    sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/%s",
                                major(st->st_rdev), minor(st->st_rdev),
                                attribute);
    if (!g_file_get_contents(sysfspath, val, &len, NULL)) {
        return -ENOENT;
    }

    /*
     * The file is usually terminated with '\n'; guard len == 0 so an empty
     * attribute file cannot make us read one byte before the buffer.
     */
    if (len > 0 && (*val)[len - 1] == '\n') {
        (*val)[len - 1] = '\0';
    }
    return 0;
}
#endif
1246a735b56eSSam Li
#if defined(CONFIG_BLKZONED)
/*
 * Translate the sysfs "zoned" attribute of the device described by @st into
 * a BlockZoneModel stored in *@zoned.  Returns 0 on success, a negative
 * errno from get_sysfs_str_val(), or -ENOTSUP for an unknown model string.
 */
static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned)
{
    static const struct {
        const char *name;
        BlockZoneModel model;
    } models[] = {
        { "host-managed", BLK_Z_HM },
        { "host-aware", BLK_Z_HA },
        { "none", BLK_Z_NONE },
    };
    g_autofree char *val = NULL;
    size_t i;
    int ret;

    ret = get_sysfs_str_val(st, "zoned", &val);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < sizeof(models) / sizeof(models[0]); i++) {
        if (strcmp(val, models[i].name) == 0) {
            *zoned = models[i].model;
            return 0;
        }
    }
    return -ENOTSUP;
}
#endif /* defined(CONFIG_BLKZONED) */
1270a735b56eSSam Li
/*
 * Get a sysfs attribute value as a long integer.
 *
 * Returns the parsed value (>= 0) on success, or a negative errno from
 * get_sysfs_str_val() / qemu_strtol() on failure.
 */
#ifdef CONFIG_LINUX
static long get_sysfs_long_val(struct stat *st, const char *attribute)
{
    g_autofree char *str = NULL;
    const char *end;
    long val;
    int ret;

    ret = get_sysfs_str_val(st, attribute, &str);
    if (ret < 0) {
        return ret;
    }

    /* The file is ended with '\n', pass 'end' to accept that. */
    ret = qemu_strtol(str, &end, 10, &val);
    if (ret == 0 && end && *end == '\0') {
        /*
         * Return the long directly: funnelling it through the int 'ret'
         * (as before) silently truncated attribute values larger than
         * INT_MAX, e.g. byte-sized limits on large devices.
         */
        return val;
    }
    return ret;
}
#endif
1295a735b56eSSam Li
/*
 * Return the maximum number of scatter/gather segments the host device
 * accepts per request: via SG_GET_SG_TABLESIZE for SG character devices,
 * via the sysfs "max_segments" attribute otherwise.  Negative errno on
 * failure or unsupported platforms.
 */
static int hdev_get_max_segments(int fd, struct stat *st)
{
#ifdef CONFIG_LINUX
    if (S_ISCHR(st->st_mode)) {
        int tablesize;

        if (ioctl(fd, SG_GET_SG_TABLESIZE, &tablesize) == 0) {
            return tablesize;
        }
        return -ENOTSUP;
    }
    return get_sysfs_long_val(st, "max_segments");
#else
    return -ENOTSUP;
#endif
}
13129103f1ceSFam Zheng
#if defined(CONFIG_BLKZONED)
/*
 * If the reset_all flag is true, then the wps of zone whose state is
 * not readonly or offline should be all reset to the start sector.
 * Else, take the real wp of the device.
 *
 * Reads zone descriptors via BLKREPORTZONE starting at byte @offset for
 * @nrz zones, and fills the corresponding entries of bs->wps->wp[] (one
 * 64-bit word per zone, indexed from offset / zone_size).
 * Returns 0 on success, -errno on ioctl failure.
 */
static int get_zones_wp(BlockDriverState *bs, int fd, int64_t offset,
                        unsigned int nrz, bool reset_all)
{
    struct blk_zone *blkz;
    size_t rep_size;
    uint64_t sector = offset >> BDRV_SECTOR_BITS;
    BlockZoneWps *wps = bs->wps;
    unsigned int j = offset / bs->bl.zone_size;  /* index into wps->wp[] */
    unsigned int n = 0, i = 0;
    int ret;
    /* One report header followed by nrz zone descriptors. */
    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
    g_autofree struct blk_zone_report *rep = NULL;

    rep = g_malloc(rep_size);
    blkz = (struct blk_zone *)(rep + 1);
    /* The kernel may return fewer zones than asked; loop until we have nrz. */
    while (n < nrz) {
        memset(rep, 0, rep_size);
        rep->sector = sector;
        rep->nr_zones = nrz - n;

        do {
            ret = ioctl(fd, BLKREPORTZONE, rep);
        } while (ret != 0 && errno == EINTR);
        if (ret != 0) {
            error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
                         fd, offset, errno);
            return -errno;
        }

        if (!rep->nr_zones) {
            break;
        }

        for (i = 0; i < rep->nr_zones; ++i, ++n, ++j) {
            /*
             * The wp tracking cares only about sequential writes required and
             * sequential write preferred zones so that the wp can advance to
             * the right location.
             * Use the most significant bit of the wp location to indicate the
             * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
             */
            if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) {
                wps->wp[j] |= 1ULL << 63;
            } else {
                switch(blkz[i].cond) {
                case BLK_ZONE_COND_FULL:
                case BLK_ZONE_COND_READONLY:
                    /* Zone not writable */
                    wps->wp[j] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS;
                    break;
                case BLK_ZONE_COND_OFFLINE:
                    /* Zone not writable nor readable */
                    wps->wp[j] = (blkz[i].start) << BDRV_SECTOR_BITS;
                    break;
                default:
                    if (reset_all) {
                        wps->wp[j] = blkz[i].start << BDRV_SECTOR_BITS;
                    } else {
                        wps->wp[j] = blkz[i].wp << BDRV_SECTOR_BITS;
                    }
                    break;
                }
            }
        }
        /* Continue the report after the last zone returned in this batch. */
        sector = blkz[i - 1].start + blkz[i - 1].len;
    }

    return 0;
}
1388a3c41f06SSam Li
/*
 * Best-effort refresh of the cached write pointers for @nrz zones starting
 * at byte @offset.  Failures are only logged: the caller's I/O has already
 * completed and cannot be failed retroactively, so a stale wp cache is the
 * worst outcome here.
 */
static void update_zones_wp(BlockDriverState *bs, int fd, int64_t offset,
                            unsigned int nrz)
{
    /* Pass the bool parameter as a proper bool, not a bare integer */
    if (get_zones_wp(bs, fd, offset, nrz, false) < 0) {
        error_report("update zone wp failed");
    }
}
1396a3c41f06SSam Li
/*
 * Probe the zoned characteristics of the host device backing @bs (zone
 * model, zone size, zone count and limits) from its sysfs attributes and
 * build the cached per-zone write pointer array.  On any failure, or when
 * the device is not zoned, the device is (re-)marked as BLK_Z_NONE and the
 * wp cache is released.
 */
static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
                                     Error **errp)
{
    BDRVRawState *s = bs->opaque;
    BlockZoneModel zoned = BLK_Z_NONE;
    int ret;

    ret = get_sysfs_zoned_model(st, &zoned);
    if (ret < 0 || zoned == BLK_Z_NONE) {
        goto no_zoned;
    }
    bs->bl.zoned = zoned;

    /* max_open_zones / max_active_zones are optional; ignore read errors */
    ret = get_sysfs_long_val(st, "max_open_zones");
    if (ret >= 0) {
        bs->bl.max_open_zones = ret;
    }

    ret = get_sysfs_long_val(st, "max_active_zones");
    if (ret >= 0) {
        bs->bl.max_active_zones = ret;
    }

    /*
     * The zoned device must at least have zone size and nr_zones fields.
     */
    ret = get_sysfs_long_val(st, "chunk_sectors");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Unable to read chunk_sectors "
                                     "sysfs attribute");
        goto no_zoned;
    } else if (!ret) {
        error_setg(errp, "Read 0 from chunk_sectors sysfs attribute");
        goto no_zoned;
    }
    bs->bl.zone_size = ret << BDRV_SECTOR_BITS;

    ret = get_sysfs_long_val(st, "nr_zones");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Unable to read nr_zones "
                                     "sysfs attribute");
        goto no_zoned;
    } else if (!ret) {
        error_setg(errp, "Read 0 from nr_zones sysfs attribute");
        goto no_zoned;
    }
    bs->bl.nr_zones = ret;

    /* zone_append_max_bytes is optional; 0 or failure leaves it unset */
    ret = get_sysfs_long_val(st, "zone_append_max_bytes");
    if (ret > 0) {
        bs->bl.max_append_sectors = ret >> BDRV_SECTOR_BITS;
    }

    ret = get_sysfs_long_val(st, "physical_block_size");
    if (ret >= 0) {
        bs->bl.write_granularity = ret;
    }

    /* The refresh_limits() function can be called multiple times. */
    g_free(bs->wps);
    bs->wps = g_malloc(sizeof(BlockZoneWps) +
            sizeof(int64_t) * bs->bl.nr_zones);
    /* Populate the wp cache with the device's current write pointers */
    ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "report wps failed");
        goto no_zoned;
    }
    qemu_co_mutex_init(&bs->wps->colock);
    return;

no_zoned:
    bs->bl.zoned = BLK_Z_NONE;
    g_free(bs->wps);
    bs->wps = NULL;
}
14726d43eaa3SSam Li #else /* !defined(CONFIG_BLKZONED) */
/* Zoned block device support is not compiled in: always report non-zoned. */
static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
                                     Error **errp)
{
    bs->bl.zoned = BLK_Z_NONE;
}
14786d43eaa3SSam Li #endif /* !defined(CONFIG_BLKZONED) */
14796d43eaa3SSam Li
/*
 * Refresh the driver's advertised I/O limits (bs->bl): memory alignment,
 * transfer/iov maxima for host devices, and zoned device limits.  Probes
 * that fail leave the corresponding limits at their defaults.
 */
static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    struct stat st;

    s->needs_alignment = raw_needs_alignment(bs);
    raw_probe_alignment(bs, s->fd, errp);

    bs->bl.min_mem_alignment = s->buf_align;
    bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size());

    /*
     * Maximum transfers are best effort, so it is okay to ignore any
     * errors. That said, based on the man page errors in fstat would be
     * very much unexpected; the only possible case seems to be ENOMEM.
     */
    if (fstat(s->fd, &st)) {
        return;
    }

#if defined(__APPLE__) && (__MACH__)
    /* On Darwin, derive transfer/discard hints from the filesystem */
    struct statfs buf;

    if (!fstatfs(s->fd, &buf)) {
        bs->bl.opt_transfer = buf.f_iosize;
        bs->bl.pdiscard_alignment = buf.f_bsize;
    }
#endif

    /* Hardware limits only apply to SG nodes and block devices */
    if (bdrv_is_sg(bs) || S_ISBLK(st.st_mode)) {
        int ret = hdev_get_max_hw_transfer(s->fd, &st);

        if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) {
            bs->bl.max_hw_transfer = ret;
        }

        ret = hdev_get_max_segments(s->fd, &st);
        if (ret > 0) {
            bs->bl.max_hw_iov = ret;
        }
    }

    raw_refresh_zoned_limits(bs, &st, errp);
}
1524c1bb86cdSEric Blake
/*
 * Probe whether @fd refers to an s390 DASD by issuing the BIODASDINFO2
 * ioctl.  Returns 0 on a DASD, negative otherwise.  On hosts without the
 * BIODASDINFO2 ioctl the probe always fails.
 */
static int check_for_dasd(int fd)
{
#ifdef BIODASDINFO2
    struct dasd_information2_t info = {0};

    return ioctl(fd, BIODASDINFO2, &info);
#else
    return -1;
#endif
}
1535c1bb86cdSEric Blake
1536c1bb86cdSEric Blake /**
1537c1bb86cdSEric Blake * Try to get @bs's logical and physical block size.
1538c1bb86cdSEric Blake * On success, store them in @bsz and return zero.
1539c1bb86cdSEric Blake * On failure, return negative errno.
1540c1bb86cdSEric Blake */
static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    /*
     * Block sizes are only reported for DASDs and zoned devices; anything
     * else is left to generic probing.
     */
    if (check_for_dasd(s->fd) < 0 && bs->bl.zoned == BLK_Z_NONE) {
        return -ENOTSUP;
    }

    ret = probe_logical_blocksize(s->fd, &bsz->log);
    if (ret < 0) {
        return ret;
    }

    return probe_physical_blocksize(s->fd, &bsz->phys);
}
1559c1bb86cdSEric Blake
1560c1bb86cdSEric Blake /**
1561c1bb86cdSEric Blake * Try to get @bs's geometry: cyls, heads, sectors.
1562c1bb86cdSEric Blake * On success, store them in @geo and return 0.
1563c1bb86cdSEric Blake * On failure return -errno.
1564c1bb86cdSEric Blake * (Allows block driver to assign default geometry values that guest sees)
1565c1bb86cdSEric Blake */
1566c1bb86cdSEric Blake #ifdef __linux__
static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
{
    BDRVRawState *s = bs->opaque;
    struct hd_geometry ioctl_geo = {0};

    /* If DASD, get its geometry */
    if (check_for_dasd(s->fd) < 0) {
        return -ENOTSUP;
    }
    if (ioctl(s->fd, HDIO_GETGEO, &ioctl_geo) < 0) {
        return -errno;
    }
    /* HDIO_GETGEO may return success even though geo contains zeros
       (e.g. certain multipath setups) */
    if (!ioctl_geo.heads || !ioctl_geo.sectors || !ioctl_geo.cylinders) {
        return -ENOTSUP;
    }
    /* Do not return a geometry for partition (nonzero start offset) */
    if (ioctl_geo.start != 0) {
        return -ENOTSUP;
    }
    /* Copy the validated values into the caller-provided struct */
    geo->heads = ioctl_geo.heads;
    geo->sectors = ioctl_geo.sectors;
    geo->cylinders = ioctl_geo.cylinders;

    return 0;
}
1594c1bb86cdSEric Blake #else /* __linux__ */
/* Geometry probing is Linux-only; other hosts report "not supported". */
static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
{
    return -ENOTSUP;
}
1599c1bb86cdSEric Blake #endif
1600c1bb86cdSEric Blake
160103425671SKevin Wolf #if defined(__linux__)
handle_aiocb_ioctl(void * opaque)160203425671SKevin Wolf static int handle_aiocb_ioctl(void *opaque)
1603c1bb86cdSEric Blake {
160403425671SKevin Wolf RawPosixAIOData *aiocb = opaque;
1605c1bb86cdSEric Blake int ret;
1606c1bb86cdSEric Blake
160737b0b24eSNikita Ivanov ret = RETRY_ON_EINTR(
160837b0b24eSNikita Ivanov ioctl(aiocb->aio_fildes, aiocb->ioctl.cmd, aiocb->ioctl.buf)
160937b0b24eSNikita Ivanov );
1610c1bb86cdSEric Blake if (ret == -1) {
1611c1bb86cdSEric Blake return -errno;
1612c1bb86cdSEric Blake }
1613c1bb86cdSEric Blake
1614c1bb86cdSEric Blake return 0;
1615c1bb86cdSEric Blake }
161603425671SKevin Wolf #endif /* linux */
1617c1bb86cdSEric Blake
/*
 * Thread-pool work function: flush the file descriptor's data to stable
 * storage.  Returns 0 on success, a negative errno on failure.  Once a
 * flush of a writeback-cached file has failed, every later flush fails
 * with the same error (see the long comment below for the rationale).
 */
static int handle_aiocb_flush(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    BDRVRawState *s = aiocb->bs->opaque;
    int ret;

    /* A previously failed fsync() poisoned the cache state permanently;
     * keep reporting the saved errno. */
    if (s->page_cache_inconsistent) {
        return -s->page_cache_inconsistent;
    }

    ret = qemu_fdatasync(aiocb->aio_fildes);
    if (ret == -1) {
        trace_file_flush_fdatasync_failed(errno);

        /* There is no clear definition of the semantics of a failing fsync(),
         * so we may have to assume the worst. The sad truth is that this
         * assumption is correct for Linux. Some pages are now probably marked
         * clean in the page cache even though they are inconsistent with the
         * on-disk contents. The next fdatasync() call would succeed, but no
         * further writeback attempt will be made. We can't get back to a state
         * in which we know what is on disk (we would have to rewrite
         * everything that was touched since the last fdatasync() at least), so
         * make bdrv_flush() fail permanently. Given that the behaviour isn't
         * really defined, I have little hope that other OSes are doing better.
         *
         * Obviously, this doesn't affect O_DIRECT, which bypasses the page
         * cache. */
        if ((s->open_flags & O_DIRECT) == 0) {
            s->page_cache_inconsistent = errno;
        }
        return -errno;
    }
    return 0;
}
1652c1bb86cdSEric Blake
#ifdef CONFIG_PREADV

/* Whether vectored preadv/pwritev can be used; cleared at runtime if the
 * first call fails with ENOSYS (see handle_aiocb_rw()). */
static bool preadv_present = true;

/* Thin wrapper so both build configurations share one call site. */
static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

/* Host lacks preadv/pwritev: the wrappers always report ENOSYS so callers
 * fall back to the linearizing pread/pwrite path. */
static bool preadv_present = false;

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif
1686c1bb86cdSEric Blake
/*
 * Issue the request's scatter/gather list with a single vectored
 * preadv/pwritev call, retrying on EINTR.  Writes (including zone appends)
 * use pwritev, everything else preadv.  Returns the byte count on success
 * or a negative errno.
 */
static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb)
{
    ssize_t len;

    len = RETRY_ON_EINTR(
        (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) ?
            qemu_pwritev(aiocb->aio_fildes,
                         aiocb->io.iov,
                         aiocb->io.niov,
                         aiocb->aio_offset) :
            qemu_preadv(aiocb->aio_fildes,
                        aiocb->io.iov,
                        aiocb->io.niov,
                        aiocb->aio_offset)
    );

    if (len == -1) {
        return -errno;
    }
    return len;
}
1708c1bb86cdSEric Blake
1709c1bb86cdSEric Blake /*
1710c1bb86cdSEric Blake * Read/writes the data to/from a given linear buffer.
1711c1bb86cdSEric Blake *
1712c1bb86cdSEric Blake * Returns the number of bytes handles or -errno in case of an error. Short
1713c1bb86cdSEric Blake * reads are only returned if the end of the file is reached.
1714c1bb86cdSEric Blake */
/*
 * Read/writes the data to/from a given linear buffer.
 *
 * Returns the number of bytes handles or -errno in case of an error. Short
 * reads are only returned if the end of the file is reached.
 */
static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf)
{
    ssize_t offset = 0;
    ssize_t len;

    /* Loop until the whole request is transferred; pread/pwrite may
     * legitimately return short counts. */
    while (offset < aiocb->aio_nbytes) {
        if (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) {
            len = pwrite(aiocb->aio_fildes,
                         (const char *)buf + offset,
                         aiocb->aio_nbytes - offset,
                         aiocb->aio_offset + offset);
        } else {
            len = pread(aiocb->aio_fildes,
                        buf + offset,
                        aiocb->aio_nbytes - offset,
                        aiocb->aio_offset + offset);
        }
        if (len == -1 && errno == EINTR) {
            /* Interrupted by a signal: retry the same chunk */
            continue;
        } else if (len == -1 && errno == EINVAL &&
                   (aiocb->bs->open_flags & BDRV_O_NOCACHE) &&
                   !(aiocb->aio_type & QEMU_AIO_WRITE) &&
                   offset > 0) {
            /* O_DIRECT pread() may fail with EINVAL when offset is unaligned
             * after a short read. Assume that O_DIRECT short reads only occur
             * at EOF. Therefore this is a short read, not an I/O error.
             */
            break;
        } else if (len == -1) {
            offset = -errno;
            break;
        } else if (len == 0) {
            /* End of file reached: return the short count */
            break;
        }
        offset += len;
    }

    return offset;
}
1754c1bb86cdSEric Blake
/*
 * Thread-pool work function for read/write requests.  Chooses the cheapest
 * strategy available: a single plain pread/pwrite for one aligned buffer,
 * vectored preadv/pwritev for an aligned iovec list, or a bounce buffer
 * that linearizes the segments as a last resort.  Returns 0 on success
 * (short reads are padded with zeroes), -EINVAL for a short write, or a
 * negative errno.
 */
static int handle_aiocb_rw(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    ssize_t nbytes;
    char *buf;

    if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
        /*
         * If there is just a single buffer, and it is properly aligned
         * we can just use plain pread/pwrite without any problems.
         */
        if (aiocb->io.niov == 1) {
            nbytes = handle_aiocb_rw_linear(aiocb, aiocb->io.iov->iov_base);
            goto out;
        }
        /*
         * We have more than one iovec, and all are properly aligned.
         *
         * Try preadv/pwritev first and fall back to linearizing the
         * buffer if it's not supported.
         */
        if (preadv_present) {
            nbytes = handle_aiocb_rw_vector(aiocb);
            if (nbytes == aiocb->aio_nbytes ||
                (nbytes < 0 && nbytes != -ENOSYS)) {
                goto out;
            }
            /* -ENOSYS: remember that the host lacks preadv/pwritev */
            preadv_present = false;
        }

        /*
         * XXX(hch): short read/write. no easy way to handle the reminder
         * using these interfaces. For now retry using plain
         * pread/pwrite?
         */
    }

    /*
     * Ok, we have to do it the hard way, copy all segments into
     * a single aligned buffer.
     */
    buf = qemu_try_blockalign(aiocb->bs, aiocb->aio_nbytes);
    if (buf == NULL) {
        nbytes = -ENOMEM;
        goto out;
    }

    if (aiocb->aio_type & QEMU_AIO_WRITE) {
        /* Gather all source segments into the bounce buffer */
        char *p = buf;
        int i;

        for (i = 0; i < aiocb->io.niov; ++i) {
            memcpy(p, aiocb->io.iov[i].iov_base, aiocb->io.iov[i].iov_len);
            p += aiocb->io.iov[i].iov_len;
        }
        assert(p - buf == aiocb->aio_nbytes);
    }

    nbytes = handle_aiocb_rw_linear(aiocb, buf);
    if (!(aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))) {
        /* Scatter the read data back into the caller's iovec */
        char *p = buf;
        size_t count = aiocb->aio_nbytes, copy;
        int i;

        for (i = 0; i < aiocb->io.niov && count; ++i) {
            copy = count;
            if (copy > aiocb->io.iov[i].iov_len) {
                copy = aiocb->io.iov[i].iov_len;
            }
            memcpy(aiocb->io.iov[i].iov_base, p, copy);
            assert(count >= copy);
            p += copy;
            count -= copy;
        }
        assert(count == 0);
    }
    qemu_vfree(buf);

out:
    if (nbytes == aiocb->aio_nbytes) {
        return 0;
    } else if (nbytes >= 0 && nbytes < aiocb->aio_nbytes) {
        if (aiocb->aio_type & QEMU_AIO_WRITE) {
            /* Short writes are errors; the caller expects all-or-nothing */
            return -EINVAL;
        } else {
            /* Short read (EOF): pad the remainder of the iovec with zeroes */
            iov_memset(aiocb->io.iov, aiocb->io.niov, nbytes,
                       0, aiocb->aio_nbytes - nbytes);
            return 0;
        }
    } else {
        assert(nbytes < 0);
        return nbytes;
    }
}
1849c1bb86cdSEric Blake
18500dfc7af2SAkihiko Odaki #if defined(CONFIG_FALLOCATE) || defined(BLKZEROOUT) || defined(BLKDISCARD)
/*
 * Collapse the various "operation not supported" errno flavours that
 * different kernels/ioctls produce into a single -ENOTSUP so callers can
 * test for one value; any other error is passed through unchanged.
 */
static int translate_err(int err)
{
    switch (err) {
    case -ENODEV:
    case -ENOSYS:
    case -EOPNOTSUPP:
    case -ENOTTY:
        return -ENOTSUP;
    default:
        return err;
    }
}
18590dfc7af2SAkihiko Odaki #endif
1860c1bb86cdSEric Blake
1861c1bb86cdSEric Blake #ifdef CONFIG_FALLOCATE
/*
 * Call fallocate(2), retrying while it is interrupted by signals.
 * Returns 0 on success, otherwise a negative errno with the common
 * "unsupported" flavours folded into -ENOTSUP via translate_err().
 */
static int do_fallocate(int fd, int mode, off_t offset, off_t len)
{
    int ret;

    do {
        ret = fallocate(fd, mode, offset, len);
    } while (ret != 0 && errno == EINTR);

    return ret == 0 ? 0 : translate_err(-errno);
}
1871c1bb86cdSEric Blake #endif
1872c1bb86cdSEric Blake
/*
 * Zero a byte range on a host block device with the BLKZEROOUT ioctl.
 * Returns 0 on success and -ENOTSUP when the ioctl is unavailable, has
 * been disabled after a previous failure, or may not be used because the
 * caller forbids slow fallbacks.
 */
static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb)
{
    int ret = -ENOTSUP;
    BDRVRawState *s = aiocb->bs->opaque;

    /* BLKZEROOUT already failed with ENOTSUP earlier; don't retry */
    if (!s->has_write_zeroes) {
        return -ENOTSUP;
    }

#ifdef BLKZEROOUT
    /* The BLKZEROOUT implementation in the kernel doesn't set
     * BLKDEV_ZERO_NOFALLBACK, so we can't call this if we have to avoid slow
     * fallbacks. */
    if (!(aiocb->aio_type & QEMU_AIO_NO_FALLBACK)) {
        do {
            uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
            if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
                return 0;
            }
        } while (errno == EINTR);

        ret = translate_err(-errno);
        if (ret == -ENOTSUP) {
            /* Remember the failure so future requests skip the ioctl */
            s->has_write_zeroes = false;
        }
    }
#endif

    return ret;
}
1903c1bb86cdSEric Blake
/*
 * Thread-pool work function: write zeroes to the request's byte range.
 * Tries progressively weaker mechanisms: BLKZEROOUT for block devices,
 * FALLOC_FL_ZERO_RANGE, punch-hole + re-allocate, and finally plain
 * fallocate() when merely extending the file.  Each mechanism that fails
 * with ENOTSUP is disabled for subsequent requests.  Returns -ENOTSUP if
 * no mechanism worked so the caller can fall back to writing zero buffers.
 */
static int handle_aiocb_write_zeroes(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
#ifdef CONFIG_FALLOCATE
    BDRVRawState *s = aiocb->bs->opaque;
    int64_t len;
#endif

    if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
        return handle_aiocb_write_zeroes_block(aiocb);
    }

#ifdef CONFIG_FALLOCATE_ZERO_RANGE
    if (s->has_write_zeroes) {
        int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE,
                               aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == -ENOTSUP) {
            s->has_write_zeroes = false;
        } else if (ret == 0 || ret != -EINVAL) {
            return ret;
        }
        /*
         * Note: Some file systems do not like unaligned byte ranges, and
         * return EINVAL in such a case, though they should not do it according
         * to the man-page of fallocate(). Thus we simply ignore this return
         * value and try the other fallbacks instead.
         */
    }
#endif

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
    if (s->has_discard && s->has_fallocate) {
        /* Punch a hole, then re-allocate it so it reads back as zeroes */
        int ret = do_fallocate(s->fd,
                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                               aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == 0) {
            ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
            if (ret == 0 || ret != -ENOTSUP) {
                return ret;
            }
            s->has_fallocate = false;
        } else if (ret == -EINVAL) {
            /*
             * Some file systems like older versions of GPFS do not like un-
             * aligned byte ranges, and return EINVAL in such a case, though
             * they should not do it according to the man-page of fallocate().
             * Warn about the bad filesystem and try the final fallback instead.
             */
            warn_report_once("Your file system is misbehaving: "
                             "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. "
                             "Please report this bug to your file system "
                             "vendor.");
        } else if (ret != -ENOTSUP) {
            return ret;
        } else {
            s->has_discard = false;
        }
    }
#endif

#ifdef CONFIG_FALLOCATE
    /* Last resort: we are trying to extend the file with zeroed data. This
     * can be done via fallocate(fd, 0) */
    len = raw_getlength(aiocb->bs);
    if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) {
        int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == 0 || ret != -ENOTSUP) {
            return ret;
        }
        s->has_fallocate = false;
    }
#endif

    return -ENOTSUP;
}
1979c1bb86cdSEric Blake
/*
 * Thread-pool work function: write zeroes AND deallocate (unmap) the byte
 * range if possible.  First attempts a hole punch, which does both at once;
 * if that is unavailable, falls back to plain write-zeroes without
 * unmapping.
 */
static int handle_aiocb_write_zeroes_unmap(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    BDRVRawState *s G_GNUC_UNUSED = aiocb->bs->opaque;

    /* First try to write zeros and unmap at the same time */

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
    int ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                           aiocb->aio_offset, aiocb->aio_nbytes);
    switch (ret) {
    case -ENOTSUP:
    case -EINVAL:
    case -EBUSY:
        /* Recoverable failures: fall through to the write-zeroes fallback */
        break;
    default:
        return ret;
    }
#endif

    /* If we couldn't manage to unmap while guaranteed that the area reads as
     * all-zero afterwards, just write zeroes without unmapping */
    return handle_aiocb_write_zeroes(aiocb);
}
200434fa110eSKevin Wolf
20051efad060SFam Zheng #ifndef HAVE_COPY_FILE_RANGE
/*
 * Fallback for libc versions that lack a copy_file_range() wrapper:
 * invoke the raw syscall when the kernel exports it, otherwise fail
 * with ENOSYS (callers treat that as "not supported").
 *
 * Mirrors the copy_file_range(2) contract: returns the number of bytes
 * copied, or -1 with errno set on error.
 */
static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd,
                             off_t *out_off, size_t len, unsigned int flags)
{
#ifdef __NR_copy_file_range
    return syscall(__NR_copy_file_range, in_fd, in_off, out_fd,
                   out_off, len, flags);
#else
    /* Syscall number not known on this platform: report "unimplemented" */
    errno = ENOSYS;
    return -1;
#endif
}
20171efad060SFam Zheng #endif
20181efad060SFam Zheng
20196d43eaa3SSam Li /*
20206d43eaa3SSam Li * parse_zone - Fill a zone descriptor
20216d43eaa3SSam Li */
20226d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
static inline int parse_zone(struct BlockZoneDescriptor *zone,
                             const struct blk_zone *blkz) {
    /* Kernel reports in 512-byte sectors; convert to bytes for the block
     * layer by shifting by BDRV_SECTOR_BITS. */
    zone->start = blkz->start << BDRV_SECTOR_BITS;
    zone->length = blkz->len << BDRV_SECTOR_BITS;
    zone->wp = blkz->wp << BDRV_SECTOR_BITS;

#ifdef HAVE_BLK_ZONE_REP_CAPACITY
    zone->cap = blkz->capacity << BDRV_SECTOR_BITS;
#else
    /* Old kernel headers without zone capacity: assume cap == length */
    zone->cap = blkz->len << BDRV_SECTOR_BITS;
#endif

    /* Map the kernel zone type to QEMU's BlockZoneType */
    switch (blkz->type) {
    case BLK_ZONE_TYPE_SEQWRITE_REQ:
        zone->type = BLK_ZT_SWR;
        break;
    case BLK_ZONE_TYPE_SEQWRITE_PREF:
        zone->type = BLK_ZT_SWP;
        break;
    case BLK_ZONE_TYPE_CONVENTIONAL:
        zone->type = BLK_ZT_CONV;
        break;
    default:
        error_report("Unsupported zone type: 0x%x", blkz->type);
        return -ENOTSUP;
    }

    /* Map the kernel zone condition to QEMU's BlockZoneState */
    switch (blkz->cond) {
    case BLK_ZONE_COND_NOT_WP:
        zone->state = BLK_ZS_NOT_WP;
        break;
    case BLK_ZONE_COND_EMPTY:
        zone->state = BLK_ZS_EMPTY;
        break;
    case BLK_ZONE_COND_IMP_OPEN:
        zone->state = BLK_ZS_IOPEN;
        break;
    case BLK_ZONE_COND_EXP_OPEN:
        zone->state = BLK_ZS_EOPEN;
        break;
    case BLK_ZONE_COND_CLOSED:
        zone->state = BLK_ZS_CLOSED;
        break;
    case BLK_ZONE_COND_READONLY:
        zone->state = BLK_ZS_RDONLY;
        break;
    case BLK_ZONE_COND_FULL:
        zone->state = BLK_ZS_FULL;
        break;
    case BLK_ZONE_COND_OFFLINE:
        zone->state = BLK_ZS_OFFLINE;
        break;
    default:
        error_report("Unsupported zone state: 0x%x", blkz->cond);
        return -ENOTSUP;
    }
    return 0;
}
20816d43eaa3SSam Li #endif
20826d43eaa3SSam Li
20836d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
/*
 * Thread-pool worker: fill aiocb->zone_report.zones with up to
 * *aiocb->zone_report.nr_zones descriptors starting at aiocb->aio_offset,
 * using the BLKREPORTZONE ioctl.
 *
 * On success, *nr_zones is updated to the number of zones actually
 * reported (which may be fewer than requested, e.g. at end of device).
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int handle_aiocb_zone_report(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int fd = aiocb->aio_fildes;
    unsigned int *nr_zones = aiocb->zone_report.nr_zones;
    BlockZoneDescriptor *zones = aiocb->zone_report.zones;
    /* zoned block devices use 512-byte sectors */
    uint64_t sector = aiocb->aio_offset / 512;

    struct blk_zone *blkz;
    size_t rep_size;
    unsigned int nrz;
    int ret;
    unsigned int n = 0, i = 0;

    nrz = *nr_zones;
    /* One header followed by nrz zone entries, as BLKREPORTZONE expects */
    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
    g_autofree struct blk_zone_report *rep = NULL;
    rep = g_malloc(rep_size);

    /* The zone array starts immediately after the report header */
    blkz = (struct blk_zone *)(rep + 1);
    /* The kernel may return fewer zones per ioctl than requested, so keep
     * issuing BLKREPORTZONE until we have nrz zones or run out of zones. */
    while (n < nrz) {
        memset(rep, 0, rep_size);
        rep->sector = sector;
        rep->nr_zones = nrz - n;

        do {
            ret = ioctl(fd, BLKREPORTZONE, rep);
        } while (ret != 0 && errno == EINTR);
        if (ret != 0) {
            error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
                         fd, sector, errno);
            return -errno;
        }

        if (!rep->nr_zones) {
            /* No more zones past @sector: stop early */
            break;
        }

        for (i = 0; i < rep->nr_zones; i++, n++) {
            ret = parse_zone(&zones[n], &blkz[i]);
            if (ret != 0) {
                return ret;
            }

            /* The next report should start after the last zone reported */
            sector = blkz[i].start + blkz[i].len;
        }
    }

    *nr_zones = n;
    return 0;
}
21376d43eaa3SSam Li #endif
21386d43eaa3SSam Li
21396d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
/*
 * Thread-pool worker: apply a zone management operation (the ioctl request
 * code in aiocb->zone_mgmt.op, e.g. open/close/finish/reset) to the zone
 * range covered by [aio_offset, aio_offset + aio_nbytes).
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int handle_aiocb_zone_mgmt(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int fd = aiocb->aio_fildes;
    /* Zoned block devices use 512-byte sectors for ioctl arguments */
    uint64_t sector = aiocb->aio_offset / 512;
    int64_t nr_sectors = aiocb->aio_nbytes / 512;
    struct blk_zone_range range;
    int ret;

    /* Execute the operation */
    range.sector = sector;
    range.nr_sectors = nr_sectors;
    do {
        ret = ioctl(fd, aiocb->zone_mgmt.op, &range);
    } while (ret != 0 && errno == EINTR);

    return ret < 0 ? -errno : ret;
}
21586d43eaa3SSam Li #endif
21596d43eaa3SSam Li
handle_aiocb_copy_range(void * opaque)216058a209c4SKevin Wolf static int handle_aiocb_copy_range(void *opaque)
21611efad060SFam Zheng {
216258a209c4SKevin Wolf RawPosixAIOData *aiocb = opaque;
21631efad060SFam Zheng uint64_t bytes = aiocb->aio_nbytes;
21641efad060SFam Zheng off_t in_off = aiocb->aio_offset;
2165d57c44d0SKevin Wolf off_t out_off = aiocb->copy_range.aio_offset2;
21661efad060SFam Zheng
21671efad060SFam Zheng while (bytes) {
21681efad060SFam Zheng ssize_t ret = copy_file_range(aiocb->aio_fildes, &in_off,
2169d57c44d0SKevin Wolf aiocb->copy_range.aio_fd2, &out_off,
21701efad060SFam Zheng bytes, 0);
2171ecc983a5SFam Zheng trace_file_copy_file_range(aiocb->bs, aiocb->aio_fildes, in_off,
2172d57c44d0SKevin Wolf aiocb->copy_range.aio_fd2, out_off, bytes,
2173d57c44d0SKevin Wolf 0, ret);
2174c436e3d0SFam Zheng if (ret == 0) {
2175c436e3d0SFam Zheng /* No progress (e.g. when beyond EOF), let the caller fall back to
2176c436e3d0SFam Zheng * buffer I/O. */
2177c436e3d0SFam Zheng return -ENOSPC;
21781efad060SFam Zheng }
21791efad060SFam Zheng if (ret < 0) {
2180c436e3d0SFam Zheng switch (errno) {
2181c436e3d0SFam Zheng case ENOSYS:
21821efad060SFam Zheng return -ENOTSUP;
2183c436e3d0SFam Zheng case EINTR:
2184c436e3d0SFam Zheng continue;
2185c436e3d0SFam Zheng default:
21861efad060SFam Zheng return -errno;
21871efad060SFam Zheng }
21881efad060SFam Zheng }
21891efad060SFam Zheng bytes -= ret;
21901efad060SFam Zheng }
21911efad060SFam Zheng return 0;
21921efad060SFam Zheng }
21931efad060SFam Zheng
/*
 * Thread-pool worker: discard (unmap) the byte range described by the
 * request, choosing the mechanism by target kind:
 *   - block devices: BLKDISCARD ioctl (Linux);
 *   - regular files: fallocate(FALLOC_FL_PUNCH_HOLE) on Linux, or
 *     fcntl(F_PUNCHHOLE) on macOS.
 *
 * On -ENOTSUP the driver-wide has_discard flag is cleared so further
 * discard requests are skipped without retrying the syscall.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int handle_aiocb_discard(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    /* Stays -ENOTSUP if no discard mechanism is compiled in for this path */
    int ret = -ENOTSUP;
    BDRVRawState *s = aiocb->bs->opaque;

    if (!s->has_discard) {
        return -ENOTSUP;
    }

    if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
#ifdef BLKDISCARD
        do {
            uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
            if (ioctl(aiocb->aio_fildes, BLKDISCARD, range) == 0) {
                return 0;
            }
        } while (errno == EINTR);

        ret = translate_err(-errno);
#endif
    } else {
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
        ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                           aiocb->aio_offset, aiocb->aio_nbytes);
        ret = translate_err(ret);
#elif defined(__APPLE__) && (__MACH__)
        fpunchhole_t fpunchhole;
        fpunchhole.fp_flags = 0;
        fpunchhole.reserved = 0;
        fpunchhole.fp_offset = aiocb->aio_offset;
        fpunchhole.fp_length = aiocb->aio_nbytes;
        if (fcntl(s->fd, F_PUNCHHOLE, &fpunchhole) == -1) {
            /* ENODEV means the filesystem cannot punch holes at all */
            ret = errno == ENODEV ? -ENOTSUP : -errno;
        } else {
            ret = 0;
        }
#endif
    }

    if (ret == -ENOTSUP) {
        /* Remember that discard is unsupported to avoid repeated attempts */
        s->has_discard = false;
    }
    return ret;
}
2239c1bb86cdSEric Blake
22403a20013fSNir Soffer /*
22413a20013fSNir Soffer * Help alignment probing by allocating the first block.
22423a20013fSNir Soffer *
22433a20013fSNir Soffer * When reading with direct I/O from unallocated area on Gluster backed by XFS,
22443a20013fSNir Soffer * reading succeeds regardless of request length. In this case we fallback to
22453a20013fSNir Soffer * safe alignment which is not optimal. Allocating the first block avoids this
22463a20013fSNir Soffer * fallback.
22473a20013fSNir Soffer *
22483a20013fSNir Soffer * fd may be opened with O_DIRECT, but we don't know the buffer alignment or
22493a20013fSNir Soffer * request alignment, so we use safe values.
22503a20013fSNir Soffer *
22513a20013fSNir Soffer * Returns: 0 on success, -errno on failure. Since this is an optimization,
22523a20013fSNir Soffer * caller may ignore failures.
22533a20013fSNir Soffer */
static int allocate_first_block(int fd, size_t max_size)
{
    size_t nbytes;
    size_t align;
    void *zeroes;
    ssize_t written;
    int result = 0;

    /* Write one sector if the file is smaller than the largest supported
     * block size, otherwise a full maximum-sized block. */
    if (max_size < MAX_BLOCKSIZE) {
        nbytes = BDRV_SECTOR_SIZE;
    } else {
        nbytes = MAX_BLOCKSIZE;
    }
    /* Buffer alignment safe for any O_DIRECT configuration */
    align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());

    zeroes = qemu_memalign(align, nbytes);
    memset(zeroes, 0, nbytes);

    written = RETRY_ON_EINTR(pwrite(fd, zeroes, nbytes, 0));
    if (written == -1) {
        result = -errno;
    }

    qemu_vfree(zeroes);
    return result;
}
22743a20013fSNir Soffer
/*
 * Thread-pool worker: truncate the file to aio_offset and, when growing,
 * preallocate the new tail according to truncate.prealloc:
 *   - PREALLOC_MODE_FALLOC: posix_fallocate() the new range;
 *   - PREALLOC_MODE_FULL:   grow, then physically write zeroes and fsync;
 *   - PREALLOC_MODE_OFF:    plain ftruncate().
 *
 * Shrinking is only allowed with PREALLOC_MODE_OFF.  On failure of a
 * preallocating path, the original file length is restored best-effort.
 *
 * Returns 0 on success or a negative errno value on failure; a
 * human-readable error is also set through truncate.errp.
 */
static int handle_aiocb_truncate(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int result = 0;
    int64_t current_length = 0;
    char *buf = NULL;
    struct stat st;
    int fd = aiocb->aio_fildes;
    int64_t offset = aiocb->aio_offset;
    PreallocMode prealloc = aiocb->truncate.prealloc;
    Error **errp = aiocb->truncate.errp;

    if (fstat(fd, &st) < 0) {
        result = -errno;
        error_setg_errno(errp, -result, "Could not stat file");
        return result;
    }

    current_length = st.st_size;
    if (current_length > offset && prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Cannot use preallocation for shrinking files");
        return -ENOTSUP;
    }

    switch (prealloc) {
#ifdef CONFIG_POSIX_FALLOCATE
    case PREALLOC_MODE_FALLOC:
        /*
         * Truncating before posix_fallocate() makes it about twice slower on
         * file systems that do not support fallocate(), trying to check if a
         * block is allocated before allocating it, so don't do that here.
         */
        if (offset != current_length) {
            result = -posix_fallocate(fd, current_length,
                                      offset - current_length);
            if (result != 0) {
                /* posix_fallocate() doesn't set errno. */
                error_setg_errno(errp, -result,
                                 "Could not preallocate new data");
            } else if (current_length == 0) {
                /*
                 * posix_fallocate() uses fallocate() if the filesystem
                 * supports it, or fallback to manually writing zeroes. If
                 * fallocate() was used, unaligned reads from the fallocated
                 * area in raw_probe_alignment() will succeed, hence we need to
                 * allocate the first block.
                 *
                 * Optimize future alignment probing; ignore failures.
                 */
                allocate_first_block(fd, offset);
            }
        } else {
            result = 0;
        }
        goto out;
#endif
    case PREALLOC_MODE_FULL:
    {
        int64_t num = 0, left = offset - current_length;
        off_t seek_result;

        /*
         * Knowing the final size from the beginning could allow the file
         * system driver to do less allocations and possibly avoid
         * fragmentation of the file.
         */
        if (ftruncate(fd, offset) != 0) {
            result = -errno;
            error_setg_errno(errp, -result, "Could not resize file");
            goto out;
        }

        /* Zero-filled chunk written repeatedly over the new tail */
        buf = g_malloc0(65536);

        seek_result = lseek(fd, current_length, SEEK_SET);
        if (seek_result < 0) {
            result = -errno;
            error_setg_errno(errp, -result,
                             "Failed to seek to the old end of file");
            goto out;
        }

        while (left > 0) {
            num = MIN(left, 65536);
            result = write(fd, buf, num);
            if (result < 0) {
                if (errno == EINTR) {
                    /* Interrupted write: retry the same chunk */
                    continue;
                }
                result = -errno;
                error_setg_errno(errp, -result,
                                 "Could not write zeros for preallocation");
                goto out;
            }
            left -= result;
        }
        /* Commit the newly written zeroes to stable storage */
        if (result >= 0) {
            result = fsync(fd);
            if (result < 0) {
                result = -errno;
                error_setg_errno(errp, -result,
                                 "Could not flush file to disk");
                goto out;
            }
        }
        goto out;
    }
    case PREALLOC_MODE_OFF:
        if (ftruncate(fd, offset) != 0) {
            result = -errno;
            error_setg_errno(errp, -result, "Could not resize file");
        } else if (current_length == 0 && offset > current_length) {
            /* Optimize future alignment probing; ignore failures. */
            allocate_first_block(fd, offset);
        }
        /* No rollback needed for plain truncation: return directly */
        return result;
    default:
        result = -ENOTSUP;
        error_setg(errp, "Unsupported preallocation mode: %s",
                   PreallocMode_str(prealloc));
        return result;
    }

out:
    /* A failed preallocation path may have grown the file: best-effort
     * restore of the original length so the image stays consistent. */
    if (result < 0) {
        if (ftruncate(fd, current_length) < 0) {
            error_report("Failed to restore old file length: %s",
                         strerror(errno));
        }
    }

    g_free(buf);
    return result;
}
240993f4e2ffSKevin Wolf
/*
 * Run @func(@arg) on the thread pool and yield the current coroutine
 * until it completes; returns the worker function's result.
 */
static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg)
{
    return thread_pool_submit_co(func, arg);
}
24145d5de250SKevin Wolf
2415a7c5f67aSKeith Busch /*
2416a7c5f67aSKeith Busch * Check if all memory in this vector is sector aligned.
2417a7c5f67aSKeith Busch */
bdrv_qiov_is_aligned(BlockDriverState * bs,QEMUIOVector * qiov)2418a7c5f67aSKeith Busch static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2419a7c5f67aSKeith Busch {
2420a7c5f67aSKeith Busch int i;
2421a7c5f67aSKeith Busch size_t alignment = bdrv_min_mem_align(bs);
242225474d90SKeith Busch size_t len = bs->bl.request_alignment;
2423a7c5f67aSKeith Busch IO_CODE();
2424a7c5f67aSKeith Busch
2425a7c5f67aSKeith Busch for (i = 0; i < qiov->niov; i++) {
2426a7c5f67aSKeith Busch if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2427a7c5f67aSKeith Busch return false;
2428a7c5f67aSKeith Busch }
242925474d90SKeith Busch if (qiov->iov[i].iov_len % len) {
2430a7c5f67aSKeith Busch return false;
2431a7c5f67aSKeith Busch }
2432a7c5f67aSKeith Busch }
2433a7c5f67aSKeith Busch
2434a7c5f67aSKeith Busch return true;
2435a7c5f67aSKeith Busch }
2436a7c5f67aSKeith Busch
2437cd0c0db0SStefan Hajnoczi #ifdef CONFIG_LINUX_IO_URING
/*
 * Return true if io_uring is enabled for this BDS and usable in the
 * current AioContext.  On setup failure, log the error, permanently
 * disable io_uring for this BDS (s->use_linux_io_uring = false) and
 * return false so the caller falls back to the thread pool.
 */
static inline bool raw_check_linux_io_uring(BDRVRawState *s)
{
    Error *local_err = NULL;
    AioContext *ctx;

    if (!s->use_linux_io_uring) {
        return false;
    }

    ctx = qemu_get_current_aio_context();
    if (unlikely(!aio_setup_linux_io_uring(ctx, &local_err))) {
        error_reportf_err(local_err, "Unable to use linux io_uring, "
                                     "falling back to thread pool: ");
        s->use_linux_io_uring = false;
        return false;
    }
    return true;
}
2457cd0c0db0SStefan Hajnoczi
2458cd0c0db0SStefan Hajnoczi #ifdef CONFIG_LINUX_AIO
/*
 * Return true if Linux AIO is enabled for this BDS and usable in the
 * current AioContext.  On setup failure, log the error, permanently
 * disable Linux AIO for this BDS (s->use_linux_aio = false) and return
 * false so the caller falls back to the thread pool.
 */
static inline bool raw_check_linux_aio(BDRVRawState *s)
{
    Error *local_err = NULL;
    AioContext *ctx;

    if (!s->use_linux_aio) {
        return false;
    }

    ctx = qemu_get_current_aio_context();
    if (unlikely(!aio_setup_linux_aio(ctx, &local_err))) {
        error_reportf_err(local_err, "Unable to use Linux AIO, "
                                     "falling back to thread pool: ");
        s->use_linux_aio = false;
        return false;
    }
    return true;
}
2478cd0c0db0SStefan Hajnoczi
/*
 * Common read/write/zone-append path.  Dispatches the request to io_uring,
 * Linux AIO, or the thread pool, preferring the native AIO engines when the
 * buffers are suitably aligned.
 *
 * @offset_ptr: in/out.  On entry, the requested byte offset.  For zone
 *              append on a zoned device, the actual write position (the
 *              zone's write pointer) is written back on success.
 * @type:       QEMU_AIO_* request type bits.
 *
 * For writes/appends to zoned devices, bs->wps->colock is held across the
 * I/O so the cached per-zone write pointers stay consistent; they are
 * advanced on success and re-read from the device on failure.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
                                   uint64_t bytes, QEMUIOVector *qiov, int type)
{
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    int ret;
    uint64_t offset = *offset_ptr;

    if (fd_open(bs) < 0)
        return -EIO;
#if defined(CONFIG_BLKZONED)
    if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
        bs->bl.zoned != BLK_Z_NONE) {
        /* Serialise zoned writes so the cached write pointers stay valid;
         * unlocked in the "out" path below. */
        qemu_co_mutex_lock(&bs->wps->colock);
        if (type & QEMU_AIO_ZONE_APPEND) {
            /* Zone append writes at the zone's current write pointer */
            int index = offset / bs->bl.zone_size;
            offset = bs->wps->wp[index];
        }
    }
#endif

    /*
     * When using O_DIRECT, the request must be aligned to be able to use
     * either libaio or io_uring interface. If not fail back to regular thread
     * pool read/write code which emulates this for us if we
     * set QEMU_AIO_MISALIGNED.
     */
    if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) {
        type |= QEMU_AIO_MISALIGNED;
#ifdef CONFIG_LINUX_IO_URING
    } else if (raw_check_linux_io_uring(s)) {
        assert(qiov->size == bytes);
        ret = luring_co_submit(bs, s->fd, offset, qiov, type);
        goto out;
#endif
#ifdef CONFIG_LINUX_AIO
    } else if (raw_check_linux_aio(s)) {
        assert(qiov->size == bytes);
        ret = laio_co_submit(s->fd, offset, qiov, type,
                             s->aio_max_batch);
        goto out;
#endif
    }

    /* Fallback: emulate the request on the thread pool */
    acb = (RawPosixAIOData) {
        .bs             = bs,
        .aio_fildes     = s->fd,
        .aio_type       = type,
        .aio_offset     = offset,
        .aio_nbytes     = bytes,
        .io             = {
            .iov            = qiov->iov,
            .niov           = qiov->niov,
        },
    };

    assert(qiov->size == bytes);
    ret = raw_thread_pool_submit(handle_aiocb_rw, &acb);
    goto out; /* Avoid the compiler err of unused label */

out:
#if defined(CONFIG_BLKZONED)
    if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
        bs->bl.zoned != BLK_Z_NONE) {
        BlockZoneWps *wps = bs->wps;
        if (ret == 0) {
            uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
            if (!BDRV_ZT_IS_CONV(*wp)) {
                if (type & QEMU_AIO_ZONE_APPEND) {
                    /* Report the position the append actually landed at */
                    *offset_ptr = *wp;
                    trace_zbd_zone_append_complete(bs, *offset_ptr
                        >> BDRV_SECTOR_BITS);
                }
                /* Advance the wp if needed */
                if (offset + bytes > *wp) {
                    *wp = offset + bytes;
                }
            }
        } else {
            /*
             * write and append write are not allowed to cross zone boundaries
             */
            update_zones_wp(bs, s->fd, offset, 1);
        }

        qemu_co_mutex_unlock(&wps->colock);
    }
#endif
    return ret;
}
2569c1bb86cdSEric Blake
/* BlockDriver.bdrv_co_preadv: read @bytes at @offset via the common path */
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, QEMUIOVector *qiov,
                                      BdrvRequestFlags flags)
{
    return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ);
}
2576c1bb86cdSEric Blake
/* BlockDriver.bdrv_co_pwritev: write @bytes at @offset via the common path */
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
                                       int64_t bytes, QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
{
    return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE);
}
2583c1bb86cdSEric Blake
/*
 * BlockDriver.bdrv_co_flush_to_disk: flush the file to stable storage,
 * preferring a native AIO flush (io_uring, then Linux AIO fdsync when
 * available) before falling back to an fsync on the thread pool.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    acb = (RawPosixAIOData) {
        .bs             = bs,
        .aio_fildes     = s->fd,
        .aio_type       = QEMU_AIO_FLUSH,
    };

#ifdef CONFIG_LINUX_IO_URING
    if (raw_check_linux_io_uring(s)) {
        return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
    }
#endif
#ifdef CONFIG_LINUX_AIO
    /* Linux AIO flush requires IOCB_CMD_FDSYNC support in the kernel */
    if (s->has_laio_fdsync && raw_check_linux_aio(s)) {
        return laio_co_submit(s->fd, 0, NULL, QEMU_AIO_FLUSH, 0);
    }
#endif
    return raw_thread_pool_submit(handle_aiocb_flush, &acb);
}
2613c1bb86cdSEric Blake
/* BlockDriver.bdrv_close: release the zone write-pointer cache (zoned
 * builds only) and close the file descriptor, if one is open. */
static void raw_close(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;

    if (s->fd < 0) {
        return;
    }

#if defined(CONFIG_BLKZONED)
    g_free(bs->wps);
#endif
    qemu_close(s->fd);
    s->fd = -1;
}
2626c1bb86cdSEric Blake
2627d0bc9e5dSMax Reitz /**
2628d0bc9e5dSMax Reitz * Truncates the given regular file @fd to @offset and, when growing, fills the
2629d0bc9e5dSMax Reitz * new space according to @prealloc.
2630d0bc9e5dSMax Reitz *
2631d0bc9e5dSMax Reitz * Returns: 0 on success, -errno on failure.
2632d0bc9e5dSMax Reitz */
263393f4e2ffSKevin Wolf static int coroutine_fn
raw_regular_truncate(BlockDriverState * bs,int fd,int64_t offset,PreallocMode prealloc,Error ** errp)263493f4e2ffSKevin Wolf raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset,
263593f4e2ffSKevin Wolf PreallocMode prealloc, Error **errp)
26369f63b07eSMax Reitz {
263729cb4c01SKevin Wolf RawPosixAIOData acb;
2638d0bc9e5dSMax Reitz
263929cb4c01SKevin Wolf acb = (RawPosixAIOData) {
264093f4e2ffSKevin Wolf .bs = bs,
264193f4e2ffSKevin Wolf .aio_fildes = fd,
264293f4e2ffSKevin Wolf .aio_type = QEMU_AIO_TRUNCATE,
264393f4e2ffSKevin Wolf .aio_offset = offset,
2644d57c44d0SKevin Wolf .truncate = {
264593f4e2ffSKevin Wolf .prealloc = prealloc,
264693f4e2ffSKevin Wolf .errp = errp,
2647d57c44d0SKevin Wolf },
264893f4e2ffSKevin Wolf };
2649d0bc9e5dSMax Reitz
26500fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_truncate, &acb);
26519f63b07eSMax Reitz }
26529f63b07eSMax Reitz
/*
 * BlockDriver.bdrv_co_truncate: resize the image to @offset bytes.
 *
 * Regular files are resized (with optional preallocation) via
 * raw_regular_truncate().  Character/block devices cannot be resized;
 * a request is still accepted when it does not require an actual size
 * change (offset <= current length with @exact unset, preallocation off).
 * Other file types are rejected.
 *
 * Returns 0 on success or a negative errno value on failure; a
 * human-readable error is also set through @errp.
 */
static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
                                        bool exact, PreallocMode prealloc,
                                        BdrvRequestFlags flags, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    struct stat st;
    int ret;

    if (fstat(s->fd, &st)) {
        ret = -errno;
        error_setg_errno(errp, -ret, "Failed to fstat() the file");
        return ret;
    }

    if (S_ISREG(st.st_mode)) {
        /* Always resizes to the exact @offset */
        return raw_regular_truncate(bs, s->fd, offset, prealloc, errp);
    }

    /* Non-regular files below: preallocation cannot be honoured */
    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Preallocation mode '%s' unsupported for this "
                   "non-regular file", PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
        int64_t cur_length = raw_getlength(bs);

        if (offset != cur_length && exact) {
            error_setg(errp, "Cannot resize device files");
            return -ENOTSUP;
        } else if (offset > cur_length) {
            error_setg(errp, "Cannot grow device files");
            return -EINVAL;
        }
    } else {
        error_setg(errp, "Resizing this file is not supported");
        return -ENOTSUP;
    }

    return 0;
}
2695c1bb86cdSEric Blake
2696c1bb86cdSEric Blake #ifdef __OpenBSD__
static int64_t raw_getlength(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    struct stat st;
    struct disklabel dl;
    int fd = s->fd;

    if (fstat(fd, &st)) {
        return -errno;
    }
    /* Not a device: the inode size is authoritative */
    if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
        return st.st_size;
    }
    /* Character/block device: query the disklabel for the partition size */
    if (ioctl(fd, DIOCGDINFO, &dl)) {
        return -errno;
    }
    return (uint64_t)dl.d_secsize *
        dl.d_partitions[DISKPART(st.st_rdev)].p_size;
}
2715c1bb86cdSEric Blake #elif defined(__NetBSD__)
static int64_t raw_getlength(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int fd = s->fd;
    struct stat st;

    if (fstat(fd, &st)) {
        return -errno;
    }
    /* Not a device: the inode size is authoritative */
    if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
        return st.st_size;
    }

    /* Prefer wedge information; fall back to the classic disklabel */
    {
        struct dkwedge_info dkw;

        if (ioctl(fd, DIOCGWEDGEINFO, &dkw) != -1) {
            /* Wedge sizes are reported in 512-byte sectors */
            return dkw.dkw_size * 512;
        }
    }
    {
        struct disklabel dl;

        if (ioctl(fd, DIOCGDINFO, &dl)) {
            return -errno;
        }
        return (uint64_t)dl.d_secsize *
            dl.d_partitions[DISKPART(st.st_rdev)].p_size;
    }
}
2740c1bb86cdSEric Blake #elif defined(__sun__)
static int64_t raw_getlength(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    struct dk_minfo minfo;
    int64_t len;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    /* First choice: ask the driver via the DKIOCGMEDIAINFO ioctl */
    if (ioctl(s->fd, DKIOCGMEDIAINFO, &minfo) != -1) {
        return minfo.dki_lbsize * minfo.dki_capacity;
    }

    /*
     * Fall back to seeking to the end.  lseek() reportedly fails on
     * some devices, but a contingency on the contingency was deemed
     * overkill.
     */
    len = lseek(s->fd, 0, SEEK_END);
    return len < 0 ? -errno : len;
}
2771c1bb86cdSEric Blake #elif defined(CONFIG_BSD)
/*
 * Return the length of the image backing @bs in bytes, or a negative
 * errno on failure.  For character devices, try the platform-specific
 * size ioctls in order of preference before falling back to lseek();
 * on FreeBSD, an empty CD-ROM drive may be reopened once and retried.
 */
static int64_t raw_getlength(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int fd = s->fd;
    int64_t size;
    struct stat sb;
#if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
    int reopened = 0;
#endif
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

#if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
again:
#endif
    if (!fstat(fd, &sb) && (S_IFCHR & sb.st_mode)) {
        size = 0;
#ifdef DIOCGMEDIASIZE
        if (ioctl(fd, DIOCGMEDIASIZE, (off_t *)&size)) {
            size = 0;
        }
#endif
#ifdef DIOCGPART
        if (size == 0) {
            struct partinfo pi;
            if (ioctl(fd, DIOCGPART, &pi) == 0) {
                size = pi.media_size;
            }
        }
#endif
#if defined(DKIOCGETBLOCKCOUNT) && defined(DKIOCGETBLOCKSIZE)
        if (size == 0) {
            uint64_t sectors = 0;
            uint32_t sector_size = 0;

            /* Fixed: the '&sectors'/'&sector_size' arguments had been
             * garbled into HTML-entity residue ('&sect' -> section sign);
             * pass the addresses of the locals as the ioctls require. */
            if (ioctl(fd, DKIOCGETBLOCKCOUNT, &sectors) == 0
                && ioctl(fd, DKIOCGETBLOCKSIZE, &sector_size) == 0) {
                size = sectors * sector_size;
            }
        }
#endif
        if (size == 0) {
            size = lseek(fd, 0LL, SEEK_END);
        }
        if (size < 0) {
            return -errno;
        }
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        switch(s->type) {
        case FTYPE_CD:
            /* XXX FreeBSD acd returns UINT_MAX sectors for an empty drive */
            if (size == 2048LL * (unsigned)-1) {
                size = 0;
            }
            /* XXX no disc? maybe we need to reopen... */
            if (size <= 0 && !reopened && cdrom_reopen(bs) >= 0) {
                reopened = 1;
                goto again;
            }
        }
#endif
    } else {
        /* Regular file (or fstat failed): fall back to lseek() */
        size = lseek(fd, 0, SEEK_END);
        if (size < 0) {
            return -errno;
        }
    }
    return size;
}
2843c1bb86cdSEric Blake #else
static int64_t raw_getlength(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int64_t end;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    /* Portable fallback: the file length is wherever SEEK_END lands */
    end = lseek(s->fd, 0, SEEK_END);
    return end < 0 ? -errno : end;
}
2861c1bb86cdSEric Blake #endif
2862c1bb86cdSEric Blake
/* Coroutine entry point for length queries; delegates to raw_getlength(). */
static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
{
    return raw_getlength(bs);
}
286736c6c877SPaolo Bonzini
raw_co_get_allocated_file_size(BlockDriverState * bs)286882618d7bSEmanuele Giuseppe Esposito static int64_t coroutine_fn raw_co_get_allocated_file_size(BlockDriverState *bs)
2869c1bb86cdSEric Blake {
2870c1bb86cdSEric Blake struct stat st;
2871c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
2872c1bb86cdSEric Blake
2873c1bb86cdSEric Blake if (fstat(s->fd, &st) < 0) {
2874c1bb86cdSEric Blake return -errno;
2875c1bb86cdSEric Blake }
2876c1bb86cdSEric Blake return (int64_t)st.st_blocks * 512;
2877c1bb86cdSEric Blake }
2878c1bb86cdSEric Blake
287993f4e2ffSKevin Wolf static int coroutine_fn
raw_co_create(BlockdevCreateOptions * options,Error ** errp)288093f4e2ffSKevin Wolf raw_co_create(BlockdevCreateOptions *options, Error **errp)
2881c1bb86cdSEric Blake {
2882927f11e1SKevin Wolf BlockdevCreateOptionsFile *file_opts;
28837c20c808SMax Reitz Error *local_err = NULL;
2884c1bb86cdSEric Blake int fd;
2885d815efcaSMax Reitz uint64_t perm, shared;
2886c1bb86cdSEric Blake int result = 0;
2887c1bb86cdSEric Blake
2888927f11e1SKevin Wolf /* Validate options and set default values */
2889927f11e1SKevin Wolf assert(options->driver == BLOCKDEV_DRIVER_FILE);
2890927f11e1SKevin Wolf file_opts = &options->u.file;
2891c1bb86cdSEric Blake
2892927f11e1SKevin Wolf if (!file_opts->has_nocow) {
2893927f11e1SKevin Wolf file_opts->nocow = false;
2894927f11e1SKevin Wolf }
2895927f11e1SKevin Wolf if (!file_opts->has_preallocation) {
2896927f11e1SKevin Wolf file_opts->preallocation = PREALLOC_MODE_OFF;
2897c1bb86cdSEric Blake }
2898ffa244c8SKevin Wolf if (!file_opts->has_extent_size_hint) {
2899ffa244c8SKevin Wolf file_opts->extent_size_hint = 1 * MiB;
2900ffa244c8SKevin Wolf }
2901ffa244c8SKevin Wolf if (file_opts->extent_size_hint > UINT32_MAX) {
2902ffa244c8SKevin Wolf result = -EINVAL;
2903ffa244c8SKevin Wolf error_setg(errp, "Extent size hint is too large");
2904ffa244c8SKevin Wolf goto out;
2905ffa244c8SKevin Wolf }
2906c1bb86cdSEric Blake
2907927f11e1SKevin Wolf /* Create file */
2908b18a24a9SDaniel P. Berrangé fd = qemu_create(file_opts->filename, O_RDWR | O_BINARY, 0644, errp);
2909c1bb86cdSEric Blake if (fd < 0) {
2910c1bb86cdSEric Blake result = -errno;
2911c1bb86cdSEric Blake goto out;
2912c1bb86cdSEric Blake }
2913c1bb86cdSEric Blake
2914b8cf1913SMax Reitz /* Take permissions: We want to discard everything, so we need
2915b8cf1913SMax Reitz * BLK_PERM_WRITE; and truncation to the desired size requires
2916b8cf1913SMax Reitz * BLK_PERM_RESIZE.
2917b8cf1913SMax Reitz * On the other hand, we cannot share the RESIZE permission
2918b8cf1913SMax Reitz * because we promise that after this function, the file has the
2919b8cf1913SMax Reitz * size given in the options. If someone else were to resize it
2920b8cf1913SMax Reitz * concurrently, we could not guarantee that.
2921b8cf1913SMax Reitz * Note that after this function, we can no longer guarantee that
2922b8cf1913SMax Reitz * the file is not touched by a third party, so it may be resized
2923b8cf1913SMax Reitz * then. */
2924b8cf1913SMax Reitz perm = BLK_PERM_WRITE | BLK_PERM_RESIZE;
2925b8cf1913SMax Reitz shared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;
2926b8cf1913SMax Reitz
2927b8cf1913SMax Reitz /* Step one: Take locks */
29282996ffadSFam Zheng result = raw_apply_lock_bytes(NULL, fd, perm, ~shared, false, errp);
2929b8cf1913SMax Reitz if (result < 0) {
2930b8cf1913SMax Reitz goto out_close;
2931b8cf1913SMax Reitz }
2932b8cf1913SMax Reitz
2933b8cf1913SMax Reitz /* Step two: Check that nobody else has taken conflicting locks */
2934b8cf1913SMax Reitz result = raw_check_lock_bytes(fd, perm, shared, errp);
2935b8cf1913SMax Reitz if (result < 0) {
2936b857431dSFam Zheng error_append_hint(errp,
2937b857431dSFam Zheng "Is another process using the image [%s]?\n",
2938b857431dSFam Zheng file_opts->filename);
29397c20c808SMax Reitz goto out_unlock;
2940b8cf1913SMax Reitz }
2941b8cf1913SMax Reitz
2942b8cf1913SMax Reitz /* Clear the file by truncating it to 0 */
294393f4e2ffSKevin Wolf result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, errp);
2944b8cf1913SMax Reitz if (result < 0) {
29457c20c808SMax Reitz goto out_unlock;
2946b8cf1913SMax Reitz }
2947b8cf1913SMax Reitz
2948927f11e1SKevin Wolf if (file_opts->nocow) {
2949c1bb86cdSEric Blake #ifdef __linux__
2950c1bb86cdSEric Blake /* Set NOCOW flag to solve performance issue on fs like btrfs.
2951c1bb86cdSEric Blake * This is an optimisation. The FS_IOC_SETFLAGS ioctl return value
2952c1bb86cdSEric Blake * will be ignored since any failure of this operation should not
2953c1bb86cdSEric Blake * block the left work.
2954c1bb86cdSEric Blake */
2955c1bb86cdSEric Blake int attr;
2956c1bb86cdSEric Blake if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) {
2957c1bb86cdSEric Blake attr |= FS_NOCOW_FL;
2958c1bb86cdSEric Blake ioctl(fd, FS_IOC_SETFLAGS, &attr);
2959c1bb86cdSEric Blake }
2960c1bb86cdSEric Blake #endif
2961c1bb86cdSEric Blake }
2962ffa244c8SKevin Wolf #ifdef FS_IOC_FSSETXATTR
2963ffa244c8SKevin Wolf /*
2964ffa244c8SKevin Wolf * Try to set the extent size hint. Failure is not fatal, and a warning is
2965ffa244c8SKevin Wolf * only printed if the option was explicitly specified.
2966ffa244c8SKevin Wolf */
2967ffa244c8SKevin Wolf {
2968ffa244c8SKevin Wolf struct fsxattr attr;
2969ffa244c8SKevin Wolf result = ioctl(fd, FS_IOC_FSGETXATTR, &attr);
2970ffa244c8SKevin Wolf if (result == 0) {
2971ffa244c8SKevin Wolf attr.fsx_xflags |= FS_XFLAG_EXTSIZE;
2972ffa244c8SKevin Wolf attr.fsx_extsize = file_opts->extent_size_hint;
2973ffa244c8SKevin Wolf result = ioctl(fd, FS_IOC_FSSETXATTR, &attr);
2974ffa244c8SKevin Wolf }
2975ffa244c8SKevin Wolf if (result < 0 && file_opts->has_extent_size_hint &&
2976ffa244c8SKevin Wolf file_opts->extent_size_hint)
2977ffa244c8SKevin Wolf {
2978ffa244c8SKevin Wolf warn_report("Failed to set extent size hint: %s",
2979ffa244c8SKevin Wolf strerror(errno));
2980ffa244c8SKevin Wolf }
2981ffa244c8SKevin Wolf }
2982ffa244c8SKevin Wolf #endif
2983c1bb86cdSEric Blake
2984b8cf1913SMax Reitz /* Resize and potentially preallocate the file to the desired
2985b8cf1913SMax Reitz * final size */
298693f4e2ffSKevin Wolf result = raw_regular_truncate(NULL, fd, file_opts->size,
298793f4e2ffSKevin Wolf file_opts->preallocation, errp);
29889f63b07eSMax Reitz if (result < 0) {
29897c20c808SMax Reitz goto out_unlock;
29907c20c808SMax Reitz }
29917c20c808SMax Reitz
29927c20c808SMax Reitz out_unlock:
29932996ffadSFam Zheng raw_apply_lock_bytes(NULL, fd, 0, 0, true, &local_err);
29947c20c808SMax Reitz if (local_err) {
29957c20c808SMax Reitz /* The above call should not fail, and if it does, that does
29967c20c808SMax Reitz * not mean the whole creation operation has failed. So
29977c20c808SMax Reitz * report it the user for their convenience, but do not report
29987c20c808SMax Reitz * it to the caller. */
2999db0754dfSFam Zheng warn_report_err(local_err);
30005a1dad9dSNir Soffer }
30015a1dad9dSNir Soffer
30025a1dad9dSNir Soffer out_close:
3003c1bb86cdSEric Blake if (qemu_close(fd) != 0 && result == 0) {
3004c1bb86cdSEric Blake result = -errno;
3005c1bb86cdSEric Blake error_setg_errno(errp, -result, "Could not close the new file");
3006c1bb86cdSEric Blake }
3007c1bb86cdSEric Blake out:
3008c1bb86cdSEric Blake return result;
3009c1bb86cdSEric Blake }
3010c1bb86cdSEric Blake
30114ec8df01SKevin Wolf static int coroutine_fn GRAPH_RDLOCK
raw_co_create_opts(BlockDriver * drv,const char * filename,QemuOpts * opts,Error ** errp)30124ec8df01SKevin Wolf raw_co_create_opts(BlockDriver *drv, const char *filename,
30134ec8df01SKevin Wolf QemuOpts *opts, Error **errp)
3014927f11e1SKevin Wolf {
3015927f11e1SKevin Wolf BlockdevCreateOptions options;
3016927f11e1SKevin Wolf int64_t total_size = 0;
3017ffa244c8SKevin Wolf int64_t extent_size_hint = 0;
3018ffa244c8SKevin Wolf bool has_extent_size_hint = false;
3019927f11e1SKevin Wolf bool nocow = false;
3020927f11e1SKevin Wolf PreallocMode prealloc;
3021927f11e1SKevin Wolf char *buf = NULL;
3022927f11e1SKevin Wolf Error *local_err = NULL;
3023927f11e1SKevin Wolf
3024927f11e1SKevin Wolf /* Skip file: protocol prefix */
3025927f11e1SKevin Wolf strstart(filename, "file:", &filename);
3026927f11e1SKevin Wolf
3027927f11e1SKevin Wolf /* Read out options */
3028927f11e1SKevin Wolf total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
3029927f11e1SKevin Wolf BDRV_SECTOR_SIZE);
3030ffa244c8SKevin Wolf if (qemu_opt_get(opts, BLOCK_OPT_EXTENT_SIZE_HINT)) {
3031ffa244c8SKevin Wolf has_extent_size_hint = true;
3032ffa244c8SKevin Wolf extent_size_hint =
3033ffa244c8SKevin Wolf qemu_opt_get_size_del(opts, BLOCK_OPT_EXTENT_SIZE_HINT, -1);
3034ffa244c8SKevin Wolf }
3035927f11e1SKevin Wolf nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false);
3036927f11e1SKevin Wolf buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
3037927f11e1SKevin Wolf prealloc = qapi_enum_parse(&PreallocMode_lookup, buf,
3038927f11e1SKevin Wolf PREALLOC_MODE_OFF, &local_err);
3039927f11e1SKevin Wolf g_free(buf);
3040927f11e1SKevin Wolf if (local_err) {
3041927f11e1SKevin Wolf error_propagate(errp, local_err);
3042927f11e1SKevin Wolf return -EINVAL;
3043927f11e1SKevin Wolf }
3044927f11e1SKevin Wolf
3045927f11e1SKevin Wolf options = (BlockdevCreateOptions) {
3046927f11e1SKevin Wolf .driver = BLOCKDEV_DRIVER_FILE,
3047927f11e1SKevin Wolf .u.file = {
3048927f11e1SKevin Wolf .filename = (char *) filename,
3049927f11e1SKevin Wolf .size = total_size,
3050927f11e1SKevin Wolf .has_preallocation = true,
3051927f11e1SKevin Wolf .preallocation = prealloc,
3052927f11e1SKevin Wolf .has_nocow = true,
3053927f11e1SKevin Wolf .nocow = nocow,
3054ffa244c8SKevin Wolf .has_extent_size_hint = has_extent_size_hint,
3055ffa244c8SKevin Wolf .extent_size_hint = extent_size_hint,
3056927f11e1SKevin Wolf },
3057927f11e1SKevin Wolf };
3058927f11e1SKevin Wolf return raw_co_create(&options, errp);
3059927f11e1SKevin Wolf }
3060927f11e1SKevin Wolf
raw_co_delete_file(BlockDriverState * bs,Error ** errp)30619bffae14SDaniel Henrique Barboza static int coroutine_fn raw_co_delete_file(BlockDriverState *bs,
30629bffae14SDaniel Henrique Barboza Error **errp)
30639bffae14SDaniel Henrique Barboza {
30649bffae14SDaniel Henrique Barboza struct stat st;
30659bffae14SDaniel Henrique Barboza int ret;
30669bffae14SDaniel Henrique Barboza
30679bffae14SDaniel Henrique Barboza if (!(stat(bs->filename, &st) == 0) || !S_ISREG(st.st_mode)) {
30689bffae14SDaniel Henrique Barboza error_setg_errno(errp, ENOENT, "%s is not a regular file",
30699bffae14SDaniel Henrique Barboza bs->filename);
30709bffae14SDaniel Henrique Barboza return -ENOENT;
30719bffae14SDaniel Henrique Barboza }
30729bffae14SDaniel Henrique Barboza
30739bffae14SDaniel Henrique Barboza ret = unlink(bs->filename);
30749bffae14SDaniel Henrique Barboza if (ret < 0) {
30759bffae14SDaniel Henrique Barboza ret = -errno;
30769bffae14SDaniel Henrique Barboza error_setg_errno(errp, -ret, "Error when deleting file %s",
30779bffae14SDaniel Henrique Barboza bs->filename);
30789bffae14SDaniel Henrique Barboza }
30799bffae14SDaniel Henrique Barboza
30809bffae14SDaniel Henrique Barboza return ret;
30819bffae14SDaniel Henrique Barboza }
30829bffae14SDaniel Henrique Barboza
3083c1bb86cdSEric Blake /*
3084c1bb86cdSEric Blake * Find allocation range in @bs around offset @start.
3085c1bb86cdSEric Blake * May change underlying file descriptor's file offset.
3086c1bb86cdSEric Blake * If @start is not in a hole, store @start in @data, and the
3087c1bb86cdSEric Blake * beginning of the next hole in @hole, and return 0.
3088c1bb86cdSEric Blake * If @start is in a non-trailing hole, store @start in @hole and the
3089c1bb86cdSEric Blake * beginning of the next non-hole in @data, and return 0.
3090c1bb86cdSEric Blake * If @start is in a trailing hole or beyond EOF, return -ENXIO.
3091c1bb86cdSEric Blake * If we can't find out, return a negative errno other than -ENXIO.
3092c1bb86cdSEric Blake */
static int find_allocation(BlockDriverState *bs, off_t start,
                           off_t *data, off_t *hole)
{
#if defined SEEK_HOLE && defined SEEK_DATA
    BDRVRawState *s = bs->opaque;
    off_t offs;

    /*
     * SEEK_DATA cases:
     * D1. offs == start: start is in data
     * D2. offs > start: start is in a hole, next data at offs
     * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
     *     or start is beyond EOF
     *     If the latter happens, the file has been truncated behind
     *     our back since we opened it.  All bets are off then.
     *     Treating like a trailing hole is simplest.
     * D4. offs < 0, errno != ENXIO: we learned nothing
     */
    offs = lseek(s->fd, start, SEEK_DATA);
    if (offs < 0) {
        return -errno;          /* D3 or D4 */
    }

    if (offs < start) {
        /* This is not a valid return by lseek(). We are safe to just return
         * -EIO in this case, and we'll treat it like D4. */
        return -EIO;
    }

    if (offs > start) {
        /* D2: in hole, next data at offs */
        *hole = start;
        *data = offs;
        return 0;
    }

    /* D1: in data, end not yet known */

    /*
     * SEEK_HOLE cases:
     * H1. offs == start: start is in a hole
     *     If this happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H2. offs > start: either start is in data, next hole at offs,
     *     or start is in trailing hole, EOF at offs
     *     Linux treats trailing holes like any other hole: offs ==
     *     start.  Solaris seeks to EOF instead: offs > start (blech).
     *     If that happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H3. offs < 0, errno = ENXIO: start is beyond EOF
     *     If this happens, the file has been truncated behind our
     *     back since we opened it.  Treat it like a trailing hole.
     * H4. offs < 0, errno != ENXIO: we learned nothing
     *     Pretend we know nothing at all, i.e. "forget" about D1.
     */
    offs = lseek(s->fd, start, SEEK_HOLE);
    if (offs < 0) {
        return -errno;          /* D1 and (H3 or H4) */
    }

    if (offs < start) {
        /* This is not a valid return by lseek(). We are safe to just return
         * -EIO in this case, and we'll treat it like H4. */
        return -EIO;
    }

    if (offs > start) {
        /*
         * D1 and H2: either in data, next hole at offs, or it was in
         * data but is now in a trailing hole.  In the latter case,
         * all bets are off.  Treating it as if it there was data all
         * the way to EOF is safe, so simply do that.
         */
        *data = start;
        *hole = offs;
        return 0;
    }

    /* D1 and H1: the extent state flipped from data to hole between the
     * two lseek() calls, so neither answer can be trusted; give up. */
    return -EBUSY;
#else
    /* Without SEEK_HOLE/SEEK_DATA there is no way to probe holes here. */
    return -ENOTSUP;
#endif
}
3177c1bb86cdSEric Blake
3178c1bb86cdSEric Blake /*
3179a290f085SEric Blake * Returns the allocation status of the specified offset.
3180c1bb86cdSEric Blake *
3181a290f085SEric Blake * The block layer guarantees 'offset' and 'bytes' are within bounds.
3182c1bb86cdSEric Blake *
3183a290f085SEric Blake * 'pnum' is set to the number of bytes (including and immediately following
3184a290f085SEric Blake * the specified offset) that are known to be in the same
3185c1bb86cdSEric Blake * allocated/unallocated state.
3186c1bb86cdSEric Blake *
3187869e7ee8SHanna Reitz * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
3188869e7ee8SHanna Reitz * well exceed it.
3189c1bb86cdSEric Blake */
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
                                            bool want_zero,
                                            int64_t offset,
                                            int64_t bytes, int64_t *pnum,
                                            int64_t *map,
                                            BlockDriverState **file)
{
    off_t data = 0, hole = 0;
    int ret;

    assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    /* Caller does not care about zero/hole detection: report the whole
     * range as allocated data without probing the file. */
    if (!want_zero) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    /* Probe the data/hole layout around @offset via lseek() */
    ret = find_allocation(bs, offset, &data, &hole);
    if (ret == -ENXIO) {
        /* Trailing hole */
        *pnum = bytes;
        ret = BDRV_BLOCK_ZERO;
    } else if (ret < 0) {
        /* No info available, so pretend there are no holes */
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA;
    } else if (data == offset) {
        /* On a data extent, compute bytes to the end of the extent,
         * possibly including a partial sector at EOF. */
        *pnum = hole - offset;

        /*
         * We are not allowed to return partial sectors, though, so
         * round up if necessary.
         */
        if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
            int64_t file_length = raw_getlength(bs);
            if (file_length > 0) {
                /* Ignore errors, this is just a safeguard */
                assert(hole == file_length);
            }
            *pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
        }

        ret = BDRV_BLOCK_DATA;
    } else {
        /* On a hole, compute bytes to the beginning of the next extent. */
        assert(hole == offset);
        *pnum = data - offset;
        ret = BDRV_BLOCK_ZERO;
    }
    *map = offset;
    *file = bs;
    return ret | BDRV_BLOCK_OFFSET_VALID;
}
3252c1bb86cdSEric Blake
325331be8a2aSStefan Hajnoczi #if defined(__linux__)
325431be8a2aSStefan Hajnoczi /* Verify that the file is not in the page cache */
static void check_cache_dropped(BlockDriverState *bs, Error **errp)
{
    const size_t window_size = 128 * 1024 * 1024;  /* scan in 128 MiB windows */
    BDRVRawState *s = bs->opaque;
    void *window = NULL;
    size_t length = 0;
    unsigned char *vec;
    size_t page_size;
    off_t offset;
    off_t end;

    /* mincore(2) page status information requires 1 byte per page */
    page_size = sysconf(_SC_PAGESIZE);
    vec = g_malloc(DIV_ROUND_UP(window_size, page_size));

    end = raw_getlength(bs);

    /* mmap() a sliding window over the file and ask mincore() whether any
     * page of it is still resident in the page cache. */
    for (offset = 0; offset < end; offset += window_size) {
        void *new_window;
        size_t new_length;
        size_t vec_end;
        size_t i;
        int ret;

        /* Unmap previous window if size has changed */
        new_length = MIN(end - offset, window_size);
        if (new_length != length) {
            munmap(window, length);
            window = NULL;
            length = 0;
        }

        /* PROT_NONE suffices: mincore() only needs the mapping, not access */
        new_window = mmap(window, new_length, PROT_NONE, MAP_PRIVATE,
                          s->fd, offset);
        if (new_window == MAP_FAILED) {
            error_setg_errno(errp, errno, "mmap failed");
            break;
        }

        window = new_window;
        length = new_length;

        ret = mincore(window, length, vec);
        if (ret < 0) {
            error_setg_errno(errp, errno, "mincore failed");
            break;
        }

        /* Bit 0 of each vec entry is set if that page is resident */
        vec_end = DIV_ROUND_UP(length, page_size);
        for (i = 0; i < vec_end; i++) {
            if (vec[i] & 0x1) {
                break;
            }
        }
        if (i < vec_end) {
            error_setg(errp, "page cache still in use!");
            break;
        }
    }

    /* Release the last window; error paths above also funnel through here */
    if (window) {
        munmap(window, length);
    }

    g_free(vec);
}
332231be8a2aSStefan Hajnoczi
/*
 * Drop the host kernel page cache for this file so that a subsequent reader
 * (e.g. the destination side of a live migration) observes current data.
 * Only effective on Linux; a no-op when the "drop-cache" option is disabled
 * or when O_DIRECT is in use (no page cache to drop).
 */
static void coroutine_fn GRAPH_RDLOCK
raw_co_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "The file descriptor is not open");
        return;
    }

    if (!s->drop_cache) {
        return;
    }

    if (s->open_flags & O_DIRECT) {
        return; /* No host kernel page cache */
    }

#if defined(__linux__)
    /* This sets the scene for the next syscall... */
    ret = bdrv_co_flush(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "flush failed");
        return;
    }

    /* Linux does not invalidate pages that are dirty, locked, or mmapped by a
     * process. These limitations are okay because we just fsynced the file,
     * we don't use mmap, and the file should not be in use by other processes.
     */
    ret = posix_fadvise(s->fd, 0, 0, POSIX_FADV_DONTNEED);
    if (ret != 0) { /* the return value is a positive errno */
        error_setg_errno(errp, ret, "fadvise failed");
        return;
    }

    if (s->check_cache_dropped) {
        check_cache_dropped(bs, errp);
    }
#else /* __linux__ */
    /* Do nothing. Live migration to a remote host with cache.direct=off is
     * unsupported on other host operating systems. Cache consistency issues
     * may occur but no error is reported here, partly because that's the
     * historical behavior and partly because it's hard to differentiate valid
     * configurations that should not cause errors.
     */
#endif /* !__linux__ */
}
3373dd577a26SStefan Hajnoczi
/* Fold the outcome of one discard request into the driver's statistics. */
static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
{
    if (ret == 0) {
        /* Success: count the request and the bytes it covered */
        s->stats.discard_nb_ok++;
        s->stats.discard_bytes_ok += nbytes;
    } else {
        s->stats.discard_nb_failed++;
    }
}
33831c450366SAnton Nefedov
33846d43eaa3SSam Li /*
33856d43eaa3SSam Li * zone report - Get a zone block device's information in the form
33866d43eaa3SSam Li * of an array of zone descriptors.
33876d43eaa3SSam Li * zones is an array of zone descriptors to hold zone information on reply;
33886d43eaa3SSam Li * offset can be any byte within the entire size of the device;
 * nr_zones is the maximum number of zones the command should operate on.
33906d43eaa3SSam Li */
33916d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
/*
 * Fill @zones with up to *@nr_zones zone descriptors starting from the zone
 * containing @offset; on return *@nr_zones holds the number actually filled.
 * The BLKREPORTZONE ioctl is issued from a worker thread.
 */
static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset,
                                           unsigned int *nr_zones,
                                           BlockZoneDescriptor *zones) {
    BDRVRawState *s = bs->opaque;
    /* Package the request for the thread-pool worker */
    RawPosixAIOData acb = (RawPosixAIOData) {
        .bs = bs,
        .aio_fildes = s->fd,
        .aio_type = QEMU_AIO_ZONE_REPORT,
        .aio_offset = offset,
        .zone_report = {
            .nr_zones = nr_zones,
            .zones = zones,
        },
    };

    trace_zbd_zone_report(bs, *nr_zones, offset >> BDRV_SECTOR_BITS);
    return raw_thread_pool_submit(handle_aiocb_zone_report, &acb);
}
34106d43eaa3SSam Li #endif
34116d43eaa3SSam Li
34126d43eaa3SSam Li /*
34136d43eaa3SSam Li * zone management operations - Execute an operation on a zone
34146d43eaa3SSam Li */
34156d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
/*
 * Execute a zone management operation (open/close/finish/reset) on the
 * zone(s) covering [offset, offset + len) and keep the cached write
 * pointers in bs->wps consistent with the device afterwards.
 */
static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
                                         int64_t offset, int64_t len) {
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    int64_t zone_size, zone_size_mask;
    const char *op_name;
    unsigned long zo;
    int ret;
    BlockZoneWps *wps = bs->wps;
    int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;

    zone_size = bs->bl.zone_size;
    zone_size_mask = zone_size - 1;
    /* The request must start on a zone boundary */
    if (offset & zone_size_mask) {
        error_report("sector offset %" PRId64 " is not aligned to zone size "
                     "%" PRId64 "", offset / 512, zone_size / 512);
        return -EINVAL;
    }

    /* ...and must cover whole zones and stay within the device capacity */
    if (((offset + len) < capacity && len & zone_size_mask) ||
        offset + len > capacity) {
        error_report("number of sectors %" PRId64 " is not aligned to zone size"
                     " %" PRId64 "", len / 512, zone_size / 512);
        return -EINVAL;
    }

    uint32_t i = offset / bs->bl.zone_size;
    uint32_t nrz = len / bs->bl.zone_size;
    uint64_t *wp = &wps->wp[i];
    if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
        error_report("zone mgmt operations are not allowed for conventional zones");
        return -EIO;
    }

    /* Map the block-layer operation onto the corresponding Linux ioctl */
    switch (op) {
    case BLK_ZO_OPEN:
        op_name = "BLKOPENZONE";
        zo = BLKOPENZONE;
        break;
    case BLK_ZO_CLOSE:
        op_name = "BLKCLOSEZONE";
        zo = BLKCLOSEZONE;
        break;
    case BLK_ZO_FINISH:
        op_name = "BLKFINISHZONE";
        zo = BLKFINISHZONE;
        break;
    case BLK_ZO_RESET:
        op_name = "BLKRESETZONE";
        zo = BLKRESETZONE;
        break;
    default:
        error_report("Unsupported zone op: 0x%x", op);
        return -ENOTSUP;
    }

    acb = (RawPosixAIOData) {
        .bs = bs,
        .aio_fildes = s->fd,
        .aio_type = QEMU_AIO_ZONE_MGMT,
        .aio_offset = offset,
        .aio_nbytes = len,
        .zone_mgmt = {
            .op = zo,
        },
    };

    trace_zbd_zone_mgmt(bs, op_name, offset >> BDRV_SECTOR_BITS,
                        len >> BDRV_SECTOR_BITS);
    ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb);
    if (ret != 0) {
        /* On failure, re-read the affected write pointers from the device */
        update_zones_wp(bs, s->fd, offset, nrz);
        error_report("ioctl %s failed %d", op_name, ret);
        return ret;
    }

    if (zo == BLKRESETZONE && len == capacity) {
        /* Whole-device reset: refresh all cached write pointers in one go */
        ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 1);
        if (ret < 0) {
            error_report("reporting single wp failed");
            return ret;
        }
    } else if (zo == BLKRESETZONE) {
        /* A reset zone starts writing again at its base offset */
        for (unsigned int j = 0; j < nrz; ++j) {
            wp[j] = offset + j * zone_size;
        }
    } else if (zo == BLKFINISHZONE) {
        for (unsigned int j = 0; j < nrz; ++j) {
            /* The zoned device allows the last zone smaller that the
             * zone size. */
            wp[j] = MIN(offset + (j + 1) * zone_size, offset + len);
        }
    }

    return ret;
}
35126d43eaa3SSam Li #endif
35136d43eaa3SSam Li
35144751d09aSSam Li #if defined(CONFIG_BLKZONED)
raw_co_zone_append(BlockDriverState * bs,int64_t * offset,QEMUIOVector * qiov,BdrvRequestFlags flags)35154751d09aSSam Li static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
35164751d09aSSam Li int64_t *offset,
35174751d09aSSam Li QEMUIOVector *qiov,
35184751d09aSSam Li BdrvRequestFlags flags) {
35194751d09aSSam Li assert(flags == 0);
35204751d09aSSam Li int64_t zone_size_mask = bs->bl.zone_size - 1;
35214751d09aSSam Li int64_t iov_len = 0;
35224751d09aSSam Li int64_t len = 0;
35234751d09aSSam Li
35244751d09aSSam Li if (*offset & zone_size_mask) {
35254751d09aSSam Li error_report("sector offset %" PRId64 " is not aligned to zone size "
35264751d09aSSam Li "%" PRId32 "", *offset / 512, bs->bl.zone_size / 512);
35274751d09aSSam Li return -EINVAL;
35284751d09aSSam Li }
35294751d09aSSam Li
35304751d09aSSam Li int64_t wg = bs->bl.write_granularity;
35314751d09aSSam Li int64_t wg_mask = wg - 1;
35324751d09aSSam Li for (int i = 0; i < qiov->niov; i++) {
35334751d09aSSam Li iov_len = qiov->iov[i].iov_len;
35344751d09aSSam Li if (iov_len & wg_mask) {
35354751d09aSSam Li error_report("len of IOVector[%d] %" PRId64 " is not aligned to "
35364751d09aSSam Li "block size %" PRId64 "", i, iov_len, wg);
35374751d09aSSam Li return -EINVAL;
35384751d09aSSam Li }
35394751d09aSSam Li len += iov_len;
35404751d09aSSam Li }
35414751d09aSSam Li
35426c811e19SSam Li trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS);
3543ad4feacaSNaohiro Aota return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND);
35444751d09aSSam Li }
35454751d09aSSam Li #endif
35464751d09aSSam Li
354733d70fb6SKevin Wolf static coroutine_fn int
raw_do_pdiscard(BlockDriverState * bs,int64_t offset,int64_t bytes,bool blkdev)35480c802287SVladimir Sementsov-Ogievskiy raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes,
35490c802287SVladimir Sementsov-Ogievskiy bool blkdev)
3550c1bb86cdSEric Blake {
3551c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
355246ee0f46SKevin Wolf RawPosixAIOData acb;
35531c450366SAnton Nefedov int ret;
3554c1bb86cdSEric Blake
355546ee0f46SKevin Wolf acb = (RawPosixAIOData) {
355646ee0f46SKevin Wolf .bs = bs,
355746ee0f46SKevin Wolf .aio_fildes = s->fd,
355846ee0f46SKevin Wolf .aio_type = QEMU_AIO_DISCARD,
355946ee0f46SKevin Wolf .aio_offset = offset,
356046ee0f46SKevin Wolf .aio_nbytes = bytes,
356146ee0f46SKevin Wolf };
356246ee0f46SKevin Wolf
356346ee0f46SKevin Wolf if (blkdev) {
356446ee0f46SKevin Wolf acb.aio_type |= QEMU_AIO_BLKDEV;
356546ee0f46SKevin Wolf }
356646ee0f46SKevin Wolf
35670fdb7311SEmanuele Giuseppe Esposito ret = raw_thread_pool_submit(handle_aiocb_discard, &acb);
35681c450366SAnton Nefedov raw_account_discard(s, bytes, ret);
35691c450366SAnton Nefedov return ret;
357046ee0f46SKevin Wolf }
357146ee0f46SKevin Wolf
357246ee0f46SKevin Wolf static coroutine_fn int
raw_co_pdiscard(BlockDriverState * bs,int64_t offset,int64_t bytes)35730c802287SVladimir Sementsov-Ogievskiy raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
357446ee0f46SKevin Wolf {
357546ee0f46SKevin Wolf return raw_do_pdiscard(bs, offset, bytes, false);
3576c1bb86cdSEric Blake }
3577c1bb86cdSEric Blake
/*
 * Common write-zeroes path for files and host block devices.  Beyond EOF it
 * serializes the request to dodge a Linux XFS AIO/fallocate race (see the
 * comment below), then submits to the thread pool with flags translated
 * into QEMU_AIO_* bits.
 */
static int coroutine_fn
raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     BdrvRequestFlags flags, bool blkdev)
{
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    ThreadPoolFunc *handler;

#ifdef CONFIG_FALLOCATE
    if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
        BdrvTrackedRequest *req;

        /*
         * This is a workaround for a bug in the Linux XFS driver,
         * where writes submitted through the AIO interface will be
         * discarded if they happen beyond a concurrently running
         * fallocate() that increases the file length (i.e., both the
         * write and the fallocate() happen beyond the EOF).
         *
         * To work around it, we extend the tracked request for this
         * zero write until INT64_MAX (effectively infinity), and mark
         * it as serializing.
         *
         * We have to enable this workaround for all filesystems and
         * AIO modes (not just XFS with aio=native), because for
         * remote filesystems we do not know the host configuration.
         */

        req = bdrv_co_get_self_request(bs);
        assert(req);
        assert(req->type == BDRV_TRACKED_WRITE);
        assert(req->offset <= offset);
        assert(req->offset + req->bytes >= offset + bytes);

        req->bytes = BDRV_MAX_LENGTH - req->offset;

        bdrv_check_request(req->offset, req->bytes, &error_abort);

        bdrv_make_request_serialising(req, bs->bl.request_alignment);
    }
#endif

    acb = (RawPosixAIOData) {
        .bs = bs,
        .aio_fildes = s->fd,
        .aio_type = QEMU_AIO_WRITE_ZEROES,
        .aio_offset = offset,
        .aio_nbytes = bytes,
    };

    if (blkdev) {
        acb.aio_type |= QEMU_AIO_BLKDEV;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        acb.aio_type |= QEMU_AIO_NO_FALLBACK;
    }

    if (flags & BDRV_REQ_MAY_UNMAP) {
        /* Caller allows deallocation: prefer the hole-punching handler */
        acb.aio_type |= QEMU_AIO_DISCARD;
        handler = handle_aiocb_write_zeroes_unmap;
    } else {
        handler = handle_aiocb_write_zeroes;
    }

    return raw_thread_pool_submit(handler, &acb);
}
36447154d8aeSKevin Wolf
raw_co_pwrite_zeroes(BlockDriverState * bs,int64_t offset,int64_t bytes,BdrvRequestFlags flags)3645c1bb86cdSEric Blake static int coroutine_fn raw_co_pwrite_zeroes(
3646c1bb86cdSEric Blake BlockDriverState *bs, int64_t offset,
3647f34b2bcfSVladimir Sementsov-Ogievskiy int64_t bytes, BdrvRequestFlags flags)
3648c1bb86cdSEric Blake {
36497154d8aeSKevin Wolf return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false);
3650c1bb86cdSEric Blake }
3651c1bb86cdSEric Blake
36523d47eb0aSEmanuele Giuseppe Esposito static int coroutine_fn
raw_co_get_info(BlockDriverState * bs,BlockDriverInfo * bdi)36533d47eb0aSEmanuele Giuseppe Esposito raw_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
3654c1bb86cdSEric Blake {
3655c1bb86cdSEric Blake return 0;
3656c1bb86cdSEric Blake }
3657c1bb86cdSEric Blake
raw_get_specific_info(BlockDriverState * bs,Error ** errp)36587f36a50aSHanna Reitz static ImageInfoSpecific *raw_get_specific_info(BlockDriverState *bs,
36597f36a50aSHanna Reitz Error **errp)
36607f36a50aSHanna Reitz {
36617f36a50aSHanna Reitz ImageInfoSpecificFile *file_info = g_new0(ImageInfoSpecificFile, 1);
36627f36a50aSHanna Reitz ImageInfoSpecific *spec_info = g_new(ImageInfoSpecific, 1);
36637f36a50aSHanna Reitz
36647f36a50aSHanna Reitz *spec_info = (ImageInfoSpecific){
36657f36a50aSHanna Reitz .type = IMAGE_INFO_SPECIFIC_KIND_FILE,
36667f36a50aSHanna Reitz .u.file.data = file_info,
36677f36a50aSHanna Reitz };
36687f36a50aSHanna Reitz
36697f36a50aSHanna Reitz #ifdef FS_IOC_FSGETXATTR
36707f36a50aSHanna Reitz {
36717f36a50aSHanna Reitz BDRVRawState *s = bs->opaque;
36727f36a50aSHanna Reitz struct fsxattr attr;
36737f36a50aSHanna Reitz int ret;
36747f36a50aSHanna Reitz
36757f36a50aSHanna Reitz ret = ioctl(s->fd, FS_IOC_FSGETXATTR, &attr);
36767f36a50aSHanna Reitz if (!ret && attr.fsx_extsize != 0) {
36777f36a50aSHanna Reitz file_info->has_extent_size_hint = true;
36787f36a50aSHanna Reitz file_info->extent_size_hint = attr.fsx_extsize;
36797f36a50aSHanna Reitz }
36807f36a50aSHanna Reitz }
36817f36a50aSHanna Reitz #endif
36827f36a50aSHanna Reitz
36837f36a50aSHanna Reitz return spec_info;
36847f36a50aSHanna Reitz }
36857f36a50aSHanna Reitz
get_blockstats_specific_file(BlockDriverState * bs)3686d9245599SAnton Nefedov static BlockStatsSpecificFile get_blockstats_specific_file(BlockDriverState *bs)
3687d9245599SAnton Nefedov {
3688d9245599SAnton Nefedov BDRVRawState *s = bs->opaque;
3689d9245599SAnton Nefedov return (BlockStatsSpecificFile) {
3690d9245599SAnton Nefedov .discard_nb_ok = s->stats.discard_nb_ok,
3691d9245599SAnton Nefedov .discard_nb_failed = s->stats.discard_nb_failed,
3692d9245599SAnton Nefedov .discard_bytes_ok = s->stats.discard_bytes_ok,
3693d9245599SAnton Nefedov };
3694d9245599SAnton Nefedov }
3695d9245599SAnton Nefedov
raw_get_specific_stats(BlockDriverState * bs)3696d9245599SAnton Nefedov static BlockStatsSpecific *raw_get_specific_stats(BlockDriverState *bs)
3697d9245599SAnton Nefedov {
3698d9245599SAnton Nefedov BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
3699d9245599SAnton Nefedov
3700d9245599SAnton Nefedov stats->driver = BLOCKDEV_DRIVER_FILE;
3701d9245599SAnton Nefedov stats->u.file = get_blockstats_specific_file(bs);
3702d9245599SAnton Nefedov
3703d9245599SAnton Nefedov return stats;
3704d9245599SAnton Nefedov }
3705d9245599SAnton Nefedov
370614176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE)
hdev_get_specific_stats(BlockDriverState * bs)3707d9245599SAnton Nefedov static BlockStatsSpecific *hdev_get_specific_stats(BlockDriverState *bs)
3708d9245599SAnton Nefedov {
3709d9245599SAnton Nefedov BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
3710d9245599SAnton Nefedov
3711d9245599SAnton Nefedov stats->driver = BLOCKDEV_DRIVER_HOST_DEVICE;
3712d9245599SAnton Nefedov stats->u.host_device = get_blockstats_specific_file(bs);
3713d9245599SAnton Nefedov
3714d9245599SAnton Nefedov return stats;
3715d9245599SAnton Nefedov }
371614176c8dSJoelle van Dyne #endif /* HAVE_HOST_BLOCK_DEVICE */
3717d9245599SAnton Nefedov
/* Creation options accepted by the "file" protocol (qemu-img create). */
static QemuOptsList raw_create_opts = {
    .name = "raw-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_NOCOW,
            .type = QEMU_OPT_BOOL,
            .help = "Turn off copy-on-write (valid only on btrfs)"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            /* "falloc" is only offered when posix_fallocate() is available */
            .help = "Preallocation mode (allowed values: off"
#ifdef CONFIG_POSIX_FALLOCATE
            ", falloc"
#endif
            ", full)"
        },
        {
            .name = BLOCK_OPT_EXTENT_SIZE_HINT,
            .type = QEMU_OPT_SIZE,
            .help = "Extent size hint for the image file, 0 to disable"
        },
        { /* end of list */ }
    }
};
3749c1bb86cdSEric Blake
/*
 * First phase of a permission change: possibly open a replacement fd (when
 * auto-read-only has to switch access mode), verify O_DIRECT alignment on
 * it, and stage file locks so old and new fd do not conflict.  The switch
 * to the new fd is committed in raw_set_perm() / rolled back in
 * raw_abort_perm_update().
 */
static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared,
                          Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int input_flags = s->reopen_state ? s->reopen_state->flags : bs->open_flags;
    int open_flags;
    int ret;

    /* We may need a new fd if auto-read-only switches the mode */
    ret = raw_reconfigure_getfd(bs, input_flags, &open_flags, perm, errp);
    if (ret < 0) {
        return ret;
    } else if (ret != s->fd) {
        Error *local_err = NULL;

        /*
         * Fail already check_perm() if we can't get a working O_DIRECT
         * alignment with the new fd.
         */
        raw_probe_alignment(bs, ret, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        s->perm_change_fd = ret;
        s->perm_change_flags = open_flags;
    }

    /* Prepare permissions on old fd to avoid conflicts between old and new,
     * but keep everything locked that new will need. */
    ret = raw_handle_perm_lock(bs, RAW_PL_PREPARE, perm, shared, errp);
    if (ret < 0) {
        goto fail;
    }

    /* Copy locks to the new fd */
    if (s->perm_change_fd && s->use_lock) {
        ret = raw_apply_lock_bytes(NULL, s->perm_change_fd, perm, ~shared,
                                   false, errp);
        if (ret < 0) {
            raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
            goto fail;
        }
    }
    return 0;

fail:
    if (s->perm_change_fd) {
        qemu_close(s->perm_change_fd);
    }
    s->perm_change_fd = 0;
    return ret;
}
3804244a5668SFam Zheng
/*
 * Commit a permission change prepared by raw_check_perm(): switch to the
 * replacement fd if one was opened, then finalize the file locks and
 * remember the granted permissions.
 */
static void raw_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared)
{
    BDRVRawState *s = bs->opaque;

    /* For reopen, we have already switched to the new fd (.bdrv_set_perm is
     * called after .bdrv_reopen_commit) */
    if (s->perm_change_fd && s->fd != s->perm_change_fd) {
        qemu_close(s->fd);
        s->fd = s->perm_change_fd;
        s->open_flags = s->perm_change_flags;
    }
    s->perm_change_fd = 0;

    raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL);
    s->perm = perm;
    s->shared_perm = shared;
}
3822244a5668SFam Zheng
/*
 * Roll back a permission change prepared by raw_check_perm(): discard the
 * replacement fd (if any) and release the staged locks.
 */
static void raw_abort_perm_update(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;

    /* For reopen, .bdrv_reopen_abort is called afterwards and will close
     * the file descriptor. */
    if (s->perm_change_fd) {
        qemu_close(s->perm_change_fd);
    }
    s->perm_change_fd = 0;

    raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
}
3836244a5668SFam Zheng
raw_co_copy_range_from(BlockDriverState * bs,BdrvChild * src,int64_t src_offset,BdrvChild * dst,int64_t dst_offset,int64_t bytes,BdrvRequestFlags read_flags,BdrvRequestFlags write_flags)3837742bf09bSEmanuele Giuseppe Esposito static int coroutine_fn GRAPH_RDLOCK raw_co_copy_range_from(
383848535049SVladimir Sementsov-Ogievskiy BlockDriverState *bs, BdrvChild *src, int64_t src_offset,
383948535049SVladimir Sementsov-Ogievskiy BdrvChild *dst, int64_t dst_offset, int64_t bytes,
384067b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags read_flags, BdrvRequestFlags write_flags)
38411efad060SFam Zheng {
384267b51fb9SVladimir Sementsov-Ogievskiy return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
384367b51fb9SVladimir Sementsov-Ogievskiy read_flags, write_flags);
38441efad060SFam Zheng }
38451efad060SFam Zheng
3846742bf09bSEmanuele Giuseppe Esposito static int coroutine_fn GRAPH_RDLOCK
raw_co_copy_range_to(BlockDriverState * bs,BdrvChild * src,int64_t src_offset,BdrvChild * dst,int64_t dst_offset,int64_t bytes,BdrvRequestFlags read_flags,BdrvRequestFlags write_flags)3847742bf09bSEmanuele Giuseppe Esposito raw_co_copy_range_to(BlockDriverState *bs,
3848742bf09bSEmanuele Giuseppe Esposito BdrvChild *src, int64_t src_offset,
3849742bf09bSEmanuele Giuseppe Esposito BdrvChild *dst, int64_t dst_offset,
3850742bf09bSEmanuele Giuseppe Esposito int64_t bytes, BdrvRequestFlags read_flags,
385167b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags write_flags)
38521efad060SFam Zheng {
385358a209c4SKevin Wolf RawPosixAIOData acb;
38541efad060SFam Zheng BDRVRawState *s = bs->opaque;
38551efad060SFam Zheng BDRVRawState *src_s;
38561efad060SFam Zheng
38571efad060SFam Zheng assert(dst->bs == bs);
38581efad060SFam Zheng if (src->bs->drv->bdrv_co_copy_range_to != raw_co_copy_range_to) {
38591efad060SFam Zheng return -ENOTSUP;
38601efad060SFam Zheng }
38611efad060SFam Zheng
38621efad060SFam Zheng src_s = src->bs->opaque;
38639f850f67SFam Zheng if (fd_open(src->bs) < 0 || fd_open(dst->bs) < 0) {
38641efad060SFam Zheng return -EIO;
38651efad060SFam Zheng }
386658a209c4SKevin Wolf
386758a209c4SKevin Wolf acb = (RawPosixAIOData) {
386858a209c4SKevin Wolf .bs = bs,
386958a209c4SKevin Wolf .aio_type = QEMU_AIO_COPY_RANGE,
387058a209c4SKevin Wolf .aio_fildes = src_s->fd,
387158a209c4SKevin Wolf .aio_offset = src_offset,
387258a209c4SKevin Wolf .aio_nbytes = bytes,
387358a209c4SKevin Wolf .copy_range = {
387458a209c4SKevin Wolf .aio_fd2 = s->fd,
387558a209c4SKevin Wolf .aio_offset2 = dst_offset,
387658a209c4SKevin Wolf },
387758a209c4SKevin Wolf };
387858a209c4SKevin Wolf
38790fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_copy_range, &acb);
38801efad060SFam Zheng }
38811efad060SFam Zheng
/* Protocol driver table for plain POSIX files ("file:" / bare filenames). */
BlockDriver bdrv_file = {
    .format_name = "file",
    .protocol_name = "file",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe = NULL, /* no probe for protocols */
    .bdrv_parse_filename = raw_parse_filename,
    .bdrv_open = raw_open,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit = raw_reopen_commit,
    .bdrv_reopen_abort = raw_reopen_abort,
    .bdrv_close = raw_close,
    .bdrv_co_create = raw_co_create,
    .bdrv_co_create_opts = raw_co_create_opts,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_co_block_status = raw_co_block_status,
    .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
    .bdrv_co_pwrite_zeroes = raw_co_pwrite_zeroes,
    .bdrv_co_delete_file = raw_co_delete_file,

    .bdrv_co_preadv = raw_co_preadv,
    .bdrv_co_pwritev = raw_co_pwritev,
    .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
    .bdrv_co_pdiscard = raw_co_pdiscard,
    .bdrv_co_copy_range_from = raw_co_copy_range_from,
    .bdrv_co_copy_range_to = raw_co_copy_range_to,
    .bdrv_refresh_limits = raw_refresh_limits,

    .bdrv_co_truncate = raw_co_truncate,
    .bdrv_co_getlength = raw_co_getlength,
    .bdrv_co_get_info = raw_co_get_info,
    .bdrv_get_specific_info = raw_get_specific_info,
    .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
    .bdrv_get_specific_stats = raw_get_specific_stats,
    .bdrv_check_perm = raw_check_perm,
    .bdrv_set_perm = raw_set_perm,
    .bdrv_abort_perm_update = raw_abort_perm_update,
    .create_opts = &raw_create_opts,
    .mutable_opts = mutable_opts,
};
3922c1bb86cdSEric Blake
3923c1bb86cdSEric Blake /***********************************************/
3924c1bb86cdSEric Blake /* host device */
3925c1bb86cdSEric Blake
392614176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE)
392714176c8dSJoelle van Dyne
3928c1bb86cdSEric Blake #if defined(__APPLE__) && defined(__MACH__)
3929c1bb86cdSEric Blake static kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
3930c1bb86cdSEric Blake CFIndex maxPathSize, int flags);
3931aa44d3f6SPhilippe Mathieu-Daudé
/*
 * Search IOKit for an ejectable optical medium (DVD first, then CD).
 *
 * On a match, *mediaIterator is set to an iterator over the matching
 * IOKit services and the matched media class name (kIODVDMediaClass or
 * kIOCDMediaClass) is returned as a newly allocated string that the
 * caller must g_free().  Returns NULL if no ejectable optical medium
 * is found.
 */
static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator)
{
    kern_return_t kernResult = KERN_FAILURE;
    mach_port_t mainPort;
    CFMutableDictionaryRef classesToMatch;
    /* DVD listed first so it takes priority over a plain CD match */
    const char *matching_array[] = {kIODVDMediaClass, kIOCDMediaClass};
    char *mediaType = NULL;

    kernResult = IOMainPort(MACH_PORT_NULL, &mainPort);
    if ( KERN_SUCCESS != kernResult ) {
        /* Treated as non-fatal: report and continue with the lookup */
        printf("IOMainPort returned %d\n", kernResult);
    }

    int index;
    for (index = 0; index < ARRAY_SIZE(matching_array); index++) {
        classesToMatch = IOServiceMatching(matching_array[index]);
        if (classesToMatch == NULL) {
            error_report("IOServiceMatching returned NULL for %s",
                         matching_array[index]);
            continue;
        }
        /* Restrict the match to ejectable media only */
        CFDictionarySetValue(classesToMatch, CFSTR(kIOMediaEjectableKey),
                             kCFBooleanTrue);
        kernResult = IOServiceGetMatchingServices(mainPort, classesToMatch,
                                                  mediaIterator);
        if (kernResult != KERN_SUCCESS) {
            error_report("Note: IOServiceGetMatchingServices returned %d",
                         kernResult);
            continue;
        }

        /* If a match was found, leave the loop */
        if (*mediaIterator != 0) {
            trace_file_FindEjectableOpticalMedia(matching_array[index]);
            mediaType = g_strdup(matching_array[index]);
            break;
        }
    }
    return mediaType;
}
3972c1bb86cdSEric Blake
/*
 * Extract the BSD device node path (e.g. /dev/disk2) for the first
 * medium returned by @mediaIterator into @bsdPath, writing at most
 * @maxPathSize bytes.  If @flags contains BDRV_O_NOCACHE an "r" is
 * inserted after the /dev/ prefix (macOS "raw" device convention —
 * assumed unbuffered access; verify against xnu docs).
 *
 * Returns KERN_SUCCESS only if a path was successfully written.
 */
kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
                         CFIndex maxPathSize, int flags)
{
    io_object_t nextMedia;
    kern_return_t kernResult = KERN_FAILURE;
    *bsdPath = '\0';
    nextMedia = IOIteratorNext( mediaIterator );
    if ( nextMedia )
    {
        CFTypeRef bsdPathAsCFString;
        bsdPathAsCFString = IORegistryEntryCreateCFProperty( nextMedia, CFSTR( kIOBSDNameKey ), kCFAllocatorDefault, 0 );
        if ( bsdPathAsCFString ) {
            size_t devPathLength;
            /* Start with the /dev/ prefix; the BSD name is appended below */
            strcpy( bsdPath, _PATH_DEV );
            if (flags & BDRV_O_NOCACHE) {
                strcat(bsdPath, "r");
            }
            devPathLength = strlen( bsdPath );
            if ( CFStringGetCString( bsdPathAsCFString, bsdPath + devPathLength, maxPathSize - devPathLength, kCFStringEncodingASCII ) ) {
                kernResult = KERN_SUCCESS;
            }
            CFRelease( bsdPathAsCFString );
        }
        IOObjectRelease( nextMedia );
    }

    return kernResult;
}
4001c1bb86cdSEric Blake
4002c1bb86cdSEric Blake /* Sets up a real cdrom for use in QEMU */
setup_cdrom(char * bsd_path,Error ** errp)4003c1bb86cdSEric Blake static bool setup_cdrom(char *bsd_path, Error **errp)
4004c1bb86cdSEric Blake {
4005c1bb86cdSEric Blake int index, num_of_test_partitions = 2, fd;
4006c1bb86cdSEric Blake char test_partition[MAXPATHLEN];
4007c1bb86cdSEric Blake bool partition_found = false;
4008c1bb86cdSEric Blake
4009c1bb86cdSEric Blake /* look for a working partition */
4010c1bb86cdSEric Blake for (index = 0; index < num_of_test_partitions; index++) {
4011c1bb86cdSEric Blake snprintf(test_partition, sizeof(test_partition), "%ss%d", bsd_path,
4012c1bb86cdSEric Blake index);
4013b18a24a9SDaniel P. Berrangé fd = qemu_open(test_partition, O_RDONLY | O_BINARY | O_LARGEFILE, NULL);
4014c1bb86cdSEric Blake if (fd >= 0) {
4015c1bb86cdSEric Blake partition_found = true;
4016c1bb86cdSEric Blake qemu_close(fd);
4017c1bb86cdSEric Blake break;
4018c1bb86cdSEric Blake }
4019c1bb86cdSEric Blake }
4020c1bb86cdSEric Blake
4021c1bb86cdSEric Blake /* if a working partition on the device was not found */
4022c1bb86cdSEric Blake if (partition_found == false) {
4023c1bb86cdSEric Blake error_setg(errp, "Failed to find a working partition on disc");
4024c1bb86cdSEric Blake } else {
40254f7d28d7SLaurent Vivier trace_file_setup_cdrom(test_partition);
4026c1bb86cdSEric Blake pstrcpy(bsd_path, MAXPATHLEN, test_partition);
4027c1bb86cdSEric Blake }
4028c1bb86cdSEric Blake return partition_found;
4029c1bb86cdSEric Blake }
4030c1bb86cdSEric Blake
/* Prints directions on mounting and unmounting a device */
static void print_unmounting_directions(const char *file_name)
{
    /*
     * Shown by callers when opening a macOS device node fails or when a
     * CD has no usable partition (see hdev_open / setup_cdrom callers).
     */
    error_report("If device %s is mounted on the desktop, unmount"
                 " it first before using it in QEMU", file_name);
    error_report("Command to unmount device: diskutil unmountDisk %s",
                 file_name);
    error_report("Command to mount device: diskutil mountDisk %s", file_name);
}
4040c1bb86cdSEric Blake
4041c1bb86cdSEric Blake #endif /* defined(__APPLE__) && defined(__MACH__) */
4042c1bb86cdSEric Blake
hdev_probe_device(const char * filename)4043c1bb86cdSEric Blake static int hdev_probe_device(const char *filename)
4044c1bb86cdSEric Blake {
4045c1bb86cdSEric Blake struct stat st;
4046c1bb86cdSEric Blake
4047c1bb86cdSEric Blake /* allow a dedicated CD-ROM driver to match with a higher priority */
4048c1bb86cdSEric Blake if (strstart(filename, "/dev/cdrom", NULL))
4049c1bb86cdSEric Blake return 50;
4050c1bb86cdSEric Blake
4051c1bb86cdSEric Blake if (stat(filename, &st) >= 0 &&
4052c1bb86cdSEric Blake (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
4053c1bb86cdSEric Blake return 100;
4054c1bb86cdSEric Blake }
4055c1bb86cdSEric Blake
4056c1bb86cdSEric Blake return 0;
4057c1bb86cdSEric Blake }
4058c1bb86cdSEric Blake
/*
 * Strip the "host_device:" protocol prefix from @filename and store the
 * remainder as the "filename" option (delegates to the common helper).
 */
static void hdev_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    bdrv_parse_filename_strip_prefix(filename, "host_device:", options);
}
4064c1bb86cdSEric Blake
/*
 * Return true if the opened device node is a Linux SCSI generic (SG)
 * character device, detected via the SG_GET_VERSION_NUM and
 * SG_GET_SCSI_ID ioctls.  Always false on non-Linux hosts.
 */
static bool hdev_is_sg(BlockDriverState *bs)
{
#if defined(__linux__)
    BDRVRawState *s = bs->opaque;
    struct stat st;
    struct sg_scsi_id scsiid;
    int sg_version;

    /* SG nodes are character devices; anything else cannot be SG */
    if (stat(bs->filename, &st) < 0 || !S_ISCHR(st.st_mode)) {
        return false;
    }

    if (ioctl(s->fd, SG_GET_VERSION_NUM, &sg_version) < 0) {
        return false;
    }

    if (ioctl(s->fd, SG_GET_SCSI_ID, &scsiid) >= 0) {
        trace_file_hdev_is_sg(scsiid.scsi_type, sg_version);
        return true;
    }
#endif

    return false;
}
4095c1bb86cdSEric Blake
/*
 * Open a host block device.
 *
 * On macOS, "/dev/cdrom" is first translated to the BSD device node of
 * the ejectable optical medium found via IOKit; the "filename" option
 * is rewritten before the common open path runs.  Returns 0 on success
 * or a negative errno value.
 */
static int hdev_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

#if defined(__APPLE__) && defined(__MACH__)
    /*
     * Caution: while qdict_get_str() is fine, getting non-string types
     * would require more care. When @options come from -blockdev or
     * blockdev_add, its members are typed according to the QAPI
     * schema, but when they come from -drive, they're all QString.
     */
    const char *filename = qdict_get_str(options, "filename");
    char bsd_path[MAXPATHLEN] = "";
    bool error_occurred = false;

    /* If using a real cdrom */
    if (strcmp(filename, "/dev/cdrom") == 0) {
        char *mediaType = NULL;
        kern_return_t ret_val;
        io_iterator_t mediaIterator = 0;

        mediaType = FindEjectableOpticalMedia(&mediaIterator);
        if (mediaType == NULL) {
            error_setg(errp, "Please make sure your CD/DVD is in the optical"
                       " drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        ret_val = GetBSDPath(mediaIterator, bsd_path, sizeof(bsd_path), flags);
        if (ret_val != KERN_SUCCESS) {
            error_setg(errp, "Could not get BSD path for optical drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        /* If a real optical drive was not found */
        if (bsd_path[0] == '\0') {
            error_setg(errp, "Failed to obtain bsd path for optical drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        /* If using a cdrom disc and finding a partition on the disc failed */
        if (strncmp(mediaType, kIOCDMediaClass, 9) == 0 &&
            setup_cdrom(bsd_path, errp) == false) {
            print_unmounting_directions(bsd_path);
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        /*
         * Rewrite the filename option so raw_open_common() below opens
         * the discovered BSD device node instead of /dev/cdrom.
         */
        qdict_put_str(options, "filename", bsd_path);

hdev_open_Mac_error:
        /* Common cleanup for both the success and all error paths */
        g_free(mediaType);
        if (mediaIterator) {
            IOObjectRelease(mediaIterator);
        }
        if (error_occurred) {
            return -ENOENT;
        }
    }
#endif /* defined(__APPLE__) && defined(__MACH__) */

    s->type = FTYPE_FILE;

    ret = raw_open_common(bs, options, flags, 0, true, errp);
    if (ret < 0) {
#if defined(__APPLE__) && defined(__MACH__)
        /* Report the translated device node, if there is one */
        if (*bsd_path) {
            filename = bsd_path;
        }
        /* if a physical device experienced an error while being opened */
        if (strncmp(filename, "/dev/", 5) == 0) {
            print_unmounting_directions(filename);
        }
#endif /* defined(__APPLE__) && defined(__MACH__) */
        return ret;
    }

    /* Since this does ioctl the device must be already opened */
    bs->sg = hdev_is_sg(bs);

    return ret;
}
4183c1bb86cdSEric Blake
4184c1bb86cdSEric Blake #if defined(__linux__)
/*
 * Issue ioctl @req with argument @buf against the host device.
 *
 * SG_IO persistent reservation commands (PERSISTENT_RESERVE_IN/OUT) are
 * diverted to the configured PR manager when one is set; everything
 * else runs on the thread pool via handle_aiocb_ioctl().  Returns a
 * negative errno on failure.
 */
static int coroutine_fn
hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    int ret;

    /* Make sure the device fd is (re)opened before touching it */
    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    if (req == SG_IO && s->pr_mgr) {
        struct sg_io_hdr *io_hdr = buf;
        /* Persistent reservations go through the PR manager helper */
        if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT ||
            io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) {
            return pr_manager_execute(s->pr_mgr, qemu_get_current_aio_context(),
                                      s->fd, io_hdr);
        }
    }

    /* Any other ioctl is executed on a worker thread */
    acb = (RawPosixAIOData) {
        .bs = bs,
        .aio_type = QEMU_AIO_IOCTL,
        .aio_fildes = s->fd,
        .aio_offset = 0,
        .ioctl = {
            .buf = buf,
            .cmd = req,
        },
    };

    return raw_thread_pool_submit(handle_aiocb_ioctl, &acb);
}
4219c1bb86cdSEric Blake #endif /* linux */
4220c1bb86cdSEric Blake
/*
 * Discard (unmap) @bytes at @offset on the host device.  Note that a
 * failure to (re)open the device is still recorded in the discard
 * statistics before being returned.
 */
static coroutine_fn int
hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        /* account the failed request, too */
        raw_account_discard(s, bytes, ret);
        return ret;
    }
    return raw_do_pdiscard(bs, offset, bytes, true);
}
4234c1bb86cdSEric Blake
hdev_co_pwrite_zeroes(BlockDriverState * bs,int64_t offset,int64_t bytes,BdrvRequestFlags flags)4235c1bb86cdSEric Blake static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
4236f34b2bcfSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, BdrvRequestFlags flags)
4237c1bb86cdSEric Blake {
4238c1bb86cdSEric Blake int rc;
4239c1bb86cdSEric Blake
4240c1bb86cdSEric Blake rc = fd_open(bs);
4241c1bb86cdSEric Blake if (rc < 0) {
4242c1bb86cdSEric Blake return rc;
4243c1bb86cdSEric Blake }
424434fa110eSKevin Wolf
42457154d8aeSKevin Wolf return raw_do_pwrite_zeroes(bs, offset, bytes, flags, true);
4246c1bb86cdSEric Blake }
4247c1bb86cdSEric Blake
/* Protocol driver for host block and character devices (host_device:). */
static BlockDriver bdrv_host_device = {
    .format_name = "host_device",
    .protocol_name = "host_device",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe_device  = hdev_probe_device,
    .bdrv_parse_filename = hdev_parse_filename,
    .bdrv_open = hdev_open,
    .bdrv_close = raw_close,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit  = raw_reopen_commit,
    .bdrv_reopen_abort   = raw_reopen_abort,
    /* image creation is not supported; only the trivial fallback */
    .bdrv_co_create_opts = bdrv_co_create_opts_simple,
    .create_opts         = &bdrv_create_opts_simple,
    .mutable_opts        = mutable_opts,
    .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
    .bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes,

    .bdrv_co_preadv         = raw_co_preadv,
    .bdrv_co_pwritev        = raw_co_pwritev,
    .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
    .bdrv_co_pdiscard       = hdev_co_pdiscard,
    .bdrv_co_copy_range_from = raw_co_copy_range_from,
    .bdrv_co_copy_range_to  = raw_co_copy_range_to,
    .bdrv_refresh_limits = raw_refresh_limits,

    .bdrv_co_truncate                 = raw_co_truncate,
    .bdrv_co_getlength                = raw_co_getlength,
    .bdrv_co_get_info                 = raw_co_get_info,
    .bdrv_get_specific_info           = raw_get_specific_info,
    .bdrv_co_get_allocated_file_size  = raw_co_get_allocated_file_size,
    .bdrv_get_specific_stats = hdev_get_specific_stats,
    .bdrv_check_perm = raw_check_perm,
    .bdrv_set_perm   = raw_set_perm,
    .bdrv_abort_perm_update = raw_abort_perm_update,
    .bdrv_probe_blocksizes = hdev_probe_blocksizes,
    .bdrv_probe_geometry = hdev_probe_geometry,

    /* generic scsi device */
#ifdef __linux__
    .bdrv_co_ioctl = hdev_co_ioctl,
#endif

    /* zoned device */
#if defined(CONFIG_BLKZONED)
    /* zone management operations */
    .bdrv_co_zone_report = raw_co_zone_report,
    .bdrv_co_zone_mgmt = raw_co_zone_mgmt,
    .bdrv_co_zone_append = raw_co_zone_append,
#endif
};
4299c1bb86cdSEric Blake
4300c1bb86cdSEric Blake #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
/*
 * Strip the "host_cdrom:" protocol prefix from @filename and store the
 * remainder as the "filename" option (delegates to the common helper).
 */
static void cdrom_parse_filename(const char *filename, QDict *options,
                                 Error **errp)
{
    bdrv_parse_filename_strip_prefix(filename, "host_cdrom:", options);
}
43068c6f27e7SPaolo Bonzini
static void cdrom_refresh_limits(BlockDriverState *bs, Error **errp)
{
    /* Removable media: the device length can change while it is open */
    bs->bl.has_variable_length = true;
    raw_refresh_limits(bs, errp);
}
4312c1bb86cdSEric Blake #endif
4313c1bb86cdSEric Blake
4314c1bb86cdSEric Blake #ifdef __linux__
/*
 * Open a Linux CD-ROM device node.  Returns 0 on success or a negative
 * errno value.
 */
static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVRawState *s = bs->opaque;

    s->type = FTYPE_CD;

    /* open will not fail even if no CD is inserted, so add O_NONBLOCK */
    return raw_open_common(bs, options, flags, O_NONBLOCK, true, errp);
}
4325c1bb86cdSEric Blake
cdrom_probe_device(const char * filename)4326c1bb86cdSEric Blake static int cdrom_probe_device(const char *filename)
4327c1bb86cdSEric Blake {
4328c1bb86cdSEric Blake int fd, ret;
4329c1bb86cdSEric Blake int prio = 0;
4330c1bb86cdSEric Blake struct stat st;
4331c1bb86cdSEric Blake
4332b18a24a9SDaniel P. Berrangé fd = qemu_open(filename, O_RDONLY | O_NONBLOCK, NULL);
4333c1bb86cdSEric Blake if (fd < 0) {
4334c1bb86cdSEric Blake goto out;
4335c1bb86cdSEric Blake }
4336c1bb86cdSEric Blake ret = fstat(fd, &st);
4337c1bb86cdSEric Blake if (ret == -1 || !S_ISBLK(st.st_mode)) {
4338c1bb86cdSEric Blake goto outc;
4339c1bb86cdSEric Blake }
4340c1bb86cdSEric Blake
4341c1bb86cdSEric Blake /* Attempt to detect via a CDROM specific ioctl */
4342c1bb86cdSEric Blake ret = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
4343c1bb86cdSEric Blake if (ret >= 0)
4344c1bb86cdSEric Blake prio = 100;
4345c1bb86cdSEric Blake
4346c1bb86cdSEric Blake outc:
4347c1bb86cdSEric Blake qemu_close(fd);
4348c1bb86cdSEric Blake out:
4349c1bb86cdSEric Blake return prio;
4350c1bb86cdSEric Blake }
4351c1bb86cdSEric Blake
/* A medium counts as inserted when the drive reports CDS_DISC_OK. */
static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    ret = ioctl(s->fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
    return ret == CDS_DISC_OK;
}
4360c1bb86cdSEric Blake
cdrom_co_eject(BlockDriverState * bs,bool eject_flag)43612531b390SEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag)
4362c1bb86cdSEric Blake {
4363c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
4364c1bb86cdSEric Blake
4365c1bb86cdSEric Blake if (eject_flag) {
4366c1bb86cdSEric Blake if (ioctl(s->fd, CDROMEJECT, NULL) < 0)
4367c1bb86cdSEric Blake perror("CDROMEJECT");
4368c1bb86cdSEric Blake } else {
4369c1bb86cdSEric Blake if (ioctl(s->fd, CDROMCLOSETRAY, NULL) < 0)
4370c1bb86cdSEric Blake perror("CDROMEJECT");
4371c1bb86cdSEric Blake }
4372c1bb86cdSEric Blake }
4373c1bb86cdSEric Blake
/* Lock or unlock the CD-ROM tray door via CDROM_LOCKDOOR. */
static void coroutine_fn cdrom_co_lock_medium(BlockDriverState *bs, bool locked)
{
    BDRVRawState *s = bs->opaque;

    if (ioctl(s->fd, CDROM_LOCKDOOR, locked) < 0) {
        /*
         * Note: an error can happen if the distribution automatically
         * mounts the CD-ROM
         */
        /* perror("CDROM_LOCKDOOR"); */
    }
}
4386c1bb86cdSEric Blake
/* Protocol driver for physical CD-ROM drives on Linux (host_cdrom:). */
static BlockDriver bdrv_host_cdrom = {
    .format_name = "host_cdrom",
    .protocol_name = "host_cdrom",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe_device  = cdrom_probe_device,
    .bdrv_parse_filename = cdrom_parse_filename,
    .bdrv_open = cdrom_open,
    .bdrv_close = raw_close,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit  = raw_reopen_commit,
    .bdrv_reopen_abort   = raw_reopen_abort,
    /* image creation is not supported; only the trivial fallback */
    .bdrv_co_create_opts = bdrv_co_create_opts_simple,
    .create_opts         = &bdrv_create_opts_simple,
    .mutable_opts        = mutable_opts,
    .bdrv_co_invalidate_cache = raw_co_invalidate_cache,

    .bdrv_co_preadv         = raw_co_preadv,
    .bdrv_co_pwritev        = raw_co_pwritev,
    .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
    .bdrv_refresh_limits    = cdrom_refresh_limits,

    .bdrv_co_truncate                 = raw_co_truncate,
    .bdrv_co_getlength                = raw_co_getlength,
    .bdrv_co_get_allocated_file_size  = raw_co_get_allocated_file_size,

    /* removable device support */
    .bdrv_co_is_inserted    = cdrom_co_is_inserted,
    .bdrv_co_eject          = cdrom_co_eject,
    .bdrv_co_lock_medium    = cdrom_co_lock_medium,

    /* generic scsi device */
    .bdrv_co_ioctl          = hdev_co_ioctl,
};
4421c1bb86cdSEric Blake #endif /* __linux__ */
4422c1bb86cdSEric Blake
4423c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
/*
 * Open a FreeBSD CD-ROM device node and make sure the tray door is not
 * locked.  Returns 0 on success or a negative errno value.
 */
static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    s->type = FTYPE_CD;

    ret = raw_open_common(bs, options, flags, 0, true, errp);
    if (ret) {
        return ret;
    }

    /* make sure the door isn't locked at this time */
    ioctl(s->fd, CDIOCALLOW);
    return 0;
}
4441c1bb86cdSEric Blake
cdrom_probe_device(const char * filename)4442c1bb86cdSEric Blake static int cdrom_probe_device(const char *filename)
4443c1bb86cdSEric Blake {
4444c1bb86cdSEric Blake if (strstart(filename, "/dev/cd", NULL) ||
4445c1bb86cdSEric Blake strstart(filename, "/dev/acd", NULL))
4446c1bb86cdSEric Blake return 100;
4447c1bb86cdSEric Blake return 0;
4448c1bb86cdSEric Blake }
4449c1bb86cdSEric Blake
/*
 * Close and reopen the CD device.
 *
 * Force reread of possibly changed/newly loaded disc,
 * FreeBSD seems to not notice sometimes...
 * Returns 0 on success, -EIO if the device cannot be reopened (s->fd is
 * then left at -1).
 */
static int cdrom_reopen(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int fd;

    if (s->fd >= 0) {
        qemu_close(s->fd);
    }

    fd = qemu_open(bs->filename, s->open_flags, NULL);
    if (fd < 0) {
        s->fd = -1;
        return -EIO;
    }
    s->fd = fd;

    /* make sure the door isn't locked at this time */
    ioctl(s->fd, CDIOCALLOW);
    return 0;
}
4472c1bb86cdSEric Blake
/* On FreeBSD a positive device length implies a medium is present. */
static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs)
{
    return raw_getlength(bs) > 0;
}
4477c1bb86cdSEric Blake
cdrom_co_eject(BlockDriverState * bs,bool eject_flag)44782531b390SEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag)
4479c1bb86cdSEric Blake {
4480c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
4481c1bb86cdSEric Blake
4482c1bb86cdSEric Blake if (s->fd < 0)
4483c1bb86cdSEric Blake return;
4484c1bb86cdSEric Blake
4485c1bb86cdSEric Blake (void) ioctl(s->fd, CDIOCALLOW);
4486c1bb86cdSEric Blake
4487c1bb86cdSEric Blake if (eject_flag) {
4488c1bb86cdSEric Blake if (ioctl(s->fd, CDIOCEJECT) < 0)
4489c1bb86cdSEric Blake perror("CDIOCEJECT");
4490c1bb86cdSEric Blake } else {
4491c1bb86cdSEric Blake if (ioctl(s->fd, CDIOCCLOSE) < 0)
4492c1bb86cdSEric Blake perror("CDIOCCLOSE");
4493c1bb86cdSEric Blake }
4494c1bb86cdSEric Blake
4495c1bb86cdSEric Blake cdrom_reopen(bs);
4496c1bb86cdSEric Blake }
4497c1bb86cdSEric Blake
/* Lock (CDIOCPREVENT) or unlock (CDIOCALLOW) the CD tray door. */
static void coroutine_fn cdrom_co_lock_medium(BlockDriverState *bs, bool locked)
{
    BDRVRawState *s = bs->opaque;

    if (s->fd < 0)
        return;
    if (ioctl(s->fd, (locked ? CDIOCPREVENT : CDIOCALLOW)) < 0) {
        /*
         * Note: an error can happen if the distribution automatically
         * mounts the CD-ROM
         */
        /* perror("CDROM_LOCKDOOR"); */
    }
}
4512c1bb86cdSEric Blake
/* Protocol driver for physical CD-ROM drives on FreeBSD (host_cdrom:). */
static BlockDriver bdrv_host_cdrom = {
    .format_name = "host_cdrom",
    .protocol_name = "host_cdrom",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe_device = cdrom_probe_device,
    .bdrv_parse_filename = cdrom_parse_filename,
    .bdrv_open = cdrom_open,
    .bdrv_close = raw_close,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit  = raw_reopen_commit,
    .bdrv_reopen_abort   = raw_reopen_abort,
    /* image creation is not supported; only the trivial fallback */
    .bdrv_co_create_opts = bdrv_co_create_opts_simple,
    .create_opts         = &bdrv_create_opts_simple,
    .mutable_opts        = mutable_opts,

    .bdrv_co_preadv         = raw_co_preadv,
    .bdrv_co_pwritev        = raw_co_pwritev,
    .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
    .bdrv_refresh_limits    = cdrom_refresh_limits,

    .bdrv_co_truncate                 = raw_co_truncate,
    .bdrv_co_getlength                = raw_co_getlength,
    .bdrv_co_get_allocated_file_size  = raw_co_get_allocated_file_size,

    /* removable device support */
    .bdrv_co_is_inserted    = cdrom_co_is_inserted,
    .bdrv_co_eject          = cdrom_co_eject,
    .bdrv_co_lock_medium    = cdrom_co_lock_medium,
};
4543c1bb86cdSEric Blake #endif /* __FreeBSD__ */
4544c1bb86cdSEric Blake
454514176c8dSJoelle van Dyne #endif /* HAVE_HOST_BLOCK_DEVICE */
454614176c8dSJoelle van Dyne
/* Invoked at startup (via block_init) to register the posix drivers. */
static void bdrv_file_init(void)
{
    /*
     * Register all the drivers. Note that order is important, the driver
     * registered last will get probed first.
     */
    bdrv_register(&bdrv_file);
#if defined(HAVE_HOST_BLOCK_DEVICE)
    bdrv_register(&bdrv_host_device);
#ifdef __linux__
    bdrv_register(&bdrv_host_cdrom);
#endif
    /* Only one of the two bdrv_host_cdrom definitions can be compiled in */
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    bdrv_register(&bdrv_host_cdrom);
#endif
#endif /* HAVE_HOST_BLOCK_DEVICE */
}
4564c1bb86cdSEric Blake
4565c1bb86cdSEric Blake block_init(bdrv_file_init);
4566