1 /*
2 * QEMU I/O channels block driver
3 *
4 * Copyright (c) 2022 Red Hat, Inc.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 *
19 */
20
21 #include "qemu/osdep.h"
22 #include "migration/channel-block.h"
23 #include "qapi/error.h"
24 #include "block/block.h"
25 #include "trace.h"
26
27 QIOChannelBlock *
qio_channel_block_new(BlockDriverState * bs)28 qio_channel_block_new(BlockDriverState *bs)
29 {
30 QIOChannelBlock *ioc;
31
32 ioc = QIO_CHANNEL_BLOCK(object_new(TYPE_QIO_CHANNEL_BLOCK));
33
34 bdrv_ref(bs);
35 ioc->bs = bs;
36
37 return ioc;
38 }
39
40
41 static void
qio_channel_block_finalize(Object * obj)42 qio_channel_block_finalize(Object *obj)
43 {
44 QIOChannelBlock *ioc = QIO_CHANNEL_BLOCK(obj);
45
46 g_clear_pointer(&ioc->bs, bdrv_unref);
47 }
48
49
50 static ssize_t
qio_channel_block_readv(QIOChannel * ioc,const struct iovec * iov,size_t niov,int ** fds,size_t * nfds,int flags,Error ** errp)51 qio_channel_block_readv(QIOChannel *ioc,
52 const struct iovec *iov,
53 size_t niov,
54 int **fds,
55 size_t *nfds,
56 int flags,
57 Error **errp)
58 {
59 QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc);
60 QEMUIOVector qiov;
61 int ret;
62
63 qemu_iovec_init_external(&qiov, (struct iovec *)iov, niov);
64 ret = bdrv_readv_vmstate(bioc->bs, &qiov, bioc->offset);
65 if (ret < 0) {
66 error_setg_errno(errp, -ret, "bdrv_readv_vmstate failed");
67 return -1;
68 }
69
70 bioc->offset += qiov.size;
71 return qiov.size;
72 }
73
74
75 static ssize_t
qio_channel_block_writev(QIOChannel * ioc,const struct iovec * iov,size_t niov,int * fds,size_t nfds,int flags,Error ** errp)76 qio_channel_block_writev(QIOChannel *ioc,
77 const struct iovec *iov,
78 size_t niov,
79 int *fds,
80 size_t nfds,
81 int flags,
82 Error **errp)
83 {
84 QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc);
85 QEMUIOVector qiov;
86 int ret;
87
88 qemu_iovec_init_external(&qiov, (struct iovec *)iov, niov);
89 ret = bdrv_writev_vmstate(bioc->bs, &qiov, bioc->offset);
90 if (ret < 0) {
91 error_setg_errno(errp, -ret, "bdrv_writev_vmstate failed");
92 return -1;
93 }
94
95 bioc->offset += qiov.size;
96 return qiov.size;
97 }
98
99
100 static int
qio_channel_block_set_blocking(QIOChannel * ioc,bool enabled,Error ** errp)101 qio_channel_block_set_blocking(QIOChannel *ioc,
102 bool enabled,
103 Error **errp)
104 {
105 if (!enabled) {
106 error_setg(errp, "Non-blocking mode not supported for block devices");
107 return -1;
108 }
109 return 0;
110 }
111
112
113 static off_t
qio_channel_block_seek(QIOChannel * ioc,off_t offset,int whence,Error ** errp)114 qio_channel_block_seek(QIOChannel *ioc,
115 off_t offset,
116 int whence,
117 Error **errp)
118 {
119 QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc);
120
121 switch (whence) {
122 case SEEK_SET:
123 bioc->offset = offset;
124 break;
125 case SEEK_CUR:
126 bioc->offset += whence;
127 break;
128 case SEEK_END:
129 error_setg(errp, "Size of VMstate region is unknown");
130 return (off_t)-1;
131 default:
132 g_assert_not_reached();
133 }
134
135 return bioc->offset;
136 }
137
138
139 static int
qio_channel_block_close(QIOChannel * ioc,Error ** errp)140 qio_channel_block_close(QIOChannel *ioc,
141 Error **errp)
142 {
143 QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc);
144 int rv = bdrv_flush(bioc->bs);
145
146 if (rv < 0) {
147 error_setg_errno(errp, -rv,
148 "Unable to flush VMState");
149 return -1;
150 }
151
152 g_clear_pointer(&bioc->bs, bdrv_unref);
153 bioc->offset = 0;
154
155 return 0;
156 }
157
158
/*
 * No-op: a block device has no file descriptor to watch, so there
 * is nothing to register with the AioContext.  Reads and writes on
 * this channel never block on readiness (bdrv_*_vmstate completes
 * synchronously from the caller's perspective), so callers get no
 * notifications and need none.
 */
static void
qio_channel_block_set_aio_fd_handler(QIOChannel *ioc,
                                     AioContext *read_ctx,
                                     IOHandler *io_read,
                                     AioContext *write_ctx,
                                     IOHandler *io_write,
                                     void *opaque)
{
    /* XXX anything we can do here ? */
}
169
170
171 static void
qio_channel_block_class_init(ObjectClass * klass,void * class_data G_GNUC_UNUSED)172 qio_channel_block_class_init(ObjectClass *klass,
173 void *class_data G_GNUC_UNUSED)
174 {
175 QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
176
177 ioc_klass->io_writev = qio_channel_block_writev;
178 ioc_klass->io_readv = qio_channel_block_readv;
179 ioc_klass->io_set_blocking = qio_channel_block_set_blocking;
180 ioc_klass->io_seek = qio_channel_block_seek;
181 ioc_klass->io_close = qio_channel_block_close;
182 ioc_klass->io_set_aio_fd_handler = qio_channel_block_set_aio_fd_handler;
183 }
184
185 static const TypeInfo qio_channel_block_info = {
186 .parent = TYPE_QIO_CHANNEL,
187 .name = TYPE_QIO_CHANNEL_BLOCK,
188 .instance_size = sizeof(QIOChannelBlock),
189 .instance_finalize = qio_channel_block_finalize,
190 .class_init = qio_channel_block_class_init,
191 };
192
193 static void
qio_channel_block_register_types(void)194 qio_channel_block_register_types(void)
195 {
196 type_register_static(&qio_channel_block_info);
197 }
198
199 type_init(qio_channel_block_register_types);
200