/*
 * QEMU I/O channels
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "block/aio-wait.h"
#include "io/channel.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/iov.h"

bool qio_channel_has_feature(QIOChannel *ioc,
                             QIOChannelFeature feature)
{
    return ioc->features & (1 << feature);
}


void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature)
{
    ioc->features |= (1 << feature);
}


void qio_channel_set_name(QIOChannel *ioc,
                          const char *name)
{
    g_free(ioc->name);
    ioc->name = g_strdup(name);
}


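/*
 * Validate optional capabilities before dispatching to the channel
 * implementation: receiving file descriptors requires the FD_PASS
 * feature, and QIO_CHANNEL_READ_FLAG_MSG_PEEK requires READ_MSG_PEEK.
 */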
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               int flags,
                               Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if ((fds || nfds) &&
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
        error_setg_errno(errp, EINVAL,
                         "Channel does not support file descriptor passing");
        return -1;
    }

    if ((flags & QIO_CHANNEL_READ_FLAG_MSG_PEEK) &&
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        error_setg_errno(errp, EINVAL,
                         "Channel does not support peek read");
        return -1;
    }

    return klass->io_readv(ioc, iov, niov, fds, nfds, flags, errp);
}


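/*
 * Writes are validated the same way: sending file descriptors needs
 * the FD_PASS feature, and zero copy is mutually exclusive with FD
 * passing and only permitted when WRITE_ZERO_COPY is available.
 */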
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                int flags,
                                Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (fds || nfds) {
        if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
            error_setg_errno(errp, EINVAL,
                             "Channel does not support file descriptor passing");
            return -1;
        }
        if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
            error_setg_errno(errp, EINVAL,
                             "Zero Copy does not support file descriptor passing");
            return -1;
        }
    }

    if ((flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) &&
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
        error_setg_errno(errp, EINVAL,
                         "Requested Zero Copy feature is not available");
        return -1;
    }

    return klass->io_writev(ioc, iov, niov, fds, nfds, flags, errp);
}


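/*
 * The *_all_eof variants below return 1 on success, 0 on a clean EOF
 * before any data was read, and -1 on error; the plain *_all variants
 * fold the early-EOF case into an error and return 0 or -1.
 */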
int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
                                                 const struct iovec *iov,
                                                 size_t niov,
                                                 Error **errp)
{
    return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, errp);
}

int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc,
                                             const struct iovec *iov,
                                             size_t niov,
                                             Error **errp)
{
    return qio_channel_readv_full_all(ioc, iov, niov, NULL, NULL, errp);
}

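/*
 * Loop until the entire iovec has been filled. When the channel would
 * block, either yield the current coroutine or spin a nested main
 * loop, then retry. File descriptors are only collected on the first
 * iteration; on error, any FDs already received are closed and freed.
 */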
int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
                                                      const struct iovec *iov,
                                                      size_t niov,
                                                      int **fds, size_t *nfds,
                                                      Error **errp)
{
    int ret = -1;
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;
    int **local_fds = fds;
    size_t *local_nfds = nfds;
    bool partial = false;

    if (nfds) {
        *nfds = 0;
    }

    if (fds) {
        *fds = NULL;
    }

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    while ((nlocal_iov > 0) || local_fds) {
        ssize_t len;
        len = qio_channel_readv_full(ioc, local_iov, nlocal_iov, local_fds,
                                     local_nfds, 0, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_IN);
            } else {
                qio_channel_wait(ioc, G_IO_IN);
            }
            continue;
        }

        if (len == 0) {
            if (local_nfds && *local_nfds) {
                /*
                 * Got some FDs, but no data yet. This isn't an EOF
                 * scenario (yet), so carry on to try to read data
                 * on next loop iteration
                 */
                goto next_iter;
            } else if (!partial) {
                /* No fds and no data - EOF before any data read */
                ret = 0;
                goto cleanup;
            } else {
                len = -1;
                error_setg(errp,
                           "Unexpected end-of-file before all data were read");
                /* Fallthrough into len < 0 handling */
            }
        }

        if (len < 0) {
            /* Close any FDs we previously received */
            if (nfds && fds) {
                size_t i;
                for (i = 0; i < (*nfds); i++) {
                    close((*fds)[i]);
                }
                g_free(*fds);
                *fds = NULL;
                *nfds = 0;
            }
            goto cleanup;
        }

        if (nlocal_iov) {
            iov_discard_front(&local_iov, &nlocal_iov, len);
        }

 next_iter:
        partial = true;
        local_fds = NULL;
        local_nfds = NULL;
    }

    ret = 1;

 cleanup:
    g_free(local_iov_head);
    return ret;
}

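/*
 * Map the tri-state result of the _eof variant onto a plain 0/-1
 * return, treating EOF before all data was read as an error.
 */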
int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc,
                                                  const struct iovec *iov,
                                                  size_t niov,
                                                  int **fds, size_t *nfds,
                                                  Error **errp)
{
    int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, errp);

    if (ret == 0) {
        error_setg(errp, "Unexpected end-of-file before all data were read");
        return -1;
    }
    if (ret == 1) {
        return 0;
    }

    return ret;
}

int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc,
                                              const struct iovec *iov,
                                              size_t niov,
                                              Error **errp)
{
    return qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, 0, errp);
}

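/*
 * Write the entire iovec, retrying after would-block conditions just
 * as the read loop does. Any FDs are sent with the first successful
 * write; afterwards fds/nfds are cleared so they are not sent twice.
 */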
int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
                                                   const struct iovec *iov,
                                                   size_t niov,
                                                   int *fds, size_t nfds,
                                                   int flags, Error **errp)
{
    int ret = -1;
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    while (nlocal_iov > 0) {
        ssize_t len;

        len = qio_channel_writev_full(ioc, local_iov, nlocal_iov, fds,
                                      nfds, flags, errp);

        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_OUT);
            } else {
                qio_channel_wait(ioc, G_IO_OUT);
            }
            continue;
        }
        if (len < 0) {
            goto cleanup;
        }

        iov_discard_front(&local_iov, &nlocal_iov, len);

        fds = NULL;
        nfds = 0;
    }

    ret = 0;
 cleanup:
    g_free(local_iov_head);
    return ret;
}

ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp)
{
    return qio_channel_readv_full(ioc, iov, niov, NULL, NULL, 0, errp);
}


ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp)
{
    return qio_channel_writev_full(ioc, iov, niov, NULL, 0, 0, errp);
}


ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = buflen };
    return qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, 0, errp);
}


ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp)
{
    struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
    return qio_channel_writev_full(ioc, &iov, 1, NULL, 0, 0, errp);
}


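/*
 * The single-buffer helpers below wrap the iovec-based APIs. As an
 * illustrative (hypothetical) example, a caller sending a fixed-size
 * header might write:
 *
 *     if (qio_channel_write_all(ioc, (char *)&hdr, sizeof(hdr),
 *                               errp) < 0) {
 *         return -1;
 *     }
 */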
int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc,
                                                char *buf,
                                                size_t buflen,
                                                Error **errp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = buflen };
    return qio_channel_readv_all_eof(ioc, &iov, 1, errp);
}


int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc,
                                            char *buf,
                                            size_t buflen,
                                            Error **errp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = buflen };
    return qio_channel_readv_all(ioc, &iov, 1, errp);
}


int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc,
                                             const char *buf,
                                             size_t buflen,
                                             Error **errp)
{
    struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
    return qio_channel_writev_all(ioc, &iov, 1, errp);
}


int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
    return klass->io_set_blocking(ioc, enabled, errp);
}


void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled)
{
    ioc->follow_coroutine_ctx = enabled;
}


int qio_channel_close(QIOChannel *ioc,
                      Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
    return klass->io_close(ioc, errp);
}


GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
    GSource *ret = klass->io_create_watch(ioc, condition);

    if (ioc->name) {
        g_source_set_name(ret, ioc->name);
    }

    return ret;
}


void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *read_ctx,
                                    IOHandler *io_read,
                                    AioContext *write_ctx,
                                    IOHandler *io_write,
                                    void *opaque)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    klass->io_set_aio_fd_handler(ioc, read_ctx, io_read, write_ctx, io_write,
                                 opaque);
}

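/*
 * Create a watch, bind the callback and attach it to a GMainContext in
 * one step. The returned ID can later be removed with g_source_remove()
 * (for the default context). A sketch of a hypothetical caller:
 *
 *     static gboolean on_readable(QIOChannel *ioc, GIOCondition cond,
 *                                 gpointer opaque)
 *     {
 *         ... consume data ...
 *         return G_SOURCE_CONTINUE;
 *     }
 *
 *     qio_channel_add_watch(ioc, G_IO_IN, on_readable, state, NULL);
 */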
guint qio_channel_add_watch_full(QIOChannel *ioc,
                                 GIOCondition condition,
                                 QIOChannelFunc func,
                                 gpointer user_data,
                                 GDestroyNotify notify,
                                 GMainContext *context)
{
    GSource *source;
    guint id;

    source = qio_channel_create_watch(ioc, condition);

    g_source_set_callback(source, (GSourceFunc)func, user_data, notify);

    id = g_source_attach(source, context);
    g_source_unref(source);

    return id;
}

guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify)
{
    return qio_channel_add_watch_full(ioc, condition, func,
                                      user_data, notify, NULL);
}

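/*
 * As qio_channel_add_watch_full(), but hand back the GSource itself;
 * the caller owns the extra reference taken here and must drop it
 * with g_source_unref().
 */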
GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                      GIOCondition condition,
                                      QIOChannelFunc func,
                                      gpointer user_data,
                                      GDestroyNotify notify,
                                      GMainContext *context)
{
    GSource *source;
    guint id;

    id = qio_channel_add_watch_full(ioc, condition, func,
                                    user_data, notify, context);
    source = g_main_context_find_source_by_id(context, id);
    g_source_ref(source);
    return source;
}


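/*
 * Positional I/O: these require both an io_pwritev/io_preadv
 * implementation and the SEEKABLE feature, since they address the
 * channel at an explicit offset rather than the current position.
 */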
ssize_t qio_channel_pwritev(QIOChannel *ioc, const struct iovec *iov,
                            size_t niov, off_t offset, Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_pwritev) {
        error_setg(errp, "Channel does not support pwritev");
        return -1;
    }

    if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SEEKABLE)) {
        error_setg_errno(errp, EINVAL, "Requested channel is not seekable");
        return -1;
    }

    return klass->io_pwritev(ioc, iov, niov, offset, errp);
}

ssize_t qio_channel_pwrite(QIOChannel *ioc, char *buf, size_t buflen,
                           off_t offset, Error **errp)
{
    struct iovec iov = {
        .iov_base = buf,
        .iov_len = buflen
    };

    return qio_channel_pwritev(ioc, &iov, 1, offset, errp);
}

ssize_t qio_channel_preadv(QIOChannel *ioc, const struct iovec *iov,
                           size_t niov, off_t offset, Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_preadv) {
        error_setg(errp, "Channel does not support preadv");
        return -1;
    }

    if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SEEKABLE)) {
        error_setg_errno(errp, EINVAL, "Requested channel is not seekable");
        return -1;
    }

    return klass->io_preadv(ioc, iov, niov, offset, errp);
}

ssize_t qio_channel_pread(QIOChannel *ioc, char *buf, size_t buflen,
                          off_t offset, Error **errp)
{
    struct iovec iov = {
        .iov_base = buf,
        .iov_len = buflen
    };

    return qio_channel_preadv(ioc, &iov, 1, offset, errp);
}

int qio_channel_shutdown(QIOChannel *ioc,
                         QIOChannelShutdown how,
                         Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_shutdown) {
        error_setg(errp, "Data path shutdown not supported");
        return -1;
    }

    return klass->io_shutdown(ioc, how, errp);
}


void qio_channel_set_delay(QIOChannel *ioc,
                           bool enabled)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (klass->io_set_delay) {
        klass->io_set_delay(ioc, enabled);
    }
}


void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (klass->io_set_cork) {
        klass->io_set_cork(ioc, enabled);
    }
}

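/*
 * Note the return value only distinguishes "supported" from "not
 * supported"; a supported io_peerpid reports failure through errp
 * while this wrapper still returns 0, so callers must check errp.
 */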
int qio_channel_get_peerpid(QIOChannel *ioc,
                            unsigned int *pid,
                            Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_peerpid) {
        error_setg(errp, "Channel does not support peer pid");
        return -1;
    }
    klass->io_peerpid(ioc, pid, errp);
    return 0;
}

off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_seek) {
        error_setg(errp, "Channel does not support random access");
        return -1;
    }

    return klass->io_seek(ioc, offset, whence, errp);
}

int qio_channel_flush(QIOChannel *ioc,
                      Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_flush ||
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
        return 0;
    }

    return klass->io_flush(ioc, errp);
}


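/*
 * FD-readiness callbacks for coroutines parked in qio_channel_yield().
 * The qatomic_xchg() both fetches and clears the coroutine pointer, so
 * a concurrent qio_channel_wake_read() and this handler cannot both
 * wake the same coroutine.
 */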
static void qio_channel_restart_read(void *opaque)
{
    QIOChannel *ioc = opaque;
    Coroutine *co = qatomic_xchg(&ioc->read_coroutine, NULL);

    if (!co) {
        return;
    }

    /* Assert that aio_co_wake() reenters the coroutine directly */
    assert(qemu_get_current_aio_context() ==
           qemu_coroutine_get_aio_context(co));
    aio_co_wake(co);
}

static void qio_channel_restart_write(void *opaque)
{
    QIOChannel *ioc = opaque;
    Coroutine *co = qatomic_xchg(&ioc->write_coroutine, NULL);

    if (!co) {
        return;
    }

    /* Assert that aio_co_wake() reenters the coroutine directly */
    assert(qemu_get_current_aio_context() ==
           qemu_coroutine_get_aio_context(co));
    aio_co_wake(co);
}

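/*
 * Register the current coroutine as waiting for @condition and install
 * the matching restart handler. If the opposite direction already has
 * a waiter in the same AioContext, its handler is (re)installed too so
 * that a single io_set_aio_fd_handler call covers both directions.
 */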
static void coroutine_fn
qio_channel_set_fd_handlers(QIOChannel *ioc, GIOCondition condition)
{
    AioContext *ctx = ioc->follow_coroutine_ctx ?
        qemu_coroutine_get_aio_context(qemu_coroutine_self()) :
        iohandler_get_aio_context();
    AioContext *read_ctx = NULL;
    IOHandler *io_read = NULL;
    AioContext *write_ctx = NULL;
    IOHandler *io_write = NULL;

    if (condition == G_IO_IN) {
        ioc->read_coroutine = qemu_coroutine_self();
        ioc->read_ctx = ctx;
        read_ctx = ctx;
        io_read = qio_channel_restart_read;

        /*
         * Thread safety: if the other coroutine is set and its AioContext
         * matches ours, then there is mutual exclusion between read and write
         * because they share a single thread and it's safe to set both read
         * and write fd handlers here. If the AioContext does not match ours,
         * then both threads may run in parallel but there is no shared state
         * to worry about.
         */
        if (ioc->write_coroutine && ioc->write_ctx == ctx) {
            write_ctx = ctx;
            io_write = qio_channel_restart_write;
        }
    } else if (condition == G_IO_OUT) {
        ioc->write_coroutine = qemu_coroutine_self();
        ioc->write_ctx = ctx;
        write_ctx = ctx;
        io_write = qio_channel_restart_write;
        if (ioc->read_coroutine && ioc->read_ctx == ctx) {
            read_ctx = ctx;
            io_read = qio_channel_restart_read;
        }
    } else {
        abort();
    }

    qio_channel_set_aio_fd_handler(ioc, read_ctx, io_read,
                                   write_ctx, io_write, ioc);
}

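/*
 * Inverse of qio_channel_set_fd_handlers(): drop the handler for
 * @condition while preserving any still-active waiter in the other
 * direction within the same AioContext.
 */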
static void coroutine_fn
qio_channel_clear_fd_handlers(QIOChannel *ioc, GIOCondition condition)
{
    AioContext *read_ctx = NULL;
    IOHandler *io_read = NULL;
    AioContext *write_ctx = NULL;
    IOHandler *io_write = NULL;
    AioContext *ctx;

    if (condition == G_IO_IN) {
        ctx = ioc->read_ctx;
        read_ctx = ctx;
        io_read = NULL;
        if (ioc->write_coroutine && ioc->write_ctx == ctx) {
            write_ctx = ctx;
            io_write = qio_channel_restart_write;
        }
    } else if (condition == G_IO_OUT) {
        ctx = ioc->write_ctx;
        write_ctx = ctx;
        io_write = NULL;
        if (ioc->read_coroutine && ioc->read_ctx == ctx) {
            read_ctx = ctx;
            io_read = qio_channel_restart_read;
        }
    } else {
        abort();
    }

    qio_channel_set_aio_fd_handler(ioc, read_ctx, io_read,
                                   write_ctx, io_write, ioc);
}

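/*
 * Park the calling coroutine until the channel becomes readable or
 * writable. The coroutine may also be woken by other means (e.g.
 * qio_channel_wake_read()), in which case read_coroutine/write_coroutine
 * have already been cleared by whoever woke us, as the asserts below
 * verify.
 */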
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition)
{
    AioContext *ioc_ctx;

    assert(qemu_in_coroutine());
    ioc_ctx = qemu_coroutine_get_aio_context(qemu_coroutine_self());

    if (condition == G_IO_IN) {
        assert(!ioc->read_coroutine);
    } else if (condition == G_IO_OUT) {
        assert(!ioc->write_coroutine);
    } else {
        abort();
    }
    qio_channel_set_fd_handlers(ioc, condition);
    qemu_coroutine_yield();
    assert(in_aio_context_home_thread(ioc_ctx));

    /* Allow interrupting the operation by reentering the coroutine other than
     * through the aio_fd_handlers. */
    if (condition == G_IO_IN) {
        assert(ioc->read_coroutine == NULL);
    } else if (condition == G_IO_OUT) {
        assert(ioc->write_coroutine == NULL);
    }
    qio_channel_clear_fd_handlers(ioc, condition);
}

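/*
 * Wake a coroutine parked in qio_channel_yield(G_IO_IN) without waiting
 * for the fd to become readable; safe against a concurrent
 * qio_channel_restart_read() thanks to the atomic exchange.
 */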
void qio_channel_wake_read(QIOChannel *ioc)
{
    Coroutine *co = qatomic_xchg(&ioc->read_coroutine, NULL);
    if (co) {
        aio_co_wake(co);
    }
}

static gboolean qio_channel_wait_complete(QIOChannel *ioc,
                                          GIOCondition condition,
                                          gpointer opaque)
{
    GMainLoop *loop = opaque;

    g_main_loop_quit(loop);
    return FALSE;
}


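/*
 * Synchronous (non-coroutine) wait: spin a private GMainContext with a
 * one-shot watch that quits the loop as soon as @condition is met.
 */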
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition)
{
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, TRUE);
    GSource *source;

    source = qio_channel_create_watch(ioc, condition);

    g_source_set_callback(source,
                          (GSourceFunc)qio_channel_wait_complete,
                          loop,
                          NULL);

    g_source_attach(source, ctxt);

    g_main_loop_run(loop);

    g_source_unref(source);
    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);
}


static void qio_channel_finalize(Object *obj)
{
    QIOChannel *ioc = QIO_CHANNEL(obj);

    /* Must not have coroutines in qio_channel_yield() */
    assert(!ioc->read_coroutine);
    assert(!ioc->write_coroutine);

    g_free(ioc->name);

#ifdef _WIN32
    if (ioc->event) {
        CloseHandle(ioc->event);
    }
#endif
}

static const TypeInfo qio_channel_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_QIO_CHANNEL,
    .instance_size = sizeof(QIOChannel),
    .instance_finalize = qio_channel_finalize,
    .abstract = true,
    .class_size = sizeof(QIOChannelClass),
};


static void qio_channel_register_types(void)
{
    type_register_static(&qio_channel_info);
}


type_init(qio_channel_register_types);