/*
 * QEMU I/O channels
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef QIO_CHANNEL_H
#define QIO_CHANNEL_H

#include "qom/object.h"
#include "qemu/coroutine-core.h"
#include "block/aio.h"

#define TYPE_QIO_CHANNEL "qio-channel"
OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass,
                    QIO_CHANNEL)


#define QIO_CHANNEL_ERR_BLOCK -2

#define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1

#define QIO_CHANNEL_READ_FLAG_MSG_PEEK 0x1

typedef enum QIOChannelFeature QIOChannelFeature;

enum QIOChannelFeature {
    QIO_CHANNEL_FEATURE_FD_PASS,
    QIO_CHANNEL_FEATURE_SHUTDOWN,
    QIO_CHANNEL_FEATURE_LISTEN,
    QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY,
    QIO_CHANNEL_FEATURE_READ_MSG_PEEK,
};


typedef enum QIOChannelShutdown QIOChannelShutdown;

enum QIOChannelShutdown {
    QIO_CHANNEL_SHUTDOWN_READ = 1,
    QIO_CHANNEL_SHUTDOWN_WRITE = 2,
    QIO_CHANNEL_SHUTDOWN_BOTH = 3,
};

typedef gboolean (*QIOChannelFunc)(QIOChannel *ioc,
                                   GIOCondition condition,
                                   gpointer data);

/**
 * QIOChannel:
 *
 * The QIOChannel defines the core API for a generic I/O channel
 * class hierarchy. It is inspired by GIOChannel, but has the
 * following differences
 *
 *  - Use QOM to properly support arbitrary subclassing
 *  - Support use of iovecs for efficient I/O with multiple blocks
 *  - None of the character set translation, binary data exclusively
 *  - Direct support for QEMU Error object reporting
 *  - File descriptor passing
 *
 * This base class is abstract so cannot be instantiated. There
 * will be subclasses for dealing with sockets, files, and higher
 * level protocols such as TLS, WebSocket, etc.
 */

struct QIOChannel {
    Object parent;
    unsigned int features; /* bitmask of QIOChannelFeatures */
    char *name;
    AioContext *ctx;
    Coroutine *read_coroutine;
    Coroutine *write_coroutine;
#ifdef _WIN32
    HANDLE event; /* For use with GSource on Win32 */
#endif
};

/**
 * QIOChannelClass:
 *
 * This class defines the contract that all subclasses
 * must follow to provide specific channel implementations.
 * The first five callbacks are mandatory to support, others
 * provide additional optional features.
 *
 * Consult the corresponding public API docs for a description
 * of the semantics of each callback. io_shutdown in particular
 * must be thread-safe, terminate quickly and must not block.
 */
struct QIOChannelClass {
    ObjectClass parent;

    /* Mandatory callbacks */
    ssize_t (*io_writev)(QIOChannel *ioc,
                         const struct iovec *iov,
                         size_t niov,
                         int *fds,
                         size_t nfds,
                         int flags,
                         Error **errp);
    ssize_t (*io_readv)(QIOChannel *ioc,
                        const struct iovec *iov,
                        size_t niov,
                        int **fds,
                        size_t *nfds,
                        int flags,
                        Error **errp);
    int (*io_close)(QIOChannel *ioc,
                    Error **errp);
    GSource * (*io_create_watch)(QIOChannel *ioc,
                                 GIOCondition condition);
    int (*io_set_blocking)(QIOChannel *ioc,
                           bool enabled,
                           Error **errp);

    /* Optional callbacks */
    int (*io_shutdown)(QIOChannel *ioc,
                       QIOChannelShutdown how,
                       Error **errp);
    void (*io_set_cork)(QIOChannel *ioc,
                        bool enabled);
    void (*io_set_delay)(QIOChannel *ioc,
                         bool enabled);
    off_t (*io_seek)(QIOChannel *ioc,
                     off_t offset,
                     int whence,
                     Error **errp);
    void (*io_set_aio_fd_handler)(QIOChannel *ioc,
                                  AioContext *ctx,
                                  IOHandler *io_read,
                                  IOHandler *io_write,
                                  void *opaque);
    int (*io_flush)(QIOChannel *ioc,
                    Error **errp);
};
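
/*
 * As an illustrative sketch only (the "foo" type name and callback
 * functions below are hypothetical, not part of this API): a subclass
 * typically declares a QOM type whose class struct embeds
 * QIOChannelClass, and fills in at least the mandatory callbacks from
 * its class_init function:
 *
 *   static void qio_channel_foo_class_init(ObjectClass *klass,
 *                                          void *class_data)
 *   {
 *       QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
 *
 *       ioc_klass->io_writev = qio_channel_foo_writev;
 *       ioc_klass->io_readv = qio_channel_foo_readv;
 *       ioc_klass->io_close = qio_channel_foo_close;
 *       ioc_klass->io_create_watch = qio_channel_foo_create_watch;
 *       ioc_klass->io_set_blocking = qio_channel_foo_set_blocking;
 *   }
 */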

/* General I/O handling functions */

/**
 * qio_channel_has_feature:
 * @ioc: the channel object
 * @feature: the feature to check support of
 *
 * Determine whether the channel implementation supports
 * the optional feature named in @feature.
 *
 * Returns: true if supported, false otherwise.
 */
bool qio_channel_has_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_feature:
 * @ioc: the channel object
 * @feature: the feature to set support for
 *
 * Add channel support for the feature named in @feature.
 */
void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_name:
 * @ioc: the channel object
 * @name: the name of the channel
 *
 * Sets the name of the channel, which serves as an aid
 * to debugging. The name is used when creating GSource
 * watches for this channel.
 */
void qio_channel_set_name(QIOChannel *ioc,
                          const char *name);

/**
 * qio_channel_readv_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive the file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @flags: read flags (QIO_CHANNEL_READ_FLAG_*)
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * It is not required for all @iov to be filled with
 * data. If the channel is in blocking mode, at least
 * one byte of data will be read, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data is available, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If the channel has passed any file descriptors,
 * the @fds array pointer will be allocated and
 * the elements filled with the received file
 * descriptors. The @nfds pointer will be updated
 * to indicate the size of the @fds array that
 * was allocated. It is the caller's responsibility
 * to call close() on each file descriptor and to
 * call g_free() on the array pointer in @fds.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes read, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data is available
 * and the channel is non-blocking
 */
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               int flags,
                               Error **errp);
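
/*
 * Example (an illustrative sketch, not a prescribed pattern): receiving
 * data together with any passed file descriptors on a blocking channel
 * that supports QIO_CHANNEL_FEATURE_FD_PASS. "ioc", "buf" and "buflen"
 * are assumed to be provided by the caller.
 *
 *   struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *   int *fds = NULL;
 *   size_t nfds = 0;
 *   Error *err = NULL;
 *   ssize_t len;
 *
 *   len = qio_channel_readv_full(ioc, &iov, 1, &fds, &nfds, 0, &err);
 *   if (len < 0) {
 *       error_report_err(err);
 *       return -1;
 *   }
 *   for (size_t i = 0; i < nfds; i++) {
 *       ... use fds[i] ..., then close(fds[i]);
 *   }
 *   g_free(fds);
 */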

/**
 * qio_channel_writev_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * It is not required for all @iov data to be fully
 * sent. If the channel is in blocking mode, at least
 * one byte of data will be sent, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data can be sent, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If there are file descriptors to send, the @fds
 * array should be non-NULL and provide the handles.
 * All file descriptors will be sent if at least one
 * byte of data was sent.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes sent, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data can be sent
 * and the channel is non-blocking
 */
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                int flags,
                                Error **errp);

/**
 * qio_channel_readv_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before any data is read,
 * no error is reported; otherwise, if it occurs
 * before all requested data has been read, an error
 * will be reported.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 *          occurs without data, or -1 on error
 */
int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
                                                 const struct iovec *iov,
                                                 size_t niov,
                                                 Error **errp);
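
/*
 * Example (illustrative sketch): reading a fixed-size record where a
 * clean end-of-file before any data simply ends the stream. "ioc" and
 * the caller-defined "record" variable are assumed to be supplied by
 * the caller.
 *
 *   struct iovec iov = { .iov_base = &record, .iov_len = sizeof(record) };
 *   Error *err = NULL;
 *   int ret = qio_channel_readv_all_eof(ioc, &iov, 1, &err);
 *
 *   if (ret < 0) {            (error, or EOF part-way through the record)
 *       error_report_err(err);
 *   } else if (ret == 0) {    (clean end-of-file, no record read)
 *       ... end the stream ...
 *   } else {                  (ret == 1, record fully populated)
 *       ... process the record ...
 *   }
 */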

/**
 * qio_channel_readv_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before all requested data
 * has been read, an error will be reported.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc,
                                             const struct iovec *iov,
                                             size_t niov,
                                             Error **errp);


/**
 * qio_channel_writev_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc,
                                              const struct iovec *iov,
                                              size_t niov,
                                              Error **errp);

/**
 * qio_channel_readv:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles.
 */
ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);

/**
 * qio_channel_writev:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles.
 */
ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_read:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles, and only supports reading into
 * a single memory region.
 */
ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);
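
/*
 * Example (illustrative sketch): sending a header followed by a payload
 * in one scatter/gather call, waiting until everything has been written.
 * "ioc", "hdr", "payload" and "payload_len" are assumed to be provided
 * by the caller.
 *
 *   struct iovec iov[2] = {
 *       { .iov_base = &hdr, .iov_len = sizeof(hdr) },
 *       { .iov_base = payload, .iov_len = payload_len },
 *   };
 *   Error *err = NULL;
 *
 *   if (qio_channel_writev_all(ioc, iov, 2, &err) < 0) {
 *       error_report_err(err);
 *       return -1;
 *   }
 */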

/**
 * qio_channel_write:
 * @ioc: the channel object
 * @buf: the memory region to send data from
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles, and only supports writing from a
 * single memory region.
 */
ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);

/**
 * qio_channel_read_all_eof:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs immediately it is not an error, but if it occurs after
 * data has been read it will return an error rather than a
 * short read. Otherwise behaves as qio_channel_read().
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file occurs
 *          without data, or -1 on error
 */
int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc,
                                                char *buf,
                                                size_t buflen,
                                                Error **errp);

/**
 * qio_channel_read_all:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs it will return an error rather than a short read. Otherwise
 * behaves as qio_channel_read().
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc,
                                            char *buf,
                                            size_t buflen,
                                            Error **errp);

/**
 * qio_channel_write_all:
 * @ioc: the channel object
 * @buf: the memory region to write data from
 * @buflen: the number of bytes to write from @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Writes @buflen bytes from @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is written. Otherwise
 * behaves as qio_channel_write().
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc,
                                             const char *buf,
                                             size_t buflen,
                                             Error **errp);

/**
 * qio_channel_set_blocking:
 * @ioc: the channel object
 * @enabled: the blocking flag state
 * @errp: pointer to a NULL-initialized error object
 *
 * If @enabled is true, then the channel is put into
 * blocking mode, otherwise it will be non-blocking.
 *
 * In non-blocking mode, read/write operations may
 * return QIO_CHANNEL_ERR_BLOCK if they would otherwise
 * block on I/O.
 */
int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp);
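
/*
 * Example (illustrative sketch): writing from a non-coroutine context on
 * a non-blocking channel, waiting for the channel to become writable
 * again whenever QIO_CHANNEL_ERR_BLOCK is returned. "ioc", "buf" and
 * "buflen" are assumed to be provided by the caller; a short write
 * (ret < buflen) would still need to be handled by the caller.
 *
 *   Error *err = NULL;
 *   ssize_t ret;
 *
 *   qio_channel_set_blocking(ioc, false, NULL);
 *   do {
 *       ret = qio_channel_write(ioc, buf, buflen, &err);
 *       if (ret == QIO_CHANNEL_ERR_BLOCK) {
 *           qio_channel_wait(ioc, G_IO_OUT);
 *       }
 *   } while (ret == QIO_CHANNEL_ERR_BLOCK);
 *   if (ret < 0) {
 *       error_report_err(err);
 *   }
 */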

/**
 * qio_channel_close:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Close the channel, flushing any pending I/O.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_close(QIOChannel *ioc,
                      Error **errp);

/**
 * qio_channel_shutdown:
 * @ioc: the channel object
 * @how: the direction to shutdown
 * @errp: pointer to a NULL-initialized error object
 *
 * Shuts down transmission and/or receiving of data
 * without closing the underlying transport.
 *
 * Not all implementations will support this facility,
 * so they may report an error. To avoid errors, the
 * caller may check for the feature flag
 * QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling
 * this method.
 *
 * This function is thread-safe, terminates quickly and does not block.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_shutdown(QIOChannel *ioc,
                         QIOChannelShutdown how,
                         Error **errp);

/**
 * qio_channel_set_delay:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to delay writes in order to merge
 * small packets. If @enabled is true, then the
 * writes may be delayed in order to opportunistically
 * merge small packets into larger ones. If @enabled
 * is false, writes are dispatched immediately with
 * no delay.
 *
 * When @enabled is false, applications may wish to
 * use the qio_channel_set_cork() method to explicitly
 * control write merging.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the inverse of the TCP_NODELAY flag,
 * controlling whether the Nagle algorithm is active.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_delay(QIOChannel *ioc,
                           bool enabled);

/**
 * qio_channel_set_cork:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to dispatch data that is written.
 * If @enabled is true, then any data written will
 * be queued in local buffers until @enabled is
 * set to false once again.
 *
 * This feature is typically used when the automatic
 * write coalescing facility is disabled via the
 * qio_channel_set_delay() method.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the TCP_CORK flag.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled);


/**
 * qio_channel_io_seek:
 * @ioc: the channel object
 * @offset: the position to seek to, relative to @whence
 * @whence: one of the (POSIX) SEEK_* constants listed below
 * @errp: pointer to a NULL-initialized error object
 *
 * Moves the current I/O position within the channel
 * @ioc, to be @offset. The value of @offset is
 * interpreted relative to @whence:
 *
 *  SEEK_SET - the position is set to @offset bytes
 *  SEEK_CUR - the position is moved by @offset bytes
 *  SEEK_END - the position is set to end of the file plus @offset bytes
 *
 * Not all implementations will support this facility,
 * so they may report an error.
 *
 * Returns: the new position on success, (off_t)-1 on failure
 */
off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp);
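
/*
 * Example (illustrative sketch, not a prescribed policy): signalling
 * end-of-stream to the peer while remaining able to read its reply,
 * falling back to a full close when the channel cannot shut down a
 * single direction. "ioc" is assumed to be provided by the caller.
 *
 *   if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)) {
 *       qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_WRITE, NULL);
 *   } else {
 *       qio_channel_close(ioc, NULL);
 *   }
 */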

/**
 * qio_channel_create_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. Typically the
 * qio_channel_add_watch() method would be used instead
 * of this, since it directly attaches a callback to
 * the source.
 *
 * Returns: the new main loop source.
 */
GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition);

/**
 * qio_channel_add_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. The callback @func
 * will be registered against the source, to be invoked
 * when the source becomes ready. The optional @user_data
 * will be passed to @func when it is invoked. The @notify
 * callback will be used to free @user_data when the
 * watch is deleted.
 *
 * The returned source ID can be used with g_source_remove()
 * to remove and free the source when no longer required.
 * Alternatively the @func callback can return a FALSE
 * value.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify);

/**
 * qio_channel_add_watch_full:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the context to run the watch source
 *
 * Similar to qio_channel_add_watch(), but allows specifying the
 * context in which to run the watch source.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch_full(QIOChannel *ioc,
                                 GIOCondition condition,
                                 QIOChannelFunc func,
                                 gpointer user_data,
                                 GDestroyNotify notify,
                                 GMainContext *context);

/**
 * qio_channel_add_watch_source:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the GMainContext to bind the source to
 *
 * Similar to qio_channel_add_watch(), but allows specifying the
 * context in which to run the watch source, and returns the GSource
 * object instead of a tag ID, with the GSource already referenced.
 *
 * Note: the caller is responsible for unreferencing the source
 * when it is no longer needed.
 *
 * Returns: the source pointer
 */
GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                      GIOCondition condition,
                                      QIOChannelFunc func,
                                      gpointer user_data,
                                      GDestroyNotify notify,
                                      GMainContext *context);
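
/*
 * Example (illustrative sketch): watching for readability from the main
 * loop. The callback name and its body are hypothetical; only the
 * QIOChannelFunc signature and the add/remove calls come from this API.
 *
 *   static gboolean my_channel_readable(QIOChannel *ioc,
 *                                       GIOCondition condition,
 *                                       gpointer opaque)
 *   {
 *       ... read from ioc ...
 *       return G_SOURCE_CONTINUE;   (or G_SOURCE_REMOVE to drop the watch)
 *   }
 *
 *   guint tag = qio_channel_add_watch(ioc, G_IO_IN,
 *                                     my_channel_readable, NULL, NULL);
 *   ...
 *   g_source_remove(tag);
 */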

/**
 * qio_channel_attach_aio_context:
 * @ioc: the channel object
 * @ctx: the #AioContext to set the handlers on
 *
 * Request that qio_channel_yield() sets I/O handlers on
 * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
 * uses QEMU's main thread event loop.
 *
 * You can move a #QIOChannel from one #AioContext to another even if
 * I/O handlers are set for a coroutine. However, #QIOChannel provides
 * no synchronization between the calls to qio_channel_yield() and
 * qio_channel_attach_aio_context().
 *
 * Therefore you should first call qio_channel_detach_aio_context()
 * to ensure that the coroutine is not entered concurrently. Then,
 * while the coroutine has yielded, call qio_channel_attach_aio_context(),
 * and then aio_co_schedule() to place the coroutine on the new
 * #AioContext. The calls to qio_channel_detach_aio_context()
 * and qio_channel_attach_aio_context() should be protected with
 * aio_context_acquire() and aio_context_release().
 */
void qio_channel_attach_aio_context(QIOChannel *ioc,
                                    AioContext *ctx);

/**
 * qio_channel_detach_aio_context:
 * @ioc: the channel object
 *
 * Disable any I/O handlers set by qio_channel_yield(). With the
 * help of aio_co_schedule(), this allows moving a coroutine that was
 * paused by qio_channel_yield() to another context.
 */
void qio_channel_detach_aio_context(QIOChannel *ioc);

/**
 * qio_channel_yield:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Yields execution from the current coroutine until the condition
 * indicated by @condition becomes available. @condition must
 * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
 * addition, no two coroutines can be waiting on the same condition
 * and channel at the same time.
 *
 * This must only be called from coroutine context. It is safe to
 * reenter the coroutine externally while it is waiting; in this
 * case the function will return even if @condition is not yet
 * available.
 */
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition);

/**
 * qio_channel_wait:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Block execution from the current thread until
 * the condition indicated by @condition becomes
 * available.
 *
 * This will enter a nested event loop to perform
 * the wait.
 */
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition);

/**
 * qio_channel_set_aio_fd_handler:
 * @ioc: the channel object
 * @ctx: the AioContext to set the handlers on
 * @io_read: the read handler
 * @io_write: the write handler
 * @opaque: the opaque value passed to the handler
 *
 * This is used internally by qio_channel_yield(). It can
 * be used by channel implementations to forward the handlers
 * to another channel (e.g. from #QIOChannelTLS to the
 * underlying socket).
 */
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *ctx,
                                    IOHandler *io_read,
                                    IOHandler *io_write,
                                    void *opaque);
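
/*
 * Example (illustrative sketch): a coroutine reading from a non-blocking
 * channel, yielding until data becomes available instead of spinning.
 * The "my_co_read" helper is hypothetical, not part of this API.
 *
 *   static ssize_t coroutine_fn my_co_read(QIOChannel *ioc, char *buf,
 *                                          size_t buflen, Error **errp)
 *   {
 *       ssize_t ret;
 *
 *       for (;;) {
 *           ret = qio_channel_read(ioc, buf, buflen, errp);
 *           if (ret != QIO_CHANNEL_ERR_BLOCK) {
 *               return ret;
 *           }
 *           qio_channel_yield(ioc, G_IO_IN);
 *       }
 *   }
 */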

/**
 * qio_channel_readv_full_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive the file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Performs the same function as qio_channel_readv_all_eof().
 * Additionally, attempts to read file descriptors shared
 * over the channel. The function will wait for all
 * requested data to be read, yielding from the current
 * coroutine if required. Here, "data" refers to both the
 * file descriptors and the iovecs.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 *          occurs without data, or -1 on error
 */
int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
                                                      const struct iovec *iov,
                                                      size_t niov,
                                                      int **fds, size_t *nfds,
                                                      Error **errp);

/**
 * qio_channel_readv_full_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive the file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Performs the same function as qio_channel_readv_all().
 * Additionally, attempts to read file descriptors shared
 * over the channel. The function will wait for all
 * requested data to be read, yielding from the current
 * coroutine if required. Here, "data" refers to both the
 * file descriptors and the iovecs.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc,
                                                  const struct iovec *iov,
                                                  size_t niov,
                                                  int **fds, size_t *nfds,
                                                  Error **errp);

/**
 * qio_channel_writev_full_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves like qio_channel_writev_full() but will attempt
 * to send all data passed (file handles and memory regions).
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * If QIO_CHANNEL_WRITE_FLAG_ZERO_COPY is passed in @flags,
 * instead of waiting for all requested data to be written,
 * this function will wait until it's all queued for writing.
 * In this case, if the buffer gets changed between queueing and
 * sending, the updated buffer will be sent. If this is not the
 * desired behavior, it's suggested to call qio_channel_flush()
 * before reusing the buffer.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
                                                   const struct iovec *iov,
                                                   size_t niov,
                                                   int *fds, size_t nfds,
                                                   int flags, Error **errp);
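
/*
 * Example (illustrative sketch): queueing a zero-copy write and flushing
 * before the buffer is reused. Assumes the channel supports
 * QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY; "ioc", "buf" and "buflen" are
 * assumed to be provided by the caller.
 *
 *   struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *   Error *err = NULL;
 *
 *   if (qio_channel_writev_full_all(ioc, &iov, 1, NULL, 0,
 *                                   QIO_CHANNEL_WRITE_FLAG_ZERO_COPY,
 *                                   &err) < 0) {
 *       error_report_err(err);
 *       return -1;
 *   }
 *   if (qio_channel_flush(ioc, &err) < 0) {  (wait before touching buf again)
 *       error_report_err(err);
 *       return -1;
 *   }
 */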

/**
 * qio_channel_flush:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Will block until every packet queued with
 * qio_channel_writev_full() + QIO_CHANNEL_WRITE_FLAG_ZERO_COPY
 * is sent, or return in case of any error.
 *
 * If not implemented, acts as a no-op, and returns 0.
 *
 * Returns: -1 if any error is found,
 *          1 if every send failed to use zero copy,
 *          0 otherwise.
 */
int qio_channel_flush(QIOChannel *ioc,
                      Error **errp);

#endif /* QIO_CHANNEL_H */