/*
 * QEMU I/O channels
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef QIO_CHANNEL_H
#define QIO_CHANNEL_H

#include "qom/object.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

#define TYPE_QIO_CHANNEL "qio-channel"
OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass,
                    QIO_CHANNEL)


#define QIO_CHANNEL_ERR_BLOCK -2

#define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1

typedef enum QIOChannelFeature QIOChannelFeature;

enum QIOChannelFeature {
    QIO_CHANNEL_FEATURE_FD_PASS,
    QIO_CHANNEL_FEATURE_SHUTDOWN,
    QIO_CHANNEL_FEATURE_LISTEN,
    QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY,
};


typedef enum QIOChannelShutdown QIOChannelShutdown;

enum QIOChannelShutdown {
    QIO_CHANNEL_SHUTDOWN_READ = 1,
    QIO_CHANNEL_SHUTDOWN_WRITE = 2,
    QIO_CHANNEL_SHUTDOWN_BOTH = 3,
};

typedef gboolean (*QIOChannelFunc)(QIOChannel *ioc,
                                   GIOCondition condition,
                                   gpointer data);

/**
 * QIOChannel:
 *
 * The QIOChannel defines the core API for a generic I/O channel
 * class hierarchy. It is inspired by GIOChannel, but has the
 * following differences
 *
 *  - Use QOM to properly support arbitrary subclassing
 *  - Support use of iovecs for efficient I/O with multiple blocks
 *  - None of the character set translation, binary data exclusively
 *  - Direct support for QEMU Error object reporting
 *  - File descriptor passing
 *
 * This base class is abstract so cannot be instantiated. There
 * will be subclasses for dealing with sockets, files, and higher
 * level protocols such as TLS, WebSocket, etc.
 */

struct QIOChannel {
    Object parent;
    unsigned int features; /* bitmask of QIOChannelFeatures */
    char *name;
    AioContext *ctx;
    Coroutine *read_coroutine;
    Coroutine *write_coroutine;
#ifdef _WIN32
    HANDLE event; /* For use with GSource on Win32 */
#endif
};
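
/*
 * Example: generic use of a channel through the base class API.
 * This is an illustrative sketch only; it assumes the QIOChannelSocket
 * subclass from "io/channel-socket.h" and a pre-built SocketAddress
 * "addr", and it elides error handling for brevity.
 *
 *   QIOChannelSocket *sioc = qio_channel_socket_new();
 *   QIOChannel *ioc = QIO_CHANNEL(sioc);
 *
 *   qio_channel_socket_connect_sync(sioc, addr, &error_abort);
 *   qio_channel_set_name(ioc, "example-client");
 *
 *   qio_channel_write_all(ioc, "hello", 5, &error_abort);
 *   qio_channel_close(ioc, &error_abort);
 *   object_unref(OBJECT(sioc));
 */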

/**
 * QIOChannelClass:
 *
 * This class defines the contract that all subclasses
 * must follow to provide specific channel implementations.
 * The first five callbacks are mandatory to support, the
 * others provide additional optional features.
 *
 * Consult the corresponding public API docs for a description
 * of the semantics of each callback. io_shutdown in particular
 * must be thread-safe, terminate quickly and must not block.
 */
struct QIOChannelClass {
    ObjectClass parent;

    /* Mandatory callbacks */
    ssize_t (*io_writev)(QIOChannel *ioc,
                         const struct iovec *iov,
                         size_t niov,
                         int *fds,
                         size_t nfds,
                         int flags,
                         Error **errp);
    ssize_t (*io_readv)(QIOChannel *ioc,
                        const struct iovec *iov,
                        size_t niov,
                        int **fds,
                        size_t *nfds,
                        Error **errp);
    int (*io_close)(QIOChannel *ioc,
                    Error **errp);
    GSource * (*io_create_watch)(QIOChannel *ioc,
                                 GIOCondition condition);
    int (*io_set_blocking)(QIOChannel *ioc,
                           bool enabled,
                           Error **errp);

    /* Optional callbacks */
    int (*io_shutdown)(QIOChannel *ioc,
                       QIOChannelShutdown how,
                       Error **errp);
    void (*io_set_cork)(QIOChannel *ioc,
                        bool enabled);
    void (*io_set_delay)(QIOChannel *ioc,
                         bool enabled);
    off_t (*io_seek)(QIOChannel *ioc,
                     off_t offset,
                     int whence,
                     Error **errp);
    void (*io_set_aio_fd_handler)(QIOChannel *ioc,
                                  AioContext *ctx,
                                  IOHandler *io_read,
                                  IOHandler *io_write,
                                  void *opaque);
    int (*io_flush)(QIOChannel *ioc,
                    Error **errp);
};

/* General I/O handling functions */

/**
 * qio_channel_has_feature:
 * @ioc: the channel object
 * @feature: the feature to check support of
 *
 * Determine whether the channel implementation supports
 * the optional feature named in @feature.
 *
 * Returns: true if supported, false otherwise.
 */
bool qio_channel_has_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_feature:
 * @ioc: the channel object
 * @feature: the feature to set support for
 *
 * Add channel support for the feature named in @feature.
 */
void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_name:
 * @ioc: the channel object
 * @name: the name of the channel
 *
 * Sets the name of the channel, which serves as an aid
 * to debugging. The name is used when creating GSource
 * watches for this channel.
 */
void qio_channel_set_name(QIOChannel *ioc,
                          const char *name);

/**
 * qio_channel_readv_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * It is not required for all @iov to be filled with
 * data. If the channel is in blocking mode, at least
 * one byte of data will be read, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data is available, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If the channel has passed any file descriptors,
 * the @fds array pointer will be allocated and
 * the elements filled with the received file
 * descriptors. The @nfds pointer will be updated
 * to indicate the size of the @fds array that
 * was allocated. It is the caller's responsibility
 * to call close() on each file descriptor and to
 * call g_free() on the array pointer in @fds.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes read, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data is available
 * and the channel is non-blocking
 */
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               Error **errp);
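
/*
 * Example: receiving file descriptors along with data. This is an
 * illustrative sketch only; "buf" is a hypothetical buffer, "errp" is
 * the caller's error pointer, and error handling is elided.
 *
 *   struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *   int *fds = NULL;
 *   size_t nfds = 0;
 *
 *   if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
 *       ssize_t ret = qio_channel_readv_full(ioc, &iov, 1,
 *                                            &fds, &nfds, errp);
 *       for (size_t i = 0; i < nfds; i++) {
 *           ...use and then close(fds[i])...
 *       }
 *       g_free(fds);
 *   }
 */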

/**
 * qio_channel_writev_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * It is not required for all @iov data to be fully
 * sent. If the channel is in blocking mode, at least
 * one byte of data will be sent, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data can be sent, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If there are file descriptors to send, the @fds
 * array should be non-NULL and provide the handles.
 * All file descriptors will be sent if at least one
 * byte of data was sent.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes sent, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data can be sent
 * and the channel is non-blocking
 */
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                int flags,
                                Error **errp);

/**
 * qio_channel_readv_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before any data is read,
 * no error is reported; otherwise, if it occurs
 * before all requested data has been read, an error
 * will be reported.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 * occurs without data, or -1 on error
 */
int qio_channel_readv_all_eof(QIOChannel *ioc,
                              const struct iovec *iov,
                              size_t niov,
                              Error **errp);

/**
 * qio_channel_readv_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before all requested data
 * has been read, an error will be reported.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_readv_all(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);
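
/*
 * Example: reading a fixed-size header and its payload in one call with
 * a scatter list. This is an illustrative sketch only; "hdr", "payload"
 * and "payload_len" are hypothetical, and error handling is elided.
 *
 *   struct iovec iov[2] = {
 *       { .iov_base = &hdr, .iov_len = sizeof(hdr) },
 *       { .iov_base = payload, .iov_len = payload_len },
 *   };
 *
 *   if (qio_channel_readv_all(ioc, iov, 2, errp) < 0) {
 *       ...premature end-of-file or I/O error...
 *   }
 */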

/**
 * qio_channel_writev_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_writev_all(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_readv:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles.
 */
ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);

/**
 * qio_channel_writev:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles.
 */
ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_read:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles, and only supports reading into
 * a single memory region.
 */
ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write:
 * @ioc: the channel object
 * @buf: the memory region to send data from
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles, and only supports writing from a
 * single memory region.
 */
ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);
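
/*
 * Example: writing from a non-coroutine context on a non-blocking
 * channel, waiting when the transport would block. This is broadly what
 * qio_channel_write_all() provides already; it is shown here only as an
 * illustrative sketch of the QIO_CHANNEL_ERR_BLOCK semantics. "buf" and
 * "len" are hypothetical and error handling is elided.
 *
 *   size_t done = 0;
 *
 *   while (done < len) {
 *       ssize_t ret = qio_channel_write(ioc, buf + done, len - done, errp);
 *       if (ret == QIO_CHANNEL_ERR_BLOCK) {
 *           qio_channel_wait(ioc, G_IO_OUT);
 *           continue;
 *       }
 *       if (ret < 0) {
 *           ...handle error...
 *       }
 *       done += ret;
 *   }
 */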

/**
 * qio_channel_read_all_eof:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs immediately it is not an error, but if it occurs after
 * data has been read it will return an error rather than a
 * short-read. Otherwise behaves as qio_channel_read().
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file occurs
 * without data, or -1 on error
 */
int qio_channel_read_all_eof(QIOChannel *ioc,
                             char *buf,
                             size_t buflen,
                             Error **errp);

/**
 * qio_channel_read_all:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs it will return an error rather than a short-read. Otherwise
 * behaves as qio_channel_read().
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_read_all(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write_all:
 * @ioc: the channel object
 * @buf: the memory region to write data from
 * @buflen: the number of bytes to write from @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Writes @buflen bytes from @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is written. Otherwise
 * behaves as qio_channel_write().
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_write_all(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);

/**
 * qio_channel_set_blocking:
 * @ioc: the channel object
 * @enabled: the blocking flag state
 * @errp: pointer to a NULL-initialized error object
 *
 * If @enabled is true, then the channel is put into
 * blocking mode, otherwise it will be non-blocking.
 *
 * In non-blocking mode, read/write operations may
 * return QIO_CHANNEL_ERR_BLOCK if they would otherwise
 * block on I/O
 */
int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp);

/**
 * qio_channel_close:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Close the channel, flushing any pending I/O
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_close(QIOChannel *ioc,
                      Error **errp);

/**
 * qio_channel_shutdown:
 * @ioc: the channel object
 * @how: the direction to shutdown
 * @errp: pointer to a NULL-initialized error object
 *
 * Shuts down transmission and/or reception of data
 * without closing the underlying transport.
 *
 * Not all implementations will support this facility,
 * so they may report an error. To avoid errors, the
 * caller may check for the feature flag
 * QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling
 * this method.
 *
 * This function is thread-safe, terminates quickly and does not block.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_shutdown(QIOChannel *ioc,
                         QIOChannelShutdown how,
                         Error **errp);
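
/*
 * Example: signalling end-of-stream to the peer while still being able
 * to read its reply. This is an illustrative sketch only; "reply" is a
 * hypothetical buffer and error handling is elided.
 *
 *   if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)) {
 *       // No more data will be transmitted, but reads still work
 *       qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_WRITE, errp);
 *   }
 *
 *   // The peer now sees end-of-file; read its final response
 *   qio_channel_read_all_eof(ioc, reply, sizeof(reply), errp);
 */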

/**
 * qio_channel_set_delay:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to delay writes in order to merge
 * small packets. If @enabled is true, then the
 * writes may be delayed in order to opportunistically
 * merge small packets into larger ones. If @enabled
 * is false, writes are dispatched immediately with
 * no delay.
 *
 * When @enabled is false, applications may wish to
 * use the qio_channel_set_cork() method to explicitly
 * control write merging.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the inverse of the TCP_NODELAY flag,
 * controlling whether the Nagle algorithm is active.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_delay(QIOChannel *ioc,
                           bool enabled);

/**
 * qio_channel_set_cork:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to dispatch data that is written.
 * If @enabled is true, then any data written will
 * be queued in local buffers until @enabled is
 * set to false once again.
 *
 * This feature is typically used when the automatic
 * write coalescing facility is disabled via the
 * qio_channel_set_delay() method.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the TCP_CORK flag.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled);
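
/*
 * Example: batching several small writes into one transmission by
 * corking the channel. This is an illustrative sketch only; "hdr",
 * "body" and their lengths are hypothetical, and error handling is
 * elided.
 *
 *   qio_channel_set_delay(ioc, false);
 *
 *   qio_channel_set_cork(ioc, true);
 *   qio_channel_write_all(ioc, hdr, hdr_len, errp);
 *   qio_channel_write_all(ioc, body, body_len, errp);
 *   qio_channel_set_cork(ioc, false);   // allow the queued data to go out
 */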

/**
 * qio_channel_io_seek:
 * @ioc: the channel object
 * @offset: the position to seek to, relative to @whence
 * @whence: one of the (POSIX) SEEK_* constants listed below
 * @errp: pointer to a NULL-initialized error object
 *
 * Moves the current I/O position within the channel
 * @ioc, to be @offset. The value of @offset is
 * interpreted relative to @whence:
 *
 * SEEK_SET - the position is set to @offset bytes
 * SEEK_CUR - the position is moved by @offset bytes
 * SEEK_END - the position is set to end of the file plus @offset bytes
 *
 * Not all implementations will support this facility,
 * so they may report an error.
 *
 * Returns: the new position on success, (off_t)-1 on failure
 */
off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp);


/**
 * qio_channel_create_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. Typically the
 * qio_channel_add_watch() method would be used instead
 * of this, since it directly attaches a callback to
 * the source.
 *
 * Returns: the new main loop source.
 */
GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition);

/**
 * qio_channel_add_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. The callback @func
 * will be registered against the source, to be invoked
 * when the source becomes ready. The optional @user_data
 * will be passed to @func when it is invoked. The @notify
 * callback will be used to free @user_data when the
 * watch is deleted.
 *
 * The returned source ID can be used with g_source_remove()
 * to remove and free the source when no longer required.
 * Alternatively the @func callback can return a FALSE
 * value.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify);

/**
 * qio_channel_add_watch_full:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the context to run the watch source
 *
 * Similar to qio_channel_add_watch(), but allows specifying the
 * context in which to run the watch source.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch_full(QIOChannel *ioc,
                                 GIOCondition condition,
                                 QIOChannelFunc func,
                                 gpointer user_data,
                                 GDestroyNotify notify,
                                 GMainContext *context);

/**
 * qio_channel_add_watch_source:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: gcontext to bind the source to
 *
 * Similar to qio_channel_add_watch(), but allows specifying the
 * context in which to run the watch source, and returns the GSource
 * object instead of a tag ID. The returned source already has a
 * reference held.
 *
 * Note: the caller is responsible for unreffing the source when it
 * is no longer needed.
 *
 * Returns: the source pointer
 */
GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                      GIOCondition condition,
                                      QIOChannelFunc func,
                                      gpointer user_data,
                                      GDestroyNotify notify,
                                      GMainContext *context);
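
/*
 * Example: watching a channel for readability from the main loop.
 * This is an illustrative sketch only; "my_readable_cb" is a
 * hypothetical callback and error handling is elided.
 *
 *   static gboolean my_readable_cb(QIOChannel *ioc,
 *                                  GIOCondition condition,
 *                                  gpointer opaque)
 *   {
 *       char buf[1024];
 *       ssize_t ret = qio_channel_read(ioc, buf, sizeof(buf), NULL);
 *
 *       if (ret <= 0 && ret != QIO_CHANNEL_ERR_BLOCK) {
 *           return G_SOURCE_REMOVE;   // error or EOF: drop the watch
 *       }
 *       ...process ret bytes...
 *       return G_SOURCE_CONTINUE;
 *   }
 *
 *   guint tag = qio_channel_add_watch(ioc, G_IO_IN,
 *                                     my_readable_cb, NULL, NULL);
 *   ...later, if still registered: g_source_remove(tag)...
 */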

/**
 * qio_channel_attach_aio_context:
 * @ioc: the channel object
 * @ctx: the #AioContext to set the handlers on
 *
 * Request that qio_channel_yield() sets I/O handlers on
 * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
 * uses QEMU's main thread event loop.
 *
 * You can move a #QIOChannel from one #AioContext to another even if
 * I/O handlers are set for a coroutine. However, #QIOChannel provides
 * no synchronization between the calls to qio_channel_yield() and
 * qio_channel_attach_aio_context().
 *
 * Therefore you should first call qio_channel_detach_aio_context()
 * to ensure that the coroutine is not entered concurrently. Then,
 * while the coroutine has yielded, call qio_channel_attach_aio_context(),
 * and then aio_co_schedule() to place the coroutine on the new
 * #AioContext. The calls to qio_channel_detach_aio_context()
 * and qio_channel_attach_aio_context() should be protected with
 * aio_context_acquire() and aio_context_release().
 */
void qio_channel_attach_aio_context(QIOChannel *ioc,
                                    AioContext *ctx);

/**
 * qio_channel_detach_aio_context:
 * @ioc: the channel object
 *
 * Disable any I/O handlers set by qio_channel_yield(). With the
 * help of aio_co_schedule(), this allows moving a coroutine that was
 * paused by qio_channel_yield() to another context.
 */
void qio_channel_detach_aio_context(QIOChannel *ioc);

/**
 * qio_channel_yield:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Yields execution from the current coroutine until the condition
 * indicated by @condition becomes available. @condition must
 * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
 * addition, no two coroutines can be waiting on the same condition
 * and channel at the same time.
 *
 * This must only be called from coroutine context. It is safe to
 * reenter the coroutine externally while it is waiting; in this
 * case the function will return even if @condition is not yet
 * available.
 */
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition);

/**
 * qio_channel_wait:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Block execution from the current thread until
 * the condition indicated by @condition becomes
 * available.
 *
 * This will enter a nested event loop to perform
 * the wait.
 */
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition);

/**
 * qio_channel_set_aio_fd_handler:
 * @ioc: the channel object
 * @ctx: the AioContext to set the handlers on
 * @io_read: the read handler
 * @io_write: the write handler
 * @opaque: the opaque value passed to the handler
 *
 * This is used internally by qio_channel_yield(). It can
 * be used by channel implementations to forward the handlers
 * to another channel (e.g. from #QIOChannelTLS to the
 * underlying socket).
 */
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *ctx,
                                    IOHandler *io_read,
                                    IOHandler *io_write,
                                    void *opaque);
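
/*
 * Example: reading from a non-blocking channel inside a coroutine,
 * yielding instead of spinning when no data is available. This is an
 * illustrative sketch only; "my_co_read" is a hypothetical helper and
 * error handling is elided.
 *
 *   static ssize_t coroutine_fn my_co_read(QIOChannel *ioc,
 *                                          char *buf, size_t len,
 *                                          Error **errp)
 *   {
 *       for (;;) {
 *           ssize_t ret = qio_channel_read(ioc, buf, len, errp);
 *           if (ret == QIO_CHANNEL_ERR_BLOCK) {
 *               // Suspend this coroutine until the channel is readable
 *               qio_channel_yield(ioc, G_IO_IN);
 *               continue;
 *           }
 *           return ret;
 *       }
 *   }
 */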

/**
 * qio_channel_readv_full_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data to
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Performs the same function as qio_channel_readv_all_eof().
 * Additionally, attempts to read file descriptors shared
 * over the channel. The function will wait for all
 * requested data to be read, yielding from the current
 * coroutine if required. Here "data" refers to both the
 * file descriptors and the iovs.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 * occurs without data, or -1 on error
 */
int qio_channel_readv_full_all_eof(QIOChannel *ioc,
                                   const struct iovec *iov,
                                   size_t niov,
                                   int **fds, size_t *nfds,
                                   Error **errp);

/**
 * qio_channel_readv_full_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data to
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Performs the same function as qio_channel_readv_all().
 * Additionally, attempts to read file descriptors shared
 * over the channel. The function will wait for all
 * requested data to be read, yielding from the current
 * coroutine if required. Here "data" refers to both the
 * file descriptors and the iovs.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_readv_full_all(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds, size_t *nfds,
                               Error **errp);

/**
 * qio_channel_writev_full_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves like qio_channel_writev_full() but will attempt
 * to send all data passed (file handles and memory regions).
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * If QIO_CHANNEL_WRITE_FLAG_ZERO_COPY is passed in @flags,
 * instead of waiting for all requested data to be written,
 * this function will wait until it's all queued for writing.
 * In this case, if the buffer gets changed between queueing and
 * sending, the updated buffer will be sent. If this is not the
 * desired behavior, it's suggested to call qio_channel_flush()
 * before reusing the buffer.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_writev_full_all(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds, size_t nfds,
                                int flags, Error **errp);

/**
 * qio_channel_flush:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Will block until every packet queued with
 * qio_channel_writev_full() + QIO_CHANNEL_WRITE_FLAG_ZERO_COPY
 * is sent, or returns in case of any error.
 *
 * If not implemented, acts as a no-op, and returns 0.
 *
 * Returns: -1 if any error is found,
 *           1 if every send failed to use zero copy,
 *           0 otherwise.
 */
int qio_channel_flush(QIOChannel *ioc,
                      Error **errp);

#endif /* QIO_CHANNEL_H */
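
/*
 * Example: zero-copy transmission followed by an explicit flush. This
 * is an illustrative sketch only; "buf" and "len" are hypothetical,
 * the channel is assumed to advertise zero-copy support, and error
 * handling is elided.
 *
 *   struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *   if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
 *       qio_channel_writev_full_all(ioc, &iov, 1, NULL, 0,
 *                                   QIO_CHANNEL_WRITE_FLAG_ZERO_COPY, errp);
 *       // "buf" must not be modified or reused until the flush completes
 *       qio_channel_flush(ioc, errp);
 *   }
 */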