/*
 * QEMU I/O channels
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef QIO_CHANNEL_H
#define QIO_CHANNEL_H

#include "qemu-common.h"
#include "qom/object.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

#define TYPE_QIO_CHANNEL "qio-channel"
#define QIO_CHANNEL(obj)                                    \
    OBJECT_CHECK(QIOChannel, (obj), TYPE_QIO_CHANNEL)
#define QIO_CHANNEL_CLASS(klass)                                    \
    OBJECT_CLASS_CHECK(QIOChannelClass, klass, TYPE_QIO_CHANNEL)
#define QIO_CHANNEL_GET_CLASS(obj)                                  \
    OBJECT_GET_CLASS(QIOChannelClass, obj, TYPE_QIO_CHANNEL)

typedef struct QIOChannel QIOChannel;
typedef struct QIOChannelClass QIOChannelClass;

#define QIO_CHANNEL_ERR_BLOCK -2

typedef enum QIOChannelFeature QIOChannelFeature;

enum QIOChannelFeature {
    QIO_CHANNEL_FEATURE_FD_PASS,
    QIO_CHANNEL_FEATURE_SHUTDOWN,
    QIO_CHANNEL_FEATURE_LISTEN,
};


typedef enum QIOChannelShutdown QIOChannelShutdown;

enum QIOChannelShutdown {
    QIO_CHANNEL_SHUTDOWN_BOTH,
    QIO_CHANNEL_SHUTDOWN_READ,
    QIO_CHANNEL_SHUTDOWN_WRITE,
};

typedef gboolean (*QIOChannelFunc)(QIOChannel *ioc,
                                   GIOCondition condition,
                                   gpointer data);

/**
 * QIOChannel:
 *
 * The QIOChannel defines the core API for a generic I/O channel
 * class hierarchy. It is inspired by GIOChannel, but has the
 * following differences:
 *
 *  - Use of QOM to properly support arbitrary subclassing
 *  - Support for iovecs, for efficient I/O with multiple blocks
 *  - No character set translation; binary data exclusively
 *  - Direct support for QEMU Error object reporting
 *  - File descriptor passing
 *
 * This base class is abstract, so it cannot be instantiated. There
 * will be subclasses for dealing with sockets, files, and higher
 * level protocols such as TLS, WebSocket, etc.
 */

struct QIOChannel {
    Object parent;
    unsigned int features; /* bitmask of QIOChannelFeatures */
    char *name;
    AioContext *ctx;
    Coroutine *read_coroutine;
    Coroutine *write_coroutine;
#ifdef _WIN32
    HANDLE event; /* For use with GSource on Win32 */
#endif
};

/**
 * QIOChannelClass:
 *
 * This class defines the contract that all subclasses
 * must follow to provide specific channel implementations.
 * The first five callbacks are mandatory to implement; the
 * rest provide additional optional features.
 *
 * Consult the corresponding public API docs for a description
 * of the semantics of each callback.
 */
struct QIOChannelClass {
    ObjectClass parent;

    /* Mandatory callbacks */
    ssize_t (*io_writev)(QIOChannel *ioc,
                         const struct iovec *iov,
                         size_t niov,
                         int *fds,
                         size_t nfds,
                         Error **errp);
    ssize_t (*io_readv)(QIOChannel *ioc,
                        const struct iovec *iov,
                        size_t niov,
                        int **fds,
                        size_t *nfds,
                        Error **errp);
    int (*io_close)(QIOChannel *ioc,
                    Error **errp);
    GSource * (*io_create_watch)(QIOChannel *ioc,
                                 GIOCondition condition);
    int (*io_set_blocking)(QIOChannel *ioc,
                           bool enabled,
                           Error **errp);

    /* Optional callbacks */
    int (*io_shutdown)(QIOChannel *ioc,
                       QIOChannelShutdown how,
                       Error **errp);
    void (*io_set_cork)(QIOChannel *ioc,
                        bool enabled);
    void (*io_set_delay)(QIOChannel *ioc,
                         bool enabled);
    off_t (*io_seek)(QIOChannel *ioc,
                     off_t offset,
                     int whence,
                     Error **errp);
    void (*io_set_aio_fd_handler)(QIOChannel *ioc,
                                  AioContext *ctx,
                                  IOHandler *io_read,
                                  IOHandler *io_write,
                                  void *opaque);
};
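
/*
 * As a rough, hypothetical sketch (not part of this API), a concrete
 * channel implementation would subclass QIOChannel via QOM and fill in
 * the callbacks above from its class_init function. All of the "foo"
 * names below are placeholders for the implementation's own symbols:
 *
 *   static void qio_channel_foo_class_init(ObjectClass *klass,
 *                                          void *class_data)
 *   {
 *       QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
 *
 *       ioc_klass->io_writev = qio_channel_foo_writev;
 *       ioc_klass->io_readv = qio_channel_foo_readv;
 *       ioc_klass->io_close = qio_channel_foo_close;
 *       ioc_klass->io_create_watch = qio_channel_foo_create_watch;
 *       ioc_klass->io_set_blocking = qio_channel_foo_set_blocking;
 *   }
 *
 *   static const TypeInfo qio_channel_foo_info = {
 *       .parent = TYPE_QIO_CHANNEL,
 *       .name = "qio-channel-foo",
 *       .instance_size = sizeof(QIOChannelFoo),
 *       .class_init = qio_channel_foo_class_init,
 *   };
 *
 *   static void qio_channel_foo_register_types(void)
 *   {
 *       type_register_static(&qio_channel_foo_info);
 *   }
 *
 *   type_init(qio_channel_foo_register_types);
 */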

/* General I/O handling functions */

/**
 * qio_channel_has_feature:
 * @ioc: the channel object
 * @feature: the feature to check support of
 *
 * Determine whether the channel implementation supports
 * the optional feature named in @feature.
 *
 * Returns: true if supported, false otherwise.
 */
bool qio_channel_has_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_feature:
 * @ioc: the channel object
 * @feature: the feature to set support for
 *
 * Add channel support for the feature named in @feature.
 */
void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_name:
 * @ioc: the channel object
 * @name: the name of the channel
 *
 * Sets the name of the channel, which serves as an aid
 * to debugging. The name is used when creating GSource
 * watches for this channel.
 */
void qio_channel_set_name(QIOChannel *ioc,
                          const char *name);

/**
 * qio_channel_readv_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * It is not required for all @iov to be filled with
 * data. If the channel is in blocking mode, at least
 * one byte of data will be read, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data is available, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If the channel has passed any file descriptors,
 * the @fds array pointer will be allocated and
 * the elements filled with the received file
 * descriptors. The @nfds pointer will be updated
 * to indicate the size of the @fds array that
 * was allocated. It is the caller's responsibility
 * to call close() on each file descriptor and to
 * call g_free() on the array pointer in @fds.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes read, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data is available
 * and the channel is non-blocking
 */
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               Error **errp);
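
/*
 * A minimal, illustrative sketch of reading data together with any
 * passed file descriptors, assuming the channel advertises
 * QIO_CHANNEL_FEATURE_FD_PASS (the buffer size is arbitrary and error
 * handling is abbreviated):
 *
 *   char buf[1024];
 *   struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *   int *fds = NULL;
 *   size_t nfds = 0;
 *   Error *err = NULL;
 *   ssize_t len;
 *
 *   len = qio_channel_readv_full(ioc, &iov, 1, &fds, &nfds, &err);
 *   if (len < 0) {
 *       error_report_err(err);
 *       return -1;
 *   }
 *   for (size_t i = 0; i < nfds; i++) {
 *       close(fds[i]);
 *   }
 *   g_free(fds);
 */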


/**
 * qio_channel_writev_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * It is not required for all @iov data to be fully
 * sent. If the channel is in blocking mode, at least
 * one byte of data will be sent, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data can be sent, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If there are file descriptors to send, the @fds
 * array should be non-NULL and provide the handles.
 * All file descriptors will be sent if at least one
 * byte of data was sent.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes sent, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data can be sent
 * and the channel is non-blocking
 */
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                Error **errp);
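
/*
 * A minimal, illustrative sketch of sending a buffer together with a
 * file descriptor, again assuming the channel supports
 * QIO_CHANNEL_FEATURE_FD_PASS (e.g. a UNIX domain socket channel);
 * 'fd' is assumed to hold the already-open descriptor to pass:
 *
 *   struct iovec iov = { .iov_base = (void *)"hello", .iov_len = 5 };
 *   Error *err = NULL;
 *
 *   if (qio_channel_writev_full(ioc, &iov, 1, &fd, 1, &err) < 0) {
 *       error_report_err(err);
 *   }
 */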

/**
 * qio_channel_readv_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before all requested data
 * has been read, an error will be reported.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_readv_all(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);


/**
 * qio_channel_writev_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_writev_all(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_readv:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles.
 */
ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);

/**
 * qio_channel_writev:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles.
 */
ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_read:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles, and only supports reading into
 * a single memory region.
 */
ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write:
 * @ioc: the channel object
 * @buf: the memory region to send data from
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles, and only supports writing from a
 * single memory region.
 */
ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);

/**
 * qio_channel_read_all:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs it will return an error rather than a short read. Otherwise
 * behaves as qio_channel_read().
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_read_all(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write_all:
 * @ioc: the channel object
 * @buf: the memory region to write data from
 * @buflen: the number of bytes to write from @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Writes @buflen bytes from @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is written. Otherwise
 * behaves as qio_channel_write().
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_write_all(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);
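
/*
 * An illustrative sketch of sending a length-prefixed message with the
 * _all variants, which either transfer everything or fail; 'msg' and
 * 'msglen' are placeholders and error handling is abbreviated:
 *
 *   uint32_t len = cpu_to_be32(msglen);
 *   Error *err = NULL;
 *
 *   if (qio_channel_write_all(ioc, (char *)&len, sizeof(len), &err) < 0 ||
 *       qio_channel_write_all(ioc, msg, msglen, &err) < 0) {
 *       error_report_err(err);
 *       return -1;
 *   }
 */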

/**
 * qio_channel_set_blocking:
 * @ioc: the channel object
 * @enabled: the blocking flag state
 * @errp: pointer to a NULL-initialized error object
 *
 * If @enabled is true, then the channel is put into
 * blocking mode, otherwise it will be non-blocking.
 *
 * In non-blocking mode, read/write operations may
 * return QIO_CHANNEL_ERR_BLOCK if they would otherwise
 * block on I/O.
 */
int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp);

/**
 * qio_channel_close:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Close the channel, flushing any pending I/O.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_close(QIOChannel *ioc,
                      Error **errp);

/**
 * qio_channel_shutdown:
 * @ioc: the channel object
 * @how: the direction to shutdown
 * @errp: pointer to a NULL-initialized error object
 *
 * Shuts down transmission and/or receiving of data
 * without closing the underlying transport.
 *
 * Not all implementations will support this facility,
 * so they may report an error. To avoid errors, the
 * caller may check for the feature flag
 * QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling
 * this method.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_shutdown(QIOChannel *ioc,
                         QIOChannelShutdown how,
                         Error **errp);

/**
 * qio_channel_set_delay:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to delay writes in order to merge
 * small packets. If @enabled is true, then the
 * writes may be delayed in order to opportunistically
 * merge small packets into larger ones. If @enabled
 * is false, writes are dispatched immediately with
 * no delay.
 *
 * When @enabled is false, applications may wish to
 * use the qio_channel_set_cork() method to explicitly
 * control write merging.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the inverse of the TCP_NODELAY flag,
 * controlling whether the Nagle algorithm is active.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_delay(QIOChannel *ioc,
                           bool enabled);

/**
 * qio_channel_set_cork:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to dispatch data that is written.
 * If @enabled is true, then any data written will
 * be queued in local buffers until @enabled is
 * set to false once again.
 *
 * This feature is typically used when the automatic
 * write coalescing facility is disabled via the
 * qio_channel_set_delay() method.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the TCP_CORK flag.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled);
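
/*
 * An illustrative sketch of batching two writes into a single packet by
 * corking the channel while Nagle-style merging is disabled; setting the
 * cork back to false flushes the queued data. 'hdr', 'payload' and
 * 'payload_len' are placeholders and error handling is abbreviated:
 *
 *   Error *err = NULL;
 *
 *   qio_channel_set_delay(ioc, false);
 *
 *   qio_channel_set_cork(ioc, true);
 *   qio_channel_write_all(ioc, (char *)&hdr, sizeof(hdr), &err);
 *   qio_channel_write_all(ioc, payload, payload_len, &err);
 *   qio_channel_set_cork(ioc, false);
 */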


/**
 * qio_channel_io_seek:
 * @ioc: the channel object
 * @offset: the position to seek to, relative to @whence
 * @whence: one of the (POSIX) SEEK_* constants listed below
 * @errp: pointer to a NULL-initialized error object
 *
 * Moves the current I/O position within the channel
 * @ioc, to be @offset. The value of @offset is
 * interpreted relative to @whence:
 *
 * SEEK_SET - the position is set to @offset bytes
 * SEEK_CUR - the position is moved by @offset bytes
 * SEEK_END - the position is set to end of the file plus @offset bytes
 *
 * Not all implementations will support this facility,
 * so they may report an error.
 *
 * Returns: the new position on success, (off_t)-1 on failure
 */
off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp);


/**
 * qio_channel_create_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. Typically the
 * qio_channel_add_watch() method would be used instead
 * of this, since it directly attaches a callback to
 * the source.
 *
 * Returns: the new main loop source.
 */
GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition);

/**
 * qio_channel_add_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. The callback @func
 * will be registered against the source, to be invoked
 * when the source becomes ready. The optional @user_data
 * will be passed to @func when it is invoked. The @notify
 * callback will be used to free @user_data when the
 * watch is deleted.
 *
 * The returned source ID can be used with g_source_remove()
 * to remove and free the source when no longer required.
 * Alternatively the @func callback can return a FALSE
 * value.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify);
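
/*
 * An illustrative sketch of watching for readability from the main
 * loop. Returning FALSE from the callback removes the watch, as does
 * calling g_source_remove() on the returned tag. The callback name and
 * the 'state' opaque pointer are placeholders:
 *
 *   static gboolean my_can_read(QIOChannel *ioc,
 *                               GIOCondition condition,
 *                               gpointer opaque)
 *   {
 *       char buf[1024];
 *       ssize_t len = qio_channel_read(ioc, buf, sizeof(buf), NULL);
 *
 *       return len > 0 ? TRUE : FALSE;
 *   }
 *
 *   guint tag = qio_channel_add_watch(ioc, G_IO_IN,
 *                                     my_can_read, state, NULL);
 */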


/**
 * qio_channel_attach_aio_context:
 * @ioc: the channel object
 * @ctx: the #AioContext to set the handlers on
 *
 * Request that qio_channel_yield() sets I/O handlers on
 * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
 * uses QEMU's main thread event loop.
 *
 * You can move a #QIOChannel from one #AioContext to another even if
 * I/O handlers are set for a coroutine. However, #QIOChannel provides
 * no synchronization between the calls to qio_channel_yield() and
 * qio_channel_attach_aio_context().
 *
 * Therefore you should first call qio_channel_detach_aio_context()
 * to ensure that the coroutine is not entered concurrently. Then,
 * while the coroutine has yielded, call qio_channel_attach_aio_context(),
 * and then aio_co_schedule() to place the coroutine on the new
 * #AioContext. The calls to qio_channel_detach_aio_context()
 * and qio_channel_attach_aio_context() should be protected with
 * aio_context_acquire() and aio_context_release().
 */
void qio_channel_attach_aio_context(QIOChannel *ioc,
                                    AioContext *ctx);

/**
 * qio_channel_detach_aio_context:
 * @ioc: the channel object
 *
 * Disable any I/O handlers set by qio_channel_yield(). With the
 * help of aio_co_schedule(), this allows moving a coroutine that was
 * paused by qio_channel_yield() to another context.
 */
void qio_channel_detach_aio_context(QIOChannel *ioc);

/**
 * qio_channel_yield:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Yields execution from the current coroutine until the condition
 * indicated by @condition becomes available. @condition must
 * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
 * addition, no two coroutines can be waiting on the same condition
 * and channel at the same time.
 *
 * This must only be called from coroutine context.
 */
void qio_channel_yield(QIOChannel *ioc,
                       GIOCondition condition);

/**
 * qio_channel_wait:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Block execution from the current thread until
 * the condition indicated by @condition becomes
 * available.
 *
 * This will enter a nested event loop to perform
 * the wait.
 */
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition);

/**
 * qio_channel_set_aio_fd_handler:
 * @ioc: the channel object
 * @ctx: the AioContext to set the handlers on
 * @io_read: the read handler
 * @io_write: the write handler
 * @opaque: the opaque value passed to the handler
 *
 * This is used internally by qio_channel_yield(). It can
 * be used by channel implementations to forward the handlers
 * to another channel (e.g. from #QIOChannelTLS to the
 * underlying socket).
 */
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *ctx,
                                    IOHandler *io_read,
                                    IOHandler *io_write,
                                    void *opaque);

#endif /* QIO_CHANNEL_H */