/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qemu/uri.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
#include "qapi/clone-visitor.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "block/coroutines.h"

#include "qemu/yank.h"

#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS    16

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))

typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* sleeping in the yield in nbd_receive_replies */
    bool reply_possible;    /* reply header not yet received */
} NBDClientRequest;

typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;

typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    CoMutex send_mutex;
    CoQueue free_sema;

    CoMutex receive_mutex;
    int in_flight;
    NBDClientState state;

    QEMUTimer *reconnect_delay_timer;

    NBDClientRequest requests[MAX_NBD_REQUESTS];
    NBDReply reply;
    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    SocketAddress *saddr;
    char *export, *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    const char *hostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    NBDClientConnection *conn;
} BDRVNBDState;

static void nbd_yank(void *opaque);

static void nbd_clear_bdrvstate(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);
    s->conn = NULL;

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}
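
/*
 * Note: s->state is read by coroutines and timer callbacks that may run
 * concurrently with the writer.  Writers publish new values (e.g. with
 * qatomic_store_release() in nbd_yank()), and the helpers below read with
 * qatomic_load_acquire(), so that a state transition is observed together
 * with everything the writer did before it.
 */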
static bool nbd_client_connected(BDRVNBDState *s)
{
    return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED;
}

static bool nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);
        return true;
    }

    return false;
}

static void nbd_recv_coroutines_wake(BDRVNBDState *s, bool all)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (nbd_recv_coroutine_wake_one(&s->requests[i]) && !all) {
            return;
        }
    }
}

static void nbd_channel_error(BDRVNBDState *s, int ret)
{
    if (nbd_client_connected(s)) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }

    if (ret == -EIO) {
        if (nbd_client_connected(s)) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }

    nbd_recv_coroutines_wake(s, true);
}

static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}

static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
        nbd_co_establish_connection_cancel(s->conn);
        while (qemu_co_enter_next(&s->free_sema, NULL)) {
            /* Resume all queued requests */
        }
    }

    reconnect_delay_timer_del(s);
}

static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTING_WAIT) {
        return;
    }

    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    s->state = NBD_CLIENT_QUIT;
}

static bool nbd_client_connecting(BDRVNBDState *s)
{
    NBDClientState state = qatomic_load_acquire(&s->state);
    return state == NBD_CLIENT_CONNECTING_WAIT ||
           state == NBD_CLIENT_CONNECTING_NOWAIT;
}

static bool nbd_client_connecting_wait(BDRVNBDState *s)
{
    return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT;
}
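
/*
 * Overview of the client state machine, as driven by nbd_channel_error()
 * above and nbd_co_do_establish_connection() below:
 *
 *   CONNECTED    --(-EIO, reconnect_delay != 0)--> CONNECTING_WAIT
 *   CONNECTED    --(-EIO, reconnect_delay == 0)--> CONNECTING_NOWAIT
 *   any state    --(other fatal error)-----------> QUIT
 *   CONNECTING_* --(handshake succeeded)---------> CONNECTED
 *
 * While in CONNECTING_WAIT, failed requests are retried after a reconnect
 * (see the retry loops in nbd_co_request() and friends); once the
 * reconnect-delay timer fires, the state degrades to CONNECTING_NOWAIT and
 * pending requests fail with -EIO.
 */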

/*
 * Update @bs with information learned during a completed negotiation process.
 * Return failure if the server's advertised options are incompatible with the
 * client's needs.
 */
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

    return 0;
}

int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
                                                Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;
    bool blocking = nbd_client_connecting_wait(s);

    assert(!s->ioc);

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           bs);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        /*
         * We have connected, but must fail for other reasons.
         * Send NBD_CMD_DISC as a courtesy to the server.
         */
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;

        return ret;
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs));

    /* successfully connected */
    s->state = NBD_CLIENT_CONNECTED;
    qemu_co_queue_restart_all(&s->free_sema);

    return 0;
}

/* called under s->send_mutex */
static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
{
    assert(nbd_client_connecting(s));
    assert(s->in_flight == 0);

    if (nbd_client_connecting_wait(s) && s->reconnect_delay &&
        !s->reconnect_delay_timer)
    {
        /*
         * It's the first reconnect attempt after switching to
         * NBD_CLIENT_CONNECTING_WAIT
         */
        reconnect_delay_timer_init(s,
                                   qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                                   s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    /*
     * Now we are sure that nobody is accessing the channel, and no one will
     * try until we set the state to CONNECTED.
     */

    /* Finalize previous connection if any */
    if (s->ioc) {
        qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    nbd_co_do_establish_connection(s->bs, NULL);
}

/*
 * Reply dispatching: there is no dedicated reader coroutine.  Whichever
 * request coroutine takes receive_mutex first reads reply headers off the
 * wire; when it sees a header belonging to a different request, it wakes
 * that request's coroutine (nbd_recv_coroutine_wake_one()) and keeps
 * waiting for its own handle.
 */
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
{
    int ret;
    uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
    QEMU_LOCK_GUARD(&s->receive_mutex);

    while (true) {
        if (s->reply.handle == handle) {
            /* We are done */
            return 0;
        }

        if (!nbd_client_connected(s)) {
            return -EIO;
        }

        if (s->reply.handle != 0) {
            /*
             * Some other request is being handled now. It should already be
             * woken by whoever set s->reply.handle (or never wait in this
             * yield). So, we should not wake it here.
             */
            ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);

            qemu_coroutine_yield();
            /*
             * We may be woken for 3 reasons:
             * 1. From this function, executing in parallel coroutine, when our
             *    handle is received.
             * 2. From nbd_channel_error(), when connection is lost.
             * 3. From nbd_co_receive_one_chunk(), when previous request is
             *    finished and s->reply.handle set to 0.
             * Anyway, it's OK to lock the mutex and go to the next iteration.
             */

            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            continue;
        }

        /* We are under mutex and handle is 0. We have to do the dirty work. */
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
        if (ret <= 0) {
            ret = ret ? ret : -EIO;
            nbd_channel_error(s, ret);
            return ret;
        }
        if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        if (s->reply.handle == handle) {
            /* We are done */
            return 0;
        }
        ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].reply_possible) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
}
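
/*
 * On the wire, a request handle is the request slot index XORed with a
 * per-client pointer value (HANDLE_TO_INDEX/INDEX_TO_HANDLE above).  XOR
 * is its own inverse, so the slot is recovered with the same operation,
 * and a bogus reply from the server decodes to an out-of-range index,
 * which nbd_receive_replies() rejects:
 *
 *   request->handle = INDEX_TO_HANDLE(s, i);
 *   ind = HANDLE_TO_INDEX(s, reply.handle);  // >= MAX_NBD_REQUESTS: bogus
 */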
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_co_mutex_lock(&s->send_mutex);

    while (s->in_flight == MAX_NBD_REQUESTS ||
           (!nbd_client_connected(s) && s->in_flight > 0))
    {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }

    if (nbd_client_connecting(s)) {
        nbd_reconnect_attempt(s);
    }

    if (!nbd_client_connected(s)) {
        rc = -EIO;
        goto err;
    }

    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    s->requests[i].reply_possible = true;

    request->handle = INDEX_TO_HANDLE(s, i);

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (nbd_client_connected(s) && rc >= 0) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        nbd_channel_error(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
            s->in_flight--;
            qemu_co_queue_next(&s->free_sema);
        }
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}

static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}

static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent *extent, Error **errp)
{
    uint32_t context_id;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                   s->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                         "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. However, it's easy
     * enough to ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
        trace_nbd_parse_blockstatus_compliance("more than one extent");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    /*
     * HACK: if we are using x-dirty-bitmaps to access
     * qemu:allocation-depth, treat all depths > 2 the same as 2,
     * since nbd_client_co_block_status is only expecting the low two
     * bits to be set.
     */
    if (s->alloc_depth && extent->flags > 2) {
        extent->flags = 2;
    }

    return 0;
}

/*
 * nbd_parse_error_payload
 * On success, @request_ret is set to the negative errno corresponding to
 * the server's error reply; the server's textual message is currently
 * ignored (see the TODOs below).
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int nbd_co_receive_offset_data_payload(BDRVNBDState *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}

/*
 * Upper bound for g_new() allocations in nbd_co_receive_structured_payload()
 * below.  Bulk read data (NBD_REPLY_TYPE_OFFSET_DATA) never takes this path
 * (it is read directly into the request's qiov), so only small control
 * payloads (holes, block status, errors) are ever allocated here.
 */
#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    nbd_receive_replies(s, handle);
    if (!nbd_client_connected(s)) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }
    assert(s->ioc);

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                             " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/*
 * nbd_co_receive_one_chunk
 * Read reply, wake up waiting coroutines and flag a channel error if needed.
 * Return value is a fatal error code or normal nbd reply error code
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* Keep a copy for the caller; the chunk iterator inspects its type
         * and flags */
        *reply = s->reply;
    }
    s->reply.handle = 0;

    nbd_recv_coroutines_wake(s, false);

    return ret;
}

typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(local_err && *local_err);
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}

/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)

/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;

    if (!nbd_client_connected(s)) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_channel_error(iter, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || !nbd_client_connected(s)) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}

static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
                                      int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

/*
 * A structured read reply may arrive as any mix of OFFSET_DATA and
 * OFFSET_HOLE chunks covering the requested range.  Data chunks are copied
 * into @qiov as soon as they are received (see
 * nbd_co_receive_offset_data_payload()), so only hole chunks need parsing
 * here.
 */
static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
                                            uint64_t handle, uint64_t length,
                                            NBDExtent *extent,
                                            int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }

    do {
        ret = nbd_co_send_request(bs, request, write_qiov);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_return_code(s, request->handle,
                                         &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request->from, request->len,
                                      request->handle, request->flags,
                                      request->type,
                                      nbd_cmd_lookup(request->type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_connecting_wait(s));

    return ret ? ret : request_ret;
}
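
/*
 * The driver callbacks below share one retry pattern: resend the request
 * as long as the failure happened while the client was in
 * NBD_CLIENT_CONNECTING_WAIT, i.e. within the reconnect-delay window.
 */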
static int nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
                                int64_t bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    if (!bytes) {
        return 0;
    }
    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }

    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
                                           &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_connecting_wait(s));

    return ret ? ret : request_ret;
}

static int nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset,
                                 int64_t bytes, QEMUIOVector *qiov,
                                 BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}

static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,  /* .len is uint32_t actually */
    };

    assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
        request.flags |= NBD_CMD_FLAG_FAST_ZERO;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}
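
/*
 * Note on durability: when the server advertises NBD_FLAG_SEND_FUA, writes
 * can carry NBD_CMD_FLAG_FUA (see nbd_handle_updated_info()), so an explicit
 * flush below is only needed when the server also advertises
 * NBD_FLAG_SEND_FLUSH.
 */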
static int nbd_client_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int64_t bytes)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes, /* len is uint32_t */
    };

    assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent extent = { 0 };
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
                   MIN(bytes, s->info.size - offset)),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!s->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= s->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }
    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
                                               &extent, &request_ret,
                                               &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_connecting_wait(s));

    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
           BDRV_BLOCK_OFFSET_VALID;
}

static int nbd_client_reopen_prepare(BDRVReopenState *state,
                                     BlockReopenQueue *queue, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;

    if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
        error_setg(errp, "Can't reopen read-only NBD mount as read/write");
        return -EACCES;
    }
    return 0;
}

static void nbd_yank(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    qatomic_store_release(&s->state, NBD_CLIENT_QUIT);
    qio_channel_shutdown(QIO_CHANNEL(s->ioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
}

static void nbd_client_close(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (s->ioc) {
        nbd_send_request(s->ioc, &request);
    }

    nbd_teardown_connection(bs);
}


/*
 * Parse nbd_open options
 */

static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "";
    if (p[0] == '/') {
        p++;
    }
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}
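
/*
 * Filename forms accepted below (URI forms are handled by nbd_parse_uri()
 * above; "nbd+tcp://" is accepted as an alias for "nbd://"):
 *
 *   nbd://host[:port]/[export]
 *   nbd+unix:///[export]?socket=path
 *   nbd:unix:path[:exportname=export]    (legacy)
 *   nbd:host:port[:exportname=export]    (legacy)
 */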
static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    g_autofree char *file = NULL;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            return;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        return;
    }

    if (!*host_spec) {
        return;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }
}

static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                             "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }

        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}

static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
        goto done;
    }

    if (socket_address_parse_named_fd(saddr, errp) < 0) {
        qapi_free_SocketAddress(saddr);
        saddr = NULL;
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}

static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}


static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error. During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        { /* end of list */ }
    },
};
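
/*
 * nbd_process_options() below absorbs the runtime options, rewrites the
 * legacy flat host/port/path keys into the equivalent server.* keys, and
 * lets nbd_config() turn the result into a SocketAddress.  Illustrative
 * command line producing such options:
 *
 *   -blockdev driver=nbd,server.type=inet,server.host=localhost,
 *             server.port=10809,export=backup
 */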
static int nbd_process_options(BlockDriverState *bs, QDict *options,
                               Error **errp)
{
    BDRVNBDState *s = bs->opaque;
    QemuOpts *opts;
    int ret = -EINVAL;

    opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        goto error;
    }

    /* Translate @host, @port, and @path to a SocketAddress */
    if (!nbd_process_legacy_socket_options(options, opts, errp)) {
        goto error;
    }

    /* Pop the config into our state object. Exit if invalid. */
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        goto error;
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));
    if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "export name too long to send to server");
        goto error;
    }

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!s->tlscreds) {
            goto error;
        }

        /* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */
        if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) {
            error_setg(errp, "TLS only supported over IP sockets");
            goto error;
        }
        s->hostname = s->saddr->u.inet.host;
    }

    s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
    if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "x-dirty-bitmap query too long to send to server");
        goto error;
    }

    s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);

    ret = 0;

error:
    qemu_opts_del(opts);
    return ret;
}
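
/*
 * The client starts in NBD_CLIENT_CONNECTING_WAIT so that the very first
 * connection attempt in nbd_open() below blocks until it succeeds or fails
 * for good; automatic retry on later disconnects is only enabled
 * (nbd_client_connection_enable_retry()) once the initial handshake has
 * worked.
 */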
static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    int ret;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->bs = bs;
    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_queue_init(&s->free_sema);
    qemu_co_mutex_init(&s->receive_mutex);

    if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
        return -EEXIST;
    }

    ret = nbd_process_options(bs, options, errp);
    if (ret < 0) {
        goto fail;
    }

    s->conn = nbd_client_connection_new(s->saddr, true, s->export,
                                        s->x_dirty_bitmap, s->tlscreds);

    /* TODO: Configurable retry-until-timeout behaviour. */
    s->state = NBD_CLIENT_CONNECTING_WAIT;
    ret = nbd_do_establish_connection(bs, errp);
    if (ret < 0) {
        goto fail;
    }

    nbd_client_connection_enable_retry(s->conn);

    return 0;

fail:
    nbd_clear_bdrvstate(bs);
    return ret;
}

static int nbd_co_flush(BlockDriverState *bs)
{
    return nbd_client_co_flush(bs);
}

static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * If the server did not advertise an alignment:
     * - a size that is not sector-aligned implies that an alignment
     *   of 1 can be used to access those tail bytes
     * - advertisement of block status requires an alignment of 1, so
     *   that we don't violate block layer constraints that block
     *   status is always aligned (as we can't control whether the
     *   server will report sub-sector extents, such as a hole at EOF
     *   on an unaligned POSIX file)
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }
}

static void nbd_close(BlockDriverState *bs)
{
    nbd_client_close(bs);
    nbd_clear_bdrvstate(bs);
}

/*
 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
 * to a smaller size with exact=false, there is no reason to fail the
 * operation.
 *
 * Preallocation mode is ignored since it does not seem useful to fail when
 * we never change anything.
 */
static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
                                        bool exact, PreallocMode prealloc,
                                        BdrvRequestFlags flags, Error **errp)
{
    BDRVNBDState *s = bs->opaque;

    if (offset != s->info.size && exact) {
        error_setg(errp, "Cannot resize NBD nodes");
        return -ENOTSUP;
    }

    if (offset > s->info.size) {
        error_setg(errp, "Cannot grow NBD nodes");
        return -EINVAL;
    }

    return 0;
}

static int64_t nbd_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->info.size;
}

static void nbd_refresh_filename(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    const char *host = NULL, *port = NULL, *path = NULL;
    size_t len = 0;

    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix://?socket=%s", path);
    } else if (host && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s", host, port);
    }
    if (len >= sizeof(bs->exact_filename)) {
        /* Name is too long to represent exactly, so leave it empty. */
        bs->exact_filename[0] = '\0';
    }
}

static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
    /* The generic bdrv_dirname() implementation is able to work out some
     * directory name for NBD nodes, but that would be wrong. So far there is no
     * specification for how "export paths" would work, so NBD does not have
     * directory names. */
    error_setg(errp, "Cannot generate a base directory for NBD nodes");
    return NULL;
}
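
/*
 * "Strong" options are those that select which data the node shows:
 * anything naming the server or the export belongs here, since changing
 * it would effectively open a different image.
 */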
static const char *const nbd_strong_runtime_opts[] = {
    "path",
    "host",
    "port",
    "export",
    "tls-creds",
    "server.",

    NULL
};

static void nbd_cancel_in_flight(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    reconnect_delay_timer_del(s);

    if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
        qemu_co_queue_restart_all(&s->free_sema);
    }

    nbd_co_establish_connection_cancel(s->conn);
}

static BlockDriver bdrv_nbd = {
    .format_name                = "nbd",
    .protocol_name              = "nbd",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,
};

static BlockDriver bdrv_nbd_tcp = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+tcp",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,
};

static BlockDriver bdrv_nbd_unix = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+unix",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,
};

static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);