/*
 * Copyright Red Hat
 * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Network Block Device Server Side
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "block/block_int.h"
#include "block/export.h"
#include "block/dirty-bitmap.h"
#include "qapi/error.h"
#include "qemu/queue.h"
#include "trace.h"
#include "nbd-internal.h"
#include "qemu/units.h"
#include "qemu/memalign.h"

#define NBD_META_ID_BASE_ALLOCATION 0
#define NBD_META_ID_ALLOCATION_DEPTH 1
/* Dirty bitmaps use 'NBD_META_ID_DIRTY_BITMAP + i', so keep this id last. */
#define NBD_META_ID_DIRTY_BITMAP 2

/*
 * NBD_MAX_BLOCK_STATUS_EXTENTS: 1 MiB of extents data. An empirical
 * constant. If an increase is needed, note that the NBD protocol
 * recommends no larger than 32 MiB, so that the client won't consider
 * the reply as a denial of service attack.
 */
#define NBD_MAX_BLOCK_STATUS_EXTENTS (1 * MiB / 8)

static int system_errno_to_nbd_errno(int err)
{
    switch (err) {
    case 0:
        return NBD_SUCCESS;
    case EPERM:
    case EROFS:
        return NBD_EPERM;
    case EIO:
        return NBD_EIO;
    case ENOMEM:
        return NBD_ENOMEM;
#ifdef EDQUOT
    case EDQUOT:
#endif
    case EFBIG:
    case ENOSPC:
        return NBD_ENOSPC;
    case EOVERFLOW:
        return NBD_EOVERFLOW;
    case ENOTSUP:
#if ENOTSUP != EOPNOTSUPP
    case EOPNOTSUPP:
#endif
        return NBD_ENOTSUP;
    case ESHUTDOWN:
        return NBD_ESHUTDOWN;
    case EINVAL:
    default:
        return NBD_EINVAL;
    }
}

/* Definitions for opaque data types */

typedef struct NBDRequestData NBDRequestData;

struct NBDRequestData {
    NBDClient *client;
    uint8_t *data;
    bool complete;
};

struct NBDExport {
    BlockExport common;

    char *name;
    char *description;
    uint64_t size;
    uint16_t nbdflags;
    QTAILQ_HEAD(, NBDClient) clients;
    QTAILQ_ENTRY(NBDExport) next;

    BlockBackend *eject_notifier_blk;
    Notifier eject_notifier;

    bool allocation_depth;
    BdrvDirtyBitmap **export_bitmaps;
    size_t nr_export_bitmaps;
};

static QTAILQ_HEAD(, NBDExport) exports = QTAILQ_HEAD_INITIALIZER(exports);
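
/*
 * For illustration: with two dirty bitmaps exported, the context IDs
 * sent in NBD_REP_META_CONTEXT replies (and later echoed in block
 * status replies) would be assigned as follows:
 *
 *   base:allocation           -> 0  (NBD_META_ID_BASE_ALLOCATION)
 *   qemu:allocation-depth     -> 1  (NBD_META_ID_ALLOCATION_DEPTH)
 *   qemu:dirty-bitmap:bitmap0 -> 2  (NBD_META_ID_DIRTY_BITMAP + 0)
 *   qemu:dirty-bitmap:bitmap1 -> 3  (NBD_META_ID_DIRTY_BITMAP + 1)
 *
 * The bitmap names here are hypothetical; see
 * nbd_negotiate_meta_queries() for the actual assignment.
 */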

/*
 * NBDMetaContexts represents a list of meta contexts in use,
 * as selected by NBD_OPT_SET_META_CONTEXT. Also used for
 * NBD_OPT_LIST_META_CONTEXT.
 */
struct NBDMetaContexts {
    const NBDExport *exp; /* associated export */
    size_t count; /* number of negotiated contexts */
    bool base_allocation; /* export base:allocation context (block status) */
    bool allocation_depth; /* export qemu:allocation-depth */
    bool *bitmaps; /*
                    * export qemu:dirty-bitmap:<export bitmap name>,
                    * sized by exp->nr_export_bitmaps
                    */
};

struct NBDClient {
    int refcount;
    void (*close_fn)(NBDClient *client, bool negotiated);

    NBDExport *exp;
    QCryptoTLSCreds *tlscreds;
    char *tlsauthz;
    QIOChannelSocket *sioc; /* The underlying data channel */
    QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */

    Coroutine *recv_coroutine;

    CoMutex send_lock;
    Coroutine *send_coroutine;

    bool read_yielding;
    bool quiescing;

    QTAILQ_ENTRY(NBDClient) next;
    int nb_requests;
    bool closing;

    uint32_t check_align; /* If non-zero, check for aligned client requests */

    NBDMode mode;
    NBDMetaContexts contexts; /* Negotiated meta contexts */

    uint32_t opt; /* Current option being negotiated */
    uint32_t optlen; /* remaining length of data in ioc for the option being
                        negotiated now */
};

static void nbd_client_receive_next_request(NBDClient *client);

/* Basic flow for negotiation

   Server         Client
   Negotiate

   or

   Server         Client
   Negotiate #1
                  Option
   Negotiate #2

   ----

   followed by

   Server         Client
                  Request
   Response
                  Request
   Response
                  ...
   ...
                  Request (type == 2)

*/

static inline void set_be_option_rep(NBDOptionReply *rep, uint32_t option,
                                     uint32_t type, uint32_t length)
{
    stq_be_p(&rep->magic, NBD_REP_MAGIC);
    stl_be_p(&rep->option, option);
    stl_be_p(&rep->type, type);
    stl_be_p(&rep->length, length);
}

/* Send a reply header, including length, but no payload.
 * Return -errno on error, 0 on success. */
static int nbd_negotiate_send_rep_len(NBDClient *client, uint32_t type,
                                      uint32_t len, Error **errp)
{
    NBDOptionReply rep;

    trace_nbd_negotiate_send_rep_len(client->opt, nbd_opt_lookup(client->opt),
                                     type, nbd_rep_lookup(type), len);

    assert(len < NBD_MAX_BUFFER_SIZE);

    set_be_option_rep(&rep, client->opt, type, len);
    return nbd_write(client->ioc, &rep, sizeof(rep), errp);
}

/* Send a reply header with default 0 length.
 * Return -errno on error, 0 on success. */
static int nbd_negotiate_send_rep(NBDClient *client, uint32_t type,
                                  Error **errp)
{
    return nbd_negotiate_send_rep_len(client, type, 0, errp);
}

/* Send an error reply.
 * Return -errno on error, 0 on success. */
static int G_GNUC_PRINTF(4, 0)
nbd_negotiate_send_rep_verr(NBDClient *client, uint32_t type,
                            Error **errp, const char *fmt, va_list va)
{
    ERRP_GUARD();
    g_autofree char *msg = NULL;
    int ret;
    size_t len;

    msg = g_strdup_vprintf(fmt, va);
    len = strlen(msg);
    assert(len < NBD_MAX_STRING_SIZE);
    trace_nbd_negotiate_send_rep_err(msg);
    ret = nbd_negotiate_send_rep_len(client, type, len, errp);
    if (ret < 0) {
        return ret;
    }
    if (nbd_write(client->ioc, msg, len, errp) < 0) {
        error_prepend(errp, "write failed (error message): ");
        return -EIO;
    }

    return 0;
}
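
/*
 * For reference, the fixed 20-byte option reply header built by
 * set_be_option_rep() and sent by the helpers above (all fields
 * big-endian), optionally followed by `length` bytes of payload:
 *
 *   [ 0 ..  7]   NBD_REP_MAGIC
 *   [ 8 .. 11]   option being replied to
 *   [12 .. 15]   reply type (NBD_REP_ACK, NBD_REP_ERR_*, ...)
 *   [16 .. 19]   payload length
 */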

/*
 * Return a malloc'd copy of @name suitable for use in an error reply.
 */
static char *
nbd_sanitize_name(const char *name)
{
    if (strnlen(name, 80) < 80) {
        return g_strdup(name);
    }
    /* XXX Should we also try to sanitize any control characters? */
    return g_strdup_printf("%.80s...", name);
}

/* Send an error reply.
 * Return -errno on error, 0 on success. */
static int G_GNUC_PRINTF(4, 5)
nbd_negotiate_send_rep_err(NBDClient *client, uint32_t type,
                           Error **errp, const char *fmt, ...)
{
    va_list va;
    int ret;

    va_start(va, fmt);
    ret = nbd_negotiate_send_rep_verr(client, type, errp, fmt, va);
    va_end(va);
    return ret;
}

/* Drop remainder of the current option, and send a reply with the
 * given error type and message. Return -errno on read or write
 * failure; or 0 if connection is still live. */
static int G_GNUC_PRINTF(4, 0)
nbd_opt_vdrop(NBDClient *client, uint32_t type, Error **errp,
              const char *fmt, va_list va)
{
    int ret = nbd_drop(client->ioc, client->optlen, errp);

    client->optlen = 0;
    if (!ret) {
        ret = nbd_negotiate_send_rep_verr(client, type, errp, fmt, va);
    }
    return ret;
}

static int G_GNUC_PRINTF(4, 5)
nbd_opt_drop(NBDClient *client, uint32_t type, Error **errp,
             const char *fmt, ...)
{
    int ret;
    va_list va;

    va_start(va, fmt);
    ret = nbd_opt_vdrop(client, type, errp, fmt, va);
    va_end(va);

    return ret;
}

static int G_GNUC_PRINTF(3, 4)
nbd_opt_invalid(NBDClient *client, Error **errp, const char *fmt, ...)
{
    int ret;
    va_list va;

    va_start(va, fmt);
    ret = nbd_opt_vdrop(client, NBD_REP_ERR_INVALID, errp, fmt, va);
    va_end(va);

    return ret;
}

/* Read size bytes from the unparsed payload of the current option.
 * If @check_nul, require that no NUL bytes appear in buffer.
 * Return -errno on I/O error, 0 if option was completely handled by
 * sending a reply about inconsistent lengths, or 1 on success. */
static int nbd_opt_read(NBDClient *client, void *buffer, size_t size,
                        bool check_nul, Error **errp)
{
    if (size > client->optlen) {
        return nbd_opt_invalid(client, errp,
                               "Inconsistent lengths in option %s",
                               nbd_opt_lookup(client->opt));
    }
    client->optlen -= size;
    if (qio_channel_read_all(client->ioc, buffer, size, errp) < 0) {
        return -EIO;
    }

    if (check_nul && strnlen(buffer, size) != size) {
        return nbd_opt_invalid(client, errp,
                               "Unexpected embedded NUL in option %s",
                               nbd_opt_lookup(client->opt));
    }
    return 1;
}

/* Drop size bytes from the unparsed payload of the current option.
 * Return -errno on I/O error, 0 if option was completely handled by
 * sending a reply about inconsistent lengths, or 1 on success. */
static int nbd_opt_skip(NBDClient *client, size_t size, Error **errp)
{
    if (size > client->optlen) {
        return nbd_opt_invalid(client, errp,
                               "Inconsistent lengths in option %s",
                               nbd_opt_lookup(client->opt));
    }
    client->optlen -= size;
    return nbd_drop(client->ioc, size, errp) < 0 ? -EIO : 1;
}
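
/*
 * Note the tri-state return convention shared by nbd_opt_read() and
 * nbd_opt_skip(); a typical call site looks like:
 *
 *   ret = nbd_opt_read(client, &val, sizeof(val), false, errp);
 *   if (ret <= 0) {
 *       return ret;  // -errno: I/O error; 0: error reply already sent
 *   }
 *   // ret == 1: val is populated, keep parsing
 */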

/* nbd_opt_read_name
 *
 * Read a string with the format:
 *   uint32_t len     (<= NBD_MAX_STRING_SIZE)
 *   len bytes string (not 0-terminated)
 *
 * On success, @name will be allocated.
 * If @length is non-null, it will be set to the actual string length.
 *
 * Return -errno on I/O error, 0 if option was completely handled by
 * sending a reply about inconsistent lengths, or 1 on success.
 */
static int nbd_opt_read_name(NBDClient *client, char **name, uint32_t *length,
                             Error **errp)
{
    int ret;
    uint32_t len;
    g_autofree char *local_name = NULL;

    *name = NULL;
    ret = nbd_opt_read(client, &len, sizeof(len), false, errp);
    if (ret <= 0) {
        return ret;
    }
    len = cpu_to_be32(len);

    if (len > NBD_MAX_STRING_SIZE) {
        return nbd_opt_invalid(client, errp,
                               "Invalid name length: %" PRIu32, len);
    }

    local_name = g_malloc(len + 1);
    ret = nbd_opt_read(client, local_name, len, true, errp);
    if (ret <= 0) {
        return ret;
    }
    local_name[len] = '\0';

    if (length) {
        *length = len;
    }
    *name = g_steal_pointer(&local_name);

    return 1;
}

/* Send a single NBD_REP_SERVER reply to NBD_OPT_LIST, including payload.
 * Return -errno on error, 0 on success. */
static int nbd_negotiate_send_rep_list(NBDClient *client, NBDExport *exp,
                                       Error **errp)
{
    ERRP_GUARD();
    size_t name_len, desc_len;
    uint32_t len;
    const char *name = exp->name ? exp->name : "";
    const char *desc = exp->description ? exp->description : "";
    QIOChannel *ioc = client->ioc;
    int ret;

    trace_nbd_negotiate_send_rep_list(name, desc);
    name_len = strlen(name);
    desc_len = strlen(desc);
    assert(name_len <= NBD_MAX_STRING_SIZE && desc_len <= NBD_MAX_STRING_SIZE);
    len = name_len + desc_len + sizeof(len);
    ret = nbd_negotiate_send_rep_len(client, NBD_REP_SERVER, len, errp);
    if (ret < 0) {
        return ret;
    }

    len = cpu_to_be32(name_len);
    if (nbd_write(ioc, &len, sizeof(len), errp) < 0) {
        error_prepend(errp, "write failed (name length): ");
        return -EINVAL;
    }

    if (nbd_write(ioc, name, name_len, errp) < 0) {
        error_prepend(errp, "write failed (name buffer): ");
        return -EINVAL;
    }

    if (nbd_write(ioc, desc, desc_len, errp) < 0) {
        error_prepend(errp, "write failed (description buffer): ");
        return -EINVAL;
    }

    return 0;
}

/* Process the NBD_OPT_LIST command, with a potential series of replies.
 * Return -errno on error, 0 on success. */
static int nbd_negotiate_handle_list(NBDClient *client, Error **errp)
{
    NBDExport *exp;
    assert(client->opt == NBD_OPT_LIST);

    /* For each export, send a NBD_REP_SERVER reply. */
    QTAILQ_FOREACH(exp, &exports, next) {
        if (nbd_negotiate_send_rep_list(client, exp, errp)) {
            return -EINVAL;
        }
    }
    /* Finish with a NBD_REP_ACK. */
    return nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
}

static void nbd_check_meta_export(NBDClient *client, NBDExport *exp)
{
    if (exp != client->contexts.exp) {
        client->contexts.count = 0;
    }
}

/* Send a reply to NBD_OPT_EXPORT_NAME.
 * Return -errno on error, 0 on success. */
static int nbd_negotiate_handle_export_name(NBDClient *client, bool no_zeroes,
                                            Error **errp)
{
    ERRP_GUARD();
    g_autofree char *name = NULL;
    char buf[NBD_REPLY_EXPORT_NAME_SIZE] = "";
    size_t len;
    int ret;
    uint16_t myflags;

    /* Client sends:
        [20 ..  xx]   export name (length bytes)
       Server replies:
        [ 0 ..   7]   size
        [ 8 ..   9]   export flags
        [10 .. 133]   reserved     (0) [unless no_zeroes]
     */
    trace_nbd_negotiate_handle_export_name();
    if (client->mode >= NBD_MODE_EXTENDED) {
        error_setg(errp, "Extended headers already negotiated");
        return -EINVAL;
    }
    if (client->optlen > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "Bad length received");
        return -EINVAL;
    }
    name = g_malloc(client->optlen + 1);
    if (nbd_read(client->ioc, name, client->optlen, "export name", errp) < 0) {
        return -EIO;
    }
    name[client->optlen] = '\0';
    client->optlen = 0;

    trace_nbd_negotiate_handle_export_name_request(name);

    client->exp = nbd_export_find(name);
    if (!client->exp) {
        error_setg(errp, "export not found");
        return -EINVAL;
    }
    nbd_check_meta_export(client, client->exp);

    myflags = client->exp->nbdflags;
    if (client->mode >= NBD_MODE_STRUCTURED) {
        myflags |= NBD_FLAG_SEND_DF;
    }
    trace_nbd_negotiate_new_style_size_flags(client->exp->size, myflags);
    stq_be_p(buf, client->exp->size);
    stw_be_p(buf + 8, myflags);
    len = no_zeroes ? 10 : sizeof(buf);
    ret = nbd_write(client->ioc, buf, len, errp);
    if (ret < 0) {
        error_prepend(errp, "write failed: ");
        return ret;
    }

    QTAILQ_INSERT_TAIL(&client->exp->clients, client, next);
    blk_exp_ref(&client->exp->common);

    return 0;
}

/* Send a single NBD_REP_INFO, with a buffer @buf of @length bytes.
 * The buffer does NOT include the info type prefix.
 * Return -errno on error, 0 if ready to send more. */
static int nbd_negotiate_send_info(NBDClient *client,
                                   uint16_t info, uint32_t length, void *buf,
                                   Error **errp)
{
    int rc;

    trace_nbd_negotiate_send_info(info, nbd_info_lookup(info), length);
    rc = nbd_negotiate_send_rep_len(client, NBD_REP_INFO,
                                    sizeof(info) + length, errp);
    if (rc < 0) {
        return rc;
    }
    info = cpu_to_be16(info);
    if (nbd_write(client->ioc, &info, sizeof(info), errp) < 0) {
        return -EIO;
    }
    if (nbd_write(client->ioc, buf, length, errp) < 0) {
        return -EIO;
    }
    return 0;
}

/* nbd_reject_length: Handle any unexpected payload.
 * @fatal requests that we quit talking to the client, even if we are able
 * to successfully send an error reply.
 * Return:
 * -errno  transmission error occurred or @fatal was requested, errp is set
 * 0       error message successfully sent to client, errp is not set
 */
static int nbd_reject_length(NBDClient *client, bool fatal, Error **errp)
{
    int ret;

    assert(client->optlen);
    ret = nbd_opt_invalid(client, errp, "option '%s' has unexpected length",
                          nbd_opt_lookup(client->opt));
    if (fatal && !ret) {
        error_setg(errp, "option '%s' has unexpected length",
                   nbd_opt_lookup(client->opt));
        return -EINVAL;
    }
    return ret;
}

/* Handle NBD_OPT_INFO and NBD_OPT_GO.
 * Return -errno on error, 0 if ready for next option, and 1 to move
 * into transmission phase.
 */
static int nbd_negotiate_handle_info(NBDClient *client, Error **errp)
{
    int rc;
    g_autofree char *name = NULL;
    NBDExport *exp;
    uint16_t requests;
    uint16_t request;
    uint32_t namelen = 0;
    bool sendname = false;
    bool blocksize = false;
    uint32_t sizes[3];
    char buf[sizeof(uint64_t) + sizeof(uint16_t)];
    uint32_t check_align = 0;
    uint16_t myflags;

    /* Client sends:
        4 bytes: L, name length (can be 0)
        L bytes: export name
        2 bytes: N, number of requests (can be 0)
        N * 2 bytes: N requests
    */
    rc = nbd_opt_read_name(client, &name, &namelen, errp);
    if (rc <= 0) {
        return rc;
    }
    trace_nbd_negotiate_handle_export_name_request(name);

    rc = nbd_opt_read(client, &requests, sizeof(requests), false, errp);
    if (rc <= 0) {
        return rc;
    }
    requests = be16_to_cpu(requests);
    trace_nbd_negotiate_handle_info_requests(requests);
    while (requests--) {
        rc = nbd_opt_read(client, &request, sizeof(request), false, errp);
        if (rc <= 0) {
            return rc;
        }
        request = be16_to_cpu(request);
        trace_nbd_negotiate_handle_info_request(request,
                                                nbd_info_lookup(request));
        /* We care about NBD_INFO_NAME and NBD_INFO_BLOCK_SIZE;
         * everything else is either a request we don't know or
         * something we send regardless of request */
        switch (request) {
        case NBD_INFO_NAME:
            sendname = true;
            break;
        case NBD_INFO_BLOCK_SIZE:
            blocksize = true;
            break;
        }
    }
    if (client->optlen) {
        return nbd_reject_length(client, false, errp);
    }

    exp = nbd_export_find(name);
    if (!exp) {
        g_autofree char *sane_name = nbd_sanitize_name(name);

        return nbd_negotiate_send_rep_err(client, NBD_REP_ERR_UNKNOWN,
                                          errp, "export '%s' not present",
                                          sane_name);
    }
    if (client->opt == NBD_OPT_GO) {
        nbd_check_meta_export(client, exp);
    }

    /* Don't bother sending NBD_INFO_NAME unless client requested it */
    if (sendname) {
        rc = nbd_negotiate_send_info(client, NBD_INFO_NAME, namelen, name,
                                     errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Send NBD_INFO_DESCRIPTION only if available, regardless of
     * client request */
    if (exp->description) {
        size_t len = strlen(exp->description);

        assert(len <= NBD_MAX_STRING_SIZE);
        rc = nbd_negotiate_send_info(client, NBD_INFO_DESCRIPTION,
                                     len, exp->description, errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Send NBD_INFO_BLOCK_SIZE always, but tweak the minimum size
     * according to whether the client requested it, and according to
     * whether this is OPT_INFO or OPT_GO. */
    /* minimum - 1 for back-compat, or actual if client will obey it. */
    if (client->opt == NBD_OPT_INFO || blocksize) {
        check_align = sizes[0] = blk_get_request_alignment(exp->common.blk);
    } else {
        sizes[0] = 1;
    }
    assert(sizes[0] <= NBD_MAX_BUFFER_SIZE);
    /* preferred - Hard-code to 4096 for now.
     * TODO: is blk_bs(blk)->bl.opt_transfer appropriate? */
    sizes[1] = MAX(4096, sizes[0]);
    /* maximum - At most 32M, but smaller as appropriate. */
    sizes[2] = MIN(blk_get_max_transfer(exp->common.blk), NBD_MAX_BUFFER_SIZE);
    trace_nbd_negotiate_handle_info_block_size(sizes[0], sizes[1], sizes[2]);
    sizes[0] = cpu_to_be32(sizes[0]);
    sizes[1] = cpu_to_be32(sizes[1]);
    sizes[2] = cpu_to_be32(sizes[2]);
    rc = nbd_negotiate_send_info(client, NBD_INFO_BLOCK_SIZE,
                                 sizeof(sizes), sizes, errp);
    if (rc < 0) {
        return rc;
    }

    /* Send NBD_INFO_EXPORT always */
    myflags = exp->nbdflags;
    if (client->mode >= NBD_MODE_STRUCTURED) {
        myflags |= NBD_FLAG_SEND_DF;
    }
    trace_nbd_negotiate_new_style_size_flags(exp->size, myflags);
    stq_be_p(buf, exp->size);
    stw_be_p(buf + 8, myflags);
    rc = nbd_negotiate_send_info(client, NBD_INFO_EXPORT,
                                 sizeof(buf), buf, errp);
    if (rc < 0) {
        return rc;
    }

    /*
     * If the client is just asking for NBD_OPT_INFO, but forgot to
     * request block sizes in a situation that would impact
     * performance, then return an error. But for NBD_OPT_GO, we
     * tolerate all clients, regardless of alignments.
     */
    if (client->opt == NBD_OPT_INFO && !blocksize &&
        blk_get_request_alignment(exp->common.blk) > 1) {
        return nbd_negotiate_send_rep_err(client,
                                          NBD_REP_ERR_BLOCK_SIZE_REQD,
                                          errp,
                                          "request NBD_INFO_BLOCK_SIZE to "
                                          "use this export");
    }

    /* Final reply */
    rc = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
    if (rc < 0) {
        return rc;
    }

    if (client->opt == NBD_OPT_GO) {
        client->exp = exp;
        client->check_align = check_align;
        QTAILQ_INSERT_TAIL(&client->exp->clients, client, next);
        blk_exp_ref(&client->exp->common);
        rc = 1;
    }
    return rc;
}


/* Handle NBD_OPT_STARTTLS. Return NULL to drop connection, or else the
 * new channel for all further (now-encrypted) communication. */
static QIOChannel *nbd_negotiate_handle_starttls(NBDClient *client,
                                                 Error **errp)
{
    QIOChannel *ioc;
    QIOChannelTLS *tioc;
    struct NBDTLSHandshakeData data = { 0 };

    assert(client->opt == NBD_OPT_STARTTLS);

    trace_nbd_negotiate_handle_starttls();
    ioc = client->ioc;

    if (nbd_negotiate_send_rep(client, NBD_REP_ACK, errp) < 0) {
        return NULL;
    }

    tioc = qio_channel_tls_new_server(ioc,
                                      client->tlscreds,
                                      client->tlsauthz,
                                      errp);
    if (!tioc) {
        return NULL;
    }

    qio_channel_set_name(QIO_CHANNEL(tioc), "nbd-server-tls");
    trace_nbd_negotiate_handle_starttls_handshake();
    data.loop = g_main_loop_new(g_main_context_default(), FALSE);
    qio_channel_tls_handshake(tioc,
                              nbd_tls_handshake,
                              &data,
                              NULL,
                              NULL);

    if (!data.complete) {
        g_main_loop_run(data.loop);
    }
    g_main_loop_unref(data.loop);
    if (data.error) {
        object_unref(OBJECT(tioc));
        error_propagate(errp, data.error);
        return NULL;
    }

    return QIO_CHANNEL(tioc);
}
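
/*
 * For reference, an NBD_REP_META_CONTEXT reply as built below is the
 * option reply header followed by:
 *
 *   [ 0 ..  3]   context ID (big-endian; always 0 when replying to
 *                NBD_OPT_LIST_META_CONTEXT)
 *   [ 4 .. ..]   context name, e.g. "base:allocation" (no trailing NUL)
 */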

/* nbd_negotiate_send_meta_context
 *
 * Send one chunk of reply to NBD_OPT_{LIST,SET}_META_CONTEXT
 *
 * For NBD_OPT_LIST_META_CONTEXT @context_id is ignored, 0 is used instead.
 */
static int nbd_negotiate_send_meta_context(NBDClient *client,
                                           const char *context,
                                           uint32_t context_id,
                                           Error **errp)
{
    NBDOptionReplyMetaContext opt;
    struct iovec iov[] = {
        {.iov_base = &opt, .iov_len = sizeof(opt)},
        {.iov_base = (void *)context, .iov_len = strlen(context)}
    };

    assert(iov[1].iov_len <= NBD_MAX_STRING_SIZE);
    if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
        context_id = 0;
    }

    trace_nbd_negotiate_meta_query_reply(context, context_id);
    set_be_option_rep(&opt.h, client->opt, NBD_REP_META_CONTEXT,
                      sizeof(opt) - sizeof(opt.h) + iov[1].iov_len);
    stl_be_p(&opt.context_id, context_id);

    return qio_channel_writev_all(client->ioc, iov, 2, errp) < 0 ? -EIO : 0;
}

/*
 * Return true if @query matches @pattern, or if @query is empty when
 * the @client is performing _LIST_.
 */
static bool nbd_meta_empty_or_pattern(NBDClient *client, const char *pattern,
                                      const char *query)
{
    if (!*query) {
        trace_nbd_negotiate_meta_query_parse("empty");
        return client->opt == NBD_OPT_LIST_META_CONTEXT;
    }
    if (strcmp(query, pattern) == 0) {
        trace_nbd_negotiate_meta_query_parse(pattern);
        return true;
    }
    trace_nbd_negotiate_meta_query_skip("pattern not matched");
    return false;
}

/*
 * Return true and adjust @str in place if it begins with @prefix.
 */
static bool nbd_strshift(const char **str, const char *prefix)
{
    size_t len = strlen(prefix);

    if (strncmp(*str, prefix, len) == 0) {
        *str += len;
        return true;
    }
    return false;
}

/* nbd_meta_base_query
 *
 * Handle queries to 'base' namespace. For now, only the base:allocation
 * context is available. Return true if @query has been handled.
 */
static bool nbd_meta_base_query(NBDClient *client, NBDMetaContexts *meta,
                                const char *query)
{
    if (!nbd_strshift(&query, "base:")) {
        return false;
    }
    trace_nbd_negotiate_meta_query_parse("base:");

    if (nbd_meta_empty_or_pattern(client, "allocation", query)) {
        meta->base_allocation = true;
    }
    return true;
}
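
/*
 * Example: a query of "base:allocation" selects that context, while a
 * bare "base:" is empty after nbd_strshift() and therefore, per
 * nbd_meta_empty_or_pattern() above, lists the whole namespace for
 * NBD_OPT_LIST_META_CONTEXT but matches nothing for
 * NBD_OPT_SET_META_CONTEXT.
 */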

/* nbd_meta_qemu_query
 *
 * Handle queries to 'qemu' namespace. For now, only the qemu:dirty-bitmap:
 * and qemu:allocation-depth contexts are available. Return true if @query
 * has been handled.
 */
static bool nbd_meta_qemu_query(NBDClient *client, NBDMetaContexts *meta,
                                const char *query)
{
    size_t i;

    if (!nbd_strshift(&query, "qemu:")) {
        return false;
    }
    trace_nbd_negotiate_meta_query_parse("qemu:");

    if (!*query) {
        if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
            meta->allocation_depth = meta->exp->allocation_depth;
            if (meta->exp->nr_export_bitmaps) {
                memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
            }
        }
        trace_nbd_negotiate_meta_query_parse("empty");
        return true;
    }

    if (strcmp(query, "allocation-depth") == 0) {
        trace_nbd_negotiate_meta_query_parse("allocation-depth");
        meta->allocation_depth = meta->exp->allocation_depth;
        return true;
    }

    if (nbd_strshift(&query, "dirty-bitmap:")) {
        trace_nbd_negotiate_meta_query_parse("dirty-bitmap:");
        if (!*query) {
            if (client->opt == NBD_OPT_LIST_META_CONTEXT &&
                meta->exp->nr_export_bitmaps) {
                memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
            }
            trace_nbd_negotiate_meta_query_parse("empty");
            return true;
        }

        for (i = 0; i < meta->exp->nr_export_bitmaps; i++) {
            const char *bm_name;

            bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]);
            if (strcmp(bm_name, query) == 0) {
                meta->bitmaps[i] = true;
                trace_nbd_negotiate_meta_query_parse(query);
                return true;
            }
        }
        trace_nbd_negotiate_meta_query_skip("no dirty-bitmap match");
        return true;
    }

    trace_nbd_negotiate_meta_query_skip("unknown qemu context");
    return true;
}

/* nbd_negotiate_meta_query
 *
 * Parse namespace name and call corresponding function to parse body of the
 * query.
 *
 * The only supported namespaces are 'base' and 'qemu'.
 *
 * Return -errno on I/O error, 0 if option was completely handled by
 * sending a reply about inconsistent lengths, or 1 on success. */
static int nbd_negotiate_meta_query(NBDClient *client,
                                    NBDMetaContexts *meta, Error **errp)
{
    int ret;
    g_autofree char *query = NULL;
    uint32_t len;

    ret = nbd_opt_read(client, &len, sizeof(len), false, errp);
    if (ret <= 0) {
        return ret;
    }
    len = cpu_to_be32(len);

    if (len > NBD_MAX_STRING_SIZE) {
        trace_nbd_negotiate_meta_query_skip("length too long");
        return nbd_opt_skip(client, len, errp);
    }

    query = g_malloc(len + 1);
    ret = nbd_opt_read(client, query, len, true, errp);
    if (ret <= 0) {
        return ret;
    }
    query[len] = '\0';

    if (nbd_meta_base_query(client, meta, query)) {
        return 1;
    }
    if (nbd_meta_qemu_query(client, meta, query)) {
        return 1;
    }

    trace_nbd_negotiate_meta_query_skip("unknown namespace");
    return 1;
}
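
/*
 * For reference, each query parsed above arrives on the wire as a
 * 32-bit big-endian length followed by that many bytes of name, e.g.
 * "base:allocation":
 *
 *   00 00 00 0f  62 61 73 65 3a 61 6c 6c 6f 63 61 74 69 6f 6e
 */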

/* nbd_negotiate_meta_queries
 * Handle NBD_OPT_LIST_META_CONTEXT and NBD_OPT_SET_META_CONTEXT
 *
 * Return -errno on I/O error, or 0 if option was completely handled. */
static int nbd_negotiate_meta_queries(NBDClient *client, Error **errp)
{
    int ret;
    g_autofree char *export_name = NULL;
    /* Mark unused to work around https://bugs.llvm.org/show_bug.cgi?id=3888 */
    g_autofree G_GNUC_UNUSED bool *bitmaps = NULL;
    NBDMetaContexts local_meta = {0};
    NBDMetaContexts *meta;
    uint32_t nb_queries;
    size_t i;
    size_t count = 0;

    if (client->opt == NBD_OPT_SET_META_CONTEXT &&
        client->mode < NBD_MODE_STRUCTURED) {
        return nbd_opt_invalid(client, errp,
                               "request option '%s' when structured reply "
                               "is not negotiated",
                               nbd_opt_lookup(client->opt));
    }

    if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
        /* Only change the caller's meta on SET. */
        meta = &local_meta;
    } else {
        meta = &client->contexts;
    }

    g_free(meta->bitmaps);
    memset(meta, 0, sizeof(*meta));

    ret = nbd_opt_read_name(client, &export_name, NULL, errp);
    if (ret <= 0) {
        return ret;
    }

    meta->exp = nbd_export_find(export_name);
    if (meta->exp == NULL) {
        g_autofree char *sane_name = nbd_sanitize_name(export_name);

        return nbd_opt_drop(client, NBD_REP_ERR_UNKNOWN, errp,
                            "export '%s' not present", sane_name);
    }
    meta->bitmaps = g_new0(bool, meta->exp->nr_export_bitmaps);
    if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
        bitmaps = meta->bitmaps;
    }

    ret = nbd_opt_read(client, &nb_queries, sizeof(nb_queries), false, errp);
    if (ret <= 0) {
        return ret;
    }
    nb_queries = cpu_to_be32(nb_queries);
    trace_nbd_negotiate_meta_context(nbd_opt_lookup(client->opt),
                                     export_name, nb_queries);

    if (client->opt == NBD_OPT_LIST_META_CONTEXT && !nb_queries) {
        /* enable all known contexts */
        meta->base_allocation = true;
        meta->allocation_depth = meta->exp->allocation_depth;
        if (meta->exp->nr_export_bitmaps) {
            memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
        }
    } else {
        for (i = 0; i < nb_queries; ++i) {
            ret = nbd_negotiate_meta_query(client, meta, errp);
            if (ret <= 0) {
                return ret;
            }
        }
    }

    if (meta->base_allocation) {
        ret = nbd_negotiate_send_meta_context(client, "base:allocation",
                                              NBD_META_ID_BASE_ALLOCATION,
                                              errp);
        if (ret < 0) {
            return ret;
        }
        count++;
    }

    if (meta->allocation_depth) {
        ret = nbd_negotiate_send_meta_context(client, "qemu:allocation-depth",
                                              NBD_META_ID_ALLOCATION_DEPTH,
                                              errp);
        if (ret < 0) {
            return ret;
        }
        count++;
    }

    for (i = 0; i < meta->exp->nr_export_bitmaps; i++) {
        const char *bm_name;
        g_autofree char *context = NULL;

        if (!meta->bitmaps[i]) {
            continue;
        }

        bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]);
        context = g_strdup_printf("qemu:dirty-bitmap:%s", bm_name);

        ret = nbd_negotiate_send_meta_context(client, context,
                                              NBD_META_ID_DIRTY_BITMAP + i,
                                              errp);
        if (ret < 0) {
            return ret;
        }
        count++;
    }

    ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
    if (ret == 0) {
        meta->count = count;
    }

    return ret;
}
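
/*
 * For reference, the option payload parsed by
 * nbd_negotiate_meta_queries() above is laid out as:
 *
 *   32-bit length of export name, export name,
 *   32-bit number of queries,
 *   then that many queries, each as described above
 */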

/* nbd_negotiate_options
 * Process all NBD_OPT_* client option commands, during fixed newstyle
 * negotiation.
 * Return:
 * -errno  on error, errp is set
 * 0       on successful negotiation, errp is not set
 * 1       if client sent NBD_OPT_ABORT, i.e. on valid disconnect,
 *         errp is not set
 */
static int nbd_negotiate_options(NBDClient *client, Error **errp)
{
    uint32_t flags;
    bool fixedNewstyle = false;
    bool no_zeroes = false;

    /* Client sends:
        [ 0 ..   3]   client flags

       Then we loop until NBD_OPT_EXPORT_NAME or NBD_OPT_GO:
        [ 0 ..   7]   NBD_OPTS_MAGIC
        [ 8 ..  11]   NBD option
        [12 ..  15]   Data length
        ...           Rest of request

        [ 0 ..   7]   NBD_OPTS_MAGIC
        [ 8 ..  11]   Second NBD option
        [12 ..  15]   Data length
        ...           Rest of request
    */

    if (nbd_read32(client->ioc, &flags, "flags", errp) < 0) {
        return -EIO;
    }
    client->mode = NBD_MODE_EXPORT_NAME;
    trace_nbd_negotiate_options_flags(flags);
    if (flags & NBD_FLAG_C_FIXED_NEWSTYLE) {
        fixedNewstyle = true;
        flags &= ~NBD_FLAG_C_FIXED_NEWSTYLE;
        client->mode = NBD_MODE_SIMPLE;
    }
    if (flags & NBD_FLAG_C_NO_ZEROES) {
        no_zeroes = true;
        flags &= ~NBD_FLAG_C_NO_ZEROES;
    }
    if (flags != 0) {
        error_setg(errp, "Unknown client flags 0x%" PRIx32 " received", flags);
        return -EINVAL;
    }

    while (1) {
        int ret;
        uint32_t option, length;
        uint64_t magic;

        if (nbd_read64(client->ioc, &magic, "opts magic", errp) < 0) {
            return -EINVAL;
        }
        trace_nbd_negotiate_options_check_magic(magic);
        if (magic != NBD_OPTS_MAGIC) {
            error_setg(errp, "Bad magic received");
            return -EINVAL;
        }

        if (nbd_read32(client->ioc, &option, "option", errp) < 0) {
            return -EINVAL;
        }
        client->opt = option;

        if (nbd_read32(client->ioc, &length, "option length", errp) < 0) {
            return -EINVAL;
        }
        assert(!client->optlen);
        client->optlen = length;

        if (length > NBD_MAX_BUFFER_SIZE) {
            error_setg(errp, "len (%" PRIu32 ") is larger than max len (%u)",
                       length, NBD_MAX_BUFFER_SIZE);
            return -EINVAL;
        }

        trace_nbd_negotiate_options_check_option(option,
                                                 nbd_opt_lookup(option));
        if (client->tlscreds &&
            client->ioc == (QIOChannel *)client->sioc) {
            QIOChannel *tioc;
            if (!fixedNewstyle) {
                error_setg(errp, "Unsupported option 0x%" PRIx32, option);
                return -EINVAL;
            }
            switch (option) {
            case NBD_OPT_STARTTLS:
                if (length) {
                    /* Unconditionally drop the connection if the client
                     * can't start a TLS negotiation correctly */
                    return nbd_reject_length(client, true, errp);
                }
                tioc = nbd_negotiate_handle_starttls(client, errp);
                if (!tioc) {
                    return -EIO;
                }
                ret = 0;
                object_unref(OBJECT(client->ioc));
                client->ioc = tioc;
                break;

            case NBD_OPT_EXPORT_NAME:
                /* No way to return an error to client, so drop connection */
                error_setg(errp, "Option 0x%x not permitted before TLS",
                           option);
                return -EINVAL;

            default:
                /* Let the client keep trying, unless they asked to
                 * quit. Always try to give an error back to the
                 * client; but when replying to OPT_ABORT, be aware
                 * that the client may hang up before receiving the
                 * error, in which case we are fine ignoring the
                 * resulting EPIPE. */
                ret = nbd_opt_drop(client, NBD_REP_ERR_TLS_REQD,
                                   option == NBD_OPT_ABORT ? NULL : errp,
                                   "Option 0x%" PRIx32
                                   " not permitted before TLS", option);
                if (option == NBD_OPT_ABORT) {
                    return 1;
                }
                break;
            }
        } else if (fixedNewstyle) {
            switch (option) {
            case NBD_OPT_LIST:
                if (length) {
                    ret = nbd_reject_length(client, false, errp);
                } else {
                    ret = nbd_negotiate_handle_list(client, errp);
                }
                break;

            case NBD_OPT_ABORT:
                /* NBD spec says we must try to reply before
                 * disconnecting, but that we must also tolerate
                 * guests that don't wait for our reply. */
                nbd_negotiate_send_rep(client, NBD_REP_ACK, NULL);
                return 1;

            case NBD_OPT_EXPORT_NAME:
                return nbd_negotiate_handle_export_name(client, no_zeroes,
                                                        errp);

            case NBD_OPT_INFO:
            case NBD_OPT_GO:
                ret = nbd_negotiate_handle_info(client, errp);
                if (ret == 1) {
                    assert(option == NBD_OPT_GO);
                    return 0;
                }
                break;

            case NBD_OPT_STARTTLS:
                if (length) {
                    ret = nbd_reject_length(client, false, errp);
                } else if (client->tlscreds) {
                    ret = nbd_negotiate_send_rep_err(client,
                                                     NBD_REP_ERR_INVALID, errp,
                                                     "TLS already enabled");
                } else {
                    ret = nbd_negotiate_send_rep_err(client,
                                                     NBD_REP_ERR_POLICY, errp,
                                                     "TLS not configured");
                }
                break;

            case NBD_OPT_STRUCTURED_REPLY:
                if (length) {
                    ret = nbd_reject_length(client, false, errp);
                } else if (client->mode >= NBD_MODE_EXTENDED) {
                    ret = nbd_negotiate_send_rep_err(
                        client, NBD_REP_ERR_EXT_HEADER_REQD, errp,
                        "extended headers already negotiated");
                } else if (client->mode >= NBD_MODE_STRUCTURED) {
                    ret = nbd_negotiate_send_rep_err(
                        client, NBD_REP_ERR_INVALID, errp,
                        "structured reply already negotiated");
                } else {
                    ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
                    client->mode = NBD_MODE_STRUCTURED;
                }
                break;

            case NBD_OPT_LIST_META_CONTEXT:
            case NBD_OPT_SET_META_CONTEXT:
                ret = nbd_negotiate_meta_queries(client, errp);
                break;

            case NBD_OPT_EXTENDED_HEADERS:
                if (length) {
                    ret = nbd_reject_length(client, false, errp);
                } else if (client->mode >= NBD_MODE_EXTENDED) {
                    ret = nbd_negotiate_send_rep_err(
                        client, NBD_REP_ERR_INVALID, errp,
                        "extended headers already negotiated");
                } else {
                    ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
                    client->mode = NBD_MODE_EXTENDED;
                }
                break;

            default:
                ret = nbd_opt_drop(client, NBD_REP_ERR_UNSUP, errp,
                                   "Unsupported option %" PRIu32 " (%s)",
                                   option, nbd_opt_lookup(option));
                break;
            }
        } else {
            /*
             * If broken new-style we should drop the connection
             * for anything except NBD_OPT_EXPORT_NAME
             */
            switch (option) {
            case NBD_OPT_EXPORT_NAME:
                return nbd_negotiate_handle_export_name(client, no_zeroes,
                                                        errp);

            default:
                error_setg(errp, "Unsupported option %" PRIu32 " (%s)",
                           option, nbd_opt_lookup(option));
                return -EINVAL;
            }
        }
        if (ret < 0) {
            return ret;
        }
    }
}

/* nbd_negotiate
 * Return:
 * -errno  on error, errp is set
 * 0       on successful negotiation, errp is not set
 * 1       if client sent NBD_OPT_ABORT, i.e. on valid disconnect,
 *         errp is not set
 */
static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp)
{
    ERRP_GUARD();
    char buf[NBD_OLDSTYLE_NEGOTIATE_SIZE] = "";
    int ret;

    /* Old style negotiation header, no room for options
        [ 0 ..   7]   passwd       ("NBDMAGIC")
        [ 8 ..  15]   magic        (NBD_CLIENT_MAGIC)
        [16 ..  23]   size
        [24 ..  27]   export flags (zero-extended)
        [28 .. 151]   reserved     (0)

       New style negotiation header, client can send options
        [ 0 ..   7]   passwd       ("NBDMAGIC")
        [ 8 ..  15]   magic        (NBD_OPTS_MAGIC)
        [16 ..  17]   server flags (0)
        ....options sent, ending in NBD_OPT_EXPORT_NAME or NBD_OPT_GO....
     */

    qio_channel_set_blocking(client->ioc, false, NULL);
    qio_channel_set_follow_coroutine_ctx(client->ioc, true);

    trace_nbd_negotiate_begin();
    memcpy(buf, "NBDMAGIC", 8);

    stq_be_p(buf + 8, NBD_OPTS_MAGIC);
    stw_be_p(buf + 16, NBD_FLAG_FIXED_NEWSTYLE | NBD_FLAG_NO_ZEROES);

    if (nbd_write(client->ioc, buf, 18, errp) < 0) {
        error_prepend(errp, "write failed: ");
        return -EINVAL;
    }
    ret = nbd_negotiate_options(client, errp);
    if (ret != 0) {
        if (ret < 0) {
            error_prepend(errp, "option negotiation failed: ");
        }
        return ret;
    }

    assert(!client->optlen);
    trace_nbd_negotiate_success();

    return 0;
}

/* nbd_read_eof
 * Tries to read @size bytes from @ioc. This is a local implementation of
 * qio_channel_readv_all_eof. We have it here because we need it to be
 * interruptible and to know when the coroutine is yielding.
 * Returns 1 on success
 *         0 on eof, when no data was read (errp is not set)
 *         negative errno on failure (errp is set)
 */
static inline int coroutine_fn
nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp)
{
    bool partial = false;

    assert(size);
    while (size > 0) {
        struct iovec iov = { .iov_base = buffer, .iov_len = size };
        ssize_t len;

        len = qio_channel_readv(client->ioc, &iov, 1, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            client->read_yielding = true;
            qio_channel_yield(client->ioc, G_IO_IN);
            client->read_yielding = false;
            if (client->quiescing) {
                return -EAGAIN;
            }
            continue;
        } else if (len < 0) {
            return -EIO;
        } else if (len == 0) {
            if (partial) {
                error_setg(errp,
                           "Unexpected end-of-file before all bytes were read");
                return -EIO;
            } else {
                return 0;
            }
        }

        partial = true;
        size -= len;
        buffer = (uint8_t *) buffer + len;
    }
    return 1;
}

static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *request,
                                            Error **errp)
{
    uint8_t buf[NBD_EXTENDED_REQUEST_SIZE];
    uint32_t magic, expect;
    int ret;
    size_t size = client->mode >= NBD_MODE_EXTENDED ?
        NBD_EXTENDED_REQUEST_SIZE : NBD_REQUEST_SIZE;

    ret = nbd_read_eof(client, buf, size, errp);
    if (ret < 0) {
        return ret;
    }
    if (ret == 0) {
        return -EIO;
    }

    /*
     * Compact request
     *  [ 0 ..  3]   magic   (NBD_REQUEST_MAGIC)
     *  [ 4 ..  5]   flags   (NBD_CMD_FLAG_FUA, ...)
     *  [ 6 ..  7]   type    (NBD_CMD_READ, ...)
     *  [ 8 .. 15]   cookie
     *  [16 .. 23]   from
     *  [24 .. 27]   len
     * Extended request
     *  [ 0 ..  3]   magic   (NBD_EXTENDED_REQUEST_MAGIC)
     *  [ 4 ..  5]   flags   (NBD_CMD_FLAG_FUA, NBD_CMD_FLAG_PAYLOAD_LEN, ...)
     *  [ 6 ..  7]   type    (NBD_CMD_READ, ...)
     *  [ 8 .. 15]   cookie
     *  [16 .. 23]   from
     *  [24 .. 31]   len
     */

    magic = ldl_be_p(buf);
    request->flags = lduw_be_p(buf + 4);
    request->type = lduw_be_p(buf + 6);
    request->cookie = ldq_be_p(buf + 8);
    request->from = ldq_be_p(buf + 16);
    if (client->mode >= NBD_MODE_EXTENDED) {
        request->len = ldq_be_p(buf + 24);
        expect = NBD_EXTENDED_REQUEST_MAGIC;
    } else {
        request->len = (uint32_t)ldl_be_p(buf + 24); /* widen 32 to 64 bits */
        expect = NBD_REQUEST_MAGIC;
    }

    trace_nbd_receive_request(magic, request->flags, request->type,
                              request->from, request->len);

    if (magic != expect) {
        error_setg(errp, "invalid magic (got 0x%" PRIx32 ", expected 0x%"
                   PRIx32 ")", magic, expect);
        return -EINVAL;
    }
    return 0;
}

#define MAX_NBD_REQUESTS 16

void nbd_client_get(NBDClient *client)
{
    client->refcount++;
}

void nbd_client_put(NBDClient *client)
{
    if (--client->refcount == 0) {
        /* The last reference should be dropped by client->close,
         * which is called by client_close.
         */
        assert(client->closing);

        object_unref(OBJECT(client->sioc));
        object_unref(OBJECT(client->ioc));
        if (client->tlscreds) {
            object_unref(OBJECT(client->tlscreds));
        }
        g_free(client->tlsauthz);
        if (client->exp) {
            QTAILQ_REMOVE(&client->exp->clients, client, next);
            blk_exp_unref(&client->exp->common);
        }
        g_free(client->contexts.bitmaps);
        g_free(client);
    }
}
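
/*
 * Reference-count lifecycle: each in-flight NBDRequestData takes a
 * reference in nbd_request_get() and drops it in nbd_request_put()
 * (below); the code that created the client presumably holds the
 * initial reference, so the final nbd_client_put() frees the
 * NBDClient only after client_close() has run and all requests have
 * completed.
 */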

static void client_close(NBDClient *client, bool negotiated)
{
    if (client->closing) {
        return;
    }

    client->closing = true;

    /* Force requests to finish. They will drop their own references,
     * then we'll close the socket and free the NBDClient.
     */
    qio_channel_shutdown(client->ioc, QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);

    /* Also tell the client, so that they release their reference. */
    if (client->close_fn) {
        client->close_fn(client, negotiated);
    }
}

static NBDRequestData *nbd_request_get(NBDClient *client)
{
    NBDRequestData *req;

    assert(client->nb_requests <= MAX_NBD_REQUESTS - 1);
    client->nb_requests++;

    req = g_new0(NBDRequestData, 1);
    nbd_client_get(client);
    req->client = client;
    return req;
}

static void nbd_request_put(NBDRequestData *req)
{
    NBDClient *client = req->client;

    if (req->data) {
        qemu_vfree(req->data);
    }
    g_free(req);

    client->nb_requests--;

    if (client->quiescing && client->nb_requests == 0) {
        aio_wait_kick();
    }

    nbd_client_receive_next_request(client);

    nbd_client_put(client);
}

static void blk_aio_attached(AioContext *ctx, void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    trace_nbd_blk_aio_attached(exp->name, ctx);

    exp->common.ctx = ctx;

    QTAILQ_FOREACH(client, &exp->clients, next) {
        assert(client->nb_requests == 0);
        assert(client->recv_coroutine == NULL);
        assert(client->send_coroutine == NULL);
    }
}

static void blk_aio_detach(void *opaque)
{
    NBDExport *exp = opaque;

    trace_nbd_blk_aio_detach(exp->name, exp->common.ctx);

    exp->common.ctx = NULL;
}

static void nbd_drained_begin(void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    QTAILQ_FOREACH(client, &exp->clients, next) {
        client->quiescing = true;
    }
}

static void nbd_drained_end(void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    QTAILQ_FOREACH(client, &exp->clients, next) {
        client->quiescing = false;
        nbd_client_receive_next_request(client);
    }
}
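
/*
 * Drain contract: after nbd_drained_begin(), client->quiescing stops
 * new requests from being picked up (nbd_read_eof() checks it after
 * yielding), nbd_drained_poll() below reports whether in-flight
 * requests remain, and nbd_drained_end() resumes request reception.
 */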

static bool nbd_drained_poll(void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    QTAILQ_FOREACH(client, &exp->clients, next) {
        if (client->nb_requests != 0) {
            /*
             * If there's a coroutine waiting for a request on nbd_read_eof()
             * enter it here so we don't depend on the client to wake it up.
             */
            if (client->recv_coroutine != NULL && client->read_yielding) {
                qio_channel_wake_read(client->ioc);
            }

            return true;
        }
    }

    return false;
}

static void nbd_eject_notifier(Notifier *n, void *data)
{
    NBDExport *exp = container_of(n, NBDExport, eject_notifier);

    blk_exp_request_shutdown(&exp->common);
}

void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk)
{
    NBDExport *nbd_exp = container_of(exp, NBDExport, common);
    assert(exp->drv == &blk_exp_nbd);
    assert(nbd_exp->eject_notifier_blk == NULL);

    blk_ref(blk);
    nbd_exp->eject_notifier_blk = blk;
    nbd_exp->eject_notifier.notify = nbd_eject_notifier;
    blk_add_remove_bs_notifier(blk, &nbd_exp->eject_notifier);
}

static const BlockDevOps nbd_block_ops = {
    .drained_begin = nbd_drained_begin,
    .drained_end = nbd_drained_end,
    .drained_poll = nbd_drained_poll,
};

static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args,
                             Error **errp)
{
    NBDExport *exp = container_of(blk_exp, NBDExport, common);
    BlockExportOptionsNbd *arg = &exp_args->u.nbd;
    const char *name = arg->name ?: exp_args->node_name;
    BlockBackend *blk = blk_exp->blk;
    int64_t size;
    uint64_t perm, shared_perm;
    bool readonly = !exp_args->writable;
    BlockDirtyBitmapOrStrList *bitmaps;
    size_t i;
    int ret;

    assert(exp_args->type == BLOCK_EXPORT_TYPE_NBD);

    if (!nbd_server_is_running()) {
        error_setg(errp, "NBD server not running");
        return -EINVAL;
    }

    if (strlen(name) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "export name '%s' too long", name);
        return -EINVAL;
    }

    if (arg->description && strlen(arg->description) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "description '%s' too long", arg->description);
        return -EINVAL;
    }

    if (nbd_export_find(name)) {
        error_setg(errp, "NBD server already has export named '%s'", name);
        return -EEXIST;
    }

    size = blk_getlength(blk);
    if (size < 0) {
        error_setg_errno(errp, -size,
                         "Failed to determine the NBD export's length");
        return size;
    }

    /* Don't allow resize while the NBD server is running, otherwise we don't
     * care what happens with the node. */
    blk_get_perm(blk, &perm, &shared_perm);
    ret = blk_set_perm(blk, perm, shared_perm & ~BLK_PERM_RESIZE, errp);
    if (ret < 0) {
        return ret;
    }

    QTAILQ_INIT(&exp->clients);
    exp->name = g_strdup(name);
    exp->description = g_strdup(arg->description);
    exp->nbdflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_FLUSH |
                     NBD_FLAG_SEND_FUA | NBD_FLAG_SEND_CACHE);

    if (nbd_server_max_connections() != 1) {
        exp->nbdflags |= NBD_FLAG_CAN_MULTI_CONN;
    }
    if (readonly) {
        exp->nbdflags |= NBD_FLAG_READ_ONLY;
    } else {
        exp->nbdflags |= (NBD_FLAG_SEND_TRIM | NBD_FLAG_SEND_WRITE_ZEROES |
                          NBD_FLAG_SEND_FAST_ZERO);
    }
    exp->size = QEMU_ALIGN_DOWN(size, BDRV_SECTOR_SIZE);

    for (bitmaps = arg->bitmaps; bitmaps; bitmaps = bitmaps->next) {
        exp->nr_export_bitmaps++;
    }
    exp->export_bitmaps = g_new0(BdrvDirtyBitmap *, exp->nr_export_bitmaps);
    for (i = 0, bitmaps = arg->bitmaps; bitmaps;
         i++, bitmaps = bitmaps->next)
    {
        const char *bitmap;
        BlockDriverState *bs = blk_bs(blk);
        BdrvDirtyBitmap *bm = NULL;

        switch (bitmaps->value->type) {
        case QTYPE_QSTRING:
            bitmap = bitmaps->value->u.local;
            while (bs) {
                bm = bdrv_find_dirty_bitmap(bs, bitmap);
                if (bm != NULL) {
                    break;
                }

                bs = bdrv_filter_or_cow_bs(bs);
            }

            if (bm == NULL) {
                ret = -ENOENT;
                error_setg(errp, "Bitmap '%s' is not found",
                           bitmaps->value->u.local);
                goto fail;
            }

            if (readonly && bdrv_is_writable(bs) &&
                bdrv_dirty_bitmap_enabled(bm)) {
                ret = -EINVAL;
                error_setg(errp, "Enabled bitmap '%s' incompatible with "
                           "readonly export", bitmap);
                goto fail;
            }
            break;
        case QTYPE_QDICT:
            bitmap = bitmaps->value->u.external.name;
            bm = block_dirty_bitmap_lookup(bitmaps->value->u.external.node,
                                           bitmap, NULL, errp);
            if (!bm) {
                ret = -ENOENT;
                goto fail;
            }
            break;
        default:
            abort();
        }

        assert(bm);

        if (bdrv_dirty_bitmap_check(bm, BDRV_BITMAP_ALLOW_RO, errp)) {
            ret = -EINVAL;
            goto fail;
        }

        exp->export_bitmaps[i] = bm;
        assert(strlen(bitmap) <= BDRV_BITMAP_MAX_NAME_SIZE);
    }

    /* Mark bitmaps busy in a separate loop, to simplify roll-back concerns. */
    for (i = 0; i < exp->nr_export_bitmaps; i++) {
        bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], true);
    }

    exp->allocation_depth = arg->allocation_depth;

    /*
     * We need to inhibit request queuing in the block layer to ensure we can
     * be properly quiesced when entering a drained section, as our coroutines
     * servicing pending requests might enter blk_pread().
     */
    blk_set_disable_request_queuing(blk, true);

    blk_add_aio_context_notifier(blk, blk_aio_attached, blk_aio_detach, exp);

    blk_set_dev_ops(blk, &nbd_block_ops, exp);

    QTAILQ_INSERT_TAIL(&exports, exp, next);

    return 0;

fail:
    g_free(exp->export_bitmaps);
    g_free(exp->name);
    g_free(exp->description);
    return ret;
}

NBDExport *nbd_export_find(const char *name)
{
    NBDExport *exp;
    QTAILQ_FOREACH(exp, &exports, next) {
        if (strcmp(name, exp->name) == 0) {
            return exp;
        }
    }

    return NULL;
}

AioContext *
nbd_export_aio_context(NBDExport *exp)
{
    return exp->common.ctx;
}

static void nbd_export_request_shutdown(BlockExport *blk_exp)
{
    NBDExport *exp = container_of(blk_exp, NBDExport, common);
    NBDClient *client, *next;

    blk_exp_ref(&exp->common);
    /*
     * TODO: Should we expand QMP NbdServerRemoveNode enum to allow a
     * close mode that stops advertising the export to new clients but
     * still permits existing clients to run to completion? Because of
     * that possibility, nbd_export_close() can be called more than
     * once on an export.
     */
    QTAILQ_FOREACH_SAFE(client, &exp->clients, next, next) {
        client_close(client, true);
    }
    if (exp->name) {
        g_free(exp->name);
        exp->name = NULL;
        QTAILQ_REMOVE(&exports, exp, next);
    }
    blk_exp_unref(&exp->common);
}

static void nbd_export_delete(BlockExport *blk_exp)
{
    size_t i;
    NBDExport *exp = container_of(blk_exp, NBDExport, common);

    assert(exp->name == NULL);
    assert(QTAILQ_EMPTY(&exp->clients));

    g_free(exp->description);
    exp->description = NULL;

    if (exp->eject_notifier_blk) {
        notifier_remove(&exp->eject_notifier);
        blk_unref(exp->eject_notifier_blk);
    }
    blk_remove_aio_context_notifier(exp->common.blk, blk_aio_attached,
                                    blk_aio_detach, exp);
    blk_set_disable_request_queuing(exp->common.blk, false);

    for (i = 0; i < exp->nr_export_bitmaps; i++) {
        bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], false);
    }
}

const BlockExportDriver blk_exp_nbd = {
    .type               = BLOCK_EXPORT_TYPE_NBD,
    .instance_size      = sizeof(NBDExport),
    .create             = nbd_export_create,
    .delete             = nbd_export_delete,
    .request_shutdown   = nbd_export_request_shutdown,
};

static int coroutine_fn nbd_co_send_iov(NBDClient *client, struct iovec *iov,
                                        unsigned niov, Error **errp)
{
    int ret;

    g_assert(qemu_in_coroutine());
    qemu_co_mutex_lock(&client->send_lock);
    client->send_coroutine = qemu_coroutine_self();

    ret = qio_channel_writev_all(client->ioc, iov, niov, errp) < 0 ? -EIO : 0;

    client->send_coroutine = NULL;
    qemu_co_mutex_unlock(&client->send_lock);

    return ret;
}

static inline void set_be_simple_reply(NBDSimpleReply *reply, uint64_t error,
                                       uint64_t cookie)
{
    stl_be_p(&reply->magic, NBD_SIMPLE_REPLY_MAGIC);
    stl_be_p(&reply->error, error);
    stq_be_p(&reply->cookie, cookie);
}

static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
                                                 NBDRequest *request,
                                                 uint32_t error,
                                                 void *data,
                                                 uint64_t len,
                                                 Error **errp)
{
    NBDSimpleReply reply;
    int nbd_err = system_errno_to_nbd_errno(error);
    struct iovec iov[] = {
        {.iov_base = &reply, .iov_len = sizeof(reply)},
        {.iov_base = data, .iov_len = len}
    };

    assert(!len || !nbd_err);
    assert(len <= NBD_MAX_BUFFER_SIZE);
    assert(client->mode < NBD_MODE_STRUCTURED ||
           (client->mode == NBD_MODE_STRUCTURED &&
            request->type != NBD_CMD_READ));
    trace_nbd_co_send_simple_reply(request->cookie, nbd_err,
                                   nbd_err_lookup(nbd_err), len);
    set_be_simple_reply(&reply, nbd_err, request->cookie);

    return nbd_co_send_iov(client, iov, 2, errp);
}

/*
 * Prepare the header of a reply chunk for network transmission.
 *
 * On input, @iov is partially initialized: iov[0].iov_base must point
 * to an uninitialized NBDReply, while the remaining @niov elements
 * (if any) must be ready for transmission. This function then
 * populates iov[0] for transmission.
 */
static inline void set_be_chunk(NBDClient *client, struct iovec *iov,
                                size_t niov, uint16_t flags, uint16_t type,
                                NBDRequest *request)
{
    size_t i, length = 0;

    for (i = 1; i < niov; i++) {
        length += iov[i].iov_len;
    }
    assert(length <= NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData));

    if (client->mode >= NBD_MODE_EXTENDED) {
        NBDExtendedReplyChunk *chunk = iov->iov_base;

        iov[0].iov_len = sizeof(*chunk);
        stl_be_p(&chunk->magic, NBD_EXTENDED_REPLY_MAGIC);
        stw_be_p(&chunk->flags, flags);
        stw_be_p(&chunk->type, type);
        stq_be_p(&chunk->cookie, request->cookie);
        stq_be_p(&chunk->offset, request->from);
        stq_be_p(&chunk->length, length);
    } else {
        NBDStructuredReplyChunk *chunk = iov->iov_base;

        iov[0].iov_len = sizeof(*chunk);
        stl_be_p(&chunk->magic, NBD_STRUCTURED_REPLY_MAGIC);
        stw_be_p(&chunk->flags, flags);
        stw_be_p(&chunk->type, type);
        stq_be_p(&chunk->cookie, request->cookie);
        stl_be_p(&chunk->length, length);
    }
}

static int coroutine_fn nbd_co_send_chunk_done(NBDClient *client,
                                               NBDRequest *request,
                                               Error **errp)
{
    NBDReply hdr;
    struct iovec iov[] = {
        {.iov_base = &hdr},
    };

    trace_nbd_co_send_chunk_done(request->cookie);
    set_be_chunk(client, iov, 1, NBD_REPLY_FLAG_DONE,
                 NBD_REPLY_TYPE_NONE, request);
    return nbd_co_send_iov(client, iov, 1, errp);
}
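
/*
 * For illustration, the iov framing that set_be_chunk() expects for a
 * structured read reply as built below: iov[0] covers the reply
 * header, iov[1] the NBDStructuredReadData holding the 64-bit offset,
 * iov[2] the payload; the header's length field counts iov[1..2].
 */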
trace_nbd_co_send_chunk_read(request->cookie, offset, data, size); 2029 set_be_chunk(client, iov, 3, final ? NBD_REPLY_FLAG_DONE : 0, 2030 NBD_REPLY_TYPE_OFFSET_DATA, request); 2031 stq_be_p(&chunk.offset, offset); 2032 2033 return nbd_co_send_iov(client, iov, 3, errp); 2034 } 2035 2036 static int coroutine_fn nbd_co_send_chunk_error(NBDClient *client, 2037 NBDRequest *request, 2038 uint32_t error, 2039 const char *msg, 2040 Error **errp) 2041 { 2042 NBDReply hdr; 2043 NBDStructuredError chunk; 2044 int nbd_err = system_errno_to_nbd_errno(error); 2045 struct iovec iov[] = { 2046 {.iov_base = &hdr}, 2047 {.iov_base = &chunk, .iov_len = sizeof(chunk)}, 2048 {.iov_base = (char *)msg, .iov_len = msg ? strlen(msg) : 0}, 2049 }; 2050 2051 assert(nbd_err); 2052 trace_nbd_co_send_chunk_error(request->cookie, nbd_err, 2053 nbd_err_lookup(nbd_err), msg ? msg : ""); 2054 set_be_chunk(client, iov, 3, NBD_REPLY_FLAG_DONE, 2055 NBD_REPLY_TYPE_ERROR, request); 2056 stl_be_p(&chunk.error, nbd_err); 2057 stw_be_p(&chunk.message_length, iov[2].iov_len); 2058 2059 return nbd_co_send_iov(client, iov, 3, errp); 2060 } 2061 2062 /* Do a sparse read and send the structured reply to the client. 2063 * Returns -errno if sending fails. blk_co_block_status_above() failure is 2064 * reported to the client, at which point this function succeeds. 2065 */ 2066 static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client, 2067 NBDRequest *request, 2068 uint64_t offset, 2069 uint8_t *data, 2070 uint64_t size, 2071 Error **errp) 2072 { 2073 int ret = 0; 2074 NBDExport *exp = client->exp; 2075 size_t progress = 0; 2076 2077 assert(size <= NBD_MAX_BUFFER_SIZE); 2078 while (progress < size) { 2079 int64_t pnum; 2080 int status = blk_co_block_status_above(exp->common.blk, NULL, 2081 offset + progress, 2082 size - progress, &pnum, NULL, 2083 NULL); 2084 bool final; 2085 2086 if (status < 0) { 2087 char *msg = g_strdup_printf("unable to check for holes: %s", 2088 strerror(-status)); 2089 2090 ret = nbd_co_send_chunk_error(client, request, -status, msg, errp); 2091 g_free(msg); 2092 return ret; 2093 } 2094 assert(pnum && pnum <= size - progress); 2095 final = progress + pnum == size; 2096 if (status & BDRV_BLOCK_ZERO) { 2097 NBDReply hdr; 2098 NBDStructuredReadHole chunk; 2099 struct iovec iov[] = { 2100 {.iov_base = &hdr}, 2101 {.iov_base = &chunk, .iov_len = sizeof(chunk)}, 2102 }; 2103 2104 trace_nbd_co_send_chunk_read_hole(request->cookie, 2105 offset + progress, pnum); 2106 set_be_chunk(client, iov, 2, 2107 final ? 
NBD_REPLY_FLAG_DONE : 0, 2108 NBD_REPLY_TYPE_OFFSET_HOLE, request); 2109 stq_be_p(&chunk.offset, offset + progress); 2110 stl_be_p(&chunk.length, pnum); 2111 ret = nbd_co_send_iov(client, iov, 2, errp); 2112 } else { 2113 ret = blk_co_pread(exp->common.blk, offset + progress, pnum, 2114 data + progress, 0); 2115 if (ret < 0) { 2116 error_setg_errno(errp, -ret, "reading from file failed"); 2117 break; 2118 } 2119 ret = nbd_co_send_chunk_read(client, request, offset + progress, 2120 data + progress, pnum, final, errp); 2121 } 2122 2123 if (ret < 0) { 2124 break; 2125 } 2126 progress += pnum; 2127 } 2128 return ret; 2129 } 2130 2131 typedef struct NBDExtentArray { 2132 NBDExtent64 *extents; 2133 unsigned int nb_alloc; 2134 unsigned int count; 2135 uint64_t total_length; 2136 bool extended; 2137 bool can_add; 2138 bool converted_to_be; 2139 } NBDExtentArray; 2140 2141 static NBDExtentArray *nbd_extent_array_new(unsigned int nb_alloc, 2142 NBDMode mode) 2143 { 2144 NBDExtentArray *ea = g_new0(NBDExtentArray, 1); 2145 2146 assert(mode >= NBD_MODE_STRUCTURED); 2147 ea->nb_alloc = nb_alloc; 2148 ea->extents = g_new(NBDExtent64, nb_alloc); 2149 ea->extended = mode >= NBD_MODE_EXTENDED; 2150 ea->can_add = true; 2151 2152 return ea; 2153 } 2154 2155 static void nbd_extent_array_free(NBDExtentArray *ea) 2156 { 2157 g_free(ea->extents); 2158 g_free(ea); 2159 } 2160 G_DEFINE_AUTOPTR_CLEANUP_FUNC(NBDExtentArray, nbd_extent_array_free) 2161 2162 /* Further modifications of the array after conversion are abandoned */ 2163 static void nbd_extent_array_convert_to_be(NBDExtentArray *ea) 2164 { 2165 int i; 2166 2167 assert(!ea->converted_to_be); 2168 assert(ea->extended); 2169 ea->can_add = false; 2170 ea->converted_to_be = true; 2171 2172 for (i = 0; i < ea->count; i++) { 2173 ea->extents[i].length = cpu_to_be64(ea->extents[i].length); 2174 ea->extents[i].flags = cpu_to_be64(ea->extents[i].flags); 2175 } 2176 } 2177 2178 /* Further modifications of the array after conversion are abandoned */ 2179 static NBDExtent32 *nbd_extent_array_convert_to_narrow(NBDExtentArray *ea) 2180 { 2181 int i; 2182 NBDExtent32 *extents = g_new(NBDExtent32, ea->count); 2183 2184 assert(!ea->converted_to_be); 2185 assert(!ea->extended); 2186 ea->can_add = false; 2187 ea->converted_to_be = true; 2188 2189 for (i = 0; i < ea->count; i++) { 2190 assert((ea->extents[i].length | ea->extents[i].flags) <= UINT32_MAX); 2191 extents[i].length = cpu_to_be32(ea->extents[i].length); 2192 extents[i].flags = cpu_to_be32(ea->extents[i].flags); 2193 } 2194 2195 return extents; 2196 } 2197 2198 /* 2199 * Add extent to NBDExtentArray. If extent can't be added (no available space), 2200 * return -1. 2201 * For safety, when returning -1 for the first time, .can_add is set to false, 2202 * and further calls to nbd_extent_array_add() will crash. 
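 * (The assert(ea->can_add) at the top of the function enforces this.)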
2203 * (Otherwise a caller could ignore the failure to add one extent and then
2204 * add another extent that merges into the last array entry, which would
2205 * report an incorrect range to the client.)
2206 */
2207 static int nbd_extent_array_add(NBDExtentArray *ea,
2208 uint64_t length, uint32_t flags)
2209 {
2210 assert(ea->can_add);
2211
2212 if (!length) {
2213 return 0;
2214 }
2215 if (!ea->extended) {
2216 assert(length <= UINT32_MAX);
2217 }
2218
2219 /* Extend previous extent if flags are the same */
2220 if (ea->count > 0 && flags == ea->extents[ea->count - 1].flags) {
2221 uint64_t sum = length + ea->extents[ea->count - 1].length;
2222
2223 /*
2224 * sum cannot overflow: the block layer bounds image size at
2225 * 2^63, and ea->extents[].length comes from the block layer.
2226 */
2227 assert(sum >= length);
2228 if (sum <= UINT32_MAX || ea->extended) {
2229 ea->extents[ea->count - 1].length = sum;
2230 ea->total_length += length;
2231 return 0;
2232 }
2233 }
2234
2235 if (ea->count >= ea->nb_alloc) {
2236 ea->can_add = false;
2237 return -1;
2238 }
2239
2240 ea->total_length += length;
2241 ea->extents[ea->count] = (NBDExtent64) {.length = length, .flags = flags};
2242 ea->count++;
2243
2244 return 0;
2245 }
2246
2247 static int coroutine_fn blockstatus_to_extents(BlockBackend *blk,
2248 uint64_t offset, uint64_t bytes,
2249 NBDExtentArray *ea)
2250 {
2251 while (bytes) {
2252 uint32_t flags;
2253 int64_t num;
2254 int ret = blk_co_block_status_above(blk, NULL, offset, bytes, &num,
2255 NULL, NULL);
2256
2257 if (ret < 0) {
2258 return ret;
2259 }
2260
2261 flags = (ret & BDRV_BLOCK_DATA ? 0 : NBD_STATE_HOLE) |
2262 (ret & BDRV_BLOCK_ZERO ? NBD_STATE_ZERO : 0);
2263
2264 if (nbd_extent_array_add(ea, num, flags) < 0) {
2265 return 0;
2266 }
2267
2268 offset += num;
2269 bytes -= num;
2270 }
2271
2272 return 0;
2273 }
2274
2275 static int coroutine_fn blockalloc_to_extents(BlockBackend *blk,
2276 uint64_t offset, uint64_t bytes,
2277 NBDExtentArray *ea)
2278 {
2279 while (bytes) {
2280 int64_t num;
2281 int ret = blk_co_is_allocated_above(blk, NULL, false, offset, bytes,
2282 &num);
2283
2284 if (ret < 0) {
2285 return ret;
2286 }
2287
2288 if (nbd_extent_array_add(ea, num, ret) < 0) {
2289 return 0;
2290 }
2291
2292 offset += num;
2293 bytes -= num;
2294 }
2295
2296 return 0;
2297 }
2298
2299 /*
2300 * nbd_co_send_extents
2301 *
2302 * @ea is converted to BE by the function
2303 * @last controls whether NBD_REPLY_FLAG_DONE is sent.
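 *
 * Depending on the negotiated mode, the extents go out either as a
 * NBD_REPLY_TYPE_BLOCK_STATUS_EXT chunk with 64-bit fields (extended
 * headers) or as a NBD_REPLY_TYPE_BLOCK_STATUS chunk with the extents
 * narrowed to 32 bits.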
2304 */ 2305 static int coroutine_fn 2306 nbd_co_send_extents(NBDClient *client, NBDRequest *request, NBDExtentArray *ea, 2307 bool last, uint32_t context_id, Error **errp) 2308 { 2309 NBDReply hdr; 2310 NBDStructuredMeta meta; 2311 NBDExtendedMeta meta_ext; 2312 g_autofree NBDExtent32 *extents = NULL; 2313 uint16_t type; 2314 struct iovec iov[] = { {.iov_base = &hdr}, {0}, {0} }; 2315 2316 if (client->mode >= NBD_MODE_EXTENDED) { 2317 type = NBD_REPLY_TYPE_BLOCK_STATUS_EXT; 2318 2319 iov[1].iov_base = &meta_ext; 2320 iov[1].iov_len = sizeof(meta_ext); 2321 stl_be_p(&meta_ext.context_id, context_id); 2322 stl_be_p(&meta_ext.count, ea->count); 2323 2324 nbd_extent_array_convert_to_be(ea); 2325 iov[2].iov_base = ea->extents; 2326 iov[2].iov_len = ea->count * sizeof(ea->extents[0]); 2327 } else { 2328 type = NBD_REPLY_TYPE_BLOCK_STATUS; 2329 2330 iov[1].iov_base = &meta; 2331 iov[1].iov_len = sizeof(meta); 2332 stl_be_p(&meta.context_id, context_id); 2333 2334 extents = nbd_extent_array_convert_to_narrow(ea); 2335 iov[2].iov_base = extents; 2336 iov[2].iov_len = ea->count * sizeof(extents[0]); 2337 } 2338 2339 trace_nbd_co_send_extents(request->cookie, ea->count, context_id, 2340 ea->total_length, last); 2341 set_be_chunk(client, iov, 3, last ? NBD_REPLY_FLAG_DONE : 0, type, 2342 request); 2343 2344 return nbd_co_send_iov(client, iov, 3, errp); 2345 } 2346 2347 /* Get block status from the exported device and send it to the client */ 2348 static int 2349 coroutine_fn nbd_co_send_block_status(NBDClient *client, NBDRequest *request, 2350 BlockBackend *blk, uint64_t offset, 2351 uint64_t length, bool dont_fragment, 2352 bool last, uint32_t context_id, 2353 Error **errp) 2354 { 2355 int ret; 2356 unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS; 2357 g_autoptr(NBDExtentArray) ea = 2358 nbd_extent_array_new(nb_extents, client->mode); 2359 2360 if (context_id == NBD_META_ID_BASE_ALLOCATION) { 2361 ret = blockstatus_to_extents(blk, offset, length, ea); 2362 } else { 2363 ret = blockalloc_to_extents(blk, offset, length, ea); 2364 } 2365 if (ret < 0) { 2366 return nbd_co_send_chunk_error(client, request, -ret, 2367 "can't get block status", errp); 2368 } 2369 2370 return nbd_co_send_extents(client, request, ea, last, context_id, errp); 2371 } 2372 2373 /* Populate @ea from a dirty bitmap. */ 2374 static void bitmap_to_extents(BdrvDirtyBitmap *bitmap, 2375 uint64_t offset, uint64_t length, 2376 NBDExtentArray *es) 2377 { 2378 int64_t start, dirty_start, dirty_count; 2379 int64_t end = offset + length; 2380 bool full = false; 2381 int64_t bound = es->extended ? INT64_MAX : INT32_MAX; 2382 2383 bdrv_dirty_bitmap_lock(bitmap); 2384 2385 for (start = offset; 2386 bdrv_dirty_bitmap_next_dirty_area(bitmap, start, end, bound, 2387 &dirty_start, &dirty_count); 2388 start = dirty_start + dirty_count) 2389 { 2390 if ((nbd_extent_array_add(es, dirty_start - start, 0) < 0) || 2391 (nbd_extent_array_add(es, dirty_count, NBD_STATE_DIRTY) < 0)) 2392 { 2393 full = true; 2394 break; 2395 } 2396 } 2397 2398 if (!full) { 2399 /* last non dirty extent, nothing to do if array is now full */ 2400 (void) nbd_extent_array_add(es, end - start, 0); 2401 } 2402 2403 bdrv_dirty_bitmap_unlock(bitmap); 2404 } 2405 2406 static int coroutine_fn nbd_co_send_bitmap(NBDClient *client, 2407 NBDRequest *request, 2408 BdrvDirtyBitmap *bitmap, 2409 uint64_t offset, 2410 uint64_t length, bool dont_fragment, 2411 bool last, uint32_t context_id, 2412 Error **errp) 2413 { 2414 unsigned int nb_extents = dont_fragment ? 
1 : NBD_MAX_BLOCK_STATUS_EXTENTS; 2415 g_autoptr(NBDExtentArray) ea = 2416 nbd_extent_array_new(nb_extents, client->mode); 2417 2418 bitmap_to_extents(bitmap, offset, length, ea); 2419 2420 return nbd_co_send_extents(client, request, ea, last, context_id, errp); 2421 } 2422 2423 /* nbd_co_receive_request 2424 * Collect a client request. Return 0 if request looks valid, -EIO to drop 2425 * connection right away, -EAGAIN to indicate we were interrupted and the 2426 * channel should be quiesced, and any other negative value to report an error 2427 * to the client (although the caller may still need to disconnect after 2428 * reporting the error). 2429 */ 2430 static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, 2431 NBDRequest *request, 2432 Error **errp) 2433 { 2434 NBDClient *client = req->client; 2435 bool extended_with_payload; 2436 bool check_length = false; 2437 bool check_rofs = false; 2438 bool allocate_buffer = false; 2439 bool payload_okay = false; 2440 uint64_t payload_len = 0; 2441 int valid_flags = NBD_CMD_FLAG_FUA; 2442 int ret; 2443 2444 g_assert(qemu_in_coroutine()); 2445 assert(client->recv_coroutine == qemu_coroutine_self()); 2446 ret = nbd_receive_request(client, request, errp); 2447 if (ret < 0) { 2448 return ret; 2449 } 2450 2451 trace_nbd_co_receive_request_decode_type(request->cookie, request->type, 2452 nbd_cmd_lookup(request->type)); 2453 extended_with_payload = client->mode >= NBD_MODE_EXTENDED && 2454 request->flags & NBD_CMD_FLAG_PAYLOAD_LEN; 2455 if (extended_with_payload) { 2456 payload_len = request->len; 2457 check_length = true; 2458 } 2459 2460 switch (request->type) { 2461 case NBD_CMD_DISC: 2462 /* Special case: we're going to disconnect without a reply, 2463 * whether or not flags, from, or len are bogus */ 2464 req->complete = true; 2465 return -EIO; 2466 2467 case NBD_CMD_READ: 2468 if (client->mode >= NBD_MODE_STRUCTURED) { 2469 valid_flags |= NBD_CMD_FLAG_DF; 2470 } 2471 check_length = true; 2472 allocate_buffer = true; 2473 break; 2474 2475 case NBD_CMD_WRITE: 2476 if (client->mode >= NBD_MODE_EXTENDED) { 2477 if (!extended_with_payload) { 2478 /* The client is noncompliant. Trace it, but proceed. */ 2479 trace_nbd_co_receive_ext_payload_compliance(request->from, 2480 request->len); 2481 } 2482 valid_flags |= NBD_CMD_FLAG_PAYLOAD_LEN; 2483 } 2484 payload_okay = true; 2485 payload_len = request->len; 2486 check_length = true; 2487 allocate_buffer = true; 2488 check_rofs = true; 2489 break; 2490 2491 case NBD_CMD_FLUSH: 2492 break; 2493 2494 case NBD_CMD_TRIM: 2495 check_rofs = true; 2496 break; 2497 2498 case NBD_CMD_CACHE: 2499 check_length = true; 2500 break; 2501 2502 case NBD_CMD_WRITE_ZEROES: 2503 valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO; 2504 check_rofs = true; 2505 break; 2506 2507 case NBD_CMD_BLOCK_STATUS: 2508 valid_flags |= NBD_CMD_FLAG_REQ_ONE; 2509 break; 2510 2511 default: 2512 /* Unrecognized, will fail later */ 2513 ; 2514 } 2515 2516 /* Payload and buffer handling. */ 2517 if (!payload_len) { 2518 req->complete = true; 2519 } 2520 if (check_length && request->len > NBD_MAX_BUFFER_SIZE) { 2521 /* READ, WRITE, CACHE */ 2522 error_setg(errp, "len (%" PRIu64 ") is larger than max len (%u)", 2523 request->len, NBD_MAX_BUFFER_SIZE); 2524 return -EINVAL; 2525 } 2526 if (payload_len && !payload_okay) { 2527 /* 2528 * For now, we don't support payloads on other commands; but 2529 * we can keep the connection alive by ignoring the payload. 
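 * (The unexpected payload bytes are consumed from the channel by the
 * nbd_drop() call below.)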
2530 * We will fail the command later with NBD_EINVAL for the use 2531 * of an unsupported flag (and not for access beyond bounds). 2532 */ 2533 assert(request->type != NBD_CMD_WRITE); 2534 request->len = 0; 2535 } 2536 if (allocate_buffer) { 2537 /* READ, WRITE */ 2538 req->data = blk_try_blockalign(client->exp->common.blk, 2539 request->len); 2540 if (req->data == NULL) { 2541 error_setg(errp, "No memory"); 2542 return -ENOMEM; 2543 } 2544 } 2545 if (payload_len) { 2546 if (payload_okay) { 2547 /* WRITE */ 2548 assert(req->data); 2549 ret = nbd_read(client->ioc, req->data, payload_len, 2550 "CMD_WRITE data", errp); 2551 } else { 2552 ret = nbd_drop(client->ioc, payload_len, errp); 2553 } 2554 if (ret < 0) { 2555 return -EIO; 2556 } 2557 req->complete = true; 2558 trace_nbd_co_receive_request_payload_received(request->cookie, 2559 payload_len); 2560 } 2561 2562 /* Sanity checks. */ 2563 if (client->exp->nbdflags & NBD_FLAG_READ_ONLY && check_rofs) { 2564 /* WRITE, TRIM, WRITE_ZEROES */ 2565 error_setg(errp, "Export is read-only"); 2566 return -EROFS; 2567 } 2568 if (request->from > client->exp->size || 2569 request->len > client->exp->size - request->from) { 2570 error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu64 2571 ", Size: %" PRIu64, request->from, request->len, 2572 client->exp->size); 2573 return (request->type == NBD_CMD_WRITE || 2574 request->type == NBD_CMD_WRITE_ZEROES) ? -ENOSPC : -EINVAL; 2575 } 2576 if (client->check_align && !QEMU_IS_ALIGNED(request->from | request->len, 2577 client->check_align)) { 2578 /* 2579 * The block layer gracefully handles unaligned requests, but 2580 * it's still worth tracing client non-compliance 2581 */ 2582 trace_nbd_co_receive_align_compliance(nbd_cmd_lookup(request->type), 2583 request->from, 2584 request->len, 2585 client->check_align); 2586 } 2587 if (request->flags & ~valid_flags) { 2588 error_setg(errp, "unsupported flags for command %s (got 0x%x)", 2589 nbd_cmd_lookup(request->type), request->flags); 2590 return -EINVAL; 2591 } 2592 2593 return 0; 2594 } 2595 2596 /* Send simple reply without a payload, or a structured error 2597 * @error_msg is ignored if @ret >= 0 2598 * Returns 0 if connection is still live, -errno on failure to talk to client 2599 */ 2600 static coroutine_fn int nbd_send_generic_reply(NBDClient *client, 2601 NBDRequest *request, 2602 int ret, 2603 const char *error_msg, 2604 Error **errp) 2605 { 2606 if (client->mode >= NBD_MODE_STRUCTURED && ret < 0) { 2607 return nbd_co_send_chunk_error(client, request, -ret, error_msg, errp); 2608 } else if (client->mode >= NBD_MODE_EXTENDED) { 2609 return nbd_co_send_chunk_done(client, request, errp); 2610 } else { 2611 return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0, 2612 NULL, 0, errp); 2613 } 2614 } 2615 2616 /* Handle NBD_CMD_READ request. 2617 * Return -errno if sending fails. Other errors are reported directly to the 2618 * client as an error reply. 
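 *
 * Structured-reply clients that did not set NBD_CMD_FLAG_DF get
 * non-empty reads served via nbd_co_send_sparse_read(), so holes can
 * be reported as NBD_REPLY_TYPE_OFFSET_HOLE chunks instead of literal
 * zeroes.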
*/ 2619 static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request, 2620 uint8_t *data, Error **errp) 2621 { 2622 int ret; 2623 NBDExport *exp = client->exp; 2624 2625 assert(request->type == NBD_CMD_READ); 2626 assert(request->len <= NBD_MAX_BUFFER_SIZE); 2627 2628 /* XXX: NBD Protocol only documents use of FUA with WRITE */ 2629 if (request->flags & NBD_CMD_FLAG_FUA) { 2630 ret = blk_co_flush(exp->common.blk); 2631 if (ret < 0) { 2632 return nbd_send_generic_reply(client, request, ret, 2633 "flush failed", errp); 2634 } 2635 } 2636 2637 if (client->mode >= NBD_MODE_STRUCTURED && 2638 !(request->flags & NBD_CMD_FLAG_DF) && request->len) 2639 { 2640 return nbd_co_send_sparse_read(client, request, request->from, 2641 data, request->len, errp); 2642 } 2643 2644 ret = blk_co_pread(exp->common.blk, request->from, request->len, data, 0); 2645 if (ret < 0) { 2646 return nbd_send_generic_reply(client, request, ret, 2647 "reading from file failed", errp); 2648 } 2649 2650 if (client->mode >= NBD_MODE_STRUCTURED) { 2651 if (request->len) { 2652 return nbd_co_send_chunk_read(client, request, request->from, data, 2653 request->len, true, errp); 2654 } else { 2655 return nbd_co_send_chunk_done(client, request, errp); 2656 } 2657 } else { 2658 return nbd_co_send_simple_reply(client, request, 0, 2659 data, request->len, errp); 2660 } 2661 } 2662 2663 /* 2664 * nbd_do_cmd_cache 2665 * 2666 * Handle NBD_CMD_CACHE request. 2667 * Return -errno if sending fails. Other errors are reported directly to the 2668 * client as an error reply. 2669 */ 2670 static coroutine_fn int nbd_do_cmd_cache(NBDClient *client, NBDRequest *request, 2671 Error **errp) 2672 { 2673 int ret; 2674 NBDExport *exp = client->exp; 2675 2676 assert(request->type == NBD_CMD_CACHE); 2677 assert(request->len <= NBD_MAX_BUFFER_SIZE); 2678 2679 ret = blk_co_preadv(exp->common.blk, request->from, request->len, 2680 NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH); 2681 2682 return nbd_send_generic_reply(client, request, ret, 2683 "caching data failed", errp); 2684 } 2685 2686 /* Handle NBD request. 2687 * Return -errno if sending fails. Other errors are reported directly to the 2688 * client as an error reply. 
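 *
 * By the time this runs, nbd_co_receive_request() has already checked
 * flags and bounds (and traced any alignment non-compliance), so only
 * per-command I/O failures remain to be mapped onto NBD error replies.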
*/ 2689 static coroutine_fn int nbd_handle_request(NBDClient *client, 2690 NBDRequest *request, 2691 uint8_t *data, Error **errp) 2692 { 2693 int ret; 2694 int flags; 2695 NBDExport *exp = client->exp; 2696 char *msg; 2697 size_t i; 2698 2699 switch (request->type) { 2700 case NBD_CMD_CACHE: 2701 return nbd_do_cmd_cache(client, request, errp); 2702 2703 case NBD_CMD_READ: 2704 return nbd_do_cmd_read(client, request, data, errp); 2705 2706 case NBD_CMD_WRITE: 2707 flags = 0; 2708 if (request->flags & NBD_CMD_FLAG_FUA) { 2709 flags |= BDRV_REQ_FUA; 2710 } 2711 assert(request->len <= NBD_MAX_BUFFER_SIZE); 2712 ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data, 2713 flags); 2714 return nbd_send_generic_reply(client, request, ret, 2715 "writing to file failed", errp); 2716 2717 case NBD_CMD_WRITE_ZEROES: 2718 flags = 0; 2719 if (request->flags & NBD_CMD_FLAG_FUA) { 2720 flags |= BDRV_REQ_FUA; 2721 } 2722 if (!(request->flags & NBD_CMD_FLAG_NO_HOLE)) { 2723 flags |= BDRV_REQ_MAY_UNMAP; 2724 } 2725 if (request->flags & NBD_CMD_FLAG_FAST_ZERO) { 2726 flags |= BDRV_REQ_NO_FALLBACK; 2727 } 2728 ret = blk_co_pwrite_zeroes(exp->common.blk, request->from, request->len, 2729 flags); 2730 return nbd_send_generic_reply(client, request, ret, 2731 "writing to file failed", errp); 2732 2733 case NBD_CMD_DISC: 2734 /* unreachable, thanks to special case in nbd_co_receive_request() */ 2735 abort(); 2736 2737 case NBD_CMD_FLUSH: 2738 ret = blk_co_flush(exp->common.blk); 2739 return nbd_send_generic_reply(client, request, ret, 2740 "flush failed", errp); 2741 2742 case NBD_CMD_TRIM: 2743 ret = blk_co_pdiscard(exp->common.blk, request->from, request->len); 2744 if (ret >= 0 && request->flags & NBD_CMD_FLAG_FUA) { 2745 ret = blk_co_flush(exp->common.blk); 2746 } 2747 return nbd_send_generic_reply(client, request, ret, 2748 "discard failed", errp); 2749 2750 case NBD_CMD_BLOCK_STATUS: 2751 if (!request->len) { 2752 return nbd_send_generic_reply(client, request, -EINVAL, 2753 "need non-zero length", errp); 2754 } 2755 assert(client->mode >= NBD_MODE_EXTENDED || 2756 request->len <= UINT32_MAX); 2757 if (client->contexts.count) { 2758 bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE; 2759 int contexts_remaining = client->contexts.count; 2760 2761 if (client->contexts.base_allocation) { 2762 ret = nbd_co_send_block_status(client, request, 2763 exp->common.blk, 2764 request->from, 2765 request->len, dont_fragment, 2766 !--contexts_remaining, 2767 NBD_META_ID_BASE_ALLOCATION, 2768 errp); 2769 if (ret < 0) { 2770 return ret; 2771 } 2772 } 2773 2774 if (client->contexts.allocation_depth) { 2775 ret = nbd_co_send_block_status(client, request, 2776 exp->common.blk, 2777 request->from, request->len, 2778 dont_fragment, 2779 !--contexts_remaining, 2780 NBD_META_ID_ALLOCATION_DEPTH, 2781 errp); 2782 if (ret < 0) { 2783 return ret; 2784 } 2785 } 2786 2787 for (i = 0; i < client->exp->nr_export_bitmaps; i++) { 2788 if (!client->contexts.bitmaps[i]) { 2789 continue; 2790 } 2791 ret = nbd_co_send_bitmap(client, request, 2792 client->exp->export_bitmaps[i], 2793 request->from, request->len, 2794 dont_fragment, !--contexts_remaining, 2795 NBD_META_ID_DIRTY_BITMAP + i, errp); 2796 if (ret < 0) { 2797 return ret; 2798 } 2799 } 2800 2801 assert(!contexts_remaining); 2802 2803 return 0; 2804 } else { 2805 return nbd_send_generic_reply(client, request, -EINVAL, 2806 "CMD_BLOCK_STATUS not negotiated", 2807 errp); 2808 } 2809 2810 default: 2811 msg = g_strdup_printf("invalid request type (%" PRIu32 ") 
received", 2812 request->type); 2813 ret = nbd_send_generic_reply(client, request, -EINVAL, msg, 2814 errp); 2815 g_free(msg); 2816 return ret; 2817 } 2818 } 2819 2820 /* Owns a reference to the NBDClient passed as opaque. */ 2821 static coroutine_fn void nbd_trip(void *opaque) 2822 { 2823 NBDClient *client = opaque; 2824 NBDRequestData *req; 2825 NBDRequest request = { 0 }; /* GCC thinks it can be used uninitialized */ 2826 int ret; 2827 Error *local_err = NULL; 2828 2829 trace_nbd_trip(); 2830 if (client->closing) { 2831 nbd_client_put(client); 2832 return; 2833 } 2834 2835 if (client->quiescing) { 2836 /* 2837 * We're switching between AIO contexts. Don't attempt to receive a new 2838 * request and kick the main context which may be waiting for us. 2839 */ 2840 nbd_client_put(client); 2841 client->recv_coroutine = NULL; 2842 aio_wait_kick(); 2843 return; 2844 } 2845 2846 req = nbd_request_get(client); 2847 ret = nbd_co_receive_request(req, &request, &local_err); 2848 client->recv_coroutine = NULL; 2849 2850 if (client->closing) { 2851 /* 2852 * The client may be closed when we are blocked in 2853 * nbd_co_receive_request() 2854 */ 2855 goto done; 2856 } 2857 2858 if (ret == -EAGAIN) { 2859 assert(client->quiescing); 2860 goto done; 2861 } 2862 2863 nbd_client_receive_next_request(client); 2864 if (ret == -EIO) { 2865 goto disconnect; 2866 } 2867 2868 qio_channel_set_cork(client->ioc, true); 2869 2870 if (ret < 0) { 2871 /* It wasn't -EIO, so, according to nbd_co_receive_request() 2872 * semantics, we should return the error to the client. */ 2873 Error *export_err = local_err; 2874 2875 local_err = NULL; 2876 ret = nbd_send_generic_reply(client, &request, -EINVAL, 2877 error_get_pretty(export_err), &local_err); 2878 error_free(export_err); 2879 } else { 2880 ret = nbd_handle_request(client, &request, req->data, &local_err); 2881 } 2882 if (ret < 0) { 2883 error_prepend(&local_err, "Failed to send reply: "); 2884 goto disconnect; 2885 } 2886 2887 /* We must disconnect after NBD_CMD_WRITE if we did not 2888 * read the payload. 2889 */ 2890 if (!req->complete) { 2891 error_setg(&local_err, "Request handling failed in intermediate state"); 2892 goto disconnect; 2893 } 2894 2895 qio_channel_set_cork(client->ioc, false); 2896 done: 2897 nbd_request_put(req); 2898 nbd_client_put(client); 2899 return; 2900 2901 disconnect: 2902 if (local_err) { 2903 error_reportf_err(local_err, "Disconnect client, due to: "); 2904 } 2905 nbd_request_put(req); 2906 client_close(client, true); 2907 nbd_client_put(client); 2908 } 2909 2910 static void nbd_client_receive_next_request(NBDClient *client) 2911 { 2912 if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS && 2913 !client->quiescing) { 2914 nbd_client_get(client); 2915 client->recv_coroutine = qemu_coroutine_create(nbd_trip, client); 2916 aio_co_schedule(client->exp->common.ctx, client->recv_coroutine); 2917 } 2918 } 2919 2920 static coroutine_fn void nbd_co_client_start(void *opaque) 2921 { 2922 NBDClient *client = opaque; 2923 Error *local_err = NULL; 2924 2925 qemu_co_mutex_init(&client->send_lock); 2926 2927 if (nbd_negotiate(client, &local_err)) { 2928 if (local_err) { 2929 error_report_err(local_err); 2930 } 2931 client_close(client, false); 2932 return; 2933 } 2934 2935 nbd_client_receive_next_request(client); 2936 } 2937 2938 /* 2939 * Create a new client listener using the given channel @sioc. 2940 * Begin servicing it in a coroutine. 
When the connection closes, call 2941 * @close_fn with an indication of whether the client completed negotiation. 2942 */ 2943 void nbd_client_new(QIOChannelSocket *sioc, 2944 QCryptoTLSCreds *tlscreds, 2945 const char *tlsauthz, 2946 void (*close_fn)(NBDClient *, bool)) 2947 { 2948 NBDClient *client; 2949 Coroutine *co; 2950 2951 client = g_new0(NBDClient, 1); 2952 client->refcount = 1; 2953 client->tlscreds = tlscreds; 2954 if (tlscreds) { 2955 object_ref(OBJECT(client->tlscreds)); 2956 } 2957 client->tlsauthz = g_strdup(tlsauthz); 2958 client->sioc = sioc; 2959 qio_channel_set_delay(QIO_CHANNEL(sioc), false); 2960 object_ref(OBJECT(client->sioc)); 2961 client->ioc = QIO_CHANNEL(sioc); 2962 object_ref(OBJECT(client->ioc)); 2963 client->close_fn = close_fn; 2964 2965 co = qemu_coroutine_create(nbd_co_client_start, client); 2966 qemu_coroutine_enter(co); 2967 } 2968
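/*
 * Informational summary of the connection lifecycle implemented above
 * (a reading aid only, not new behavior):
 *
 *   nbd_client_new()
 *     -> spawns nbd_co_client_start() in a coroutine
 *        -> nbd_negotiate() performs the option haggling
 *        -> nbd_client_receive_next_request() schedules nbd_trip()
 *   nbd_trip()
 *     -> nbd_co_receive_request() validates one request
 *     -> re-arms itself via nbd_client_receive_next_request(), bounded
 *        by MAX_NBD_REQUESTS in-flight requests
 *     -> nbd_handle_request() performs the I/O and sends the reply
 */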