/*
 * Copyright (c) 2018 Citrix Systems Inc.
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "hw/xen/xen.h"
#include "hw/block/xen_blkif.h"
#include "hw/xen/interface/io/ring.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"

typedef struct XenBlockRequest {
    blkif_request_t req;
    int16_t status;
    off_t start;
    QEMUIOVector v;
    void *buf;
    size_t size;
    int presync;
    int aio_inflight;
    int aio_errors;
    XenBlockDataPlane *dataplane;
    QLIST_ENTRY(XenBlockRequest) list;
    BlockAcctCookie acct;
} XenBlockRequest;

struct XenBlockDataPlane {
    XenDevice *xendev;
    XenEventChannel *event_channel;
    unsigned int *ring_ref;
    unsigned int nr_ring_ref;
    void *sring;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    QLIST_HEAD(inflight_head, XenBlockRequest) inflight;
    QLIST_HEAD(freelist_head, XenBlockRequest) freelist;
    int requests_total;
    int requests_inflight;
    unsigned int max_requests;
    BlockBackend *blk;
    unsigned int sector_size;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
};

static int xen_block_send_response(XenBlockRequest *request);

static void reset_request(XenBlockRequest *request)
{
    memset(&request->req, 0, sizeof(request->req));
    request->status = 0;
    request->start = 0;
    request->size = 0;
    request->presync = 0;

    request->aio_inflight = 0;
    request->aio_errors = 0;

    request->dataplane = NULL;
    memset(&request->list, 0, sizeof(request->list));
    memset(&request->acct, 0, sizeof(request->acct));

    qemu_iovec_reset(&request->v);
}

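/*
 * Take a recycled request from the freelist, or lazily allocate a new one
 * (together with its bounce buffer) while we are still below max_requests.
 * Returns NULL when the pipeline is already full.
 */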
static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request = NULL;

    if (QLIST_EMPTY(&dataplane->freelist)) {
        if (dataplane->requests_total >= dataplane->max_requests) {
            goto out;
        }
        /* allocate new struct */
        request = g_malloc0(sizeof(*request));
        request->dataplane = dataplane;
        /*
         * We cannot need more pages per request than this, and since we
         * re-use requests, allocate the memory once here. It will be freed
         * in xen_block_dataplane_destroy() when the request list is freed.
         */
        request->buf = qemu_memalign(XEN_PAGE_SIZE,
                                     BLKIF_MAX_SEGMENTS_PER_REQUEST *
                                     XEN_PAGE_SIZE);
        dataplane->requests_total++;
        qemu_iovec_init(&request->v, 1);
    } else {
        /* get one from freelist */
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
    }
    QLIST_INSERT_HEAD(&dataplane->inflight, request, list);
    dataplane->requests_inflight++;

out:
    return request;
}

static void xen_block_complete_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (xen_block_send_response(request)) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(dataplane->xendev,
                                        dataplane->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }

    QLIST_REMOVE(request, list);
    dataplane->requests_inflight--;
    reset_request(request);
    request->dataplane = dataplane;
    QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int xen_block_parse_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    size_t len;
    int i;

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        request->presync = 1;
        if (!request->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", request->req.operation);
        goto err;
    }

    if (request->req.operation != BLKIF_OP_READ &&
        !blk_is_writable(dataplane->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    request->start = request->req.sector_number * dataplane->sector_size;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * dataplane->sector_size >=
            XEN_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * dataplane->sector_size;
        request->size += len;
    }
    if (request->start + request->size > blk_getlength(dataplane->blk)) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    return -1;
}

static int xen_block_copy_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    XenDevice *xendev = dataplane->xendev;
    XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count;
    bool to_domain = (request->req.operation == BLKIF_OP_READ);
    void *virt = request->buf;
    Error *local_err = NULL;

    if (request->req.nr_segments == 0) {
        return 0;
    }

    count = request->req.nr_segments;

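    /*
     * Build one grant-copy segment per request segment. For reads the data
     * is copied from the local bounce buffer into the guest's granted pages
     * (to_domain == true); for writes it is copied the other way round.
     */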
    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = request->req.seg[i].gref;
            segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = request->req.seg[i].gref;
            segs[i].source.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (request->req.seg[i].last_sect -
                       request->req.seg[i].first_sect + 1) *
            dataplane->sector_size;
        virt += segs[i].len;
    }

    xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err);

    if (local_err) {
        error_reportf_err(local_err, "failed to copy data: ");

        request->aio_errors++;
        return -1;
    }

    return 0;
}

static int xen_block_do_aio(XenBlockRequest *request);

static void xen_block_complete_aio(void *opaque, int ret)
{
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        request->aio_errors++;
    }

    request->aio_inflight--;
    if (request->presync) {
        request->presync = 0;
        xen_block_do_aio(request);
        return;
    }
    if (request->aio_inflight > 0) {
        return;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure request->aio_errors is increased */
        if (ret == 0) {
            xen_block_copy_request(request);
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
    default:
        break;
    }

    request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;

    switch (request->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (request->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(dataplane->blk), &request->acct);
        } else {
            block_acct_failed(blk_get_stats(dataplane->blk), &request->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }

    xen_block_complete_request(request);

    if (dataplane->more_work) {
        qemu_bh_schedule(dataplane->bh);
    }
}

static bool xen_block_split_discard(XenBlockRequest *request,
                                    blkif_sector_t sector_number,
                                    uint64_t nr_sectors)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / dataplane->sector_size) {
        return false;
    }

    byte_offset = sec_start * dataplane->sector_size;
    byte_remaining = sec_count * dataplane->sector_size;

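    /*
     * Submit the discard in chunks of at most BDRV_REQUEST_MAX_BYTES.
     * Every chunk is a separate AIO request and bumps aio_inflight, so the
     * overall request only completes once all chunks have finished.
     */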
    do {
        byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
            BDRV_REQUEST_MAX_BYTES : byte_remaining;
        request->aio_inflight++;
        blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                         xen_block_complete_aio, request);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

static int xen_block_do_aio(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (request->req.nr_segments &&
        (request->req.operation == BLKIF_OP_WRITE ||
         request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        xen_block_copy_request(request)) {
        goto err;
    }

    request->aio_inflight++;
    if (request->presync) {
        blk_aio_flush(request->dataplane->blk, xen_block_complete_aio,
                      request);
        return 0;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size, BLOCK_ACCT_READ);
        request->aio_inflight++;
        blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
                       xen_block_complete_aio, request);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size,
                         request->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        request->aio_inflight++;
        blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
                        xen_block_complete_aio, request);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&request->req;
        if (!xen_block_split_discard(request, req->sector_number,
                                     req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    xen_block_complete_aio(request, 0);

    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    xen_block_complete_request(request);
    return -1;
}

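/*
 * Place the response for a finished request on the shared ring and report
 * whether the frontend needs to be notified through the event channel.
 */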
static int xen_block_send_response(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }
    return send_notify;
}

static int xen_block_get_request(XenBlockDataPlane *dataplane,
                                 XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

/*
 * Threshold of in-flight requests above which we will start using
 * defer_call_begin()/defer_call_end() to batch requests.
 */
#define IO_PLUG_THRESHOLD 1

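/*
 * Consume requests from the shared ring, parse them and submit the
 * resulting I/O. Returns true if at least one request was consumed.
 */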
static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;
    bool done_something = false;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        defer_call_begin();
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;
        done_something = true;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
            default:
                break;
            }

            xen_block_complete_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            defer_call_end();
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                defer_call_begin();
                batched = 0;
            } else {
                batched++;
            }
        }
    }
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        defer_call_end();
    }

    return done_something;
}

static void xen_block_dataplane_bh(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    xen_block_handle_requests(dataplane);
}

static bool xen_block_dataplane_event(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    return xen_block_handle_requests(dataplane);
}

XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockBackend *blk,
                                              unsigned int sector_size,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->blk = blk;
    dataplane->sector_size = sector_size;

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->freelist);

    if (iothread) {
        dataplane->iothread = iothread;
        object_ref(OBJECT(dataplane->iothread));
        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
    } else {
        dataplane->ctx = qemu_get_aio_context();
    }
    dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh,
                                       dataplane,
                                       &DEVICE(xendev)->mem_reentrancy_guard);

    return dataplane;
}

void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
        qemu_iovec_destroy(&request->v);
        qemu_vfree(request->buf);
        g_free(request);
    }

    qemu_bh_delete(dataplane->bh);
    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }

    g_free(dataplane);
}

void xen_block_dataplane_detach(XenBlockDataPlane *dataplane)
{
    if (!dataplane || !dataplane->event_channel) {
        return;
    }

    /* Only reason for failure is a NULL channel */
    xen_device_set_event_channel_context(dataplane->xendev,
                                         dataplane->event_channel,
                                         NULL, &error_abort);
}

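/*
 * Route event channel handling to the dataplane's AioContext, i.e. the
 * IOThread when one is configured, otherwise the main loop context.
 */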
void xen_block_dataplane_attach(XenBlockDataPlane *dataplane)
{
    if (!dataplane || !dataplane->event_channel) {
        return;
    }

    /* Only reason for failure is a NULL channel */
    xen_device_set_event_channel_context(dataplane->xendev,
                                         dataplane->event_channel,
                                         dataplane->ctx, &error_abort);
}

void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    xendev = dataplane->xendev;

    if (!blk_in_drain(dataplane->blk)) {
        xen_block_dataplane_detach(dataplane);
    }

    /* Xen doesn't have multiple users for nodes, so this can't fail */
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);

    /*
     * Now that the context has been moved onto the main thread, cancel
     * further processing.
     */
    qemu_bh_cancel(dataplane->bh);

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->ring_ref,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}

void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    ERRP_GUARD();
    XenDevice *xendev = dataplane->xendev;
    unsigned int ring_size;
    unsigned int i;

    dataplane->nr_ring_ref = nr_ring_ref;
    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        dataplane->ring_ref[i] = ring_ref[i];
    }

    dataplane->protocol = protocol;

    ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", dataplane->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                  errp);
    if (*errp) {
        goto stop;
    }

    dataplane->sring = xen_device_map_grant_refs(xendev,
                                                 dataplane->ring_ref,
                                                 dataplane->nr_ring_ref,
                                                 PROT_READ | PROT_WRITE,
                                                 errp);
    if (*errp) {
        goto stop;
    }

    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

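    /*
     * Frontend notifications on this event channel will invoke
     * xen_block_dataplane_event() to process the ring.
     */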
    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
                                      xen_block_dataplane_event, dataplane,
                                      errp);
    if (*errp) {
        goto stop;
    }

    /* If other users keep the BlockBackend in the iothread, that's ok */
    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);

    if (!blk_in_drain(dataplane->blk)) {
        xen_block_dataplane_attach(dataplane);
    }

    return;

stop:
    xen_block_dataplane_stop(dataplane);
}