/*
 * Copyright (c) 2018 Citrix Systems Inc.
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "hw/xen/xen.h"
#include "hw/block/xen_blkif.h"
#include "hw/xen/interface/io/ring.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"

typedef struct XenBlockRequest {
    blkif_request_t req;
    int16_t status;
    off_t start;
    QEMUIOVector v;
    void *buf;
    size_t size;
    int presync;
    int aio_inflight;
    int aio_errors;
    XenBlockDataPlane *dataplane;
    QLIST_ENTRY(XenBlockRequest) list;
    BlockAcctCookie acct;
} XenBlockRequest;

struct XenBlockDataPlane {
    XenDevice *xendev;
    XenEventChannel *event_channel;
    unsigned int *ring_ref;
    unsigned int nr_ring_ref;
    void *sring;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    QLIST_HEAD(inflight_head, XenBlockRequest) inflight;
    QLIST_HEAD(freelist_head, XenBlockRequest) freelist;
    int requests_total;
    int requests_inflight;
    unsigned int max_requests;
    BlockBackend *blk;
    unsigned int sector_size;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
};

static int xen_block_send_response(XenBlockRequest *request);

static void reset_request(XenBlockRequest *request)
{
    memset(&request->req, 0, sizeof(request->req));
    request->status = 0;
    request->start = 0;
    request->size = 0;
    request->presync = 0;

    request->aio_inflight = 0;
    request->aio_errors = 0;

    request->dataplane = NULL;
    memset(&request->list, 0, sizeof(request->list));
    memset(&request->acct, 0, sizeof(request->acct));

    qemu_iovec_reset(&request->v);
}

static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request = NULL;

    if (QLIST_EMPTY(&dataplane->freelist)) {
        if (dataplane->requests_total >= dataplane->max_requests) {
            goto out;
        }
        /* allocate new struct */
        request = g_malloc0(sizeof(*request));
        request->dataplane = dataplane;
        /*
         * We cannot need more pages per request than this, and since we
         * re-use requests, allocate the memory once here. It will be freed
         * in xen_block_dataplane_destroy() when the request list is freed.
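         * Each segment fits within a single page (xen_block_parse_request()
         * rejects page-crossing segments), so BLKIF_MAX_SEGMENTS_PER_REQUEST
         * pages are enough for any request.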
         */
        request->buf = qemu_memalign(XEN_PAGE_SIZE,
                                     BLKIF_MAX_SEGMENTS_PER_REQUEST *
                                     XEN_PAGE_SIZE);
        dataplane->requests_total++;
        qemu_iovec_init(&request->v, 1);
    } else {
        /* get one from freelist */
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
    }
    QLIST_INSERT_HEAD(&dataplane->inflight, request, list);
    dataplane->requests_inflight++;

out:
    return request;
}

static void xen_block_complete_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (xen_block_send_response(request)) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(dataplane->xendev,
                                        dataplane->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }

    QLIST_REMOVE(request, list);
    dataplane->requests_inflight--;
    reset_request(request);
    request->dataplane = dataplane;
    QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int xen_block_parse_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    size_t len;
    int i;

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        request->presync = 1;
        if (!request->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", request->req.operation);
        goto err;
    }

    if (request->req.operation != BLKIF_OP_READ &&
        !blk_is_writable(dataplane->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    request->start = request->req.sector_number * dataplane->sector_size;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * dataplane->sector_size >=
            XEN_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * dataplane->sector_size;
        request->size += len;
    }
    if (request->start + request->size > blk_getlength(dataplane->blk)) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    return -1;
}

static int xen_block_copy_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    XenDevice *xendev = dataplane->xendev;
    XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count;
    bool to_domain = (request->req.operation == BLKIF_OP_READ);
    void *virt = request->buf;
    Error *local_err = NULL;

    if (request->req.nr_segments == 0) {
        return 0;
    }

    count = request->req.nr_segments;

    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = request->req.seg[i].gref;
            segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].source.virt = virt;
        } else {
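            /* copy from the guest's granted pages into request->buf */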
            segs[i].source.foreign.ref = request->req.seg[i].gref;
            segs[i].source.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (request->req.seg[i].last_sect -
                       request->req.seg[i].first_sect + 1) *
                      dataplane->sector_size;
        virt += segs[i].len;
    }

    xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err);

    if (local_err) {
        error_reportf_err(local_err, "failed to copy data: ");

        request->aio_errors++;
        return -1;
    }

    return 0;
}

static int xen_block_do_aio(XenBlockRequest *request);

static void xen_block_complete_aio(void *opaque, int ret)
{
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    aio_context_acquire(dataplane->ctx);

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        request->aio_errors++;
    }

    request->aio_inflight--;
    if (request->presync) {
        request->presync = 0;
        xen_block_do_aio(request);
        goto done;
    }
    if (request->aio_inflight > 0) {
        goto done;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure request->aio_errors is increased */
        if (ret == 0) {
            xen_block_copy_request(request);
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
    default:
        break;
    }

    request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;

    switch (request->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (request->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(dataplane->blk), &request->acct);
        } else {
            block_acct_failed(blk_get_stats(dataplane->blk), &request->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }

    xen_block_complete_request(request);

    if (dataplane->more_work) {
        qemu_bh_schedule(dataplane->bh);
    }

done:
    aio_context_release(dataplane->ctx);
}

static bool xen_block_split_discard(XenBlockRequest *request,
                                    blkif_sector_t sector_number,
                                    uint64_t nr_sectors)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / dataplane->sector_size) {
        return false;
    }

    byte_offset = sec_start * dataplane->sector_size;
    byte_remaining = sec_count * dataplane->sector_size;

    do {
        byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
            BDRV_REQUEST_MAX_BYTES : byte_remaining;
        request->aio_inflight++;
        blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                         xen_block_complete_aio, request);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

static int xen_block_do_aio(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (request->req.nr_segments &&
        (request->req.operation == BLKIF_OP_WRITE ||
         request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        xen_block_copy_request(request)) {
        goto err;
    }

    request->aio_inflight++;
    if (request->presync) {
        blk_aio_flush(request->dataplane->blk, xen_block_complete_aio,
                      request);
        return 0;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size, BLOCK_ACCT_READ);
        request->aio_inflight++;
        blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
                       xen_block_complete_aio, request);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size,
                         request->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        request->aio_inflight++;
        blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
                        xen_block_complete_aio, request);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&request->req;
        if (!xen_block_split_discard(request, req->sector_number,
                                     req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    xen_block_complete_aio(request, 0);

    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    xen_block_complete_request(request);
    return -1;
}

static int xen_block_send_response(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }
    return send_notify;
}

static int xen_block_get_request(XenBlockDataPlane *dataplane,
                                 XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

/*
 * Threshold of in-flight requests above which we will start using
 * defer_call_begin()/defer_call_end() to batch requests.
 */
#define IO_PLUG_THRESHOLD 1

static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;
    bool done_something = false;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        defer_call_begin();
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;
        done_something = true;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
                /* fall through */
            default:
                break;
            }

            xen_block_complete_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            defer_call_end();
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                defer_call_begin();
                batched = 0;
            } else {
                batched++;
            }
        }
    }
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        defer_call_end();
    }

    return done_something;
}

static void xen_block_dataplane_bh(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    aio_context_acquire(dataplane->ctx);
    xen_block_handle_requests(dataplane);
    aio_context_release(dataplane->ctx);
}

static bool xen_block_dataplane_event(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    return xen_block_handle_requests(dataplane);
}

XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockBackend *blk,
                                              unsigned int sector_size,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->blk = blk;
    dataplane->sector_size = sector_size;

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->freelist);

    if (iothread) {
        dataplane->iothread = iothread;
        object_ref(OBJECT(dataplane->iothread));
        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
    } else {
        dataplane->ctx = qemu_get_aio_context();
    }
    dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh,
                                       dataplane,
                                       &DEVICE(xendev)->mem_reentrancy_guard);

    return dataplane;
}

void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
        qemu_iovec_destroy(&request->v);
        qemu_vfree(request->buf);
        g_free(request);
    }

    qemu_bh_delete(dataplane->bh);
    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }

    g_free(dataplane);
}

void xen_block_dataplane_detach(XenBlockDataPlane *dataplane)
{
    if (!dataplane || !dataplane->event_channel) {
        return;
    }

    /* Only reason for failure is a NULL channel */
    xen_device_set_event_channel_context(dataplane->xendev,
                                         dataplane->event_channel,
                                         NULL, &error_abort);
}

void xen_block_dataplane_attach(XenBlockDataPlane *dataplane)
{
    if (!dataplane || !dataplane->event_channel) {
        return;
    }

    /* Only reason for failure is a NULL channel */
    xen_device_set_event_channel_context(dataplane->xendev,
                                         dataplane->event_channel,
                                         dataplane->ctx, &error_abort);
}

void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    xendev = dataplane->xendev;

    if (!blk_in_drain(dataplane->blk)) {
        xen_block_dataplane_detach(dataplane);
    }

    aio_context_acquire(dataplane->ctx);
    /* Xen doesn't have multiple users for nodes, so this can't fail */
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(dataplane->ctx);

    /*
     * Now that the context has been moved onto the main thread, cancel
     * further processing.
     */
    qemu_bh_cancel(dataplane->bh);

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->ring_ref,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}

void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    ERRP_GUARD();
    XenDevice *xendev = dataplane->xendev;
    AioContext *old_context;
    unsigned int ring_size;
    unsigned int i;

    dataplane->nr_ring_ref = nr_ring_ref;
    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        dataplane->ring_ref[i] = ring_ref[i];
    }

    dataplane->protocol = protocol;

    ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", dataplane->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                  errp);
    if (*errp) {
        goto stop;
    }

    dataplane->sring = xen_device_map_grant_refs(xendev,
                                                 dataplane->ring_ref,
                                                 dataplane->nr_ring_ref,
                                                 PROT_READ | PROT_WRITE,
                                                 errp);
    if (*errp) {
        goto stop;
    }

    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
                                      xen_block_dataplane_event, dataplane,
                                      errp);
    if (*errp) {
        goto stop;
    }

    old_context = blk_get_aio_context(dataplane->blk);
    aio_context_acquire(old_context);
    /* If other users keep the BlockBackend in the iothread, that's ok */
    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
    aio_context_release(old_context);

    if (!blk_in_drain(dataplane->blk)) {
        xen_block_dataplane_attach(dataplane);
    }

    return;

stop:
    xen_block_dataplane_stop(dataplane);
}