/*
 * QEMU Block driver for iSCSI images
 *
 * Copyright (c) 2010-2011 Ronnie Sahlberg <ronniesahlberg@gmail.com>
 * Copyright (c) 2012-2015 Peter Lieven <pl@kamp.de>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "config-host.h"

#include <poll.h>
#include <math.h>
#include <arpa/inet.h>
#include "qemu-common.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "block/block_int.h"
#include "block/scsi.h"
#include "qemu/iov.h"
#include "sysemu/sysemu.h"
#include "qmp-commands.h"
#include "qapi/qmp/qstring.h"

#include <iscsi/iscsi.h>
#include <iscsi/scsi-lowlevel.h>

#ifdef __linux__
#include <scsi/sg.h>
#include <block/scsi.h>
#endif

typedef struct IscsiLun {
    struct iscsi_context *iscsi;
    AioContext *aio_context;
    int lun;
    enum scsi_inquiry_peripheral_device_type type;
    int block_size;
    uint64_t num_blocks;
    int events;
    QEMUTimer *nop_timer;
    QEMUTimer *event_timer;
    struct scsi_inquiry_logical_block_provisioning lbp;
    struct scsi_inquiry_block_limits bl;
    unsigned char *zeroblock;
    unsigned long *allocationmap;
    int cluster_sectors;
    bool use_16_for_rw;
    bool write_protected;
    bool lbpme;
    bool lbprz;
    bool dpofua;
    bool has_write_same;
    bool force_next_flush;
} IscsiLun;

typedef struct IscsiTask {
    int status;
    int complete;
    int retries;
    int do_retry;
    struct scsi_task *task;
    Coroutine *co;
    QEMUBH *bh;
    IscsiLun *iscsilun;
    QEMUTimer retry_timer;
    bool force_next_flush;
} IscsiTask;

typedef struct IscsiAIOCB {
    BlockAIOCB common;
    QEMUIOVector *qiov;
    QEMUBH *bh;
    IscsiLun *iscsilun;
    struct scsi_task *task;
    uint8_t *buf;
    int status;
    int64_t sector_num;
    int nb_sectors;
#ifdef __linux__
    sg_io_hdr_t *ioh;
#endif
} IscsiAIOCB;

#define EVENT_INTERVAL 250
#define NOP_INTERVAL 5000
#define MAX_NOP_FAILURES 3
#define ISCSI_CMD_RETRIES ARRAY_SIZE(iscsi_retry_times)
static const unsigned iscsi_retry_times[] = {8, 32, 128, 512, 2048, 8192, 32768};

/* this threshold is a trade-off knob to choose between
 * the potential additional overhead of an extra GET_LBA_STATUS request
 * vs. unnecessarily reading a lot of zero sectors over the wire.
 * If a read request is greater than or equal to ISCSI_CHECKALLOC_THRES
 * sectors we check the allocation status of the area covered by the
 * request first if the allocationmap indicates that the area might be
 * unallocated. */
#define ISCSI_CHECKALLOC_THRES 64

static void
iscsi_bh_cb(void *p)
{
    IscsiAIOCB *acb = p;

    qemu_bh_delete(acb->bh);

    g_free(acb->buf);
    acb->buf = NULL;

    acb->common.cb(acb->common.opaque, acb->status);

    if (acb->task != NULL) {
        scsi_free_scsi_task(acb->task);
        acb->task = NULL;
    }

    qemu_aio_unref(acb);
}

static void
iscsi_schedule_bh(IscsiAIOCB *acb)
{
    if (acb->bh) {
        return;
    }
    acb->bh = aio_bh_new(acb->iscsilun->aio_context, iscsi_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
}

static void iscsi_co_generic_bh_cb(void *opaque)
{
    struct IscsiTask *iTask = opaque;
    iTask->complete = 1;
    qemu_bh_delete(iTask->bh);
    qemu_coroutine_enter(iTask->co, NULL);
}

static void iscsi_retry_timer_expired(void *opaque)
{
    struct IscsiTask *iTask = opaque;
    iTask->complete = 1;
    if (iTask->co) {
        qemu_coroutine_enter(iTask->co, NULL);
    }
}

static inline unsigned exp_random(double mean)
{
    return -mean * log((double)rand() / RAND_MAX);
}

static void
iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                    void *command_data, void *opaque)
{
    struct IscsiTask *iTask = opaque;
    struct scsi_task *task = command_data;

    iTask->status = status;
    iTask->do_retry = 0;
    iTask->task = task;

    if (status != SCSI_STATUS_GOOD) {
        if (iTask->retries++ < ISCSI_CMD_RETRIES) {
            if (status == SCSI_STATUS_CHECK_CONDITION
                && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
                error_report("iSCSI CheckCondition: %s",
                             iscsi_get_error(iscsi));
                iTask->do_retry = 1;
                goto out;
            }
            /* status 0x28 is SCSI_TASK_SET_FULL. It was first introduced
             * in libiscsi 1.10.0. Hardcode this value here to avoid
             * the need to bump the libiscsi requirement to 1.10.0 */
            if (status == SCSI_STATUS_BUSY || status == 0x28) {
                unsigned retry_time =
                    exp_random(iscsi_retry_times[iTask->retries - 1]);
                error_report("iSCSI Busy/TaskSetFull (retry #%u in %u ms): %s",
                             iTask->retries, retry_time,
                             iscsi_get_error(iscsi));
                aio_timer_init(iTask->iscsilun->aio_context,
                               &iTask->retry_timer, QEMU_CLOCK_REALTIME,
                               SCALE_MS, iscsi_retry_timer_expired, iTask);
                timer_mod(&iTask->retry_timer,
                          qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
                iTask->do_retry = 1;
                return;
            }
        }
        error_report("iSCSI Failure: %s", iscsi_get_error(iscsi));
    } else {
        iTask->iscsilun->force_next_flush |= iTask->force_next_flush;
    }

out:
    if (iTask->co) {
        iTask->bh = aio_bh_new(iTask->iscsilun->aio_context,
                               iscsi_co_generic_bh_cb, iTask);
        qemu_bh_schedule(iTask->bh);
    } else {
        iTask->complete = 1;
    }
}

static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask)
{
    *iTask = (struct IscsiTask) {
        .co         = qemu_coroutine_self(),
        .iscsilun   = iscsilun,
    };
}

static void
iscsi_abort_task_cb(struct iscsi_context *iscsi, int status, void *command_data,
                    void *private_data)
{
    IscsiAIOCB *acb = private_data;

    acb->status = -ECANCELED;
    iscsi_schedule_bh(acb);
}

static void
iscsi_aio_cancel(BlockAIOCB *blockacb)
{
    IscsiAIOCB *acb = (IscsiAIOCB *)blockacb;
    IscsiLun *iscsilun = acb->iscsilun;

    if (acb->status != -EINPROGRESS) {
        return;
    }

    /* send a task mgmt call to the target to cancel the task on the target */
    iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task,
                                     iscsi_abort_task_cb, acb);

}

static const AIOCBInfo iscsi_aiocb_info = {
    .aiocb_size         = sizeof(IscsiAIOCB),
    .cancel_async       = iscsi_aio_cancel,
};


static void iscsi_process_read(void *arg);
static void iscsi_process_write(void *arg);

static void
iscsi_set_events(IscsiLun *iscsilun)
{
    struct iscsi_context *iscsi = iscsilun->iscsi;
    int ev = iscsi_which_events(iscsi);

    if (ev != iscsilun->events) {
        aio_set_fd_handler(iscsilun->aio_context,
                           iscsi_get_fd(iscsi),
                           (ev & POLLIN) ? iscsi_process_read : NULL,
                           (ev & POLLOUT) ? iscsi_process_write : NULL,
                           iscsilun);
        iscsilun->events = ev;
    }

    /* newer versions of libiscsi may return zero events. In this
     * case start a timer to ensure we are able to return to service
     * once this situation changes.
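     * (With EVENT_INTERVAL at 250 and SCALE_MS timers, this re-polls libiscsi
     * roughly every 250 ms until it reports a non-zero event mask again.)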
     */
    if (!ev) {
        timer_mod(iscsilun->event_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL);
    }
}

static void iscsi_timed_set_events(void *opaque)
{
    IscsiLun *iscsilun = opaque;
    iscsi_set_events(iscsilun);
}

static void
iscsi_process_read(void *arg)
{
    IscsiLun *iscsilun = arg;
    struct iscsi_context *iscsi = iscsilun->iscsi;

    iscsi_service(iscsi, POLLIN);
    iscsi_set_events(iscsilun);
}

static void
iscsi_process_write(void *arg)
{
    IscsiLun *iscsilun = arg;
    struct iscsi_context *iscsi = iscsilun->iscsi;

    iscsi_service(iscsi, POLLOUT);
    iscsi_set_events(iscsilun);
}

static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
{
    return sector * iscsilun->block_size / BDRV_SECTOR_SIZE;
}

static int64_t sector_qemu2lun(int64_t sector, IscsiLun *iscsilun)
{
    return sector * BDRV_SECTOR_SIZE / iscsilun->block_size;
}

static bool is_request_lun_aligned(int64_t sector_num, int nb_sectors,
                                   IscsiLun *iscsilun)
{
    if ((sector_num * BDRV_SECTOR_SIZE) % iscsilun->block_size ||
        (nb_sectors * BDRV_SECTOR_SIZE) % iscsilun->block_size) {
        error_report("iSCSI misaligned request: "
                     "iscsilun->block_size %u, sector_num %" PRIi64
                     ", nb_sectors %d",
                     iscsilun->block_size, sector_num, nb_sectors);
        return 0;
    }
    return 1;
}

static unsigned long *iscsi_allocationmap_init(IscsiLun *iscsilun)
{
    return bitmap_try_new(DIV_ROUND_UP(sector_lun2qemu(iscsilun->num_blocks,
                                                       iscsilun),
                                       iscsilun->cluster_sectors));
}

static void iscsi_allocationmap_set(IscsiLun *iscsilun, int64_t sector_num,
                                    int nb_sectors)
{
    if (iscsilun->allocationmap == NULL) {
        return;
    }
    bitmap_set(iscsilun->allocationmap,
               sector_num / iscsilun->cluster_sectors,
               DIV_ROUND_UP(nb_sectors, iscsilun->cluster_sectors));
}

static void iscsi_allocationmap_clear(IscsiLun *iscsilun, int64_t sector_num,
                                      int nb_sectors)
{
    int64_t cluster_num, nb_clusters;
    if (iscsilun->allocationmap == NULL) {
        return;
    }
    cluster_num = DIV_ROUND_UP(sector_num, iscsilun->cluster_sectors);
    nb_clusters = (sector_num + nb_sectors) / iscsilun->cluster_sectors
                  - cluster_num;
    if (nb_clusters > 0) {
        bitmap_clear(iscsilun->allocationmap, cluster_num, nb_clusters);
    }
}

static int coroutine_fn iscsi_co_writev(BlockDriverState *bs,
                                        int64_t sector_num, int nb_sectors,
                                        QEMUIOVector *iov)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;
    uint64_t lba;
    uint32_t num_sectors;
    int fua;

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        return -EINVAL;
    }

    if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
        error_report("iSCSI Error: Write of %d sectors exceeds max_xfer_len "
                     "of %d sectors", nb_sectors, bs->bl.max_transfer_length);
        return -EINVAL;
    }

    lba = sector_qemu2lun(sector_num, iscsilun);
    num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    fua = iscsilun->dpofua && !bs->enable_write_cache;
    iTask.force_next_flush = !fua;
    if (iscsilun->use_16_for_rw) {
        iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba,
                                        NULL, num_sectors * iscsilun->block_size,
                                        iscsilun->block_size, 0, 0, fua, 0, 0,
                                        iscsi_co_generic_cb, &iTask);
    } else {
        iTask.task = iscsi_write10_task(iscsilun->iscsi, iscsilun->lun, lba,
                                        NULL, num_sectors * iscsilun->block_size,
                                        iscsilun->block_size, 0, 0, fua, 0, 0,
                                        iscsi_co_generic_cb, &iTask);
    }
    if (iTask.task == NULL) {
        return -ENOMEM;
    }
    scsi_task_set_iov_out(iTask.task, (struct scsi_iovec *) iov->iov,
                          iov->niov);
    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return -EIO;
    }

    iscsi_allocationmap_set(iscsilun, sector_num, nb_sectors);

    return 0;
}


static bool iscsi_allocationmap_is_allocated(IscsiLun *iscsilun,
                                             int64_t sector_num, int nb_sectors)
{
    unsigned long size;
    if (iscsilun->allocationmap == NULL) {
        return true;
    }
    size = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors);
    return !(find_next_bit(iscsilun->allocationmap, size,
                           sector_num / iscsilun->cluster_sectors) == size);
}

static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
                                                      int64_t sector_num,
                                                      int nb_sectors, int *pnum)
{
    IscsiLun *iscsilun = bs->opaque;
    struct scsi_get_lba_status *lbas = NULL;
    struct scsi_lba_status_descriptor *lbasd = NULL;
    struct IscsiTask iTask;
    int64_t ret;

    iscsi_co_init_iscsitask(iscsilun, &iTask);

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        ret = -EINVAL;
        goto out;
    }

    /* default to all sectors allocated */
    ret = BDRV_BLOCK_DATA;
    ret |= (sector_num << BDRV_SECTOR_BITS) | BDRV_BLOCK_OFFSET_VALID;
    *pnum = nb_sectors;

    /* LUN does not support logical block provisioning */
    if (!iscsilun->lbpme) {
        goto out;
    }

retry:
    if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
                                  sector_qemu2lun(sector_num, iscsilun),
                                  8 + 16, iscsi_co_generic_cb,
                                  &iTask) == NULL) {
        ret = -ENOMEM;
        goto out;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.do_retry) {
        if (iTask.task != NULL) {
            scsi_free_scsi_task(iTask.task);
            iTask.task = NULL;
        }
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        /* in case the get_lba_status_callout fails (i.e.
         * because the device is busy or the cmd is not
         * supported) we pretend all blocks are allocated
         * for backwards compatibility */
        goto out;
    }

    lbas = scsi_datain_unmarshall(iTask.task);
    if (lbas == NULL) {
        ret = -EIO;
        goto out;
    }

    lbasd = &lbas->descriptors[0];

    if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
        ret = -EIO;
        goto out;
    }

    *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);

    if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED ||
        lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) {
        ret &= ~BDRV_BLOCK_DATA;
        if (iscsilun->lbprz) {
            ret |= BDRV_BLOCK_ZERO;
        }
    }

    if (ret & BDRV_BLOCK_ZERO) {
        iscsi_allocationmap_clear(iscsilun, sector_num, *pnum);
    } else {
        iscsi_allocationmap_set(iscsilun, sector_num, *pnum);
    }

    if (*pnum > nb_sectors) {
        *pnum = nb_sectors;
    }
out:
    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
    }
    return ret;
}

static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
                                       int64_t sector_num, int nb_sectors,
                                       QEMUIOVector *iov)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;
    uint64_t lba;
    uint32_t num_sectors;

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        return -EINVAL;
    }

    if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
        error_report("iSCSI Error: Read of %d sectors exceeds max_xfer_len "
                     "of %d sectors", nb_sectors, bs->bl.max_transfer_length);
        return -EINVAL;
    }

    if (iscsilun->lbprz && nb_sectors >= ISCSI_CHECKALLOC_THRES &&
        !iscsi_allocationmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
        int64_t ret;
        int pnum;
        ret = iscsi_co_get_block_status(bs, sector_num, INT_MAX, &pnum);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO && pnum >= nb_sectors) {
            qemu_iovec_memset(iov, 0, 0x00, iov->size);
            return 0;
        }
    }

    lba = sector_qemu2lun(sector_num, iscsilun);
    num_sectors = sector_qemu2lun(nb_sectors, iscsilun);

    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    if (iscsilun->use_16_for_rw) {
        iTask.task = iscsi_read16_task(iscsilun->iscsi, iscsilun->lun, lba,
                                       num_sectors * iscsilun->block_size,
                                       iscsilun->block_size, 0, 0, 0, 0, 0,
                                       iscsi_co_generic_cb, &iTask);
    } else {
        iTask.task = iscsi_read10_task(iscsilun->iscsi, iscsilun->lun, lba,
                                       num_sectors * iscsilun->block_size,
                                       iscsilun->block_size,
                                       0, 0, 0, 0, 0,
                                       iscsi_co_generic_cb, &iTask);
    }
    if (iTask.task == NULL) {
        return -ENOMEM;
    }
    scsi_task_set_iov_in(iTask.task, (struct scsi_iovec *) iov->iov, iov->niov);

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return -EIO;
    }

    return 0;
}

static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;

    if (!iscsilun->force_next_flush) {
        return 0;
    }
    iscsilun->force_next_flush = false;

    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0,
                                      0, iscsi_co_generic_cb, &iTask) == NULL) {
        return -ENOMEM;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return -EIO;
    }

    return 0;
}

#ifdef __linux__
static void
iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
                   void *command_data, void *opaque)
{
    IscsiAIOCB *acb = opaque;

    g_free(acb->buf);
    acb->buf = NULL;

    acb->status = 0;
    if (status < 0) {
        error_report("Failed to ioctl(SG_IO) to iSCSI lun. %s",
                     iscsi_get_error(iscsi));
        acb->status = -EIO;
    }

    acb->ioh->driver_status = 0;
    acb->ioh->host_status   = 0;
    acb->ioh->resid         = 0;

#define SG_ERR_DRIVER_SENSE    0x08

    if (status == SCSI_STATUS_CHECK_CONDITION && acb->task->datain.size >= 2) {
        int ss;

        acb->ioh->driver_status |= SG_ERR_DRIVER_SENSE;

        acb->ioh->sb_len_wr = acb->task->datain.size - 2;
        /* never copy more sense bytes than the caller's buffer can hold */
        ss = (acb->ioh->mx_sb_len >= acb->ioh->sb_len_wr) ?
             acb->ioh->sb_len_wr : acb->ioh->mx_sb_len;
        memcpy(acb->ioh->sbp, &acb->task->datain.data[2], ss);
    }

    iscsi_schedule_bh(acb);
}

static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
                                   unsigned long int req, void *buf,
                                   BlockCompletionFunc *cb, void *opaque)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = iscsilun->iscsi;
    struct iscsi_data data;
    IscsiAIOCB *acb;

    assert(req == SG_IO);

    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);

    acb->iscsilun = iscsilun;
    acb->bh          = NULL;
    acb->status      = -EINPROGRESS;
    acb->buf         = NULL;
    acb->ioh         = buf;

    acb->task = malloc(sizeof(struct scsi_task));
    if (acb->task == NULL) {
        error_report("iSCSI: Failed to allocate task for scsi command. %s",
                     iscsi_get_error(iscsi));
        qemu_aio_unref(acb);
        return NULL;
    }
    memset(acb->task, 0, sizeof(struct scsi_task));

    switch (acb->ioh->dxfer_direction) {
    case SG_DXFER_TO_DEV:
        acb->task->xfer_dir = SCSI_XFER_WRITE;
        break;
    case SG_DXFER_FROM_DEV:
        acb->task->xfer_dir = SCSI_XFER_READ;
        break;
    default:
        acb->task->xfer_dir = SCSI_XFER_NONE;
        break;
    }

    acb->task->cdb_size = acb->ioh->cmd_len;
    memcpy(&acb->task->cdb[0], acb->ioh->cmdp, acb->ioh->cmd_len);
    acb->task->expxferlen = acb->ioh->dxfer_len;

    data.size = 0;
    if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
        if (acb->ioh->iovec_count == 0) {
            data.data = acb->ioh->dxferp;
            data.size = acb->ioh->dxfer_len;
        } else {
            scsi_task_set_iov_out(acb->task,
                                  (struct scsi_iovec *) acb->ioh->dxferp,
                                  acb->ioh->iovec_count);
        }
    }

    if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task,
                                 iscsi_aio_ioctl_cb,
                                 (data.size > 0) ? &data : NULL,
                                 acb) != 0) {
        scsi_free_scsi_task(acb->task);
        qemu_aio_unref(acb);
        return NULL;
    }

    /* tell libiscsi to read straight into the buffer we got from ioctl */
    if (acb->task->xfer_dir == SCSI_XFER_READ) {
        if (acb->ioh->iovec_count == 0) {
            scsi_task_add_data_in_buffer(acb->task,
                                         acb->ioh->dxfer_len,
                                         acb->ioh->dxferp);
        } else {
            scsi_task_set_iov_in(acb->task,
                                 (struct scsi_iovec *) acb->ioh->dxferp,
                                 acb->ioh->iovec_count);
        }
    }

    iscsi_set_events(iscsilun);

    return &acb->common;
}

static void ioctl_cb(void *opaque, int status)
{
    int *p_status = opaque;
    *p_status = status;
}

static int iscsi_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    IscsiLun *iscsilun = bs->opaque;
    int status;

    switch (req) {
    case SG_GET_VERSION_NUM:
        *(int *)buf = 30000;
        break;
    case SG_GET_SCSI_ID:
        ((struct sg_scsi_id *)buf)->scsi_type = iscsilun->type;
        break;
    case SG_IO:
        status = -EINPROGRESS;
        iscsi_aio_ioctl(bs, req, buf, ioctl_cb, &status);

        while (status == -EINPROGRESS) {
            aio_poll(iscsilun->aio_context, true);
        }

        return 0;
    default:
        return -1;
    }
    return 0;
}
#endif

static int64_t
iscsi_getlength(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;
    int64_t len;

    len  = iscsilun->num_blocks;
    len *= iscsilun->block_size;

    return len;
}

static int
coroutine_fn iscsi_co_discard(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;
    struct unmap_list list;

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        return -EINVAL;
    }

    if (!iscsilun->lbp.lbpu) {
        /* UNMAP is not supported by the target */
        return 0;
    }

    list.lba = sector_qemu2lun(sector_num, iscsilun);
    list.num = sector_qemu2lun(nb_sectors, iscsilun);

    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1,
                         iscsi_co_generic_cb, &iTask) == NULL) {
        return -ENOMEM;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status == SCSI_STATUS_CHECK_CONDITION) {
        /* the target might fail with a check condition if it
           is not happy with the alignment of the UNMAP request;
           we silently fail in this case */
        return 0;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return -EIO;
    }

    iscsi_allocationmap_clear(iscsilun, sector_num, nb_sectors);

    return 0;
}

static int
coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, BdrvRequestFlags flags)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;
    uint64_t lba;
    uint32_t nb_blocks;
    bool use_16_for_ws = iscsilun->use_16_for_rw;

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        return -EINVAL;
    }

    if (flags & BDRV_REQ_MAY_UNMAP) {
        if (!use_16_for_ws && !iscsilun->lbp.lbpws10) {
            /* WRITESAME10 with UNMAP is unsupported; try WRITESAME16 */
            use_16_for_ws = true;
        }
        if (use_16_for_ws && !iscsilun->lbp.lbpws) {
            /* WRITESAME16 with UNMAP is not supported by the target,
             * fall back and try WRITESAME10/16 without UNMAP */
            flags &= ~BDRV_REQ_MAY_UNMAP;
            use_16_for_ws = iscsilun->use_16_for_rw;
        }
    }

    if (!(flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->has_write_same) {
        /* WRITESAME without UNMAP is not supported by the target */
        return -ENOTSUP;
    }

    lba = sector_qemu2lun(sector_num, iscsilun);
    nb_blocks = sector_qemu2lun(nb_sectors, iscsilun);

    if (iscsilun->zeroblock == NULL) {
        iscsilun->zeroblock = g_try_malloc0(iscsilun->block_size);
        if (iscsilun->zeroblock == NULL) {
            return -ENOMEM;
        }
    }

    iscsi_co_init_iscsitask(iscsilun, &iTask);
    iTask.force_next_flush = true;
retry:
    if (use_16_for_ws) {
        iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
                                            iscsilun->zeroblock, iscsilun->block_size,
                                            nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
                                            0, 0, iscsi_co_generic_cb, &iTask);
    } else {
        iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
                                            iscsilun->zeroblock, iscsilun->block_size,
                                            nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
                                            0, 0, iscsi_co_generic_cb, &iTask);
    }
    if (iTask.task == NULL) {
        return -ENOMEM;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.status == SCSI_STATUS_CHECK_CONDITION &&
        iTask.task->sense.key == SCSI_SENSE_ILLEGAL_REQUEST &&
        (iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_OPERATION_CODE ||
         iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_FIELD_IN_CDB)) {
        /* WRITE SAME is not supported by the target */
        iscsilun->has_write_same = false;
        scsi_free_scsi_task(iTask.task);
        return -ENOTSUP;
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return -EIO;
    }

    if (flags & BDRV_REQ_MAY_UNMAP) {
        iscsi_allocationmap_clear(iscsilun, sector_num, nb_sectors);
    } else {
        iscsi_allocationmap_set(iscsilun, sector_num, nb_sectors);
    }

    return 0;
}

static void parse_chap(struct iscsi_context *iscsi, const char *target,
                       Error **errp)
{
    QemuOptsList *list;
    QemuOpts *opts;
    const char *user = NULL;
    const char *password = NULL;

    list = qemu_find_opts("iscsi");
    if (!list) {
        return;
    }

    opts = qemu_opts_find(list, target);
    if (opts == NULL) {
        opts = QTAILQ_FIRST(&list->head);
        if (!opts) {
            return;
        }
    }

    user = qemu_opt_get(opts, "user");
    if (!user) {
        return;
    }

    password = qemu_opt_get(opts, "password");
    if (!password) {
        error_setg(errp, "CHAP username specified but no password was given");
        return;
    }

    if (iscsi_set_initiator_username_pwd(iscsi, user, password)) {
        error_setg(errp, "Failed to set initiator username and password");
    }
}

static void parse_header_digest(struct iscsi_context *iscsi, const char *target,
                                Error **errp)
{
    QemuOptsList *list;
    QemuOpts *opts;
    const char *digest = NULL;

    list = qemu_find_opts("iscsi");
    if (!list) {
        return;
    }

    opts = qemu_opts_find(list, target);
    if (opts == NULL) {
        opts = QTAILQ_FIRST(&list->head);
        if (!opts) {
            return;
        }
    }

    digest = qemu_opt_get(opts, "header-digest");
    if (!digest) {
        return;
    }

    if (!strcmp(digest, "CRC32C")) {
        iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_CRC32C);
    } else if (!strcmp(digest, "NONE")) {
        iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE);
    } else if (!strcmp(digest, "CRC32C-NONE")) {
        iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_CRC32C_NONE);
    } else if (!strcmp(digest, "NONE-CRC32C")) {
        iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C);
    } else {
        error_setg(errp, "Invalid header-digest setting : %s", digest);
    }
}

static char *parse_initiator_name(const char *target)
{
    QemuOptsList *list;
    QemuOpts *opts;
    const char *name;
    char *iscsi_name;
    UuidInfo *uuid_info;

    list = qemu_find_opts("iscsi");
    if (list) {
        opts = qemu_opts_find(list, target);
        if (!opts) {
            opts = QTAILQ_FIRST(&list->head);
        }
        if (opts) {
            name = qemu_opt_get(opts, "initiator-name");
            if (name) {
                return g_strdup(name);
            }
        }
    }

    uuid_info = qmp_query_uuid(NULL);
    if (strcmp(uuid_info->UUID, UUID_NONE) == 0) {
        name = qemu_get_vm_name();
    } else {
        name = uuid_info->UUID;
    }
    iscsi_name = g_strdup_printf("iqn.2008-11.org.linux-kvm%s%s",
                                 name ? ":" : "", name ? name : "");
    qapi_free_UuidInfo(uuid_info);
    return iscsi_name;
}

static void iscsi_nop_timed_event(void *opaque)
{
    IscsiLun *iscsilun = opaque;

    if (iscsi_get_nops_in_flight(iscsilun->iscsi) > MAX_NOP_FAILURES) {
        error_report("iSCSI: NOP timeout. Reconnecting...");
        iscsi_reconnect(iscsilun->iscsi);
    }

    if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
        error_report("iSCSI: failed to send NOP-Out. Disabling NOP messages.");
        return;
    }

    timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
    iscsi_set_events(iscsilun);
}

static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
{
    struct scsi_task *task = NULL;
    struct scsi_readcapacity10 *rc10 = NULL;
    struct scsi_readcapacity16 *rc16 = NULL;
    int retries = ISCSI_CMD_RETRIES;

    do {
        if (task != NULL) {
            scsi_free_scsi_task(task);
            task = NULL;
        }

        switch (iscsilun->type) {
        case TYPE_DISK:
            task = iscsi_readcapacity16_sync(iscsilun->iscsi, iscsilun->lun);
            if (task != NULL && task->status == SCSI_STATUS_GOOD) {
                rc16 = scsi_datain_unmarshall(task);
                if (rc16 == NULL) {
                    error_setg(errp, "iSCSI: Failed to unmarshall readcapacity16 data.");
                } else {
                    iscsilun->block_size = rc16->block_length;
                    iscsilun->num_blocks = rc16->returned_lba + 1;
                    iscsilun->lbpme = !!rc16->lbpme;
                    iscsilun->lbprz = !!rc16->lbprz;
                    iscsilun->use_16_for_rw = (rc16->returned_lba > 0xffffffff);
                }
            }
            break;
        case TYPE_ROM:
            task = iscsi_readcapacity10_sync(iscsilun->iscsi, iscsilun->lun, 0, 0);
            if (task != NULL && task->status == SCSI_STATUS_GOOD) {
                rc10 = scsi_datain_unmarshall(task);
                if (rc10 == NULL) {
                    error_setg(errp, "iSCSI: Failed to unmarshall readcapacity10 data.");
                } else {
                    iscsilun->block_size = rc10->block_size;
                    if (rc10->lba == 0) {
                        /* blank disk loaded */
                        iscsilun->num_blocks = 0;
                    } else {
                        iscsilun->num_blocks = rc10->lba + 1;
                    }
                }
            }
            break;
        default:
            return;
        }
    } while (task != NULL && task->status == SCSI_STATUS_CHECK_CONDITION
             && task->sense.key == SCSI_SENSE_UNIT_ATTENTION
             && retries-- > 0);

    if (task == NULL || task->status != SCSI_STATUS_GOOD) {
        error_setg(errp, "iSCSI: failed to send readcapacity10 command.");
    }
    if (task) {
        scsi_free_scsi_task(task);
    }
}

/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "iscsi",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the iscsi image",
        },
        { /* end of list */ }
    },
};

static struct scsi_task *iscsi_do_inquiry(struct iscsi_context *iscsi, int lun,
                                          int evpd, int pc, void **inq, Error **errp)
{
    int full_size;
    struct scsi_task *task = NULL;
    task = iscsi_inquiry_sync(iscsi, lun, evpd, pc, 64);
    if (task == NULL || task->status != SCSI_STATUS_GOOD) {
        goto fail;
    }
    full_size = scsi_datain_getfullsize(task);
    if (full_size > task->datain.size) {
        scsi_free_scsi_task(task);

        /* we need more data for the full list */
        task = iscsi_inquiry_sync(iscsi, lun, evpd, pc, full_size);
        if (task == NULL || task->status != SCSI_STATUS_GOOD) {
            goto fail;
        }
    }

    *inq = scsi_datain_unmarshall(task);
    if (*inq == NULL) {
        error_setg(errp, "iSCSI: failed to unmarshall inquiry datain blob");
        goto fail_with_err;
    }

    return task;

fail:
    error_setg(errp, "iSCSI: Inquiry command failed : %s",
               iscsi_get_error(iscsi));
fail_with_err:
    if (task != NULL) {
        scsi_free_scsi_task(task);
    }
    return NULL;
}

static void
iscsi_detach_aio_context(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;

    aio_set_fd_handler(iscsilun->aio_context,
                       iscsi_get_fd(iscsilun->iscsi),
                       NULL, NULL, NULL);
    iscsilun->events = 0;

    if (iscsilun->nop_timer) {
        timer_del(iscsilun->nop_timer);
        timer_free(iscsilun->nop_timer);
        iscsilun->nop_timer = NULL;
    }
    if (iscsilun->event_timer) {
        timer_del(iscsilun->event_timer);
        timer_free(iscsilun->event_timer);
        iscsilun->event_timer = NULL;
    }
}

static void iscsi_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    IscsiLun *iscsilun = bs->opaque;

    iscsilun->aio_context = new_context;
    iscsi_set_events(iscsilun);

    /* Set up a timer for sending out iSCSI NOPs */
    iscsilun->nop_timer = aio_timer_new(iscsilun->aio_context,
                                        QEMU_CLOCK_REALTIME, SCALE_MS,
                                        iscsi_nop_timed_event, iscsilun);
    timer_mod(iscsilun->nop_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);

    /* Prepare a timer for a delayed call to iscsi_set_events */
    iscsilun->event_timer = aio_timer_new(iscsilun->aio_context,
                                          QEMU_CLOCK_REALTIME, SCALE_MS,
                                          iscsi_timed_set_events, iscsilun);
}

static void iscsi_modesense_sync(IscsiLun *iscsilun)
{
    struct scsi_task *task;
    struct scsi_mode_sense *ms = NULL;
    iscsilun->write_protected = false;
    iscsilun->dpofua = false;

    task = iscsi_modesense6_sync(iscsilun->iscsi, iscsilun->lun,
                                 1, SCSI_MODESENSE_PC_CURRENT,
                                 0x3F, 0, 255);
    if (task == NULL) {
        error_report("iSCSI: Failed to send MODE_SENSE(6) command: %s",
                     iscsi_get_error(iscsilun->iscsi));
        goto out;
    }

    if (task->status != SCSI_STATUS_GOOD) {
        error_report("iSCSI: Failed MODE_SENSE(6), LUN assumed writable");
        goto out;
    }
    ms = scsi_datain_unmarshall(task);
    if (!ms) {
        error_report("iSCSI: Failed to unmarshall MODE_SENSE(6) data: %s",
                     iscsi_get_error(iscsilun->iscsi));
        goto out;
    }
    iscsilun->write_protected = ms->device_specific_parameter & 0x80;
    iscsilun->dpofua          = ms->device_specific_parameter & 0x10;

out:
    if (task) {
        scsi_free_scsi_task(task);
    }
}

/*
 * We support iSCSI URLs of the form
 * iscsi://[<username>%<password>@]<host>[:<port>]/<targetname>/<lun>
 */
static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = NULL;
    struct iscsi_url *iscsi_url = NULL;
    struct scsi_task *task = NULL;
    struct scsi_inquiry_standard *inq = NULL;
    struct scsi_inquiry_supported_pages *inq_vpd;
    char *initiator_name = NULL;
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;
    int i, ret = 0;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    iscsi_url = iscsi_parse_full_url(iscsi, filename);
    if (iscsi_url == NULL) {
        error_setg(errp, "Failed to parse URL : %s", filename);
        ret = -EINVAL;
        goto out;
    }

    memset(iscsilun, 0, sizeof(IscsiLun));

    initiator_name = parse_initiator_name(iscsi_url->target);

    iscsi = iscsi_create_context(initiator_name);
    if (iscsi == NULL) {
        error_setg(errp, "iSCSI: Failed to create iSCSI context.");
        ret = -ENOMEM;
        goto out;
    }

    if (iscsi_set_targetname(iscsi, iscsi_url->target)) {
        error_setg(errp, "iSCSI: Failed to set target name.");
        ret = -EINVAL;
        goto out;
    }

    if (iscsi_url->user[0] != '\0') {
        ret = iscsi_set_initiator_username_pwd(iscsi, iscsi_url->user,
                                               iscsi_url->passwd);
        if (ret != 0) {
            error_setg(errp, "Failed to set initiator username and password");
            ret = -EINVAL;
            goto out;
        }
    }

    /* check if we got CHAP username/password via the options */
    parse_chap(iscsi, iscsi_url->target, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    if (iscsi_set_session_type(iscsi, ISCSI_SESSION_NORMAL) != 0) {
        error_setg(errp, "iSCSI: Failed to set session type to normal.");
        ret = -EINVAL;
        goto out;
    }

    iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C);

    /* check if we got HEADER_DIGEST via the options */
    parse_header_digest(iscsi, iscsi_url->target, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    if (iscsi_full_connect_sync(iscsi, iscsi_url->portal, iscsi_url->lun) != 0) {
        error_setg(errp, "iSCSI: Failed to connect to LUN : %s",
                   iscsi_get_error(iscsi));
        ret = -EINVAL;
        goto out;
    }

    iscsilun->iscsi = iscsi;
    iscsilun->aio_context = bdrv_get_aio_context(bs);
    iscsilun->lun = iscsi_url->lun;
    iscsilun->has_write_same = true;

    task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 0, 0,
                            (void **) &inq, errp);
    if (task == NULL) {
        ret = -EINVAL;
        goto out;
    }
    iscsilun->type = inq->periperal_device_type;
    scsi_free_scsi_task(task);
    task = NULL;

    iscsi_modesense_sync(iscsilun);

    /* Check the write protect flag of the LUN if we want to write */
    if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
        iscsilun->write_protected) {
        error_setg(errp, "Cannot open a write protected LUN as read-write");
        ret = -EACCES;
        goto out;
    }

    iscsi_readcapacity_sync(iscsilun, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }
    bs->total_sectors = sector_lun2qemu(iscsilun->num_blocks, iscsilun);
    bs->request_alignment = iscsilun->block_size;

    /* We don't have any emulation for devices other than disks and CD-ROMs, so
     * this must be sg ioctl compatible. We force it to be sg, otherwise qemu
     * will try to read from the device to guess the image format.
     */
    if (iscsilun->type != TYPE_DISK && iscsilun->type != TYPE_ROM) {
        bs->sg = 1;
    }

    task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
                            SCSI_INQUIRY_PAGECODE_SUPPORTED_VPD_PAGES,
                            (void **) &inq_vpd, errp);
    if (task == NULL) {
        ret = -EINVAL;
        goto out;
    }
    for (i = 0; i < inq_vpd->num_pages; i++) {
        struct scsi_task *inq_task;
        struct scsi_inquiry_logical_block_provisioning *inq_lbp;
        struct scsi_inquiry_block_limits *inq_bl;
        switch (inq_vpd->pages[i]) {
        case SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING:
            inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
                                        SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING,
                                        (void **) &inq_lbp, errp);
            if (inq_task == NULL) {
                ret = -EINVAL;
                goto out;
            }
            memcpy(&iscsilun->lbp, inq_lbp,
                   sizeof(struct scsi_inquiry_logical_block_provisioning));
            scsi_free_scsi_task(inq_task);
            break;
        case SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS:
            inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
                                        SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS,
                                        (void **) &inq_bl, errp);
            if (inq_task == NULL) {
                ret = -EINVAL;
                goto out;
            }
            memcpy(&iscsilun->bl, inq_bl,
                   sizeof(struct scsi_inquiry_block_limits));
            scsi_free_scsi_task(inq_task);
            break;
        default:
            break;
        }
    }
    scsi_free_scsi_task(task);
    task = NULL;

    iscsi_attach_aio_context(bs, iscsilun->aio_context);

    /* Guess the internal cluster (page) size of the iscsi target by means
     * of opt_unmap_gran. Transfer the unmap granularity only if it has a
     * reasonable size */
    if (iscsilun->bl.opt_unmap_gran * iscsilun->block_size >= 4 * 1024 &&
        iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) {
        iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran *
                                     iscsilun->block_size) >> BDRV_SECTOR_BITS;
        if (iscsilun->lbprz) {
            iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
            if (iscsilun->allocationmap == NULL) {
                ret = -ENOMEM;
            }
        }
    }

out:
    qemu_opts_del(opts);
    g_free(initiator_name);
    if (iscsi_url != NULL) {
        iscsi_destroy_url(iscsi_url);
    }
    if (task != NULL) {
        scsi_free_scsi_task(task);
    }

    if (ret) {
        if (iscsi != NULL) {
            if (iscsi_is_logged_in(iscsi)) {
                iscsi_logout_sync(iscsi);
            }
            iscsi_destroy_context(iscsi);
        }
        memset(iscsilun, 0, sizeof(IscsiLun));
    }
    return ret;
}

static void iscsi_close(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = iscsilun->iscsi;

    iscsi_detach_aio_context(bs);
    if (iscsi_is_logged_in(iscsi)) {
        iscsi_logout_sync(iscsi);
    }
    iscsi_destroy_context(iscsi);
    g_free(iscsilun->zeroblock);
    g_free(iscsilun->allocationmap);
    memset(iscsilun, 0, sizeof(IscsiLun));
}

static int sector_limits_lun2qemu(int64_t sector, IscsiLun *iscsilun)
{
    return MIN(sector_lun2qemu(sector, iscsilun), INT_MAX / 2 + 1);
}

static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
{
    /* We don't actually refresh here, but just return data queried in
     * iscsi_open(): iscsi targets don't change their limits. */

    IscsiLun *iscsilun = bs->opaque;
    uint32_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;

    if (iscsilun->bl.max_xfer_len) {
        max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len);
    }

    bs->bl.max_transfer_length = sector_limits_lun2qemu(max_xfer_len, iscsilun);

    if (iscsilun->lbp.lbpu) {
        if (iscsilun->bl.max_unmap < 0xffffffff) {
            bs->bl.max_discard =
                sector_limits_lun2qemu(iscsilun->bl.max_unmap, iscsilun);
        }
        bs->bl.discard_alignment =
            sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
    }

    if (iscsilun->bl.max_ws_len < 0xffffffff) {
        bs->bl.max_write_zeroes =
            sector_limits_lun2qemu(iscsilun->bl.max_ws_len, iscsilun);
    }
    if (iscsilun->lbp.lbpws) {
        bs->bl.write_zeroes_alignment =
            sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
    }
    bs->bl.opt_transfer_length =
        sector_limits_lun2qemu(iscsilun->bl.opt_xfer_len, iscsilun);
}

/* Note that this will not re-establish a connection with an iSCSI target - it
 * is effectively a NOP. */
static int iscsi_reopen_prepare(BDRVReopenState *state,
                                BlockReopenQueue *queue, Error **errp)
{
    IscsiLun *iscsilun = state->bs->opaque;

    if (state->flags & BDRV_O_RDWR && iscsilun->write_protected) {
        error_setg(errp, "Cannot open a write protected LUN as read-write");
        return -EACCES;
    }
    return 0;
}

static int iscsi_truncate(BlockDriverState *bs, int64_t offset)
{
    IscsiLun *iscsilun = bs->opaque;
    Error *local_err = NULL;

    if (iscsilun->type != TYPE_DISK) {
        return -ENOTSUP;
    }

    iscsi_readcapacity_sync(iscsilun, &local_err);
    if (local_err != NULL) {
        error_free(local_err);
        return -EIO;
    }

    if (offset > iscsi_getlength(bs)) {
        return -EINVAL;
    }

    if (iscsilun->allocationmap != NULL) {
        g_free(iscsilun->allocationmap);
        iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
    }

    return 0;
}

static int iscsi_create(const char *filename, QemuOpts *opts, Error **errp)
{
    int ret = 0;
    int64_t total_size = 0;
    BlockDriverState *bs;
    IscsiLun *iscsilun = NULL;
    QDict *bs_options;

    bs = bdrv_new();

    /* Read out options */
    total_size = DIV_ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                              BDRV_SECTOR_SIZE);
    bs->opaque = g_new0(struct IscsiLun, 1);
    iscsilun = bs->opaque;

    bs_options = qdict_new();
    qdict_put(bs_options, "filename", qstring_from_str(filename));
    ret = iscsi_open(bs, bs_options, 0, NULL);
    QDECREF(bs_options);

    if (ret != 0) {
        goto out;
    }
    iscsi_detach_aio_context(bs);
    if (iscsilun->type != TYPE_DISK) {
        ret = -ENODEV;
        goto out;
    }
    if (bs->total_sectors < total_size) {
        ret = -ENOSPC;
        goto out;
    }

    ret = 0;
out:
    if (iscsilun->iscsi != NULL) {
        iscsi_destroy_context(iscsilun->iscsi);
    }
    g_free(bs->opaque);
    bs->opaque = NULL;
    bdrv_unref(bs);
    return ret;
}

static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    IscsiLun *iscsilun = bs->opaque;
    bdi->unallocated_blocks_are_zero = iscsilun->lbprz;
    bdi->can_write_zeroes_with_unmap = iscsilun->lbprz && iscsilun->lbp.lbpws;
    bdi->cluster_size = iscsilun->cluster_sectors * BDRV_SECTOR_SIZE;
    return 0;
}

static QemuOptsList iscsi_create_opts = {
    .name = "iscsi-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(iscsi_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_iscsi = {
    .format_name     = "iscsi",
    .protocol_name   = "iscsi",

    .instance_size   = sizeof(IscsiLun),
    .bdrv_needs_filename = true,
    .bdrv_file_open  = iscsi_open,
    .bdrv_close      = iscsi_close,
    .bdrv_create     = iscsi_create,
    .create_opts     = &iscsi_create_opts,
    .bdrv_reopen_prepare  = iscsi_reopen_prepare,

    .bdrv_getlength  = iscsi_getlength,
    .bdrv_get_info   = iscsi_get_info,
    .bdrv_truncate   = iscsi_truncate,
    .bdrv_refresh_limits = iscsi_refresh_limits,

    .bdrv_co_get_block_status = iscsi_co_get_block_status,
    .bdrv_co_discard      = iscsi_co_discard,
    .bdrv_co_write_zeroes = iscsi_co_write_zeroes,
    .bdrv_co_readv         = iscsi_co_readv,
    .bdrv_co_writev        = iscsi_co_writev,
    .bdrv_co_flush_to_disk = iscsi_co_flush,

#ifdef __linux__
    .bdrv_ioctl       = iscsi_ioctl,
    .bdrv_aio_ioctl   = iscsi_aio_ioctl,
#endif

    .bdrv_detach_aio_context = iscsi_detach_aio_context,
    .bdrv_attach_aio_context = iscsi_attach_aio_context,
};

static QemuOptsList qemu_iscsi_opts = {
    .name = "iscsi",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_iscsi_opts.head),
    .desc = {
        {
            .name = "user",
            .type = QEMU_OPT_STRING,
            .help = "username for CHAP authentication to target",
        },{
            .name = "password",
            .type = QEMU_OPT_STRING,
            .help = "password for CHAP authentication to target",
        },{
            .name = "header-digest",
            .type = QEMU_OPT_STRING,
            .help = "HeaderDigest setting. "
                    "{CRC32C|CRC32C-NONE|NONE-CRC32C|NONE}",
        },{
            .name = "initiator-name",
            .type = QEMU_OPT_STRING,
            .help = "Initiator iqn name to use when connecting",
        },
        { /* end of list */ }
    },
};

static void iscsi_block_init(void)
{
    bdrv_register(&bdrv_iscsi);
    qemu_add_opts(&qemu_iscsi_opts);
}

block_init(iscsi_block_init);