/*
 * QEMU Block driver for iSCSI images
 *
 * Copyright (c) 2010-2011 Ronnie Sahlberg <ronniesahlberg@gmail.com>
 * Copyright (c) 2012-2015 Peter Lieven <pl@kamp.de>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "config-host.h"

#include <poll.h>
#include <math.h>
#include <arpa/inet.h>
#include "qemu-common.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "block/block_int.h"
#include "block/scsi.h"
#include "qemu/iov.h"
#include "sysemu/sysemu.h"
#include "qmp-commands.h"
#include "qapi/qmp/qstring.h"

#include <iscsi/iscsi.h>
#include <iscsi/scsi-lowlevel.h>

#ifdef __linux__
#include <scsi/sg.h>
#include <block/scsi.h>
#endif

typedef struct IscsiLun {
    struct iscsi_context *iscsi;
    AioContext *aio_context;
    int lun;
    enum scsi_inquiry_peripheral_device_type type;
    int block_size;
    uint64_t num_blocks;
    int events;
    QEMUTimer *nop_timer;
    QEMUTimer *event_timer;
    struct scsi_inquiry_logical_block_provisioning lbp;
    struct scsi_inquiry_block_limits bl;
    unsigned char *zeroblock;
    unsigned long *allocationmap;
    int cluster_sectors;
    bool use_16_for_rw;
    bool write_protected;
    bool lbpme;
    bool lbprz;
    bool dpofua;
    bool has_write_same;
    bool force_next_flush;
    bool request_timed_out;
} IscsiLun;

typedef struct IscsiTask {
    int status;
    int complete;
    int retries;
    int do_retry;
    struct scsi_task *task;
    Coroutine *co;
    QEMUBH *bh;
    IscsiLun *iscsilun;
    QEMUTimer retry_timer;
    bool force_next_flush;
    int err_code;
} IscsiTask;

typedef struct IscsiAIOCB {
    BlockAIOCB common;
    QEMUIOVector *qiov;
    QEMUBH *bh;
    IscsiLun *iscsilun;
    struct scsi_task *task;
    uint8_t *buf;
    int status;
    int64_t sector_num;
    int nb_sectors;
    int ret;
#ifdef __linux__
    sg_io_hdr_t *ioh;
#endif
} IscsiAIOCB;

/* libiscsi uses time_t so it's enough to process events every second */
#define EVENT_INTERVAL 1000
#define NOP_INTERVAL 5000
#define MAX_NOP_FAILURES 3
#define ISCSI_CMD_RETRIES ARRAY_SIZE(iscsi_retry_times)
static const unsigned iscsi_retry_times[] = {8, 32, 128, 512, 2048, 8192, 32768};
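
/* Note (added for clarity): when a command completes with BUSY, TASK SET FULL
 * or a timeout, iscsi_co_generic_cb() re-queues it after an exponentially
 * distributed random delay whose mean (in ms) is taken from the table above,
 * so retry #1 waits ~8 ms on average and retry #7 ~32.8 s.  Timed-out
 * requests are instead rescheduled after the reconnect has been initiated. */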

/* this threshold is a trade-off knob to choose between
 * the potential additional overhead of an extra GET_LBA_STATUS request
 * vs. unnecessarily reading a lot of zero sectors over the wire.
 * If a read request is greater than or equal to ISCSI_CHECKALLOC_THRES
 * sectors we check the allocation status of the area covered by the
 * request first if the allocationmap indicates that the area might be
 * unallocated. */
#define ISCSI_CHECKALLOC_THRES 64

static void
iscsi_bh_cb(void *p)
{
    IscsiAIOCB *acb = p;

    qemu_bh_delete(acb->bh);

    g_free(acb->buf);
    acb->buf = NULL;

    acb->common.cb(acb->common.opaque, acb->status);

    if (acb->task != NULL) {
        scsi_free_scsi_task(acb->task);
        acb->task = NULL;
    }

    qemu_aio_unref(acb);
}

static void
iscsi_schedule_bh(IscsiAIOCB *acb)
{
    if (acb->bh) {
        return;
    }
    acb->bh = aio_bh_new(acb->iscsilun->aio_context, iscsi_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
}

static void iscsi_co_generic_bh_cb(void *opaque)
{
    struct IscsiTask *iTask = opaque;
    iTask->complete = 1;
    qemu_bh_delete(iTask->bh);
    qemu_coroutine_enter(iTask->co, NULL);
}

static void iscsi_retry_timer_expired(void *opaque)
{
    struct IscsiTask *iTask = opaque;
    iTask->complete = 1;
    if (iTask->co) {
        qemu_coroutine_enter(iTask->co, NULL);
    }
}

static inline unsigned exp_random(double mean)
{
    return -mean * log((double)rand() / RAND_MAX);
}

/* SCSI_SENSE_ASCQ_INVALID_FIELD_IN_PARAMETER_LIST was introduced in
 * libiscsi 1.10.0, together with other constants we need. Use it as
 * a hint that we have to define them ourselves if needed, to keep the
 * minimum required libiscsi version at 1.9.0. We use an ASCQ macro for
 * the test because SCSI_STATUS_* is an enum.
 *
 * To guard against future changes where SCSI_SENSE_ASCQ_* also becomes
 * an enum, check against the LIBISCSI_API_VERSION macro, which was
 * introduced in 1.11.0. If it is present, there is no need to define
 * anything.
 */
#if !defined(SCSI_SENSE_ASCQ_INVALID_FIELD_IN_PARAMETER_LIST) && \
    !defined(LIBISCSI_API_VERSION)
#define SCSI_STATUS_TASK_SET_FULL 0x28
#define SCSI_STATUS_TIMEOUT 0x0f000002
#define SCSI_SENSE_ASCQ_INVALID_FIELD_IN_PARAMETER_LIST 0x2600
#define SCSI_SENSE_ASCQ_PARAMETER_LIST_LENGTH_ERROR 0x1a00
#endif

static int iscsi_translate_sense(struct scsi_sense *sense)
{
    int ret;

    switch (sense->key) {
    case SCSI_SENSE_NOT_READY:
        return -EBUSY;
    case SCSI_SENSE_DATA_PROTECTION:
        return -EACCES;
    case SCSI_SENSE_COMMAND_ABORTED:
        return -ECANCELED;
    case SCSI_SENSE_ILLEGAL_REQUEST:
        /* Parse ASCQ */
        break;
    default:
        return -EIO;
    }
    switch (sense->ascq) {
    case SCSI_SENSE_ASCQ_PARAMETER_LIST_LENGTH_ERROR:
    case SCSI_SENSE_ASCQ_INVALID_OPERATION_CODE:
    case SCSI_SENSE_ASCQ_INVALID_FIELD_IN_CDB:
    case SCSI_SENSE_ASCQ_INVALID_FIELD_IN_PARAMETER_LIST:
        ret = -EINVAL;
        break;
    case SCSI_SENSE_ASCQ_LBA_OUT_OF_RANGE:
        ret = -ENOSPC;
        break;
    case SCSI_SENSE_ASCQ_LOGICAL_UNIT_NOT_SUPPORTED:
        ret = -ENOTSUP;
        break;
    case SCSI_SENSE_ASCQ_MEDIUM_NOT_PRESENT:
    case SCSI_SENSE_ASCQ_MEDIUM_NOT_PRESENT_TRAY_CLOSED:
    case SCSI_SENSE_ASCQ_MEDIUM_NOT_PRESENT_TRAY_OPEN:
        ret = -ENOMEDIUM;
        break;
    case SCSI_SENSE_ASCQ_WRITE_PROTECTED:
        ret = -EACCES;
        break;
    default:
        ret = -EIO;
        break;
    }
    return ret;
}

static void
iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                    void *command_data, void *opaque)
{
    struct IscsiTask *iTask = opaque;
    struct scsi_task *task = command_data;

    iTask->status = status;
    iTask->do_retry = 0;
    iTask->task = task;

    if (status != SCSI_STATUS_GOOD) {
        if (iTask->retries++ < ISCSI_CMD_RETRIES) {
            if (status == SCSI_STATUS_CHECK_CONDITION
                && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
                error_report("iSCSI CheckCondition: %s",
                             iscsi_get_error(iscsi));
                iTask->do_retry = 1;
                goto out;
            }
            if (status == SCSI_STATUS_BUSY ||
                status == SCSI_STATUS_TIMEOUT ||
                status == SCSI_STATUS_TASK_SET_FULL) {
                unsigned retry_time =
                    exp_random(iscsi_retry_times[iTask->retries - 1]);
                if (status == SCSI_STATUS_TIMEOUT) {
                    /* make sure the request is rescheduled AFTER the
                     * reconnect is initiated */
                    retry_time = EVENT_INTERVAL * 2;
                    iTask->iscsilun->request_timed_out = true;
                }
                error_report("iSCSI Busy/TaskSetFull/TimeOut"
                             " (retry #%u in %u ms): %s",
                             iTask->retries, retry_time,
                             iscsi_get_error(iscsi));
                aio_timer_init(iTask->iscsilun->aio_context,
                               &iTask->retry_timer, QEMU_CLOCK_REALTIME,
                               SCALE_MS, iscsi_retry_timer_expired, iTask);
                timer_mod(&iTask->retry_timer,
                          qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
                iTask->do_retry = 1;
                return;
            }
        }
        iTask->err_code = iscsi_translate_sense(&task->sense);
        error_report("iSCSI Failure: %s", iscsi_get_error(iscsi));
    } else {
        iTask->iscsilun->force_next_flush |= iTask->force_next_flush;
    }

out:
    if (iTask->co) {
        iTask->bh = aio_bh_new(iTask->iscsilun->aio_context,
                               iscsi_co_generic_bh_cb, iTask);
        qemu_bh_schedule(iTask->bh);
    } else {
        iTask->complete = 1;
    }
}

static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask)
{
    *iTask = (struct IscsiTask) {
        .co = qemu_coroutine_self(),
        .iscsilun = iscsilun,
    };
}

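/* Note (added for clarity): the coroutine based request functions below all
 * follow the same pattern: initialise an IscsiTask with
 * iscsi_co_init_iscsitask(), queue the SCSI command with iscsi_co_generic_cb
 * as completion callback, then loop on iscsi_set_events() +
 * qemu_coroutine_yield() until iTask.complete is set.  If the callback asked
 * for a retry (iTask.do_retry), the caller frees the task, clears
 * iTask.complete and jumps back to its retry label. */
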
static void
iscsi_abort_task_cb(struct iscsi_context *iscsi, int status, void *command_data,
                    void *private_data)
{
    IscsiAIOCB *acb = private_data;

    acb->status = -ECANCELED;
    iscsi_schedule_bh(acb);
}

static void
iscsi_aio_cancel(BlockAIOCB *blockacb)
{
    IscsiAIOCB *acb = (IscsiAIOCB *)blockacb;
    IscsiLun *iscsilun = acb->iscsilun;

    if (acb->status != -EINPROGRESS) {
        return;
    }

    /* send a task mgmt call to the target to cancel the task on the target */
    iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task,
                                     iscsi_abort_task_cb, acb);

}

static const AIOCBInfo iscsi_aiocb_info = {
    .aiocb_size = sizeof(IscsiAIOCB),
    .cancel_async = iscsi_aio_cancel,
};


static void iscsi_process_read(void *arg);
static void iscsi_process_write(void *arg);

static void
iscsi_set_events(IscsiLun *iscsilun)
{
    struct iscsi_context *iscsi = iscsilun->iscsi;
    int ev = iscsi_which_events(iscsi);

    if (ev != iscsilun->events) {
        aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsi),
                           false,
                           (ev & POLLIN) ? iscsi_process_read : NULL,
                           (ev & POLLOUT) ? iscsi_process_write : NULL,
                           iscsilun);
        iscsilun->events = ev;
    }
}

static void iscsi_timed_check_events(void *opaque)
{
    IscsiLun *iscsilun = opaque;

    /* check for timed out requests */
    iscsi_service(iscsilun->iscsi, 0);

    if (iscsilun->request_timed_out) {
        iscsilun->request_timed_out = false;
        iscsi_reconnect(iscsilun->iscsi);
    }

    /* newer versions of libiscsi may return zero events. Ensure we are able
     * to return to service once this situation changes. */
    iscsi_set_events(iscsilun);

    timer_mod(iscsilun->event_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL);
}

static void
iscsi_process_read(void *arg)
{
    IscsiLun *iscsilun = arg;
    struct iscsi_context *iscsi = iscsilun->iscsi;

    iscsi_service(iscsi, POLLIN);
    iscsi_set_events(iscsilun);
}

static void
iscsi_process_write(void *arg)
{
    IscsiLun *iscsilun = arg;
    struct iscsi_context *iscsi = iscsilun->iscsi;

    iscsi_service(iscsi, POLLOUT);
    iscsi_set_events(iscsilun);
}

static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
{
    return sector * iscsilun->block_size / BDRV_SECTOR_SIZE;
}

static int64_t sector_qemu2lun(int64_t sector, IscsiLun *iscsilun)
{
    return sector * BDRV_SECTOR_SIZE / iscsilun->block_size;
}

static bool is_request_lun_aligned(int64_t sector_num, int nb_sectors,
                                   IscsiLun *iscsilun)
{
    if ((sector_num * BDRV_SECTOR_SIZE) % iscsilun->block_size ||
        (nb_sectors * BDRV_SECTOR_SIZE) % iscsilun->block_size) {
        error_report("iSCSI misaligned request: "
                     "iscsilun->block_size %u, sector_num %" PRIi64
                     ", nb_sectors %d",
                     iscsilun->block_size, sector_num, nb_sectors);
        return 0;
    }
    return 1;
}

static unsigned long *iscsi_allocationmap_init(IscsiLun *iscsilun)
{
    return bitmap_try_new(DIV_ROUND_UP(sector_lun2qemu(iscsilun->num_blocks,
                                                       iscsilun),
                                       iscsilun->cluster_sectors));
}

static void iscsi_allocationmap_set(IscsiLun *iscsilun, int64_t sector_num,
                                    int nb_sectors)
{
    if (iscsilun->allocationmap == NULL) {
        return;
    }
    bitmap_set(iscsilun->allocationmap,
               sector_num / iscsilun->cluster_sectors,
               DIV_ROUND_UP(nb_sectors, iscsilun->cluster_sectors));
}

static void iscsi_allocationmap_clear(IscsiLun *iscsilun, int64_t sector_num,
                                      int nb_sectors)
{
    int64_t cluster_num, nb_clusters;
    if (iscsilun->allocationmap == NULL) {
        return;
    }
    cluster_num = DIV_ROUND_UP(sector_num, iscsilun->cluster_sectors);
    nb_clusters = (sector_num + nb_sectors) / iscsilun->cluster_sectors
                  - cluster_num;
    if (nb_clusters > 0) {
        bitmap_clear(iscsilun->allocationmap, cluster_num, nb_clusters);
    }
}

static int coroutine_fn iscsi_co_writev(BlockDriverState *bs,
                                        int64_t sector_num, int nb_sectors,
                                        QEMUIOVector *iov)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;
    uint64_t lba;
    uint32_t num_sectors;
    int fua;

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        return -EINVAL;
    }

    if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
        error_report("iSCSI Error: Write of %d sectors exceeds max_xfer_len "
                     "of %d sectors", nb_sectors, bs->bl.max_transfer_length);
        return -EINVAL;
    }

    lba = sector_qemu2lun(sector_num, iscsilun);
    num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    fua = iscsilun->dpofua && !bs->enable_write_cache;
    iTask.force_next_flush = !fua;
    if (iscsilun->use_16_for_rw) {
        iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba,
                                        NULL, num_sectors * iscsilun->block_size,
                                        iscsilun->block_size, 0, 0, fua, 0, 0,
                                        iscsi_co_generic_cb, &iTask);
    } else {
        iTask.task = iscsi_write10_task(iscsilun->iscsi, iscsilun->lun, lba,
                                        NULL, num_sectors * iscsilun->block_size,
                                        iscsilun->block_size, 0, 0, fua, 0, 0,
                                        iscsi_co_generic_cb, &iTask);
    }
    if (iTask.task == NULL) {
        return -ENOMEM;
    }
    scsi_task_set_iov_out(iTask.task, (struct scsi_iovec *) iov->iov,
                          iov->niov);
    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return iTask.err_code;
    }

    iscsi_allocationmap_set(iscsilun, sector_num, nb_sectors);

    return 0;
}


static bool iscsi_allocationmap_is_allocated(IscsiLun *iscsilun,
                                             int64_t sector_num, int nb_sectors)
{
    unsigned long size;
    if (iscsilun->allocationmap == NULL) {
        return true;
    }
    size = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors);
    return !(find_next_bit(iscsilun->allocationmap, size,
                           sector_num / iscsilun->cluster_sectors) == size);
}

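/* Note (added for clarity): the allocation map tracks, at cluster_sectors
 * granularity, which areas of the LUN may contain allocated data.  Bits are
 * set pessimistically (any partially touched cluster is marked allocated by
 * iscsi_allocationmap_set) and cleared conservatively (only clusters that are
 * entirely discarded or zeroed are cleared), so a clear bit reliably means
 * "known unallocated/zero" while a set bit only means "maybe data". */
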
static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
                                                      int64_t sector_num,
                                                      int nb_sectors, int *pnum)
{
    IscsiLun *iscsilun = bs->opaque;
    struct scsi_get_lba_status *lbas = NULL;
    struct scsi_lba_status_descriptor *lbasd = NULL;
    struct IscsiTask iTask;
    int64_t ret;

    iscsi_co_init_iscsitask(iscsilun, &iTask);

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        ret = -EINVAL;
        goto out;
    }

    /* default to all sectors allocated */
    ret = BDRV_BLOCK_DATA;
    ret |= (sector_num << BDRV_SECTOR_BITS) | BDRV_BLOCK_OFFSET_VALID;
    *pnum = nb_sectors;

    /* LUN does not support logical block provisioning */
    if (!iscsilun->lbpme) {
        goto out;
    }

retry:
    if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
                                  sector_qemu2lun(sector_num, iscsilun),
                                  8 + 16, iscsi_co_generic_cb,
                                  &iTask) == NULL) {
        ret = -ENOMEM;
        goto out;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.do_retry) {
        if (iTask.task != NULL) {
            scsi_free_scsi_task(iTask.task);
            iTask.task = NULL;
        }
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        /* in case the get_lba_status_callout fails (i.e.
         * because the device is busy or the cmd is not
         * supported) we pretend all blocks are allocated
         * for backwards compatibility */
        goto out;
    }

    lbas = scsi_datain_unmarshall(iTask.task);
    if (lbas == NULL) {
        ret = -EIO;
        goto out;
    }

    lbasd = &lbas->descriptors[0];

    if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
        ret = -EIO;
        goto out;
    }

    *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);

    if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED ||
        lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) {
        ret &= ~BDRV_BLOCK_DATA;
        if (iscsilun->lbprz) {
            ret |= BDRV_BLOCK_ZERO;
        }
    }

    if (ret & BDRV_BLOCK_ZERO) {
        iscsi_allocationmap_clear(iscsilun, sector_num, *pnum);
    } else {
        iscsi_allocationmap_set(iscsilun, sector_num, *pnum);
    }

    if (*pnum > nb_sectors) {
        *pnum = nb_sectors;
    }
out:
    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
    }
    return ret;
}

static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
                                       int64_t sector_num, int nb_sectors,
                                       QEMUIOVector *iov)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;
    uint64_t lba;
    uint32_t num_sectors;

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        return -EINVAL;
    }

    if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
        error_report("iSCSI Error: Read of %d sectors exceeds max_xfer_len "
                     "of %d sectors", nb_sectors, bs->bl.max_transfer_length);
        return -EINVAL;
    }

    if (iscsilun->lbprz && nb_sectors >= ISCSI_CHECKALLOC_THRES &&
        !iscsi_allocationmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
        int64_t ret;
        int pnum;
        ret = iscsi_co_get_block_status(bs, sector_num, INT_MAX, &pnum);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO && pnum >= nb_sectors) {
            qemu_iovec_memset(iov, 0, 0x00, iov->size);
            return 0;
        }
    }

    lba = sector_qemu2lun(sector_num, iscsilun);
    num_sectors = sector_qemu2lun(nb_sectors, iscsilun);

    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    if (iscsilun->use_16_for_rw) {
        iTask.task = iscsi_read16_task(iscsilun->iscsi, iscsilun->lun, lba,
                                       num_sectors * iscsilun->block_size,
                                       iscsilun->block_size, 0, 0, 0, 0, 0,
                                       iscsi_co_generic_cb, &iTask);
    } else {
        iTask.task = iscsi_read10_task(iscsilun->iscsi, iscsilun->lun, lba,
                                       num_sectors * iscsilun->block_size,
                                       iscsilun->block_size,
                                       0, 0, 0, 0, 0,
                                       iscsi_co_generic_cb, &iTask);
    }
    if (iTask.task == NULL) {
        return -ENOMEM;
    }
    scsi_task_set_iov_in(iTask.task, (struct scsi_iovec *) iov->iov, iov->niov);

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return iTask.err_code;
    }

    return 0;
}

static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;

    if (!iscsilun->force_next_flush) {
        return 0;
    }
    iscsilun->force_next_flush = false;

    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0,
                                      0, iscsi_co_generic_cb, &iTask) == NULL) {
        return -ENOMEM;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return iTask.err_code;
    }

    return 0;
}

#ifdef __linux__
static void
iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
                   void *command_data, void *opaque)
{
    IscsiAIOCB *acb = opaque;

    g_free(acb->buf);
    acb->buf = NULL;

    acb->status = 0;
    if (status < 0) {
        error_report("Failed to ioctl(SG_IO) to iSCSI lun. %s",
                     iscsi_get_error(iscsi));
        acb->status = iscsi_translate_sense(&acb->task->sense);
    }

    acb->ioh->driver_status = 0;
    acb->ioh->host_status = 0;
    acb->ioh->resid = 0;

#define SG_ERR_DRIVER_SENSE 0x08

    if (status == SCSI_STATUS_CHECK_CONDITION && acb->task->datain.size >= 2) {
        int ss;

        acb->ioh->driver_status |= SG_ERR_DRIVER_SENSE;

        acb->ioh->sb_len_wr = acb->task->datain.size - 2;
        ss = (acb->ioh->mx_sb_len >= acb->ioh->sb_len_wr) ?
             acb->ioh->mx_sb_len : acb->ioh->sb_len_wr;
        memcpy(acb->ioh->sbp, &acb->task->datain.data[2], ss);
    }

    iscsi_schedule_bh(acb);
}

static void iscsi_ioctl_bh_completion(void *opaque)
{
    IscsiAIOCB *acb = opaque;

    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

static void iscsi_ioctl_handle_emulated(IscsiAIOCB *acb, int req, void *buf)
{
    BlockDriverState *bs = acb->common.bs;
    IscsiLun *iscsilun = bs->opaque;
    int ret = 0;

    switch (req) {
    case SG_GET_VERSION_NUM:
        *(int *)buf = 30000;
        break;
    case SG_GET_SCSI_ID:
        ((struct sg_scsi_id *)buf)->scsi_type = iscsilun->type;
        break;
    default:
        ret = -EINVAL;
    }
    assert(!acb->bh);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs),
                         iscsi_ioctl_bh_completion, acb);
    acb->ret = ret;
    qemu_bh_schedule(acb->bh);
}

static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
                                   unsigned long int req, void *buf,
                                   BlockCompletionFunc *cb, void *opaque)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = iscsilun->iscsi;
    struct iscsi_data data;
    IscsiAIOCB *acb;

    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);

    acb->iscsilun = iscsilun;
    acb->bh = NULL;
    acb->status = -EINPROGRESS;
    acb->buf = NULL;
    acb->ioh = buf;

    if (req != SG_IO) {
        iscsi_ioctl_handle_emulated(acb, req, buf);
        return &acb->common;
    }

    acb->task = malloc(sizeof(struct scsi_task));
    if (acb->task == NULL) {
        error_report("iSCSI: Failed to allocate task for scsi command. %s",
                     iscsi_get_error(iscsi));
        qemu_aio_unref(acb);
        return NULL;
    }
    memset(acb->task, 0, sizeof(struct scsi_task));

    switch (acb->ioh->dxfer_direction) {
    case SG_DXFER_TO_DEV:
        acb->task->xfer_dir = SCSI_XFER_WRITE;
        break;
    case SG_DXFER_FROM_DEV:
        acb->task->xfer_dir = SCSI_XFER_READ;
        break;
    default:
        acb->task->xfer_dir = SCSI_XFER_NONE;
        break;
    }

    acb->task->cdb_size = acb->ioh->cmd_len;
    memcpy(&acb->task->cdb[0], acb->ioh->cmdp, acb->ioh->cmd_len);
    acb->task->expxferlen = acb->ioh->dxfer_len;

    data.size = 0;
    if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
        if (acb->ioh->iovec_count == 0) {
            data.data = acb->ioh->dxferp;
            data.size = acb->ioh->dxfer_len;
        } else {
            scsi_task_set_iov_out(acb->task,
                                  (struct scsi_iovec *) acb->ioh->dxferp,
                                  acb->ioh->iovec_count);
        }
    }

    if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task,
                                 iscsi_aio_ioctl_cb,
                                 (data.size > 0) ?
                                 &data : NULL,
                                 acb) != 0) {
        scsi_free_scsi_task(acb->task);
        qemu_aio_unref(acb);
        return NULL;
    }

    /* tell libiscsi to read straight into the buffer we got from ioctl */
    if (acb->task->xfer_dir == SCSI_XFER_READ) {
        if (acb->ioh->iovec_count == 0) {
            scsi_task_add_data_in_buffer(acb->task,
                                         acb->ioh->dxfer_len,
                                         acb->ioh->dxferp);
        } else {
            scsi_task_set_iov_in(acb->task,
                                 (struct scsi_iovec *) acb->ioh->dxferp,
                                 acb->ioh->iovec_count);
        }
    }

    iscsi_set_events(iscsilun);

    return &acb->common;
}

#endif

static int64_t
iscsi_getlength(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;
    int64_t len;

    len = iscsilun->num_blocks;
    len *= iscsilun->block_size;

    return len;
}

static int
coroutine_fn iscsi_co_discard(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;
    struct unmap_list list;

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        return -EINVAL;
    }

    if (!iscsilun->lbp.lbpu) {
        /* UNMAP is not supported by the target */
        return 0;
    }

    list.lba = sector_qemu2lun(sector_num, iscsilun);
    list.num = sector_qemu2lun(nb_sectors, iscsilun);

    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1,
                         iscsi_co_generic_cb, &iTask) == NULL) {
        return -ENOMEM;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status == SCSI_STATUS_CHECK_CONDITION) {
        /* the target might fail with a check condition if it
         * is not happy with the alignment of the UNMAP request;
         * we silently fail in this case */
        return 0;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return iTask.err_code;
    }

    iscsi_allocationmap_clear(iscsilun, sector_num, nb_sectors);

    return 0;
}

static int
coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, BdrvRequestFlags flags)
{
    IscsiLun *iscsilun = bs->opaque;
    struct IscsiTask iTask;
    uint64_t lba;
    uint32_t nb_blocks;
    bool use_16_for_ws = iscsilun->use_16_for_rw;

    if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        return -EINVAL;
    }

    if (flags & BDRV_REQ_MAY_UNMAP) {
        if (!use_16_for_ws && !iscsilun->lbp.lbpws10) {
            /* WRITESAME10 with UNMAP is not supported; try WRITESAME16 */
            use_16_for_ws = true;
        }
        if (use_16_for_ws && !iscsilun->lbp.lbpws) {
            /* WRITESAME16 with UNMAP is not supported by the target,
             * fall back and try WRITESAME10/16 without UNMAP */
            flags &= ~BDRV_REQ_MAY_UNMAP;
            use_16_for_ws = iscsilun->use_16_for_rw;
        }
    }

    if (!(flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->has_write_same) {
        /* WRITESAME without UNMAP is not supported by the target */
        return -ENOTSUP;
    }

    lba = sector_qemu2lun(sector_num, iscsilun);
    nb_blocks = sector_qemu2lun(nb_sectors, iscsilun);

    if (iscsilun->zeroblock == NULL) {
        iscsilun->zeroblock = g_try_malloc0(iscsilun->block_size);
        if (iscsilun->zeroblock == NULL) {
            return -ENOMEM;
        }
    }

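    /* WRITE SAME transfers only a single logical block (zeroblock); the
     * target then replicates it across nb_blocks blocks on the medium. */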
    iscsi_co_init_iscsitask(iscsilun, &iTask);
    iTask.force_next_flush = true;
retry:
    if (use_16_for_ws) {
        iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
                                            iscsilun->zeroblock, iscsilun->block_size,
                                            nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
                                            0, 0, iscsi_co_generic_cb, &iTask);
    } else {
        iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
                                            iscsilun->zeroblock, iscsilun->block_size,
                                            nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
                                            0, 0, iscsi_co_generic_cb, &iTask);
    }
    if (iTask.task == NULL) {
        return -ENOMEM;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_coroutine_yield();
    }

    if (iTask.status == SCSI_STATUS_CHECK_CONDITION &&
        iTask.task->sense.key == SCSI_SENSE_ILLEGAL_REQUEST &&
        (iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_OPERATION_CODE ||
         iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_FIELD_IN_CDB)) {
        /* WRITE SAME is not supported by the target */
        iscsilun->has_write_same = false;
        scsi_free_scsi_task(iTask.task);
        return -ENOTSUP;
    }

    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
        iTask.task = NULL;
    }

    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        return iTask.err_code;
    }

    if (flags & BDRV_REQ_MAY_UNMAP) {
        iscsi_allocationmap_clear(iscsilun, sector_num, nb_sectors);
    } else {
        iscsi_allocationmap_set(iscsilun, sector_num, nb_sectors);
    }

    return 0;
}

static void parse_chap(struct iscsi_context *iscsi, const char *target,
                       Error **errp)
{
    QemuOptsList *list;
    QemuOpts *opts;
    const char *user = NULL;
    const char *password = NULL;

    list = qemu_find_opts("iscsi");
    if (!list) {
        return;
    }

    opts = qemu_opts_find(list, target);
    if (opts == NULL) {
        opts = QTAILQ_FIRST(&list->head);
        if (!opts) {
            return;
        }
    }

    user = qemu_opt_get(opts, "user");
    if (!user) {
        return;
    }

    password = qemu_opt_get(opts, "password");
    if (!password) {
        error_setg(errp, "CHAP username specified but no password was given");
        return;
    }

    if (iscsi_set_initiator_username_pwd(iscsi, user, password)) {
        error_setg(errp, "Failed to set initiator username and password");
    }
}

static void parse_header_digest(struct iscsi_context *iscsi, const char *target,
                                Error **errp)
{
    QemuOptsList *list;
    QemuOpts *opts;
    const char *digest = NULL;

    list = qemu_find_opts("iscsi");
    if (!list) {
        return;
    }

    opts = qemu_opts_find(list, target);
    if (opts == NULL) {
        opts = QTAILQ_FIRST(&list->head);
        if (!opts) {
            return;
        }
    }

    digest = qemu_opt_get(opts, "header-digest");
    if (!digest) {
        return;
    }

    if (!strcmp(digest, "CRC32C")) {
        iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_CRC32C);
    } else if (!strcmp(digest, "NONE")) {
        iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE);
    } else if (!strcmp(digest, "CRC32C-NONE")) {
        iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_CRC32C_NONE);
    } else if (!strcmp(digest, "NONE-CRC32C")) {
        iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C);
    } else {
        error_setg(errp, "Invalid header-digest setting : %s", digest);
    }
}

static char *parse_initiator_name(const char *target)
{
    QemuOptsList *list;
    QemuOpts *opts;
    const char *name;
    char *iscsi_name;
    UuidInfo *uuid_info;

    list = qemu_find_opts("iscsi");
    if (list) {
        opts = qemu_opts_find(list, target);
        if (!opts) {
            opts = QTAILQ_FIRST(&list->head);
        }
        if (opts) {
            name = qemu_opt_get(opts, "initiator-name");
            if (name) {
                return g_strdup(name);
            }
        }
    }

    uuid_info = qmp_query_uuid(NULL);
    if (strcmp(uuid_info->UUID, UUID_NONE) == 0) {
        name = qemu_get_vm_name();
    } else {
        name = uuid_info->UUID;
    }
    iscsi_name = g_strdup_printf("iqn.2008-11.org.linux-kvm%s%s",
                                 name ? ":" : "", name ? name : "");
    qapi_free_UuidInfo(uuid_info);
    return iscsi_name;
}

static int parse_timeout(const char *target)
{
    QemuOptsList *list;
    QemuOpts *opts;
    const char *timeout;

    list = qemu_find_opts("iscsi");
    if (list) {
        opts = qemu_opts_find(list, target);
        if (!opts) {
            opts = QTAILQ_FIRST(&list->head);
        }
        if (opts) {
            timeout = qemu_opt_get(opts, "timeout");
            if (timeout) {
                return atoi(timeout);
            }
        }
    }

    return 0;
}

static void iscsi_nop_timed_event(void *opaque)
{
    IscsiLun *iscsilun = opaque;

    if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
        error_report("iSCSI: NOP timeout. Reconnecting...");
        iscsilun->request_timed_out = true;
    } else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
        error_report("iSCSI: failed to send NOP-Out. Disabling NOP messages.");
        return;
    }

    timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
    iscsi_set_events(iscsilun);
}

static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
{
    struct scsi_task *task = NULL;
    struct scsi_readcapacity10 *rc10 = NULL;
    struct scsi_readcapacity16 *rc16 = NULL;
    int retries = ISCSI_CMD_RETRIES;

    do {
        if (task != NULL) {
            scsi_free_scsi_task(task);
            task = NULL;
        }

        switch (iscsilun->type) {
        case TYPE_DISK:
            task = iscsi_readcapacity16_sync(iscsilun->iscsi, iscsilun->lun);
            if (task != NULL && task->status == SCSI_STATUS_GOOD) {
                rc16 = scsi_datain_unmarshall(task);
                if (rc16 == NULL) {
                    error_setg(errp, "iSCSI: Failed to unmarshall readcapacity16 data.");
                } else {
                    iscsilun->block_size = rc16->block_length;
                    iscsilun->num_blocks = rc16->returned_lba + 1;
                    iscsilun->lbpme = !!rc16->lbpme;
                    iscsilun->lbprz = !!rc16->lbprz;
                    iscsilun->use_16_for_rw = (rc16->returned_lba > 0xffffffff);
                }
            }
            break;
        case TYPE_ROM:
            task = iscsi_readcapacity10_sync(iscsilun->iscsi, iscsilun->lun, 0, 0);
            if (task != NULL && task->status == SCSI_STATUS_GOOD) {
                rc10 = scsi_datain_unmarshall(task);
                if (rc10 == NULL) {
                    error_setg(errp, "iSCSI: Failed to unmarshall readcapacity10 data.");
                } else {
                    iscsilun->block_size = rc10->block_size;
                    if (rc10->lba == 0) {
                        /* blank disk loaded */
                        iscsilun->num_blocks = 0;
                    } else {
                        iscsilun->num_blocks = rc10->lba + 1;
                    }
                }
            }
            break;
        default:
            return;
        }
    } while (task != NULL && task->status == SCSI_STATUS_CHECK_CONDITION
             && task->sense.key == SCSI_SENSE_UNIT_ATTENTION
             && retries-- > 0);

    if (task == NULL || task->status != SCSI_STATUS_GOOD) {
        error_setg(errp, "iSCSI: failed to send readcapacity10/16 command");
    } else if (!iscsilun->block_size ||
               iscsilun->block_size % BDRV_SECTOR_SIZE) {
        error_setg(errp, "iSCSI: the target returned an invalid "
                   "block size of %d.", iscsilun->block_size);
    }
    if (task) {
        scsi_free_scsi_task(task);
    }
}

/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "iscsi",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the iscsi image",
        },
        { /* end of list */ }
    },
};

static struct scsi_task *iscsi_do_inquiry(struct iscsi_context *iscsi, int lun,
                                          int evpd, int pc, void **inq, Error **errp)
{
    int full_size;
    struct scsi_task *task = NULL;
    task = iscsi_inquiry_sync(iscsi, lun, evpd, pc, 64);
    if (task == NULL || task->status != SCSI_STATUS_GOOD) {
        goto fail;
    }
    full_size = scsi_datain_getfullsize(task);
    if (full_size > task->datain.size) {
        scsi_free_scsi_task(task);

        /* we need more data for the full list */
        task = iscsi_inquiry_sync(iscsi, lun, evpd, pc, full_size);
        if (task == NULL || task->status != SCSI_STATUS_GOOD) {
            goto fail;
        }
    }

    *inq = scsi_datain_unmarshall(task);
    if (*inq == NULL) {
        error_setg(errp, "iSCSI: failed to unmarshall inquiry datain blob");
        goto fail_with_err;
    }

    return task;

fail:
    error_setg(errp, "iSCSI: Inquiry command failed : %s",
               iscsi_get_error(iscsi));
fail_with_err:
    if (task != NULL) {
        scsi_free_scsi_task(task);
    }
    return NULL;
}

static void iscsi_detach_aio_context(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;

    aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
                       false, NULL, NULL, NULL);
    iscsilun->events = 0;

    if (iscsilun->nop_timer) {
        timer_del(iscsilun->nop_timer);
        timer_free(iscsilun->nop_timer);
        iscsilun->nop_timer = NULL;
    }
    if (iscsilun->event_timer) {
        timer_del(iscsilun->event_timer);
        timer_free(iscsilun->event_timer);
        iscsilun->event_timer = NULL;
    }
}

static void iscsi_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    IscsiLun *iscsilun = bs->opaque;

    iscsilun->aio_context = new_context;
    iscsi_set_events(iscsilun);

    /* Set up a timer for sending out iSCSI NOPs */
    iscsilun->nop_timer = aio_timer_new(iscsilun->aio_context,
                                        QEMU_CLOCK_REALTIME, SCALE_MS,
                                        iscsi_nop_timed_event, iscsilun);
    timer_mod(iscsilun->nop_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);

    /* Set up a timer for periodic calls to iscsi_set_events and to
     * scan for command timeout */
    iscsilun->event_timer = aio_timer_new(iscsilun->aio_context,
                                          QEMU_CLOCK_REALTIME, SCALE_MS,
                                          iscsi_timed_check_events, iscsilun);
    timer_mod(iscsilun->event_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL);
}

static void iscsi_modesense_sync(IscsiLun *iscsilun)
{
    struct scsi_task *task;
    struct scsi_mode_sense *ms = NULL;
    iscsilun->write_protected = false;
    iscsilun->dpofua = false;

    task = iscsi_modesense6_sync(iscsilun->iscsi, iscsilun->lun,
                                 1, SCSI_MODESENSE_PC_CURRENT,
                                 0x3F, 0, 255);
    if (task == NULL) {
        error_report("iSCSI: Failed to send MODE_SENSE(6) command: %s",
                     iscsi_get_error(iscsilun->iscsi));
        goto out;
    }

    if (task->status != SCSI_STATUS_GOOD) {
        error_report("iSCSI: Failed MODE_SENSE(6), LUN assumed writable");
        goto out;
    }
    ms = scsi_datain_unmarshall(task);
    if (!ms) {
        error_report("iSCSI: Failed to unmarshall MODE_SENSE(6) data: %s",
                     iscsi_get_error(iscsilun->iscsi));
        goto out;
    }
    iscsilun->write_protected = ms->device_specific_parameter & 0x80;
    iscsilun->dpofua = ms->device_specific_parameter & 0x10;

out:
    if (task) {
        scsi_free_scsi_task(task);
    }
}

/*
 * We support iSCSI URLs of the form
 * iscsi://[<username>%<password>@]<host>[:<port>]/<targetname>/<lun>
 */
static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = NULL;
    struct iscsi_url *iscsi_url = NULL;
    struct scsi_task *task = NULL;
    struct scsi_inquiry_standard *inq = NULL;
    struct scsi_inquiry_supported_pages *inq_vpd;
    char *initiator_name = NULL;
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;
    int i, ret = 0, timeout = 0;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    iscsi_url = iscsi_parse_full_url(iscsi, filename);
    if (iscsi_url == NULL) {
        error_setg(errp, "Failed to parse URL : %s", filename);
        ret = -EINVAL;
        goto out;
    }

    memset(iscsilun, 0, sizeof(IscsiLun));

    initiator_name = parse_initiator_name(iscsi_url->target);

    iscsi = iscsi_create_context(initiator_name);
    if (iscsi == NULL) {
        error_setg(errp, "iSCSI: Failed to create iSCSI context.");
        ret = -ENOMEM;
        goto out;
    }

    if (iscsi_set_targetname(iscsi, iscsi_url->target)) {
        error_setg(errp, "iSCSI: Failed to set target name.");
        ret = -EINVAL;
        goto out;
    }

    if (iscsi_url->user[0] != '\0') {
        ret = iscsi_set_initiator_username_pwd(iscsi, iscsi_url->user,
                                               iscsi_url->passwd);
        if (ret != 0) {
            error_setg(errp, "Failed to set initiator username and password");
            ret = -EINVAL;
            goto out;
        }
    }

    /* check if we got CHAP username/password via the options */
    parse_chap(iscsi, iscsi_url->target, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    if (iscsi_set_session_type(iscsi, ISCSI_SESSION_NORMAL) != 0) {
        error_setg(errp, "iSCSI: Failed to set session type to normal.");
        ret = -EINVAL;
        goto out;
    }

    iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C);

    /* check if we got HEADER_DIGEST via the options */
    parse_header_digest(iscsi, iscsi_url->target, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    /* timeout handling is broken in libiscsi before 1.15.0 */
    timeout = parse_timeout(iscsi_url->target);
#if defined(LIBISCSI_API_VERSION) && LIBISCSI_API_VERSION >= 20150621
    iscsi_set_timeout(iscsi, timeout);
#else
    if (timeout) {
        error_report("iSCSI: ignoring timeout value for libiscsi <1.15.0");
    }
#endif

    if (iscsi_full_connect_sync(iscsi, iscsi_url->portal, iscsi_url->lun) != 0) {
        error_setg(errp, "iSCSI: Failed to connect to LUN : %s",
                   iscsi_get_error(iscsi));
        ret = -EINVAL;
        goto out;
    }

    iscsilun->iscsi = iscsi;
    iscsilun->aio_context = bdrv_get_aio_context(bs);
    iscsilun->lun = iscsi_url->lun;
    iscsilun->has_write_same = true;

    task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 0, 0,
                            (void **) &inq, errp);
    if (task == NULL) {
        ret = -EINVAL;
        goto out;
    }
    iscsilun->type = inq->periperal_device_type;
    scsi_free_scsi_task(task);
    task = NULL;

    iscsi_modesense_sync(iscsilun);

    /* Check the write protect flag of the LUN if we want to write */
    if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
        iscsilun->write_protected) {
        error_setg(errp, "Cannot open a write protected LUN as read-write");
        ret = -EACCES;
        goto out;
    }

    iscsi_readcapacity_sync(iscsilun, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }
    bs->total_sectors = sector_lun2qemu(iscsilun->num_blocks, iscsilun);
    bs->request_alignment = iscsilun->block_size;

    /* We don't have any emulation for devices other than disks and CD-ROMs, so
     * this must be sg ioctl compatible. We force it to be sg, otherwise qemu
     * will try to read from the device to guess the image format.
     */
    if (iscsilun->type != TYPE_DISK && iscsilun->type != TYPE_ROM) {
        bs->sg = 1;
    }

    task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
                            SCSI_INQUIRY_PAGECODE_SUPPORTED_VPD_PAGES,
                            (void **) &inq_vpd, errp);
    if (task == NULL) {
        ret = -EINVAL;
        goto out;
    }
    for (i = 0; i < inq_vpd->num_pages; i++) {
        struct scsi_task *inq_task;
        struct scsi_inquiry_logical_block_provisioning *inq_lbp;
        struct scsi_inquiry_block_limits *inq_bl;
        switch (inq_vpd->pages[i]) {
        case SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING:
            inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
                                        SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING,
                                        (void **) &inq_lbp, errp);
            if (inq_task == NULL) {
                ret = -EINVAL;
                goto out;
            }
            memcpy(&iscsilun->lbp, inq_lbp,
                   sizeof(struct scsi_inquiry_logical_block_provisioning));
            scsi_free_scsi_task(inq_task);
            break;
        case SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS:
            inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
                                        SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS,
                                        (void **) &inq_bl, errp);
            if (inq_task == NULL) {
                ret = -EINVAL;
                goto out;
            }
            memcpy(&iscsilun->bl, inq_bl,
                   sizeof(struct scsi_inquiry_block_limits));
            scsi_free_scsi_task(inq_task);
            break;
        default:
            break;
        }
    }
    scsi_free_scsi_task(task);
    task = NULL;

    iscsi_attach_aio_context(bs, iscsilun->aio_context);

    /* Guess the internal cluster (page) size of the iscsi target by means
     * of opt_unmap_gran. Transfer the unmap granularity only if it has a
     * reasonable size */
    if (iscsilun->bl.opt_unmap_gran * iscsilun->block_size >= 4 * 1024 &&
        iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) {
        iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran *
                                     iscsilun->block_size) >> BDRV_SECTOR_BITS;
        if (iscsilun->lbprz) {
            iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
            if (iscsilun->allocationmap == NULL) {
                ret = -ENOMEM;
            }
        }
    }

out:
    qemu_opts_del(opts);
    g_free(initiator_name);
    if (iscsi_url != NULL) {
        iscsi_destroy_url(iscsi_url);
    }
    if (task != NULL) {
        scsi_free_scsi_task(task);
    }

    if (ret) {
        if (iscsi != NULL) {
            if (iscsi_is_logged_in(iscsi)) {
                iscsi_logout_sync(iscsi);
            }
            iscsi_destroy_context(iscsi);
        }
        memset(iscsilun, 0, sizeof(IscsiLun));
    }
    return ret;
}

static void iscsi_close(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = iscsilun->iscsi;

    iscsi_detach_aio_context(bs);
    if (iscsi_is_logged_in(iscsi)) {
        iscsi_logout_sync(iscsi);
    }
    iscsi_destroy_context(iscsi);
    g_free(iscsilun->zeroblock);
    g_free(iscsilun->allocationmap);
    memset(iscsilun, 0, sizeof(IscsiLun));
}

static int sector_limits_lun2qemu(int64_t sector, IscsiLun *iscsilun)
{
    return MIN(sector_lun2qemu(sector, iscsilun), INT_MAX / 2 + 1);
}

static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
{
    /* We don't actually refresh here, but just return data queried in
     * iscsi_open(): iscsi targets don't change their limits. */

    IscsiLun *iscsilun = bs->opaque;
    uint32_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;

    if (iscsilun->bl.max_xfer_len) {
        max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len);
    }

    bs->bl.max_transfer_length = sector_limits_lun2qemu(max_xfer_len, iscsilun);

    if (iscsilun->lbp.lbpu) {
        if (iscsilun->bl.max_unmap < 0xffffffff) {
            bs->bl.max_discard =
                sector_limits_lun2qemu(iscsilun->bl.max_unmap, iscsilun);
        }
        bs->bl.discard_alignment =
            sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
    }

    if (iscsilun->bl.max_ws_len < 0xffffffff) {
        bs->bl.max_write_zeroes =
            sector_limits_lun2qemu(iscsilun->bl.max_ws_len, iscsilun);
    }
    if (iscsilun->lbp.lbpws) {
        bs->bl.write_zeroes_alignment =
            sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
    }
    bs->bl.opt_transfer_length =
        sector_limits_lun2qemu(iscsilun->bl.opt_xfer_len, iscsilun);
}

/* Note that this will not re-establish a connection with an iSCSI target - it
 * is effectively a NOP. */
static int iscsi_reopen_prepare(BDRVReopenState *state,
                                BlockReopenQueue *queue, Error **errp)
{
    IscsiLun *iscsilun = state->bs->opaque;

    if (state->flags & BDRV_O_RDWR && iscsilun->write_protected) {
        error_setg(errp, "Cannot open a write protected LUN as read-write");
        return -EACCES;
    }
    return 0;
}

static int iscsi_truncate(BlockDriverState *bs, int64_t offset)
{
    IscsiLun *iscsilun = bs->opaque;
    Error *local_err = NULL;

    if (iscsilun->type != TYPE_DISK) {
        return -ENOTSUP;
    }

    iscsi_readcapacity_sync(iscsilun, &local_err);
    if (local_err != NULL) {
        error_free(local_err);
        return -EIO;
    }

    if (offset > iscsi_getlength(bs)) {
        return -EINVAL;
    }

    if (iscsilun->allocationmap != NULL) {
        g_free(iscsilun->allocationmap);
        iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
    }

    return 0;
}

static int iscsi_create(const char *filename, QemuOpts *opts, Error **errp)
{
    int ret = 0;
    int64_t total_size = 0;
    BlockDriverState *bs;
    IscsiLun *iscsilun = NULL;
    QDict *bs_options;

    bs = bdrv_new();

    /* Read out options */
    total_size = DIV_ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                              BDRV_SECTOR_SIZE);
    bs->opaque = g_new0(struct IscsiLun, 1);
    iscsilun = bs->opaque;

    bs_options = qdict_new();
    qdict_put(bs_options, "filename", qstring_from_str(filename));
    ret = iscsi_open(bs, bs_options, 0, NULL);
    QDECREF(bs_options);

    if (ret != 0) {
        goto out;
    }
    iscsi_detach_aio_context(bs);
    if (iscsilun->type != TYPE_DISK) {
        ret = -ENODEV;
        goto out;
    }
    if (bs->total_sectors < total_size) {
        ret = -ENOSPC;
        goto out;
    }

    ret = 0;
out:
    if (iscsilun->iscsi != NULL) {
        iscsi_destroy_context(iscsilun->iscsi);
    }
    g_free(bs->opaque);
    bs->opaque = NULL;
    bdrv_unref(bs);
    return ret;
}

static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    IscsiLun *iscsilun = bs->opaque;
    bdi->unallocated_blocks_are_zero = iscsilun->lbprz;
    bdi->can_write_zeroes_with_unmap = iscsilun->lbprz && iscsilun->lbp.lbpws;
    bdi->cluster_size = iscsilun->cluster_sectors * BDRV_SECTOR_SIZE;
    return 0;
}

static QemuOptsList iscsi_create_opts = {
    .name = "iscsi-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(iscsi_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_iscsi = {
    .format_name = "iscsi",
    .protocol_name = "iscsi",

    .instance_size = sizeof(IscsiLun),
    .bdrv_needs_filename = true,
    .bdrv_file_open = iscsi_open,
    .bdrv_close = iscsi_close,
    .bdrv_create = iscsi_create,
    .create_opts = &iscsi_create_opts,
    .bdrv_reopen_prepare = iscsi_reopen_prepare,

    .bdrv_getlength = iscsi_getlength,
    .bdrv_get_info = iscsi_get_info,
    .bdrv_truncate = iscsi_truncate,
    .bdrv_refresh_limits = iscsi_refresh_limits,

    .bdrv_co_get_block_status = iscsi_co_get_block_status,
    .bdrv_co_discard = iscsi_co_discard,
    .bdrv_co_write_zeroes = iscsi_co_write_zeroes,
    .bdrv_co_readv = iscsi_co_readv,
    .bdrv_co_writev = iscsi_co_writev,
    .bdrv_co_flush_to_disk = iscsi_co_flush,

#ifdef __linux__
    .bdrv_aio_ioctl = iscsi_aio_ioctl,
#endif

    .bdrv_detach_aio_context = iscsi_detach_aio_context,
    .bdrv_attach_aio_context = iscsi_attach_aio_context,
};

static QemuOptsList qemu_iscsi_opts = {
    .name = "iscsi",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_iscsi_opts.head),
    .desc = {
        {
            .name = "user",
            .type = QEMU_OPT_STRING,
            .help = "username for CHAP authentication to target",
        },{
            .name = "password",
            .type = QEMU_OPT_STRING,
            .help = "password for CHAP authentication to target",
        },{
            .name = "header-digest",
            .type = QEMU_OPT_STRING,
            .help = "HeaderDigest setting. "
                    "{CRC32C|CRC32C-NONE|NONE-CRC32C|NONE}",
        },{
            .name = "initiator-name",
            .type = QEMU_OPT_STRING,
            .help = "Initiator iqn name to use when connecting",
        },{
            .name = "timeout",
            .type = QEMU_OPT_NUMBER,
            .help = "Request timeout in seconds (default 0 = no timeout)",
        },
        { /* end of list */ }
    },
};

static void iscsi_block_init(void)
{
    bdrv_register(&bdrv_iscsi);
    qemu_add_opts(&qemu_iscsi_opts);
}

block_init(iscsi_block_init);
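
/*
 * Example usage (added for illustration; the portal address, target IQN and
 * LUN below are placeholders and must be replaced with real values):
 *
 *   qemu-system-x86_64 \
 *       -iscsi initiator-name=iqn.2008-11.org.linux-kvm:example \
 *       -drive file=iscsi://192.0.2.1:3260/iqn.2001-04.com.example:storage/1
 *
 * CHAP credentials and the header digest can be supplied per target with
 * -iscsi user=<name>,password=<secret>,header-digest=CRC32C, matching the
 * qemu_iscsi_opts list above.
 */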