/*
 * QEMU UFS Logical Unit
 *
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
 *
 * Written by Jeuk Kim <jeuk20.kim@samsung.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/memalign.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "ufs.h"

/*
 * The code below handling SCSI commands is copied from hw/scsi/scsi-disk.c,
 * with minor adjustments to make it work for UFS.
 */

#define SCSI_DMA_BUF_SIZE (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN 256
#define SCSI_INQUIRY_DATA_SIZE 36
#define SCSI_MAX_MODE_LEN 256

typedef struct UfsSCSIReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
} UfsSCSIReq;

static void ufs_scsi_free_request(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

static void scsi_check_condition(UfsSCSIReq *r, SCSISense sense)
{
    trace_ufs_scsi_check_condition(r->req.tag, sense.key, sense.asc,
                                   sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf,
                                     uint32_t outbuf_len)
{
    UfsHc *u = UFS(req->bus->qbus.parent);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) {
        return -1;
    }

    outbuf[buflen++] = lu->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_ufs_scsi_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (u->params.serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x87; /* mode page policy */
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!u->params.serial) {
            trace_ufs_scsi_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(u->params.serial);
        if (l > SCSI_INQUIRY_DATA_SIZE) {
            l = SCSI_INQUIRY_DATA_SIZE;
        }

        trace_ufs_scsi_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, u->params.serial, l);
        buflen += l;
        break;
    }
    case 0x87: /* Mode Page Policy, mandatory */
    {
        trace_ufs_scsi_emulate_vpd_page_87(req->cmd.xfer);
        outbuf[buflen++] = 0x3f; /* apply to all mode pages and subpages */
        outbuf[buflen++] = 0xff;
        outbuf[buflen++] = 0; /* shared */
        outbuf[buflen++] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}
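
/*
 * Build the standard INQUIRY response (SPC-4). EVPD=1 requests are routed
 * to ufs_scsi_emulate_vpd_page() above; EVPD=0 with a nonzero PAGE CODE
 * is rejected as an illegal request.
 */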
static int ufs_scsi_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf,
                                    uint32_t outbuf_len)
{
    int buflen = 0;

    if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) {
        return -1;
    }

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return ufs_scsi_emulate_vpd_page(req, outbuf, outbuf_len);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    if (is_wlun(req->lun)) {
        outbuf[0] = TYPE_WLUN;
    } else {
        outbuf[0] = 0;
    }
    outbuf[1] = 0;

    strpadcpy((char *)&outbuf[16], 16, "QEMU UFS", ' ');
    strpadcpy((char *)&outbuf[8], 8, "QEMU", ' ');

    memset(&outbuf[32], 0, 4);

    outbuf[2] = 0x06; /* SPC-4 */
    outbuf[3] = 0x2;

    if (buflen > SCSI_INQUIRY_DATA_SIZE) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /*
         * If the allocation length of the CDB is too small, the additional
         * length is not adjusted.
         */
        outbuf[4] = SCSI_INQUIRY_DATA_SIZE - 5;
    }

    /* Support TCQ. */
    outbuf[7] = req->bus->info->tcq ? 0x02 : 0;
    return buflen;
}

static int mode_sense_page(UfsLu *lu, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_CACHING] = 1,
        [MODE_PAGE_R_W_ERROR] = 1,
        [MODE_PAGE_CONTROL] = 1,
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    assert(page < ARRAY_SIZE(mode_sense_valid));
    if ((mode_sense_valid[page]) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode
     * parameters that are changeable shall be returned. As we currently
     * don't support parameter changes via MODE_SELECT, all bits are
     * returned set to zero. The buffer was already memset to zero by the
     * caller of this function.
     */
    switch (page) {
    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(lu->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        break;

    case MODE_PAGE_CONTROL:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[1] = 0x10; /* Queue Algorithm modifier */
        p[8] = 0xff; /* Busy Timeout Period */
        p[9] = 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}
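
/*
 * Emulate MODE SENSE(10). Only the DBD=1 form is accepted, so no block
 * descriptors are ever returned. As an informational sketch, a caching-page
 * request with the write cache enabled produces:
 *   00 1a 00 00 00 00 00 00  08 12 04 00 ...
 * an 8-byte header whose MODE DATA LENGTH (0x001a) excludes itself,
 * followed by the caching page with the WCE bit (0x04) set.
 */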
static int ufs_scsi_emulate_mode_sense(UfsSCSIReq *r, uint8_t *outbuf)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param = 0;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    if (!dbd) {
        return -1;
    }

    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_ufs_scsi_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                      10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (!blk_is_writable(lu->qdev.conf.blk)) {
        dev_specific_param |= 0x80; /* Readonly. */
    }

    p[2] = 0; /* Medium type. */
    p[3] = dev_specific_param;
    p[6] = p[7] = 0; /* Block descriptor length. */
    p += 8;

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(lu, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(lu, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    outbuf[0] = ((buflen - 2) >> 8) & 0xff;
    outbuf[1] = (buflen - 2) & 0xff;
    return buflen;
}
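
/*
 * The completion and error-handling helpers below run in the AioContext of
 * the backing BlockBackend: the AIO callbacks acquire it before touching
 * accounting or request state and release it when done.
 */
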
/*
 * scsi_handle_rw_error has two return values. False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request. Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(UfsSCSIReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status. */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings. Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense && scsi_sense_buf_is_guest_recoverable(
                             r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(lu->qdev.conf.blk, is_read, error);
        blk_error_action(lu->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
        }
        if (!req_has_sense && status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}

static bool ufs_scsi_req_check_error(UfsSCSIReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0) {
        return scsi_handle_rw_error(r, ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ufs_scsi_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
    scsi_req_unref(&r->req);
}
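
/*
 * Synchronously emulate commands that do not touch user data. The return
 * value follows the SCSIReqOps send_command convention: positive for a
 * data-in transfer, negative for data-out, and 0 when the request has
 * already been completed.
 */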
static int32_t ufs_scsi_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
    uint32_t last_block = 0;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE_10:
    case START_STOP:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(lu->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places. So, do not allow CDBs with a very large ALLOCATION
     * LENGTH. The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen);
    }

    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(lu->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = ufs_scsi_emulate_inquiry(req, outbuf, r->buflen);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE_10:
        buflen = ufs_scsi_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero. */
        memset(outbuf, 0, 8);
        if (lu->qdev.max_lba > 0) {
            last_block = lu->qdev.max_lba - 1;
        }
        outbuf[0] = (last_block >> 24) & 0xff;
        outbuf[1] = (last_block >> 16) & 0xff;
        outbuf[2] = (last_block >> 8) & 0xff;
        outbuf[3] = last_block & 0xff;
        outbuf[4] = (lu->qdev.blocksize >> 24) & 0xff;
        outbuf[5] = (lu->qdev.blocksize >> 16) & 0xff;
        outbuf[6] = (lu->qdev.blocksize >> 8) & 0xff;
        outbuf[7] = lu->qdev.blocksize & 0xff;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE". */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case VERIFY_10:
        trace_ufs_scsi_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            trace_ufs_scsi_emulate_command_SAI_16();
            memset(outbuf, 0, req->cmd.xfer);

            if (lu->qdev.max_lba > 0) {
                last_block = lu->qdev.max_lba - 1;
            }
            outbuf[0] = 0;
            outbuf[1] = 0;
            outbuf[2] = 0;
            outbuf[3] = 0;
            outbuf[4] = (last_block >> 24) & 0xff;
            outbuf[5] = (last_block >> 16) & 0xff;
            outbuf[6] = (last_block >> 8) & 0xff;
            outbuf[7] = last_block & 0xff;
            outbuf[8] = (lu->qdev.blocksize >> 24) & 0xff;
            outbuf[9] = (lu->qdev.blocksize >> 16) & 0xff;
            outbuf[10] = (lu->qdev.blocksize >> 8) & 0xff;
            outbuf[11] = lu->qdev.blocksize & 0xff;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&lu->qdev.conf);

            if (lu->unit_desc.provisioning_type == 2 ||
                lu->unit_desc.provisioning_type == 3) {
                outbuf[14] = 0x80;
            }
            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        trace_ufs_scsi_emulate_command_SAI_unsupported();
        goto illegal_request;
    case MODE_SELECT_10:
        trace_ufs_scsi_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
        break;
    case START_STOP:
        /*
         * TODO: START_STOP is not yet implemented. It always returns success.
         * Revisit it when ufs power management is implemented.
         */
        trace_ufs_scsi_emulate_command_START_STOP();
        break;
    case FORMAT_UNIT:
        trace_ufs_scsi_emulate_command_FORMAT_UNIT();
        break;
    case SEND_DIAGNOSTIC:
        trace_ufs_scsi_emulate_command_SEND_DIAGNOSTIC();
        break;
    default:
        trace_ufs_scsi_emulate_command_UNKNOWN(buf[0],
                                               scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;
}
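
/*
 * Hand the buffer prepared by ufs_scsi_emulate_command() to the guest in a
 * single scsi_req_data() call; the follow-up invocation with an empty iovec
 * completes the request.
 */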
static void ufs_scsi_emulate_read_data(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_ufs_scsi_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE. */
    scsi_req_complete(&r->req, GOOD);
}

static int ufs_scsi_check_mode_select(UfsLu *lu, int page, uint8_t *inbuf,
                                      int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /*
     * The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
    if (page == MODE_PAGE_ALLS) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(lu, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(lu, page, &p, 1);
    assert(changeable_len == len);

    /*
     * Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}
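
/*
 * Apply the parameters of a validated MODE SELECT page. Only the WCE bit
 * of the caching page is reported changeable by mode_sense_page(), so it
 * is the only control that can actually be modified here.
 */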
static void ufs_scsi_apply_mode_select(UfsLu *lu, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(lu->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

static int mode_select_pages(UfsSCSIReq *r, uint8_t *p, int len, bool change)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    while (len > 0) {
        int page, page_len;

        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            goto invalid_param;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (ufs_scsi_check_mode_select(lu, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            ufs_scsi_apply_mode_select(lu, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void ufs_scsi_emulate_mode_select(UfsSCSIReq *r, uint8_t *inbuf)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    int hdr_len = 8;
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0. */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = lduw_be_p(&p[6]);
    if (bd_len != 0) {
        goto invalid_param;
    }

    len -= hdr_len;
    p += hdr_len;

    /* Ensure no change is made if there is an error! */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }

    if (!blk_enable_write_cache(lu->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

/* block_num and nb_blocks are expected to be in qdev blocksize */
static inline bool check_lba_range(UfsLu *lu, uint64_t block_num,
                                   uint32_t nb_blocks)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * block. The second line tests that the last accessed block is in
     * range.
     *
     * Careful, the computations should not underflow for nb_blocks == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (block_num <= block_num + nb_blocks &&
            block_num + nb_blocks <= lu->qdev.max_lba + 1);
}
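
/*
 * Example of the overflow guard above: block_num == UINT64_MAX with
 * nb_blocks == 1 wraps block_num + nb_blocks around to 0, so the first
 * comparison fails and the access is rejected before the range test.
 */
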
static void ufs_scsi_emulate_write_data(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_ufs_scsi_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE. */
        ufs_scsi_emulate_mode_select(r, r->iov.iov_base);
        break;
    default:
        abort();
    }
}

/* Return a pointer to the data buffer. */
static uint8_t *ufs_scsi_get_buf(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

static int32_t ufs_scsi_dma_command(SCSIRequest *req, uint8_t *buf)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(lu->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
        trace_ufs_scsi_dma_command_READ(r->req.cmd.lba, len);
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(lu, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
        trace_ufs_scsi_dma_command_WRITE(r->req.cmd.lba, len);
        if (!blk_is_writable(lu->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(lu, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = ((r->req.cmd.buf[1] & 8) != 0);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}

static void scsi_write_do_fua(UfsSCSIReq *r)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(UfsSCSIReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (ufs_scsi_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}
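
/*
 * AIO completion for sg-list DMA transfers: settle the accounting here,
 * then let scsi_dma_complete_noio() finish the request (FUA flush for
 * writes, completion for reads).
 */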
static void scsi_dma_complete(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
}

static BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    UfsSCSIReq *r = opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    return blk_aio_preadv(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static void scsi_init_iovec(UfsSCSIReq *r, size_t size)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_read_complete_noio(UfsSCSIReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (ufs_scsi_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

static void scsi_read_complete(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    trace_ufs_scsi_read_data_count(r->sector_count);
    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
        trace_ufs_scsi_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
}

/* Actually issue a read to the block device. */
static void scsi_do_read(UfsSCSIReq *r, int ret)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    if (ufs_scsi_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(
            blk_get_aio_context(lu->qdev.conf.blk), r->req.sg,
            r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_readv, r,
            scsi_dma_complete, r, DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = scsi_dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}
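
/* Completion of the pre-read flush issued when FUA emulation is needed. */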
static void scsi_do_read_cb(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    bool first;

    trace_ufs_scsi_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE. */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_ufs_scsi_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}
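
/*
 * The write path mirrors the read path above: data moves in
 * SCSI_DMA_BUF_SIZE chunks, and scsi_write_do_fua() issues a final flush
 * when FUA emulation is required.
 */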
static void scsi_write_complete_noio(UfsSCSIReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (ufs_scsi_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_ufs_scsi_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
}

static BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                                   BlockCompletionFunc *cb, void *cb_opaque,
                                   void *opaque)
{
    UfsSCSIReq *r = opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    return blk_aio_pwritev(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}
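
/*
 * For the non-sg path, scsi_write_data() is entered once with an empty
 * iovec to request the first chunk from the guest, then again for each
 * chunk actually received.
 */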
static void scsi_write_data(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_ufs_scsi_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time. Ask the driver to send us more data. */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.sg) {
        dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg,
                       BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(
            blk_get_aio_context(lu->qdev.conf.blk), r->req.sg,
            r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_writev, r,
            scsi_dma_complete, r, DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = scsi_dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

static const SCSIReqOps ufs_scsi_emulate_reqops = {
    .size = sizeof(UfsSCSIReq),
    .free_req = ufs_scsi_free_request,
    .send_command = ufs_scsi_emulate_command,
    .read_data = ufs_scsi_emulate_read_data,
    .write_data = ufs_scsi_emulate_write_data,
    .get_buf = ufs_scsi_get_buf,
};

static const SCSIReqOps ufs_scsi_dma_reqops = {
    .size = sizeof(UfsSCSIReq),
    .free_req = ufs_scsi_free_request,
    .send_command = ufs_scsi_dma_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = ufs_scsi_get_buf,
};

/*
 * The following commands are not yet supported:
 * PRE_FETCH(10),
 * UNMAP,
 * WRITE_BUFFER, READ_BUFFER,
 * SECURITY_PROTOCOL_IN, SECURITY_PROTOCOL_OUT
 */
static const SCSIReqOps *const ufs_scsi_reqops_dispatch[256] = {
    [TEST_UNIT_READY] = &ufs_scsi_emulate_reqops,
    [INQUIRY] = &ufs_scsi_emulate_reqops,
    [MODE_SENSE_10] = &ufs_scsi_emulate_reqops,
    [START_STOP] = &ufs_scsi_emulate_reqops,
    [READ_CAPACITY_10] = &ufs_scsi_emulate_reqops,
    [REQUEST_SENSE] = &ufs_scsi_emulate_reqops,
    [SYNCHRONIZE_CACHE] = &ufs_scsi_emulate_reqops,
    [MODE_SELECT_10] = &ufs_scsi_emulate_reqops,
    [VERIFY_10] = &ufs_scsi_emulate_reqops,
    [FORMAT_UNIT] = &ufs_scsi_emulate_reqops,
    [SERVICE_ACTION_IN_16] = &ufs_scsi_emulate_reqops,
    [SEND_DIAGNOSTIC] = &ufs_scsi_emulate_reqops,

    [READ_6] = &ufs_scsi_dma_reqops,
    [READ_10] = &ufs_scsi_dma_reqops,
    [WRITE_6] = &ufs_scsi_dma_reqops,
    [WRITE_10] = &ufs_scsi_dma_reqops,
};

static SCSIRequest *scsi_new_request(SCSIDevice *dev, uint32_t tag,
                                     uint32_t lun, uint8_t *buf,
                                     void *hba_private)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = ufs_scsi_reqops_dispatch[command];
    if (!ops) {
        ops = &ufs_scsi_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &lu->qdev, tag, lun, hba_private);

    return req;
}

static Property ufs_lu_props[] = {
    DEFINE_PROP_DRIVE("drive", UfsLu, qdev.conf.blk),
    DEFINE_PROP_END_OF_LIST(),
};

static bool ufs_lu_brdv_init(UfsLu *lu, Error **errp)
{
    SCSIDevice *dev = &lu->qdev;
    bool read_only;

    if (!lu->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return false;
    }

    if (!blkconf_blocksizes(&lu->qdev.conf, errp)) {
        return false;
    }

    if (blk_get_aio_context(lu->qdev.conf.blk) != qemu_get_aio_context() &&
        !lu->qdev.hba_supports_iothread) {
        error_setg(errp, "HBA does not support iothreads");
        return false;
    }

    read_only = !blk_supports_write_perm(lu->qdev.conf.blk);

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return false;
    }

    if (blk_is_sg(lu->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return false;
    }

    blk_iostatus_enable(lu->qdev.conf.blk);
    return true;
}
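
/*
 * Register the LU with the controller and grow the raw device capacity
 * advertised in the geometry descriptor, counted in units of
 * 1 << UFS_GEOMETRY_CAPACITY_SHIFT bytes.
 */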
static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp)
{
    BlockBackend *blk = lu->qdev.conf.blk;
    int64_t brdv_len = blk_getlength(blk);
    uint64_t raw_dev_cap =
        be64_to_cpu(u->geometry_desc.total_raw_device_capacity);

    if (u->device_desc.number_lu >= UFS_MAX_LUS) {
        error_setg(errp, "ufs host controller has too many logical units.");
        return false;
    }

    if (u->lus[lu->lun] != NULL) {
        error_setg(errp, "ufs logical unit %d already exists.", lu->lun);
        return false;
    }

    u->lus[lu->lun] = lu;
    u->device_desc.number_lu++;
    raw_dev_cap += (brdv_len >> UFS_GEOMETRY_CAPACITY_SHIFT);
    u->geometry_desc.total_raw_device_capacity = cpu_to_be64(raw_dev_cap);
    return true;
}

static inline uint8_t ufs_log2(uint64_t input)
{
    int log = 0;
    while (input >>= 1) {
        log++;
    }
    return log;
}

static void ufs_init_lu(UfsLu *lu)
{
    BlockBackend *blk = lu->qdev.conf.blk;
    int64_t brdv_len = blk_getlength(blk);

    lu->lun = lu->qdev.lun;
    memset(&lu->unit_desc, 0, sizeof(lu->unit_desc));
    lu->unit_desc.length = sizeof(UnitDescriptor);
    lu->unit_desc.descriptor_idn = UFS_QUERY_DESC_IDN_UNIT;
    lu->unit_desc.lu_enable = 0x01;
    lu->unit_desc.logical_block_size = ufs_log2(lu->qdev.blocksize);
    lu->unit_desc.unit_index = lu->qdev.lun;
    lu->unit_desc.logical_block_count =
        cpu_to_be64(brdv_len / (1 << lu->unit_desc.logical_block_size));
}

static bool ufs_lu_check_constraints(UfsLu *lu, Error **errp)
{
    if (!lu->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return false;
    }

    if (lu->qdev.channel != 0) {
        error_setg(errp, "ufs logical unit does not support channel");
        return false;
    }

    if (lu->qdev.lun >= UFS_MAX_LUS) {
        error_setg(errp, "lun must be between 0 and %d", UFS_MAX_LUS - 1);
        return false;
    }

    return true;
}

static void ufs_lu_realize(SCSIDevice *dev, Error **errp)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
    BusState *s = qdev_get_parent_bus(&dev->qdev);
    UfsHc *u = UFS(s->parent);
    AioContext *ctx = NULL;
    uint64_t nb_sectors, nb_blocks;

    if (!ufs_lu_check_constraints(lu, errp)) {
        return;
    }

    ctx = blk_get_aio_context(lu->qdev.conf.blk);
    aio_context_acquire(ctx);
    if (!blkconf_blocksizes(&lu->qdev.conf, errp)) {
        goto out;
    }

    lu->qdev.blocksize = UFS_BLOCK_SIZE;
    blk_get_geometry(lu->qdev.conf.blk, &nb_sectors);
    nb_blocks = nb_sectors / (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
    if (nb_blocks > UINT32_MAX) {
        nb_blocks = UINT32_MAX;
    }
    lu->qdev.max_lba = nb_blocks;
    lu->qdev.type = TYPE_DISK;

    ufs_init_lu(lu);
    if (!ufs_add_lu(u, lu, errp)) {
        goto out;
    }

    ufs_lu_brdv_init(lu, errp);

out:
    aio_context_release(ctx);
}
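
/* Drain any in-flight block-layer requests before the LU goes away. */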
static void ufs_lu_unrealize(SCSIDevice *dev)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);

    blk_drain(lu->qdev.conf.blk);
}

static void ufs_wlu_realize(DeviceState *qdev, Error **errp)
{
    UfsWLu *wlu = UFSWLU(qdev);
    SCSIDevice *dev = &wlu->qdev;

    if (!is_wlun(dev->lun)) {
        error_setg(errp, "not a well-known logical unit number");
        return;
    }

    QTAILQ_INIT(&dev->requests);
}

static void ufs_lu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc);

    sc->realize = ufs_lu_realize;
    sc->unrealize = ufs_lu_unrealize;
    sc->alloc_req = scsi_new_request;
    dc->bus_type = TYPE_UFS_BUS;
    device_class_set_props(dc, ufs_lu_props);
    dc->desc = "Virtual UFS logical unit";
}

static void ufs_wlu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc);

    /*
     * The realize() function of TYPE_SCSI_DEVICE causes a segmentation fault
     * if a block drive does not exist. Define a new realize function for
     * well-known LUs that do not have a block drive.
     */
    dc->realize = ufs_wlu_realize;
    sc->alloc_req = scsi_new_request;
    dc->bus_type = TYPE_UFS_BUS;
    dc->desc = "Virtual UFS well-known logical unit";
}

static const TypeInfo ufs_lu_info = {
    .name = TYPE_UFS_LU,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = ufs_lu_class_init,
    .instance_size = sizeof(UfsLu),
};

static const TypeInfo ufs_wlu_info = {
    .name = TYPE_UFS_WLU,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = ufs_wlu_class_init,
    .instance_size = sizeof(UfsWLu),
};

static void ufs_lu_register_types(void)
{
    type_register_static(&ufs_lu_info);
    type_register_static(&ufs_wlu_info);
}

type_init(ufs_lu_register_types)