// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_pscsi.c
 *
 * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_alua.h"
#include "target_core_internal.h"
#include "target_core_pscsi.h"

/* Convert a generic se_device into the embedding pSCSI backend structure. */
static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
{
	return container_of(dev, struct pscsi_dev_virt, dev);
}

static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, blk_status_t);

/* pscsi_attach_hba():
 *
 * 	pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host
 * 	from the passed SCSI Host ID.
 */
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct pscsi_hba_virt *phv;

	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
	if (!phv) {
		pr_err("Unable to allocate struct pscsi_hba_virt\n");
		return -ENOMEM;
	}
	/* Start in virtual-host mode; PHV_LLD_SCSI_HOST_NO is opted into later */
	phv->phv_host_id = host_id;
	phv->phv_mode = PHV_VIRTUAL_HOST_ID;

	hba->hba_ptr = phv;

	pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		PSCSI_VERSION, TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
	       hba->hba_id);

	return 0;
}

/*
 * Drop the held Scsi_Host reference (if operating in LLD passthrough mode)
 * and free the virtual HBA allocated by pscsi_attach_hba().
 */
static void pscsi_detach_hba(struct se_hba *hba)
{
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	struct Scsi_Host *scsi_host = phv->phv_lld_host;

	if (scsi_host) {
		scsi_host_put(scsi_host);

		pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
			" Generic Target Core\n", hba->hba_id,
			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
			"Unknown");
	} else
		pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
			" from Generic Target Core\n", hba->hba_id);

	kfree(phv);
	hba->hba_ptr = NULL;
}

/*
 * Toggle passthrough (phba) mode for this virtual HBA.
 *
 * mode_flag == 0: release the pinned Scsi_Host and revert to
 * PHV_VIRTUAL_HOST_ID; returns 0.  Non-zero: look up the Scsi_Host by the
 * original phv_host_id, take a reference, and switch to
 * PHV_LLD_SCSI_HOST_NO; returns 1 on success or -EINVAL on lookup failure.
 */
static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	/*
	 * Release the struct Scsi_Host
	 */
	if (!mode_flag) {
		if (!sh)
			return 0;

		phv->phv_lld_host = NULL;
		phv->phv_mode = PHV_VIRTUAL_HOST_ID;

		pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
			" %s\n", hba->hba_id, (sh->hostt->name) ?
			(sh->hostt->name) : "Unknown");

		scsi_host_put(sh);
		return 0;
	}
	/*
	 * Otherwise, locate struct Scsi_Host from the original passed
	 * pSCSI Host ID and enable for phba mode
	 */
	sh = scsi_host_lookup(phv->phv_host_id);
	if (!sh) {
		pr_err("pSCSI: Unable to locate SCSI Host for"
			" phv_host_id: %d\n", phv->phv_host_id);
		return -EINVAL;
	}

	phv->phv_lld_host = sh;
	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;

	pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");

	return 1;
}

/*
 * Issue a 12-byte MODE_SENSE to read the tape's current block size into
 * sdev->sector_size (block descriptor bytes 9-11).  On any failure, or if
 * the device reports zero, fall back to a default of 1024.
 */
static void pscsi_tape_read_blocksize(struct se_device *dev,
		struct scsi_device *sdev)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
	int ret;

	buf = kzalloc(12, GFP_KERNEL);
	if (!buf)
		goto out_free;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = MODE_SENSE;
	cdb[4] = 0x0c; /* 12 bytes */

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
			HZ, 1, NULL);
	if (ret)
		goto out_free;

	/*
	 * If MODE_SENSE still returns zero, set the default value to 1024.
	 */
	sdev->sector_size = get_unaligned_be24(&buf[9]);
out_free:
	if (!sdev->sector_size)
		sdev->sector_size = 1024;

	kfree(buf);
}

/* Copy the LLD's cached standard INQUIRY strings into the target WWN. */
static void
pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
{
	if (sdev->inquiry_len < INQUIRY_LEN)
		return;
	/*
	 * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
	 */
	BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
	snprintf(wwn->vendor, sizeof(wwn->vendor),
		 "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
	BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
	snprintf(wwn->model, sizeof(wwn->model),
		 "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
	BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
	snprintf(wwn->revision, sizeof(wwn->revision),
		 "%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
}

/*
 * Issue INQUIRY for VPD page 0x80 (Unit Serial Number) and stash the result
 * in wwn->unit_serial.  Returns 0 on success, -ENOMEM on allocation failure,
 * or -EPERM when the command itself fails.
 */
static int
pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
	int ret;

	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01; /* Query VPD */
	cdb[2] = 0x80; /* Unit Serial Number */
	put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
			      INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
	if (ret)
		goto out_free;

	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);

	wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;

	kfree(buf);
	return 0;

out_free:
	kfree(buf);
	return -EPERM;
}

/*
 * Issue INQUIRY for VPD page 0x83 (Device Identification) and walk the
 * returned identification descriptors, registering each usable identifier
 * on wwn->t10_vpd_list.  Best effort: failures are silently ignored.
 */
static void
pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
		struct t10_wwn *wwn)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
	int ident_len, page_len, off = 4, ret;
	struct t10_vpd *vpd;

	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
	if (!buf)
		return;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01; /* Query VPD */
	cdb[2] = 0x83; /* Device Identifier */
	put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
			      INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
			      NULL, HZ, 1, NULL);
	if (ret)
		goto out;

	page_len = get_unaligned_be16(&buf[2]);
	while (page_len > 0) {
		/* Grab a pointer to the Identification descriptor */
		page_83 = &buf[off];
		ident_len = page_83[3];
		if (!ident_len) {
			pr_err("page_83[3]: identifier"
					" length zero!\n");
			break;
		}
		pr_debug("T10 VPD Identifier Length: %d\n", ident_len);

		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
		if (!vpd) {
			pr_err("Unable to allocate memory for"
					" struct t10_vpd\n");
			goto out;
		}
		INIT_LIST_HEAD(&vpd->vpd_list);

		transport_set_vpd_proto_id(vpd, page_83);
		transport_set_vpd_assoc(vpd, page_83);

		/* Skip (and free) descriptors with unusable type/identifier */
		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
			off += (ident_len + 4);
			page_len -= (ident_len + 4);
			kfree(vpd);
			continue;
		}
		if (transport_set_vpd_ident(vpd, page_83) < 0) {
			off += (ident_len + 4);
			page_len -= (ident_len + 4);
			kfree(vpd);
			continue;
		}

		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
		off += (ident_len + 4);
		page_len -= (ident_len + 4);
	}

out:
	kfree(buf);
}

/*
 * Bind the located scsi_device to this pSCSI se_device, seed the hardware
 * attributes (block size, max sectors, queue depth) from the LLD limits,
 * and pull INQUIRY/VPD identification data into se_dev->t10_wwn.
 */
static int pscsi_add_device_to_list(struct se_device *dev,
		struct scsi_device *sd)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct request_queue *q = sd->request_queue;

	pdv->pdv_sd = sd;

	if (!sd->queue_depth) {
		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;

		pr_err("Set broken SCSI Device %d:%d:%llu"
			" queue_depth to %d\n", sd->channel, sd->id,
			sd->lun, sd->queue_depth);
	}

	dev->dev_attrib.hw_block_size =
		min_not_zero((int)sd->sector_size, 512);
	dev->dev_attrib.hw_max_sectors =
		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
	dev->dev_attrib.hw_queue_depth = sd->queue_depth;

	/*
	 * Setup our standard INQUIRY info into se_dev->t10_wwn
	 */
	pscsi_set_inquiry_info(sd, &dev->t10_wwn);

	/*
	 * Locate VPD WWN Information used for various purposes within
	 * the Storage Engine.
	 */
	if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
		/*
		 * If VPD Unit Serial returned GOOD status, try
		 * VPD Device Identification page (0x83).
		 */
		pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
	}

	/*
	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
	 */
	if (sd->type == TYPE_TAPE) {
		pscsi_tape_read_blocksize(dev, sd);
		dev->dev_attrib.hw_block_size = sd->sector_size;
	}
	return 0;
}

/* Allocate the backend-private pscsi_dev_virt and hand back its se_device. */
static struct se_device *pscsi_alloc_device(struct se_hba *hba,
		const char *name)
{
	struct pscsi_dev_virt *pdv;

	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
	if (!pdv) {
		pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
		return NULL;
	}

	pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
	return &pdv->dev;
}

/*
 * Called with struct Scsi_Host->host_lock held; releases it on all paths
 * before doing anything that may sleep.
 */
static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
	__releases(sh->host_lock)
{
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct Scsi_Host *sh = sd->host;
	struct block_device *bd;
	int ret;

	if (scsi_device_get(sd)) {
		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
			sh->host_no, sd->channel, sd->id, sd->lun);
		spin_unlock_irq(sh->host_lock);
		return -EIO;
	}
	spin_unlock_irq(sh->host_lock);
	/*
	 * Claim exclusive struct block_device access to struct scsi_device
	 * for TYPE_DISK and TYPE_ZBC using supplied udev_path
	 */
	bd = blkdev_get_by_path(dev->udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
	if (IS_ERR(bd)) {
		pr_err("pSCSI: blkdev_get_by_path() failed\n");
		scsi_device_put(sd);
		return PTR_ERR(bd);
	}
	pdv->pdv_bd = bd;

	ret = pscsi_add_device_to_list(dev, sd);
	if (ret) {
		/* Unwind both the exclusive bdev claim and the sdev ref */
		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
		scsi_device_put(sd);
		return ret;
	}

	pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
		phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
		sh->host_no, sd->channel, sd->id, sd->lun);
	return 0;
}

/*
 * Called with struct Scsi_Host->host_lock held; releases it on all paths.
 * Used for every device type other than TYPE_DISK/TYPE_ZBC, so no
 * block_device claim is taken.
 */
static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
	__releases(sh->host_lock)
{
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct Scsi_Host *sh = sd->host;
	int ret;

	if (scsi_device_get(sd)) {
		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
			sh->host_no, sd->channel, sd->id, sd->lun);
		spin_unlock_irq(sh->host_lock);
		return -EIO;
	}
	spin_unlock_irq(sh->host_lock);

	ret = pscsi_add_device_to_list(dev, sd);
	if (ret) {
		scsi_device_put(sd);
		return ret;
	}
	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
		sd->channel, sd->id, sd->lun);

	return 0;
}

/*
 * Bring a pSCSI se_device online: resolve the backing Scsi_Host (virtual
 * host ID, explicit scsi_host_id=, or legacy HBA passthrough mode), locate
 * the matching scsi_device by channel/target/LUN, and register it with
 * target_core_mod via the type-specific create helpers.
 */
static int pscsi_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct scsi_device *sd;
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	int legacy_mode_enable = 0;
	int ret;

	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
			" scsi_lun_id= parameters\n");
		return -EINVAL;
	}

	/*
	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
	 */
	if (!sh) {
		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
			pr_err("pSCSI: Unable to locate struct"
				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
			return -ENODEV;
		}
		/*
		 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
		 * reference, we enforce that udev_path has been set
		 */
		if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
			pr_err("pSCSI: udev_path attribute has not"
				" been set before ENABLE=1\n");
			return -EINVAL;
		}
		/*
		 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
		 * use the original TCM hba ID to reference Linux/SCSI Host No
		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
		 */
		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
			if (hba->dev_count) {
				pr_err("pSCSI: Unable to set hba_mode"
					" with active devices\n");
				return -EEXIST;
			}

			if (pscsi_pmode_enable_hba(hba, 1) != 1)
				return -ENODEV;

			legacy_mode_enable = 1;
			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
			sh = phv->phv_lld_host;
		} else {
			sh = scsi_host_lookup(pdv->pdv_host_id);
			if (!sh) {
				pr_err("pSCSI: Unable to locate"
					" pdv_host_id: %d\n", pdv->pdv_host_id);
				return -EINVAL;
			}
			pdv->pdv_lld_host = sh;
		}
	} else {
		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
			pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
				" struct Scsi_Host exists\n");
			return -EEXIST;
		}
	}

	spin_lock_irq(sh->host_lock);
	list_for_each_entry(sd, &sh->__devices, siblings) {
		if ((pdv->pdv_channel_id != sd->channel) ||
		    (pdv->pdv_target_id != sd->id) ||
		    (pdv->pdv_lun_id != sd->lun))
			continue;
		/*
		 * Functions will release the held struct scsi_host->host_lock
		 * before calling pscsi_add_device_to_list() to register
		 * struct scsi_device with target_core_mod.
		 */
		switch (sd->type) {
		case TYPE_DISK:
		case TYPE_ZBC:
			ret = pscsi_create_type_disk(dev, sd);
			break;
		default:
			ret = pscsi_create_type_nondisk(dev, sd);
			break;
		}

		if (ret) {
			/* Undo whichever host reference/mode we set up above */
			if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
				scsi_host_put(sh);
			else if (legacy_mode_enable) {
				pscsi_pmode_enable_hba(hba, 0);
				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
			}
			pdv->pdv_sd = NULL;
			return ret;
		}
		return 0;
	}
	spin_unlock_irq(sh->host_lock);

	pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);

	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
		scsi_host_put(sh);
	else if (legacy_mode_enable) {
		pscsi_pmode_enable_hba(hba, 0);
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
	}

	return -ENODEV;
}

/* RCU callback: actually free the backend structure after a grace period. */
static void pscsi_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);

	kfree(pdv);
}

/* Defer freeing of the se_device until after an RCU grace period. */
static void pscsi_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
}

/*
 * Tear down a configured pSCSI device: release the block_device claim,
 * drop any pinned Scsi_Host reference, and put the scsi_device.
 */
static void pscsi_destroy_device(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	if (sd) {
		/*
		 * Release exclusive pSCSI internal struct block_device claim for
		 * struct scsi_device with TYPE_DISK or TYPE_ZBC
		 * from pscsi_create_type_disk()
		 */
		if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
		    pdv->pdv_bd) {
			blkdev_put(pdv->pdv_bd,
				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
			pdv->pdv_bd = NULL;
		}
		/*
		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
		 * to struct Scsi_Host now.
		 */
		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
		    (phv->phv_lld_host != NULL))
			scsi_host_put(phv->phv_lld_host);
		else if (pdv->pdv_lld_host)
			scsi_host_put(pdv->pdv_lld_host);

		scsi_device_put(sd);

		pdv->pdv_sd = NULL;
	}
}

/*
 * Post-process a completed passthrough command: force the Write-Protect
 * bit in MODE_SENSE data for read-only LUNs, track the blocksize set by
 * MODE_SELECT on tapes, and copy back sense data on CHECK CONDITION.
 */
static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
			       unsigned char *req_sense)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
	struct scsi_device *sd = pdv->pdv_sd;
	struct pscsi_plugin_task *pt = cmd->priv;
	unsigned char *cdb;
	/*
	 * Special case for REPORT_LUNs handling where pscsi_plugin_task has
	 * not been allocated because TCM is handling the emulation directly.
	 */
	if (!pt)
		return;

	cdb = &pt->pscsi_cdb[0];
	/*
	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
	 * forced.
	 */
	if (!cmd->data_length)
		goto after_mode_sense;

	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
	    scsi_status == SAM_STAT_GOOD) {
		bool read_only = target_lun_is_rdonly(cmd);

		if (read_only) {
			unsigned char *buf;

			buf = transport_kmap_data_sg(cmd);
			if (!buf) {
				; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
			} else {
				/* Set the WP bit in the mode parameter header */
				if (cdb[0] == MODE_SENSE_10) {
					if (!(buf[3] & 0x80))
						buf[3] |= 0x80;
				} else {
					if (!(buf[2] & 0x80))
						buf[2] |= 0x80;
				}

				transport_kunmap_data_sg(cmd);
			}
		}
	}
after_mode_sense:

	if (sd->type != TYPE_TAPE || !cmd->data_length)
		goto after_mode_select;

	/*
	 * Hack to correctly obtain the initiator requested blocksize for
	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
	 * struct scsi_device->sector_size will not contain the correct value
	 * by default, so we go ahead and set it so
	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
	 * storage engine.
	 */
	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
	    scsi_status == SAM_STAT_GOOD) {
		unsigned char *buf;
		u16 bdl;
		u32 blocksize;

		buf = sg_virt(&cmd->t_data_sg[0]);
		if (!buf) {
			pr_err("Unable to get buf for scatterlist\n");
			goto after_mode_select;
		}

		/* Block descriptor length from the mode parameter header */
		if (cdb[0] == MODE_SELECT)
			bdl = buf[3];
		else
			bdl = get_unaligned_be16(&buf[6]);

		if (!bdl)
			goto after_mode_select;

		/* Blocksize from the first block descriptor */
		if (cdb[0] == MODE_SELECT)
			blocksize = get_unaligned_be24(&buf[9]);
		else
			blocksize = get_unaligned_be24(&buf[13]);

		sd->sector_size = blocksize;
	}
after_mode_select:

	if (scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(cmd, req_sense);

		/*
		 * check for TAPE device reads with
		 * FM/EOM/ILI set, so that we can get data
		 * back despite framework assumption that a
		 * check condition means there is no data
		 */
		if (sd->type == TYPE_TAPE &&
		    cmd->data_direction == DMA_FROM_DEVICE) {
			/*
			 * is sense data valid, fixed format,
			 * and have FM, EOM, or ILI set?
			 */
			if (req_sense[0] == 0xf0 &&	/* valid, fixed format */
			    req_sense[2] & 0xe0 &&	/* FM, EOM, or ILI */
			    (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
				pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
				cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
			}
		}
	}
}

/* configfs dev_params tokens parsed by pscsi_set_configfs_dev_params() */
enum {
	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
	Opt_scsi_lun_id, Opt_err
};

static match_table_t tokens = {
	{Opt_scsi_host_id, "scsi_host_id=%d"},
	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
	{Opt_scsi_target_id, "scsi_target_id=%d"},
	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
	{Opt_err, NULL}
};

/*
 * Parse the comma/newline-separated configfs parameter string and record
 * the requested SCSI host/channel/target/LUN IDs in the pscsi_dev_virt.
 * Returns count on success or a negative errno on a malformed/rejected
 * parameter.
 */
static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_scsi_host_id:
			/* scsi_host_id= conflicts with HBA passthrough mode */
			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
				pr_err("PSCSI[%d]: Unable to accept"
					" scsi_host_id while phv_mode =="
					" PHV_LLD_SCSI_HOST_NO\n",
					phv->phv_host_id);
				ret = -EINVAL;
				goto out;
			}
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			pdv->pdv_host_id = arg;
			pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
			break;
		case Opt_scsi_channel_id:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			pdv->pdv_channel_id = arg;
			pr_debug("PSCSI[%d]: Referencing SCSI Channel"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_channel_id);
			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
			break;
		case Opt_scsi_target_id:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			pdv->pdv_target_id = arg;
			pr_debug("PSCSI[%d]: Referencing SCSI Target"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_target_id);
			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
			break;
		case Opt_scsi_lun_id:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			pdv->pdv_lun_id = arg;
			pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
			pdv->pdv_flags |= PDF_HAS_LUN_ID;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

/* Format the current bus location and INQUIRY strings for configfs show. */
static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct scsi_device *sd = pdv->pdv_sd;
	unsigned char host_id[16];
	ssize_t bl;

	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
	else
		snprintf(host_id, 16, "PHBA Mode");

	bl = sprintf(b, "SCSI Device Bus Location:"
		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
		host_id);

	if (sd) {
		bl += sprintf(b + bl, " Vendor: %."
			__stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
		bl += sprintf(b + bl, " Model: %."
			__stringify(INQUIRY_MODEL_LEN) "s", sd->model);
		bl += sprintf(b + bl, " Rev: %."
			__stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
	}
	return bl;
}

/* bio completion: drop the reference taken at allocation in pscsi_get_bio() */
static void pscsi_bi_endio(struct bio *bio)
{
	bio_put(bio);
}

/*
 * Allocate a bio with up to nr_vecs segments via bio_kmalloc() and install
 * pscsi_bi_endio() as its completion (see the bio -> struct request comment
 * in block/blk-core.c).  Returns NULL on allocation failure.
 */
static inline struct bio *pscsi_get_bio(int nr_vecs)
{
	struct bio *bio;
	/*
	 * Use bio_kmalloc() following the comment for bio -> struct request
	 * in block/blk-core.c:blk_make_request()
	 */
	bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
	if (!bio) {
		pr_err("PSCSI: bio_kmalloc() failed\n");
		return NULL;
	}
	bio->bi_end_io = pscsi_bi_endio;

	return bio;
}

/*
 * Map the command's scatterlist onto one or more bios and append them to
 * the passthrough request.  Allocates a new bio whenever the current one
 * fills up; on failure, frees the pending bio and every bio already chained
 * on the request.
 */
static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		struct request *req)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
	struct bio *bio = NULL;
	struct page *page;
	struct scatterlist *sg;
	u32 data_len = cmd->data_length, i, len, bytes, off;
	int nr_pages = (cmd->data_length + sgl[0].offset +
			PAGE_SIZE - 1) >> PAGE_SHIFT;
	int nr_vecs = 0, rc;
	int rw = (cmd->data_direction == DMA_TO_DEVICE);

	BUG_ON(!cmd->data_length);

	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);

	for_each_sg(sgl, sg, sgl_nents, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
			page, len, off);

		/*
		 * We only have one page of data in each sg element,
		 * we can not cross a page boundary.
		 */
		if (off + len > PAGE_SIZE)
			goto fail;

		if (len > 0 && data_len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
new_bio:
				nr_vecs = bio_max_segs(nr_pages);
				/*
				 * Calls bio_kmalloc() and sets bio->bi_end_io()
				 */
				bio = pscsi_get_bio(nr_vecs);
				if (!bio)
					goto fail;

				if (rw)
					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

				pr_debug("PSCSI: Allocated bio: %p,"
					" dir: %s nr_vecs: %d\n", bio,
					(rw) ? "rw" : "r", nr_vecs);
			}

			pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
				" bio: %p page: %p len: %d off: %d\n", i, bio,
				page, len, off);

			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
					bio, page, bytes, off);
			pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
				bio_segments(bio), nr_vecs);
			if (rc != bytes) {
				/* Current bio is full: chain it and start anew */
				pr_debug("PSCSI: Reached bio->bi_vcnt max:"
					" %d i: %d bio: %p, allocating another"
					" bio\n", bio->bi_vcnt, i, bio);

				rc = blk_rq_append_bio(req, bio);
				if (rc) {
					pr_err("pSCSI: failed to append bio\n");
					goto fail;
				}

				/*
				 * Clear the pointer so that another bio will
				 * be allocated with pscsi_get_bio() above.
				 */
				bio = NULL;
				goto new_bio;
			}

			data_len -= bytes;
		}
	}

	if (bio) {
		rc = blk_rq_append_bio(req, bio);
		if (rc) {
			pr_err("pSCSI: failed to append bio\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* Free the not-yet-appended bio plus everything chained on req */
	if (bio)
		bio_put(bio);
	while (req->bio) {
		bio = req->bio;
		req->bio = bio->bi_next;
		bio_put(bio);
	}
	req->biotail = NULL;
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

/* Passthrough CDB parsing; BIDI commands are not supported by pSCSI. */
static sense_reason_t
pscsi_parse_cdb(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_BIDI)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
}

/*
 * Build and dispatch a passthrough request for cmd: copy the CDB into a
 * dynamically-sized pscsi_plugin_task, allocate a SCSI request, map the
 * data scatterlist, and hand the request to the block layer without
 * waiting; completion is handled by pscsi_req_done().
 */
static sense_reason_t
pscsi_execute_cmd(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
	struct pscsi_plugin_task *pt;
	struct request *req;
	sense_reason_t ret;

	/*
	 * Dynamically alloc cdb space, since it may be larger than
	 * TCM_MAX_COMMAND_SIZE
	 */
	pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
	if (!pt) {
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	cmd->priv = pt;

	memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
		scsi_command_size(cmd->t_task_cdb));

	req = scsi_alloc_request(pdv->pdv_sd->request_queue,
			cmd->data_direction == DMA_TO_DEVICE ?
			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto fail;
	}

	if (sgl) {
		ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
		if (ret)
			goto fail_put_request;
	}

	req->end_io = pscsi_req_done;
	req->end_io_data = cmd;
	scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
	scsi_req(req)->cmd = &pt->pscsi_cdb[0];
	if (pdv->pdv_sd->type == TYPE_DISK ||
	    pdv->pdv_sd->type == TYPE_ZBC)
		req->timeout = PS_TIMEOUT_DISK;
	else
		req->timeout = PS_TIMEOUT_OTHER;
	scsi_req(req)->retries = PS_RETRY;

	/*
	 * NOTE(review): pscsi_req_done is both assigned to req->end_io above
	 * and passed as the done callback here — confirm which one this
	 * tree's blk_execute_rq_nowait() signature actually consumes; one of
	 * the two is redundant.
	 */
	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
			pscsi_req_done);

	return 0;

fail_put_request:
	blk_mq_free_request(req);
fail:
	kfree(pt);
	return ret;
}

/* pscsi_get_device_type():
 *
 * 	Return the SCSI peripheral device type of the bound scsi_device,
 * 	or TYPE_NO_LUN when no device has been configured yet.
 */
static u32 pscsi_get_device_type(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct scsi_device *sd = pdv->pdv_sd;

	return (sd) ? sd->type : TYPE_NO_LUN;
}

/* Report capacity in sectors from the claimed block_device, if any. */
static sector_t pscsi_get_blocks(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);

	if (pdv->pdv_bd)
		return bdev_nr_sectors(pdv->pdv_bd);
	return 0;
}

/*
 * Request completion callback: post-process status/sense via
 * pscsi_complete_cmd(), complete the se_cmd towards the fabric, then free
 * the request and the pscsi_plugin_task allocated in pscsi_execute_cmd().
 */
static void pscsi_req_done(struct request *req, blk_status_t status)
{
	struct se_cmd *cmd = req->end_io_data;
	struct pscsi_plugin_task *pt = cmd->priv;
	int result = scsi_req(req)->result;
	enum sam_status scsi_status = result & 0xff;

	if (scsi_status != SAM_STAT_GOOD) {
		pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
			result);
	}

	pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense);

	switch (host_byte(result)) {
	case DID_OK:
		target_complete_cmd_with_length(cmd, scsi_status,
			cmd->data_length - scsi_req(req)->resid_len);
		break;
	default:
		pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
			result);
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		break;
	}

	blk_mq_free_request(req);
	kfree(pt);
}

static const struct target_backend_ops pscsi_ops = {
	.name			= "pscsi",
	.owner			= THIS_MODULE,
	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
				  TRANSPORT_FLAG_PASSTHROUGH_ALUA |
				  TRANSPORT_FLAG_PASSTHROUGH_PGR,
	.attach_hba		= pscsi_attach_hba,
	.detach_hba		= pscsi_detach_hba,
	.pmode_enable_hba	= pscsi_pmode_enable_hba,
	.alloc_device		= pscsi_alloc_device,
	.configure_device	= pscsi_configure_device,
	.destroy_device		= pscsi_destroy_device,
	.free_device		= pscsi_free_device,
	.parse_cdb		= pscsi_parse_cdb,
	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
	.get_device_type	= pscsi_get_device_type,
	.get_blocks		= pscsi_get_blocks,
	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
};

static int __init pscsi_module_init(void)
{
	return transport_backend_register(&pscsi_ops);
}

static void __exit pscsi_module_exit(void)
{
	target_backend_unregister(&pscsi_ops);
}

MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(pscsi_module_init);
module_exit(pscsi_module_exit);