/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name = "tcm_loop_bus",
	.match = pseudo_lld_bus_match,
	.probe = tcm_loop_driver_probe,
	.remove = tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name = "tcm_loop",
	.bus = &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) for the given TPG/LUN and wait for its completion.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);

out:
	return ret;

release:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}

static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

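/*
 * Called from SCSI EH process context to perform a target reset by simply
 * forcing the TPG transport status back to TCM_TRANSPORT_ONLINE.
 */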
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	blk_queue_flag_set(QUEUE_FLAG_BIDI, sd->request_queue);
	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info = tcm_loop_show_info,
	.proc_name = "tcm_loopback",
	.name = "TCM_Loopback",
	.queuecommand = tcm_loop_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.eh_abort_handler = tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue = 1024,
	.this_id = -1,
	.sg_tablesize = 256,
	.cmd_per_lun = 1024,
	.max_sectors = 0xFFFF,
	.dma_boundary = PAGE_SIZE - 1,
	.slave_alloc = tcm_loop_slave_alloc,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};

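/*
 * Bus probe callback: allocate the emulated struct Scsi_Host for this
 * tcm_loop HBA, enable DIF/DIX protection modes, and register it with the
 * SCSI midlayer via scsi_add_host().
 */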
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			     sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning 1 here allows target_core_mod to generate a struct se_node_acl
 * based upon the incoming fabric-dependent SCSI Initiator Port.
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Linux/SCSI has already sent down a struct scsi_cmnd with
	 * sc->sc_data_direction of DMA_TO_DEVICE, and its scatterlist
	 * memory has already been mapped into the struct se_cmd.
	 *
	 * We now tell TCM to add this WRITE CDB directly to the TCM storage
	 * object's execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
	struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
	struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}

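/*
 * Establish the emulated I_T nexus: allocate a struct tcm_loop_nexus and
 * create the backing se_session for the passed initiator port name via
 * target_setup_session().
 */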
static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
	if (!tl_nexus)
		return -ENOMEM;

	tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	target_remove_session(se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

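/*
 * From user space, the I_T nexus for a TPG is created by writing an
 * initiator port WWN into the "nexus" attribute below, e.g. (assuming the
 * usual configfs mount point and example NAA WWNs):
 *
 *   echo naa.60014052cc9d8f88 > \
 *	/sys/kernel/config/target/loopback/naa.6001405088f5e0f9/tpgt_1/nexus
 */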
static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
						     const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

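/*
 * configfs WWN group creation: parse the directory name prefix ("naa.",
 * "fc." or "iqn.") to pick the emulated protocol identifier, then register
 * a virtual Scsi_Host for this HBA via tcm_loop_setup_hba_bus().
 */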
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register() on tl_hba->dev via tcm_loop_setup_hba_bus();
	 * the emulated struct Scsi_Host is allocated and set up at tl_hba->sh
	 * by the tcm_loop_driver_probe() callback.
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev;
	 * tcm_loop_release_adapter() will then release *tl_hba.
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

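/*
 * Fabric ops vector for the "loopback" fabric, registered with the target
 * core via target_register_template() in tcm_loop_fabric_init() below.
 */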
static const struct target_core_fabric_ops loop_ops = {
	.module = THIS_MODULE,
	.fabric_name = "loopback",
	.tpg_get_wwn = tcm_loop_get_endpoint_wwn,
	.tpg_get_tag = tcm_loop_get_tag,
	.tpg_check_demo_mode = tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index = tcm_loop_get_inst_index,
	.check_stop_free = tcm_loop_check_stop_free,
	.release_cmd = tcm_loop_release_cmd,
	.sess_get_index = tcm_loop_sess_get_index,
	.write_pending = tcm_loop_write_pending,
	.write_pending_status = tcm_loop_write_pending_status,
	.set_default_node_attributes = tcm_loop_set_default_node_attributes,
	.get_cmd_state = tcm_loop_get_cmd_state,
	.queue_data_in = tcm_loop_queue_data_in,
	.queue_status = tcm_loop_queue_status,
	.queue_tm_rsp = tcm_loop_queue_tm_rsp,
	.aborted_task = tcm_loop_aborted_task,
	.fabric_make_wwn = tcm_loop_make_scsi_hba,
	.fabric_drop_wwn = tcm_loop_drop_scsi_hba,
	.fabric_make_tpg = tcm_loop_make_naa_tpg,
	.fabric_drop_tpg = tcm_loop_drop_naa_tpg,
	.fabric_post_link = tcm_loop_port_link,
	.fabric_pre_unlink = tcm_loop_port_unlink,
	.tfc_wwn_attrs = tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
};

static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);