/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name = "tcm_loop_bus",
	.match = pseudo_lld_bus_match,
	.probe = tcm_loop_driver_probe,
	.remove = tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name = "tcm_loop",
	.bus = &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

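	/*
	 * Descriptive note: the SCSI target ID selects the TPG here, because
	 * tl_hba_tpgs[] is indexed by TPGT. This mirrors the
	 * scsi_add_device() call in tcm_loop_port_link() below, which uses
	 * the TPGT as the SCSI target ID.
	 */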
	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}

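/*
 * Summary of the I/O path implemented in this file (descriptive only):
 * tcm_loop_queuecommand() allocates a struct tcm_loop_cmd from
 * tcm_loop_cmd_cache and defers to tcm_loop_submission_work() above, which
 * resolves the tl_tpg/tl_nexus from the Scsi_Host and target ID and hands the
 * CDB plus scatterlists to target_core via target_submit_cmd_map_sgls().
 * Completion travels back through tcm_loop_queue_data_in() or
 * tcm_loop_queue_status(), which call sc->scsi_done().
 */
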
/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from the SCSI EH handlers below to issue a TMR (TMR_ABORT_TASK or
 * TMR_LUN_RESET) against the active I_T nexus of a struct scsi_device.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return ret;
	}

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);

out:
	return ret;

release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 0);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}

static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

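/*
 * Note that the target reset handler below does not issue a TMR at all; it
 * simply forces the TPG back to TCM_TRANSPORT_ONLINE so that commands stop
 * being failed with DID_TRANSPORT_DISRUPTED in tcm_loop_submission_work().
 */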
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info = tcm_loop_show_info,
	.proc_name = "tcm_loopback",
	.name = "TCM_Loopback",
	.queuecommand = tcm_loop_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.eh_abort_handler = tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue = 1024,
	.this_id = -1,
	.sg_tablesize = 256,
	.cmd_per_lun = 1024,
	.max_sectors = 0xFFFF,
	.use_clustering = DISABLE_CLUSTERING,
	.slave_alloc = tcm_loop_slave_alloc,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};

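/*
 * Note on the hostdata convention used throughout this driver: the probe
 * routine below stores a pointer to the owning struct tcm_loop_hba in
 * Scsi_Host->hostdata, so ->queuecommand() and the EH handlers above recover
 * it with *(struct tcm_loop_hba **)shost_priv(sc->device->host).
 */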
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for"
				" tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for"
				" tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

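/*
 * The functions below implement the target_core_fabric_ops callbacks that
 * are wired up in the loop_ops table near the end of this file and
 * registered via target_register_template() in tcm_loop_fabric_init().
 */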
static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Linux/SCSI has already sent down a struct scsi_cmnd with
	 * sc_data_direction of DMA_TO_DEVICE, and its scatterlist memory has
	 * already been mapped into the struct se_cmd by
	 * target_submit_cmd_map_sgls() in tcm_loop_submission_work().
	 *
	 * So just tell TCM to queue this WRITE for execution against the
	 * backing storage object.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
		" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
		" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */
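/*
 * The port link/unlink callbacks below hot-add and hot-remove the backing
 * struct scsi_device. The resulting SCSI address is always
 * <scsi_host_no>:0:<tpgt>:<unpacked_lun> (the channel is fixed at 0), which
 * matches the H:C:T layout reported by the tpg "address" attribute further
 * down in this file.
 */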

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:"
			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}

	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
				name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with"
			" active TPG port count: %d\n",
			atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
		tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
			" max: %d\n", page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

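/*
 * Example usage of the tpg attributes defined above (a sketch only; it
 * assumes the usual configfs mount at /sys/kernel/config and example WWN
 * values):
 *
 *   cd /sys/kernel/config/target/loopback/naa.60014051234567ff/tpgt_0
 *   echo naa.60014051234567aa > nexus      # tcm_loop_tpg_nexus_store()
 *   echo offline > transport_status        # tcm_loop_tpg_transport_status_store()
 *   cat address                            # e.g. "2:0:0"
 */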
CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory"
				" group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
				" %u\n", tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);

	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

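/*
 * Sketch of how the WWN/TPG configfs callbacks in this file are reached
 * (paths assume the usual /sys/kernel/config mount; the WWN is only an
 * example value):
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014051234567ff
 *       -> tcm_loop_make_scsi_hba()   (registers the virtual HBA)
 *   mkdir .../naa.60014051234567ff/tpgt_0
 *       -> tcm_loop_make_naa_tpg()    (registers the TPG)
 *
 * rmdir in the reverse order reaches tcm_loop_drop_naa_tpg() and
 * tcm_loop_drop_scsi_hba().
 */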
/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
	if (!tl_hba) {
		pr_err("Unable to allocate struct tcm_loop_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target "
				"Port: %s\n", name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds"
			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
			TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);

	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() above will release *tl_hba;
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

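/*
 * loop_ops wires the configfs WWN/TPG callbacks and the per-command
 * callbacks defined above into target_core; it is registered with
 * target_register_template() in tcm_loop_fabric_init() below and
 * unregistered again in tcm_loop_fabric_exit().
 */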
static const struct target_core_fabric_ops loop_ops = {
	.module = THIS_MODULE,
	.name = "loopback",
	.get_fabric_name = tcm_loop_get_fabric_name,
	.tpg_get_wwn = tcm_loop_get_endpoint_wwn,
	.tpg_get_tag = tcm_loop_get_tag,
	.tpg_check_demo_mode = tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index = tcm_loop_get_inst_index,
	.check_stop_free = tcm_loop_check_stop_free,
	.release_cmd = tcm_loop_release_cmd,
	.sess_get_index = tcm_loop_sess_get_index,
	.write_pending = tcm_loop_write_pending,
	.write_pending_status = tcm_loop_write_pending_status,
	.set_default_node_attributes = tcm_loop_set_default_node_attributes,
	.get_cmd_state = tcm_loop_get_cmd_state,
	.queue_data_in = tcm_loop_queue_data_in,
	.queue_status = tcm_loop_queue_status,
	.queue_tm_rsp = tcm_loop_queue_tm_rsp,
	.aborted_task = tcm_loop_aborted_task,
	.fabric_make_wwn = tcm_loop_make_scsi_hba,
	.fabric_drop_wwn = tcm_loop_drop_scsi_hba,
	.fabric_make_tpg = tcm_loop_make_naa_tpg,
	.fabric_drop_tpg = tcm_loop_drop_naa_tpg,
	.fabric_post_link = tcm_loop_port_link,
	.fabric_pre_unlink = tcm_loop_port_unlink,
	.tfc_wwn_attrs = tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
};

static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);