/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name			= "tcm_loop_bus",
	.match			= pseudo_lld_bus_match,
	.probe			= tcm_loop_driver_probe,
	.remove			= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name			= "tcm_loop",
	.bus			= &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

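	/*
	 * Scsi_Host private data (shost_priv()) for the emulated LLD holds a
	 * single pointer back to the owning tcm_loop_hba, assigned below in
	 * tcm_loop_driver_probe().
	 */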
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) on behalf of a struct scsi_device.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to issue TMR without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);

out:
	return ret;

release:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}

static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

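/*
 * Called from SCSI EH process context as ->eh_target_reset_handler(). The
 * loopback fabric has no physical transport to reset, so simply mark the
 * TPG transport status online again and report success.
 */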
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without pointer to struct tcm_loop_hba\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	blk_queue_flag_set(QUEUE_FLAG_BIDI, sd->request_queue);
	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.eh_abort_handler	= tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue		= 1024,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.cmd_per_lun		= 1024,
	.max_sectors		= 0xFFFF,
	.use_clustering		= DISABLE_CLUSTERING,
	.slave_alloc		= tcm_loop_slave_alloc,
	.module			= THIS_MODULE,
	.track_queue_depth	= 1,
};

static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			     sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Set up a single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to register the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning 1 here allows target_core_mod to generate a struct se_node_acl
 * based upon the incoming fabric-dependent SCSI Initiator Port.
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow the I_T Nexus full READ-WRITE access without explicit Initiator Node
 * ACLs for local virtual Linux/SCSI LLD passthrough into a VM hypervisor guest.
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check().
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Linux/SCSI has already sent down a struct scsi_cmnd with
	 * sc_data_direction of DMA_TO_DEVICE, and its scatterlist memory has
	 * already been mapped to the struct se_cmd by
	 * target_submit_cmd_map_sgls() in tcm_loop_submission_work().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}

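/*
 * Called from tcm_loop_tpg_nexus_store() to set up the single I_T nexus for
 * a TPG; tcm_loop_alloc_sess_cb() above publishes the nexus pointer via
 * tl_tpg->tl_nexus once the se_session has been allocated.
 */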
static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
	if (!tl_nexus)
		return -ENOMEM;

	tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	target_remove_session(se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
					const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
						  char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
						   const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

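/*
 * Report the Linux/SCSI address of this TPG as <host_no>:0:<tpgt>, matching
 * the H:C:T values used by tcm_loop_port_link() when calling
 * scsi_add_device().
 */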
static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
					 char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
						     const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       tcm_loop_dump_proto_id(tl_hba), name, TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register() on tl_hba->dev via tcm_loop_setup_hba_bus();
	 * the bus probe callback tcm_loop_driver_probe() allocates and
	 * registers the emulated Linux/SCSI struct Scsi_Host at tl_hba->sh.
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() will release *tl_hba.
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

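/*
 * Fabric API callbacks registered with target_register_template() in
 * tcm_loop_fabric_init() below.
 */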
static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.name				= "loopback",
	.get_fabric_name		= tcm_loop_get_fabric_name,
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index		= tcm_loop_get_inst_index,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.write_pending_status		= tcm_loop_write_pending_status,
	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};

static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
					sizeof(struct tcm_loop_cmd),
					__alignof__(struct tcm_loop_cmd),
					0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);