1 /*******************************************************************************
2  *
3  * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4  * for emulated SAS initiator ports
5  *
6  * © Copyright 2011-2013 Datera, Inc.
7  *
8  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9  *
10  * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  ****************************************************************************/
22 
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/init.h>
26 #include <linux/slab.h>
27 #include <linux/types.h>
28 #include <linux/configfs.h>
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_cmnd.h>
34 
35 #include <target/target_core_base.h>
36 #include <target/target_core_fabric.h>
37 
38 #include "tcm_loop.h"
39 
40 #define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
41 
42 static struct workqueue_struct *tcm_loop_workqueue;
43 static struct kmem_cache *tcm_loop_cmd_cache;
44 
45 static int tcm_loop_hba_no_cnt;
46 
47 static int tcm_loop_queue_status(struct se_cmd *se_cmd);
48 
49 /*
50  * Called from struct target_core_fabric_ops->check_stop_free()
51  */
52 static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
53 {
	/*
	 * Do not release struct se_cmd's containing a valid TMR pointer.
	 * These will be released directly in tcm_loop_issue_tmr() with
	 * transport_generic_free_cmd().
	 */
59 	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
60 		return 0;
	/*
	 * Release the struct se_cmd, which will make a callback to release
	 * struct tcm_loop_cmd * in tcm_loop_release_cmd()
	 */
65 	transport_generic_free_cmd(se_cmd, 0);
66 	return 1;
67 }
68 
69 static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
70 {
71 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
72 				struct tcm_loop_cmd, tl_se_cmd);
73 
74 	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
75 }
76 
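/*
 * Minimal ->show_info() handler backing /proc/scsi/tcm_loopback/<host_no>
 */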
77 static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
78 {
	seq_puts(m, "tcm_loop_proc_info()\n");
80 	return 0;
81 }
82 
83 static int tcm_loop_driver_probe(struct device *);
84 static int tcm_loop_driver_remove(struct device *);
85 
86 static int pseudo_lld_bus_match(struct device *dev,
87 				struct device_driver *dev_driver)
88 {
89 	return 1;
90 }
91 
92 static struct bus_type tcm_loop_lld_bus = {
93 	.name			= "tcm_loop_bus",
94 	.match			= pseudo_lld_bus_match,
95 	.probe			= tcm_loop_driver_probe,
96 	.remove			= tcm_loop_driver_remove,
97 };
98 
99 static struct device_driver tcm_loop_driverfs = {
100 	.name			= "tcm_loop",
101 	.bus			= &tcm_loop_lld_bus,
102 };
103 /*
104  * Used with root_device_register() in tcm_loop_alloc_core_bus() below
105  */
106 static struct device *tcm_loop_primary;
107 
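/*
 * Workqueue handler that maps an incoming struct scsi_cmnd onto its
 * struct se_cmd and submits it to TCM Core with target_submit_cmd_map_sgls(),
 * reusing the Linux/SCSI scatterlists directly.  Commands against an
 * unconfigured TPG or a missing I_T nexus are completed immediately with an
 * appropriate host byte.
 */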
108 static void tcm_loop_submission_work(struct work_struct *work)
109 {
110 	struct tcm_loop_cmd *tl_cmd =
111 		container_of(work, struct tcm_loop_cmd, work);
112 	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
113 	struct scsi_cmnd *sc = tl_cmd->sc;
114 	struct tcm_loop_nexus *tl_nexus;
115 	struct tcm_loop_hba *tl_hba;
116 	struct tcm_loop_tpg *tl_tpg;
117 	struct scatterlist *sgl_bidi = NULL;
118 	u32 sgl_bidi_count = 0, transfer_length;
119 	int rc;
120 
121 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
122 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
123 
124 	/*
125 	 * Ensure that this tl_tpg reference from the incoming sc->device->id
126 	 * has already been configured via tcm_loop_make_naa_tpg().
127 	 */
128 	if (!tl_tpg->tl_hba) {
129 		set_host_byte(sc, DID_NO_CONNECT);
130 		goto out_done;
131 	}
132 	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
133 		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
134 		goto out_done;
135 	}
136 	tl_nexus = tl_tpg->tl_nexus;
137 	if (!tl_nexus) {
138 		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
139 				" does not exist\n");
140 		set_host_byte(sc, DID_ERROR);
141 		goto out_done;
142 	}
143 	if (scsi_bidi_cmnd(sc)) {
144 		struct scsi_data_buffer *sdb = scsi_in(sc);
145 
146 		sgl_bidi = sdb->table.sgl;
147 		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}
151 
152 	transfer_length = scsi_transfer_length(sc);
153 	if (!scsi_prot_sg_count(sc) &&
154 	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
155 		se_cmd->prot_pto = true;
156 		/*
157 		 * loopback transport doesn't support
158 		 * WRITE_GENERATE, READ_STRIP protection
159 		 * information operations, go ahead unprotected.
160 		 */
161 		transfer_length = scsi_bufflen(sc);
162 	}
163 
164 	se_cmd->tag = tl_cmd->sc_cmd_tag;
165 	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
166 			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
167 			transfer_length, TCM_SIMPLE_TAG,
168 			sc->sc_data_direction, 0,
169 			scsi_sglist(sc), scsi_sg_count(sc),
170 			sgl_bidi, sgl_bidi_count,
171 			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
172 	if (rc < 0) {
173 		set_host_byte(sc, DID_NO_CONNECT);
174 		goto out_done;
175 	}
176 	return;
177 
178 out_done:
179 	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
180 	sc->scsi_done(sc);
181 	return;
182 }
183 
184 /*
185  * ->queuecommand can be and usually is called from interrupt context, so
186  * defer the actual submission to a workqueue.
187  */
188 static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
189 {
190 	struct tcm_loop_cmd *tl_cmd;
191 
192 	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
193 		" scsi_buf_len: %u\n", sc->device->host->host_no,
194 		sc->device->id, sc->device->channel, sc->device->lun,
195 		sc->cmnd[0], scsi_bufflen(sc));
196 
197 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
198 	if (!tl_cmd) {
199 		pr_err("Unable to allocate struct tcm_loop_cmd\n");
200 		set_host_byte(sc, DID_ERROR);
201 		sc->scsi_done(sc);
202 		return 0;
203 	}
204 
205 	tl_cmd->sc = sc;
206 	tl_cmd->sc_cmd_tag = sc->request->tag;
207 	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
208 	queue_work(tcm_loop_workqueue, &tl_cmd->work);
209 	return 0;
210 }
211 
/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) against the underlying struct se_lun
 */
216 static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
217 			      u64 lun, int task, enum tcm_tmreq_table tmr)
218 {
219 	struct se_cmd *se_cmd = NULL;
220 	struct se_session *se_sess;
221 	struct se_portal_group *se_tpg;
222 	struct tcm_loop_nexus *tl_nexus;
223 	struct tcm_loop_cmd *tl_cmd = NULL;
224 	struct tcm_loop_tmr *tl_tmr = NULL;
225 	int ret = TMR_FUNCTION_FAILED, rc;
226 
227 	/*
228 	 * Locate the tl_nexus and se_sess pointers
229 	 */
230 	tl_nexus = tl_tpg->tl_nexus;
231 	if (!tl_nexus) {
		pr_err("Unable to issue TMR without an active I_T Nexus\n");
234 		return ret;
235 	}
236 
237 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
238 	if (!tl_cmd) {
239 		pr_err("Unable to allocate memory for tl_cmd\n");
240 		return ret;
241 	}
242 
243 	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
244 	if (!tl_tmr) {
245 		pr_err("Unable to allocate memory for tl_tmr\n");
246 		goto release;
247 	}
248 	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
249 
250 	se_cmd = &tl_cmd->tl_se_cmd;
251 	se_tpg = &tl_tpg->tl_se_tpg;
252 	se_sess = tl_tpg->tl_nexus->se_sess;
253 	/*
254 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
255 	 */
256 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
257 				DMA_NONE, TCM_SIMPLE_TAG,
258 				&tl_cmd->tl_sense_buf[0]);
259 
260 	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
261 	if (rc < 0)
262 		goto release;
263 
264 	if (tmr == TMR_ABORT_TASK)
265 		se_cmd->se_tmr_req->ref_task_tag = task;
266 
267 	/*
268 	 * Locate the underlying TCM struct se_lun
269 	 */
270 	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
271 		ret = TMR_LUN_DOES_NOT_EXIST;
272 		goto release;
273 	}
274 	/*
275 	 * Queue the TMR to TCM Core and sleep waiting for
276 	 * tcm_loop_queue_tm_rsp() to wake us up.
277 	 */
278 	transport_generic_handle_tmr(se_cmd);
279 	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
280 	/*
281 	 * The TMR LUN_RESET has completed, check the response status and
282 	 * then release allocations.
283 	 */
284 	ret = se_cmd->se_tmr_req->response;
285 release:
286 	if (se_cmd)
287 		transport_generic_free_cmd(se_cmd, 1);
288 	else
289 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
290 	kfree(tl_tmr);
291 	return ret;
292 }
293 
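/*
 * Called from SCSI EH process context to issue an ABORT_TASK TMR for the
 * failed struct scsi_cmnd's request tag.
 */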
294 static int tcm_loop_abort_task(struct scsi_cmnd *sc)
295 {
296 	struct tcm_loop_hba *tl_hba;
297 	struct tcm_loop_tpg *tl_tpg;
298 	int ret = FAILED;
299 
	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
303 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
304 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
305 	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
306 				 sc->request->tag, TMR_ABORT_TASK);
307 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
308 }
309 
310 /*
311  * Called from SCSI EH process context to issue a LUN_RESET TMR
312  * to struct scsi_device
313  */
314 static int tcm_loop_device_reset(struct scsi_cmnd *sc)
315 {
316 	struct tcm_loop_hba *tl_hba;
317 	struct tcm_loop_tpg *tl_tpg;
318 	int ret = FAILED;
319 
	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
323 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
324 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
325 
326 	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
327 				 0, TMR_LUN_RESET);
328 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
329 }
330 
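/*
 * Called from SCSI EH process context as the eh_target_reset_handler,
 * forcing the matching TPG back to TCM_TRANSPORT_ONLINE.
 */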
331 static int tcm_loop_target_reset(struct scsi_cmnd *sc)
332 {
333 	struct tcm_loop_hba *tl_hba;
334 	struct tcm_loop_tpg *tl_tpg;
335 
	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
339 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
340 	if (!tl_hba) {
		pr_err("Unable to perform target reset without an active HBA\n");
343 		return FAILED;
344 	}
345 	/*
346 	 * Locate the tl_tpg pointer from TargetID in sc->device->id
347 	 */
348 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
349 	if (tl_tpg) {
350 		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
351 		return SUCCESS;
352 	}
353 	return FAILED;
354 }
355 
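/*
 * Mark each emulated struct scsi_device's request queue as BIDI capable so
 * bidirectional commands can be passed through to TCM Core.
 */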
356 static int tcm_loop_slave_alloc(struct scsi_device *sd)
357 {
358 	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
359 	return 0;
360 }
361 
362 static struct scsi_host_template tcm_loop_driver_template = {
363 	.show_info		= tcm_loop_show_info,
364 	.proc_name		= "tcm_loopback",
365 	.name			= "TCM_Loopback",
366 	.queuecommand		= tcm_loop_queuecommand,
367 	.change_queue_depth	= scsi_change_queue_depth,
368 	.eh_abort_handler = tcm_loop_abort_task,
369 	.eh_device_reset_handler = tcm_loop_device_reset,
370 	.eh_target_reset_handler = tcm_loop_target_reset,
371 	.can_queue		= 1024,
372 	.this_id		= -1,
373 	.sg_tablesize		= 256,
374 	.cmd_per_lun		= 1024,
375 	.max_sectors		= 0xFFFF,
376 	.use_clustering		= DISABLE_CLUSTERING,
377 	.slave_alloc		= tcm_loop_slave_alloc,
378 	.module			= THIS_MODULE,
379 	.track_queue_depth	= 1,
380 };
381 
382 static int tcm_loop_driver_probe(struct device *dev)
383 {
384 	struct tcm_loop_hba *tl_hba;
385 	struct Scsi_Host *sh;
386 	int error, host_prot;
387 
388 	tl_hba = to_tcm_loop_hba(dev);
389 
390 	sh = scsi_host_alloc(&tcm_loop_driver_template,
391 			sizeof(struct tcm_loop_hba));
392 	if (!sh) {
393 		pr_err("Unable to allocate struct scsi_host\n");
394 		return -ENODEV;
395 	}
396 	tl_hba->sh = sh;
397 
398 	/*
399 	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
400 	 */
401 	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup the SCSI host ID, Channel and LUN limits for now..
	 */
405 	sh->max_id = 2;
406 	sh->max_lun = 0;
407 	sh->max_channel = 0;
408 	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
409 
410 	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
411 		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
412 		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
413 
414 	scsi_host_set_prot(sh, host_prot);
415 	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
416 
417 	error = scsi_add_host(sh, &tl_hba->dev);
418 	if (error) {
419 		pr_err("%s: scsi_add_host failed\n", __func__);
420 		scsi_host_put(sh);
421 		return -ENODEV;
422 	}
423 	return 0;
424 }
425 
426 static int tcm_loop_driver_remove(struct device *dev)
427 {
428 	struct tcm_loop_hba *tl_hba;
429 	struct Scsi_Host *sh;
430 
431 	tl_hba = to_tcm_loop_hba(dev);
432 	sh = tl_hba->sh;
433 
434 	scsi_remove_host(sh);
435 	scsi_host_put(sh);
436 	return 0;
437 }
438 
439 static void tcm_loop_release_adapter(struct device *dev)
440 {
441 	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
442 
443 	kfree(tl_hba);
444 }
445 
/*
 * Called from tcm_loop_make_scsi_hba() below
 */
449 static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
450 {
451 	int ret;
452 
453 	tl_hba->dev.bus = &tcm_loop_lld_bus;
454 	tl_hba->dev.parent = tcm_loop_primary;
455 	tl_hba->dev.release = &tcm_loop_release_adapter;
456 	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
457 
458 	ret = device_register(&tl_hba->dev);
459 	if (ret) {
460 		pr_err("device_register() failed for"
461 				" tl_hba->dev: %d\n", ret);
462 		return -ENODEV;
463 	}
464 
465 	return 0;
466 }
467 
/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
472 static int tcm_loop_alloc_core_bus(void)
473 {
474 	int ret;
475 
476 	tcm_loop_primary = root_device_register("tcm_loop_0");
477 	if (IS_ERR(tcm_loop_primary)) {
478 		pr_err("Unable to allocate tcm_loop_primary\n");
479 		return PTR_ERR(tcm_loop_primary);
480 	}
481 
482 	ret = bus_register(&tcm_loop_lld_bus);
483 	if (ret) {
484 		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
485 		goto dev_unreg;
486 	}
487 
488 	ret = driver_register(&tcm_loop_driverfs);
489 	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
492 		goto bus_unreg;
493 	}
494 
495 	pr_debug("Initialized TCM Loop Core Bus\n");
496 	return ret;
497 
498 bus_unreg:
499 	bus_unregister(&tcm_loop_lld_bus);
500 dev_unreg:
501 	root_device_unregister(tcm_loop_primary);
502 	return ret;
503 }
504 
505 static void tcm_loop_release_core_bus(void)
506 {
507 	driver_unregister(&tcm_loop_driverfs);
508 	bus_unregister(&tcm_loop_lld_bus);
509 	root_device_unregister(tcm_loop_primary);
510 
511 	pr_debug("Releasing TCM Loop Core BUS\n");
512 }
513 
514 static char *tcm_loop_get_fabric_name(void)
515 {
516 	return "loopback";
517 }
518 
519 static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
520 {
521 	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
522 }
523 
524 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
525 {
526 	/*
527 	 * Return the passed NAA identifier for the Target Port
528 	 */
529 	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
530 }
531 
532 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
533 {
	/*
	 * This Tag is used when forming the SCSI Name identifier in the
	 * INQUIRY VPD page 0x83 to represent the SCSI Target Port.
	 */
538 	return tl_tpg(se_tpg)->tl_tpgt;
539 }
540 
541 /*
542  * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
543  * based upon the incoming fabric dependent SCSI Initiator Port
544  */
545 static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
546 {
547 	return 1;
548 }
549 
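/*
 * Returning 0 here means dynamically generated se_node_acls are not
 * cached by target_core_mod once the I_T nexus is released.
 */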
550 static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
551 {
552 	return 0;
553 }
554 
/*
 * Allow the I_T Nexus full READ-WRITE access without explicit Initiator
 * Node ACLs for the local virtual Linux/SCSI LLD passthrough into a VM
 * hypervisor guest
 */
559 static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
560 {
561 	return 0;
562 }
563 
/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
569 static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
570 {
571 	return 0;
572 }
573 
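/*
 * Report the per-TPG fabric_prot_type attribute configured via the
 * tcm_loop_tpg_attrib_fabric_prot_type configfs attribute below.
 */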
574 static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
575 {
576 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
577 						   tl_se_tpg);
578 	return tl_tpg->tl_fabric_prot_type;
579 }
580 
581 static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
582 {
583 	return 1;
584 }
585 
586 static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
587 {
588 	return 1;
589 }
590 
591 static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
592 {
594 }
595 
596 static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
597 {
598 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
599 			struct tcm_loop_cmd, tl_se_cmd);
600 
601 	return tl_cmd->sc_cmd_state;
602 }
603 
604 static int tcm_loop_write_pending(struct se_cmd *se_cmd)
605 {
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd with
	 * sc->sc_data_direction of DMA_TO_DEVICE, its scatterlist memory has
	 * already been mapped into the struct se_cmd by
	 * target_submit_cmd_map_sgls() in tcm_loop_submission_work().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
615 	target_execute_cmd(se_cmd);
616 	return 0;
617 }
618 
619 static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
620 {
621 	return 0;
622 }
623 
624 static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
625 {
626 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
627 				struct tcm_loop_cmd, tl_se_cmd);
628 	struct scsi_cmnd *sc = tl_cmd->sc;
629 
630 	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
631 		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
632 
633 	sc->result = SAM_STAT_GOOD;
634 	set_host_byte(sc, DID_OK);
635 	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
636 	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
637 		scsi_set_resid(sc, se_cmd->residual_count);
638 	sc->scsi_done(sc);
639 	return 0;
640 }
641 
642 static int tcm_loop_queue_status(struct se_cmd *se_cmd)
643 {
644 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
645 				struct tcm_loop_cmd, tl_se_cmd);
646 	struct scsi_cmnd *sc = tl_cmd->sc;
647 
648 	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
649 			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
650 
651 	if (se_cmd->sense_buffer &&
652 	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
653 	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
654 
655 		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
656 				SCSI_SENSE_BUFFERSIZE);
657 		sc->result = SAM_STAT_CHECK_CONDITION;
658 		set_driver_byte(sc, DRIVER_SENSE);
659 	} else
660 		sc->result = se_cmd->scsi_status;
661 
662 	set_host_byte(sc, DID_OK);
663 	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
664 	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
665 		scsi_set_resid(sc, se_cmd->residual_count);
666 	sc->scsi_done(sc);
667 	return 0;
668 }
669 
670 static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
671 {
672 	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
673 	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_issue_tmr()
	 */
678 	atomic_set(&tl_tmr->tmr_complete, 1);
679 	wake_up(&tl_tmr->tl_tmr_wait);
680 }
681 
682 static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
683 {
685 }
686 
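/*
 * Translate tl_hba->tl_proto_id into a human readable protocol name
 * for log messages.
 */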
687 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
688 {
689 	switch (tl_hba->tl_proto_id) {
690 	case SCSI_PROTOCOL_SAS:
691 		return "SAS";
692 	case SCSI_PROTOCOL_FCP:
693 		return "FCP";
694 	case SCSI_PROTOCOL_ISCSI:
695 		return "iSCSI";
696 	default:
697 		break;
698 	}
699 
700 	return "Unknown";
701 }
702 
703 /* Start items for tcm_loop_port_cit */
704 
705 static int tcm_loop_port_link(
706 	struct se_portal_group *se_tpg,
707 	struct se_lun *lun)
708 {
709 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
710 				struct tcm_loop_tpg, tl_se_tpg);
711 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
712 
713 	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
714 	/*
715 	 * Add Linux/SCSI struct scsi_device by HCTL
716 	 */
717 	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
718 
719 	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
720 	return 0;
721 }
722 
723 static void tcm_loop_port_unlink(
724 	struct se_portal_group *se_tpg,
725 	struct se_lun *se_lun)
726 {
727 	struct scsi_device *sd;
728 	struct tcm_loop_hba *tl_hba;
729 	struct tcm_loop_tpg *tl_tpg;
730 
731 	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
732 	tl_hba = tl_tpg->tl_hba;
733 
734 	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
735 				se_lun->unpacked_lun);
736 	if (!sd) {
737 		pr_err("Unable to locate struct scsi_device for %d:%d:"
738 			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
739 		return;
740 	}
741 	/*
742 	 * Remove Linux/SCSI struct scsi_device by HCTL
743 	 */
744 	scsi_remove_device(sd);
745 	scsi_device_put(sd);
746 
747 	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
748 
749 	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
750 }
751 
752 /* End items for tcm_loop_port_cit */
753 
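/*
 * The fabric_prot_type attribute appears under each TPG's attrib group in
 * configfs, e.g. (illustrative WWN):
 *
 *   echo 1 > /sys/kernel/config/target/loopback/naa.<wwn>/tpgt_0/attrib/fabric_prot_type
 *
 * Accepted values are 0, 1 and 3, matching the store handler below.
 */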
754 static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
755 		struct config_item *item, char *page)
756 {
757 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
758 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
759 						   tl_se_tpg);
760 
761 	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
762 }
763 
764 static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
765 		struct config_item *item, const char *page, size_t count)
766 {
767 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
768 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
769 						   tl_se_tpg);
770 	unsigned long val;
771 	int ret = kstrtoul(page, 0, &val);
772 
773 	if (ret) {
774 		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
775 		return ret;
776 	}
777 	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
779 		return -EINVAL;
780 	}
781 	tl_tpg->tl_fabric_prot_type = val;
782 
783 	return count;
784 }
785 
786 CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
787 
788 static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
789 	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
790 	NULL,
791 };
792 
793 /* Start items for tcm_loop_nexus_cit */
794 
795 static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
796 				  struct se_session *se_sess, void *p)
797 {
798 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
799 					struct tcm_loop_tpg, tl_se_tpg);
800 
801 	tl_tpg->tl_nexus = p;
802 	return 0;
803 }
804 
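/*
 * Allocate the emulated I_T nexus for a TPG: a struct tcm_loop_nexus plus a
 * target core se_session created via target_alloc_session(), with
 * tl_tpg->tl_nexus assigned from the tcm_loop_alloc_sess_cb() callback.
 */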
805 static int tcm_loop_make_nexus(
806 	struct tcm_loop_tpg *tl_tpg,
807 	const char *name)
808 {
809 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
810 	struct tcm_loop_nexus *tl_nexus;
811 	int ret;
812 
813 	if (tl_tpg->tl_nexus) {
814 		pr_debug("tl_tpg->tl_nexus already exists\n");
815 		return -EEXIST;
816 	}
817 
818 	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
819 	if (!tl_nexus) {
820 		pr_err("Unable to allocate struct tcm_loop_nexus\n");
821 		return -ENOMEM;
822 	}
823 
824 	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
825 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
826 					name, tl_nexus, tcm_loop_alloc_sess_cb);
827 	if (IS_ERR(tl_nexus->se_sess)) {
828 		ret = PTR_ERR(tl_nexus->se_sess);
829 		kfree(tl_nexus);
830 		return ret;
831 	}
832 
833 	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
834 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
835 		name);
836 	return 0;
837 }
838 
839 static int tcm_loop_drop_nexus(
840 	struct tcm_loop_tpg *tpg)
841 {
842 	struct se_session *se_sess;
843 	struct tcm_loop_nexus *tl_nexus;
844 
845 	tl_nexus = tpg->tl_nexus;
846 	if (!tl_nexus)
847 		return -ENODEV;
848 
849 	se_sess = tl_nexus->se_sess;
850 	if (!se_sess)
851 		return -ENODEV;
852 
853 	if (atomic_read(&tpg->tl_tpg_port_count)) {
854 		pr_err("Unable to remove TCM_Loop I_T Nexus with"
855 			" active TPG port count: %d\n",
856 			atomic_read(&tpg->tl_tpg_port_count));
857 		return -EPERM;
858 	}
859 
860 	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
861 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
862 		tl_nexus->se_sess->se_node_acl->initiatorname);
863 	/*
864 	 * Release the SCSI I_T Nexus to the emulated Target Port
865 	 */
866 	transport_deregister_session(tl_nexus->se_sess);
867 	tpg->tl_nexus = NULL;
868 	kfree(tl_nexus);
869 	return 0;
870 }
871 
872 /* End items for tcm_loop_nexus_cit */
873 
874 static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
875 {
876 	struct se_portal_group *se_tpg = to_tpg(item);
877 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
878 			struct tcm_loop_tpg, tl_se_tpg);
879 	struct tcm_loop_nexus *tl_nexus;
880 	ssize_t ret;
881 
882 	tl_nexus = tl_tpg->tl_nexus;
883 	if (!tl_nexus)
884 		return -ENODEV;
885 
886 	ret = snprintf(page, PAGE_SIZE, "%s\n",
887 		tl_nexus->se_sess->se_node_acl->initiatorname);
888 
889 	return ret;
890 }
891 
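/*
 * The nexus attribute accepts an initiator port WWN matching the HBA's
 * protocol prefix (naa., fc. or iqn.), or "NULL" to shutdown the active
 * nexus, e.g. (illustrative WWN):
 *
 *   echo naa.<initiator wwn> > \
 *       /sys/kernel/config/target/loopback/naa.<wwn>/tpgt_0/nexus
 */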
892 static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
893 		const char *page, size_t count)
894 {
895 	struct se_portal_group *se_tpg = to_tpg(item);
896 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
897 			struct tcm_loop_tpg, tl_se_tpg);
898 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
899 	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
900 	int ret;
901 	/*
902 	 * Shutdown the active I_T nexus if 'NULL' is passed..
903 	 */
904 	if (!strncmp(page, "NULL", 4)) {
905 		ret = tcm_loop_drop_nexus(tl_tpg);
906 		return (!ret) ? count : ret;
907 	}
908 	/*
909 	 * Otherwise make sure the passed virtual Initiator port WWN matches
910 	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
911 	 * tcm_loop_make_nexus()
912 	 */
913 	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated Initiator Port WWN: %s exceeds max: %d\n",
				page, TL_WWN_ADDR_LEN);
916 		return -EINVAL;
917 	}
918 	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
919 
920 	ptr = strstr(i_port, "naa.");
921 	if (ptr) {
922 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
923 			pr_err("Passed SAS Initiator Port %s does not"
924 				" match target port protoid: %s\n", i_port,
925 				tcm_loop_dump_proto_id(tl_hba));
926 			return -EINVAL;
927 		}
928 		port_ptr = &i_port[0];
929 		goto check_newline;
930 	}
931 	ptr = strstr(i_port, "fc.");
932 	if (ptr) {
933 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
934 			pr_err("Passed FCP Initiator Port %s does not"
935 				" match target port protoid: %s\n", i_port,
936 				tcm_loop_dump_proto_id(tl_hba));
937 			return -EINVAL;
938 		}
939 		port_ptr = &i_port[3]; /* Skip over "fc." */
940 		goto check_newline;
941 	}
942 	ptr = strstr(i_port, "iqn.");
943 	if (ptr) {
944 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
945 			pr_err("Passed iSCSI Initiator Port %s does not"
946 				" match target port protoid: %s\n", i_port,
947 				tcm_loop_dump_proto_id(tl_hba));
948 			return -EINVAL;
949 		}
950 		port_ptr = &i_port[0];
951 		goto check_newline;
952 	}
953 	pr_err("Unable to locate prefix for emulated Initiator Port:"
954 			" %s\n", i_port);
955 	return -EINVAL;
956 	/*
957 	 * Clear any trailing newline for the NAA WWN
958 	 */
959 check_newline:
960 	if (i_port[strlen(i_port)-1] == '\n')
961 		i_port[strlen(i_port)-1] = '\0';
962 
963 	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
964 	if (ret < 0)
965 		return ret;
966 
967 	return count;
968 }
969 
970 static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
971 		char *page)
972 {
973 	struct se_portal_group *se_tpg = to_tpg(item);
974 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
975 			struct tcm_loop_tpg, tl_se_tpg);
976 	const char *status = NULL;
977 	ssize_t ret = -EINVAL;
978 
979 	switch (tl_tpg->tl_transport_status) {
980 	case TCM_TRANSPORT_ONLINE:
981 		status = "online";
982 		break;
983 	case TCM_TRANSPORT_OFFLINE:
984 		status = "offline";
985 		break;
986 	default:
987 		break;
988 	}
989 
990 	if (status)
991 		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
992 
993 	return ret;
994 }
995 
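/*
 * transport_status accepts "online" or "offline"; going offline also posts
 * a nexus loss Unit Attention for any active I_T nexus.
 */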
996 static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
997 		const char *page, size_t count)
998 {
999 	struct se_portal_group *se_tpg = to_tpg(item);
1000 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1001 			struct tcm_loop_tpg, tl_se_tpg);
1002 
1003 	if (!strncmp(page, "online", 6)) {
1004 		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1005 		return count;
1006 	}
1007 	if (!strncmp(page, "offline", 7)) {
1008 		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1009 		if (tl_tpg->tl_nexus) {
1010 			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
1011 
1012 			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
1013 		}
1014 		return count;
1015 	}
1016 	return -EINVAL;
1017 }
1018 
1019 static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
1020 					 char *page)
1021 {
1022 	struct se_portal_group *se_tpg = to_tpg(item);
1023 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1024 			struct tcm_loop_tpg, tl_se_tpg);
1025 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1026 
1027 	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
1028 			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
1029 }
1030 
1031 CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
1032 CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
1033 CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
1034 
1035 static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1036 	&tcm_loop_tpg_attr_nexus,
1037 	&tcm_loop_tpg_attr_transport_status,
1038 	&tcm_loop_tpg_attr_address,
1039 	NULL,
1040 };
1041 
1042 /* Start items for tcm_loop_naa_cit */
1043 
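/*
 * Called on mkdir of a "tpgt_$TPGT" group beneath an emulated Target Port
 * WWN in configfs, e.g. (illustrative WWN):
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.<wwn>/tpgt_0
 */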
1044 static struct se_portal_group *tcm_loop_make_naa_tpg(
1045 	struct se_wwn *wwn,
1046 	struct config_group *group,
1047 	const char *name)
1048 {
1049 	struct tcm_loop_hba *tl_hba = container_of(wwn,
1050 			struct tcm_loop_hba, tl_hba_wwn);
1051 	struct tcm_loop_tpg *tl_tpg;
1052 	int ret;
1053 	unsigned long tpgt;
1054 
1055 	if (strstr(name, "tpgt_") != name) {
1056 		pr_err("Unable to locate \"tpgt_#\" directory"
1057 				" group\n");
1058 		return ERR_PTR(-EINVAL);
1059 	}
1060 	if (kstrtoul(name+5, 10, &tpgt))
1061 		return ERR_PTR(-EINVAL);
1062 
1063 	if (tpgt >= TL_TPGS_PER_HBA) {
1064 		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
1065 				" %u\n", tpgt, TL_TPGS_PER_HBA);
1066 		return ERR_PTR(-EINVAL);
1067 	}
1068 	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1069 	tl_tpg->tl_hba = tl_hba;
1070 	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
1074 	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(ret);
1077 
1078 	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1079 		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
1080 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1081 
1082 	return &tl_tpg->tl_se_tpg;
1083 }
1084 
1085 static void tcm_loop_drop_naa_tpg(
1086 	struct se_portal_group *se_tpg)
1087 {
1088 	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1089 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1090 				struct tcm_loop_tpg, tl_se_tpg);
1091 	struct tcm_loop_hba *tl_hba;
1092 	unsigned short tpgt;
1093 
1094 	tl_hba = tl_tpg->tl_hba;
1095 	tpgt = tl_tpg->tl_tpgt;
1096 	/*
1097 	 * Release the I_T Nexus for the Virtual target link if present
1098 	 */
1099 	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
1103 	core_tpg_deregister(se_tpg);
1104 
1105 	tl_tpg->tl_hba = NULL;
1106 	tl_tpg->tl_tpgt = 0;
1107 
1108 	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1109 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1110 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1111 }
1112 
1113 /* End items for tcm_loop_naa_cit */
1114 
1115 /* Start items for tcm_loop_cit */
1116 
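/*
 * Called on mkdir of an emulated Target Port WWN group in configfs,
 * e.g. (illustrative WWN):
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014059d5a86ac0
 *
 * This allocates the struct tcm_loop_hba and brings up the virtual
 * Scsi_Host through tcm_loop_setup_hba_bus().
 */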
1117 static struct se_wwn *tcm_loop_make_scsi_hba(
1118 	struct target_fabric_configfs *tf,
1119 	struct config_group *group,
1120 	const char *name)
1121 {
1122 	struct tcm_loop_hba *tl_hba;
1123 	struct Scsi_Host *sh;
1124 	char *ptr;
1125 	int ret, off = 0;
1126 
1127 	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1128 	if (!tl_hba) {
1129 		pr_err("Unable to allocate struct tcm_loop_hba\n");
1130 		return ERR_PTR(-ENOMEM);
1131 	}
1132 	/*
1133 	 * Determine the emulated Protocol Identifier and Target Port Name
1134 	 * based on the incoming configfs directory name.
1135 	 */
1136 	ptr = strstr(name, "naa.");
1137 	if (ptr) {
1138 		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1139 		goto check_len;
1140 	}
1141 	ptr = strstr(name, "fc.");
1142 	if (ptr) {
1143 		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1144 		off = 3; /* Skip over "fc." */
1145 		goto check_len;
1146 	}
1147 	ptr = strstr(name, "iqn.");
1148 	if (!ptr) {
1149 		pr_err("Unable to locate prefix for emulated Target "
1150 				"Port: %s\n", name);
1151 		ret = -EINVAL;
1152 		goto out;
1153 	}
1154 	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1155 
1156 check_len:
1157 	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
			tcm_loop_dump_proto_id(tl_hba), name,
			TL_WWN_ADDR_LEN);
1161 		ret = -EINVAL;
1162 		goto out;
1163 	}
1164 	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1165 
	/*
	 * Call device_register(&tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh, which is
	 * allocated from the tcm_loop_driver_probe() bus probe callback.
	 */
1171 	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1172 	if (ret)
1173 		goto out;
1174 
1175 	sh = tl_hba->sh;
1176 	tcm_loop_hba_no_cnt++;
1177 	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1178 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1179 		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1180 
1181 	return &tl_hba->tl_hba_wwn;
1182 out:
1183 	kfree(tl_hba);
1184 	return ERR_PTR(ret);
1185 }
1186 
1187 static void tcm_loop_drop_scsi_hba(
1188 	struct se_wwn *wwn)
1189 {
1190 	struct tcm_loop_hba *tl_hba = container_of(wwn,
1191 				struct tcm_loop_hba, tl_hba_wwn);
1192 
1193 	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1194 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1195 		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1196 		tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() will then release *tl_hba.
	 */
1202 	device_unregister(&tl_hba->dev);
1203 }
1204 
1206 static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1207 {
1208 	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1209 }
1210 
1211 CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1212 
1213 static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1214 	&tcm_loop_wwn_attr_version,
1215 	NULL,
1216 };
1217 
1218 /* End items for tcm_loop_cit */
1219 
1220 static const struct target_core_fabric_ops loop_ops = {
1221 	.module				= THIS_MODULE,
1222 	.name				= "loopback",
1223 	.get_fabric_name		= tcm_loop_get_fabric_name,
1224 	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1225 	.tpg_get_tag			= tcm_loop_get_tag,
1226 	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1227 	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
1228 	.tpg_check_demo_mode_write_protect =
1229 				tcm_loop_check_demo_mode_write_protect,
1230 	.tpg_check_prod_mode_write_protect =
1231 				tcm_loop_check_prod_mode_write_protect,
1232 	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
1233 	.tpg_get_inst_index		= tcm_loop_get_inst_index,
1234 	.check_stop_free		= tcm_loop_check_stop_free,
1235 	.release_cmd			= tcm_loop_release_cmd,
1236 	.sess_get_index			= tcm_loop_sess_get_index,
1237 	.write_pending			= tcm_loop_write_pending,
1238 	.write_pending_status		= tcm_loop_write_pending_status,
1239 	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
1240 	.get_cmd_state			= tcm_loop_get_cmd_state,
1241 	.queue_data_in			= tcm_loop_queue_data_in,
1242 	.queue_status			= tcm_loop_queue_status,
1243 	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1244 	.aborted_task			= tcm_loop_aborted_task,
1245 	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1246 	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1247 	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1248 	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1249 	.fabric_post_link		= tcm_loop_port_link,
1250 	.fabric_pre_unlink		= tcm_loop_port_unlink,
1251 	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1252 	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1253 	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1254 };
1255 
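/*
 * Module init: allocate the submission workqueue and tcm_loop_cmd kmem
 * cache, register the emulated LLD bus/driver pair, and finally register
 * the loopback fabric template with target_core_mod.
 */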
1256 static int __init tcm_loop_fabric_init(void)
1257 {
1258 	int ret = -ENOMEM;
1259 
1260 	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1261 	if (!tcm_loop_workqueue)
1262 		goto out;
1263 
1264 	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1265 				sizeof(struct tcm_loop_cmd),
1266 				__alignof__(struct tcm_loop_cmd),
1267 				0, NULL);
1268 	if (!tcm_loop_cmd_cache) {
1269 		pr_debug("kmem_cache_create() for"
1270 			" tcm_loop_cmd_cache failed\n");
1271 		goto out_destroy_workqueue;
1272 	}
1273 
1274 	ret = tcm_loop_alloc_core_bus();
1275 	if (ret)
1276 		goto out_destroy_cache;
1277 
1278 	ret = target_register_template(&loop_ops);
1279 	if (ret)
1280 		goto out_release_core_bus;
1281 
1282 	return 0;
1283 
1284 out_release_core_bus:
1285 	tcm_loop_release_core_bus();
1286 out_destroy_cache:
1287 	kmem_cache_destroy(tcm_loop_cmd_cache);
1288 out_destroy_workqueue:
1289 	destroy_workqueue(tcm_loop_workqueue);
1290 out:
1291 	return ret;
1292 }
1293 
1294 static void __exit tcm_loop_fabric_exit(void)
1295 {
1296 	target_unregister_template(&loop_ops);
1297 	tcm_loop_release_core_bus();
1298 	kmem_cache_destroy(tcm_loop_cmd_cache);
1299 	destroy_workqueue(tcm_loop_workqueue);
1300 }
1301 
1302 MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1303 MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1304 MODULE_LICENSE("GPL");
1305 module_init(tcm_loop_fabric_init);
1306 module_exit(tcm_loop_fabric_exit);
1307