1 /*******************************************************************************
2  * Filename:  target_core_device.c (based on iscsi_target_device.c)
3  *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic functions.
6  *
7  * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8  * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
9  * Copyright (c) 2007-2010 Rising Tide Systems
10  * Copyright (c) 2008-2010 Linux-iSCSI.org
11  *
12  * Nicholas A. Bellinger <nab@kernel.org>
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2 of the License, or
17  * (at your option) any later version.
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with this program; if not, write to the Free Software
26  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27  *
28  ******************************************************************************/
29 
30 #include <linux/net.h>
31 #include <linux/string.h>
32 #include <linux/delay.h>
33 #include <linux/timer.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/kthread.h>
37 #include <linux/in.h>
38 #include <linux/export.h>
39 #include <net/sock.h>
40 #include <net/tcp.h>
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_device.h>
43 
44 #include <target/target_core_base.h>
45 #include <target/target_core_backend.h>
46 #include <target/target_core_fabric.h>
47 
48 #include "target_core_internal.h"
49 #include "target_core_alua.h"
50 #include "target_core_pr.h"
51 #include "target_core_ua.h"
52 
53 static void se_dev_start(struct se_device *dev);
54 static void se_dev_stop(struct se_device *dev);
55 
56 static struct se_hba *lun0_hba;
57 static struct se_subsystem_dev *lun0_su_dev;
58 /* not static, needed by tpg.c */
59 struct se_device *g_lun0_dev;
60 
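/*	transport_lookup_cmd_lun():
 *
 *	Map @unpacked_lun through the session's node ACL device list to
 *	the backing struct se_lun + struct se_device for @se_cmd, update
 *	per-entry and per-device statistics, and fall back to the TPG's
 *	virtual LUN 0 when no MappedLUN=0 exists for this initiator port.
 */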
61 int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
62 {
63 	struct se_lun *se_lun = NULL;
64 	struct se_session *se_sess = se_cmd->se_sess;
65 	struct se_device *dev;
66 	unsigned long flags;
67 
68 	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
69 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
70 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
71 		return -ENODEV;
72 	}
73 
74 	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
75 	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
76 	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
77 		struct se_dev_entry *deve = se_cmd->se_deve;
78 
79 		deve->total_cmds++;
80 		deve->total_bytes += se_cmd->data_length;
81 
82 		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
83 		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
84 			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
85 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
86 			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
87 				" Access for 0x%08x\n",
88 				se_cmd->se_tfo->get_fabric_name(),
89 				unpacked_lun);
90 			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
91 			return -EACCES;
92 		}
93 
94 		if (se_cmd->data_direction == DMA_TO_DEVICE)
95 			deve->write_bytes += se_cmd->data_length;
96 		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
97 			deve->read_bytes += se_cmd->data_length;
98 
99 		deve->deve_cmds++;
100 
101 		se_lun = deve->se_lun;
102 		se_cmd->se_lun = deve->se_lun;
103 		se_cmd->pr_res_key = deve->pr_res_key;
104 		se_cmd->orig_fe_lun = unpacked_lun;
105 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
106 	}
107 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
108 
109 	if (!se_lun) {
110 		/*
111 		 * Use the se_portal_group->tpg_virt_lun0 to allow for
112 		 * REPORT_LUNS, et al to be returned when no active
113 		 * MappedLUN=0 exists for this Initiator Port.
114 		 */
115 		if (unpacked_lun != 0) {
116 			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
117 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
118 			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
119 				" Access for 0x%08x\n",
120 				se_cmd->se_tfo->get_fabric_name(),
121 				unpacked_lun);
122 			return -ENODEV;
123 		}
124 		/*
125 		 * Force WRITE PROTECT for virtual LUN 0
126 		 */
127 		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
128 		    (se_cmd->data_direction != DMA_NONE)) {
129 			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
130 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
131 			return -EACCES;
132 		}
133 
134 		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
135 		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
136 		se_cmd->orig_fe_lun = 0;
137 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
138 	}
139 	/*
140 	 * Determine if the struct se_lun is online.
141 	 * FIXME: Check for LUN_RESET + UNIT Attention
142 	 */
143 	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
144 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
145 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
146 		return -ENODEV;
147 	}
148 
149 	/* Directly associate cmd with se_dev */
150 	se_cmd->se_dev = se_lun->lun_se_dev;
151 
152 	/* TODO: get rid of this and use atomics for stats */
153 	dev = se_lun->lun_se_dev;
154 	spin_lock_irqsave(&dev->stats_lock, flags);
155 	dev->num_cmds++;
156 	if (se_cmd->data_direction == DMA_TO_DEVICE)
157 		dev->write_bytes += se_cmd->data_length;
158 	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
159 		dev->read_bytes += se_cmd->data_length;
160 	spin_unlock_irqrestore(&dev->stats_lock, flags);
161 
162 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
163 	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
164 	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
165 
166 	return 0;
167 }
168 EXPORT_SYMBOL(transport_lookup_cmd_lun);
169 
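/*	transport_lookup_tmr_lun():
 *
 *	TMR variant of transport_lookup_cmd_lun(): resolve @unpacked_lun
 *	for a task management request and queue it on the device's
 *	dev_tmr_list.
 */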
170 int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
171 {
172 	struct se_dev_entry *deve;
173 	struct se_lun *se_lun = NULL;
174 	struct se_session *se_sess = se_cmd->se_sess;
175 	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
176 	unsigned long flags;
177 
178 	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
179 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
180 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
181 		return -ENODEV;
182 	}
183 
184 	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
185 	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
186 	deve = se_cmd->se_deve;
187 
188 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
189 		se_tmr->tmr_lun = deve->se_lun;
190 		se_cmd->se_lun = deve->se_lun;
191 		se_lun = deve->se_lun;
192 		se_cmd->pr_res_key = deve->pr_res_key;
193 		se_cmd->orig_fe_lun = unpacked_lun;
194 	}
195 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
196 
197 	if (!se_lun) {
198 		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
199 			" Access for 0x%08x\n",
200 			se_cmd->se_tfo->get_fabric_name(),
201 			unpacked_lun);
202 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
203 		return -ENODEV;
204 	}
205 	/*
206 	 * Determine if the struct se_lun is online.
207 	 * FIXME: Check for LUN_RESET + UNIT Attention
208 	 */
209 	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
210 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
211 		return -ENODEV;
212 	}
213 
214 	/* Directly associate cmd with se_dev */
215 	se_cmd->se_dev = se_lun->lun_se_dev;
216 	se_tmr->tmr_dev = se_lun->lun_se_dev;
217 
218 	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
219 	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
220 	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
221 
222 	return 0;
223 }
224 EXPORT_SYMBOL(transport_lookup_tmr_lun);
225 
226 /*
227  * This function is called from core_scsi3_emulate_pro_register_and_move()
228  * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
229  * when a matching rtpi is found.
230  */
231 struct se_dev_entry *core_get_se_deve_from_rtpi(
232 	struct se_node_acl *nacl,
233 	u16 rtpi)
234 {
235 	struct se_dev_entry *deve;
236 	struct se_lun *lun;
237 	struct se_port *port;
238 	struct se_portal_group *tpg = nacl->se_tpg;
239 	u32 i;
240 
241 	spin_lock_irq(&nacl->device_list_lock);
242 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
243 		deve = nacl->device_list[i];
244 
245 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
246 			continue;
247 
248 		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entry se_lun pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entry se_lun->lun_sep port"
				" pointer is NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
262 		if (port->sep_rtpi != rtpi)
263 			continue;
264 
265 		atomic_inc(&deve->pr_ref_count);
266 		smp_mb__after_atomic_inc();
267 		spin_unlock_irq(&nacl->device_list_lock);
268 
269 		return deve;
270 	}
271 	spin_unlock_irq(&nacl->device_list_lock);
272 
273 	return NULL;
274 }
275 
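/*	core_free_device_list_for_node():
 *
 *	Disable every active MappedLUN entry for @nacl and free the node
 *	ACL's device list array.
 */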
276 int core_free_device_list_for_node(
277 	struct se_node_acl *nacl,
278 	struct se_portal_group *tpg)
279 {
280 	struct se_dev_entry *deve;
281 	struct se_lun *lun;
282 	u32 i;
283 
284 	if (!nacl->device_list)
285 		return 0;
286 
287 	spin_lock_irq(&nacl->device_list_lock);
288 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
289 		deve = nacl->device_list[i];
290 
291 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
292 			continue;
293 
		if (!deve->se_lun) {
			pr_err("%s device entry se_lun pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
300 		lun = deve->se_lun;
301 
302 		spin_unlock_irq(&nacl->device_list_lock);
303 		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
304 			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
305 		spin_lock_irq(&nacl->device_list_lock);
306 	}
307 	spin_unlock_irq(&nacl->device_list_lock);
308 
309 	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
310 	nacl->device_list = NULL;
311 
312 	return 0;
313 }
314 
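/*	core_dec_lacl_count():
 *
 *	Drop the per-entry command count taken in transport_lookup_cmd_lun().
 */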
315 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
316 {
317 	struct se_dev_entry *deve;
318 	unsigned long flags;
319 
320 	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
321 	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
322 	deve->deve_cmds--;
323 	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
324 }
325 
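/*	core_update_device_list_access():
 *
 *	Flip the READ_WRITE/READ_ONLY flags of the existing MappedLUN
 *	entry at @mapped_lun in @nacl->device_list.
 */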
326 void core_update_device_list_access(
327 	u32 mapped_lun,
328 	u32 lun_access,
329 	struct se_node_acl *nacl)
330 {
331 	struct se_dev_entry *deve;
332 
333 	spin_lock_irq(&nacl->device_list_lock);
334 	deve = nacl->device_list[mapped_lun];
335 	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
336 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
337 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
338 	} else {
339 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
340 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
341 	}
342 	spin_unlock_irq(&nacl->device_list_lock);
343 }
344 
/*	core_enable_device_list_for_node():
 *
 *	Create or update the struct se_dev_entry for @mapped_lun in
 *	@nacl->device_list, including the demo mode -> explicit LUN ACL
 *	transition for an already active entry.
 */
349 int core_enable_device_list_for_node(
350 	struct se_lun *lun,
351 	struct se_lun_acl *lun_acl,
352 	u32 mapped_lun,
353 	u32 lun_access,
354 	struct se_node_acl *nacl,
355 	struct se_portal_group *tpg)
356 {
357 	struct se_port *port = lun->lun_sep;
358 	struct se_dev_entry *deve;
359 
360 	spin_lock_irq(&nacl->device_list_lock);
361 
362 	deve = nacl->device_list[mapped_lun];
363 
	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode.
	 */
369 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
370 		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
			       " already set for demo mode -> explicit"
			       " LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does not"
			       " match passed struct se_lun for demo mode"
			       " -> explicit LUN ACL transition\n");
381 			spin_unlock_irq(&nacl->device_list_lock);
382 			return -EINVAL;
383 		}
384 		deve->se_lun_acl = lun_acl;
385 
386 		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
387 			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
388 			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
389 		} else {
390 			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
391 			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
392 		}
393 
394 		spin_unlock_irq(&nacl->device_list_lock);
395 		return 0;
396 	}
397 
398 	deve->se_lun = lun;
399 	deve->se_lun_acl = lun_acl;
400 	deve->mapped_lun = mapped_lun;
401 	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
402 
403 	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
404 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
405 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
406 	} else {
407 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
408 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
409 	}
410 
411 	deve->creation_time = get_jiffies_64();
412 	deve->attach_count++;
413 	spin_unlock_irq(&nacl->device_list_lock);
414 
415 	spin_lock_bh(&port->sep_alua_lock);
416 	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
417 	spin_unlock_bh(&port->sep_alua_lock);
418 
419 	return 0;
420 }
421 
/*	core_disable_device_list_for_node():
 *
 *	Clear the struct se_dev_entry for @mapped_lun in @nacl->device_list,
 *	releasing any active Unit Attentions and PR registrations for it.
 */
426 int core_disable_device_list_for_node(
427 	struct se_lun *lun,
428 	struct se_lun_acl *lun_acl,
429 	u32 mapped_lun,
430 	u32 lun_access,
431 	struct se_node_acl *nacl,
432 	struct se_portal_group *tpg)
433 {
434 	struct se_port *port = lun->lun_sep;
435 	struct se_dev_entry *deve = nacl->device_list[mapped_lun];
436 
437 	/*
438 	 * If the MappedLUN entry is being disabled, the entry in
439 	 * port->sep_alua_list must be removed now before clearing the
440 	 * struct se_dev_entry pointers below as logic in
441 	 * core_alua_do_transition_tg_pt() depends on these being present.
442 	 *
443 	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
444 	 * that have not been explicitly converted to MappedLUNs ->
445 	 * struct se_lun_acl, but we remove deve->alua_port_list from
446 	 * port->sep_alua_list. This also means that active UAs and
447 	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
449 	 */
450 	spin_lock_bh(&port->sep_alua_lock);
451 	list_del(&deve->alua_port_list);
452 	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in-process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
457 	while (atomic_read(&deve->pr_ref_count) != 0)
458 		cpu_relax();
459 
460 	spin_lock_irq(&nacl->device_list_lock);
461 	/*
462 	 * Disable struct se_dev_entry LUN ACL mapping
463 	 */
464 	core_scsi3_ua_release_all(deve);
465 	deve->se_lun = NULL;
466 	deve->se_lun_acl = NULL;
467 	deve->lun_flags = 0;
468 	deve->creation_time = 0;
469 	deve->attach_count--;
470 	spin_unlock_irq(&nacl->device_list_lock);
471 
472 	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
473 	return 0;
474 }
475 
/*	core_clear_lun_from_tpg():
 *
 *	Walk every node ACL in @tpg and disable any MappedLUN entry
 *	that references @lun.
 */
480 void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
481 {
482 	struct se_node_acl *nacl;
483 	struct se_dev_entry *deve;
484 	u32 i;
485 
486 	spin_lock_irq(&tpg->acl_node_lock);
487 	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
488 		spin_unlock_irq(&tpg->acl_node_lock);
489 
490 		spin_lock_irq(&nacl->device_list_lock);
491 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
492 			deve = nacl->device_list[i];
493 			if (lun != deve->se_lun)
494 				continue;
495 			spin_unlock_irq(&nacl->device_list_lock);
496 
497 			core_disable_device_list_for_node(lun, NULL,
498 				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
499 				nacl, tpg);
500 
501 			spin_lock_irq(&nacl->device_list_lock);
502 		}
503 		spin_unlock_irq(&nacl->device_list_lock);
504 
505 		spin_lock_irq(&tpg->acl_node_lock);
506 	}
507 	spin_unlock_irq(&tpg->acl_node_lock);
508 }
509 
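/*	core_alloc_port():
 *
 *	Allocate a struct se_port for @dev and assign it the next unique,
 *	non-zero RELATIVE TARGET PORT IDENTIFIER.
 */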
510 static struct se_port *core_alloc_port(struct se_device *dev)
511 {
512 	struct se_port *port, *port_tmp;
513 
514 	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
515 	if (!port) {
516 		pr_err("Unable to allocate struct se_port\n");
517 		return ERR_PTR(-ENOMEM);
518 	}
519 	INIT_LIST_HEAD(&port->sep_alua_list);
520 	INIT_LIST_HEAD(&port->sep_list);
521 	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
522 	spin_lock_init(&port->sep_alua_lock);
523 	mutex_init(&port->sep_tg_pt_md_mutex);
524 
525 	spin_lock(&dev->se_port_lock);
526 	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count == 0x0000ffff\n");
529 		spin_unlock(&dev->se_port_lock);
530 		return ERR_PTR(-ENOSPC);
531 	}
532 again:
533 	/*
534 	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
535 	 * Here is the table from spc4r17 section 7.7.3.8.
536 	 *
537 	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
538 	 *
539 	 * Code      Description
540 	 * 0h        Reserved
541 	 * 1h        Relative port 1, historically known as port A
542 	 * 2h        Relative port 2, historically known as port B
543 	 * 3h to FFFFh    Relative port 3 through 65 535
544 	 */
545 	port->sep_rtpi = dev->dev_rpti_counter++;
546 	if (!port->sep_rtpi)
547 		goto again;
548 
549 	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * in case of 16-bit counter wrap.
		 */
554 		if (port->sep_rtpi == port_tmp->sep_rtpi)
555 			goto again;
556 	}
557 	spin_unlock(&dev->se_port_lock);
558 
559 	return port;
560 }
561 
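/*	core_export_port():
 *
 *	Link @port to @tpg and @lun, and for SPC-3 ALUA emulation attach
 *	it to the device's default ALUA target port group.
 */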
562 static void core_export_port(
563 	struct se_device *dev,
564 	struct se_portal_group *tpg,
565 	struct se_port *port,
566 	struct se_lun *lun)
567 {
568 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
569 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
570 
571 	spin_lock(&dev->se_port_lock);
572 	spin_lock(&lun->lun_sep_lock);
573 	port->sep_tpg = tpg;
574 	port->sep_lun = lun;
575 	lun->lun_sep = port;
576 	spin_unlock(&lun->lun_sep_lock);
577 
578 	list_add_tail(&port->sep_list, &dev->dev_sep_list);
579 	spin_unlock(&dev->se_port_lock);
580 
581 	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
582 		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
583 		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt_gp_member_t\n");
586 			return;
587 		}
588 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
589 		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
590 			su_dev->t10_alua.default_tg_pt_gp);
591 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
592 		pr_debug("%s/%s: Adding to default ALUA Target Port"
593 			" Group: alua/default_tg_pt_gp\n",
594 			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
595 	}
596 
597 	dev->dev_port_count++;
598 	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
599 }
600 
601 /*
602  *	Called with struct se_device->se_port_lock spinlock held.
603  */
604 static void core_release_port(struct se_device *dev, struct se_port *port)
605 	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
606 {
607 	/*
608 	 * Wait for any port reference for PR ALL_TG_PT=1 operation
609 	 * to complete in __core_scsi3_alloc_registration()
610 	 */
611 	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
614 	spin_lock(&dev->se_port_lock);
615 
616 	core_alua_free_tg_pt_gp_mem(port);
617 
618 	list_del(&port->sep_list);
619 	dev->dev_port_count--;
620 	kfree(port);
621 }
622 
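/*	core_dev_export():
 *
 *	Export @dev to @tpg as @lun: allocate a struct se_port, start the
 *	device, and take a dev_export_obj reference.
 */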
623 int core_dev_export(
624 	struct se_device *dev,
625 	struct se_portal_group *tpg,
626 	struct se_lun *lun)
627 {
628 	struct se_port *port;
629 
630 	port = core_alloc_port(dev);
631 	if (IS_ERR(port))
632 		return PTR_ERR(port);
633 
634 	lun->lun_se_dev = dev;
635 	se_dev_start(dev);
636 
637 	atomic_inc(&dev->dev_export_obj.obj_access_count);
638 	core_export_port(dev, tpg, port, lun);
639 	return 0;
640 }
641 
642 void core_dev_unexport(
643 	struct se_device *dev,
644 	struct se_portal_group *tpg,
645 	struct se_lun *lun)
646 {
647 	struct se_port *port = lun->lun_sep;
648 
649 	spin_lock(&lun->lun_sep_lock);
650 	if (lun->lun_se_dev == NULL) {
651 		spin_unlock(&lun->lun_sep_lock);
652 		return;
653 	}
654 	spin_unlock(&lun->lun_sep_lock);
655 
656 	spin_lock(&dev->se_port_lock);
657 	atomic_dec(&dev->dev_export_obj.obj_access_count);
658 	core_release_port(dev, port);
659 	spin_unlock(&dev->se_port_lock);
660 
661 	se_dev_stop(dev);
662 	lun->lun_se_dev = NULL;
663 }
664 
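/*	target_report_luns():
 *
 *	Emulate the SCSI REPORT LUNS command: an 8-byte header (4-byte
 *	LUN LIST LENGTH plus 4 reserved bytes) followed by one 8-byte
 *	entry per LUN mapped to the requesting initiator.
 */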
665 int target_report_luns(struct se_cmd *se_cmd)
666 {
667 	struct se_dev_entry *deve;
668 	struct se_session *se_sess = se_cmd->se_sess;
669 	unsigned char *buf;
670 	u32 lun_count = 0, offset = 8, i;
671 
672 	if (se_cmd->data_length < 16) {
673 		pr_warn("REPORT LUNS allocation length %u too small\n",
674 			se_cmd->data_length);
675 		se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
676 		return -EINVAL;
677 	}
678 
679 	buf = transport_kmap_data_sg(se_cmd);
680 	if (!buf)
681 		return -ENOMEM;
682 
683 	/*
684 	 * If no struct se_session pointer is present, this struct se_cmd is
685 	 * coming via a target_core_mod PASSTHROUGH op, and not through
686 	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
687 	 */
688 	if (!se_sess) {
689 		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
690 		lun_count = 1;
691 		goto done;
692 	}
693 
694 	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
695 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
696 		deve = se_sess->se_node_acl->device_list[i];
697 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
698 			continue;
		/*
		 * Keep counting so that the reported LUN LIST LENGTH
		 * covers every mapped LUN, even once the entries no
		 * longer fit in the initial allocation length.
		 * See SPC-2 r20 section 7.19.
		 */
704 		lun_count++;
705 		if ((offset + 8) > se_cmd->data_length)
706 			continue;
707 
708 		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
709 		offset += 8;
710 	}
711 	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
712 
	/*
	 * The LUN LIST LENGTH header field carries the size in bytes of
	 * the LUN list (8 bytes per LUN), not the number of LUNs.
	 * See SPC-3 r07, page 159.
	 */
done:
	lun_count *= 8;
718 	buf[0] = ((lun_count >> 24) & 0xff);
719 	buf[1] = ((lun_count >> 16) & 0xff);
720 	buf[2] = ((lun_count >> 8) & 0xff);
721 	buf[3] = (lun_count & 0xff);
722 	transport_kunmap_data_sg(se_cmd);
723 
724 	target_complete_cmd(se_cmd, GOOD);
725 	return 0;
726 }
727 
/*	se_release_device_for_hba():
 *
 *	Stop @dev if it is still active, release the backend device, and
 *	free the remaining struct se_device resources from its HBA.
 */
732 void se_release_device_for_hba(struct se_device *dev)
733 {
734 	struct se_hba *hba = dev->se_hba;
735 
736 	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
737 	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
738 	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
739 	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
740 	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
741 		se_dev_stop(dev);
742 
743 	if (dev->dev_ptr) {
744 		destroy_workqueue(dev->tmr_wq);
745 		if (dev->transport->free_device)
746 			dev->transport->free_device(dev->dev_ptr);
747 	}
748 
749 	spin_lock(&hba->device_lock);
750 	list_del(&dev->dev_list);
751 	hba->dev_count--;
752 	spin_unlock(&hba->device_lock);
753 
754 	core_scsi3_free_all_registrations(dev);
755 	se_release_vpd_for_dev(dev);
756 
757 	kfree(dev);
758 }
759 
760 void se_release_vpd_for_dev(struct se_device *dev)
761 {
762 	struct t10_vpd *vpd, *vpd_tmp;
763 
764 	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
765 	list_for_each_entry_safe(vpd, vpd_tmp,
766 			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
767 		list_del(&vpd->vpd_list);
768 		kfree(vpd);
769 	}
770 	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
771 }
772 
773 /*	se_free_virtual_device():
774  *
775  *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
776  */
777 int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
778 {
779 	if (!list_empty(&dev->dev_sep_list))
780 		dump_stack();
781 
782 	core_alua_free_lu_gp_mem(dev);
783 	se_release_device_for_hba(dev);
784 
785 	return 0;
786 }
787 
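/*	se_dev_start() / se_dev_stop():
 *
 *	Track dev_obj.obj_access_count and move the device between the
 *	(OFFLINE_)ACTIVATED and (OFFLINE_)DEACTIVATED states on first
 *	reference and last release.
 */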
788 static void se_dev_start(struct se_device *dev)
789 {
790 	struct se_hba *hba = dev->se_hba;
791 
792 	spin_lock(&hba->device_lock);
793 	atomic_inc(&dev->dev_obj.obj_access_count);
794 	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
795 		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
796 			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
797 			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
798 		} else if (dev->dev_status &
799 			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
800 			dev->dev_status &=
801 				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
802 			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
803 		}
804 	}
805 	spin_unlock(&hba->device_lock);
806 }
807 
808 static void se_dev_stop(struct se_device *dev)
809 {
810 	struct se_hba *hba = dev->se_hba;
811 
812 	spin_lock(&hba->device_lock);
813 	atomic_dec(&dev->dev_obj.obj_access_count);
814 	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
815 		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
816 			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
817 			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
818 		} else if (dev->dev_status &
819 			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
820 			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
821 			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
822 		}
823 	}
824 	spin_unlock(&hba->device_lock);
825 }
826 
827 int se_dev_check_online(struct se_device *dev)
828 {
829 	unsigned long flags;
830 	int ret;
831 
832 	spin_lock_irqsave(&dev->dev_status_lock, flags);
833 	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
834 	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
835 	spin_unlock_irqrestore(&dev->dev_status_lock, flags);
836 
837 	return ret;
838 }
839 
840 int se_dev_check_shutdown(struct se_device *dev)
841 {
842 	int ret;
843 
844 	spin_lock_irq(&dev->dev_status_lock);
845 	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
846 	spin_unlock_irq(&dev->dev_status_lock);
847 
848 	return ret;
849 }
850 
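/*	se_dev_align_max_sectors():
 *
 *	Round @max_sectors down so that the resulting maximum I/O size
 *	is a PAGE_SIZE aligned number of bytes at the given @block_size.
 */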
851 static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
852 {
853 	u32 tmp, aligned_max_sectors;
854 	/*
855 	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
856 	 * transport_allocate_data_tasks() operation.
857 	 */
858 	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
859 	aligned_max_sectors = (tmp / block_size);
860 	if (max_sectors != aligned_max_sectors) {
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);
863 		return aligned_max_sectors;
864 	}
865 
866 	return max_sectors;
867 }
868 
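/*	se_dev_set_default_attribs():
 *
 *	Seed the device attributes with their DA_* defaults and with the
 *	hardware limits the backend reported in @dev_limits.
 */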
869 void se_dev_set_default_attribs(
870 	struct se_device *dev,
871 	struct se_dev_limits *dev_limits)
872 {
873 	struct queue_limits *limits = &dev_limits->limits;
874 
875 	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
876 	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
877 	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
878 	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
879 	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
880 	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
881 	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
882 	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
883 	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
884 	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
885 	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
886 	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
887 	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
888 	/*
889 	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
890 	 * iblock_create_virtdevice() from struct queue_limits values
891 	 * if blk_queue_discard()==1
892 	 */
893 	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
894 	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
895 		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
896 	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
897 	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
898 				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
899 	/*
900 	 * block_size is based on subsystem plugin dependent requirements.
901 	 */
902 	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
903 	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
904 	/*
905 	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
906 	 */
907 	limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
908 						limits->logical_block_size);
909 	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
910 
911 	/*
912 	 * Set fabric_max_sectors, which is reported in block limits
913 	 * VPD page (B0h).
914 	 */
915 	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
916 	/*
917 	 * Set optimal_sectors from fabric_max_sectors, which can be
918 	 * lowered via configfs.
919 	 */
920 	dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
921 	/*
922 	 * queue_depth is based on subsystem plugin dependent requirements.
923 	 */
924 	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
925 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
926 }
927 
928 int se_dev_set_max_unmap_lba_count(
929 	struct se_device *dev,
930 	u32 max_unmap_lba_count)
931 {
932 	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
933 	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
934 			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
935 	return 0;
936 }
937 
938 int se_dev_set_max_unmap_block_desc_count(
939 	struct se_device *dev,
940 	u32 max_unmap_block_desc_count)
941 {
942 	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
943 		max_unmap_block_desc_count;
944 	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
945 			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
946 	return 0;
947 }
948 
949 int se_dev_set_unmap_granularity(
950 	struct se_device *dev,
951 	u32 unmap_granularity)
952 {
953 	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
954 	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
955 			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
956 	return 0;
957 }
958 
959 int se_dev_set_unmap_granularity_alignment(
960 	struct se_device *dev,
961 	u32 unmap_granularity_alignment)
962 {
963 	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
964 	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
965 			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
966 	return 0;
967 }
968 
969 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
970 {
971 	if (flag != 0 && flag != 1) {
972 		pr_err("Illegal value %d\n", flag);
973 		return -EINVAL;
974 	}
975 
976 	if (flag) {
		pr_err("emulate_dpo not supported\n");
978 		return -EINVAL;
979 	}
980 
981 	return 0;
982 }
983 
984 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
985 {
986 	if (flag != 0 && flag != 1) {
987 		pr_err("Illegal value %d\n", flag);
988 		return -EINVAL;
989 	}
990 
991 	if (flag &&
992 	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
993 		pr_err("emulate_fua_write not supported for pSCSI\n");
994 		return -EINVAL;
995 	}
996 	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
997 	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
998 			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
999 	return 0;
1000 }
1001 
1002 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
1003 {
1004 	if (flag != 0 && flag != 1) {
1005 		pr_err("Illegal value %d\n", flag);
1006 		return -EINVAL;
1007 	}
1008 
1009 	if (flag) {
		pr_err("emulate_fua_read not supported\n");
1011 		return -EINVAL;
1012 	}
1013 
1014 	return 0;
1015 }
1016 
1017 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1018 {
1019 	if (flag != 0 && flag != 1) {
1020 		pr_err("Illegal value %d\n", flag);
1021 		return -EINVAL;
1022 	}
1023 	if (flag &&
1024 	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1025 		pr_err("emulate_write_cache not supported for pSCSI\n");
1026 		return -EINVAL;
1027 	}
1028 	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
1029 	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1030 			dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
1031 	return 0;
1032 }
1033 
1034 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1035 {
1036 	if ((flag != 0) && (flag != 1) && (flag != 2)) {
1037 		pr_err("Illegal value %d\n", flag);
1038 		return -EINVAL;
1039 	}
1040 
1041 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1042 		pr_err("dev[%p]: Unable to change SE Device"
1043 			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
1044 			" exists\n", dev,
1045 			atomic_read(&dev->dev_export_obj.obj_access_count));
1046 		return -EINVAL;
1047 	}
1048 	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
1049 	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1050 		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
1051 
1052 	return 0;
1053 }
1054 
1055 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1056 {
1057 	if ((flag != 0) && (flag != 1)) {
1058 		pr_err("Illegal value %d\n", flag);
1059 		return -EINVAL;
1060 	}
1061 
1062 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1063 		pr_err("dev[%p]: Unable to change SE Device TAS while"
1064 			" dev_export_obj: %d count exists\n", dev,
1065 			atomic_read(&dev->dev_export_obj.obj_access_count));
1066 		return -EINVAL;
1067 	}
1068 	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
1069 	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1070 		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
1071 
1072 	return 0;
1073 }
1074 
1075 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1076 {
1077 	if ((flag != 0) && (flag != 1)) {
1078 		pr_err("Illegal value %d\n", flag);
1079 		return -EINVAL;
1080 	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
1085 	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1086 		pr_err("Generic Block Discard not supported\n");
1087 		return -ENOSYS;
1088 	}
1089 
1090 	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
1091 	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1092 				dev, flag);
1093 	return 0;
1094 }
1095 
1096 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1097 {
1098 	if ((flag != 0) && (flag != 1)) {
1099 		pr_err("Illegal value %d\n", flag);
1100 		return -EINVAL;
1101 	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
1106 	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1107 		pr_err("Generic Block Discard not supported\n");
1108 		return -ENOSYS;
1109 	}
1110 
1111 	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
1112 	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1113 				dev, flag);
1114 	return 0;
1115 }
1116 
1117 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1118 {
1119 	if ((flag != 0) && (flag != 1)) {
1120 		pr_err("Illegal value %d\n", flag);
1121 		return -EINVAL;
1122 	}
1123 	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
1124 	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1125 		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1126 	return 0;
1127 }
1128 
1129 int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1130 {
1131 	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
1133 		return -EINVAL;
1134 	}
1135 	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
1136 	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
1137 	       dev, flag);
1138 	return 0;
1139 }
1140 
1141 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1142 {
1143 	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
1146 		return -ENOSYS;
1147 	}
1148 	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
1149 	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1150 	return 0;
1151 }
1152 
1153 /*
1154  * Note, this can only be called on unexported SE Device Object.
1155  */
1156 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1157 {
1158 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1159 		pr_err("dev[%p]: Unable to change SE Device TCQ while"
1160 			" dev_export_obj: %d count exists\n", dev,
1161 			atomic_read(&dev->dev_export_obj.obj_access_count));
1162 		return -EINVAL;
1163 	}
1164 	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
1167 		return -EINVAL;
1168 	}
1169 
1170 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1171 		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1172 			pr_err("dev[%p]: Passed queue_depth: %u"
1173 				" exceeds TCM/SE_Device TCQ: %u\n",
1174 				dev, queue_depth,
1175 				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
1176 			return -EINVAL;
1177 		}
1178 	} else {
1179 		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
1180 			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1181 				pr_err("dev[%p]: Passed queue_depth:"
1182 					" %u exceeds TCM/SE_Device MAX"
1183 					" TCQ: %u\n", dev, queue_depth,
1184 					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
1185 				return -EINVAL;
1186 			}
1187 		}
1188 	}
1189 
1190 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1191 	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1192 			dev, queue_depth);
1193 	return 0;
1194 }
1195 
1196 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1197 {
1198 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1199 		pr_err("dev[%p]: Unable to change SE Device"
1200 			" fabric_max_sectors while dev_export_obj: %d count exists\n",
1201 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1202 		return -EINVAL;
1203 	}
1204 	if (!fabric_max_sectors) {
1205 		pr_err("dev[%p]: Illegal ZERO value for"
1206 			" fabric_max_sectors\n", dev);
1207 		return -EINVAL;
1208 	}
1209 	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1210 		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
1211 			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
1212 				DA_STATUS_MAX_SECTORS_MIN);
1213 		return -EINVAL;
1214 	}
1215 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1216 		if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
1217 			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1218 				" greater than TCM/SE_Device max_sectors:"
1219 				" %u\n", dev, fabric_max_sectors,
1220 				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
1222 		}
1223 	} else {
1224 		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1225 			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1226 				" greater than DA_STATUS_MAX_SECTORS_MAX:"
1227 				" %u\n", dev, fabric_max_sectors,
1228 				DA_STATUS_MAX_SECTORS_MAX);
1229 			return -EINVAL;
1230 		}
1231 	}
1232 	/*
1233 	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1234 	 */
1235 	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1236 						      dev->se_sub_dev->se_dev_attrib.block_size);
1237 
1238 	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
1239 	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1240 			dev, fabric_max_sectors);
1241 	return 0;
1242 }
1243 
1244 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1245 {
1246 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1247 		pr_err("dev[%p]: Unable to change SE Device"
1248 			" optimal_sectors while dev_export_obj: %d count exists\n",
1249 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1250 		return -EINVAL;
1251 	}
1252 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1253 		pr_err("dev[%p]: Passed optimal_sectors cannot be"
1254 				" changed for TCM/pSCSI\n", dev);
1255 		return -EINVAL;
1256 	}
1257 	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
1258 		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1259 			" greater than fabric_max_sectors: %u\n", dev,
1260 			optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
1261 		return -EINVAL;
1262 	}
1263 
1264 	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
1265 	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1266 			dev, optimal_sectors);
1267 	return 0;
1268 }
1269 
1270 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1271 {
1272 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1273 		pr_err("dev[%p]: Unable to change SE Device block_size"
1274 			" while dev_export_obj: %d count exists\n", dev,
1275 			atomic_read(&dev->dev_export_obj.obj_access_count));
1276 		return -EINVAL;
1277 	}
1278 
1279 	if ((block_size != 512) &&
1280 	    (block_size != 1024) &&
1281 	    (block_size != 2048) &&
1282 	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
1286 		return -EINVAL;
1287 	}
1288 
1289 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device; use Linux/SCSI to change the"
			" block_size of the underlying hardware\n", dev);
1293 		return -EINVAL;
1294 	}
1295 
1296 	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
1297 	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1298 			dev, block_size);
1299 	return 0;
1300 }
1301 
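/*	core_dev_add_lun():
 *
 *	Export @dev as @lun within @tpg, and update the LUN maps of any
 *	dynamically generated node ACLs when demo mode is enabled.
 */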
1302 struct se_lun *core_dev_add_lun(
1303 	struct se_portal_group *tpg,
1304 	struct se_device *dev,
1305 	u32 lun)
1306 {
1307 	struct se_lun *lun_p;
1308 	int rc;
1309 
1310 	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1311 		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
1312 			atomic_read(&dev->dev_access_obj.obj_access_count));
1313 		return ERR_PTR(-EACCES);
1314 	}
1315 
1316 	lun_p = core_tpg_pre_addlun(tpg, lun);
1317 	if (IS_ERR(lun_p))
1318 		return lun_p;
1319 
1320 	rc = core_tpg_post_addlun(tpg, lun_p,
1321 				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
1322 	if (rc < 0)
1323 		return ERR_PTR(rc);
1324 
1325 	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1326 		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1327 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
1328 		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
1329 	/*
1330 	 * Update LUN maps for dynamically added initiators when
1331 	 * generate_node_acl is enabled.
1332 	 */
1333 	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1334 		struct se_node_acl *acl;
1335 		spin_lock_irq(&tpg->acl_node_lock);
1336 		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1337 			if (acl->dynamic_node_acl &&
1338 			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1339 			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1340 				spin_unlock_irq(&tpg->acl_node_lock);
1341 				core_tpg_add_node_to_devs(acl, tpg);
1342 				spin_lock_irq(&tpg->acl_node_lock);
1343 			}
1344 		}
1345 		spin_unlock_irq(&tpg->acl_node_lock);
1346 	}
1347 
1348 	return lun_p;
1349 }
1350 
/*	core_dev_del_lun():
 *
 *	Deactivate and release the struct se_lun at @unpacked_lun
 *	from @tpg.
 */
1355 int core_dev_del_lun(
1356 	struct se_portal_group *tpg,
1357 	u32 unpacked_lun)
1358 {
1359 	struct se_lun *lun;
1360 
1361 	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1362 	if (IS_ERR(lun))
1363 		return PTR_ERR(lun);
1364 
1365 	core_tpg_post_dellun(tpg, lun);
1366 
1367 	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1368 		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1369 		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1370 		tpg->se_tpg_tfo->get_fabric_name());
1371 
1372 	return 0;
1373 }
1374 
1375 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1376 {
1377 	struct se_lun *lun;
1378 
1379 	spin_lock(&tpg->tpg_lun_lock);
1380 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG-1:"
			" %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1386 		spin_unlock(&tpg->tpg_lun_lock);
1387 		return NULL;
1388 	}
1389 	lun = tpg->tpg_lun_list[unpacked_lun];
1390 
1391 	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1392 		pr_err("%s Logical Unit Number: %u is not free on"
1393 			" Target Portal Group: %hu, ignoring request.\n",
1394 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1395 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1396 		spin_unlock(&tpg->tpg_lun_lock);
1397 		return NULL;
1398 	}
1399 	spin_unlock(&tpg->tpg_lun_lock);
1400 
1401 	return lun;
1402 }
1403 
/*	core_dev_get_lun():
 *
 *	Return the active struct se_lun at @unpacked_lun in @tpg, or
 *	NULL if the LUN is out of range or not in the ACTIVE state.
 */
1408 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1409 {
1410 	struct se_lun *lun;
1411 
1412 	spin_lock(&tpg->tpg_lun_lock);
1413 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG-1:"
			" %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1419 		spin_unlock(&tpg->tpg_lun_lock);
1420 		return NULL;
1421 	}
1422 	lun = tpg->tpg_lun_list[unpacked_lun];
1423 
1424 	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1425 		pr_err("%s Logical Unit Number: %u is not active on"
1426 			" Target Portal Group: %hu, ignoring request.\n",
1427 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1428 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1429 		spin_unlock(&tpg->tpg_lun_lock);
1430 		return NULL;
1431 	}
1432 	spin_unlock(&tpg->tpg_lun_lock);
1433 
1434 	return lun;
1435 }
1436 
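/*	core_dev_init_initiator_node_lun_acl():
 *
 *	Allocate a struct se_lun_acl for @initiatorname at @mapped_lun,
 *	returning NULL and an errno through @ret on failure.
 */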
1437 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1438 	struct se_portal_group *tpg,
1439 	u32 mapped_lun,
1440 	char *initiatorname,
1441 	int *ret)
1442 {
1443 	struct se_lun_acl *lacl;
1444 	struct se_node_acl *nacl;
1445 
1446 	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1447 		pr_err("%s InitiatorName exceeds maximum size.\n",
1448 			tpg->se_tpg_tfo->get_fabric_name());
1449 		*ret = -EOVERFLOW;
1450 		return NULL;
1451 	}
1452 	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1453 	if (!nacl) {
1454 		*ret = -EINVAL;
1455 		return NULL;
1456 	}
1457 	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1458 	if (!lacl) {
1459 		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1460 		*ret = -ENOMEM;
1461 		return NULL;
1462 	}
1463 
1464 	INIT_LIST_HEAD(&lacl->lacl_list);
1465 	lacl->mapped_lun = mapped_lun;
1466 	lacl->se_lun_nacl = nacl;
1467 	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
1468 
1469 	return lacl;
1470 }
1471 
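/*	core_dev_add_initiator_node_lun_acl():
 *
 *	Attach @lacl to the active struct se_lun at @unpacked_lun,
 *	demoting the requested access to read-only when the LUN itself
 *	is read-only, and enable any matching APTPL PR pre-registrations.
 */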
1472 int core_dev_add_initiator_node_lun_acl(
1473 	struct se_portal_group *tpg,
1474 	struct se_lun_acl *lacl,
1475 	u32 unpacked_lun,
1476 	u32 lun_access)
1477 {
1478 	struct se_lun *lun;
1479 	struct se_node_acl *nacl;
1480 
1481 	lun = core_dev_get_lun(tpg, unpacked_lun);
1482 	if (!lun) {
1483 		pr_err("%s Logical Unit Number: %u is not active on"
1484 			" Target Portal Group: %hu, ignoring request.\n",
1485 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1486 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1487 		return -EINVAL;
1488 	}
1489 
1490 	nacl = lacl->se_lun_nacl;
1491 	if (!nacl)
1492 		return -EINVAL;
1493 
1494 	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1495 	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1496 		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1497 
1498 	lacl->se_lun = lun;
1499 
1500 	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
1501 			lun_access, nacl, tpg) < 0)
1502 		return -EINVAL;
1503 
1504 	spin_lock(&lun->lun_acl_lock);
1505 	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1506 	atomic_inc(&lun->lun_acl_count);
1507 	smp_mb__after_atomic_inc();
1508 	spin_unlock(&lun->lun_acl_lock);
1509 
	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1512 		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1513 		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1514 		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation
	 * APTPL pre-registrations that need to be enabled for this LUN ACL.
	 */
1519 	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
1520 	return 0;
1521 }
1522 
/*	core_dev_del_initiator_node_lun_acl():
 *
 *	Remove @lacl from @lun and disable the matching MappedLUN entry
 *	in the owning node ACL.
 */
1527 int core_dev_del_initiator_node_lun_acl(
1528 	struct se_portal_group *tpg,
1529 	struct se_lun *lun,
1530 	struct se_lun_acl *lacl)
1531 {
1532 	struct se_node_acl *nacl;
1533 
1534 	nacl = lacl->se_lun_nacl;
1535 	if (!nacl)
1536 		return -EINVAL;
1537 
1538 	spin_lock(&lun->lun_acl_lock);
1539 	list_del(&lacl->lacl_list);
1540 	atomic_dec(&lun->lun_acl_count);
1541 	smp_mb__after_atomic_dec();
1542 	spin_unlock(&lun->lun_acl_lock);
1543 
1544 	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
1545 		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
1546 
1547 	lacl->se_lun = NULL;
1548 
1549 	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1550 		" InitiatorNode: %s Mapped LUN: %u\n",
1551 		tpg->se_tpg_tfo->get_fabric_name(),
1552 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1553 		lacl->initiatorname, lacl->mapped_lun);
1554 
1555 	return 0;
1556 }
1557 
1558 void core_dev_free_initiator_node_lun_acl(
1559 	struct se_portal_group *tpg,
1560 	struct se_lun_acl *lacl)
1561 {
1562 	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1563 		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1564 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1565 		tpg->se_tpg_tfo->get_fabric_name(),
1566 		lacl->initiatorname, lacl->mapped_lun);
1567 
1568 	kfree(lacl);
1569 }
1570 
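/*	core_dev_setup_virtual_lun0():
 *
 *	Create the internal rd_mcp ramdisk device (rd_pages=8) that backs
 *	the per-TPG tpg_virt_lun0, so REPORT LUNS and friends can still be
 *	serviced when no MappedLUN=0 exists for an initiator port.
 */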
1571 int core_dev_setup_virtual_lun0(void)
1572 {
1573 	struct se_hba *hba;
1574 	struct se_device *dev;
1575 	struct se_subsystem_dev *se_dev = NULL;
1576 	struct se_subsystem_api *t;
1577 	char buf[16];
1578 	int ret;
1579 
1580 	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1581 	if (IS_ERR(hba))
1582 		return PTR_ERR(hba);
1583 
1584 	lun0_hba = hba;
1585 	t = hba->transport;
1586 
1587 	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1588 	if (!se_dev) {
1589 		pr_err("Unable to allocate memory for"
1590 				" struct se_subsystem_dev\n");
1591 		ret = -ENOMEM;
1592 		goto out;
1593 	}
1594 	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1595 	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1596 	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
1597 	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
1598 	spin_lock_init(&se_dev->t10_pr.registration_lock);
1599 	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
1600 	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
1601 	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
1602 	spin_lock_init(&se_dev->se_dev_lock);
1603 	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1604 	se_dev->t10_wwn.t10_sub_dev = se_dev;
1605 	se_dev->t10_alua.t10_sub_dev = se_dev;
1606 	se_dev->se_dev_attrib.da_sub_dev = se_dev;
1607 	se_dev->se_dev_hba = hba;
1608 
1609 	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1610 	if (!se_dev->se_dev_su_ptr) {
1611 		pr_err("Unable to locate subsystem dependent pointer"
1612 			" from allocate_virtdevice()\n");
1613 		ret = -ENOMEM;
1614 		goto out;
1615 	}
1616 	lun0_su_dev = se_dev;
1617 
1618 	memset(buf, 0, 16);
1619 	sprintf(buf, "rd_pages=8");
1620 	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
1621 
1622 	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1623 	if (IS_ERR(dev)) {
1624 		ret = PTR_ERR(dev);
1625 		goto out;
1626 	}
1627 	se_dev->se_dev_ptr = dev;
1628 	g_lun0_dev = dev;
1629 
1630 	return 0;
1631 out:
1632 	lun0_su_dev = NULL;
1633 	kfree(se_dev);
1634 	if (lun0_hba) {
1635 		core_delete_hba(lun0_hba);
1636 		lun0_hba = NULL;
1637 	}
1638 	return ret;
1639 }
1640 
1642 void core_dev_release_virtual_lun0(void)
1643 {
1644 	struct se_hba *hba = lun0_hba;
1645 	struct se_subsystem_dev *su_dev = lun0_su_dev;
1646 
1647 	if (!hba)
1648 		return;
1649 
1650 	if (g_lun0_dev)
1651 		se_free_virtual_device(g_lun0_dev, hba);
1652 
1653 	kfree(su_dev);
1654 	core_delete_hba(hba);
1655 }
1656