1 /*******************************************************************************
2  * Filename:  target_core_device.c (based on iscsi_target_device.c)
3  *
4  * This file contains the TCM Virtual Device and Disk Transport
5  * agnostic related functions.
6  *
7  * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8  * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
9  * Copyright (c) 2007-2010 Rising Tide Systems
10  * Copyright (c) 2008-2010 Linux-iSCSI.org
11  *
12  * Nicholas A. Bellinger <nab@kernel.org>
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2 of the License, or
17  * (at your option) any later version.
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with this program; if not, write to the Free Software
26  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27  *
28  ******************************************************************************/
29 
30 #include <linux/net.h>
31 #include <linux/string.h>
32 #include <linux/delay.h>
33 #include <linux/timer.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/kthread.h>
37 #include <linux/in.h>
38 #include <linux/export.h>
39 #include <net/sock.h>
40 #include <net/tcp.h>
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_device.h>
43 
44 #include <target/target_core_base.h>
45 #include <target/target_core_backend.h>
46 #include <target/target_core_fabric.h>
47 
48 #include "target_core_internal.h"
49 #include "target_core_alua.h"
50 #include "target_core_pr.h"
51 #include "target_core_ua.h"
52 
53 static void se_dev_start(struct se_device *dev);
54 static void se_dev_stop(struct se_device *dev);
55 
56 static struct se_hba *lun0_hba;
57 static struct se_subsystem_dev *lun0_su_dev;
58 /* not static, needed by tpg.c */
59 struct se_device *g_lun0_dev;
60 
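/*
 * transport_lookup_cmd_lun - map an I/O descriptor onto a MappedLUN
 * @se_cmd: command descriptor to associate with a LUN
 * @unpacked_lun: LUN requested by the initiator port
 *
 * Resolves @unpacked_lun through the session's NodeACL device_list,
 * enforces READ_ONLY MappedLUNs, falls back to the TPG's virtual LUN 0
 * when no MappedLUN=0 exists, and associates @se_cmd with the backing
 * struct se_device.  Returns 0 on success, or a negative errno after
 * setting se_cmd->scsi_sense_reason on failure.
 */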
61 int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
62 {
63 	struct se_lun *se_lun = NULL;
64 	struct se_session *se_sess = se_cmd->se_sess;
65 	struct se_device *dev;
66 	unsigned long flags;
67 
68 	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
69 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
70 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
71 		return -ENODEV;
72 	}
73 
74 	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
75 	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
76 	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
77 		struct se_dev_entry *deve = se_cmd->se_deve;
78 
79 		deve->total_cmds++;
80 		deve->total_bytes += se_cmd->data_length;
81 
82 		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
83 		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
84 			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
85 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
86 			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
87 				" Access for 0x%08x\n",
88 				se_cmd->se_tfo->get_fabric_name(),
89 				unpacked_lun);
90 			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
91 			return -EACCES;
92 		}
93 
94 		if (se_cmd->data_direction == DMA_TO_DEVICE)
95 			deve->write_bytes += se_cmd->data_length;
96 		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
97 			deve->read_bytes += se_cmd->data_length;
98 
99 		deve->deve_cmds++;
100 
101 		se_lun = deve->se_lun;
102 		se_cmd->se_lun = deve->se_lun;
103 		se_cmd->pr_res_key = deve->pr_res_key;
104 		se_cmd->orig_fe_lun = unpacked_lun;
105 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
106 	}
107 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
108 
109 	if (!se_lun) {
110 		/*
111 		 * Use the se_portal_group->tpg_virt_lun0 to allow for
112 		 * REPORT_LUNS, et al to be returned when no active
113 		 * MappedLUN=0 exists for this Initiator Port.
114 		 */
115 		if (unpacked_lun != 0) {
116 			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
117 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
118 			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
119 				" Access for 0x%08x\n",
120 				se_cmd->se_tfo->get_fabric_name(),
121 				unpacked_lun);
122 			return -ENODEV;
123 		}
124 		/*
125 		 * Force WRITE PROTECT for virtual LUN 0
126 		 */
127 		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
128 		    (se_cmd->data_direction != DMA_NONE)) {
129 			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
130 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
131 			return -EACCES;
132 		}
133 
134 		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
135 		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
136 		se_cmd->orig_fe_lun = 0;
137 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
138 	}
139 	/*
140 	 * Determine if the struct se_lun is online.
141 	 * FIXME: Check for LUN_RESET + UNIT Attention
142 	 */
143 	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
144 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
145 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
146 		return -ENODEV;
147 	}
148 
149 	/* Directly associate cmd with se_dev */
150 	se_cmd->se_dev = se_lun->lun_se_dev;
151 
152 	/* TODO: get rid of this and use atomics for stats */
153 	dev = se_lun->lun_se_dev;
154 	spin_lock_irqsave(&dev->stats_lock, flags);
155 	dev->num_cmds++;
156 	if (se_cmd->data_direction == DMA_TO_DEVICE)
157 		dev->write_bytes += se_cmd->data_length;
158 	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
159 		dev->read_bytes += se_cmd->data_length;
160 	spin_unlock_irqrestore(&dev->stats_lock, flags);
161 
162 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
163 	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
164 	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
165 
166 	return 0;
167 }
168 EXPORT_SYMBOL(transport_lookup_cmd_lun);
169 
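/*
 * transport_lookup_tmr_lun - map a task management request onto a MappedLUN
 * @se_cmd: descriptor carrying the struct se_tmr_req
 * @unpacked_lun: LUN requested by the initiator port
 *
 * Like transport_lookup_cmd_lun(), but for task management requests:
 * no per-device statistics are updated, there is no virtual LUN 0
 * fallback, and the se_tmr_req is added to the device's dev_tmr_list.
 * Returns 0 on success or -ENODEV.
 */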
170 int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
171 {
172 	struct se_dev_entry *deve;
173 	struct se_lun *se_lun = NULL;
174 	struct se_session *se_sess = se_cmd->se_sess;
175 	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
176 	unsigned long flags;
177 
178 	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
179 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
180 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
181 		return -ENODEV;
182 	}
183 
184 	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
185 	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
186 	deve = se_cmd->se_deve;
187 
188 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
189 		se_tmr->tmr_lun = deve->se_lun;
190 		se_cmd->se_lun = deve->se_lun;
191 		se_lun = deve->se_lun;
192 		se_cmd->pr_res_key = deve->pr_res_key;
193 		se_cmd->orig_fe_lun = unpacked_lun;
194 	}
195 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
196 
197 	if (!se_lun) {
198 		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
199 			" Access for 0x%08x\n",
200 			se_cmd->se_tfo->get_fabric_name(),
201 			unpacked_lun);
202 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
203 		return -ENODEV;
204 	}
205 	/*
206 	 * Determine if the struct se_lun is online.
207 	 * FIXME: Check for LUN_RESET + UNIT Attention
208 	 */
209 	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
210 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
211 		return -ENODEV;
212 	}
213 
214 	/* Directly associate cmd with se_dev */
215 	se_cmd->se_dev = se_lun->lun_se_dev;
216 	se_tmr->tmr_dev = se_lun->lun_se_dev;
217 
218 	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
219 	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
220 	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
221 
222 	return 0;
223 }
224 EXPORT_SYMBOL(transport_lookup_tmr_lun);
225 
226 /*
227  * This function is called from core_scsi3_emulate_pro_register_and_move()
228  * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
229  * when a matching rtpi is found.
230  */
231 struct se_dev_entry *core_get_se_deve_from_rtpi(
232 	struct se_node_acl *nacl,
233 	u16 rtpi)
234 {
235 	struct se_dev_entry *deve;
236 	struct se_lun *lun;
237 	struct se_port *port;
238 	struct se_portal_group *tpg = nacl->se_tpg;
239 	u32 i;
240 
241 	spin_lock_irq(&nacl->device_list_lock);
242 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
243 		deve = nacl->device_list[i];
244 
245 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
246 			continue;
247 
248 		lun = deve->se_lun;
249 		if (!lun) {
250 			pr_err("%s device entries device pointer is"
251 				" NULL, but Initiator has access.\n",
252 				tpg->se_tpg_tfo->get_fabric_name());
253 			continue;
254 		}
255 		port = lun->lun_sep;
256 		if (!port) {
257 			pr_err("%s device entries device pointer is"
258 				" NULL, but Initiator has access.\n",
259 				tpg->se_tpg_tfo->get_fabric_name());
260 			continue;
261 		}
262 		if (port->sep_rtpi != rtpi)
263 			continue;
264 
265 		atomic_inc(&deve->pr_ref_count);
266 		smp_mb__after_atomic_inc();
267 		spin_unlock_irq(&nacl->device_list_lock);
268 
269 		return deve;
270 	}
271 	spin_unlock_irq(&nacl->device_list_lock);
272 
273 	return NULL;
274 }
275 
276 int core_free_device_list_for_node(
277 	struct se_node_acl *nacl,
278 	struct se_portal_group *tpg)
279 {
280 	struct se_dev_entry *deve;
281 	struct se_lun *lun;
282 	u32 i;
283 
284 	if (!nacl->device_list)
285 		return 0;
286 
287 	spin_lock_irq(&nacl->device_list_lock);
288 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
289 		deve = nacl->device_list[i];
290 
291 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
292 			continue;
293 
294 		if (!deve->se_lun) {
295 			pr_err("%s device entries device pointer is"
296 				" NULL, but Initiator has access.\n",
297 				tpg->se_tpg_tfo->get_fabric_name());
298 			continue;
299 		}
300 		lun = deve->se_lun;
301 
302 		spin_unlock_irq(&nacl->device_list_lock);
303 		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
304 			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
305 		spin_lock_irq(&nacl->device_list_lock);
306 	}
307 	spin_unlock_irq(&nacl->device_list_lock);
308 
309 	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
310 	nacl->device_list = NULL;
311 
312 	return 0;
313 }
314 
315 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
316 {
317 	struct se_dev_entry *deve;
318 	unsigned long flags;
319 
320 	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
321 	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
322 	deve->deve_cmds--;
323 	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
324 }
325 
326 void core_update_device_list_access(
327 	u32 mapped_lun,
328 	u32 lun_access,
329 	struct se_node_acl *nacl)
330 {
331 	struct se_dev_entry *deve;
332 
333 	spin_lock_irq(&nacl->device_list_lock);
334 	deve = nacl->device_list[mapped_lun];
335 	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
336 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
337 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
338 	} else {
339 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
340 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
341 	}
342 	spin_unlock_irq(&nacl->device_list_lock);
343 }
344 
/*      core_update_device_list_for_node():
 *
 *	Enable (@enable=1) or disable a MappedLUN -> struct se_dev_entry
 *	mapping for @nacl, updating the READ_ONLY/READ_WRITE flags and
 *	the port's ALUA membership list along the way.
 */
349 int core_update_device_list_for_node(
350 	struct se_lun *lun,
351 	struct se_lun_acl *lun_acl,
352 	u32 mapped_lun,
353 	u32 lun_access,
354 	struct se_node_acl *nacl,
355 	struct se_portal_group *tpg,
356 	int enable)
357 {
358 	struct se_port *port = lun->lun_sep;
359 	struct se_dev_entry *deve = nacl->device_list[mapped_lun];
360 	int trans = 0;
361 	/*
362 	 * If the MappedLUN entry is being disabled, the entry in
363 	 * port->sep_alua_list must be removed now before clearing the
364 	 * struct se_dev_entry pointers below as logic in
365 	 * core_alua_do_transition_tg_pt() depends on these being present.
366 	 */
367 	if (!enable) {
368 		/*
369 		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list. This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below.
375 		 */
376 		spin_lock_bh(&port->sep_alua_lock);
377 		list_del(&deve->alua_port_list);
378 		spin_unlock_bh(&port->sep_alua_lock);
379 	}
380 
381 	spin_lock_irq(&nacl->device_list_lock);
382 	if (enable) {
383 		/*
		 * Check if the call is handling a demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct se_lun
		 * + mapped_lun that was set up in demo mode.
387 		 */
388 		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
389 			if (deve->se_lun_acl != NULL) {
390 				pr_err("struct se_dev_entry->se_lun_acl"
391 					" already set for demo mode -> explict"
392 					" LUN ACL transition\n");
393 				spin_unlock_irq(&nacl->device_list_lock);
394 				return -EINVAL;
395 			}
396 			if (deve->se_lun != lun) {
397 				pr_err("struct se_dev_entry->se_lun does"
398 					" match passed struct se_lun for demo mode"
399 					" -> explict LUN ACL transition\n");
400 				spin_unlock_irq(&nacl->device_list_lock);
401 				return -EINVAL;
402 			}
403 			deve->se_lun_acl = lun_acl;
404 			trans = 1;
405 		} else {
406 			deve->se_lun = lun;
407 			deve->se_lun_acl = lun_acl;
408 			deve->mapped_lun = mapped_lun;
409 			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
410 		}
411 
412 		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
413 			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
414 			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
415 		} else {
416 			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
417 			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
418 		}
419 
420 		if (trans) {
421 			spin_unlock_irq(&nacl->device_list_lock);
422 			return 0;
423 		}
424 		deve->creation_time = get_jiffies_64();
425 		deve->attach_count++;
426 		spin_unlock_irq(&nacl->device_list_lock);
427 
428 		spin_lock_bh(&port->sep_alua_lock);
429 		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
430 		spin_unlock_bh(&port->sep_alua_lock);
431 
432 		return 0;
433 	}
434 	/*
435 	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
436 	 * PR operation to complete.
437 	 */
438 	spin_unlock_irq(&nacl->device_list_lock);
439 	while (atomic_read(&deve->pr_ref_count) != 0)
440 		cpu_relax();
441 	spin_lock_irq(&nacl->device_list_lock);
442 	/*
443 	 * Disable struct se_dev_entry LUN ACL mapping
444 	 */
445 	core_scsi3_ua_release_all(deve);
446 	deve->se_lun = NULL;
447 	deve->se_lun_acl = NULL;
448 	deve->lun_flags = 0;
449 	deve->creation_time = 0;
450 	deve->attach_count--;
451 	spin_unlock_irq(&nacl->device_list_lock);
452 
453 	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
454 	return 0;
455 }
456 
/*      core_clear_lun_from_tpg():
 *
 *	Disable every NodeACL MappedLUN in @tpg that references @lun.
 */
461 void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
462 {
463 	struct se_node_acl *nacl;
464 	struct se_dev_entry *deve;
465 	u32 i;
466 
467 	spin_lock_irq(&tpg->acl_node_lock);
468 	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
469 		spin_unlock_irq(&tpg->acl_node_lock);
470 
471 		spin_lock_irq(&nacl->device_list_lock);
472 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
473 			deve = nacl->device_list[i];
474 			if (lun != deve->se_lun)
475 				continue;
476 			spin_unlock_irq(&nacl->device_list_lock);
477 
478 			core_update_device_list_for_node(lun, NULL,
479 				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
480 				nacl, tpg, 0);
481 
482 			spin_lock_irq(&nacl->device_list_lock);
483 		}
484 		spin_unlock_irq(&nacl->device_list_lock);
485 
486 		spin_lock_irq(&tpg->acl_node_lock);
487 	}
488 	spin_unlock_irq(&tpg->acl_node_lock);
489 }
490 
491 static struct se_port *core_alloc_port(struct se_device *dev)
492 {
493 	struct se_port *port, *port_tmp;
494 
495 	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
496 	if (!port) {
497 		pr_err("Unable to allocate struct se_port\n");
498 		return ERR_PTR(-ENOMEM);
499 	}
500 	INIT_LIST_HEAD(&port->sep_alua_list);
501 	INIT_LIST_HEAD(&port->sep_list);
502 	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
503 	spin_lock_init(&port->sep_alua_lock);
504 	mutex_init(&port->sep_tg_pt_md_mutex);
505 
506 	spin_lock(&dev->se_port_lock);
507 	if (dev->dev_port_count == 0x0000ffff) {
508 		pr_warn("Reached dev->dev_port_count =="
509 				" 0x0000ffff\n");
510 		spin_unlock(&dev->se_port_lock);
511 		return ERR_PTR(-ENOSPC);
512 	}
513 again:
514 	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
516 	 * Here is the table from spc4r17 section 7.7.3.8.
517 	 *
518 	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
519 	 *
520 	 * Code      Description
521 	 * 0h        Reserved
522 	 * 1h        Relative port 1, historically known as port A
523 	 * 2h        Relative port 2, historically known as port B
524 	 * 3h to FFFFh    Relative port 3 through 65 535
525 	 */
526 	port->sep_rtpi = dev->dev_rpti_counter++;
527 	if (!port->sep_rtpi)
528 		goto again;
529 
530 	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
531 		/*
		 * Make sure the RELATIVE TARGET PORT IDENTIFIER is unique
		 * across a 16-bit wrap.
534 		 */
535 		if (port->sep_rtpi == port_tmp->sep_rtpi)
536 			goto again;
537 	}
538 	spin_unlock(&dev->se_port_lock);
539 
540 	return port;
541 }
542 
543 static void core_export_port(
544 	struct se_device *dev,
545 	struct se_portal_group *tpg,
546 	struct se_port *port,
547 	struct se_lun *lun)
548 {
549 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
550 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
551 
552 	spin_lock(&dev->se_port_lock);
553 	spin_lock(&lun->lun_sep_lock);
554 	port->sep_tpg = tpg;
555 	port->sep_lun = lun;
556 	lun->lun_sep = port;
557 	spin_unlock(&lun->lun_sep_lock);
558 
559 	list_add_tail(&port->sep_list, &dev->dev_sep_list);
560 	spin_unlock(&dev->se_port_lock);
561 
562 	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
563 		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
564 		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
565 			pr_err("Unable to allocate t10_alua_tg_pt"
566 					"_gp_member_t\n");
567 			return;
568 		}
569 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
570 		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
571 			su_dev->t10_alua.default_tg_pt_gp);
572 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
573 		pr_debug("%s/%s: Adding to default ALUA Target Port"
574 			" Group: alua/default_tg_pt_gp\n",
575 			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
576 	}
577 
578 	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
580 }
581 
582 /*
583  *	Called with struct se_device->se_port_lock spinlock held.
584  */
585 static void core_release_port(struct se_device *dev, struct se_port *port)
586 	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
587 {
588 	/*
589 	 * Wait for any port reference for PR ALL_TG_PT=1 operation
590 	 * to complete in __core_scsi3_alloc_registration()
591 	 */
592 	spin_unlock(&dev->se_port_lock);
593 	if (atomic_read(&port->sep_tg_pt_ref_cnt))
594 		cpu_relax();
595 	spin_lock(&dev->se_port_lock);
596 
597 	core_alua_free_tg_pt_gp_mem(port);
598 
599 	list_del(&port->sep_list);
600 	dev->dev_port_count--;
601 	kfree(port);
602 }
603 
604 int core_dev_export(
605 	struct se_device *dev,
606 	struct se_portal_group *tpg,
607 	struct se_lun *lun)
608 {
609 	struct se_port *port;
610 
611 	port = core_alloc_port(dev);
612 	if (IS_ERR(port))
613 		return PTR_ERR(port);
614 
615 	lun->lun_se_dev = dev;
616 	se_dev_start(dev);
617 
618 	atomic_inc(&dev->dev_export_obj.obj_access_count);
619 	core_export_port(dev, tpg, port, lun);
620 	return 0;
621 }
622 
623 void core_dev_unexport(
624 	struct se_device *dev,
625 	struct se_portal_group *tpg,
626 	struct se_lun *lun)
627 {
628 	struct se_port *port = lun->lun_sep;
629 
630 	spin_lock(&lun->lun_sep_lock);
631 	if (lun->lun_se_dev == NULL) {
632 		spin_unlock(&lun->lun_sep_lock);
633 		return;
634 	}
635 	spin_unlock(&lun->lun_sep_lock);
636 
637 	spin_lock(&dev->se_port_lock);
638 	atomic_dec(&dev->dev_export_obj.obj_access_count);
639 	core_release_port(dev, port);
640 	spin_unlock(&dev->se_port_lock);
641 
642 	se_dev_stop(dev);
643 	lun->lun_se_dev = NULL;
644 }
645 
646 int target_report_luns(struct se_task *se_task)
647 {
648 	struct se_cmd *se_cmd = se_task->task_se_cmd;
649 	struct se_dev_entry *deve;
650 	struct se_session *se_sess = se_cmd->se_sess;
651 	unsigned char *buf;
652 	u32 lun_count = 0, offset = 8, i;
653 
654 	buf = transport_kmap_data_sg(se_cmd);
655 	if (!buf)
656 		return -ENOMEM;
657 
658 	/*
659 	 * If no struct se_session pointer is present, this struct se_cmd is
660 	 * coming via a target_core_mod PASSTHROUGH op, and not through
661 	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
662 	 */
663 	if (!se_sess) {
664 		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
665 		lun_count = 1;
666 		goto done;
667 	}
668 
669 	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
670 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
671 		deve = se_sess->se_node_acl->device_list[i];
672 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
673 			continue;
674 		/*
		 * Keep counting so the returned LUN LIST LENGTH is correct
		 * even after the initial allocation length has been reached.
677 		 * See SPC2-R20 7.19.
678 		 */
679 		lun_count++;
680 		if ((offset + 8) > se_cmd->data_length)
681 			continue;
682 
683 		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
684 		offset += 8;
685 	}
686 	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
687 
688 	/*
689 	 * See SPC3 r07, page 159.
690 	 */
691 done:
692 	lun_count *= 8;
693 	buf[0] = ((lun_count >> 24) & 0xff);
694 	buf[1] = ((lun_count >> 16) & 0xff);
695 	buf[2] = ((lun_count >> 8) & 0xff);
696 	buf[3] = (lun_count & 0xff);
697 	transport_kunmap_data_sg(se_cmd);
698 
699 	se_task->task_scsi_status = GOOD;
700 	transport_complete_task(se_task, 1);
701 	return 0;
702 }
703 
704 /*	se_release_device_for_hba():
705  *
706  *
707  */
708 void se_release_device_for_hba(struct se_device *dev)
709 {
710 	struct se_hba *hba = dev->se_hba;
711 
712 	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
713 	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
714 	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
715 	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
716 	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
717 		se_dev_stop(dev);
718 
719 	if (dev->dev_ptr) {
720 		kthread_stop(dev->process_thread);
721 		if (dev->transport->free_device)
722 			dev->transport->free_device(dev->dev_ptr);
723 	}
724 
725 	spin_lock(&hba->device_lock);
726 	list_del(&dev->dev_list);
727 	hba->dev_count--;
728 	spin_unlock(&hba->device_lock);
729 
730 	core_scsi3_free_all_registrations(dev);
731 	se_release_vpd_for_dev(dev);
732 
733 	kfree(dev);
734 }
735 
736 void se_release_vpd_for_dev(struct se_device *dev)
737 {
738 	struct t10_vpd *vpd, *vpd_tmp;
739 
740 	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
741 	list_for_each_entry_safe(vpd, vpd_tmp,
742 			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
743 		list_del(&vpd->vpd_list);
744 		kfree(vpd);
745 	}
746 	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
747 }
748 
749 /*	se_free_virtual_device():
750  *
751  *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
752  */
753 int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
754 {
755 	if (!list_empty(&dev->dev_sep_list))
756 		dump_stack();
757 
758 	core_alua_free_lu_gp_mem(dev);
759 	se_release_device_for_hba(dev);
760 
761 	return 0;
762 }
763 
764 static void se_dev_start(struct se_device *dev)
765 {
766 	struct se_hba *hba = dev->se_hba;
767 
768 	spin_lock(&hba->device_lock);
769 	atomic_inc(&dev->dev_obj.obj_access_count);
770 	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
771 		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
772 			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
773 			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
774 		} else if (dev->dev_status &
775 			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
776 			dev->dev_status &=
777 				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
778 			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
779 		}
780 	}
781 	spin_unlock(&hba->device_lock);
782 }
783 
784 static void se_dev_stop(struct se_device *dev)
785 {
786 	struct se_hba *hba = dev->se_hba;
787 
788 	spin_lock(&hba->device_lock);
789 	atomic_dec(&dev->dev_obj.obj_access_count);
790 	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
791 		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
792 			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
793 			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
794 		} else if (dev->dev_status &
795 			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
796 			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
797 			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
798 		}
799 	}
800 	spin_unlock(&hba->device_lock);
801 }
802 
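/*
 * Returns 0 when @dev is usable for I/O (TRANSPORT_DEVICE_ACTIVATED or
 * TRANSPORT_DEVICE_DEACTIVATED), and 1 when it is offline or shutting
 * down.
 */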
803 int se_dev_check_online(struct se_device *dev)
804 {
805 	unsigned long flags;
806 	int ret;
807 
808 	spin_lock_irqsave(&dev->dev_status_lock, flags);
809 	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
810 	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
811 	spin_unlock_irqrestore(&dev->dev_status_lock, flags);
812 
813 	return ret;
814 }
815 
816 int se_dev_check_shutdown(struct se_device *dev)
817 {
818 	int ret;
819 
820 	spin_lock_irq(&dev->dev_status_lock);
821 	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
822 	spin_unlock_irq(&dev->dev_status_lock);
823 
824 	return ret;
825 }
826 
827 u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
828 {
829 	u32 tmp, aligned_max_sectors;
830 	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value so that
	 * transport_allocate_data_tasks() can operate on whole pages.
833 	 */
834 	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
835 	aligned_max_sectors = (tmp / block_size);
836 	if (max_sectors != aligned_max_sectors) {
		pr_info("Rounding down aligned max_sectors from %u"
				" to %u\n", max_sectors, aligned_max_sectors);
839 		return aligned_max_sectors;
840 	}
841 
842 	return max_sectors;
843 }
844 
845 void se_dev_set_default_attribs(
846 	struct se_device *dev,
847 	struct se_dev_limits *dev_limits)
848 {
849 	struct queue_limits *limits = &dev_limits->limits;
850 
851 	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
852 	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
853 	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
854 	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
855 	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
856 	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
857 	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
858 	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
859 	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
860 	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
861 	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
862 	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
863 	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
864 	/*
	 * The TPU=1 and TPWS=1 settings will be set by TCM/IBLOCK in
	 * iblock_create_virtdevice() from struct queue_limits values
	 * when blk_queue_discard()==1.
868 	 */
869 	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
870 	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
871 		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
872 	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
873 	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
874 				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
875 	/*
876 	 * block_size is based on subsystem plugin dependent requirements.
877 	 */
878 	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
879 	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
880 	/*
881 	 * max_sectors is based on subsystem plugin dependent requirements.
882 	 */
883 	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
884 	/*
885 	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
886 	 */
887 	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
888 						limits->logical_block_size);
889 	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
890 	/*
891 	 * Set fabric_max_sectors, which is reported in block limits
892 	 * VPD page (B0h).
893 	 */
894 	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
895 	/*
896 	 * Set optimal_sectors from fabric_max_sectors, which can be
897 	 * lowered via configfs.
898 	 */
899 	dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
900 	/*
901 	 * queue_depth is based on subsystem plugin dependent requirements.
902 	 */
903 	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
904 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
905 }
906 
907 int se_dev_set_max_unmap_lba_count(
908 	struct se_device *dev,
909 	u32 max_unmap_lba_count)
910 {
911 	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
912 	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
913 			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
914 	return 0;
915 }
916 
917 int se_dev_set_max_unmap_block_desc_count(
918 	struct se_device *dev,
919 	u32 max_unmap_block_desc_count)
920 {
921 	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
922 		max_unmap_block_desc_count;
923 	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
924 			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
925 	return 0;
926 }
927 
928 int se_dev_set_unmap_granularity(
929 	struct se_device *dev,
930 	u32 unmap_granularity)
931 {
932 	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
933 	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
934 			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
935 	return 0;
936 }
937 
938 int se_dev_set_unmap_granularity_alignment(
939 	struct se_device *dev,
940 	u32 unmap_granularity_alignment)
941 {
942 	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
943 	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
944 			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
945 	return 0;
946 }
947 
948 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
949 {
950 	if (flag != 0 && flag != 1) {
951 		pr_err("Illegal value %d\n", flag);
952 		return -EINVAL;
953 	}
954 
955 	if (flag) {
956 		pr_err("dpo_emulated not supported\n");
957 		return -EINVAL;
958 	}
959 
960 	return 0;
961 }
962 
963 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
964 {
965 	if (flag != 0 && flag != 1) {
966 		pr_err("Illegal value %d\n", flag);
967 		return -EINVAL;
968 	}
969 
970 	if (flag && dev->transport->fua_write_emulated == 0) {
971 		pr_err("fua_write_emulated not supported\n");
972 		return -EINVAL;
973 	}
974 	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
975 	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
976 			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
977 	return 0;
978 }
979 
980 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
981 {
982 	if (flag != 0 && flag != 1) {
983 		pr_err("Illegal value %d\n", flag);
984 		return -EINVAL;
985 	}
986 
987 	if (flag) {
988 		pr_err("ua read emulated not supported\n");
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
995 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
996 {
997 	if (flag != 0 && flag != 1) {
998 		pr_err("Illegal value %d\n", flag);
999 		return -EINVAL;
1000 	}
1001 	if (flag && dev->transport->write_cache_emulated == 0) {
1002 		pr_err("write_cache_emulated not supported\n");
1003 		return -EINVAL;
1004 	}
1005 	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
1006 	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1007 			dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
1008 	return 0;
1009 }
1010 
1011 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1012 {
1013 	if ((flag != 0) && (flag != 1) && (flag != 2)) {
1014 		pr_err("Illegal value %d\n", flag);
1015 		return -EINVAL;
1016 	}
1017 
1018 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1019 		pr_err("dev[%p]: Unable to change SE Device"
1020 			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
1021 			" exists\n", dev,
1022 			atomic_read(&dev->dev_export_obj.obj_access_count));
1023 		return -EINVAL;
1024 	}
1025 	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
1026 	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1027 		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
1028 
1029 	return 0;
1030 }
1031 
1032 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1033 {
1034 	if ((flag != 0) && (flag != 1)) {
1035 		pr_err("Illegal value %d\n", flag);
1036 		return -EINVAL;
1037 	}
1038 
1039 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1040 		pr_err("dev[%p]: Unable to change SE Device TAS while"
1041 			" dev_export_obj: %d count exists\n", dev,
1042 			atomic_read(&dev->dev_export_obj.obj_access_count));
1043 		return -EINVAL;
1044 	}
1045 	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
1046 	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1047 		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
1048 
1049 	return 0;
1050 }
1051 
1052 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1053 {
1054 	if ((flag != 0) && (flag != 1)) {
1055 		pr_err("Illegal value %d\n", flag);
1056 		return -EINVAL;
1057 	}
1058 	/*
1059 	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
1061 	 */
1062 	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1063 		pr_err("Generic Block Discard not supported\n");
1064 		return -ENOSYS;
1065 	}
1066 
1067 	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
1068 	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1069 				dev, flag);
1070 	return 0;
1071 }
1072 
1073 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1074 {
1075 	if ((flag != 0) && (flag != 1)) {
1076 		pr_err("Illegal value %d\n", flag);
1077 		return -EINVAL;
1078 	}
1079 	/*
1080 	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
1082 	 */
1083 	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1084 		pr_err("Generic Block Discard not supported\n");
1085 		return -ENOSYS;
1086 	}
1087 
1088 	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
1089 	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1090 				dev, flag);
1091 	return 0;
1092 }
1093 
1094 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1095 {
1096 	if ((flag != 0) && (flag != 1)) {
1097 		pr_err("Illegal value %d\n", flag);
1098 		return -EINVAL;
1099 	}
1100 	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
1101 	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1102 		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1103 	return 0;
1104 }
1105 
1106 int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1107 {
1108 	if ((flag != 0) && (flag != 1)) {
1109 		printk(KERN_ERR "Illegal value %d\n", flag);
1110 		return -EINVAL;
1111 	}
1112 	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
1113 	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
1114 	       dev, flag);
1115 	return 0;
1116 }
1117 
1118 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1119 {
1120 	if (flag != 0) {
1121 		printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
1122 			" reordering not implemented\n", dev);
1123 		return -ENOSYS;
1124 	}
1125 	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
1126 	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1127 	return 0;
1128 }
1129 
1130 /*
 * Note: this can only be called on an unexported SE Device Object.
1132  */
1133 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1134 {
1135 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1136 		pr_err("dev[%p]: Unable to change SE Device TCQ while"
1137 			" dev_export_obj: %d count exists\n", dev,
1138 			atomic_read(&dev->dev_export_obj.obj_access_count));
1139 		return -EINVAL;
1140 	}
1141 	if (!queue_depth) {
1142 		pr_err("dev[%p]: Illegal ZERO value for queue"
1143 			"_depth\n", dev);
1144 		return -EINVAL;
1145 	}
1146 
1147 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1148 		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1149 			pr_err("dev[%p]: Passed queue_depth: %u"
1150 				" exceeds TCM/SE_Device TCQ: %u\n",
1151 				dev, queue_depth,
1152 				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
1153 			return -EINVAL;
1154 		}
1155 	} else {
1156 		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
1157 			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1158 				pr_err("dev[%p]: Passed queue_depth:"
1159 					" %u exceeds TCM/SE_Device MAX"
1160 					" TCQ: %u\n", dev, queue_depth,
1161 					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
1162 				return -EINVAL;
1163 			}
1164 		}
1165 	}
1166 
1167 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1168 	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1169 			dev, queue_depth);
1170 	return 0;
1171 }
1172 
1173 int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1174 {
1175 	int force = 0; /* Force setting for VDEVS */
1176 
1177 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1178 		pr_err("dev[%p]: Unable to change SE Device"
1179 			" max_sectors while dev_export_obj: %d count exists\n",
1180 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1181 		return -EINVAL;
1182 	}
1183 	if (!max_sectors) {
1184 		pr_err("dev[%p]: Illegal ZERO value for"
1185 			" max_sectors\n", dev);
1186 		return -EINVAL;
1187 	}
1188 	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1189 		pr_err("dev[%p]: Passed max_sectors: %u less than"
1190 			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
1191 				DA_STATUS_MAX_SECTORS_MIN);
1192 		return -EINVAL;
1193 	}
1194 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1195 		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
1196 			pr_err("dev[%p]: Passed max_sectors: %u"
1197 				" greater than TCM/SE_Device max_sectors:"
1198 				" %u\n", dev, max_sectors,
1199 				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1200 			 return -EINVAL;
1201 		}
1202 	} else {
1203 		if (!force && (max_sectors >
1204 				 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
1205 			pr_err("dev[%p]: Passed max_sectors: %u"
1206 				" greater than TCM/SE_Device max_sectors"
1207 				": %u, use force=1 to override.\n", dev,
1208 				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1209 			return -EINVAL;
1210 		}
1211 		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1212 			pr_err("dev[%p]: Passed max_sectors: %u"
1213 				" greater than DA_STATUS_MAX_SECTORS_MAX:"
1214 				" %u\n", dev, max_sectors,
1215 				DA_STATUS_MAX_SECTORS_MAX);
1216 			return -EINVAL;
1217 		}
1218 	}
1219 	/*
1220 	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1221 	 */
1222 	max_sectors = se_dev_align_max_sectors(max_sectors,
1223 				dev->se_sub_dev->se_dev_attrib.block_size);
1224 
1225 	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
1226 	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1227 			dev, max_sectors);
1228 	return 0;
1229 }
1230 
1231 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1232 {
1233 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1234 		pr_err("dev[%p]: Unable to change SE Device"
1235 			" fabric_max_sectors while dev_export_obj: %d count exists\n",
1236 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1237 		return -EINVAL;
1238 	}
1239 	if (!fabric_max_sectors) {
1240 		pr_err("dev[%p]: Illegal ZERO value for"
1241 			" fabric_max_sectors\n", dev);
1242 		return -EINVAL;
1243 	}
1244 	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1245 		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
1246 			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
1247 				DA_STATUS_MAX_SECTORS_MIN);
1248 		return -EINVAL;
1249 	}
1250 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1251 		if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
1252 			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1253 				" greater than TCM/SE_Device max_sectors:"
1254 				" %u\n", dev, fabric_max_sectors,
1255 				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1256 			 return -EINVAL;
1257 		}
1258 	} else {
1259 		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1260 			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1261 				" greater than DA_STATUS_MAX_SECTORS_MAX:"
1262 				" %u\n", dev, fabric_max_sectors,
1263 				DA_STATUS_MAX_SECTORS_MAX);
1264 			return -EINVAL;
1265 		}
1266 	}
1267 	/*
1268 	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1269 	 */
1270 	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1271 						      dev->se_sub_dev->se_dev_attrib.block_size);
1272 
1273 	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
1274 	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1275 			dev, fabric_max_sectors);
1276 	return 0;
1277 }
1278 
1279 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1280 {
1281 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1282 		pr_err("dev[%p]: Unable to change SE Device"
1283 			" optimal_sectors while dev_export_obj: %d count exists\n",
1284 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1285 		return -EINVAL;
1286 	}
1287 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1288 		pr_err("dev[%p]: Passed optimal_sectors cannot be"
1289 				" changed for TCM/pSCSI\n", dev);
1290 		return -EINVAL;
1291 	}
1292 	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
1293 		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1294 			" greater than fabric_max_sectors: %u\n", dev,
1295 			optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
1296 		return -EINVAL;
1297 	}
1298 
1299 	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
1300 	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1301 			dev, optimal_sectors);
1302 	return 0;
1303 }
1304 
1305 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1306 {
1307 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1308 		pr_err("dev[%p]: Unable to change SE Device block_size"
1309 			" while dev_export_obj: %d count exists\n", dev,
1310 			atomic_read(&dev->dev_export_obj.obj_access_count));
1311 		return -EINVAL;
1312 	}
1313 
1314 	if ((block_size != 512) &&
1315 	    (block_size != 1024) &&
1316 	    (block_size != 2048) &&
1317 	    (block_size != 4096)) {
1318 		pr_err("dev[%p]: Illegal value for block_device: %u"
1319 			" for SE device, must be 512, 1024, 2048 or 4096\n",
1320 			dev, block_size);
1321 		return -EINVAL;
1322 	}
1323 
1324 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1325 		pr_err("dev[%p]: Not allowed to change block_size for"
1326 			" Physical Device, use for Linux/SCSI to change"
1327 			" block_size for underlying hardware\n", dev);
1328 		return -EINVAL;
1329 	}
1330 
1331 	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
1332 	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1333 			dev, block_size);
1334 	return 0;
1335 }
1336 
1337 struct se_lun *core_dev_add_lun(
1338 	struct se_portal_group *tpg,
1339 	struct se_hba *hba,
1340 	struct se_device *dev,
1341 	u32 lun)
1342 {
1343 	struct se_lun *lun_p;
1344 	u32 lun_access = 0;
1345 	int rc;
1346 
1347 	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1348 		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
1349 			atomic_read(&dev->dev_access_obj.obj_access_count));
1350 		return ERR_PTR(-EACCES);
1351 	}
1352 
1353 	lun_p = core_tpg_pre_addlun(tpg, lun);
1354 	if (IS_ERR(lun_p))
1355 		return lun_p;
1356 
1357 	if (dev->dev_flags & DF_READ_ONLY)
1358 		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1359 	else
1360 		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
1361 
1362 	rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
1363 	if (rc < 0)
1364 		return ERR_PTR(rc);
1365 
1366 	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1367 		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1368 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
1369 		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
1370 	/*
1371 	 * Update LUN maps for dynamically added initiators when
1372 	 * generate_node_acl is enabled.
1373 	 */
1374 	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1375 		struct se_node_acl *acl;
1376 		spin_lock_irq(&tpg->acl_node_lock);
1377 		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1378 			if (acl->dynamic_node_acl &&
1379 			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1380 			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1381 				spin_unlock_irq(&tpg->acl_node_lock);
1382 				core_tpg_add_node_to_devs(acl, tpg);
1383 				spin_lock_irq(&tpg->acl_node_lock);
1384 			}
1385 		}
1386 		spin_unlock_irq(&tpg->acl_node_lock);
1387 	}
1388 
1389 	return lun_p;
1390 }
1391 
/*      core_dev_del_lun():
 *
 *	Deactivate and remove @unpacked_lun from @tpg.
 */
1396 int core_dev_del_lun(
1397 	struct se_portal_group *tpg,
1398 	u32 unpacked_lun)
1399 {
1400 	struct se_lun *lun;
1401 
1402 	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1403 	if (IS_ERR(lun))
1404 		return PTR_ERR(lun);
1405 
1406 	core_tpg_post_dellun(tpg, lun);
1407 
1408 	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1409 		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1410 		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1411 		tpg->se_tpg_tfo->get_fabric_name());
1412 
1413 	return 0;
1414 }
1415 
1416 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1417 {
1418 	struct se_lun *lun;
1419 
1420 	spin_lock(&tpg->tpg_lun_lock);
1421 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1422 		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1423 			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
1424 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1425 			TRANSPORT_MAX_LUNS_PER_TPG-1,
1426 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1427 		spin_unlock(&tpg->tpg_lun_lock);
1428 		return NULL;
1429 	}
1430 	lun = tpg->tpg_lun_list[unpacked_lun];
1431 
1432 	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1433 		pr_err("%s Logical Unit Number: %u is not free on"
1434 			" Target Portal Group: %hu, ignoring request.\n",
1435 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1436 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1437 		spin_unlock(&tpg->tpg_lun_lock);
1438 		return NULL;
1439 	}
1440 	spin_unlock(&tpg->tpg_lun_lock);
1441 
1442 	return lun;
1443 }
1444 
/*      core_dev_get_lun():
 *
 *	Return the TRANSPORT_LUN_STATUS_ACTIVE struct se_lun for
 *	@unpacked_lun in @tpg, or NULL.
 */
1449 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1450 {
1451 	struct se_lun *lun;
1452 
1453 	spin_lock(&tpg->tpg_lun_lock);
1454 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1455 		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1456 			"_TPG-1: %u for Target Portal Group: %hu\n",
1457 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1458 			TRANSPORT_MAX_LUNS_PER_TPG-1,
1459 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1460 		spin_unlock(&tpg->tpg_lun_lock);
1461 		return NULL;
1462 	}
1463 	lun = tpg->tpg_lun_list[unpacked_lun];
1464 
1465 	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1466 		pr_err("%s Logical Unit Number: %u is not active on"
1467 			" Target Portal Group: %hu, ignoring request.\n",
1468 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1469 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1470 		spin_unlock(&tpg->tpg_lun_lock);
1471 		return NULL;
1472 	}
1473 	spin_unlock(&tpg->tpg_lun_lock);
1474 
1475 	return lun;
1476 }
1477 
1478 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1479 	struct se_portal_group *tpg,
1480 	u32 mapped_lun,
1481 	char *initiatorname,
1482 	int *ret)
1483 {
1484 	struct se_lun_acl *lacl;
1485 	struct se_node_acl *nacl;
1486 
1487 	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1488 		pr_err("%s InitiatorName exceeds maximum size.\n",
1489 			tpg->se_tpg_tfo->get_fabric_name());
1490 		*ret = -EOVERFLOW;
1491 		return NULL;
1492 	}
1493 	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1494 	if (!nacl) {
1495 		*ret = -EINVAL;
1496 		return NULL;
1497 	}
1498 	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1499 	if (!lacl) {
1500 		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1501 		*ret = -ENOMEM;
1502 		return NULL;
1503 	}
1504 
1505 	INIT_LIST_HEAD(&lacl->lacl_list);
1506 	lacl->mapped_lun = mapped_lun;
1507 	lacl->se_lun_nacl = nacl;
1508 	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
1509 
1510 	return lacl;
1511 }
1512 
1513 int core_dev_add_initiator_node_lun_acl(
1514 	struct se_portal_group *tpg,
1515 	struct se_lun_acl *lacl,
1516 	u32 unpacked_lun,
1517 	u32 lun_access)
1518 {
1519 	struct se_lun *lun;
1520 	struct se_node_acl *nacl;
1521 
1522 	lun = core_dev_get_lun(tpg, unpacked_lun);
1523 	if (!lun) {
1524 		pr_err("%s Logical Unit Number: %u is not active on"
1525 			" Target Portal Group: %hu, ignoring request.\n",
1526 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1527 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1528 		return -EINVAL;
1529 	}
1530 
1531 	nacl = lacl->se_lun_nacl;
1532 	if (!nacl)
1533 		return -EINVAL;
1534 
1535 	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1536 	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1537 		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1538 
1539 	lacl->se_lun = lun;
1540 
1541 	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
1542 			lun_access, nacl, tpg, 1) < 0)
1543 		return -EINVAL;
1544 
1545 	spin_lock(&lun->lun_acl_lock);
1546 	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1547 	atomic_inc(&lun->lun_acl_count);
1548 	smp_mb__after_atomic_inc();
1549 	spin_unlock(&lun->lun_acl_lock);
1550 
1551 	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
1552 		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1553 		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1554 		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1555 		lacl->initiatorname);
1556 	/*
1557 	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
1559 	 */
1560 	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
1561 	return 0;
1562 }
1563 
/*      core_dev_del_initiator_node_lun_acl():
 *
 *	Remove @lacl from @lun and disable the MappedLUN for its NodeACL.
 */
1568 int core_dev_del_initiator_node_lun_acl(
1569 	struct se_portal_group *tpg,
1570 	struct se_lun *lun,
1571 	struct se_lun_acl *lacl)
1572 {
1573 	struct se_node_acl *nacl;
1574 
1575 	nacl = lacl->se_lun_nacl;
1576 	if (!nacl)
1577 		return -EINVAL;
1578 
1579 	spin_lock(&lun->lun_acl_lock);
1580 	list_del(&lacl->lacl_list);
1581 	atomic_dec(&lun->lun_acl_count);
1582 	smp_mb__after_atomic_dec();
1583 	spin_unlock(&lun->lun_acl_lock);
1584 
1585 	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
1586 		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
1587 
1588 	lacl->se_lun = NULL;
1589 
1590 	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1591 		" InitiatorNode: %s Mapped LUN: %u\n",
1592 		tpg->se_tpg_tfo->get_fabric_name(),
1593 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1594 		lacl->initiatorname, lacl->mapped_lun);
1595 
1596 	return 0;
1597 }
1598 
1599 void core_dev_free_initiator_node_lun_acl(
1600 	struct se_portal_group *tpg,
1601 	struct se_lun_acl *lacl)
1602 {
1603 	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1604 		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1605 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1606 		tpg->se_tpg_tfo->get_fabric_name(),
1607 		lacl->initiatorname, lacl->mapped_lun);
1608 
1609 	kfree(lacl);
1610 }
1611 
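/*
 * Set up the internal rd_mcp backed device that serves as the per-TPG
 * tpg_virt_lun0, so that REPORT_LUNS, et al can be completed for
 * Initiator Ports that have no MappedLUN=0 (see the virtual LUN 0
 * fallback in transport_lookup_cmd_lun() above).
 */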
1612 int core_dev_setup_virtual_lun0(void)
1613 {
1614 	struct se_hba *hba;
1615 	struct se_device *dev;
1616 	struct se_subsystem_dev *se_dev = NULL;
1617 	struct se_subsystem_api *t;
1618 	char buf[16];
1619 	int ret;
1620 
1621 	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1622 	if (IS_ERR(hba))
1623 		return PTR_ERR(hba);
1624 
1625 	lun0_hba = hba;
1626 	t = hba->transport;
1627 
1628 	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1629 	if (!se_dev) {
1630 		pr_err("Unable to allocate memory for"
1631 				" struct se_subsystem_dev\n");
1632 		ret = -ENOMEM;
1633 		goto out;
1634 	}
1635 	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1636 	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1637 	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
1638 	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
1639 	spin_lock_init(&se_dev->t10_pr.registration_lock);
1640 	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
1641 	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
1642 	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
1643 	spin_lock_init(&se_dev->se_dev_lock);
1644 	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1645 	se_dev->t10_wwn.t10_sub_dev = se_dev;
1646 	se_dev->t10_alua.t10_sub_dev = se_dev;
1647 	se_dev->se_dev_attrib.da_sub_dev = se_dev;
1648 	se_dev->se_dev_hba = hba;
1649 
1650 	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1651 	if (!se_dev->se_dev_su_ptr) {
1652 		pr_err("Unable to locate subsystem dependent pointer"
1653 			" from allocate_virtdevice()\n");
1654 		ret = -ENOMEM;
1655 		goto out;
1656 	}
1657 	lun0_su_dev = se_dev;
1658 
1659 	memset(buf, 0, 16);
1660 	sprintf(buf, "rd_pages=8");
1661 	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
1662 
1663 	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1664 	if (IS_ERR(dev)) {
1665 		ret = PTR_ERR(dev);
1666 		goto out;
1667 	}
1668 	se_dev->se_dev_ptr = dev;
1669 	g_lun0_dev = dev;
1670 
1671 	return 0;
1672 out:
1673 	lun0_su_dev = NULL;
1674 	kfree(se_dev);
1675 	if (lun0_hba) {
1676 		core_delete_hba(lun0_hba);
1677 		lun0_hba = NULL;
1678 	}
1679 	return ret;
1680 }
1681 
1683 void core_dev_release_virtual_lun0(void)
1684 {
1685 	struct se_hba *hba = lun0_hba;
1686 	struct se_subsystem_dev *su_dev = lun0_su_dev;
1687 
1688 	if (!hba)
1689 		return;
1690 
1691 	if (g_lun0_dev)
1692 		se_free_virtual_device(g_lun0_dev, hba);
1693 
1694 	kfree(su_dev);
1695 	core_delete_hba(hba);
1696 }
1697