/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/*	core_clear_initiator_node_from_tpg():
 *
 *	Walk the ACL's device list and disable each mapped LUN to which the
 *	Initiator currently has access.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entry's device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_irq(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
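
/*
 * Example (a minimal sketch of the locking contract only; the caller shown
 * here is hypothetical):
 *
 *	spin_lock_irq(&tpg->acl_node_lock);
 *	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	if (acl)
 *		atomic_inc(&acl->acl_pr_ref_count);
 *	spin_unlock_irq(&tpg->acl_node_lock);
 */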

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked lookup of an explicitly configured (non-dynamic) node ACL
 *	by initiator name.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUNs for each active LUN in the TPG on behalf
 *	of a dynamically generated node ACL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. LUNs are READ-ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only non-disk devices (e.g. optical) to
			 * issue R/W in the default RO demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/*      core_set_queue_depth_for_node():
 *
 *      Sanity-check the ACL's queue depth, falling back to a minimum of 1.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

static void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kcalloc(n, sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}
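
/*
 * Example (sketch): array_zalloc()/array_free() pair up as a simple
 * array-of-pointers allocator, e.g. for a per-ACL table in the style of
 * core_create_device_list_for_node() below:
 *
 *	struct se_dev_entry **table;
 *
 *	table = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
 *			sizeof(struct se_dev_entry), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	array_free(table, TRANSPORT_MAX_LUNS_PER_TPG);
 */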

/*      core_create_device_list_for_node():
 *
 *      Allocate and initialize the per-ACL table of struct se_dev_entry
 *      mappings, one slot per possible TPG LUN.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/*	core_tpg_check_initiator_node_acl()
 *
 *	Called at fabric session establishment to locate an existing node
 *	ACL, or to generate a dynamic one when demo mode is enabled.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
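
/*
 * Example (hedged sketch of a fabric login path; my_tpg, sess, and the
 * error handling are hypothetical):
 *
 *	struct se_node_acl *se_nacl;
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(&my_tpg->se_tpg,
 *			initiatorname);
 *	if (!se_nacl)
 *		return -EACCES;	 // no explicit ACL and demo mode disabled
 *	sess->se_node_acl = se_nacl;
 */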

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *	Add an explicitly configured node ACL, converting an existing
 *	dynamic (demo-mode) ACL in place when one is found.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
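
/*
 * Example (sketch): a fabric's configfs ->fabric_make_nodeacl() typically
 * allocates a driver-private container and passes the embedded
 * struct se_node_acl here; my_nacl and MY_DEFAULT_DEPTH are hypothetical:
 *
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *			&my_nacl->se_node_acl, name, MY_DEFAULT_DEPTH);
 *	if (IS_ERR(se_nacl))
 *		return ERR_CAST(se_nacl);
 */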

/*	core_tpg_del_initiator_node_acl():
 *
 *	Remove a node ACL from the TPG, shutting down any active sessions
 *	that still reference it.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the TCQ depth for a node ACL, optionally forcing session
 *	reinstatement when an active session exists.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
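
/*
 * Example (sketch): typically driven from a configfs attribute store
 * handler; parsing of new_depth from userspace is assumed to have
 * happened already:
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			acl->initiatorname, new_depth, 1);
 *	if (ret < 0)
 *		return ret;
 */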

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
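
/*
 * Example (sketch): wired to a configfs "acl_tag" store method, where
 * writing the literal string "NULL" clears the tag; page and count are
 * the usual configfs store arguments:
 *
 *	ret = core_tpg_set_initiator_node_tag(tpg, acl, page);
 *	if (ret < 0)
 *		return ret;
 *	return count;
 */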

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * se_tpg is embedded in a fabric-owned structure and
			 * was not allocated here, so only undo our own
			 * tpg_lun_list allocation on failure.
			 */
			array_free(se_tpg->tpg_lun_list,
					TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
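
/*
 * Example (hedged sketch of a fabric ->fabric_make_tpg() caller; my_tpg
 * and my_fabric_ops are hypothetical names):
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *			my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0) {
 *		kfree(my_tpg);
 *		return ERR_PTR(ret);
 *	}
 */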

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
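
/*
 * Example (sketch): the pre/post pair is intended to bracket device
 * export, in the style of core_dev_add_lun():
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *
 *	ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */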

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}