/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_configfs.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	The caller must hold tpg->acl_node_mutex.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked wrapper around __core_tpg_get_initiator_node_acl() that takes
 *	and releases tpg->acl_node_mutex around the lookup.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

/*	core_tpg_add_node_to_devs():
 *
 *	Map the active TPG LUNs (or only @lun_orig when non-NULL) into a
 *	demo-mode generated node ACL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default a LIO-Target $FABRIC_MOD runs with
		 * demo_mode_write_protect enabled, i.e. READ-ONLY access.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * In the default read-only demo mode, only non-disk
			 * devices (e.g. optical drives) are exported R/W.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

/*	core_set_queue_depth_for_node():
 *
 *	Ensure the node ACL queue depth is sane, falling back to a depth of 1.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;

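	/*
	 * Allocate the larger of the core se_node_acl size and the fabric
	 * driver's node_acl_size, so a fabric driver embedding se_node_acl
	 * inside its own per-initiator structure gets its full allocation
	 * zeroed here.
	 */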
	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);
	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		acl->queue_depth = 1;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_set_queue_depth_for_node(tpg, acl) < 0)
		goto out_free_acl;

	return acl;

out_free_acl:
	kfree(acl);
	return NULL;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

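/*	core_tpg_check_initiator_node_acl():
 *
 *	Called at fabric session login to locate an existing explicit ACL for
 *	@initiatorname, or to generate a dynamic one when the fabric is
 *	running in demo mode.
 */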
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

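/*	core_tpg_wait_for_nacl_pr_ref():
 *
 *	Spin until all outstanding SPC-3 persistent reservation references
 *	to this node ACL have been dropped.
 */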
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
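		/*
		 * The put above balances target_get_session() taken while
		 * moving the session onto the local list. A nonzero return
		 * from ->shutdown_session() indicates shutdown was initiated
		 * in this context, so one more put is needed to drop the
		 * session's remaining reference.
		 */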
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for the last target_put_nacl() to complete in
	 * target_complete_nacl() once any active fabric sessions have run
	 * their transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for an existing node ACL, optionally forcing
 *	session reinstatement when an active session exists.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return -ENODEV;
	}
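	/*
	 * Temporarily treat a dynamically generated ACL as explicit while
	 * the depth change is in flight; every exit path below restores
	 * dynamic_node_acl when it was cleared here.
	 */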
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			mutex_lock(&tpg->acl_node_mutex);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			mutex_unlock(&tpg->acl_node_mutex);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to apply the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		mutex_lock(&tpg->acl_node_mutex);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		mutex_unlock(&tpg->acl_node_mutex);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	mutex_lock(&tpg->acl_node_mutex);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	mutex_unlock(&tpg->acl_node_mutex);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

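/*
 * Invoked when the final percpu reference to se_lun->lun_ref is dropped;
 * completes lun_ref_comp so the transport_clear_lun_ref() waiter in
 * core_tpg_remove_lun() below can make forward progress.
 */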
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

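	/*
	 * Regular fabric TPGs (proto_id >= 0) expose a virtual LUN 0 backed
	 * by the global g_lun0_dev; special cases registered with a negative
	 * proto_id, such as iscsi-target discovery TPGs, skip this.
	 */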
	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

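	/*
	 * Spin until all outstanding SPC-3 persistent reservation references
	 * to this TPG have been dropped before tearing down the remaining
	 * node ACLs.
	 */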
	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

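	/*
	 * Link the LUN into the device's port list and publish the
	 * lun->lun_se_dev pointer under tpg_lun_mutex, then hash the LUN
	 * into the RCU-protected tpg_lun_hlist where fabric I/O can see it.
	 */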
	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	lun->lun_access = lun_access;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}