// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;
static DEFINE_XARRAY_ALLOC(tpg_xa);

/*
 * __core_tpg_get_initiator_node_acl()
 *
 * Must be called with tpg->acl_node_mutex held.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*
 * core_tpg_get_initiator_node_acl()
 *
 * Look up a node ACL by initiator name and take a reference on its
 * acl_kref.  Returns NULL if no ACL exists or it is being released.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
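
/*
 * Example (illustrative sketch, not part of the original file): a fabric
 * driver login path would typically resolve the ACL once and keep the
 * returned acl_kref reference bound to the new se_session.  The error
 * handling shown here is hypothetical.
 *
 *	struct se_node_acl *acl;
 *
 *	acl = core_tpg_get_initiator_node_acl(se_tpg, initiatorname);
 *	if (!acl)
 *		return -EACCES;		(reject login: no matching ACL)
 *
 *	...bind acl to the se_session; the reference taken above is
 *	dropped later via transport_deregister_session() or
 *	transport_free_session().
 */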

/*
 * Queue a NEXUS LOSS OCCURRED unit attention (ASC 0x29) on every LUN
 * mapping of the given node ACL.
 */
void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*
 * core_tpg_add_node_to_devs()
 *
 * Create demo-mode MappedLUNs for a node ACL, either for every LUN in
 * the TPG or only for lun_orig when it is non-NULL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default a LIO-Target fabric module has
		 * demo_mode_write_protect ON, i.e. LUNs are READ-ONLY.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * In default read-only demo mode, allow R/W only for
			 * non-disk devices such as optical drives.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}
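
/*
 * Worked example of the demo-mode access decision above, assuming the
 * fabric defaults: with tpg_check_demo_mode_write_protect() returning 1,
 * a TYPE_DISK LUN is mapped READ-ONLY while a TYPE_ROM (optical) LUN is
 * mapped READ-WRITE; with write protect disabled, every LUN is mapped
 * READ-WRITE regardless of device type.
 */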

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}
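
/*
 * Example (illustrative sketch): the max(sizeof(*acl), node_acl_size)
 * allocation above lets a fabric driver embed se_node_acl at the start
 * of its own per-initiator structure and recover the container with
 * container_of().  The structure and ops names below are hypothetical.
 *
 *	struct my_fabric_nacl {
 *		struct se_node_acl se_node_acl;	(must stay first)
 *		u32 private_state;
 *	};
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		...
 *		.node_acl_size = sizeof(struct my_fabric_nacl),
 *	};
 */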

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
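
/*
 * Example (illustrative sketch): a demo-mode capable fabric driver
 * resolves or creates the ACL from its login path, typically reaching
 * this helper indirectly via target_setup_session().  A direct call
 * would look like:
 *
 *	acl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!acl)
 *		(no explicit ACL and tpg_check_demo_mode() == 0:
 *		 reject the login)
 */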

/*
 * Busy-wait until all outstanding persistent reservation references to
 * this node ACL have been dropped.
 */
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->fabric_name,
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->fabric_name,
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}
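
/*
 * Example (illustrative): this is the backing call for explicit ACL
 * creation from configfs, e.g. for iSCSI:
 *
 *	mkdir /sys/kernel/config/target/iscsi/<target_iqn>/tpgt_1/acls/<initiator_iqn>
 *
 * If a demo-mode session already created a dynamic ACL for the same
 * initiator name, that ACL is promoted in place (dynamic_node_acl is
 * cleared) rather than recreated, so existing sessions survive.
 */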

static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}
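
/*
 * Note on the restart idiom above: close_session() may sleep and may
 * drop the last session reference, so it cannot be called under
 * nacl_sess_lock.  Each victim session is unlinked while the lock is
 * held, the lock is dropped for the callback, and the list walk is
 * restarted from the head because the list may have changed in the
 * meantime.
 */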

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}

/*
 * core_tpg_set_initiator_node_queue_depth()
 *
 * Update the queue depth for an initiator node ACL and force session
 * reinstatement so the new depth takes effect.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * The user has requested to change the queue depth for an
	 * Initiator Node.  Update the value in the node's struct
	 * se_node_acl via target_set_nacl_queue_depth().
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
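
/*
 * Example (illustrative): for iSCSI this is driven from a configfs ACL
 * attribute, e.g.
 *
 *	echo 64 > /sys/kernel/config/target/iscsi/<target_iqn>/tpgt_1/acls/<initiator_iqn>/cmdsn_depth
 *
 * Live sessions for the initiator are shut down and must re-login to
 * pick up the new depth.
 */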

/*
 * core_tpg_set_initiator_node_tag()
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
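
/*
 * Example (illustrative): the tag is exposed through the generic ACL
 * configfs attribute and only interpreted by userspace tooling, e.g.
 *
 *	echo esx-cluster-a > .../tpgt_1/acls/<initiator_iqn>/tag
 *	echo NULL > .../tpgt_1/acls/<initiator_iqn>/tag	(clears the tag)
 */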

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
{
	u32 val;
	int ret;

	if (se_tpg->rtpi_manual) {
		ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL);
		if (ret) {
			pr_info("%s_TPG[%hu] - Cannot set RTPI %#x, it is already busy\n",
				se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
				se_tpg->tpg_rtpi);
			return -EINVAL;
		}
	} else {
		ret = xa_alloc(&tpg_xa, &val, se_tpg,
			       XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
		if (!ret)
			se_tpg->tpg_rtpi = val;
	}

	return ret;
}
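
/*
 * Background note: the RTPI is the SPC RELATIVE TARGET PORT IDENTIFIER,
 * reported to initiators in e.g. the Device Identification VPD page, so
 * it must be unique across all target ports; the global tpg_xa XArray
 * enforces that here for both the user-chosen (rtpi_manual) and the
 * automatically allocated case.
 */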

static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg)
{
	if (se_tpg->tpg_rtpi && se_tpg->enabled)
		xa_erase(&tpg_xa, se_tpg->tpg_rtpi);
}

int target_tpg_enable(struct se_portal_group *se_tpg)
{
	int ret;

	ret = target_tpg_register_rtpi(se_tpg);
	if (ret)
		return ret;

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
	if (ret) {
		target_tpg_deregister_rtpi(se_tpg);
		return ret;
	}

	se_tpg->enabled = true;

	return 0;
}

int target_tpg_disable(struct se_portal_group *se_tpg)
{
	int ret;

	target_tpg_deregister_rtpi(se_tpg);

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
	if (!ret)
		se_tpg->enabled = false;

	return ret;
}

/* Does not change se_wwn->priv. */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
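
/*
 * Example (illustrative sketch): a minimal fabric_make_tpg()
 * implementation built on core_tpg_register().  struct my_fabric_tpg
 * and the SCSI_PROTOCOL_SAS proto_id choice are hypothetical.
 *
 *	static struct se_portal_group *my_make_tpg(struct se_wwn *wwn,
 *						   const char *name)
 *	{
 *		struct my_fabric_tpg *tpg;
 *		int ret;
 *
 *		tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *		if (!tpg)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = core_tpg_register(wwn, &tpg->se_tpg,
 *					SCSI_PROTOCOL_SAS);
 *		if (ret) {
 *			kfree(tpg);
 *			return ERR_PTR(ret);
 *		}
 *		return &tpg->se_tpg;
 *	}
 *
 * The matching drop_tpg() callback would call core_tpg_deregister()
 * before freeing the container.
 */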

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acls that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	target_tpg_deregister_rtpi(se_tpg);

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out:
	return ret;
}
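
/*
 * Example (illustrative): LUN setup is normally driven from configfs.
 * core_tpg_alloc_lun() backs the mkdir of a lun_N directory and
 * core_tpg_add_lun() runs when the LUN is linked to a backstore device,
 * roughly:
 *
 *	mkdir .../tpgt_1/lun/lun_0
 *	ln -s /sys/kernel/config/target/core/iblock_0/my_dev \
 *		.../tpgt_1/lun/lun_0/my_dev
 */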

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}
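
/*
 * Note on the teardown ordering above: transport_clear_lun_ref() kills
 * lun->lun_ref and waits on lun_shutdown_comp, which is completed by
 * core_tpg_lun_ref_release() once the last percpu reference is dropped,
 * so by the time the LUN is unhashed no in-flight I/O can still be
 * dereferencing lun_se_dev.
 */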