// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

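/**
 * transport_lookup_cmd_lun - map an incoming command to its MappedLUN
 * @se_cmd: command submitted by the fabric driver
 * @unpacked_lun: unpacked LUN requested by the initiator
 *
 * Looks up @unpacked_lun in the session's node ACL, takes a percpu reference
 * on the matching se_lun, updates per-deve and per-device statistics, and
 * falls back to the TPG's write-protected virtual LUN 0 when no MappedLUN 0
 * exists for this initiator port.
 *
 * Returns TCM_NO_SENSE on success, or a sense_reason_t describing the failure.
 */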
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

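/**
 * transport_lookup_tmr_lun - map a task management request to its MappedLUN
 * @se_cmd: task management command submitted by the fabric driver
 * @unpacked_lun: unpacked LUN requested by the initiator
 *
 * Like transport_lookup_cmd_lun(), but for task management requests: the
 * associated se_tmr_req is also linked onto the backing device's
 * dev_tmr_list.
 *
 * Returns 0 on success, or -ENODEV when no matching MappedLUN exists.
 */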
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->fabric_name,
			unpacked_lun);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

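/*
 * Returns true when the MappedLUN backing @cmd is configured read-only for
 * the initiator's node ACL.
 */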
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entry's se_lun pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

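/*
 * Release every MappedLUN se_dev_entry for @nacl within @tpg via
 * core_disable_device_list_for_node().
 */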
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

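/*
 * Create the MappedLUN se_dev_entry for @nacl -> @lun. An existing demo-mode
 * (dynamic) entry for @mapped_lun is replaced in place, which also waits for
 * any in-flight SPEC_I_PT / REGISTER_AND_MOVE PR references to drop before
 * the old entry is freed via RCU.
 */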
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				 nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

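/*
 * Disable and free the MappedLUN se_dev_entry @orig for @nacl -> @lun,
 * releasing pending UAs and the NodeACL's PR registrations for this entry.
 * Called with nacl->lun_entry_mutex held.
 */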
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*      core_clear_lun_from_tpg():
 *
 *      Remove every NodeACL MappedLUN entry referencing @lun within @tpg.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

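/*
 * Allocate a unique RELATIVE TARGET PORT IDENTIFIER for @lun on @dev,
 * skipping the reserved value 0 and any identifier already in use on
 * dev->dev_sep_list.
 */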
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->export_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

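/*
 * Activate @lun for @dev within @tpg and, when the fabric runs in demo-mode,
 * map the new LUN into any dynamically generated node ACLs.
 */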
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *      Deactivate @lun and remove it from @tpg.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

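/*
 * Wire up an explicit MappedLUN ACL (@lacl) to @lun, forcing read-only access
 * when the LUN itself is read-only, and enable any matching APTPL persistent
 * reservation pre-registrations for the new mapping.
 */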
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

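/*
 * Allocate a new se_device through the backend's alloc_device() callback and
 * seed the default attributes, the embedded xcopy_lun and the default T10
 * INQUIRY strings. The returned device still has to go through
 * target_configure_device() before it can be exported.
 */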
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM in
 * ATA, in which case the device needs to report TPE=1.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from the block size advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer; e.g. with a 4096
 * byte block size, LBA 16 maps to Linux sector 128.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices; a non-zero return
 * breaks out of the loop and is returned to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}
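
/*
 * Example usage (sketch only): a hypothetical caller counting configured
 * devices. The callback name and counter below are illustrative and do not
 * exist elsewhere in the tree.
 *
 *	static int target_count_one(struct se_device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	target_for_each_device(target_count_one, &count);
 */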
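/*
 * Configure a device previously returned by target_alloc_device(): assign an
 * index from devices_idr, invoke the backend's configure_device() callback,
 * set up ALUA and the QUEUE_FULL work, and mark the device DF_CONFIGURED on
 * success.
 */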
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_device already configured for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

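/*
 * Counterpart of target_alloc_device(): tear down a (possibly configured)
 * device, drop it from devices_idr, release ALUA/PR/VPD state, and return
 * the memory to the backend via free_device().
 */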
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

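/*
 * Create the global rd_mcp backed device used for each TPG's virtual LUN 0,
 * so that REPORT LUNS and friends can be serviced when an initiator has no
 * MappedLUN 0. Torn down again by core_dev_release_virtual_lun0().
 */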
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);