1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3  * Filename:  target_core_pscsi.c
4  *
5  * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
6  *
7  * (c) Copyright 2003-2013 Datera, Inc.
8  *
9  * Nicholas A. Bellinger <nab@kernel.org>
10  *
11  ******************************************************************************/
12 
13 #include <linux/string.h>
14 #include <linux/parser.h>
15 #include <linux/timer.h>
16 #include <linux/blkdev.h>
17 #include <linux/blk_types.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/cdrom.h>
21 #include <linux/ratelimit.h>
22 #include <linux/module.h>
23 #include <asm/unaligned.h>
24 
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_tcq.h>
28 
29 #include <target/target_core_base.h>
30 #include <target/target_core_backend.h>
31 
32 #include "target_core_alua.h"
33 #include "target_core_internal.h"
34 #include "target_core_pscsi.h"
35 
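/*
 * struct se_device is embedded in struct pscsi_dev_virt (see
 * pscsi_alloc_device()), so container_of() recovers the pSCSI private
 * data from the generic target device.
 */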
36 static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
37 {
38 	return container_of(dev, struct pscsi_dev_virt, dev);
39 }
40 
41 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
42 static void pscsi_req_done(struct request *, blk_status_t);
43 
/*	pscsi_attach_hba():
 *
 *	Allocate the pscsi_hba_virt container and record the passed SCSI
 *	Host ID; the underlying struct Scsi_Host is looked up later via
 *	scsi_host_lookup() when a device is configured or HBA passthrough
 *	mode is enabled.
 */
49 static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
50 {
51 	struct pscsi_hba_virt *phv;
52 
53 	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
54 	if (!phv) {
55 		pr_err("Unable to allocate struct pscsi_hba_virt\n");
56 		return -ENOMEM;
57 	}
58 	phv->phv_host_id = host_id;
59 	phv->phv_mode = PHV_VIRTUAL_HOST_ID;
60 
61 	hba->hba_ptr = phv;
62 
63 	pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
64 		" Generic Target Core Stack %s\n", hba->hba_id,
65 		PSCSI_VERSION, TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic Target Core\n",
	       hba->hba_id);
68 
69 	return 0;
70 }
71 
72 static void pscsi_detach_hba(struct se_hba *hba)
73 {
74 	struct pscsi_hba_virt *phv = hba->hba_ptr;
75 	struct Scsi_Host *scsi_host = phv->phv_lld_host;
76 
77 	if (scsi_host) {
78 		scsi_host_put(scsi_host);
79 
80 		pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
81 			" Generic Target Core\n", hba->hba_id,
82 			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
83 			"Unknown");
84 	} else
85 		pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
86 			" from Generic Target Core\n", hba->hba_id);
87 
88 	kfree(phv);
89 	hba->hba_ptr = NULL;
90 }
91 
92 static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
93 {
94 	struct pscsi_hba_virt *phv = hba->hba_ptr;
95 	struct Scsi_Host *sh = phv->phv_lld_host;
96 	/*
97 	 * Release the struct Scsi_Host
98 	 */
99 	if (!mode_flag) {
100 		if (!sh)
101 			return 0;
102 
103 		phv->phv_lld_host = NULL;
104 		phv->phv_mode = PHV_VIRTUAL_HOST_ID;
105 
106 		pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
107 			" %s\n", hba->hba_id, (sh->hostt->name) ?
108 			(sh->hostt->name) : "Unknown");
109 
110 		scsi_host_put(sh);
111 		return 0;
112 	}
113 	/*
114 	 * Otherwise, locate struct Scsi_Host from the original passed
115 	 * pSCSI Host ID and enable for phba mode
116 	 */
117 	sh = scsi_host_lookup(phv->phv_host_id);
118 	if (!sh) {
119 		pr_err("pSCSI: Unable to locate SCSI Host for"
120 			" phv_host_id: %d\n", phv->phv_host_id);
121 		return -EINVAL;
122 	}
123 
124 	phv->phv_lld_host = sh;
125 	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
126 
127 	pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
128 		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
129 
130 	return 1;
131 }
132 
133 static void pscsi_tape_read_blocksize(struct se_device *dev,
134 		struct scsi_device *sdev)
135 {
136 	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
137 	int ret;
138 
139 	buf = kzalloc(12, GFP_KERNEL);
140 	if (!buf)
141 		goto out_free;
142 
143 	memset(cdb, 0, MAX_COMMAND_SIZE);
144 	cdb[0] = MODE_SENSE;
145 	cdb[4] = 0x0c; /* 12 bytes */
146 
147 	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
148 			HZ, 1, NULL);
149 	if (ret)
150 		goto out_free;
151 
	/*
	 * The 12-byte response is a 4-byte mode parameter header followed by
	 * an 8-byte block descriptor; the block length is the 3-byte field at
	 * bytes 9-11.  If it is still zero, the 1024 default is applied at
	 * out_free below.
	 */
155 	sdev->sector_size = get_unaligned_be24(&buf[9]);
156 out_free:
157 	if (!sdev->sector_size)
158 		sdev->sector_size = 1024;
159 
160 	kfree(buf);
161 }
162 
163 static void
164 pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
165 {
166 	if (sdev->inquiry_len < INQUIRY_LEN)
167 		return;
168 	/*
169 	 * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
170 	 */
171 	BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
172 	snprintf(wwn->vendor, sizeof(wwn->vendor),
173 		 "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
174 	BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
175 	snprintf(wwn->model, sizeof(wwn->model),
176 		 "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
177 	BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
178 	snprintf(wwn->revision, sizeof(wwn->revision),
179 		 "%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
180 }
181 
182 static int
183 pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
184 {
185 	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
186 	int ret;
187 
188 	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
189 	if (!buf)
190 		return -ENOMEM;
191 
192 	memset(cdb, 0, MAX_COMMAND_SIZE);
193 	cdb[0] = INQUIRY;
194 	cdb[1] = 0x01; /* Query VPD */
195 	cdb[2] = 0x80; /* Unit Serial Number */
196 	put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
197 
198 	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
199 			      INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
200 	if (ret)
201 		goto out_free;
202 
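	/*
	 * The Unit Serial Number VPD page (0x80) carries the ASCII serial
	 * number starting at byte 4 of the response.
	 */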
203 	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
204 
205 	wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
206 
207 	kfree(buf);
208 	return 0;
209 
210 out_free:
211 	kfree(buf);
212 	return -EPERM;
213 }
214 
215 static void
216 pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
217 		struct t10_wwn *wwn)
218 {
219 	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
220 	int ident_len, page_len, off = 4, ret;
221 	struct t10_vpd *vpd;
222 
223 	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
224 	if (!buf)
225 		return;
226 
227 	memset(cdb, 0, MAX_COMMAND_SIZE);
228 	cdb[0] = INQUIRY;
229 	cdb[1] = 0x01; /* Query VPD */
230 	cdb[2] = 0x83; /* Device Identifier */
231 	put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
232 
233 	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
234 			      INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
235 			      NULL, HZ, 1, NULL);
236 	if (ret)
237 		goto out;
238 
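	/*
	 * The Device Identification VPD page (0x83) starts with a 4-byte
	 * header (page length at bytes 2-3) followed by a list of designation
	 * descriptors, each with a 4-byte header and its identifier length
	 * at byte 3.
	 */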
239 	page_len = get_unaligned_be16(&buf[2]);
240 	while (page_len > 0) {
241 		/* Grab a pointer to the Identification descriptor */
242 		page_83 = &buf[off];
243 		ident_len = page_83[3];
244 		if (!ident_len) {
245 			pr_err("page_83[3]: identifier"
246 					" length zero!\n");
247 			break;
248 		}
249 		pr_debug("T10 VPD Identifier Length: %d\n", ident_len);
250 
251 		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
252 		if (!vpd) {
253 			pr_err("Unable to allocate memory for"
254 					" struct t10_vpd\n");
255 			goto out;
256 		}
257 		INIT_LIST_HEAD(&vpd->vpd_list);
258 
259 		transport_set_vpd_proto_id(vpd, page_83);
260 		transport_set_vpd_assoc(vpd, page_83);
261 
262 		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
263 			off += (ident_len + 4);
264 			page_len -= (ident_len + 4);
265 			kfree(vpd);
266 			continue;
267 		}
268 		if (transport_set_vpd_ident(vpd, page_83) < 0) {
269 			off += (ident_len + 4);
270 			page_len -= (ident_len + 4);
271 			kfree(vpd);
272 			continue;
273 		}
274 
275 		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
276 		off += (ident_len + 4);
277 		page_len -= (ident_len + 4);
278 	}
279 
280 out:
281 	kfree(buf);
282 }
283 
284 static int pscsi_add_device_to_list(struct se_device *dev,
285 		struct scsi_device *sd)
286 {
287 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
288 	struct request_queue *q = sd->request_queue;
289 
290 	pdv->pdv_sd = sd;
291 
292 	if (!sd->queue_depth) {
293 		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
294 
295 		pr_err("Set broken SCSI Device %d:%d:%llu"
296 			" queue_depth to %d\n", sd->channel, sd->id,
297 				sd->lun, sd->queue_depth);
298 	}
299 
300 	dev->dev_attrib.hw_block_size =
301 		min_not_zero((int)sd->sector_size, 512);
302 	dev->dev_attrib.hw_max_sectors =
303 		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
304 	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
305 
306 	/*
307 	 * Setup our standard INQUIRY info into se_dev->t10_wwn
308 	 */
309 	pscsi_set_inquiry_info(sd, &dev->t10_wwn);
310 
311 	/*
312 	 * Locate VPD WWN Information used for various purposes within
313 	 * the Storage Engine.
314 	 */
315 	if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
316 		/*
317 		 * If VPD Unit Serial returned GOOD status, try
318 		 * VPD Device Identification page (0x83).
319 		 */
320 		pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
321 	}
322 
323 	/*
324 	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
325 	 */
326 	if (sd->type == TYPE_TAPE) {
327 		pscsi_tape_read_blocksize(dev, sd);
328 		dev->dev_attrib.hw_block_size = sd->sector_size;
329 	}
330 	return 0;
331 }
332 
333 static struct se_device *pscsi_alloc_device(struct se_hba *hba,
334 		const char *name)
335 {
336 	struct pscsi_dev_virt *pdv;
337 
338 	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
339 	if (!pdv) {
340 		pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
341 		return NULL;
342 	}
343 
344 	pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
345 	return &pdv->dev;
346 }
347 
/*
 * Called with struct Scsi_Host->host_lock held.
 */
351 static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
352 	__releases(sh->host_lock)
353 {
354 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
355 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
356 	struct Scsi_Host *sh = sd->host;
357 	struct block_device *bd;
358 	int ret;
359 
360 	if (scsi_device_get(sd)) {
361 		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
362 			sh->host_no, sd->channel, sd->id, sd->lun);
363 		spin_unlock_irq(sh->host_lock);
364 		return -EIO;
365 	}
366 	spin_unlock_irq(sh->host_lock);
367 	/*
368 	 * Claim exclusive struct block_device access to struct scsi_device
369 	 * for TYPE_DISK and TYPE_ZBC using supplied udev_path
370 	 */
371 	bd = blkdev_get_by_path(dev->udev_path,
372 				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
373 	if (IS_ERR(bd)) {
374 		pr_err("pSCSI: blkdev_get_by_path() failed\n");
375 		scsi_device_put(sd);
376 		return PTR_ERR(bd);
377 	}
378 	pdv->pdv_bd = bd;
379 
380 	ret = pscsi_add_device_to_list(dev, sd);
381 	if (ret) {
382 		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
383 		scsi_device_put(sd);
384 		return ret;
385 	}
386 
387 	pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
388 		phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
389 		sh->host_no, sd->channel, sd->id, sd->lun);
390 	return 0;
391 }
392 
/*
 * Called with struct Scsi_Host->host_lock held.
 */
396 static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
397 	__releases(sh->host_lock)
398 {
399 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
400 	struct Scsi_Host *sh = sd->host;
401 	int ret;
402 
403 	if (scsi_device_get(sd)) {
404 		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
405 			sh->host_no, sd->channel, sd->id, sd->lun);
406 		spin_unlock_irq(sh->host_lock);
407 		return -EIO;
408 	}
409 	spin_unlock_irq(sh->host_lock);
410 
411 	ret = pscsi_add_device_to_list(dev, sd);
412 	if (ret) {
413 		scsi_device_put(sd);
414 		return ret;
415 	}
416 	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
417 		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
418 		sd->channel, sd->id, sd->lun);
419 
420 	return 0;
421 }
422 
423 static int pscsi_configure_device(struct se_device *dev)
424 {
425 	struct se_hba *hba = dev->se_hba;
426 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
427 	struct scsi_device *sd;
428 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
429 	struct Scsi_Host *sh = phv->phv_lld_host;
430 	int legacy_mode_enable = 0;
431 	int ret;
432 
433 	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
434 	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
435 	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
436 		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
437 			" scsi_lun_id= parameters\n");
438 		return -EINVAL;
439 	}
440 
441 	/*
442 	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
443 	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
444 	 */
445 	if (!sh) {
446 		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
447 			pr_err("pSCSI: Unable to locate struct"
448 				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
449 			return -ENODEV;
450 		}
451 		/*
452 		 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
453 		 * reference, we enforce that udev_path has been set
454 		 */
455 		if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
456 			pr_err("pSCSI: udev_path attribute has not"
457 				" been set before ENABLE=1\n");
458 			return -EINVAL;
459 		}
460 		/*
461 		 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
462 		 * use the original TCM hba ID to reference Linux/SCSI Host No
463 		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
464 		 */
465 		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
466 			if (hba->dev_count) {
467 				pr_err("pSCSI: Unable to set hba_mode"
468 					" with active devices\n");
469 				return -EEXIST;
470 			}
471 
472 			if (pscsi_pmode_enable_hba(hba, 1) != 1)
473 				return -ENODEV;
474 
475 			legacy_mode_enable = 1;
476 			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
477 			sh = phv->phv_lld_host;
478 		} else {
479 			sh = scsi_host_lookup(pdv->pdv_host_id);
480 			if (!sh) {
481 				pr_err("pSCSI: Unable to locate"
482 					" pdv_host_id: %d\n", pdv->pdv_host_id);
483 				return -EINVAL;
484 			}
485 			pdv->pdv_lld_host = sh;
486 		}
487 	} else {
488 		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
489 			pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
490 				" struct Scsi_Host exists\n");
491 			return -EEXIST;
492 		}
493 	}
494 
495 	spin_lock_irq(sh->host_lock);
496 	list_for_each_entry(sd, &sh->__devices, siblings) {
497 		if ((pdv->pdv_channel_id != sd->channel) ||
498 		    (pdv->pdv_target_id != sd->id) ||
499 		    (pdv->pdv_lun_id != sd->lun))
500 			continue;
		/*
		 * These functions release the held struct scsi_host->host_lock
		 * before calling pscsi_add_device_to_list() to register
		 * struct scsi_device with target_core_mod.
		 */
506 		switch (sd->type) {
507 		case TYPE_DISK:
508 		case TYPE_ZBC:
509 			ret = pscsi_create_type_disk(dev, sd);
510 			break;
511 		default:
512 			ret = pscsi_create_type_nondisk(dev, sd);
513 			break;
514 		}
515 
516 		if (ret) {
517 			if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
518 				scsi_host_put(sh);
519 			else if (legacy_mode_enable) {
520 				pscsi_pmode_enable_hba(hba, 0);
521 				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
522 			}
523 			pdv->pdv_sd = NULL;
524 			return ret;
525 		}
526 		return 0;
527 	}
528 	spin_unlock_irq(sh->host_lock);
529 
530 	pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
531 		pdv->pdv_channel_id,  pdv->pdv_target_id, pdv->pdv_lun_id);
532 
533 	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
534 		scsi_host_put(sh);
535 	else if (legacy_mode_enable) {
536 		pscsi_pmode_enable_hba(hba, 0);
537 		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
538 	}
539 
540 	return -ENODEV;
541 }
542 
543 static void pscsi_dev_call_rcu(struct rcu_head *p)
544 {
545 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
546 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
547 
548 	kfree(pdv);
549 }
550 
551 static void pscsi_free_device(struct se_device *dev)
552 {
553 	call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
554 }
555 
556 static void pscsi_destroy_device(struct se_device *dev)
557 {
558 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
559 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
560 	struct scsi_device *sd = pdv->pdv_sd;
561 
562 	if (sd) {
563 		/*
564 		 * Release exclusive pSCSI internal struct block_device claim for
565 		 * struct scsi_device with TYPE_DISK or TYPE_ZBC
566 		 * from pscsi_create_type_disk()
567 		 */
568 		if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
569 		    pdv->pdv_bd) {
570 			blkdev_put(pdv->pdv_bd,
571 				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
572 			pdv->pdv_bd = NULL;
573 		}
574 		/*
575 		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
576 		 * to struct Scsi_Host now.
577 		 */
578 		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
579 		    (phv->phv_lld_host != NULL))
580 			scsi_host_put(phv->phv_lld_host);
581 		else if (pdv->pdv_lld_host)
582 			scsi_host_put(pdv->pdv_lld_host);
583 
584 		scsi_device_put(sd);
585 
586 		pdv->pdv_sd = NULL;
587 	}
588 }
589 
590 static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
591 			       unsigned char *req_sense, int valid_data)
592 {
593 	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
594 	struct scsi_device *sd = pdv->pdv_sd;
595 	unsigned char *cdb = cmd->priv;
596 
597 	/*
598 	 * Special case for REPORT_LUNs which is emulated and not passed on.
599 	 */
600 	if (!cdb)
601 		return;
602 
	/*
	 * Hack to make sure the Write Protect (WP) bit in the mode parameter
	 * header (byte 2 for MODE SENSE(6), byte 3 for MODE SENSE(10)) is set
	 * when R/O mode is forced on the exported LUN.
	 */
607 	if (!cmd->data_length)
608 		goto after_mode_sense;
609 
610 	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
611 	    scsi_status == SAM_STAT_GOOD) {
612 		bool read_only = target_lun_is_rdonly(cmd);
613 
614 		if (read_only) {
615 			unsigned char *buf;
616 
617 			buf = transport_kmap_data_sg(cmd);
618 			if (!buf) {
619 				; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
620 			} else {
621 				if (cdb[0] == MODE_SENSE_10) {
622 					if (!(buf[3] & 0x80))
623 						buf[3] |= 0x80;
624 				} else {
625 					if (!(buf[2] & 0x80))
626 						buf[2] |= 0x80;
627 				}
628 
629 				transport_kunmap_data_sg(cmd);
630 			}
631 		}
632 	}
633 after_mode_sense:
634 
635 	if (sd->type != TYPE_TAPE || !cmd->data_length)
636 		goto after_mode_select;
637 
	/*
	 * Hack to correctly obtain the initiator requested blocksize for
	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
	 * struct scsi_device->sector_size will not contain the correct value
	 * by default, so we set it here from the MODE SELECT parameter data
	 * so the correct block size is reported back to the storage engine.
	 */
646 	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
647 	     scsi_status == SAM_STAT_GOOD) {
648 		unsigned char *buf;
649 		u16 bdl;
650 		u32 blocksize;
651 
652 		buf = sg_virt(&cmd->t_data_sg[0]);
653 		if (!buf) {
654 			pr_err("Unable to get buf for scatterlist\n");
655 			goto after_mode_select;
656 		}
657 
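		/*
		 * MODE SELECT(6) keeps the block descriptor length at byte 3
		 * and the first block descriptor's block length at bytes 9-11;
		 * MODE SELECT(10) keeps them at bytes 6-7 and 13-15.
		 */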
658 		if (cdb[0] == MODE_SELECT)
659 			bdl = buf[3];
660 		else
661 			bdl = get_unaligned_be16(&buf[6]);
662 
663 		if (!bdl)
664 			goto after_mode_select;
665 
666 		if (cdb[0] == MODE_SELECT)
667 			blocksize = get_unaligned_be24(&buf[9]);
668 		else
669 			blocksize = get_unaligned_be24(&buf[13]);
670 
671 		sd->sector_size = blocksize;
672 	}
673 after_mode_select:
674 
675 	if (scsi_status == SAM_STAT_CHECK_CONDITION) {
676 		transport_copy_sense_to_cmd(cmd, req_sense);
677 
678 		/*
679 		 * check for TAPE device reads with
680 		 * FM/EOM/ILI set, so that we can get data
681 		 * back despite framework assumption that a
682 		 * check condition means there is no data
683 		 */
684 		if (sd->type == TYPE_TAPE && valid_data &&
685 		    cmd->data_direction == DMA_FROM_DEVICE) {
686 			/*
687 			 * is sense data valid, fixed format,
688 			 * and have FM, EOM, or ILI set?
689 			 */
690 			if (req_sense[0] == 0xf0 &&	/* valid, fixed format */
691 			    req_sense[2] & 0xe0 &&	/* FM, EOM, or ILI */
692 			    (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
693 				pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
694 				cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
695 			}
696 		}
697 	}
698 }
699 
700 enum {
701 	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
702 	Opt_scsi_lun_id, Opt_err
703 };
704 
705 static match_table_t tokens = {
706 	{Opt_scsi_host_id, "scsi_host_id=%d"},
707 	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
708 	{Opt_scsi_target_id, "scsi_target_id=%d"},
709 	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
710 	{Opt_err, NULL}
711 };
712 
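/*
 * A pSCSI backstore is configured by writing a parameter string (values
 * below are illustrative) such as
 *
 *   scsi_host_id=2,scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0
 *
 * to the device's configfs "control" attribute before it is enabled;
 * pscsi_set_configfs_dev_params() parses it against the token table above.
 */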
713 static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
714 		const char *page, ssize_t count)
715 {
716 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
717 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
718 	char *orig, *ptr, *opts;
719 	substring_t args[MAX_OPT_ARGS];
720 	int ret = 0, arg, token;
721 
722 	opts = kstrdup(page, GFP_KERNEL);
723 	if (!opts)
724 		return -ENOMEM;
725 
726 	orig = opts;
727 
728 	while ((ptr = strsep(&opts, ",\n")) != NULL) {
729 		if (!*ptr)
730 			continue;
731 
732 		token = match_token(ptr, tokens, args);
733 		switch (token) {
734 		case Opt_scsi_host_id:
735 			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
736 				pr_err("PSCSI[%d]: Unable to accept"
737 					" scsi_host_id while phv_mode =="
738 					" PHV_LLD_SCSI_HOST_NO\n",
739 					phv->phv_host_id);
740 				ret = -EINVAL;
741 				goto out;
742 			}
743 			ret = match_int(args, &arg);
744 			if (ret)
745 				goto out;
746 			pdv->pdv_host_id = arg;
747 			pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
748 				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
749 			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
750 			break;
751 		case Opt_scsi_channel_id:
752 			ret = match_int(args, &arg);
753 			if (ret)
754 				goto out;
755 			pdv->pdv_channel_id = arg;
756 			pr_debug("PSCSI[%d]: Referencing SCSI Channel"
757 				" ID: %d\n",  phv->phv_host_id,
758 				pdv->pdv_channel_id);
759 			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
760 			break;
761 		case Opt_scsi_target_id:
762 			ret = match_int(args, &arg);
763 			if (ret)
764 				goto out;
765 			pdv->pdv_target_id = arg;
766 			pr_debug("PSCSI[%d]: Referencing SCSI Target"
767 				" ID: %d\n", phv->phv_host_id,
768 				pdv->pdv_target_id);
769 			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
770 			break;
771 		case Opt_scsi_lun_id:
772 			ret = match_int(args, &arg);
773 			if (ret)
774 				goto out;
775 			pdv->pdv_lun_id = arg;
776 			pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
777 				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
778 			pdv->pdv_flags |= PDF_HAS_LUN_ID;
779 			break;
780 		default:
781 			break;
782 		}
783 	}
784 
785 out:
786 	kfree(orig);
787 	return (!ret) ? count : ret;
788 }
789 
790 static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
791 {
792 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
793 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
794 	struct scsi_device *sd = pdv->pdv_sd;
795 	unsigned char host_id[16];
796 	ssize_t bl;
797 
798 	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
799 		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
800 	else
801 		snprintf(host_id, 16, "PHBA Mode");
802 
803 	bl = sprintf(b, "SCSI Device Bus Location:"
804 		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
805 		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
806 		host_id);
807 
808 	if (sd) {
809 		bl += sprintf(b + bl, "        Vendor: %."
810 			__stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
811 		bl += sprintf(b + bl, " Model: %."
812 			__stringify(INQUIRY_MODEL_LEN) "s", sd->model);
813 		bl += sprintf(b + bl, " Rev: %."
814 			__stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
815 	}
816 	return bl;
817 }
818 
819 static void pscsi_bi_endio(struct bio *bio)
820 {
821 	bio_put(bio);
822 }
823 
824 static inline struct bio *pscsi_get_bio(int nr_vecs)
825 {
826 	struct bio *bio;
	/*
	 * Use bio_kmalloc() here since these bios are chained onto a
	 * passthrough request via blk_rq_append_bio() rather than allocated
	 * from a bio_set.
	 */
831 	bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
832 	if (!bio) {
833 		pr_err("PSCSI: bio_kmalloc() failed\n");
834 		return NULL;
835 	}
836 	bio->bi_end_io = pscsi_bi_endio;
837 
838 	return bio;
839 }
840 
841 static sense_reason_t
842 pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
843 		struct request *req)
844 {
845 	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
846 	struct bio *bio = NULL;
847 	struct page *page;
848 	struct scatterlist *sg;
849 	u32 data_len = cmd->data_length, i, len, bytes, off;
850 	int nr_pages = (cmd->data_length + sgl[0].offset +
851 			PAGE_SIZE - 1) >> PAGE_SHIFT;
852 	int nr_vecs = 0, rc;
853 	int rw = (cmd->data_direction == DMA_TO_DEVICE);
854 
855 	BUG_ON(!cmd->data_length);
856 
857 	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
858 
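	/*
	 * Pack the scatterlist pages into bios of at most
	 * bio_max_segs(nr_pages) vectors each; whenever bio_add_pc_page()
	 * cannot take the full fragment, the current bio is appended to the
	 * passthrough request and a new one is started.
	 */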
859 	for_each_sg(sgl, sg, sgl_nents, i) {
860 		page = sg_page(sg);
861 		off = sg->offset;
862 		len = sg->length;
863 
864 		pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
865 			page, len, off);
866 
		/*
		 * Each sg element carries at most one page of data, so we
		 * must not cross a page boundary.
		 */
871 		if (off + len > PAGE_SIZE)
872 			goto fail;
873 
874 		if (len > 0 && data_len > 0) {
875 			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
876 			bytes = min(bytes, data_len);
877 
878 			if (!bio) {
879 new_bio:
880 				nr_vecs = bio_max_segs(nr_pages);
881 				/*
882 				 * Calls bio_kmalloc() and sets bio->bi_end_io()
883 				 */
884 				bio = pscsi_get_bio(nr_vecs);
885 				if (!bio)
886 					goto fail;
887 
888 				if (rw)
889 					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
890 
891 				pr_debug("PSCSI: Allocated bio: %p,"
892 					" dir: %s nr_vecs: %d\n", bio,
893 					(rw) ? "rw" : "r", nr_vecs);
894 			}
895 
896 			pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
897 				" bio: %p page: %p len: %d off: %d\n", i, bio,
898 				page, len, off);
899 
900 			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
901 					bio, page, bytes, off);
902 			pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
903 				bio_segments(bio), nr_vecs);
904 			if (rc != bytes) {
905 				pr_debug("PSCSI: Reached bio->bi_vcnt max:"
906 					" %d i: %d bio: %p, allocating another"
907 					" bio\n", bio->bi_vcnt, i, bio);
908 
909 				rc = blk_rq_append_bio(req, bio);
910 				if (rc) {
911 					pr_err("pSCSI: failed to append bio\n");
912 					goto fail;
913 				}
914 
915 				/*
916 				 * Clear the pointer so that another bio will
917 				 * be allocated with pscsi_get_bio() above.
918 				 */
919 				bio = NULL;
920 				goto new_bio;
921 			}
922 
923 			data_len -= bytes;
924 		}
925 	}
926 
927 	if (bio) {
928 		rc = blk_rq_append_bio(req, bio);
929 		if (rc) {
930 			pr_err("pSCSI: failed to append bio\n");
931 			goto fail;
932 		}
933 	}
934 
935 	return 0;
936 fail:
937 	if (bio)
938 		bio_put(bio);
939 	while (req->bio) {
940 		bio = req->bio;
941 		req->bio = bio->bi_next;
942 		bio_put(bio);
943 	}
944 	req->biotail = NULL;
945 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
946 }
947 
948 static sense_reason_t
949 pscsi_parse_cdb(struct se_cmd *cmd)
950 {
951 	if (cmd->se_cmd_flags & SCF_BIDI)
952 		return TCM_UNSUPPORTED_SCSI_OPCODE;
953 
954 	return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
955 }
956 
957 static sense_reason_t
958 pscsi_execute_cmd(struct se_cmd *cmd)
959 {
960 	struct scatterlist *sgl = cmd->t_data_sg;
961 	u32 sgl_nents = cmd->t_data_nents;
962 	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
963 	struct scsi_cmnd *scmd;
964 	struct request *req;
965 	sense_reason_t ret;
966 
967 	req = scsi_alloc_request(pdv->pdv_sd->request_queue,
968 			cmd->data_direction == DMA_TO_DEVICE ?
969 			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
970 	if (IS_ERR(req))
971 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
972 
973 	if (sgl) {
974 		ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
975 		if (ret)
976 			goto fail_put_request;
977 	}
978 
979 	req->end_io = pscsi_req_done;
980 	req->end_io_data = cmd;
981 
982 	scmd = blk_mq_rq_to_pdu(req);
983 	scmd->cmd_len = scsi_command_size(cmd->t_task_cdb);
984 	if (scmd->cmd_len > sizeof(scmd->cmnd)) {
985 		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
986 		goto fail_put_request;
987 	}
988 	memcpy(scmd->cmnd, cmd->t_task_cdb, scmd->cmd_len);
989 
990 	if (pdv->pdv_sd->type == TYPE_DISK ||
991 	    pdv->pdv_sd->type == TYPE_ZBC)
992 		req->timeout = PS_TIMEOUT_DISK;
993 	else
994 		req->timeout = PS_TIMEOUT_OTHER;
995 	scmd->allowed = PS_RETRY;
996 
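	/*
	 * Stash the CDB pointer for pscsi_req_done()/pscsi_complete_cmd();
	 * emulated commands (e.g. REPORT LUNS) never reach this path and
	 * leave cmd->priv NULL.
	 */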
997 	cmd->priv = scmd->cmnd;
998 
999 	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
1000 			pscsi_req_done);
1001 
1002 	return 0;
1003 
1004 fail_put_request:
1005 	blk_mq_free_request(req);
1006 	return ret;
1007 }
1008 
/*	pscsi_get_device_type():
 *
 *	Report the SCSI peripheral device type of the attached scsi_device,
 *	or TYPE_NO_LUN if no device is currently attached.
 */
1013 static u32 pscsi_get_device_type(struct se_device *dev)
1014 {
1015 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
1016 	struct scsi_device *sd = pdv->pdv_sd;
1017 
1018 	return (sd) ? sd->type : TYPE_NO_LUN;
1019 }
1020 
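/*
 * Only TYPE_DISK/TYPE_ZBC devices hold a struct block_device reference
 * (pdv_bd) from pscsi_create_type_disk(), so all other device types report
 * zero blocks.
 */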
1021 static sector_t pscsi_get_blocks(struct se_device *dev)
1022 {
1023 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
1024 
1025 	if (pdv->pdv_bd)
1026 		return bdev_nr_sectors(pdv->pdv_bd);
1027 	return 0;
1028 }
1029 
1030 static void pscsi_req_done(struct request *req, blk_status_t status)
1031 {
1032 	struct se_cmd *cmd = req->end_io_data;
1033 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
1034 	enum sam_status scsi_status = scmd->result & 0xff;
1035 	int valid_data = cmd->data_length - scmd->resid_len;
1036 	u8 *cdb = cmd->priv;
1037 
1038 	if (scsi_status != SAM_STAT_GOOD) {
1039 		pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
1040 			" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
1041 	}
1042 
1043 	pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data);
1044 
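	/*
	 * Only a DID_OK host byte propagates the device's SCSI status (and
	 * any sense data copied above) back to the core; transport-level
	 * failures are reported as CHECK CONDITION instead.
	 */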
1045 	switch (host_byte(scmd->result)) {
1046 	case DID_OK:
1047 		target_complete_cmd_with_length(cmd, scsi_status, valid_data);
1048 		break;
1049 	default:
1050 		pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
1051 			" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
1052 		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
1053 		break;
1054 	}
1055 
1056 	blk_mq_free_request(req);
1057 }
1058 
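/*
 * pSCSI passes SCSI CDBs through to the underlying device, so the core's
 * ALUA and persistent reservation emulation is bypassed by default
 * (TRANSPORT_FLAG_PASSTHROUGH*).
 */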
1059 static const struct target_backend_ops pscsi_ops = {
1060 	.name			= "pscsi",
1061 	.owner			= THIS_MODULE,
1062 	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
1063 				   TRANSPORT_FLAG_PASSTHROUGH_ALUA |
1064 				   TRANSPORT_FLAG_PASSTHROUGH_PGR,
1065 	.attach_hba		= pscsi_attach_hba,
1066 	.detach_hba		= pscsi_detach_hba,
1067 	.pmode_enable_hba	= pscsi_pmode_enable_hba,
1068 	.alloc_device		= pscsi_alloc_device,
1069 	.configure_device	= pscsi_configure_device,
1070 	.destroy_device		= pscsi_destroy_device,
1071 	.free_device		= pscsi_free_device,
1072 	.parse_cdb		= pscsi_parse_cdb,
1073 	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
1074 	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
1075 	.get_device_type	= pscsi_get_device_type,
1076 	.get_blocks		= pscsi_get_blocks,
1077 	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
1078 };
1079 
1080 static int __init pscsi_module_init(void)
1081 {
1082 	return transport_backend_register(&pscsi_ops);
1083 }
1084 
1085 static void __exit pscsi_module_exit(void)
1086 {
1087 	target_backend_unregister(&pscsi_ops);
1088 }
1089 
1090 MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
1091 MODULE_AUTHOR("nab@Linux-iSCSI.org");
1092 MODULE_LICENSE("GPL");
1093 
1094 module_init(pscsi_module_init);
1095 module_exit(pscsi_module_exit);
1096