/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"


static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

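	/*
	 * ->get_blocks() returns the last LBA, not the block count.  If
	 * the last LBA does not fit in four bytes, SBC requires READ
	 * CAPACITY (10) to report 0xffffffff so the initiator retries
	 * with READ CAPACITY (16).
	 */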
	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	put_unaligned_be32(blocks, &buf[0]);
	put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	put_unaligned_be64(blocks, &buf[0]);
	put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
	/*
	 * Set the Thin Provisioning Enable (TPE) bit in READ CAPACITY (16)
	 * byte 14, per sbc3r22, if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied, otherwise
	 * calculate the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

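/*
 * Range-check an LBA plus sector count against the device capacity.  The
 * caller passes the sector count parsed from the CDB, since commands such
 * as SYNCHRONIZE_CACHE carry no data payload and cmd->data_length cannot
 * be used to derive the range.
 */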
static int sbc_check_valid_sectors(struct se_cmd *cmd, u32 sectors)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;

	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return get_unaligned_be16(&cdb[7]);
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[6]);
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[10]);
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[28]);
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[2]);
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[2]);
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[12]);
}

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

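	/*
	 * flags[0] is the WRITE_SAME flags byte: CDB byte 1 for WRITE_SAME
	 * and WRITE_SAME_16, or byte 10 for WRITE_SAME_32.  In SBC-3, bit 3
	 * is UNMAP, bit 2 is PBDATA and bit 1 is LBDATA.
	 */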
	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

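	/*
	 * kmap_atomic() maps a whole page, so each segment's bytes start at
	 * sg->offset within the mapping.  The XOR result lands in place in
	 * the BIDI buffer that is returned to the initiator as data-in.
	 */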
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

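	/*
	 * For COMPARE_AND_WRITE, cmd->t_data_sg holds the verify payload
	 * followed by the write payload, while cmd->t_bidi_data_sg holds
	 * the blocks just read back from the media.
	 */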
	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet.
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/* Set the SGL termination marker that kzalloc() does not provide */
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

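	/*
	 * Build a shadow SGL pointing only at the write instance of the
	 * payload: when block_size < PAGE_SIZE both halves share one page
	 * and the write half starts at offset block_size; otherwise skip
	 * past the verify page to reach the write data.
	 */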
	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg.
	 */
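	/*
	 * dev->caw_sem serializes the emulated COMPARE_AND_WRITE sequence
	 * across the device, so the read + compare + write is atomic with
	 * respect to other COMPARE_AND_WRITE commands.
	 */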
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

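/*
 * Parse an SBC CDB: set up cmd->execute_cmd, cmd->t_task_lba and the
 * expected transfer size, then range-check data transfers against the
 * backend device limits.
 */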
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
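		/*
		 * COMPARE_AND_WRITE is emulated in two stages:
		 * sbc_compare_and_write() submits the READ, then
		 * compare_and_write_callback() compares the returned data
		 * against the verify payload and, on a match, submits the
		 * WRITE instance via sbc_execute_rw().
		 */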
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error.
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = get_unaligned_be32(&cdb[10]);
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd, sectors) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

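		/*
		 * The PARAMETER LIST LENGTH in CDB bytes 7-8 gives the
		 * expected size of the UNMAP data-out payload.
		 */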
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 for WRITE_SAME (10) and honor the UNMAP bit
		 * in byte 1 bit 3, which was previously a reserved field.
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh sectors %u"
				" exceeds fabric_max_sectors: %u\n",
				cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh sectors %u"
				" exceeds backend hw_max_sectors: %u\n",
				cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

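		/*
		 * ->get_blocks() returns the last addressable LBA, so the
		 * device holds get_blocks() + 1 blocks in total; reject any
		 * LBA + sectors range that runs past it.
		 */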
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

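	/*
	 * Each UNMAP block descriptor is 16 bytes: an 8-byte LBA, a 4-byte
	 * NUMBER OF LOGICAL BLOCKS, and 4 reserved bytes.
	 */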
	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);