/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"

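/*
 * Emulate READ CAPACITY (10): report the LBA of the last logical block and
 * the logical block size in an 8-byte parameter data buffer. If the capacity
 * does not fit in 32 bits, RETURNED LOGICAL BLOCK ADDRESS is set to
 * 0xffffffff so that the initiator knows to issue READ CAPACITY (16) instead.
 */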
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

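/*
 * Emulate READ CAPACITY (16) (SERVICE ACTION IN): report the LBA of the last
 * logical block, the logical block size, the logical blocks per physical
 * block exponent, the lowest aligned LBA, and the thin provisioning enable
 * (TPE) bit in a 32-byte parameter data buffer.
 */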
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set the Thin Provisioning Enable bit in READ CAPACITY (16) byte 14,
	 * as described in sbc3r22, if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

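/*
 * Return the number of logical blocks covered by a WRITE SAME CDB. A NUMBER
 * OF LOGICAL BLOCKS of zero means every block from the starting LBA through
 * the last block on the medium.
 */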
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero number of blocks is supplied,
	 * otherwise calculate the remaining range from ->get_blocks() minus
	 * the starting LBA, plus one.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

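/*
 * Complete the command with GOOD status without doing any work. Used for
 * commands that are accepted but intentionally not implemented.
 */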
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

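/*
 * Check that the LBA plus the number of sectors implied by cmd->data_length
 * does not run past the end of the device.
 */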
static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

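/*
 * Helpers for extracting TRANSFER LENGTH and LOGICAL BLOCK ADDRESS fields
 * from the various fixed-size and variable-length CDB formats.
 */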
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

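/*
 * Validate the WRITE SAME flag bits (PBDATA, LBDATA, ANCHOR, UNMAP) and the
 * sector count against max_write_same_len, then select either the discard
 * (UNMAP=1) or regular WRITE SAME backend handler.
 */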
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

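/* Submit a regular READ/WRITE to the backend using the main data SGL. */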
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

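/*
 * Completion callback for the WRITE phase of COMPARE AND WRITE: flag the
 * command for response fall-through and release dev->caw_sem.
 */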
static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

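/*
 * Completion callback for the READ phase of COMPARE AND WRITE: compare the
 * data read from the backend against the verify payload in the data-out
 * buffer. On a match, rebuild t_data_sg to point at the write payload and
 * resubmit the command to the backend as a WRITE; on a mismatch, fail with
 * MISCOMPARE and release dev->caw_sem.
 */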
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet.
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

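/*
 * Execution handler for COMPARE AND WRITE: take dev->caw_sem, shrink
 * cmd->data_length to the size of a single backend I/O, and submit the READ
 * of the existing data into cmd->t_bidi_data_sg. The compare and subsequent
 * WRITE are driven from compare_and_write_callback().
 */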
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg.
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

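/*
 * Entry point for SBC backends to decode a CDB: extract the LBA and sector
 * count, wire up the execution handler for the opcode, and validate the
 * transfer length against fabric and backend limits and the device size.
 */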
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error.
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = get_unaligned_be32(&cdb[10]);
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not extend past the end
		 * of the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of the original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with"
				" sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with"
				" sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

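/*
 * Walk the UNMAP parameter list on behalf of a backend: validate the list
 * header and each block descriptor against max_unmap_block_desc_count,
 * max_unmap_lba_count and the device size, then invoke do_unmap_fn() for
 * each LBA/range pair.
 */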
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);