/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"

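/*
 * Emulate READ CAPACITY (10): return the LBA of the last logical block
 * and the logical block size in an 8-byte parameter data buffer.
 */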
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

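	/*
	 * READ CAPACITY (10) parameter data: bytes 0-3 hold the big-endian
	 * RETURNED LOGICAL BLOCK ADDRESS, bytes 4-7 the BLOCK LENGTH IN BYTES.
	 */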
	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

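/*
 * Emulate READ CAPACITY (16): return the last LBA and the logical block
 * size in a 32-byte parameter data buffer; byte 14 carries the thin
 * provisioning state.
 */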
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set the Thin Provisioning Enable bit in byte 14, as defined in
	 * sbc3r22 section READ CAPACITY (16), if emulate_tpu or emulate_tpws
	 * is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

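/*
 * Return the number of blocks requested by a WRITE_SAME variant. The
 * NUMBER OF BLOCKS field sits at a different CDB offset for each opcode;
 * zero means "from the starting LBA through the last block on the device".
 */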
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied, otherwise
	 * calculate the remaining range based on ->get_blocks() minus the
	 * starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

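/*
 * Complete the command with GOOD status without doing any work, for
 * commands we accept but have nothing to do for.
 */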
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

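/* Convert a transfer length in logical blocks into a byte count. */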
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

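/*
 * Verify that cmd->t_task_lba plus the sector count implied by
 * cmd->data_length does not run past the last block on the device.
 */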
static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

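/*
 * CDB decode helpers: extract the TRANSFER LENGTH and LOGICAL BLOCK
 * ADDRESS fields from the 6-, 10-, 12-, 16- and 32-byte CDB formats.
 */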
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

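/*
 * Common WRITE_SAME setup: reject the unsupported PBDATA (0x04) and
 * LBDATA (0x02) bits, enforce max_write_same_len, and select either the
 * UNMAP=1 (0x08) discard path or the plain write-same path.
 */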
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static void xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg.
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
}

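/*
 * Parse an initiator SBC CDB: decode the starting LBA and transfer length,
 * wire up the matching execute callback from *ops, and validate the
 * transfer against device limits before handing off to the backend.
 *
 * A backend typically calls this from its parse_cdb method. A minimal
 * sketch, assuming iblock-style callback names (hypothetical here):
 *
 *	static struct sbc_ops iblock_sbc_ops = {
 *		.execute_rw		= iblock_execute_rw,
 *		.execute_sync_cache	= iblock_execute_sync_cache,
 *		.execute_write_same	= iblock_execute_write_same,
 *		.execute_unmap		= iblock_execute_unmap,
 *	};
 *
 *	static sense_reason_t iblock_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &iblock_sbc_ops);
 *	}
 */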
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = ops->execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_cmd = ops->execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + range does not extend past the end
		 * of the device for IBLOCK and FILEIO ->do_sync_cache() backend
		 * calls.
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbc3r26 with WRITE_SAME (10) and check for the UNMAP
		 * bit (byte 1, bit 3) in what was originally a reserved field.
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh sectors %u"
				" exceeds fabric_max_sectors: %u\n",
				cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh sectors %u"
				" exceeds backend hw_max_sectors: %u\n",
				cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

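/* An SBC device always reports the TYPE_DISK peripheral device type. */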
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

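/*
 * Walk the UNMAP parameter list and invoke do_unmap_fn() once per block
 * descriptor. The list starts with an 8-byte header (UNMAP DATA LENGTH,
 * BLOCK DESCRIPTOR DATA LENGTH) followed by 16-byte block descriptors.
 *
 * The backend supplies do_unmap_fn(); a minimal sketch, with a
 * hypothetical callback name:
 *
 *	static sense_reason_t foo_do_unmap(struct se_cmd *cmd, void *priv,
 *					   sector_t lba, sector_t nolb)
 *	{
 *		// discard nolb blocks starting at lba, return 0 on success
 *	}
 *
 *	return sbc_execute_unmap(cmd, foo_do_unmap, priv);
 */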
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

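	/*
	 * Each 16-byte descriptor: an 8-byte UNMAP LOGICAL BLOCK ADDRESS,
	 * a 4-byte NUMBER OF LOGICAL BLOCKS, and 4 reserved bytes.
	 */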
	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);