/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"


static int sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	u32 blocks;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf = transport_kmap_data_sg(cmd);

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;

	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
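/*
 * Note: READ CAPACITY (10) can only report a 32-bit RETURNED LOGICAL
 * BLOCK ADDRESS, so when the capacity does not fit, 0xffffffff is
 * reported above and the initiator is expected to retry with
 * READ CAPACITY (16).  The open-coded shifts could equally be written
 * with the helpers from <asm/unaligned.h> (already included), e.g.:
 *
 *	put_unaligned_be32(blocks, &buf[0]);
 *	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.block_size,
 *			   &buf[4]);
 *
 * The shift form is kept here to match the surrounding code.
 */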
static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf;
	unsigned long long blocks = dev->transport->get_blocks(dev);

	buf = transport_kmap_data_sg(cmd);

	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
	/*
	 * Set the Thin Provisioning Enable bit in READ CAPACITY (16)
	 * byte 14, following sbc3r22, if emulate_tpu or emulate_tpws
	 * is enabled.
	 */
	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu ||
	    dev->se_sub_dev->se_dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

int spc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied,
	 * otherwise calculate the remaining range as
	 * ->get_blocks() - starting LBA + 1.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(spc_get_write_same_sectors);

static int sbc_emulate_verify(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}

static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}
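/*
 * The helpers below decode the LBA and TRANSFER LENGTH fields at their
 * fixed offsets in the 6/10/12/16-byte CDBs and in the 32-byte
 * VARIABLE_LENGTH_CMD form, as defined by SBC-3:
 *
 *	CDB size	LBA bytes		TRANSFER LENGTH bytes
 *	6		1..3 (21-bit)		4 (zero means 256)
 *	10		2..5			7..8
 *	12		2..5			6..9
 *	16		2..9			10..13
 *	32		12..19			28..31
 */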
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		(cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		(cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static int sbc_write_same_supported(struct se_device *dev,
	unsigned char *flags)
{
	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return -ENOSYS;
	}

	/*
	 * Currently for the emulated case we only accept
	 * tpws with the UNMAP=1 bit set.
	 */
	if (!(flags[0] & 0x08)) {
		pr_err("WRITE_SAME w/o UNMAP bit not"
			" supported for Block Discard Emulation\n");
		return -ENOSYS;
	}

	return 0;
}
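/*
 * For reference, the masks checked in sbc_write_same_supported() map to
 * the WRITE SAME flags byte as defined in SBC-3: bit 3 (0x08) is UNMAP,
 * bit 2 (0x04) is PBDATA and bit 1 (0x02) is LBDATA.  Callers pass
 * &cdb[1] for WRITE SAME (10)/(16) and &cdb[10] for WRITE SAME (32).
 */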
static void xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_mem_bidi_list
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
}
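/*
 * A worked example of the XOR step above: if a block on disk holds the
 * byte 0x5a and the data-out buffer supplies 0xf0, the data-in buffer
 * receives 0x5a ^ 0xf0 = 0xaa.  An initiator can use this to obtain the
 * old-vs-new data delta (e.g. for RAID-style parity updates) in a
 * single bidirectional round trip.
 */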
int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
{
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	int ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = ops->execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_cmd = ops->execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			if (!ops->execute_write_same)
				goto out_unsupported_cdb;

			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			if (sbc_write_same_supported(dev, &cdb[10]) < 0)
				goto out_unsupported_cdb;
			cmd->execute_cmd = ops->execute_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	}
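	/*
	 * The remaining opcodes are either emulated locally or handed to
	 * optional backend ops.  Note for SERVICE_ACTION_IN below that the
	 * service action lives in the low five bits of CDB byte 1 and the
	 * ALLOCATION LENGTH in bytes 10..13.
	 */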
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			goto out_invalid_cdb_field;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
			(cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache)
			goto out_unsupported_cdb;

		/*
		 * Extract LBA and range to be flushed for emulated
		 * SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not exceed the end of
		 * the device for IBLOCK and FILEIO ->do_sync_cache() backend
		 * calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			goto out_unsupported_cdb;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		if (!ops->execute_write_same)
			goto out_unsupported_cdb;

		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		if (sbc_write_same_supported(dev, &cdb[1]) < 0)
			goto out_unsupported_cdb;
		cmd->execute_cmd = ops->execute_write_same;
		break;
	case WRITE_SAME:
		if (!ops->execute_write_same)
			goto out_unsupported_cdb;

		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 for WRITE_SAME (10) and check byte 1 bit 3
		 * (UNMAP), which was a reserved field in earlier revisions.
		 */
		if (sbc_write_same_supported(dev, &cdb[1]) < 0)
			goto out_unsupported_cdb;
		cmd->execute_cmd = ops->execute_write_same;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_verify;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		goto out_unsupported_cdb;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				su_dev->se_dev_attrib.fabric_max_sectors);
			goto out_invalid_cdb_field;
		}
		if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				su_dev->se_dev_attrib.hw_max_sectors);
			goto out_invalid_cdb_field;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			goto out_invalid_cdb_field;
		}

		size = sbc_get_size(cmd, sectors);
	}

	ret = target_cmd_size_check(cmd, size);
	if (ret < 0)
		return ret;

	return 0;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}
EXPORT_SYMBOL(sbc_parse_cdb);
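/*
 * Usage sketch (illustrative only -- the my_* names are hypothetical;
 * see target_core_iblock.c for a real user): a backend wires its I/O
 * entry points into a struct spc_ops and delegates CDB parsing:
 *
 *	static struct spc_ops my_sbc_ops = {
 *		.execute_rw		= my_execute_rw,
 *		.execute_sync_cache	= my_execute_sync_cache,
 *		.execute_write_same	= my_execute_write_same,
 *		.execute_unmap		= my_execute_unmap,
 *	};
 *
 *	static int my_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &my_sbc_ops);
 *	}
 *
 * Ops left NULL simply cause the corresponding opcodes to be rejected
 * with TCM_UNSUPPORTED_SCSI_OPCODE, as seen in the parser above.
 */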