/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"


static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;
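
	/*
	 * Build the 8-byte READ CAPACITY (10) response: the returned
	 * logical block address (the last addressable LBA, capped at
	 * 0xffffffff above) in bytes 0-3 and the logical block length
	 * in bytes 4-7, both big-endian.
	 */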
	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set the Thin Provisioning Enable bit in READ CAPACITY (16)
	 * byte 14, following sbc3r22, if emulate_tpu or emulate_tpws
	 * is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied,
	 * otherwise calculate the remaining range based on
	 * ->get_blocks() - starting LBA.
	 */
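	/*
	 * Note that ->get_blocks() returns the last addressable LBA,
	 * hence the + 1 when computing the remaining range below.
	 */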
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		(cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		(cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
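
/*
 * Shared setup for WRITE SAME (10/16/32).  *flags points at byte 1 of
 * the 10/16-byte CDBs (byte 10 of the 32-byte CDB) and carries the
 * PBDATA (0x04), LBDATA (0x02), ANCHOR (0x10) and UNMAP (0x08) bits
 * checked below.
 */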
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}
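
/*
 * COMPARE AND WRITE is emulated as two backend I/Os serialized by
 * dev->caw_sem: sbc_compare_and_write() submits a READ into
 * cmd->t_bidi_data_sg, compare_and_write_callback() memcmp()s the
 * result against the verify half of the data-out payload and, on a
 * match, resubmits the write half, and compare_and_write_post() runs
 * after the WRITE completes.
 */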
static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet.
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload.
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}
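
	/*
	 * Build the write scatterlist from the second half of the
	 * data-out payload: with NoLB=1 and block_size < PAGE_SIZE the
	 * write data sits at offset block_size within the same page as
	 * the verify data, otherwise it starts on the following page.
	 */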
	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to the individual block_size in order to
	 * not confuse backend drivers that depend on this value matching
	 * the size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of the WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}
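
/*
 * Parse an SBC CDB: decode the LBA and transfer length, wire up
 * cmd->execute_cmd and any completion callback, and validate the
 * request against device limits before handing it to the backend.
 */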
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
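	/*
	 * VARIABLE LENGTH CDBs are 32 bytes for the opcodes emulated
	 * here, with the actual operation encoded in the big-endian
	 * SERVICE ACTION field at bytes 8-9.
	 */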
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated
		 * SYNCHRONIZE_CACHE.
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not extend past the
		 * end of the device for IBLOCK and FILEIO ->do_sync_cache()
		 * backend calls.
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
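	/*
	 * The Block Limits VPD page reports WSNZ=1, so a WRITE SAME
	 * NUMBER OF LOGICAL BLOCKS of zero is rejected below rather
	 * than interpreted as "write to the last LBA".
	 */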
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the
		 * existence of the UNMAP bit in byte 1 bit 3, instead of
		 * the original reserved field.
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}
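
	/*
	 * Each 16-byte UNMAP block descriptor carries a 64-bit starting
	 * LBA in bytes 0-7 and a 32-bit NUMBER OF LOGICAL BLOCKS in
	 * bytes 8-11; bytes 12-15 are reserved.
	 */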
	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);