1 /* 2 * SCSI Block Commands (SBC) parsing and emulation. 3 * 4 * (c) Copyright 2002-2013 Datera, Inc. 5 * 6 * Nicholas A. Bellinger <nab@kernel.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 */ 22 23 #include <linux/kernel.h> 24 #include <linux/module.h> 25 #include <linux/ratelimit.h> 26 #include <linux/crc-t10dif.h> 27 #include <linux/t10-pi.h> 28 #include <asm/unaligned.h> 29 #include <scsi/scsi_proto.h> 30 #include <scsi/scsi_tcq.h> 31 32 #include <target/target_core_base.h> 33 #include <target/target_core_backend.h> 34 #include <target/target_core_fabric.h> 35 36 #include "target_core_internal.h" 37 #include "target_core_ua.h" 38 #include "target_core_alua.h" 39 40 static sense_reason_t 41 sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); 42 static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); 43 44 static sense_reason_t 45 sbc_emulate_readcapacity(struct se_cmd *cmd) 46 { 47 struct se_device *dev = cmd->se_dev; 48 unsigned char *cdb = cmd->t_task_cdb; 49 unsigned long long blocks_long = dev->transport->get_blocks(dev); 50 unsigned char *rbuf; 51 unsigned char buf[8]; 52 u32 blocks; 53 54 /* 55 * SBC-2 says: 56 * If the PMI bit is set to zero and the LOGICAL BLOCK 57 * ADDRESS field is not set to zero, the device server shall 58 * terminate the command with CHECK CONDITION status with 59 * the sense key set to ILLEGAL REQUEST and the additional 60 * sense code set to INVALID FIELD IN CDB. 61 * 62 * In SBC-3, these fields are obsolete, but some SCSI 63 * compliance tests actually check this, so we might as well 64 * follow SBC-2. 65 */ 66 if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5])) 67 return TCM_INVALID_CDB_FIELD; 68 69 if (blocks_long >= 0x00000000ffffffff) 70 blocks = 0xffffffff; 71 else 72 blocks = (u32)blocks_long; 73 74 buf[0] = (blocks >> 24) & 0xff; 75 buf[1] = (blocks >> 16) & 0xff; 76 buf[2] = (blocks >> 8) & 0xff; 77 buf[3] = blocks & 0xff; 78 buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; 79 buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; 80 buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; 81 buf[7] = dev->dev_attrib.block_size & 0xff; 82 83 rbuf = transport_kmap_data_sg(cmd); 84 if (rbuf) { 85 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 86 transport_kunmap_data_sg(cmd); 87 } 88 89 target_complete_cmd_with_length(cmd, GOOD, 8); 90 return 0; 91 } 92 93 static sense_reason_t 94 sbc_emulate_readcapacity_16(struct se_cmd *cmd) 95 { 96 struct se_device *dev = cmd->se_dev; 97 struct se_session *sess = cmd->se_sess; 98 int pi_prot_type = dev->dev_attrib.pi_prot_type; 99 100 unsigned char *rbuf; 101 unsigned char buf[32]; 102 unsigned long long blocks = dev->transport->get_blocks(dev); 103 104 memset(buf, 0, sizeof(buf)); 105 buf[0] = (blocks >> 56) & 0xff; 106 buf[1] = (blocks >> 48) & 0xff; 107 buf[2] = (blocks >> 40) & 0xff; 108 buf[3] = (blocks >> 32) & 0xff; 109 buf[4] = (blocks >> 24) & 0xff; 110 buf[5] = (blocks >> 16) & 0xff; 111 buf[6] = (blocks >> 8) & 0xff; 112 buf[7] = blocks & 0xff; 113 buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; 114 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 115 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 116 buf[11] = dev->dev_attrib.block_size & 0xff; 117 /* 118 * Set P_TYPE and PROT_EN bits for DIF support 119 */ 120 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 121 /* 122 * Only override a device's pi_prot_type if no T10-PI is 123 * available, and sess_prot_type has been explicitly enabled. 124 */ 125 if (!pi_prot_type) 126 pi_prot_type = sess->sess_prot_type; 127 128 if (pi_prot_type) 129 buf[12] = (pi_prot_type - 1) << 1 | 0x1; 130 } 131 132 if (dev->transport->get_lbppbe) 133 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; 134 135 if (dev->transport->get_alignment_offset_lbas) { 136 u16 lalba = dev->transport->get_alignment_offset_lbas(dev); 137 buf[14] = (lalba >> 8) & 0x3f; 138 buf[15] = lalba & 0xff; 139 } 140 141 /* 142 * Set Thin Provisioning Enable bit following sbc3r22 in section 143 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 144 */ 145 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) { 146 buf[14] |= 0x80; 147 148 /* 149 * LBPRZ signifies that zeroes will be read back from an LBA after 150 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2) 151 */ 152 if (dev->dev_attrib.unmap_zeroes_data) 153 buf[14] |= 0x40; 154 } 155 156 rbuf = transport_kmap_data_sg(cmd); 157 if (rbuf) { 158 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 159 transport_kunmap_data_sg(cmd); 160 } 161 162 target_complete_cmd_with_length(cmd, GOOD, 32); 163 return 0; 164 } 165 166 static sense_reason_t 167 sbc_emulate_startstop(struct se_cmd *cmd) 168 { 169 unsigned char *cdb = cmd->t_task_cdb; 170 171 /* 172 * See sbc3r36 section 5.25 173 * Immediate bit should be set since there is nothing to complete 174 * POWER CONDITION MODIFIER 0h 175 */ 176 if (!(cdb[1] & 1) || cdb[2] || cdb[3]) 177 return TCM_INVALID_CDB_FIELD; 178 179 /* 180 * See sbc3r36 section 5.25 181 * POWER CONDITION 0h START_VALID - process START and LOEJ 182 */ 183 if (cdb[4] >> 4 & 0xf) 184 return TCM_INVALID_CDB_FIELD; 185 186 /* 187 * See sbc3r36 section 5.25 188 * LOEJ 0h - nothing to load or unload 189 * START 1h - we are ready 190 */ 191 if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4)) 192 return TCM_INVALID_CDB_FIELD; 193 194 target_complete_cmd(cmd, SAM_STAT_GOOD); 195 return 0; 196 } 197 198 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) 199 { 200 u32 num_blocks; 201 202 if (cmd->t_task_cdb[0] == WRITE_SAME) 203 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); 204 else if (cmd->t_task_cdb[0] == WRITE_SAME_16) 205 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); 206 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ 207 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); 208 209 /* 210 * Use the explicit range when non zero is supplied, otherwise calculate 211 * the remaining range based on ->get_blocks() - starting LBA. 212 */ 213 if (num_blocks) 214 return num_blocks; 215 216 return cmd->se_dev->transport->get_blocks(cmd->se_dev) - 217 cmd->t_task_lba + 1; 218 } 219 EXPORT_SYMBOL(sbc_get_write_same_sectors); 220 221 static sense_reason_t 222 sbc_execute_write_same_unmap(struct se_cmd *cmd) 223 { 224 struct sbc_ops *ops = cmd->protocol_data; 225 sector_t nolb = sbc_get_write_same_sectors(cmd); 226 sense_reason_t ret; 227 228 if (nolb) { 229 ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb); 230 if (ret) 231 return ret; 232 } 233 234 target_complete_cmd(cmd, GOOD); 235 return 0; 236 } 237 238 static sense_reason_t 239 sbc_emulate_noop(struct se_cmd *cmd) 240 { 241 target_complete_cmd(cmd, GOOD); 242 return 0; 243 } 244 245 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 246 { 247 return cmd->se_dev->dev_attrib.block_size * sectors; 248 } 249 250 static inline u32 transport_get_sectors_6(unsigned char *cdb) 251 { 252 /* 253 * Use 8-bit sector value. SBC-3 says: 254 * 255 * A TRANSFER LENGTH field set to zero specifies that 256 256 * logical blocks shall be written. Any other value 257 * specifies the number of logical blocks that shall be 258 * written. 259 */ 260 return cdb[4] ? : 256; 261 } 262 263 static inline u32 transport_get_sectors_10(unsigned char *cdb) 264 { 265 return (u32)(cdb[7] << 8) + cdb[8]; 266 } 267 268 static inline u32 transport_get_sectors_12(unsigned char *cdb) 269 { 270 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 271 } 272 273 static inline u32 transport_get_sectors_16(unsigned char *cdb) 274 { 275 return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 276 (cdb[12] << 8) + cdb[13]; 277 } 278 279 /* 280 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 281 */ 282 static inline u32 transport_get_sectors_32(unsigned char *cdb) 283 { 284 return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 285 (cdb[30] << 8) + cdb[31]; 286 287 } 288 289 static inline u32 transport_lba_21(unsigned char *cdb) 290 { 291 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 292 } 293 294 static inline u32 transport_lba_32(unsigned char *cdb) 295 { 296 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 297 } 298 299 static inline unsigned long long transport_lba_64(unsigned char *cdb) 300 { 301 unsigned int __v1, __v2; 302 303 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 304 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 305 306 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 307 } 308 309 /* 310 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 311 */ 312 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 313 { 314 unsigned int __v1, __v2; 315 316 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 317 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 318 319 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 320 } 321 322 static sense_reason_t 323 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) 324 { 325 struct se_device *dev = cmd->se_dev; 326 sector_t end_lba = dev->transport->get_blocks(dev) + 1; 327 unsigned int sectors = sbc_get_write_same_sectors(cmd); 328 sense_reason_t ret; 329 330 if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 331 pr_err("WRITE_SAME PBDATA and LBDATA" 332 " bits not supported for Block Discard" 333 " Emulation\n"); 334 return TCM_UNSUPPORTED_SCSI_OPCODE; 335 } 336 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { 337 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", 338 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 339 return TCM_INVALID_CDB_FIELD; 340 } 341 /* 342 * Sanity check for LBA wrap and request past end of device. 343 */ 344 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || 345 ((cmd->t_task_lba + sectors) > end_lba)) { 346 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", 347 (unsigned long long)end_lba, cmd->t_task_lba, sectors); 348 return TCM_ADDRESS_OUT_OF_RANGE; 349 } 350 351 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ 352 if (flags[0] & 0x10) { 353 pr_warn("WRITE SAME with ANCHOR not supported\n"); 354 return TCM_INVALID_CDB_FIELD; 355 } 356 /* 357 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 358 * translated into block discard requests within backend code. 359 */ 360 if (flags[0] & 0x08) { 361 if (!ops->execute_unmap) 362 return TCM_UNSUPPORTED_SCSI_OPCODE; 363 364 if (!dev->dev_attrib.emulate_tpws) { 365 pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device" 366 " has emulate_tpws disabled\n"); 367 return TCM_UNSUPPORTED_SCSI_OPCODE; 368 } 369 cmd->execute_cmd = sbc_execute_write_same_unmap; 370 return 0; 371 } 372 if (!ops->execute_write_same) 373 return TCM_UNSUPPORTED_SCSI_OPCODE; 374 375 ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); 376 if (ret) 377 return ret; 378 379 cmd->execute_cmd = ops->execute_write_same; 380 return 0; 381 } 382 383 static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success, 384 int *post_ret) 385 { 386 unsigned char *buf, *addr; 387 struct scatterlist *sg; 388 unsigned int offset; 389 sense_reason_t ret = TCM_NO_SENSE; 390 int i, count; 391 /* 392 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 393 * 394 * 1) read the specified logical block(s); 395 * 2) transfer logical blocks from the data-out buffer; 396 * 3) XOR the logical blocks transferred from the data-out buffer with 397 * the logical blocks read, storing the resulting XOR data in a buffer; 398 * 4) if the DISABLE WRITE bit is set to zero, then write the logical 399 * blocks transferred from the data-out buffer; and 400 * 5) transfer the resulting XOR data to the data-in buffer. 401 */ 402 buf = kmalloc(cmd->data_length, GFP_KERNEL); 403 if (!buf) { 404 pr_err("Unable to allocate xor_callback buf\n"); 405 return TCM_OUT_OF_RESOURCES; 406 } 407 /* 408 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 409 * into the locally allocated *buf 410 */ 411 sg_copy_to_buffer(cmd->t_data_sg, 412 cmd->t_data_nents, 413 buf, 414 cmd->data_length); 415 416 /* 417 * Now perform the XOR against the BIDI read memory located at 418 * cmd->t_mem_bidi_list 419 */ 420 421 offset = 0; 422 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 423 addr = kmap_atomic(sg_page(sg)); 424 if (!addr) { 425 ret = TCM_OUT_OF_RESOURCES; 426 goto out; 427 } 428 429 for (i = 0; i < sg->length; i++) 430 *(addr + sg->offset + i) ^= *(buf + offset + i); 431 432 offset += sg->length; 433 kunmap_atomic(addr); 434 } 435 436 out: 437 kfree(buf); 438 return ret; 439 } 440 441 static sense_reason_t 442 sbc_execute_rw(struct se_cmd *cmd) 443 { 444 struct sbc_ops *ops = cmd->protocol_data; 445 446 return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, 447 cmd->data_direction); 448 } 449 450 static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, 451 int *post_ret) 452 { 453 struct se_device *dev = cmd->se_dev; 454 455 /* 456 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through 457 * within target_complete_ok_work() if the command was successfully 458 * sent to the backend driver. 459 */ 460 spin_lock_irq(&cmd->t_state_lock); 461 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { 462 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 463 *post_ret = 1; 464 } 465 spin_unlock_irq(&cmd->t_state_lock); 466 467 /* 468 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 469 * before the original READ I/O submission. 470 */ 471 up(&dev->caw_sem); 472 473 return TCM_NO_SENSE; 474 } 475 476 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, 477 int *post_ret) 478 { 479 struct se_device *dev = cmd->se_dev; 480 struct scatterlist *write_sg = NULL, *sg; 481 unsigned char *buf = NULL, *addr; 482 struct sg_mapping_iter m; 483 unsigned int offset = 0, len; 484 unsigned int nlbas = cmd->t_task_nolb; 485 unsigned int block_size = dev->dev_attrib.block_size; 486 unsigned int compare_len = (nlbas * block_size); 487 sense_reason_t ret = TCM_NO_SENSE; 488 int rc, i; 489 490 /* 491 * Handle early failure in transport_generic_request_failure(), 492 * which will not have taken ->caw_sem yet.. 493 */ 494 if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) 495 return TCM_NO_SENSE; 496 /* 497 * Handle special case for zero-length COMPARE_AND_WRITE 498 */ 499 if (!cmd->data_length) 500 goto out; 501 /* 502 * Immediately exit + release dev->caw_sem if command has already 503 * been failed with a non-zero SCSI status. 504 */ 505 if (cmd->scsi_status) { 506 pr_err("compare_and_write_callback: non zero scsi_status:" 507 " 0x%02x\n", cmd->scsi_status); 508 goto out; 509 } 510 511 buf = kzalloc(cmd->data_length, GFP_KERNEL); 512 if (!buf) { 513 pr_err("Unable to allocate compare_and_write buf\n"); 514 ret = TCM_OUT_OF_RESOURCES; 515 goto out; 516 } 517 518 write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, 519 GFP_KERNEL); 520 if (!write_sg) { 521 pr_err("Unable to allocate compare_and_write sg\n"); 522 ret = TCM_OUT_OF_RESOURCES; 523 goto out; 524 } 525 sg_init_table(write_sg, cmd->t_data_nents); 526 /* 527 * Setup verify and write data payloads from total NumberLBAs. 528 */ 529 rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, 530 cmd->data_length); 531 if (!rc) { 532 pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); 533 ret = TCM_OUT_OF_RESOURCES; 534 goto out; 535 } 536 /* 537 * Compare against SCSI READ payload against verify payload 538 */ 539 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { 540 addr = (unsigned char *)kmap_atomic(sg_page(sg)); 541 if (!addr) { 542 ret = TCM_OUT_OF_RESOURCES; 543 goto out; 544 } 545 546 len = min(sg->length, compare_len); 547 548 if (memcmp(addr, buf + offset, len)) { 549 pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n", 550 addr, buf + offset); 551 kunmap_atomic(addr); 552 goto miscompare; 553 } 554 kunmap_atomic(addr); 555 556 offset += len; 557 compare_len -= len; 558 if (!compare_len) 559 break; 560 } 561 562 i = 0; 563 len = cmd->t_task_nolb * block_size; 564 sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); 565 /* 566 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. 567 */ 568 while (len) { 569 sg_miter_next(&m); 570 571 if (block_size < PAGE_SIZE) { 572 sg_set_page(&write_sg[i], m.page, block_size, 573 m.piter.sg->offset + block_size); 574 } else { 575 sg_miter_next(&m); 576 sg_set_page(&write_sg[i], m.page, block_size, 577 m.piter.sg->offset); 578 } 579 len -= block_size; 580 i++; 581 } 582 sg_miter_stop(&m); 583 /* 584 * Save the original SGL + nents values before updating to new 585 * assignments, to be released in transport_free_pages() -> 586 * transport_reset_sgl_orig() 587 */ 588 cmd->t_data_sg_orig = cmd->t_data_sg; 589 cmd->t_data_sg = write_sg; 590 cmd->t_data_nents_orig = cmd->t_data_nents; 591 cmd->t_data_nents = 1; 592 593 cmd->sam_task_attr = TCM_HEAD_TAG; 594 cmd->transport_complete_callback = compare_and_write_post; 595 /* 596 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler 597 * for submitting the adjusted SGL to write instance user-data. 598 */ 599 cmd->execute_cmd = sbc_execute_rw; 600 601 spin_lock_irq(&cmd->t_state_lock); 602 cmd->t_state = TRANSPORT_PROCESSING; 603 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 604 spin_unlock_irq(&cmd->t_state_lock); 605 606 __target_execute_cmd(cmd, false); 607 608 kfree(buf); 609 return ret; 610 611 miscompare: 612 pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n", 613 dev->transport->name); 614 ret = TCM_MISCOMPARE_VERIFY; 615 out: 616 /* 617 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in 618 * sbc_compare_and_write() before the original READ I/O submission. 619 */ 620 up(&dev->caw_sem); 621 kfree(write_sg); 622 kfree(buf); 623 return ret; 624 } 625 626 static sense_reason_t 627 sbc_compare_and_write(struct se_cmd *cmd) 628 { 629 struct sbc_ops *ops = cmd->protocol_data; 630 struct se_device *dev = cmd->se_dev; 631 sense_reason_t ret; 632 int rc; 633 /* 634 * Submit the READ first for COMPARE_AND_WRITE to perform the 635 * comparision using SGLs at cmd->t_bidi_data_sg.. 636 */ 637 rc = down_interruptible(&dev->caw_sem); 638 if (rc != 0) { 639 cmd->transport_complete_callback = NULL; 640 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 641 } 642 /* 643 * Reset cmd->data_length to individual block_size in order to not 644 * confuse backend drivers that depend on this value matching the 645 * size of the I/O being submitted. 646 */ 647 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; 648 649 ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 650 DMA_FROM_DEVICE); 651 if (ret) { 652 cmd->transport_complete_callback = NULL; 653 up(&dev->caw_sem); 654 return ret; 655 } 656 /* 657 * Unlock of dev->caw_sem to occur in compare_and_write_callback() 658 * upon MISCOMPARE, or in compare_and_write_done() upon completion 659 * of WRITE instance user-data. 660 */ 661 return TCM_NO_SENSE; 662 } 663 664 static int 665 sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type, 666 bool is_write, struct se_cmd *cmd) 667 { 668 if (is_write) { 669 cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP : 670 protect ? TARGET_PROT_DOUT_PASS : 671 TARGET_PROT_DOUT_INSERT; 672 switch (protect) { 673 case 0x0: 674 case 0x3: 675 cmd->prot_checks = 0; 676 break; 677 case 0x1: 678 case 0x5: 679 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 680 if (prot_type == TARGET_DIF_TYPE1_PROT) 681 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 682 break; 683 case 0x2: 684 if (prot_type == TARGET_DIF_TYPE1_PROT) 685 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 686 break; 687 case 0x4: 688 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 689 break; 690 default: 691 pr_err("Unsupported protect field %d\n", protect); 692 return -EINVAL; 693 } 694 } else { 695 cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT : 696 protect ? TARGET_PROT_DIN_PASS : 697 TARGET_PROT_DIN_STRIP; 698 switch (protect) { 699 case 0x0: 700 case 0x1: 701 case 0x5: 702 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 703 if (prot_type == TARGET_DIF_TYPE1_PROT) 704 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 705 break; 706 case 0x2: 707 if (prot_type == TARGET_DIF_TYPE1_PROT) 708 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 709 break; 710 case 0x3: 711 cmd->prot_checks = 0; 712 break; 713 case 0x4: 714 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 715 break; 716 default: 717 pr_err("Unsupported protect field %d\n", protect); 718 return -EINVAL; 719 } 720 } 721 722 return 0; 723 } 724 725 static sense_reason_t 726 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, 727 u32 sectors, bool is_write) 728 { 729 u8 protect = cdb[1] >> 5; 730 int sp_ops = cmd->se_sess->sup_prot_ops; 731 int pi_prot_type = dev->dev_attrib.pi_prot_type; 732 bool fabric_prot = false; 733 734 if (!cmd->t_prot_sg || !cmd->t_prot_nents) { 735 if (unlikely(protect && 736 !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) { 737 pr_err("CDB contains protect bit, but device + fabric does" 738 " not advertise PROTECT=1 feature bit\n"); 739 return TCM_INVALID_CDB_FIELD; 740 } 741 if (cmd->prot_pto) 742 return TCM_NO_SENSE; 743 } 744 745 switch (dev->dev_attrib.pi_prot_type) { 746 case TARGET_DIF_TYPE3_PROT: 747 cmd->reftag_seed = 0xffffffff; 748 break; 749 case TARGET_DIF_TYPE2_PROT: 750 if (protect) 751 return TCM_INVALID_CDB_FIELD; 752 753 cmd->reftag_seed = cmd->t_task_lba; 754 break; 755 case TARGET_DIF_TYPE1_PROT: 756 cmd->reftag_seed = cmd->t_task_lba; 757 break; 758 case TARGET_DIF_TYPE0_PROT: 759 /* 760 * See if the fabric supports T10-PI, and the session has been 761 * configured to allow export PROTECT=1 feature bit with backend 762 * devices that don't support T10-PI. 763 */ 764 fabric_prot = is_write ? 765 !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) : 766 !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT)); 767 768 if (fabric_prot && cmd->se_sess->sess_prot_type) { 769 pi_prot_type = cmd->se_sess->sess_prot_type; 770 break; 771 } 772 if (!protect) 773 return TCM_NO_SENSE; 774 /* Fallthrough */ 775 default: 776 pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " 777 "PROTECT: 0x%02x\n", cdb[0], protect); 778 return TCM_INVALID_CDB_FIELD; 779 } 780 781 if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd)) 782 return TCM_INVALID_CDB_FIELD; 783 784 cmd->prot_type = pi_prot_type; 785 cmd->prot_length = dev->prot_length * sectors; 786 787 /** 788 * In case protection information exists over the wire 789 * we modify command data length to describe pure data. 790 * The actual transfer length is data length + protection 791 * length 792 **/ 793 if (protect) 794 cmd->data_length = sectors * dev->dev_attrib.block_size; 795 796 pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " 797 "prot_op=%d prot_checks=%d\n", 798 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, 799 cmd->prot_op, cmd->prot_checks); 800 801 return TCM_NO_SENSE; 802 } 803 804 static int 805 sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) 806 { 807 if (cdb[1] & 0x10) { 808 /* see explanation in spc_emulate_modesense */ 809 if (!target_check_fua(dev)) { 810 pr_err("Got CDB: 0x%02x with DPO bit set, but device" 811 " does not advertise support for DPO\n", cdb[0]); 812 return -EINVAL; 813 } 814 } 815 if (cdb[1] & 0x8) { 816 if (!target_check_fua(dev)) { 817 pr_err("Got CDB: 0x%02x with FUA bit set, but device" 818 " does not advertise support for FUA write\n", 819 cdb[0]); 820 return -EINVAL; 821 } 822 cmd->se_cmd_flags |= SCF_FUA; 823 } 824 return 0; 825 } 826 827 sense_reason_t 828 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) 829 { 830 struct se_device *dev = cmd->se_dev; 831 unsigned char *cdb = cmd->t_task_cdb; 832 unsigned int size; 833 u32 sectors = 0; 834 sense_reason_t ret; 835 836 cmd->protocol_data = ops; 837 838 switch (cdb[0]) { 839 case READ_6: 840 sectors = transport_get_sectors_6(cdb); 841 cmd->t_task_lba = transport_lba_21(cdb); 842 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 843 cmd->execute_cmd = sbc_execute_rw; 844 break; 845 case READ_10: 846 sectors = transport_get_sectors_10(cdb); 847 cmd->t_task_lba = transport_lba_32(cdb); 848 849 if (sbc_check_dpofua(dev, cmd, cdb)) 850 return TCM_INVALID_CDB_FIELD; 851 852 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 853 if (ret) 854 return ret; 855 856 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 857 cmd->execute_cmd = sbc_execute_rw; 858 break; 859 case READ_12: 860 sectors = transport_get_sectors_12(cdb); 861 cmd->t_task_lba = transport_lba_32(cdb); 862 863 if (sbc_check_dpofua(dev, cmd, cdb)) 864 return TCM_INVALID_CDB_FIELD; 865 866 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 867 if (ret) 868 return ret; 869 870 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 871 cmd->execute_cmd = sbc_execute_rw; 872 break; 873 case READ_16: 874 sectors = transport_get_sectors_16(cdb); 875 cmd->t_task_lba = transport_lba_64(cdb); 876 877 if (sbc_check_dpofua(dev, cmd, cdb)) 878 return TCM_INVALID_CDB_FIELD; 879 880 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 881 if (ret) 882 return ret; 883 884 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 885 cmd->execute_cmd = sbc_execute_rw; 886 break; 887 case WRITE_6: 888 sectors = transport_get_sectors_6(cdb); 889 cmd->t_task_lba = transport_lba_21(cdb); 890 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 891 cmd->execute_cmd = sbc_execute_rw; 892 break; 893 case WRITE_10: 894 case WRITE_VERIFY: 895 sectors = transport_get_sectors_10(cdb); 896 cmd->t_task_lba = transport_lba_32(cdb); 897 898 if (sbc_check_dpofua(dev, cmd, cdb)) 899 return TCM_INVALID_CDB_FIELD; 900 901 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 902 if (ret) 903 return ret; 904 905 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 906 cmd->execute_cmd = sbc_execute_rw; 907 break; 908 case WRITE_12: 909 sectors = transport_get_sectors_12(cdb); 910 cmd->t_task_lba = transport_lba_32(cdb); 911 912 if (sbc_check_dpofua(dev, cmd, cdb)) 913 return TCM_INVALID_CDB_FIELD; 914 915 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 916 if (ret) 917 return ret; 918 919 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 920 cmd->execute_cmd = sbc_execute_rw; 921 break; 922 case WRITE_16: 923 sectors = transport_get_sectors_16(cdb); 924 cmd->t_task_lba = transport_lba_64(cdb); 925 926 if (sbc_check_dpofua(dev, cmd, cdb)) 927 return TCM_INVALID_CDB_FIELD; 928 929 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 930 if (ret) 931 return ret; 932 933 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 934 cmd->execute_cmd = sbc_execute_rw; 935 break; 936 case XDWRITEREAD_10: 937 if (cmd->data_direction != DMA_TO_DEVICE || 938 !(cmd->se_cmd_flags & SCF_BIDI)) 939 return TCM_INVALID_CDB_FIELD; 940 sectors = transport_get_sectors_10(cdb); 941 942 if (sbc_check_dpofua(dev, cmd, cdb)) 943 return TCM_INVALID_CDB_FIELD; 944 945 cmd->t_task_lba = transport_lba_32(cdb); 946 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 947 948 /* 949 * Setup BIDI XOR callback to be run after I/O completion. 950 */ 951 cmd->execute_cmd = sbc_execute_rw; 952 cmd->transport_complete_callback = &xdreadwrite_callback; 953 break; 954 case VARIABLE_LENGTH_CMD: 955 { 956 u16 service_action = get_unaligned_be16(&cdb[8]); 957 switch (service_action) { 958 case XDWRITEREAD_32: 959 sectors = transport_get_sectors_32(cdb); 960 961 if (sbc_check_dpofua(dev, cmd, cdb)) 962 return TCM_INVALID_CDB_FIELD; 963 /* 964 * Use WRITE_32 and READ_32 opcodes for the emulated 965 * XDWRITE_READ_32 logic. 966 */ 967 cmd->t_task_lba = transport_lba_64_ext(cdb); 968 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 969 970 /* 971 * Setup BIDI XOR callback to be run during after I/O 972 * completion. 973 */ 974 cmd->execute_cmd = sbc_execute_rw; 975 cmd->transport_complete_callback = &xdreadwrite_callback; 976 break; 977 case WRITE_SAME_32: 978 sectors = transport_get_sectors_32(cdb); 979 if (!sectors) { 980 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 981 " supported\n"); 982 return TCM_INVALID_CDB_FIELD; 983 } 984 985 size = sbc_get_size(cmd, 1); 986 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 987 988 ret = sbc_setup_write_same(cmd, &cdb[10], ops); 989 if (ret) 990 return ret; 991 break; 992 default: 993 pr_err("VARIABLE_LENGTH_CMD service action" 994 " 0x%04x not supported\n", service_action); 995 return TCM_UNSUPPORTED_SCSI_OPCODE; 996 } 997 break; 998 } 999 case COMPARE_AND_WRITE: 1000 sectors = cdb[13]; 1001 /* 1002 * Currently enforce COMPARE_AND_WRITE for a single sector 1003 */ 1004 if (sectors > 1) { 1005 pr_err("COMPARE_AND_WRITE contains NoLB: %u greater" 1006 " than 1\n", sectors); 1007 return TCM_INVALID_CDB_FIELD; 1008 } 1009 if (sbc_check_dpofua(dev, cmd, cdb)) 1010 return TCM_INVALID_CDB_FIELD; 1011 1012 /* 1013 * Double size because we have two buffers, note that 1014 * zero is not an error.. 1015 */ 1016 size = 2 * sbc_get_size(cmd, sectors); 1017 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 1018 cmd->t_task_nolb = sectors; 1019 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; 1020 cmd->execute_cmd = sbc_compare_and_write; 1021 cmd->transport_complete_callback = compare_and_write_callback; 1022 break; 1023 case READ_CAPACITY: 1024 size = READ_CAP_LEN; 1025 cmd->execute_cmd = sbc_emulate_readcapacity; 1026 break; 1027 case SERVICE_ACTION_IN_16: 1028 switch (cmd->t_task_cdb[1] & 0x1f) { 1029 case SAI_READ_CAPACITY_16: 1030 cmd->execute_cmd = sbc_emulate_readcapacity_16; 1031 break; 1032 case SAI_REPORT_REFERRALS: 1033 cmd->execute_cmd = target_emulate_report_referrals; 1034 break; 1035 default: 1036 pr_err("Unsupported SA: 0x%02x\n", 1037 cmd->t_task_cdb[1] & 0x1f); 1038 return TCM_INVALID_CDB_FIELD; 1039 } 1040 size = (cdb[10] << 24) | (cdb[11] << 16) | 1041 (cdb[12] << 8) | cdb[13]; 1042 break; 1043 case SYNCHRONIZE_CACHE: 1044 case SYNCHRONIZE_CACHE_16: 1045 if (cdb[0] == SYNCHRONIZE_CACHE) { 1046 sectors = transport_get_sectors_10(cdb); 1047 cmd->t_task_lba = transport_lba_32(cdb); 1048 } else { 1049 sectors = transport_get_sectors_16(cdb); 1050 cmd->t_task_lba = transport_lba_64(cdb); 1051 } 1052 if (ops->execute_sync_cache) { 1053 cmd->execute_cmd = ops->execute_sync_cache; 1054 goto check_lba; 1055 } 1056 size = 0; 1057 cmd->execute_cmd = sbc_emulate_noop; 1058 break; 1059 case UNMAP: 1060 if (!ops->execute_unmap) 1061 return TCM_UNSUPPORTED_SCSI_OPCODE; 1062 1063 if (!dev->dev_attrib.emulate_tpu) { 1064 pr_err("Got UNMAP, but backend device has" 1065 " emulate_tpu disabled\n"); 1066 return TCM_UNSUPPORTED_SCSI_OPCODE; 1067 } 1068 size = get_unaligned_be16(&cdb[7]); 1069 cmd->execute_cmd = sbc_execute_unmap; 1070 break; 1071 case WRITE_SAME_16: 1072 sectors = transport_get_sectors_16(cdb); 1073 if (!sectors) { 1074 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 1075 return TCM_INVALID_CDB_FIELD; 1076 } 1077 1078 size = sbc_get_size(cmd, 1); 1079 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 1080 1081 ret = sbc_setup_write_same(cmd, &cdb[1], ops); 1082 if (ret) 1083 return ret; 1084 break; 1085 case WRITE_SAME: 1086 sectors = transport_get_sectors_10(cdb); 1087 if (!sectors) { 1088 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 1089 return TCM_INVALID_CDB_FIELD; 1090 } 1091 1092 size = sbc_get_size(cmd, 1); 1093 cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 1094 1095 /* 1096 * Follow sbcr26 with WRITE_SAME (10) and check for the existence 1097 * of byte 1 bit 3 UNMAP instead of original reserved field 1098 */ 1099 ret = sbc_setup_write_same(cmd, &cdb[1], ops); 1100 if (ret) 1101 return ret; 1102 break; 1103 case VERIFY: 1104 size = 0; 1105 sectors = transport_get_sectors_10(cdb); 1106 cmd->t_task_lba = transport_lba_32(cdb); 1107 cmd->execute_cmd = sbc_emulate_noop; 1108 goto check_lba; 1109 case REZERO_UNIT: 1110 case SEEK_6: 1111 case SEEK_10: 1112 /* 1113 * There are still clients out there which use these old SCSI-2 1114 * commands. This mainly happens when running VMs with legacy 1115 * guest systems, connected via SCSI command pass-through to 1116 * iSCSI targets. Make them happy and return status GOOD. 1117 */ 1118 size = 0; 1119 cmd->execute_cmd = sbc_emulate_noop; 1120 break; 1121 case START_STOP: 1122 size = 0; 1123 cmd->execute_cmd = sbc_emulate_startstop; 1124 break; 1125 default: 1126 ret = spc_parse_cdb(cmd, &size); 1127 if (ret) 1128 return ret; 1129 } 1130 1131 /* reject any command that we don't have a handler for */ 1132 if (!cmd->execute_cmd) 1133 return TCM_UNSUPPORTED_SCSI_OPCODE; 1134 1135 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1136 unsigned long long end_lba; 1137 check_lba: 1138 end_lba = dev->transport->get_blocks(dev) + 1; 1139 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || 1140 ((cmd->t_task_lba + sectors) > end_lba)) { 1141 pr_err("cmd exceeds last lba %llu " 1142 "(lba %llu, sectors %u)\n", 1143 end_lba, cmd->t_task_lba, sectors); 1144 return TCM_ADDRESS_OUT_OF_RANGE; 1145 } 1146 1147 if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) 1148 size = sbc_get_size(cmd, sectors); 1149 } 1150 1151 return target_cmd_size_check(cmd, size); 1152 } 1153 EXPORT_SYMBOL(sbc_parse_cdb); 1154 1155 u32 sbc_get_device_type(struct se_device *dev) 1156 { 1157 return TYPE_DISK; 1158 } 1159 EXPORT_SYMBOL(sbc_get_device_type); 1160 1161 static sense_reason_t 1162 sbc_execute_unmap(struct se_cmd *cmd) 1163 { 1164 struct sbc_ops *ops = cmd->protocol_data; 1165 struct se_device *dev = cmd->se_dev; 1166 unsigned char *buf, *ptr = NULL; 1167 sector_t lba; 1168 int size; 1169 u32 range; 1170 sense_reason_t ret = 0; 1171 int dl, bd_dl; 1172 1173 /* We never set ANC_SUP */ 1174 if (cmd->t_task_cdb[1]) 1175 return TCM_INVALID_CDB_FIELD; 1176 1177 if (cmd->data_length == 0) { 1178 target_complete_cmd(cmd, SAM_STAT_GOOD); 1179 return 0; 1180 } 1181 1182 if (cmd->data_length < 8) { 1183 pr_warn("UNMAP parameter list length %u too small\n", 1184 cmd->data_length); 1185 return TCM_PARAMETER_LIST_LENGTH_ERROR; 1186 } 1187 1188 buf = transport_kmap_data_sg(cmd); 1189 if (!buf) 1190 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1191 1192 dl = get_unaligned_be16(&buf[0]); 1193 bd_dl = get_unaligned_be16(&buf[2]); 1194 1195 size = cmd->data_length - 8; 1196 if (bd_dl > size) 1197 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", 1198 cmd->data_length, bd_dl); 1199 else 1200 size = bd_dl; 1201 1202 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { 1203 ret = TCM_INVALID_PARAMETER_LIST; 1204 goto err; 1205 } 1206 1207 /* First UNMAP block descriptor starts at 8 byte offset */ 1208 ptr = &buf[8]; 1209 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" 1210 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 1211 1212 while (size >= 16) { 1213 lba = get_unaligned_be64(&ptr[0]); 1214 range = get_unaligned_be32(&ptr[8]); 1215 pr_debug("UNMAP: Using lba: %llu and range: %u\n", 1216 (unsigned long long)lba, range); 1217 1218 if (range > dev->dev_attrib.max_unmap_lba_count) { 1219 ret = TCM_INVALID_PARAMETER_LIST; 1220 goto err; 1221 } 1222 1223 if (lba + range > dev->transport->get_blocks(dev) + 1) { 1224 ret = TCM_ADDRESS_OUT_OF_RANGE; 1225 goto err; 1226 } 1227 1228 ret = ops->execute_unmap(cmd, lba, range); 1229 if (ret) 1230 goto err; 1231 1232 ptr += 16; 1233 size -= 16; 1234 } 1235 1236 err: 1237 transport_kunmap_data_sg(cmd); 1238 if (!ret) 1239 target_complete_cmd(cmd, GOOD); 1240 return ret; 1241 } 1242 1243 void 1244 sbc_dif_generate(struct se_cmd *cmd) 1245 { 1246 struct se_device *dev = cmd->se_dev; 1247 struct t10_pi_tuple *sdt; 1248 struct scatterlist *dsg = cmd->t_data_sg, *psg; 1249 sector_t sector = cmd->t_task_lba; 1250 void *daddr, *paddr; 1251 int i, j, offset = 0; 1252 unsigned int block_size = dev->dev_attrib.block_size; 1253 1254 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1255 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1256 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1257 1258 for (j = 0; j < psg->length; 1259 j += sizeof(*sdt)) { 1260 __u16 crc; 1261 unsigned int avail; 1262 1263 if (offset >= dsg->length) { 1264 offset -= dsg->length; 1265 kunmap_atomic(daddr - dsg->offset); 1266 dsg = sg_next(dsg); 1267 if (!dsg) { 1268 kunmap_atomic(paddr - psg->offset); 1269 return; 1270 } 1271 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1272 } 1273 1274 sdt = paddr + j; 1275 avail = min(block_size, dsg->length - offset); 1276 crc = crc_t10dif(daddr + offset, avail); 1277 if (avail < block_size) { 1278 kunmap_atomic(daddr - dsg->offset); 1279 dsg = sg_next(dsg); 1280 if (!dsg) { 1281 kunmap_atomic(paddr - psg->offset); 1282 return; 1283 } 1284 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1285 offset = block_size - avail; 1286 crc = crc_t10dif_update(crc, daddr, offset); 1287 } else { 1288 offset += block_size; 1289 } 1290 1291 sdt->guard_tag = cpu_to_be16(crc); 1292 if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) 1293 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); 1294 sdt->app_tag = 0; 1295 1296 pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x" 1297 " app_tag: 0x%04x ref_tag: %u\n", 1298 (cmd->data_direction == DMA_TO_DEVICE) ? 1299 "WRITE" : "READ", (unsigned long long)sector, 1300 sdt->guard_tag, sdt->app_tag, 1301 be32_to_cpu(sdt->ref_tag)); 1302 1303 sector++; 1304 } 1305 1306 kunmap_atomic(daddr - dsg->offset); 1307 kunmap_atomic(paddr - psg->offset); 1308 } 1309 } 1310 1311 static sense_reason_t 1312 sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt, 1313 __u16 crc, sector_t sector, unsigned int ei_lba) 1314 { 1315 __be16 csum; 1316 1317 if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) 1318 goto check_ref; 1319 1320 csum = cpu_to_be16(crc); 1321 1322 if (sdt->guard_tag != csum) { 1323 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" 1324 " csum 0x%04x\n", (unsigned long long)sector, 1325 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); 1326 return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1327 } 1328 1329 check_ref: 1330 if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)) 1331 return 0; 1332 1333 if (cmd->prot_type == TARGET_DIF_TYPE1_PROT && 1334 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 1335 pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" 1336 " sector MSB: 0x%08x\n", (unsigned long long)sector, 1337 be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); 1338 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1339 } 1340 1341 if (cmd->prot_type == TARGET_DIF_TYPE2_PROT && 1342 be32_to_cpu(sdt->ref_tag) != ei_lba) { 1343 pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" 1344 " ei_lba: 0x%08x\n", (unsigned long long)sector, 1345 be32_to_cpu(sdt->ref_tag), ei_lba); 1346 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1347 } 1348 1349 return 0; 1350 } 1351 1352 void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, 1353 struct scatterlist *sg, int sg_off) 1354 { 1355 struct se_device *dev = cmd->se_dev; 1356 struct scatterlist *psg; 1357 void *paddr, *addr; 1358 unsigned int i, len, left; 1359 unsigned int offset = sg_off; 1360 1361 if (!sg) 1362 return; 1363 1364 left = sectors * dev->prot_length; 1365 1366 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1367 unsigned int psg_len, copied = 0; 1368 1369 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1370 psg_len = min(left, psg->length); 1371 while (psg_len) { 1372 len = min(psg_len, sg->length - offset); 1373 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; 1374 1375 if (read) 1376 memcpy(paddr + copied, addr, len); 1377 else 1378 memcpy(addr, paddr + copied, len); 1379 1380 left -= len; 1381 offset += len; 1382 copied += len; 1383 psg_len -= len; 1384 1385 kunmap_atomic(addr - sg->offset - offset); 1386 1387 if (offset >= sg->length) { 1388 sg = sg_next(sg); 1389 offset = 0; 1390 } 1391 } 1392 kunmap_atomic(paddr - psg->offset); 1393 } 1394 } 1395 EXPORT_SYMBOL(sbc_dif_copy_prot); 1396 1397 sense_reason_t 1398 sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1399 unsigned int ei_lba, struct scatterlist *psg, int psg_off) 1400 { 1401 struct se_device *dev = cmd->se_dev; 1402 struct t10_pi_tuple *sdt; 1403 struct scatterlist *dsg = cmd->t_data_sg; 1404 sector_t sector = start; 1405 void *daddr, *paddr; 1406 int i; 1407 sense_reason_t rc; 1408 int dsg_off = 0; 1409 unsigned int block_size = dev->dev_attrib.block_size; 1410 1411 for (; psg && sector < start + sectors; psg = sg_next(psg)) { 1412 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1413 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1414 1415 for (i = psg_off; i < psg->length && 1416 sector < start + sectors; 1417 i += sizeof(*sdt)) { 1418 __u16 crc; 1419 unsigned int avail; 1420 1421 if (dsg_off >= dsg->length) { 1422 dsg_off -= dsg->length; 1423 kunmap_atomic(daddr - dsg->offset); 1424 dsg = sg_next(dsg); 1425 if (!dsg) { 1426 kunmap_atomic(paddr - psg->offset); 1427 return 0; 1428 } 1429 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1430 } 1431 1432 sdt = paddr + i; 1433 1434 pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" 1435 " app_tag: 0x%04x ref_tag: %u\n", 1436 (unsigned long long)sector, sdt->guard_tag, 1437 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1438 1439 if (sdt->app_tag == cpu_to_be16(0xffff)) { 1440 dsg_off += block_size; 1441 goto next; 1442 } 1443 1444 avail = min(block_size, dsg->length - dsg_off); 1445 crc = crc_t10dif(daddr + dsg_off, avail); 1446 if (avail < block_size) { 1447 kunmap_atomic(daddr - dsg->offset); 1448 dsg = sg_next(dsg); 1449 if (!dsg) { 1450 kunmap_atomic(paddr - psg->offset); 1451 return 0; 1452 } 1453 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1454 dsg_off = block_size - avail; 1455 crc = crc_t10dif_update(crc, daddr, dsg_off); 1456 } else { 1457 dsg_off += block_size; 1458 } 1459 1460 rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba); 1461 if (rc) { 1462 kunmap_atomic(daddr - dsg->offset); 1463 kunmap_atomic(paddr - psg->offset); 1464 cmd->bad_sector = sector; 1465 return rc; 1466 } 1467 next: 1468 sector++; 1469 ei_lba++; 1470 } 1471 1472 psg_off = 0; 1473 kunmap_atomic(daddr - dsg->offset); 1474 kunmap_atomic(paddr - psg->offset); 1475 } 1476 1477 return 0; 1478 } 1479 EXPORT_SYMBOL(sbc_dif_verify); 1480