/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && (cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	put_unaligned_be32(blocks, &buf[0]);
	put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

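/*
 * Worked example: a backend exporting 2^24 blocks of 512 bytes (8 GiB)
 * returns last LBA 0x00ffffff from ->get_blocks(), so the response
 * above carries 0x00ffffff in bytes 0-3 and the block length
 * 0x00000200 in bytes 4-7.
 */
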
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	put_unaligned_be64(blocks, &buf[0]);
	put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (dev->dev_attrib.pi_prot_type)
		buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is
	 * enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied,
	 * otherwise calculate the remaining range based on
	 * ->get_blocks() minus the starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

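/*
 * Example: WRITE_SAME_16 with NUMBER OF LOGICAL BLOCKS = 0 and a
 * starting LBA of 100 on a device whose ->get_blocks() returns 1023
 * (the last LBA) resolves to 1023 - 100 + 1 = 924 sectors, i.e. the
 * range from the starting LBA through the end of the device.
 */
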
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return get_unaligned_be16(&cdb[7]);
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[6]);
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[10]);
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[28]);
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[2]);
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[2]);
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32-byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[12]);
}

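/*
 * For reference, the 10-byte CDB layout that the _10 helpers above
 * decode (READ_10/WRITE_10, per SBC-3):
 *
 *   byte 0     OPERATION CODE
 *   byte 1     RDPROTECT/WRPROTECT (bits 7-5), DPO (bit 4), FUA (bit 3)
 *   bytes 2-5  LOGICAL BLOCK ADDRESS (big-endian)
 *   byte 6     GROUP NUMBER
 *   bytes 7-8  TRANSFER LENGTH (big-endian)
 *   byte 9     CONTROL
 */
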
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

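/*
 * XOR example: if a data-out byte is 0xff and the corresponding BIDI
 * read byte is 0x0f, the loop above leaves 0xf0 in the read SGL, which
 * is what is subsequently returned in the data-in phase (step 5).
 */
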
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/* Initialize the table so the new SGL is properly terminated */
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

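/*
 * COMPARE AND WRITE (opcode 0x89) carries the LBA in CDB bytes 2-9 and
 * the NUMBER OF LOGICAL BLOCKS in byte 13; the data-out buffer holds
 * the verify payload followed by the write payload, which is why
 * sbc_parse_cdb() sizes it as 2 * sbc_get_size(cmd, sectors).
 */
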
static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
		return true;

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return false;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return true;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return false;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	return true;
}

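/*
 * Summary of the RDPROTECT/WRPROTECT decode above: a zero protect
 * field means the target itself inserts PI on writes and strips it on
 * reads, while any other supported value passes PI through.  Checking
 * then follows SBC-3: guard (plus ref tag on Type 1) for 1 and 5, and
 * also for 0 on reads; ref tag only (Type 1) for 2; guard only for 4;
 * no checking for 3, or for 0 on writes where no PI accompanies the
 * data-out payload.
 */
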
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
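	/*
	 * VARIABLE_LENGTH_CMD (opcode 0x7f) wraps 32-byte CDBs: the
	 * service action in bytes 8-9 selects the actual operation,
	 * with the LBA in bytes 12-19 and the transfer length in bytes
	 * 28-31, matching transport_lba_64_ext() and
	 * transport_get_sectors_32() above.
	 */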
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = get_unaligned_be32(&cdb[10]);
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated
		 * SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not exceed the end
		 * of the device for IBLOCK and FILEIO ->do_sync_cache()
		 * backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

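/*
 * A backend typically wires this up from its parse_cdb() method.
 * Hypothetical sketch (the foo_ names are invented for illustration;
 * the iblock and file backends follow this same pattern):
 *
 *	static struct sbc_ops foo_sbc_ops = {
 *		.execute_rw		= foo_execute_rw,
 *		.execute_sync_cache	= foo_execute_sync_cache,
 *	};
 *
 *	static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &foo_sbc_ops);
 *	}
 */
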
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
		  sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
						sector_t, sector_t),
		  void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);

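/*
 * UNMAP parameter list layout decoded above (per SBC-3):
 *
 *   bytes 0-1  UNMAP DATA LENGTH (dl)
 *   bytes 2-3  UNMAP BLOCK DESCRIPTOR DATA LENGTH (bd_dl)
 *   bytes 4-7  reserved
 *   bytes 8+   16-byte block descriptors: an 8-byte LBA, a 4-byte
 *              NUMBER OF LOGICAL BLOCKS, and 4 reserved bytes each
 */
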
static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
			" sector LSBs: 0x%08x\n", (unsigned long long)sector,
			be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
			" ei_lba: 0x%08x\n", (unsigned long long)sector,
			be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

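/*
 * Note on the on-wire format verified above and below: each logical
 * block carries an 8-byte T10 PI tuple (struct se_dif_v1_tuple): a
 * 2-byte guard tag (CRC16 of the block data), a 2-byte application
 * tag, and a 4-byte reference tag holding the low 32 bits of the LBA
 * for Type 1 or the expected initial LBA for Type 2.
 */
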
sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			/*
			 * An application tag of 0xffff means the block is
			 * not protected; skip verification for it.
			 */
			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);