// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        unsigned long long blocks_long = dev->transport->get_blocks(dev);
        unsigned char *rbuf;
        unsigned char buf[8];
        u32 blocks;

        /*
         * SBC-2 says:
         *   If the PMI bit is set to zero and the LOGICAL BLOCK
         *   ADDRESS field is not set to zero, the device server shall
         *   terminate the command with CHECK CONDITION status with
         *   the sense key set to ILLEGAL REQUEST and the additional
         *   sense code set to INVALID FIELD IN CDB.
         *
         * In SBC-3, these fields are obsolete, but some SCSI
         * compliance tests actually check this, so we might as well
         * follow SBC-2.
         */
        if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
                return TCM_INVALID_CDB_FIELD;

        if (blocks_long >= 0x00000000ffffffff)
                blocks = 0xffffffff;
        else
                blocks = (u32)blocks_long;

        put_unaligned_be32(blocks, &buf[0]);
        put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);

        rbuf = transport_kmap_data_sg(cmd);
        if (rbuf) {
                memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
                transport_kunmap_data_sg(cmd);
        }

        target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8);
        return 0;
}

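/*
 * READ CAPACITY (16) parameter data layout filled in below (see SBC-3):
 *
 *   bytes 0-7  : RETURNED LOGICAL BLOCK ADDRESS (last LBA)
 *   bytes 8-11 : LOGICAL BLOCK LENGTH IN BYTES
 *   byte 12    : P_TYPE (bits 3:1) and PROT_EN (bit 0)
 *   byte 13    : LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT (bits 3:0)
 *   bytes 14-15: LBPME (byte 14 bit 7), LBPRZ (byte 14 bit 6), and the
 *                LOWEST ALIGNED LOGICAL BLOCK ADDRESS
 */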
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_session *sess = cmd->se_sess;
        int pi_prot_type = dev->dev_attrib.pi_prot_type;

        unsigned char *rbuf;
        unsigned char buf[32];
        unsigned long long blocks = dev->transport->get_blocks(dev);

        memset(buf, 0, sizeof(buf));
        put_unaligned_be64(blocks, &buf[0]);
        put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
        /*
         * Set P_TYPE and PROT_EN bits for DIF support
         */
        if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
                /*
                 * Only override a device's pi_prot_type if no T10-PI is
                 * available, and sess_prot_type has been explicitly enabled.
                 */
                if (!pi_prot_type)
                        pi_prot_type = sess->sess_prot_type;

                if (pi_prot_type)
                        buf[12] = (pi_prot_type - 1) << 1 | 0x1;
        }

        if (dev->transport->get_lbppbe)
                buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

        if (dev->transport->get_alignment_offset_lbas) {
                u16 lalba = dev->transport->get_alignment_offset_lbas(dev);

                put_unaligned_be16(lalba, &buf[14]);
        }

        /*
         * Set Thin Provisioning Enable bit following sbc3r22 in section
         * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is
         * enabled.
         */
        if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
                buf[14] |= 0x80;

                /*
                 * LBPRZ signifies that zeroes will be read back from an LBA
                 * after an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
                 */
                if (dev->dev_attrib.unmap_zeroes_data)
                        buf[14] |= 0x40;
        }

        rbuf = transport_kmap_data_sg(cmd);
        if (rbuf) {
                memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
                transport_kunmap_data_sg(cmd);
        }

        target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 32);
        return 0;
}

static sense_reason_t
sbc_emulate_startstop(struct se_cmd *cmd)
{
        unsigned char *cdb = cmd->t_task_cdb;

        /*
         * See sbc3r36 section 5.25
         * Immediate bit should be set since there is nothing to complete
         * POWER CONDITION MODIFIER 0h
         */
        if (!(cdb[1] & 1) || cdb[2] || cdb[3])
                return TCM_INVALID_CDB_FIELD;

        /*
         * See sbc3r36 section 5.25
         * POWER CONDITION 0h START_VALID - process START and LOEJ
         */
        if (cdb[4] >> 4 & 0xf)
                return TCM_INVALID_CDB_FIELD;

        /*
         * See sbc3r36 section 5.25
         * LOEJ 0h - nothing to load or unload
         * START 1h - we are ready
         */
        if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
                return TCM_INVALID_CDB_FIELD;

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

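/*
 * Return the number of blocks a WRITE SAME CDB addresses: the NUMBER OF
 * LOGICAL BLOCKS field when it is non-zero, otherwise the remaining blocks
 * from the starting LBA through the end of the device.
 */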
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
        u32 num_blocks;

        if (cmd->t_task_cdb[0] == WRITE_SAME)
                num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
        else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
                num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
        else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
                num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

        /*
         * Use the explicit range when non-zero is supplied, otherwise
         * calculate the remaining range based on ->get_blocks() - starting
         * LBA.
         */
        if (num_blocks)
                return num_blocks;

        return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
                cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
        struct sbc_ops *ops = cmd->protocol_data;
        sector_t nolb = sbc_get_write_same_sectors(cmd);
        sense_reason_t ret;

        if (nolb) {
                ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
                if (ret)
                        return ret;
        }

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
        return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
        /*
         * Use 8-bit sector value. SBC-3 says:
         *
         *   A TRANSFER LENGTH field set to zero specifies that 256
         *   logical blocks shall be written. Any other value
         *   specifies the number of logical blocks that shall be
         *   written.
         */
        return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
        return get_unaligned_be16(&cdb[7]);
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
        return get_unaligned_be32(&cdb[6]);
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
        return get_unaligned_be32(&cdb[10]);
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
        return get_unaligned_be32(&cdb[28]);
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
        return get_unaligned_be24(&cdb[1]) & 0x1fffff;
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
        return get_unaligned_be32(&cdb[2]);
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
        return get_unaligned_be64(&cdb[2]);
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
        return get_unaligned_be64(&cdb[12]);
}

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
{
        struct se_device *dev = cmd->se_dev;
        sector_t end_lba = dev->transport->get_blocks(dev) + 1;
        unsigned int sectors = sbc_get_write_same_sectors(cmd);
        sense_reason_t ret;

        if ((flags & 0x04) || (flags & 0x02)) {
                pr_err("WRITE_SAME PBDATA and LBDATA"
                        " bits not supported for Block Discard"
                        " Emulation\n");
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
        if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
                pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
                        sectors, cmd->se_dev->dev_attrib.max_write_same_len);
                return TCM_INVALID_CDB_FIELD;
        }
        /*
         * Sanity check for LBA wrap and request past end of device.
         */
        if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
            ((cmd->t_task_lba + sectors) > end_lba)) {
                pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
                       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
                return TCM_ADDRESS_OUT_OF_RANGE;
        }

        /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
        if (flags & 0x10) {
                pr_warn("WRITE SAME with ANCHOR not supported\n");
                return TCM_INVALID_CDB_FIELD;
        }

        if (flags & 0x01) {
                pr_warn("WRITE SAME with NDOB not supported\n");
                return TCM_INVALID_CDB_FIELD;
        }

        /*
         * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
         * translated into block discard requests within backend code.
         */
        if (flags & 0x08) {
                if (!ops->execute_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;

                if (!dev->dev_attrib.emulate_tpws) {
                        pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
                               " has emulate_tpws disabled\n");
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
                }
                cmd->execute_cmd = sbc_execute_write_same_unmap;
                return 0;
        }
        if (!ops->execute_write_same)
                return TCM_UNSUPPORTED_SCSI_OPCODE;

        ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
        if (ret)
                return ret;

        cmd->execute_cmd = ops->execute_write_same;
        return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
                                           int *post_ret)
{
        unsigned char *buf, *addr;
        struct scatterlist *sg;
        unsigned int offset;
        sense_reason_t ret = TCM_NO_SENSE;
        int i, count;

        if (!success)
                return 0;

        /*
         * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
         *
         * 1) read the specified logical block(s);
         * 2) transfer logical blocks from the data-out buffer;
         * 3) XOR the logical blocks transferred from the data-out buffer with
         *    the logical blocks read, storing the resulting XOR data in a buffer;
         * 4) if the DISABLE WRITE bit is set to zero, then write the logical
         *    blocks transferred from the data-out buffer; and
         * 5) transfer the resulting XOR data to the data-in buffer.
         */
        buf = kmalloc(cmd->data_length, GFP_KERNEL);
        if (!buf) {
                pr_err("Unable to allocate xor_callback buf\n");
                return TCM_OUT_OF_RESOURCES;
        }
        /*
         * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
         * into the locally allocated *buf
         */
        sg_copy_to_buffer(cmd->t_data_sg,
                          cmd->t_data_nents,
                          buf,
                          cmd->data_length);

        /*
         * Now perform the XOR against the BIDI read memory located at
         * cmd->t_bidi_data_sg
         */

        offset = 0;
        for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
                addr = kmap_atomic(sg_page(sg));
                if (!addr) {
                        ret = TCM_OUT_OF_RESOURCES;
                        goto out;
                }

                for (i = 0; i < sg->length; i++)
                        *(addr + sg->offset + i) ^= *(buf + offset + i);

                offset += sg->length;
                kunmap_atomic(addr);
        }

out:
        kfree(buf);
        return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
        struct sbc_ops *ops = cmd->protocol_data;

        return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
                               cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
                                             int *post_ret)
{
        struct se_device *dev = cmd->se_dev;
        sense_reason_t ret = TCM_NO_SENSE;

        spin_lock_irq(&cmd->t_state_lock);
        if (success) {
                *post_ret = 1;

                if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
                        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        spin_unlock_irq(&cmd->t_state_lock);

        /*
         * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
         * before the original READ I/O submission.
         */
        up(&dev->caw_sem);

        return ret;
}

/*
 * compare @cmp_len bytes of @read_sgl with @cmp_sgl. On miscompare, fill
 * @miscmp_off and return TCM_MISCOMPARE_VERIFY.
 */
static sense_reason_t
compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
                         struct scatterlist *cmp_sgl, unsigned int cmp_nents,
                         unsigned int cmp_len, unsigned int *miscmp_off)
{
        unsigned char *buf = NULL;
        struct scatterlist *sg;
        sense_reason_t ret;
        unsigned int offset;
        size_t rc;
        int sg_cnt;

        buf = kzalloc(cmp_len, GFP_KERNEL);
        if (!buf) {
                ret = TCM_OUT_OF_RESOURCES;
                goto out;
        }

        rc = sg_copy_to_buffer(cmp_sgl, cmp_nents, buf, cmp_len);
        if (!rc) {
                pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
                ret = TCM_OUT_OF_RESOURCES;
                goto out;
        }
        /*
         * Compare SCSI READ payload against verify payload
         */
        offset = 0;
        ret = TCM_NO_SENSE;
        for_each_sg(read_sgl, sg, read_nents, sg_cnt) {
                unsigned int len = min(sg->length, cmp_len);
                unsigned char *addr = kmap_atomic(sg_page(sg));

                if (memcmp(addr, buf + offset, len)) {
                        unsigned int i;

                        for (i = 0; i < len && addr[i] == buf[offset + i]; i++)
                                ;
                        *miscmp_off = offset + i;
                        pr_warn("Detected MISCOMPARE at offset %u\n",
                                *miscmp_off);
                        ret = TCM_MISCOMPARE_VERIFY;
                }
                kunmap_atomic(addr);
                if (ret != TCM_NO_SENSE)
                        goto out;

                offset += len;
                cmp_len -= len;
                if (!cmp_len)
                        break;
        }
        pr_debug("COMPARE AND WRITE read data matches compare data\n");
out:
        kfree(buf);
        return ret;
}

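/*
 * Completion callback for the READ phase of COMPARE AND WRITE: compares the
 * blocks read back (cmd->t_bidi_data_sg) against the verify payload at
 * cmd->t_data_sg, and on a match rebuilds the data SGL to point at the write
 * payload and resubmits the command as a WRITE via sbc_execute_rw().
 */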
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
                                                 int *post_ret)
{
        struct se_device *dev = cmd->se_dev;
        struct sg_table write_tbl = { };
        struct scatterlist *write_sg;
        struct sg_mapping_iter m;
        unsigned int len;
        unsigned int block_size = dev->dev_attrib.block_size;
        unsigned int compare_len = (cmd->t_task_nolb * block_size);
        unsigned int miscmp_off = 0;
        sense_reason_t ret = TCM_NO_SENSE;
        int i;

        /*
         * Handle early failure in transport_generic_request_failure(),
         * which will not have taken ->caw_sem yet..
         */
        if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
                return TCM_NO_SENSE;
        /*
         * Handle special case for zero-length COMPARE_AND_WRITE
         */
        if (!cmd->data_length)
                goto out;
        /*
         * Immediately exit + release dev->caw_sem if command has already
         * been failed with a non-zero SCSI status.
         */
        if (cmd->scsi_status) {
                pr_debug("compare_and_write_callback: non zero scsi_status:"
                         " 0x%02x\n", cmd->scsi_status);
                *post_ret = 1;
                if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
                        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                goto out;
        }

        ret = compare_and_write_do_cmp(cmd->t_bidi_data_sg,
                                       cmd->t_bidi_data_nents,
                                       cmd->t_data_sg,
                                       cmd->t_data_nents,
                                       compare_len,
                                       &miscmp_off);
        if (ret == TCM_MISCOMPARE_VERIFY) {
                /*
                 * SBC-4 r15: 5.3 COMPARE AND WRITE command
                 * In the sense data (see 4.18 and SPC-5) the offset from the
                 * start of the Data-Out Buffer to the first byte of data that
                 * was not equal shall be reported in the INFORMATION field.
                 */
                cmd->sense_info = miscmp_off;
                goto out;
        } else if (ret)
                goto out;

        if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
                pr_err("Unable to allocate compare_and_write sg\n");
                ret = TCM_OUT_OF_RESOURCES;
                goto out;
        }
        write_sg = write_tbl.sgl;

        i = 0;
        len = compare_len;
        sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
        /*
         * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
         */
        while (len) {
                sg_miter_next(&m);

                if (block_size < PAGE_SIZE) {
                        sg_set_page(&write_sg[i], m.page, block_size,
                                    m.piter.sg->offset + block_size);
                } else {
                        sg_miter_next(&m);
                        sg_set_page(&write_sg[i], m.page, block_size,
                                    m.piter.sg->offset);
                }
                len -= block_size;
                i++;
        }
        sg_miter_stop(&m);
        /*
         * Save the original SGL + nents values before updating to new
         * assignments, to be released in transport_free_pages() ->
         * transport_reset_sgl_orig()
         */
        cmd->t_data_sg_orig = cmd->t_data_sg;
        cmd->t_data_sg = write_sg;
        cmd->t_data_nents_orig = cmd->t_data_nents;
        cmd->t_data_nents = 1;

        cmd->sam_task_attr = TCM_HEAD_TAG;
        cmd->transport_complete_callback = compare_and_write_post;
        /*
         * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
         * for submitting the adjusted SGL to write instance user-data.
         */
        cmd->execute_cmd = sbc_execute_rw;

        spin_lock_irq(&cmd->t_state_lock);
        cmd->t_state = TRANSPORT_PROCESSING;
        cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
        spin_unlock_irq(&cmd->t_state_lock);

        __target_execute_cmd(cmd, false);

        return ret;

out:
        /*
         * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
         * sbc_compare_and_write() before the original READ I/O submission.
         */
        up(&dev->caw_sem);
        sg_free_table(&write_tbl);
        return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
        struct sbc_ops *ops = cmd->protocol_data;
        struct se_device *dev = cmd->se_dev;
        sense_reason_t ret;
        int rc;
        /*
         * Submit the READ first for COMPARE_AND_WRITE to perform the
         * comparison using SGLs at cmd->t_bidi_data_sg..
         */
        rc = down_interruptible(&dev->caw_sem);
        if (rc != 0) {
                cmd->transport_complete_callback = NULL;
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * Reset cmd->data_length to individual block_size in order to not
         * confuse backend drivers that depend on this value matching the
         * size of the I/O being submitted.
         */
        cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

        ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
                              DMA_FROM_DEVICE);
        if (ret) {
                cmd->transport_complete_callback = NULL;
                up(&dev->caw_sem);
                return ret;
        }
        /*
         * Unlock of dev->caw_sem to occur in compare_and_write_callback()
         * upon MISCOMPARE, or in compare_and_write_done() upon completion
         * of WRITE instance user-data.
         */
        return TCM_NO_SENSE;
}

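/*
 * Map the three-bit RDPROTECT/WRPROTECT CDB field onto the protection
 * operation (strip/insert/pass) and the GUARD/REFTAG checks the backend
 * must perform, following the RDPROTECT and WRPROTECT tables in SBC-3.
 */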
static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
                       bool is_write, struct se_cmd *cmd)
{
        if (is_write) {
                cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
                               protect ? TARGET_PROT_DOUT_PASS :
                               TARGET_PROT_DOUT_INSERT;
                switch (protect) {
                case 0x0:
                case 0x3:
                        cmd->prot_checks = 0;
                        break;
                case 0x1:
                case 0x5:
                        cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
                        if (prot_type == TARGET_DIF_TYPE1_PROT)
                                cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
                        break;
                case 0x2:
                        if (prot_type == TARGET_DIF_TYPE1_PROT)
                                cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
                        break;
                case 0x4:
                        cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
                        break;
                default:
                        pr_err("Unsupported protect field %d\n", protect);
                        return -EINVAL;
                }
        } else {
                cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
                               protect ? TARGET_PROT_DIN_PASS :
                               TARGET_PROT_DIN_STRIP;
                switch (protect) {
                case 0x0:
                case 0x1:
                case 0x5:
                        cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
                        if (prot_type == TARGET_DIF_TYPE1_PROT)
                                cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
                        break;
                case 0x2:
                        if (prot_type == TARGET_DIF_TYPE1_PROT)
                                cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
                        break;
                case 0x3:
                        cmd->prot_checks = 0;
                        break;
                case 0x4:
                        cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
                        break;
                default:
                        pr_err("Unsupported protect field %d\n", protect);
                        return -EINVAL;
                }
        }

        return 0;
}

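/*
 * Validate the PROTECT field of a READ/WRITE/WRITE SAME CDB against the
 * device's configured DIF type (or the session's, for fabric-only
 * protection), then fill in cmd->prot_{type,op,checks,length} and the
 * reference tag seed.
 */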
static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
               u32 sectors, bool is_write)
{
        int sp_ops = cmd->se_sess->sup_prot_ops;
        int pi_prot_type = dev->dev_attrib.pi_prot_type;
        bool fabric_prot = false;

        if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
                if (unlikely(protect &&
                    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
                        pr_err("CDB contains protect bit, but device + fabric does"
                               " not advertise PROTECT=1 feature bit\n");
                        return TCM_INVALID_CDB_FIELD;
                }
                if (cmd->prot_pto)
                        return TCM_NO_SENSE;
        }

        switch (dev->dev_attrib.pi_prot_type) {
        case TARGET_DIF_TYPE3_PROT:
                cmd->reftag_seed = 0xffffffff;
                break;
        case TARGET_DIF_TYPE2_PROT:
                if (protect)
                        return TCM_INVALID_CDB_FIELD;

                cmd->reftag_seed = cmd->t_task_lba;
                break;
        case TARGET_DIF_TYPE1_PROT:
                cmd->reftag_seed = cmd->t_task_lba;
                break;
        case TARGET_DIF_TYPE0_PROT:
                /*
                 * See if the fabric supports T10-PI, and the session has been
                 * configured to allow export PROTECT=1 feature bit with backend
                 * devices that don't support T10-PI.
                 */
                fabric_prot = is_write ?
                              !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
                              !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));

                if (fabric_prot && cmd->se_sess->sess_prot_type) {
                        pi_prot_type = cmd->se_sess->sess_prot_type;
                        break;
                }
                if (!protect)
                        return TCM_NO_SENSE;
                fallthrough;
        default:
                pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
                       "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
                return TCM_INVALID_CDB_FIELD;
        }

        if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
                return TCM_INVALID_CDB_FIELD;

        cmd->prot_type = pi_prot_type;
        cmd->prot_length = dev->prot_length * sectors;

        /*
         * In case protection information exists over the wire
         * we modify command data length to describe pure data.
         * The actual transfer length is data length + protection
         * length.
         */
        if (protect)
                cmd->data_length = sectors * dev->dev_attrib.block_size;

        pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
                 "prot_op=%d prot_checks=%d\n",
                 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
                 cmd->prot_op, cmd->prot_checks);

        return TCM_NO_SENSE;
}

static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
        if (cdb[1] & 0x10) {
                /* see explanation in spc_emulate_modesense */
                if (!target_check_fua(dev)) {
                        pr_err("Got CDB: 0x%02x with DPO bit set, but device"
                               " does not advertise support for DPO\n", cdb[0]);
                        return -EINVAL;
                }
        }
        if (cdb[1] & 0x8) {
                if (!target_check_fua(dev)) {
                        pr_err("Got CDB: 0x%02x with FUA bit set, but device"
                               " does not advertise support for FUA write\n",
                               cdb[0]);
                        return -EINVAL;
                }
                cmd->se_cmd_flags |= SCF_FUA;
        }
        return 0;
}

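/*
 * Parse an SBC CDB, filling in cmd->t_task_lba, the expected transfer
 * length, and an ->execute_cmd() handler backed by the @ops callbacks
 * provided by the backend. Opcodes not handled here fall through to
 * spc_parse_cdb().
 */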
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        unsigned int size;
        u32 sectors = 0;
        sense_reason_t ret;

        cmd->protocol_data = ops;

        switch (cdb[0]) {
        case READ_6:
                sectors = transport_get_sectors_6(cdb);
                cmd->t_task_lba = transport_lba_21(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case READ_10:
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);

                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;

                ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;

                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case READ_12:
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);

                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;

                ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;

                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case READ_16:
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);

                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;

                ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;

                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case WRITE_6:
                sectors = transport_get_sectors_6(cdb);
                cmd->t_task_lba = transport_lba_21(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case WRITE_10:
        case WRITE_VERIFY:
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);

                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;

                ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;

                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case WRITE_12:
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);

                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;

                ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;

                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case WRITE_16:
        case WRITE_VERIFY_16:
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);

                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;

                ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;

                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case XDWRITEREAD_10:
                if (cmd->data_direction != DMA_TO_DEVICE ||
                    !(cmd->se_cmd_flags & SCF_BIDI))
                        return TCM_INVALID_CDB_FIELD;
                sectors = transport_get_sectors_10(cdb);

                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;

                cmd->t_task_lba = transport_lba_32(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

                /*
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
                cmd->execute_cmd = sbc_execute_rw;
                cmd->transport_complete_callback = &xdreadwrite_callback;
                break;
        case VARIABLE_LENGTH_CMD:
        {
                u16 service_action = get_unaligned_be16(&cdb[8]);
                switch (service_action) {
                case XDWRITEREAD_32:
                        sectors = transport_get_sectors_32(cdb);

                        if (sbc_check_dpofua(dev, cmd, cdb))
                                return TCM_INVALID_CDB_FIELD;
                        /*
                         * Use WRITE_32 and READ_32 opcodes for the emulated
                         * XDWRITE_READ_32 logic.
                         */
                        cmd->t_task_lba = transport_lba_64_ext(cdb);
                        cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

                        /*
                         * Setup BIDI XOR callback to be run after I/O
                         * completion.
                         */
                        cmd->execute_cmd = sbc_execute_rw;
                        cmd->transport_complete_callback = &xdreadwrite_callback;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb);
                        if (!sectors) {
                                pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
                                       " supported\n");
                                return TCM_INVALID_CDB_FIELD;
                        }

                        size = sbc_get_size(cmd, 1);
                        cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

                        ret = sbc_setup_write_same(cmd, cdb[10], ops);
                        if (ret)
                                return ret;
                        break;
                default:
                        pr_err("VARIABLE_LENGTH_CMD service action"
                               " 0x%04x not supported\n", service_action);
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
                }
                break;
        }
        case COMPARE_AND_WRITE:
                if (!dev->dev_attrib.emulate_caw) {
                        pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
                                           dev->se_hba->backend->ops->name,
                                           config_item_name(&dev->dev_group.cg_item),
                                           dev->t10_wwn.unit_serial);
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
                }
                sectors = cdb[13];
                /*
                 * Currently enforce COMPARE_AND_WRITE for a single sector
                 */
                if (sectors > 1) {
                        pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
                               " than 1\n", sectors);
                        return TCM_INVALID_CDB_FIELD;
                }
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;

                /*
                 * Double size because we have two buffers, note that
                 * zero is not an error..
                 */
                size = 2 * sbc_get_size(cmd, sectors);
                cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
                cmd->t_task_nolb = sectors;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
                cmd->execute_cmd = sbc_compare_and_write;
                cmd->transport_complete_callback = compare_and_write_callback;
                break;
        case READ_CAPACITY:
                size = READ_CAP_LEN;
                cmd->execute_cmd = sbc_emulate_readcapacity;
                break;
        case SERVICE_ACTION_IN_16:
                switch (cmd->t_task_cdb[1] & 0x1f) {
                case SAI_READ_CAPACITY_16:
                        cmd->execute_cmd = sbc_emulate_readcapacity_16;
                        break;
                case SAI_REPORT_REFERRALS:
                        cmd->execute_cmd = target_emulate_report_referrals;
                        break;
                default:
                        pr_err("Unsupported SA: 0x%02x\n",
                               cmd->t_task_cdb[1] & 0x1f);
                        return TCM_INVALID_CDB_FIELD;
                }
                size = get_unaligned_be32(&cdb[10]);
                break;
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                if (cdb[0] == SYNCHRONIZE_CACHE) {
                        sectors = transport_get_sectors_10(cdb);
                        cmd->t_task_lba = transport_lba_32(cdb);
                } else {
                        sectors = transport_get_sectors_16(cdb);
                        cmd->t_task_lba = transport_lba_64(cdb);
                }
                if (ops->execute_sync_cache) {
                        cmd->execute_cmd = ops->execute_sync_cache;
                        goto check_lba;
                }
                size = 0;
                cmd->execute_cmd = sbc_emulate_noop;
                break;
        case UNMAP:
                if (!ops->execute_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;

                if (!dev->dev_attrib.emulate_tpu) {
                        pr_err("Got UNMAP, but backend device has"
                               " emulate_tpu disabled\n");
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
                }
                size = get_unaligned_be16(&cdb[7]);
                cmd->execute_cmd = sbc_execute_unmap;
                break;
        case WRITE_SAME_16:
                sectors = transport_get_sectors_16(cdb);
                if (!sectors) {
                        pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
                        return TCM_INVALID_CDB_FIELD;
                }

                size = sbc_get_size(cmd, 1);
                cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

                ret = sbc_setup_write_same(cmd, cdb[1], ops);
                if (ret)
                        return ret;
                break;
        case WRITE_SAME:
                sectors = transport_get_sectors_10(cdb);
                if (!sectors) {
                        pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
                        return TCM_INVALID_CDB_FIELD;
                }

                size = sbc_get_size(cmd, 1);
                cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

                /*
                 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
                 * of byte 1 bit 3 UNMAP instead of original reserved field
                 */
                ret = sbc_setup_write_same(cmd, cdb[1], ops);
                if (ret)
                        return ret;
                break;
        case VERIFY:
        case VERIFY_16:
                size = 0;
                if (cdb[0] == VERIFY) {
                        sectors = transport_get_sectors_10(cdb);
                        cmd->t_task_lba = transport_lba_32(cdb);
                } else {
                        sectors = transport_get_sectors_16(cdb);
                        cmd->t_task_lba = transport_lba_64(cdb);
                }
                cmd->execute_cmd = sbc_emulate_noop;
                goto check_lba;
        case REZERO_UNIT:
        case SEEK_6:
        case SEEK_10:
                /*
                 * There are still clients out there which use these old SCSI-2
                 * commands. This mainly happens when running VMs with legacy
                 * guest systems, connected via SCSI command pass-through to
                 * iSCSI targets. Make them happy and return status GOOD.
                 */
                size = 0;
                cmd->execute_cmd = sbc_emulate_noop;
                break;
        case START_STOP:
                size = 0;
                cmd->execute_cmd = sbc_emulate_startstop;
                break;
        default:
                ret = spc_parse_cdb(cmd, &size);
                if (ret)
                        return ret;
        }

        /* reject any command that we don't have a handler for */
        if (!cmd->execute_cmd)
                return TCM_UNSUPPORTED_SCSI_OPCODE;

        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;
check_lba:
                end_lba = dev->transport->get_blocks(dev) + 1;
                if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
                    ((cmd->t_task_lba + sectors) > end_lba)) {
                        pr_err("cmd exceeds last lba %llu "
                               "(lba %llu, sectors %u)\n",
                               end_lba, cmd->t_task_lba, sectors);
                        return TCM_ADDRESS_OUT_OF_RANGE;
                }

                if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
                        size = sbc_get_size(cmd, sectors);
        }

        return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
        return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

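/*
 * UNMAP parameter list layout consumed below (see SBC-3):
 *
 *   bytes 0-1 : UNMAP DATA LENGTH
 *   bytes 2-3 : UNMAP BLOCK DESCRIPTOR DATA LENGTH
 *   bytes 4-7 : reserved
 *   byte 8+   : 16-byte block descriptors, each holding an 8-byte LBA,
 *               a 4-byte NUMBER OF LOGICAL BLOCKS, and 4 reserved bytes
 */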
static sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd)
{
        struct sbc_ops *ops = cmd->protocol_data;
        struct se_device *dev = cmd->se_dev;
        unsigned char *buf, *ptr = NULL;
        sector_t lba;
        int size;
        u32 range;
        sense_reason_t ret = 0;
        int dl, bd_dl;

        /* We never set ANC_SUP */
        if (cmd->t_task_cdb[1])
                return TCM_INVALID_CDB_FIELD;

        if (cmd->data_length == 0) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }

        if (cmd->data_length < 8) {
                pr_warn("UNMAP parameter list length %u too small\n",
                        cmd->data_length);
                return TCM_PARAMETER_LIST_LENGTH_ERROR;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        dl = get_unaligned_be16(&buf[0]);
        bd_dl = get_unaligned_be16(&buf[2]);

        size = cmd->data_length - 8;
        if (bd_dl > size)
                pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
                        cmd->data_length, bd_dl);
        else
                size = bd_dl;

        if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
                ret = TCM_INVALID_PARAMETER_LIST;
                goto err;
        }

        /* First UNMAP block descriptor starts at 8 byte offset */
        ptr = &buf[8];
        pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
                 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

        while (size >= 16) {
                lba = get_unaligned_be64(&ptr[0]);
                range = get_unaligned_be32(&ptr[8]);
                pr_debug("UNMAP: Using lba: %llu and range: %u\n",
                         (unsigned long long)lba, range);

                if (range > dev->dev_attrib.max_unmap_lba_count) {
                        ret = TCM_INVALID_PARAMETER_LIST;
                        goto err;
                }

                if (lba + range > dev->transport->get_blocks(dev) + 1) {
                        ret = TCM_ADDRESS_OUT_OF_RANGE;
                        goto err;
                }

                if (range) {
                        ret = ops->execute_unmap(cmd, lba, range);
                        if (ret)
                                goto err;
                }

                ptr += 16;
                size -= 16;
        }

err:
        transport_kunmap_data_sg(cmd);
        if (!ret)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
        return ret;
}

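/*
 * Generate T10 protection information for each logical block in the data
 * SGL: a CRC16 (T10-DIF) guard tag, a zero application tag, and, for
 * Type 1, the lower 32 bits of the LBA as reference tag.
 */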
void
sbc_dif_generate(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_pi_tuple *sdt;
        struct scatterlist *dsg = cmd->t_data_sg, *psg;
        sector_t sector = cmd->t_task_lba;
        void *daddr, *paddr;
        int i, j, offset = 0;
        unsigned int block_size = dev->dev_attrib.block_size;

        for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
                daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

                for (j = 0; j < psg->length; j += sizeof(*sdt)) {
                        __u16 crc;
                        unsigned int avail;

                        if (offset >= dsg->length) {
                                offset -= dsg->length;
                                kunmap_atomic(daddr - dsg->offset);
                                dsg = sg_next(dsg);
                                if (!dsg) {
                                        kunmap_atomic(paddr - psg->offset);
                                        return;
                                }
                                daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
                        }

                        sdt = paddr + j;
                        avail = min(block_size, dsg->length - offset);
                        crc = crc_t10dif(daddr + offset, avail);
                        if (avail < block_size) {
                                kunmap_atomic(daddr - dsg->offset);
                                dsg = sg_next(dsg);
                                if (!dsg) {
                                        kunmap_atomic(paddr - psg->offset);
                                        return;
                                }
                                daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
                                offset = block_size - avail;
                                crc = crc_t10dif_update(crc, daddr, offset);
                        } else {
                                offset += block_size;
                        }

                        sdt->guard_tag = cpu_to_be16(crc);
                        if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
                                sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
                        sdt->app_tag = 0;

                        pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
                                 " app_tag: 0x%04x ref_tag: %u\n",
                                 (cmd->data_direction == DMA_TO_DEVICE) ?
                                 "WRITE" : "READ", (unsigned long long)sector,
                                 sdt->guard_tag, sdt->app_tag,
                                 be32_to_cpu(sdt->ref_tag));

                        sector++;
                }

                kunmap_atomic(daddr - dsg->offset);
                kunmap_atomic(paddr - psg->offset);
        }
}

static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
                  __u16 crc, sector_t sector, unsigned int ei_lba)
{
        __be16 csum;

        if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
                goto check_ref;

        csum = cpu_to_be16(crc);

        if (sdt->guard_tag != csum) {
                pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
                       " csum 0x%04x\n", (unsigned long long)sector,
                       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
                return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
        }

check_ref:
        if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
                return 0;

        if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
            be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
                pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
                       " sector MSB: 0x%08x\n", (unsigned long long)sector,
                       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
                return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
        }

        if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
            be32_to_cpu(sdt->ref_tag) != ei_lba) {
                pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
                       " ei_lba: 0x%08x\n", (unsigned long long)sector,
                       be32_to_cpu(sdt->ref_tag), ei_lba);
                return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
        }

        return 0;
}

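/*
 * Copy protection information for @sectors logical blocks between
 * cmd->t_prot_sg and the SGL at @sg, starting @sg_off bytes in; @read
 * selects the direction (true copies into cmd->t_prot_sg).
 */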
void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
                       struct scatterlist *sg, int sg_off)
{
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *psg;
        void *paddr, *addr;
        unsigned int i, len, left;
        unsigned int offset = sg_off;

        if (!sg)
                return;

        left = sectors * dev->prot_length;

        for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
                unsigned int psg_len, copied = 0;

                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
                psg_len = min(left, psg->length);
                while (psg_len) {
                        len = min(psg_len, sg->length - offset);
                        addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

                        if (read)
                                memcpy(paddr + copied, addr, len);
                        else
                                memcpy(addr, paddr + copied, len);

                        left -= len;
                        offset += len;
                        copied += len;
                        psg_len -= len;

                        kunmap_atomic(addr - sg->offset - offset);

                        if (offset >= sg->length) {
                                sg = sg_next(sg);
                                offset = 0;
                        }
                }
                kunmap_atomic(paddr - psg->offset);
        }
}
EXPORT_SYMBOL(sbc_dif_copy_prot);

sense_reason_t
sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
               unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_pi_tuple *sdt;
        struct scatterlist *dsg = cmd->t_data_sg;
        sector_t sector = start;
        void *daddr, *paddr;
        int i;
        sense_reason_t rc;
        int dsg_off = 0;
        unsigned int block_size = dev->dev_attrib.block_size;

        for (; psg && sector < start + sectors; psg = sg_next(psg)) {
                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
                daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

                for (i = psg_off; i < psg->length &&
                                sector < start + sectors;
                                i += sizeof(*sdt)) {
                        __u16 crc;
                        unsigned int avail;

                        if (dsg_off >= dsg->length) {
                                dsg_off -= dsg->length;
                                kunmap_atomic(daddr - dsg->offset);
                                dsg = sg_next(dsg);
                                if (!dsg) {
                                        kunmap_atomic(paddr - psg->offset);
                                        return 0;
                                }
                                daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
                        }

                        sdt = paddr + i;

                        pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
                                 " app_tag: 0x%04x ref_tag: %u\n",
                                 (unsigned long long)sector, sdt->guard_tag,
                                 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

                        if (sdt->app_tag == T10_PI_APP_ESCAPE) {
                                dsg_off += block_size;
                                goto next;
                        }

                        avail = min(block_size, dsg->length - dsg_off);
                        crc = crc_t10dif(daddr + dsg_off, avail);
                        if (avail < block_size) {
                                kunmap_atomic(daddr - dsg->offset);
                                dsg = sg_next(dsg);
                                if (!dsg) {
                                        kunmap_atomic(paddr - psg->offset);
                                        return 0;
                                }
                                daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
                                dsg_off = block_size - avail;
                                crc = crc_t10dif_update(crc, daddr, dsg_off);
                        } else {
                                dsg_off += block_size;
                        }

                        rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
                        if (rc) {
                                kunmap_atomic(daddr - dsg->offset);
                                kunmap_atomic(paddr - psg->offset);
                                cmd->sense_info = sector;
                                return rc;
                        }
next:
                        sector++;
                        ei_lba++;
                }

                psg_off = 0;
                kunmap_atomic(daddr - dsg->offset);
                kunmap_atomic(paddr - psg->offset);
        }

        return 0;
}
EXPORT_SYMBOL(sbc_dif_verify);

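/*
 * Illustrative sketch only: a backend typically wires this file up by
 * passing its struct sbc_ops into sbc_parse_cdb() from its parse_cdb
 * hook. The example_* names below are hypothetical and not part of this
 * file or any particular backend:
 *
 *	static struct sbc_ops example_sbc_ops = {
 *		.execute_rw		= example_execute_rw,
 *		.execute_sync_cache	= example_execute_sync_cache,
 *		.execute_write_same	= example_execute_write_same,
 *		.execute_unmap		= example_execute_unmap,
 *	};
 *
 *	static sense_reason_t example_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &example_sbc_ops);
 *	}
 */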