/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

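/*
 * READ CAPACITY (10) returns an 8-byte parameter block: the big-endian
 * RETURNED LOGICAL BLOCK ADDRESS of the last block in bytes 0-3, and the
 * big-endian LOGICAL BLOCK LENGTH IN BYTES in bytes 4-7.  A returned LBA
 * of 0xffffffff tells the initiator to issue READ CAPACITY (16) instead.
 */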
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}

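/*
 * The NUMBER OF LOGICAL BLOCKS field lives at a different offset in each
 * WRITE SAME CDB variant: byte 7 (16 bits) for WRITE SAME (10), byte 10
 * (32 bits) for WRITE SAME (16), and byte 28 (32 bits) for the 32-byte
 * variable-length form.  Per SBC-3, a value of zero means "write every
 * block from the starting LBA through the end of the medium".
 */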
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied, otherwise
	 * calculate the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

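/*
 * The "flags" byte passed in below is CDB byte 1 for the WRITE SAME (10)
 * and (16) variants and CDB byte 10 for WRITE SAME (32): bit 1 is LBDATA
 * (0x02), bit 2 is PBDATA (0x04), bit 3 is UNMAP (0x08) and bit 4 is
 * ANCHOR (0x10).
 */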
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

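/*
 * On entry here the data-out payload sits in cmd->t_data_sg and the blocks
 * read back from the media sit in cmd->t_bidi_data_sg; the XOR result is
 * computed in place in the BIDI scatterlist, which is what gets returned
 * to the initiator in the data-in buffer.
 */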
static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

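/*
 * Runs as ->transport_complete_callback once the backend READ has finished:
 * the blocks read from the media are in cmd->t_bidi_data_sg, and the verify
 * plus write payloads from the initiator are in cmd->t_data_sg.  On a
 * successful compare the scatterlist is rewritten to point at the write
 * payload and the command is resubmitted to the backend as a normal WRITE.
 * dev->caw_sem is held across the whole READ-compare-WRITE sequence to keep
 * COMPARE AND WRITE atomic with respect to other commands on the device.
 */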
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

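/*
 * Map the RDPROTECT/WRPROTECT field (CDB byte 1, bits 5-7) onto the DIF
 * checks the backend must perform.  protect == 0 means the initiator sends
 * or expects no protection information, so the target inserts it on writes
 * and strips it on reads; any other value means protection information
 * travels with the data (PASS) and selects which of the guard and
 * reference tags get verified, mirroring the RDPROTECT/WRPROTECT tables
 * in SBC-3.
 */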
static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
		return true;

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return false;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return true;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return false;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length.
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return true;
}

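/*
 * Entry point for parsing SBC CDBs on behalf of a backend: decodes the
 * opcode, fills in cmd->t_task_lba and the transfer length, wires up
 * ->execute_cmd (and ->transport_complete_callback where needed), and
 * finishes with the common LBA-range and expected-data-length checks.
 */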
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
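	/*
	 * COMPARE AND WRITE (opcode 0x89) carries the starting LBA in bytes
	 * 2-9 and the NUMBER OF LOGICAL BLOCKS in byte 13; the data-out
	 * buffer carries the verify payload followed by the write payload,
	 * which is why the expected size below is doubled.
	 */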
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

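/*
 * The UNMAP parameter list starts with an 8-byte header: UNMAP DATA LENGTH
 * in bytes 0-1, UNMAP BLOCK DESCRIPTOR DATA LENGTH in bytes 2-3, and four
 * reserved bytes.  Each following 16-byte block descriptor carries an
 * 8-byte LBA, a 4-byte NUMBER OF LOGICAL BLOCKS, and four reserved bytes.
 */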
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
		  sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
						sector_t, sector_t),
		  void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);

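/*
 * Each 8-byte T10 PI tuple (struct se_dif_v1_tuple) holds a 16-bit guard
 * tag (CRC16 T10-DIF over the data block), a 16-bit application tag, and
 * a 32-bit reference tag that for Type 1 must equal the lower 32 bits of
 * the block's LBA.
 */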
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}

static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
		       " csum 0x%04x\n", (unsigned long long)sector,
		       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

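/*
 * Copy protection information between cmd->t_prot_sg and a backend
 * provided scatterlist @sg: for the read path (@read == true) tuples are
 * copied from @sg into cmd->t_prot_sg, for the write path they are copied
 * out of cmd->t_prot_sg into @sg.
 */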
static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

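/*
 * SBC-3 defines an APPLICATION TAG of 0xffff as an escape that disables
 * checking of that block's protection information, so such tuples are
 * skipped below rather than verified.
 */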
static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}

sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);