/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}
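/*
 * For reference, the READ CAPACITY (16) parameter data built above is
 * laid out as follows (condensed from SBC-3, not a normative copy):
 *
 *   bytes 0..7    RETURNED LOGICAL BLOCK ADDRESS (last LBA)
 *   bytes 8..11   LOGICAL BLOCK LENGTH IN BYTES
 *   byte  12      bits 3:1 P_TYPE, bit 0 PROT_EN
 *   byte  13      bits 7:4 P_I_EXPONENT, bits 3:0 LOGICAL BLOCKS PER
 *                 PHYSICAL BLOCK EXPONENT
 *   bytes 14..15  bit 7 LBPME, bit 6 LBPRZ (byte 14), plus the 14-bit
 *                 LOWEST ALIGNED LOGICAL BLOCK ADDRESS
 *   bytes 16..31  reserved
 */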
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non-zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
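/*
 * Note that the open-coded big-endian loads above are equivalent to
 * the get_unaligned_be*() helpers from <asm/unaligned.h> already used
 * elsewhere in this file, e.g.:
 *
 *	transport_get_sectors_10(cdb) == get_unaligned_be16(&cdb[7])
 *	transport_get_sectors_16(cdb) == get_unaligned_be32(&cdb[10])
 *	transport_lba_32(cdb)         == get_unaligned_be32(&cdb[2])
 *	transport_lba_64(cdb)         == get_unaligned_be64(&cdb[2])
 */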
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
	if (ret)
		return ret;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}
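/*
 * Background note (not a quote from SBC-3): since (old ^ new) ^ new ==
 * old, an initiator that sets the DISABLE WRITE bit can XOR the
 * returned data with the data it sent to recover the previous on-disk
 * contents, which is the classic RAID parity-update use of
 * XDWRITEREAD.
 */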
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet.
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload.
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg.
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}
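/*
 * For reference, the COMPARE AND WRITE (0x89) CDB handled above and
 * parsed in sbc_parse_cdb() below carries the LBA in bytes 2..9 and
 * the NUMBER OF LOGICAL BLOCKS in byte 13 (per SBC-3); the data-out
 * buffer contains the verify data followed by the write data, hence
 * the expected transfer length of 2 * NoLB * block_size.
 */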
static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}
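/*
 * For reference, the protect values handled above follow the SBC-3
 * RDPROTECT/WRPROTECT encoding (condensed summary, not a normative
 * copy):
 *
 *   000b        no PI transferred; the target inserts PI on write and
 *               checks + strips it on read
 *   001b, 101b  PI transferred; GUARD checked, plus REF TAG for Type 1
 *   010b        PI transferred; only the REF TAG checked (Type 1)
 *   011b        PI transferred without any checking
 *   100b        PI transferred; only the GUARD tag checked
 */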
static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (protect && !dev->dev_attrib.pi_prot_type) {
			pr_err("CDB contains protect bit, but device does not"
			       " advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return TCM_NO_SENSE;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * If protection information is transferred over the wire, adjust
	 * cmd->data_length to describe the data payload only; the actual
	 * transfer length is the data length plus the protection length.
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error.
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);
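/*
 * For reference, sbc_execute_unmap() below parses an UNMAP parameter
 * list with the following layout (condensed from SBC-3, not a
 * normative copy):
 *
 *   bytes 0..1   UNMAP DATA LENGTH (dl)
 *   bytes 2..3   UNMAP BLOCK DESCRIPTOR DATA LENGTH (bd_dl)
 *   bytes 4..7   reserved
 *   bytes 8..    block descriptors, 16 bytes each:
 *                  bytes 0..7    UNMAP LOGICAL BLOCK ADDRESS
 *                  bytes 8..11   NUMBER OF LOGICAL BLOCKS
 *                  bytes 12..15  reserved
 */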
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);
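/*
 * The 8-byte T10 Protection Information tuple generated and verified
 * below (struct se_dif_v1_tuple) is laid out as:
 *
 *   bytes 0..1  GUARD tag       - CRC16 (T10 DIF) of the data block
 *   bytes 2..3  APPLICATION tag
 *   bytes 4..7  REFERENCE tag   - lower 32 bits of the LBA for Type 1
 */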
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}

static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
		       " csum 0x%04x\n", (unsigned long long)sector,
		       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		/* Use psg->offset: psg may have advanced past the first sg */
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}
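/*
 * sbc_dif_read_strip() below reuses __sbc_dif_verify_read() but skips
 * the sbc_dif_copy_prot() step performed by sbc_dif_verify_read(), so
 * the verified tuples are effectively stripped rather than returned to
 * the initiator; the sector count is derived from cmd->prot_length
 * instead of the CDB transfer length.
 */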
sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);