/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
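	/*
	 * For illustration (example encoding, not from the original code):
	 * a READ CAPACITY(10) CDB of 25 00 00 00 10 00 00 00 00 00
	 * (PMI=0, LOGICAL BLOCK ADDRESS=0x1000) is rejected below, while
	 * 25 00 00 00 10 00 00 00 01 00 (PMI=1) is allowed through.
	 */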
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);
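/*
 * Worked example (illustrative, assuming ->get_blocks() reports the last
 * addressable LBA as it does for READ CAPACITY above): with a last LBA of
 * 999 and a WRITE_SAME at LBA 990 carrying NUMBER OF LOGICAL BLOCKS = 0,
 * the computation above yields 999 - 990 + 1 = 10 sectors, i.e. "write
 * same through the end of the device".
 */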
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value. SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written. Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		(cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		(cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
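	/*
	 * e.g. (illustrative values, not from the original): t_task_lba =
	 * 0xffffffffffffff00 with sectors = 0x200 wraps sector_t around to
	 * a small LBA, so the wrap test below must fire before the end_lba
	 * comparison becomes meaningful.
	 */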
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}
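/*
 * Note (summary of the surrounding comments, not in the original): the
 * dev->caw_sem taken in sbc_compare_and_write() is released on exactly one
 * of three paths: here after the WRITE instance completes, in
 * compare_and_write_callback() on miscompare or failure, or directly in
 * sbc_compare_and_write() if the READ submission itself fails.
 */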
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}
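/*
 * Illustrative flow (not in the original): for COMPARE_AND_WRITE with
 * NoLB=1 on a 512-byte block device, the fabric supplies 1024 bytes of
 * data-out: bytes 0-511 are the verify payload compared above, and bytes
 * 512-1023 are the new data written out via the rebuilt write_sg.
 */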
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}
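/*
 * Example decode (illustrative): a READ(16) with RDPROTECT=001b on a
 * Type 1 formatted device takes the !is_write branch above with
 * protect = 0x1, yielding prot_op = TARGET_PROT_DIN_PASS and
 * prot_checks = TARGET_DIF_CHECK_GUARD | TARGET_DIF_CHECK_REFTAG.
 */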
static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (protect && !dev->dev_attrib.pi_prot_type) {
			pr_err("CDB contains protect bit, but device does not"
			       " advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return TCM_NO_SENSE;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length.
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}
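/*
 * Worked example (illustrative, assuming dev->prot_length = 8 for the
 * 8-byte T10 PI tuple): a READ(16) of 1024 sectors with RDPROTECT set on
 * a 512-byte block device gives prot_length = 8 * 1024 = 8192 bytes,
 * while cmd->data_length is trimmed to 1024 * 512 = 524288 bytes.
 */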
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
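	/*
	 * Illustrative byte-level semantics for the XDWRITEREAD cases below
	 * (not in the original): the data-in buffer ends up holding
	 * old_data ^ new_data, e.g. old 0x0f ^ new 0x03 = 0x0c per byte,
	 * computed in xdreadwrite_callback() above.
	 */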
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
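		/*
		 * e.g. (illustrative): NoLB=1 on a 512-byte block device
		 * means a 1024-byte data-out buffer: the first 512 bytes
		 * are the verify payload, the second 512 bytes the write
		 * payload (see compare_and_write_callback() above).
		 */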
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
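/*
 * Usage sketch (illustrative only; the fd_* names follow the pattern of
 * existing backends such as target_core_file.c and are assumptions here,
 * not part of this file): a backend wires its I/O callbacks into a
 * struct sbc_ops and delegates CDB decode to sbc_parse_cdb():
 *
 *	static struct sbc_ops fd_sbc_ops = {
 *		.execute_rw		= fd_execute_rw,
 *		.execute_sync_cache	= fd_execute_sync_cache,
 *		.execute_write_same	= fd_execute_write_same,
 *		.execute_unmap		= fd_execute_unmap,
 *	};
 *
 *	static sense_reason_t fd_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &fd_sbc_ops);
 *	}
 */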
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);
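/*
 * UNMAP parameter list layout parsed above (per SBC-3, restated here for
 * illustration): an 8-byte header (UNMAP DATA LENGTH, BLOCK DESCRIPTOR
 * DATA LENGTH, 4 reserved bytes) followed by 16-byte descriptors, each
 * holding an 8-byte starting LBA, a 4-byte NUMBER OF LOGICAL BLOCKS, and
 * 4 reserved bytes.
 */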
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}
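/*
 * Layout of the 8-byte T10 PI tuple generated above (for illustration):
 * a 2-byte GUARD tag holding the CRC16-T10DIF of the data block, a 2-byte
 * APPLICATION tag, and a 4-byte REFERENCE tag, which for Type 1 carries
 * the lower 32 bits of the LBA.
 */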
static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
		       " csum 0x%04x\n", (unsigned long long)sector,
		       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + sg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}
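/*
 * Note on the APPLICATION TAG test above (restating T10 PI semantics for
 * illustration): an app_tag of 0xffff marks a block's protection
 * information as invalid/unwritten, so guard and reference tag checks are
 * skipped for that sector.
 */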
sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);
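/*
 * Usage sketch (illustrative; the caller shape is an assumption based on
 * how backends typically integrate PI verification, not part of this
 * file): a backend's ->execute_rw() would call sbc_dif_verify_write()
 * before issuing a WRITE and sbc_dif_verify_read() after a READ lands in
 * memory, passing its local protection SGL plus offset, e.g.:
 *
 *	rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
 *				  prot_sg, prot_offset);
 *	if (rc)
 *		return rc;
 */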