/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
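	 *
	 * Note that READ CAPACITY (10) can only report a 32-bit RETURNED
	 * LOGICAL BLOCK ADDRESS; larger capacities are clamped to
	 * 0xffffffff below, directing the initiator to use
	 * READ CAPACITY (16) instead.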
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;

	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		/*
		 * Only override a device's pi_prot_type if no T10-PI is
		 * available, and sess_prot_type has been explicitly enabled.
		 */
		if (!pi_prot_type)
			pi_prot_type = sess->sess_prot_type;

		if (pi_prot_type)
			buf[12] = (pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
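	 *
	 * A NUMBER OF LOGICAL BLOCKS field of zero means "write same to the
	 * remainder of the medium", i.e. from the starting LBA through the
	 * last addressable block.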
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];

}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
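	 *
	 * ->get_blocks() returns the last addressable LBA (zero-based), so
	 * the device holds get_blocks() + 1 blocks and a request may end
	 * exactly at end_lba.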
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpws) {
			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
			       " has emulate_tpws disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
	if (ret)
		return ret;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
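	 *
	 * This callback is installed by compare_and_write_callback() just
	 * before the WRITE phase is resubmitted, so it runs once the backend
	 * has completed (or failed) the write of the new data.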
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
		return TCM_NO_SENSE;
	/*
	 * Handle special case for zero-length COMPARE_AND_WRITE
	 */
	if (!cmd->data_length)
		goto out;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
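	 *
	 * The data-out buffer carries the verify payload followed by the
	 * write payload, so write_sg is pointed either at the second block
	 * within the current miter page (block_size < PAGE_SIZE) or at the
	 * page that follows it (block_size == PAGE_SIZE).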
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
			       protect ?
			       TARGET_PROT_DOUT_PASS :
			       TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
			       protect ? TARGET_PROT_DIN_PASS :
			       TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;
	int sp_ops = cmd->se_sess->sup_prot_ops;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;
	bool fabric_prot = false;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (unlikely(protect &&
		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
			pr_err("CDB contains protect bit, but device + fabric does"
			       " not advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * See if the fabric supports T10-PI, and the session has been
		 * configured to allow export PROTECT=1 feature bit with backend
		 * devices that don't support T10-PI.
		 */
		fabric_prot = is_write ?
			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));

		if (fabric_prot && cmd->se_sess->sess_prot_type) {
			pi_prot_type = cmd->se_sess->sess_prot_type;
			break;
		}
		if (!protect)
			return TCM_NO_SENSE;
		/* Fallthrough */
	default:
		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
		       "PROTECT: 0x%02x\n", cdb[0], protect);
		return TCM_INVALID_CDB_FIELD;
	}

	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/**
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
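	 * For example, with a 512-byte block_size and 8 bytes of DIF per
	 * sector, a single-sector transfer carries 520 bytes on the wire
	 * while cmd->data_length below is reset to 512.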
	 * The actual transfer length is data length + protection
	 * length
	 **/
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}

static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
	if (cdb[1] & 0x10) {
		if (!dev->dev_attrib.emulate_dpo) {
			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
			       " does not advertise support for DPO\n", cdb[0]);
			return -EINVAL;
		}
	}
	if (cdb[1] & 0x8) {
		if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
			       " does not advertise support for FUA write\n",
			       cdb[0]);
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_FUA;
	}
	return 0;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba =
			transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
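		 *
		 * The data-out buffer carries the verify payload followed by
		 * the write payload, hence 2 * NoLB * block_size.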
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpu) {
			pr_err("Got UNMAP, but backend device has"
			       " emulate_tpu disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);

void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
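	/* One 8-byte se_dif_v1_tuple of PI is generated per logical block */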
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (cmd->data_direction == DMA_TO_DEVICE) ?
				 "WRITE" : "READ", (unsigned long long)sector,
				 sdt->guard_tag, sdt->app_tag,
				 be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}

static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	struct se_device *dev = cmd->se_dev;
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
		goto check_ref;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
		       " csum 0x%04x\n", (unsigned long long)sector,
		       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

check_ref:
	if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
		return 0;

	if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	if (!sg)
		return;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >=
			    sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	if (!sg)
		return 0;

	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + sg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}

sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);