1d6e0175cSChristoph Hellwig /* 2d6e0175cSChristoph Hellwig * SCSI Block Commands (SBC) parsing and emulation. 3d6e0175cSChristoph Hellwig * 44c76251eSNicholas Bellinger * (c) Copyright 2002-2013 Datera, Inc. 5d6e0175cSChristoph Hellwig * 6d6e0175cSChristoph Hellwig * Nicholas A. Bellinger <nab@kernel.org> 7d6e0175cSChristoph Hellwig * 8d6e0175cSChristoph Hellwig * This program is free software; you can redistribute it and/or modify 9d6e0175cSChristoph Hellwig * it under the terms of the GNU General Public License as published by 10d6e0175cSChristoph Hellwig * the Free Software Foundation; either version 2 of the License, or 11d6e0175cSChristoph Hellwig * (at your option) any later version. 12d6e0175cSChristoph Hellwig * 13d6e0175cSChristoph Hellwig * This program is distributed in the hope that it will be useful, 14d6e0175cSChristoph Hellwig * but WITHOUT ANY WARRANTY; without even the implied warranty of 15d6e0175cSChristoph Hellwig * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16d6e0175cSChristoph Hellwig * GNU General Public License for more details. 17d6e0175cSChristoph Hellwig * 18d6e0175cSChristoph Hellwig * You should have received a copy of the GNU General Public License 19d6e0175cSChristoph Hellwig * along with this program; if not, write to the Free Software 20d6e0175cSChristoph Hellwig * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
21d6e0175cSChristoph Hellwig */ 22d6e0175cSChristoph Hellwig 23d6e0175cSChristoph Hellwig #include <linux/kernel.h> 24d6e0175cSChristoph Hellwig #include <linux/module.h> 25d6e0175cSChristoph Hellwig #include <linux/ratelimit.h> 2641861fa8SNicholas Bellinger #include <linux/crc-t10dif.h> 27d6e0175cSChristoph Hellwig #include <asm/unaligned.h> 28d6e0175cSChristoph Hellwig #include <scsi/scsi.h> 2968ff9b9bSNicholas Bellinger #include <scsi/scsi_tcq.h> 30d6e0175cSChristoph Hellwig 31d6e0175cSChristoph Hellwig #include <target/target_core_base.h> 32d6e0175cSChristoph Hellwig #include <target/target_core_backend.h> 33d6e0175cSChristoph Hellwig #include <target/target_core_fabric.h> 34d6e0175cSChristoph Hellwig 35d6e0175cSChristoph Hellwig #include "target_core_internal.h" 36d6e0175cSChristoph Hellwig #include "target_core_ua.h" 37c66094bfSHannes Reinecke #include "target_core_alua.h" 38d6e0175cSChristoph Hellwig 39de103c93SChristoph Hellwig static sense_reason_t 40afd73f1bSNicholas Bellinger sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); 41afd73f1bSNicholas Bellinger 42afd73f1bSNicholas Bellinger static sense_reason_t 43de103c93SChristoph Hellwig sbc_emulate_readcapacity(struct se_cmd *cmd) 441fd032eeSChristoph Hellwig { 451fd032eeSChristoph Hellwig struct se_device *dev = cmd->se_dev; 468dc8632aSRoland Dreier unsigned char *cdb = cmd->t_task_cdb; 471fd032eeSChristoph Hellwig unsigned long long blocks_long = dev->transport->get_blocks(dev); 48a50da144SPaolo Bonzini unsigned char *rbuf; 49a50da144SPaolo Bonzini unsigned char buf[8]; 501fd032eeSChristoph Hellwig u32 blocks; 511fd032eeSChristoph Hellwig 528dc8632aSRoland Dreier /* 538dc8632aSRoland Dreier * SBC-2 says: 548dc8632aSRoland Dreier * If the PMI bit is set to zero and the LOGICAL BLOCK 558dc8632aSRoland Dreier * ADDRESS field is not set to zero, the device server shall 568dc8632aSRoland Dreier * terminate the command with CHECK CONDITION status with 578dc8632aSRoland Dreier * 
the sense key set to ILLEGAL REQUEST and the additional 588dc8632aSRoland Dreier * sense code set to INVALID FIELD IN CDB. 598dc8632aSRoland Dreier * 608dc8632aSRoland Dreier * In SBC-3, these fields are obsolete, but some SCSI 618dc8632aSRoland Dreier * compliance tests actually check this, so we might as well 628dc8632aSRoland Dreier * follow SBC-2. 638dc8632aSRoland Dreier */ 648dc8632aSRoland Dreier if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5])) 658dc8632aSRoland Dreier return TCM_INVALID_CDB_FIELD; 668dc8632aSRoland Dreier 671fd032eeSChristoph Hellwig if (blocks_long >= 0x00000000ffffffff) 681fd032eeSChristoph Hellwig blocks = 0xffffffff; 691fd032eeSChristoph Hellwig else 701fd032eeSChristoph Hellwig blocks = (u32)blocks_long; 711fd032eeSChristoph Hellwig 721fd032eeSChristoph Hellwig buf[0] = (blocks >> 24) & 0xff; 731fd032eeSChristoph Hellwig buf[1] = (blocks >> 16) & 0xff; 741fd032eeSChristoph Hellwig buf[2] = (blocks >> 8) & 0xff; 751fd032eeSChristoph Hellwig buf[3] = blocks & 0xff; 760fd97ccfSChristoph Hellwig buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; 770fd97ccfSChristoph Hellwig buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; 780fd97ccfSChristoph Hellwig buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; 790fd97ccfSChristoph Hellwig buf[7] = dev->dev_attrib.block_size & 0xff; 801fd032eeSChristoph Hellwig 81a50da144SPaolo Bonzini rbuf = transport_kmap_data_sg(cmd); 828b4b0dcbSNicholas Bellinger if (rbuf) { 83a50da144SPaolo Bonzini memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 841fd032eeSChristoph Hellwig transport_kunmap_data_sg(cmd); 858b4b0dcbSNicholas Bellinger } 861fd032eeSChristoph Hellwig 872426bd45SRoland Dreier target_complete_cmd_with_length(cmd, GOOD, 8); 881fd032eeSChristoph Hellwig return 0; 891fd032eeSChristoph Hellwig } 901fd032eeSChristoph Hellwig 91de103c93SChristoph Hellwig static sense_reason_t 92de103c93SChristoph Hellwig sbc_emulate_readcapacity_16(struct se_cmd *cmd) 931fd032eeSChristoph 
Hellwig { 941fd032eeSChristoph Hellwig struct se_device *dev = cmd->se_dev; 952d335983SNicholas Bellinger struct se_session *sess = cmd->se_sess; 96a50da144SPaolo Bonzini unsigned char *rbuf; 97a50da144SPaolo Bonzini unsigned char buf[32]; 981fd032eeSChristoph Hellwig unsigned long long blocks = dev->transport->get_blocks(dev); 991fd032eeSChristoph Hellwig 100a50da144SPaolo Bonzini memset(buf, 0, sizeof(buf)); 1011fd032eeSChristoph Hellwig buf[0] = (blocks >> 56) & 0xff; 1021fd032eeSChristoph Hellwig buf[1] = (blocks >> 48) & 0xff; 1031fd032eeSChristoph Hellwig buf[2] = (blocks >> 40) & 0xff; 1041fd032eeSChristoph Hellwig buf[3] = (blocks >> 32) & 0xff; 1051fd032eeSChristoph Hellwig buf[4] = (blocks >> 24) & 0xff; 1061fd032eeSChristoph Hellwig buf[5] = (blocks >> 16) & 0xff; 1071fd032eeSChristoph Hellwig buf[6] = (blocks >> 8) & 0xff; 1081fd032eeSChristoph Hellwig buf[7] = blocks & 0xff; 1090fd97ccfSChristoph Hellwig buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; 1100fd97ccfSChristoph Hellwig buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 1110fd97ccfSChristoph Hellwig buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 1120fd97ccfSChristoph Hellwig buf[11] = dev->dev_attrib.block_size & 0xff; 11356dac14cSNicholas Bellinger /* 11456dac14cSNicholas Bellinger * Set P_TYPE and PROT_EN bits for DIF support 11556dac14cSNicholas Bellinger */ 1162d335983SNicholas Bellinger if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 11756dac14cSNicholas Bellinger if (dev->dev_attrib.pi_prot_type) 11856dac14cSNicholas Bellinger buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; 1192d335983SNicholas Bellinger } 1207f7caf6aSAndy Grover 1217f7caf6aSAndy Grover if (dev->transport->get_lbppbe) 1227f7caf6aSAndy Grover buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; 1237f7caf6aSAndy Grover 1247f7caf6aSAndy Grover if (dev->transport->get_alignment_offset_lbas) { 1257f7caf6aSAndy Grover u16 lalba = dev->transport->get_alignment_offset_lbas(dev); 
1267f7caf6aSAndy Grover buf[14] = (lalba >> 8) & 0x3f; 1277f7caf6aSAndy Grover buf[15] = lalba & 0xff; 1287f7caf6aSAndy Grover } 1297f7caf6aSAndy Grover 1301fd032eeSChristoph Hellwig /* 1311fd032eeSChristoph Hellwig * Set Thin Provisioning Enable bit following sbc3r22 in section 1321fd032eeSChristoph Hellwig * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 1331fd032eeSChristoph Hellwig */ 1340fd97ccfSChristoph Hellwig if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 1357f7caf6aSAndy Grover buf[14] |= 0x80; 1361fd032eeSChristoph Hellwig 137a50da144SPaolo Bonzini rbuf = transport_kmap_data_sg(cmd); 1388b4b0dcbSNicholas Bellinger if (rbuf) { 139a50da144SPaolo Bonzini memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 1401fd032eeSChristoph Hellwig transport_kunmap_data_sg(cmd); 1418b4b0dcbSNicholas Bellinger } 1421fd032eeSChristoph Hellwig 1432426bd45SRoland Dreier target_complete_cmd_with_length(cmd, GOOD, 32); 1441fd032eeSChristoph Hellwig return 0; 1451fd032eeSChristoph Hellwig } 1461fd032eeSChristoph Hellwig 147972b29c8SRoland Dreier sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) 1481fd032eeSChristoph Hellwig { 1491fd032eeSChristoph Hellwig u32 num_blocks; 1501fd032eeSChristoph Hellwig 1511fd032eeSChristoph Hellwig if (cmd->t_task_cdb[0] == WRITE_SAME) 1521fd032eeSChristoph Hellwig num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); 1531fd032eeSChristoph Hellwig else if (cmd->t_task_cdb[0] == WRITE_SAME_16) 1541fd032eeSChristoph Hellwig num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); 1551fd032eeSChristoph Hellwig else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ 1561fd032eeSChristoph Hellwig num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); 1571fd032eeSChristoph Hellwig 1581fd032eeSChristoph Hellwig /* 1591fd032eeSChristoph Hellwig * Use the explicit range when non zero is supplied, otherwise calculate 1601fd032eeSChristoph Hellwig * the remaining range based on ->get_blocks() - starting LBA. 
1611fd032eeSChristoph Hellwig */ 1626f974e8cSChristoph Hellwig if (num_blocks) 1636f974e8cSChristoph Hellwig return num_blocks; 1641fd032eeSChristoph Hellwig 1656f974e8cSChristoph Hellwig return cmd->se_dev->transport->get_blocks(cmd->se_dev) - 1666f974e8cSChristoph Hellwig cmd->t_task_lba + 1; 1671fd032eeSChristoph Hellwig } 168972b29c8SRoland Dreier EXPORT_SYMBOL(sbc_get_write_same_sectors); 1691fd032eeSChristoph Hellwig 170de103c93SChristoph Hellwig static sense_reason_t 1711920ed61SNicholas Bellinger sbc_emulate_noop(struct se_cmd *cmd) 1721a1ff38cSBernhard Kohl { 1731a1ff38cSBernhard Kohl target_complete_cmd(cmd, GOOD); 1741a1ff38cSBernhard Kohl return 0; 1751a1ff38cSBernhard Kohl } 1761a1ff38cSBernhard Kohl 177d6e0175cSChristoph Hellwig static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 178d6e0175cSChristoph Hellwig { 1790fd97ccfSChristoph Hellwig return cmd->se_dev->dev_attrib.block_size * sectors; 180d6e0175cSChristoph Hellwig } 181d6e0175cSChristoph Hellwig 182d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_6(unsigned char *cdb) 183d6e0175cSChristoph Hellwig { 184d6e0175cSChristoph Hellwig /* 185d6e0175cSChristoph Hellwig * Use 8-bit sector value. SBC-3 says: 186d6e0175cSChristoph Hellwig * 187d6e0175cSChristoph Hellwig * A TRANSFER LENGTH field set to zero specifies that 256 188d6e0175cSChristoph Hellwig * logical blocks shall be written. Any other value 189d6e0175cSChristoph Hellwig * specifies the number of logical blocks that shall be 190d6e0175cSChristoph Hellwig * written. 191d6e0175cSChristoph Hellwig */ 192d6e0175cSChristoph Hellwig return cdb[4] ? 
: 256; 193d6e0175cSChristoph Hellwig } 194d6e0175cSChristoph Hellwig 195d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_10(unsigned char *cdb) 196d6e0175cSChristoph Hellwig { 197d6e0175cSChristoph Hellwig return (u32)(cdb[7] << 8) + cdb[8]; 198d6e0175cSChristoph Hellwig } 199d6e0175cSChristoph Hellwig 200d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_12(unsigned char *cdb) 201d6e0175cSChristoph Hellwig { 202d6e0175cSChristoph Hellwig return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 203d6e0175cSChristoph Hellwig } 204d6e0175cSChristoph Hellwig 205d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_16(unsigned char *cdb) 206d6e0175cSChristoph Hellwig { 207d6e0175cSChristoph Hellwig return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 208d6e0175cSChristoph Hellwig (cdb[12] << 8) + cdb[13]; 209d6e0175cSChristoph Hellwig } 210d6e0175cSChristoph Hellwig 211d6e0175cSChristoph Hellwig /* 212d6e0175cSChristoph Hellwig * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 213d6e0175cSChristoph Hellwig */ 214d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_32(unsigned char *cdb) 215d6e0175cSChristoph Hellwig { 216d6e0175cSChristoph Hellwig return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 217d6e0175cSChristoph Hellwig (cdb[30] << 8) + cdb[31]; 218d6e0175cSChristoph Hellwig 219d6e0175cSChristoph Hellwig } 220d6e0175cSChristoph Hellwig 221d6e0175cSChristoph Hellwig static inline u32 transport_lba_21(unsigned char *cdb) 222d6e0175cSChristoph Hellwig { 223d6e0175cSChristoph Hellwig return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 224d6e0175cSChristoph Hellwig } 225d6e0175cSChristoph Hellwig 226d6e0175cSChristoph Hellwig static inline u32 transport_lba_32(unsigned char *cdb) 227d6e0175cSChristoph Hellwig { 228d6e0175cSChristoph Hellwig return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 229d6e0175cSChristoph Hellwig } 230d6e0175cSChristoph Hellwig 231d6e0175cSChristoph 
Hellwig static inline unsigned long long transport_lba_64(unsigned char *cdb) 232d6e0175cSChristoph Hellwig { 233d6e0175cSChristoph Hellwig unsigned int __v1, __v2; 234d6e0175cSChristoph Hellwig 235d6e0175cSChristoph Hellwig __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 236d6e0175cSChristoph Hellwig __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 237d6e0175cSChristoph Hellwig 238d6e0175cSChristoph Hellwig return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 239d6e0175cSChristoph Hellwig } 240d6e0175cSChristoph Hellwig 241d6e0175cSChristoph Hellwig /* 242d6e0175cSChristoph Hellwig * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 243d6e0175cSChristoph Hellwig */ 244d6e0175cSChristoph Hellwig static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 245d6e0175cSChristoph Hellwig { 246d6e0175cSChristoph Hellwig unsigned int __v1, __v2; 247d6e0175cSChristoph Hellwig 248d6e0175cSChristoph Hellwig __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 249d6e0175cSChristoph Hellwig __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 250d6e0175cSChristoph Hellwig 251d6e0175cSChristoph Hellwig return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 252d6e0175cSChristoph Hellwig } 253d6e0175cSChristoph Hellwig 254cd063befSNicholas Bellinger static sense_reason_t 255cd063befSNicholas Bellinger sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) 256d6e0175cSChristoph Hellwig { 2578e575c50SNicholas Bellinger struct se_device *dev = cmd->se_dev; 2588e575c50SNicholas Bellinger sector_t end_lba = dev->transport->get_blocks(dev) + 1; 259972b29c8SRoland Dreier unsigned int sectors = sbc_get_write_same_sectors(cmd); 260afd73f1bSNicholas Bellinger sense_reason_t ret; 261773cbaf7SNicholas Bellinger 262d6e0175cSChristoph Hellwig if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 263d6e0175cSChristoph Hellwig pr_err("WRITE_SAME PBDATA and LBDATA" 
264d6e0175cSChristoph Hellwig " bits not supported for Block Discard" 265d6e0175cSChristoph Hellwig " Emulation\n"); 266cd063befSNicholas Bellinger return TCM_UNSUPPORTED_SCSI_OPCODE; 267d6e0175cSChristoph Hellwig } 268773cbaf7SNicholas Bellinger if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { 269773cbaf7SNicholas Bellinger pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", 270773cbaf7SNicholas Bellinger sectors, cmd->se_dev->dev_attrib.max_write_same_len); 271773cbaf7SNicholas Bellinger return TCM_INVALID_CDB_FIELD; 272773cbaf7SNicholas Bellinger } 2738e575c50SNicholas Bellinger /* 2748e575c50SNicholas Bellinger * Sanity check for LBA wrap and request past end of device. 2758e575c50SNicholas Bellinger */ 2768e575c50SNicholas Bellinger if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || 2778e575c50SNicholas Bellinger ((cmd->t_task_lba + sectors) > end_lba)) { 2788e575c50SNicholas Bellinger pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", 2798e575c50SNicholas Bellinger (unsigned long long)end_lba, cmd->t_task_lba, sectors); 2808e575c50SNicholas Bellinger return TCM_ADDRESS_OUT_OF_RANGE; 2818e575c50SNicholas Bellinger } 2828e575c50SNicholas Bellinger 2835cb770bfSRoland Dreier /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ 2845cb770bfSRoland Dreier if (flags[0] & 0x10) { 2855cb770bfSRoland Dreier pr_warn("WRITE SAME with ANCHOR not supported\n"); 2865cb770bfSRoland Dreier return TCM_INVALID_CDB_FIELD; 2875cb770bfSRoland Dreier } 288d6e0175cSChristoph Hellwig /* 289cd063befSNicholas Bellinger * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 290cd063befSNicholas Bellinger * translated into block discard requests within backend code. 
291d6e0175cSChristoph Hellwig */ 292cd063befSNicholas Bellinger if (flags[0] & 0x08) { 293cd063befSNicholas Bellinger if (!ops->execute_write_same_unmap) 294cd063befSNicholas Bellinger return TCM_UNSUPPORTED_SCSI_OPCODE; 295d6e0175cSChristoph Hellwig 296cd063befSNicholas Bellinger cmd->execute_cmd = ops->execute_write_same_unmap; 297cd063befSNicholas Bellinger return 0; 298cd063befSNicholas Bellinger } 299cd063befSNicholas Bellinger if (!ops->execute_write_same) 300cd063befSNicholas Bellinger return TCM_UNSUPPORTED_SCSI_OPCODE; 301cd063befSNicholas Bellinger 302afd73f1bSNicholas Bellinger ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); 303afd73f1bSNicholas Bellinger if (ret) 304afd73f1bSNicholas Bellinger return ret; 305afd73f1bSNicholas Bellinger 306cd063befSNicholas Bellinger cmd->execute_cmd = ops->execute_write_same; 307d6e0175cSChristoph Hellwig return 0; 308d6e0175cSChristoph Hellwig } 309d6e0175cSChristoph Hellwig 310a6b0133cSNicholas Bellinger static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) 311d6e0175cSChristoph Hellwig { 312d6e0175cSChristoph Hellwig unsigned char *buf, *addr; 313d6e0175cSChristoph Hellwig struct scatterlist *sg; 314d6e0175cSChristoph Hellwig unsigned int offset; 315a6b0133cSNicholas Bellinger sense_reason_t ret = TCM_NO_SENSE; 316a6b0133cSNicholas Bellinger int i, count; 317d6e0175cSChristoph Hellwig /* 318d6e0175cSChristoph Hellwig * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 319d6e0175cSChristoph Hellwig * 320d6e0175cSChristoph Hellwig * 1) read the specified logical block(s); 321d6e0175cSChristoph Hellwig * 2) transfer logical blocks from the data-out buffer; 322d6e0175cSChristoph Hellwig * 3) XOR the logical blocks transferred from the data-out buffer with 323d6e0175cSChristoph Hellwig * the logical blocks read, storing the resulting XOR data in a buffer; 324d6e0175cSChristoph Hellwig * 4) if the DISABLE WRITE bit is set to zero, then write the logical 325d6e0175cSChristoph Hellwig * 
blocks transferred from the data-out buffer; and 326d6e0175cSChristoph Hellwig * 5) transfer the resulting XOR data to the data-in buffer. 327d6e0175cSChristoph Hellwig */ 328d6e0175cSChristoph Hellwig buf = kmalloc(cmd->data_length, GFP_KERNEL); 329d6e0175cSChristoph Hellwig if (!buf) { 330d6e0175cSChristoph Hellwig pr_err("Unable to allocate xor_callback buf\n"); 331a6b0133cSNicholas Bellinger return TCM_OUT_OF_RESOURCES; 332d6e0175cSChristoph Hellwig } 333d6e0175cSChristoph Hellwig /* 334d6e0175cSChristoph Hellwig * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 335d6e0175cSChristoph Hellwig * into the locally allocated *buf 336d6e0175cSChristoph Hellwig */ 337d6e0175cSChristoph Hellwig sg_copy_to_buffer(cmd->t_data_sg, 338d6e0175cSChristoph Hellwig cmd->t_data_nents, 339d6e0175cSChristoph Hellwig buf, 340d6e0175cSChristoph Hellwig cmd->data_length); 341d6e0175cSChristoph Hellwig 342d6e0175cSChristoph Hellwig /* 343d6e0175cSChristoph Hellwig * Now perform the XOR against the BIDI read memory located at 344d6e0175cSChristoph Hellwig * cmd->t_mem_bidi_list 345d6e0175cSChristoph Hellwig */ 346d6e0175cSChristoph Hellwig 347d6e0175cSChristoph Hellwig offset = 0; 348d6e0175cSChristoph Hellwig for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 349d6e0175cSChristoph Hellwig addr = kmap_atomic(sg_page(sg)); 350a6b0133cSNicholas Bellinger if (!addr) { 351a6b0133cSNicholas Bellinger ret = TCM_OUT_OF_RESOURCES; 352d6e0175cSChristoph Hellwig goto out; 353a6b0133cSNicholas Bellinger } 354d6e0175cSChristoph Hellwig 355d6e0175cSChristoph Hellwig for (i = 0; i < sg->length; i++) 356d6e0175cSChristoph Hellwig *(addr + sg->offset + i) ^= *(buf + offset + i); 357d6e0175cSChristoph Hellwig 358d6e0175cSChristoph Hellwig offset += sg->length; 359d6e0175cSChristoph Hellwig kunmap_atomic(addr); 360d6e0175cSChristoph Hellwig } 361d6e0175cSChristoph Hellwig 362d6e0175cSChristoph Hellwig out: 363d6e0175cSChristoph Hellwig kfree(buf); 364a6b0133cSNicholas 
Bellinger return ret; 365d6e0175cSChristoph Hellwig } 366d6e0175cSChristoph Hellwig 367a82a9538SNicholas Bellinger static sense_reason_t 368a82a9538SNicholas Bellinger sbc_execute_rw(struct se_cmd *cmd) 369a82a9538SNicholas Bellinger { 370a82a9538SNicholas Bellinger return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, 371a82a9538SNicholas Bellinger cmd->data_direction); 372a82a9538SNicholas Bellinger } 373a82a9538SNicholas Bellinger 37468ff9b9bSNicholas Bellinger static sense_reason_t compare_and_write_post(struct se_cmd *cmd) 37568ff9b9bSNicholas Bellinger { 37668ff9b9bSNicholas Bellinger struct se_device *dev = cmd->se_dev; 37768ff9b9bSNicholas Bellinger 378d8855c15SNicholas Bellinger /* 379d8855c15SNicholas Bellinger * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through 380d8855c15SNicholas Bellinger * within target_complete_ok_work() if the command was successfully 381d8855c15SNicholas Bellinger * sent to the backend driver. 382d8855c15SNicholas Bellinger */ 383d8855c15SNicholas Bellinger spin_lock_irq(&cmd->t_state_lock); 384d8855c15SNicholas Bellinger if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) 38568ff9b9bSNicholas Bellinger cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 386d8855c15SNicholas Bellinger spin_unlock_irq(&cmd->t_state_lock); 387d8855c15SNicholas Bellinger 38868ff9b9bSNicholas Bellinger /* 38968ff9b9bSNicholas Bellinger * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 39068ff9b9bSNicholas Bellinger * before the original READ I/O submission. 
39168ff9b9bSNicholas Bellinger */ 39268ff9b9bSNicholas Bellinger up(&dev->caw_sem); 39368ff9b9bSNicholas Bellinger 39468ff9b9bSNicholas Bellinger return TCM_NO_SENSE; 39568ff9b9bSNicholas Bellinger } 39668ff9b9bSNicholas Bellinger 39768ff9b9bSNicholas Bellinger static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) 39868ff9b9bSNicholas Bellinger { 39968ff9b9bSNicholas Bellinger struct se_device *dev = cmd->se_dev; 40068ff9b9bSNicholas Bellinger struct scatterlist *write_sg = NULL, *sg; 401db60df88SNicholas Bellinger unsigned char *buf = NULL, *addr; 40268ff9b9bSNicholas Bellinger struct sg_mapping_iter m; 40368ff9b9bSNicholas Bellinger unsigned int offset = 0, len; 40468ff9b9bSNicholas Bellinger unsigned int nlbas = cmd->t_task_nolb; 40568ff9b9bSNicholas Bellinger unsigned int block_size = dev->dev_attrib.block_size; 40668ff9b9bSNicholas Bellinger unsigned int compare_len = (nlbas * block_size); 40768ff9b9bSNicholas Bellinger sense_reason_t ret = TCM_NO_SENSE; 40868ff9b9bSNicholas Bellinger int rc, i; 40968ff9b9bSNicholas Bellinger 410cf6d1f09SNicholas Bellinger /* 411cf6d1f09SNicholas Bellinger * Handle early failure in transport_generic_request_failure(), 412cf6d1f09SNicholas Bellinger * which will not have taken ->caw_mutex yet.. 413cf6d1f09SNicholas Bellinger */ 414cf6d1f09SNicholas Bellinger if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 415cf6d1f09SNicholas Bellinger return TCM_NO_SENSE; 416db60df88SNicholas Bellinger /* 417db60df88SNicholas Bellinger * Immediately exit + release dev->caw_sem if command has already 418db60df88SNicholas Bellinger * been failed with a non-zero SCSI status. 
419db60df88SNicholas Bellinger */ 420db60df88SNicholas Bellinger if (cmd->scsi_status) { 421db60df88SNicholas Bellinger pr_err("compare_and_write_callback: non zero scsi_status:" 422db60df88SNicholas Bellinger " 0x%02x\n", cmd->scsi_status); 423db60df88SNicholas Bellinger goto out; 424db60df88SNicholas Bellinger } 425cf6d1f09SNicholas Bellinger 42668ff9b9bSNicholas Bellinger buf = kzalloc(cmd->data_length, GFP_KERNEL); 42768ff9b9bSNicholas Bellinger if (!buf) { 42868ff9b9bSNicholas Bellinger pr_err("Unable to allocate compare_and_write buf\n"); 429a2890087SNicholas Bellinger ret = TCM_OUT_OF_RESOURCES; 430a2890087SNicholas Bellinger goto out; 43168ff9b9bSNicholas Bellinger } 43268ff9b9bSNicholas Bellinger 433a1e1774cSMartin Svec write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, 43468ff9b9bSNicholas Bellinger GFP_KERNEL); 43568ff9b9bSNicholas Bellinger if (!write_sg) { 43668ff9b9bSNicholas Bellinger pr_err("Unable to allocate compare_and_write sg\n"); 43768ff9b9bSNicholas Bellinger ret = TCM_OUT_OF_RESOURCES; 43868ff9b9bSNicholas Bellinger goto out; 43968ff9b9bSNicholas Bellinger } 440a1e1774cSMartin Svec sg_init_table(write_sg, cmd->t_data_nents); 44168ff9b9bSNicholas Bellinger /* 44268ff9b9bSNicholas Bellinger * Setup verify and write data payloads from total NumberLBAs. 
44368ff9b9bSNicholas Bellinger */ 44468ff9b9bSNicholas Bellinger rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, 44568ff9b9bSNicholas Bellinger cmd->data_length); 44668ff9b9bSNicholas Bellinger if (!rc) { 44768ff9b9bSNicholas Bellinger pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); 44868ff9b9bSNicholas Bellinger ret = TCM_OUT_OF_RESOURCES; 44968ff9b9bSNicholas Bellinger goto out; 45068ff9b9bSNicholas Bellinger } 45168ff9b9bSNicholas Bellinger /* 45268ff9b9bSNicholas Bellinger * Compare against SCSI READ payload against verify payload 45368ff9b9bSNicholas Bellinger */ 45468ff9b9bSNicholas Bellinger for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { 45568ff9b9bSNicholas Bellinger addr = (unsigned char *)kmap_atomic(sg_page(sg)); 45668ff9b9bSNicholas Bellinger if (!addr) { 45768ff9b9bSNicholas Bellinger ret = TCM_OUT_OF_RESOURCES; 45868ff9b9bSNicholas Bellinger goto out; 45968ff9b9bSNicholas Bellinger } 46068ff9b9bSNicholas Bellinger 46168ff9b9bSNicholas Bellinger len = min(sg->length, compare_len); 46268ff9b9bSNicholas Bellinger 46368ff9b9bSNicholas Bellinger if (memcmp(addr, buf + offset, len)) { 46468ff9b9bSNicholas Bellinger pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n", 46568ff9b9bSNicholas Bellinger addr, buf + offset); 46668ff9b9bSNicholas Bellinger kunmap_atomic(addr); 46768ff9b9bSNicholas Bellinger goto miscompare; 46868ff9b9bSNicholas Bellinger } 46968ff9b9bSNicholas Bellinger kunmap_atomic(addr); 47068ff9b9bSNicholas Bellinger 47168ff9b9bSNicholas Bellinger offset += len; 47268ff9b9bSNicholas Bellinger compare_len -= len; 47368ff9b9bSNicholas Bellinger if (!compare_len) 47468ff9b9bSNicholas Bellinger break; 47568ff9b9bSNicholas Bellinger } 47668ff9b9bSNicholas Bellinger 47768ff9b9bSNicholas Bellinger i = 0; 47868ff9b9bSNicholas Bellinger len = cmd->t_task_nolb * block_size; 47968ff9b9bSNicholas Bellinger sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); 48068ff9b9bSNicholas Bellinger 
/* 48168ff9b9bSNicholas Bellinger * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. 48268ff9b9bSNicholas Bellinger */ 48368ff9b9bSNicholas Bellinger while (len) { 48468ff9b9bSNicholas Bellinger sg_miter_next(&m); 48568ff9b9bSNicholas Bellinger 48668ff9b9bSNicholas Bellinger if (block_size < PAGE_SIZE) { 48768ff9b9bSNicholas Bellinger sg_set_page(&write_sg[i], m.page, block_size, 48868ff9b9bSNicholas Bellinger block_size); 48968ff9b9bSNicholas Bellinger } else { 49068ff9b9bSNicholas Bellinger sg_miter_next(&m); 49168ff9b9bSNicholas Bellinger sg_set_page(&write_sg[i], m.page, block_size, 49268ff9b9bSNicholas Bellinger 0); 49368ff9b9bSNicholas Bellinger } 49468ff9b9bSNicholas Bellinger len -= block_size; 49568ff9b9bSNicholas Bellinger i++; 49668ff9b9bSNicholas Bellinger } 49768ff9b9bSNicholas Bellinger sg_miter_stop(&m); 49868ff9b9bSNicholas Bellinger /* 49968ff9b9bSNicholas Bellinger * Save the original SGL + nents values before updating to new 50068ff9b9bSNicholas Bellinger * assignments, to be released in transport_free_pages() -> 50168ff9b9bSNicholas Bellinger * transport_reset_sgl_orig() 50268ff9b9bSNicholas Bellinger */ 50368ff9b9bSNicholas Bellinger cmd->t_data_sg_orig = cmd->t_data_sg; 50468ff9b9bSNicholas Bellinger cmd->t_data_sg = write_sg; 50568ff9b9bSNicholas Bellinger cmd->t_data_nents_orig = cmd->t_data_nents; 50668ff9b9bSNicholas Bellinger cmd->t_data_nents = 1; 50768ff9b9bSNicholas Bellinger 50868d81f40SChristoph Hellwig cmd->sam_task_attr = TCM_HEAD_TAG; 50968ff9b9bSNicholas Bellinger cmd->transport_complete_callback = compare_and_write_post; 51068ff9b9bSNicholas Bellinger /* 51168ff9b9bSNicholas Bellinger * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler 51268ff9b9bSNicholas Bellinger * for submitting the adjusted SGL to write instance user-data. 
51368ff9b9bSNicholas Bellinger */ 51468ff9b9bSNicholas Bellinger cmd->execute_cmd = sbc_execute_rw; 51568ff9b9bSNicholas Bellinger 51668ff9b9bSNicholas Bellinger spin_lock_irq(&cmd->t_state_lock); 51768ff9b9bSNicholas Bellinger cmd->t_state = TRANSPORT_PROCESSING; 51868ff9b9bSNicholas Bellinger cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 51968ff9b9bSNicholas Bellinger spin_unlock_irq(&cmd->t_state_lock); 52068ff9b9bSNicholas Bellinger 52168ff9b9bSNicholas Bellinger __target_execute_cmd(cmd); 52268ff9b9bSNicholas Bellinger 52368ff9b9bSNicholas Bellinger kfree(buf); 52468ff9b9bSNicholas Bellinger return ret; 52568ff9b9bSNicholas Bellinger 52668ff9b9bSNicholas Bellinger miscompare: 52768ff9b9bSNicholas Bellinger pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n", 52868ff9b9bSNicholas Bellinger dev->transport->name); 52968ff9b9bSNicholas Bellinger ret = TCM_MISCOMPARE_VERIFY; 53068ff9b9bSNicholas Bellinger out: 53168ff9b9bSNicholas Bellinger /* 53268ff9b9bSNicholas Bellinger * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in 53368ff9b9bSNicholas Bellinger * sbc_compare_and_write() before the original READ I/O submission. 53468ff9b9bSNicholas Bellinger */ 53568ff9b9bSNicholas Bellinger up(&dev->caw_sem); 53668ff9b9bSNicholas Bellinger kfree(write_sg); 53768ff9b9bSNicholas Bellinger kfree(buf); 53868ff9b9bSNicholas Bellinger return ret; 53968ff9b9bSNicholas Bellinger } 54068ff9b9bSNicholas Bellinger 54168ff9b9bSNicholas Bellinger static sense_reason_t 54268ff9b9bSNicholas Bellinger sbc_compare_and_write(struct se_cmd *cmd) 54368ff9b9bSNicholas Bellinger { 54468ff9b9bSNicholas Bellinger struct se_device *dev = cmd->se_dev; 54568ff9b9bSNicholas Bellinger sense_reason_t ret; 54668ff9b9bSNicholas Bellinger int rc; 54768ff9b9bSNicholas Bellinger /* 54868ff9b9bSNicholas Bellinger * Submit the READ first for COMPARE_AND_WRITE to perform the 54968ff9b9bSNicholas Bellinger * comparision using SGLs at cmd->t_bidi_data_sg.. 
55068ff9b9bSNicholas Bellinger */ 55168ff9b9bSNicholas Bellinger rc = down_interruptible(&dev->caw_sem); 55268ff9b9bSNicholas Bellinger if ((rc != 0) || signal_pending(current)) { 55368ff9b9bSNicholas Bellinger cmd->transport_complete_callback = NULL; 55468ff9b9bSNicholas Bellinger return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 55568ff9b9bSNicholas Bellinger } 556b7191253SNicholas Bellinger /* 557b7191253SNicholas Bellinger * Reset cmd->data_length to individual block_size in order to not 558b7191253SNicholas Bellinger * confuse backend drivers that depend on this value matching the 559b7191253SNicholas Bellinger * size of the I/O being submitted. 560b7191253SNicholas Bellinger */ 561b7191253SNicholas Bellinger cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; 56268ff9b9bSNicholas Bellinger 56368ff9b9bSNicholas Bellinger ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 56468ff9b9bSNicholas Bellinger DMA_FROM_DEVICE); 56568ff9b9bSNicholas Bellinger if (ret) { 56668ff9b9bSNicholas Bellinger cmd->transport_complete_callback = NULL; 56768ff9b9bSNicholas Bellinger up(&dev->caw_sem); 56868ff9b9bSNicholas Bellinger return ret; 56968ff9b9bSNicholas Bellinger } 57068ff9b9bSNicholas Bellinger /* 57168ff9b9bSNicholas Bellinger * Unlock of dev->caw_sem to occur in compare_and_write_callback() 57268ff9b9bSNicholas Bellinger * upon MISCOMPARE, or in compare_and_write_done() upon completion 57368ff9b9bSNicholas Bellinger * of WRITE instance user-data. 57468ff9b9bSNicholas Bellinger */ 57568ff9b9bSNicholas Bellinger return TCM_NO_SENSE; 57668ff9b9bSNicholas Bellinger } 57768ff9b9bSNicholas Bellinger 57819f9361aSSagi Grimberg static int 57919f9361aSSagi Grimberg sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, 58019f9361aSSagi Grimberg bool is_write, struct se_cmd *cmd) 58119f9361aSSagi Grimberg { 58219f9361aSSagi Grimberg if (is_write) { 58319f9361aSSagi Grimberg cmd->prot_op = protect ? 
TARGET_PROT_DOUT_PASS : 58419f9361aSSagi Grimberg TARGET_PROT_DOUT_INSERT; 58519f9361aSSagi Grimberg switch (protect) { 58619f9361aSSagi Grimberg case 0x0: 58719f9361aSSagi Grimberg case 0x3: 58819f9361aSSagi Grimberg cmd->prot_checks = 0; 58919f9361aSSagi Grimberg break; 59019f9361aSSagi Grimberg case 0x1: 59119f9361aSSagi Grimberg case 0x5: 59219f9361aSSagi Grimberg cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 59319f9361aSSagi Grimberg if (prot_type == TARGET_DIF_TYPE1_PROT) 59419f9361aSSagi Grimberg cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 59519f9361aSSagi Grimberg break; 59619f9361aSSagi Grimberg case 0x2: 59719f9361aSSagi Grimberg if (prot_type == TARGET_DIF_TYPE1_PROT) 59819f9361aSSagi Grimberg cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 59919f9361aSSagi Grimberg break; 60019f9361aSSagi Grimberg case 0x4: 60119f9361aSSagi Grimberg cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 60219f9361aSSagi Grimberg break; 60319f9361aSSagi Grimberg default: 60419f9361aSSagi Grimberg pr_err("Unsupported protect field %d\n", protect); 60519f9361aSSagi Grimberg return -EINVAL; 60619f9361aSSagi Grimberg } 60719f9361aSSagi Grimberg } else { 60819f9361aSSagi Grimberg cmd->prot_op = protect ? 
TARGET_PROT_DIN_PASS : 60919f9361aSSagi Grimberg TARGET_PROT_DIN_STRIP; 61019f9361aSSagi Grimberg switch (protect) { 61119f9361aSSagi Grimberg case 0x0: 61219f9361aSSagi Grimberg case 0x1: 61319f9361aSSagi Grimberg case 0x5: 61419f9361aSSagi Grimberg cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 61519f9361aSSagi Grimberg if (prot_type == TARGET_DIF_TYPE1_PROT) 61619f9361aSSagi Grimberg cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 61719f9361aSSagi Grimberg break; 61819f9361aSSagi Grimberg case 0x2: 61919f9361aSSagi Grimberg if (prot_type == TARGET_DIF_TYPE1_PROT) 62019f9361aSSagi Grimberg cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 62119f9361aSSagi Grimberg break; 62219f9361aSSagi Grimberg case 0x3: 62319f9361aSSagi Grimberg cmd->prot_checks = 0; 62419f9361aSSagi Grimberg break; 62519f9361aSSagi Grimberg case 0x4: 62619f9361aSSagi Grimberg cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 62719f9361aSSagi Grimberg break; 62819f9361aSSagi Grimberg default: 62919f9361aSSagi Grimberg pr_err("Unsupported protect field %d\n", protect); 63019f9361aSSagi Grimberg return -EINVAL; 63119f9361aSSagi Grimberg } 63219f9361aSSagi Grimberg } 63319f9361aSSagi Grimberg 63419f9361aSSagi Grimberg return 0; 63519f9361aSSagi Grimberg } 63619f9361aSSagi Grimberg 637f7b7c06fSNicholas Bellinger static sense_reason_t 638499bf77bSNicholas Bellinger sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, 63919f9361aSSagi Grimberg u32 sectors, bool is_write) 640499bf77bSNicholas Bellinger { 64119f9361aSSagi Grimberg u8 protect = cdb[1] >> 5; 64219f9361aSSagi Grimberg 643f7b7c06fSNicholas Bellinger if (!cmd->t_prot_sg || !cmd->t_prot_nents) { 644f7b7c06fSNicholas Bellinger if (protect && !dev->dev_attrib.pi_prot_type) { 645f7b7c06fSNicholas Bellinger pr_err("CDB contains protect bit, but device does not" 646f7b7c06fSNicholas Bellinger " advertise PROTECT=1 feature bit\n"); 647f7b7c06fSNicholas Bellinger return TCM_INVALID_CDB_FIELD; 648f7b7c06fSNicholas Bellinger } 
649f7b7c06fSNicholas Bellinger if (cmd->prot_pto) 650f7b7c06fSNicholas Bellinger return TCM_NO_SENSE; 651f7b7c06fSNicholas Bellinger } 652499bf77bSNicholas Bellinger 653499bf77bSNicholas Bellinger switch (dev->dev_attrib.pi_prot_type) { 654499bf77bSNicholas Bellinger case TARGET_DIF_TYPE3_PROT: 655499bf77bSNicholas Bellinger cmd->reftag_seed = 0xffffffff; 656499bf77bSNicholas Bellinger break; 657499bf77bSNicholas Bellinger case TARGET_DIF_TYPE2_PROT: 65819f9361aSSagi Grimberg if (protect) 659f7b7c06fSNicholas Bellinger return TCM_INVALID_CDB_FIELD; 660499bf77bSNicholas Bellinger 661499bf77bSNicholas Bellinger cmd->reftag_seed = cmd->t_task_lba; 662499bf77bSNicholas Bellinger break; 663499bf77bSNicholas Bellinger case TARGET_DIF_TYPE1_PROT: 664499bf77bSNicholas Bellinger cmd->reftag_seed = cmd->t_task_lba; 665499bf77bSNicholas Bellinger break; 666499bf77bSNicholas Bellinger case TARGET_DIF_TYPE0_PROT: 667499bf77bSNicholas Bellinger default: 668f7b7c06fSNicholas Bellinger return TCM_NO_SENSE; 669499bf77bSNicholas Bellinger } 670499bf77bSNicholas Bellinger 67119f9361aSSagi Grimberg if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, 67219f9361aSSagi Grimberg is_write, cmd)) 673f7b7c06fSNicholas Bellinger return TCM_INVALID_CDB_FIELD; 67419f9361aSSagi Grimberg 675499bf77bSNicholas Bellinger cmd->prot_type = dev->dev_attrib.pi_prot_type; 676499bf77bSNicholas Bellinger cmd->prot_length = dev->prot_length * sectors; 677e2a4f55cSSagi Grimberg 678e2a4f55cSSagi Grimberg /** 679e2a4f55cSSagi Grimberg * In case protection information exists over the wire 680e2a4f55cSSagi Grimberg * we modify command data length to describe pure data. 
681e2a4f55cSSagi Grimberg * The actual transfer length is data length + protection 682e2a4f55cSSagi Grimberg * length 683e2a4f55cSSagi Grimberg **/ 684e2a4f55cSSagi Grimberg if (protect) 685e2a4f55cSSagi Grimberg cmd->data_length = sectors * dev->dev_attrib.block_size; 686e2a4f55cSSagi Grimberg 687e2a4f55cSSagi Grimberg pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " 688e2a4f55cSSagi Grimberg "prot_op=%d prot_checks=%d\n", 689e2a4f55cSSagi Grimberg __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, 69003abad9eSSagi Grimberg cmd->prot_op, cmd->prot_checks); 691499bf77bSNicholas Bellinger 692f7b7c06fSNicholas Bellinger return TCM_NO_SENSE; 693499bf77bSNicholas Bellinger } 694499bf77bSNicholas Bellinger 695fde9f50fSNicholas Bellinger static int 696fde9f50fSNicholas Bellinger sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) 697fde9f50fSNicholas Bellinger { 698fde9f50fSNicholas Bellinger if (cdb[1] & 0x10) { 699fde9f50fSNicholas Bellinger if (!dev->dev_attrib.emulate_dpo) { 700fde9f50fSNicholas Bellinger pr_err("Got CDB: 0x%02x with DPO bit set, but device" 701fde9f50fSNicholas Bellinger " does not advertise support for DPO\n", cdb[0]); 702fde9f50fSNicholas Bellinger return -EINVAL; 703fde9f50fSNicholas Bellinger } 704fde9f50fSNicholas Bellinger } 705fde9f50fSNicholas Bellinger if (cdb[1] & 0x8) { 706fde9f50fSNicholas Bellinger if (!dev->dev_attrib.emulate_fua_write || 707fde9f50fSNicholas Bellinger !dev->dev_attrib.emulate_write_cache) { 708fde9f50fSNicholas Bellinger pr_err("Got CDB: 0x%02x with FUA bit set, but device" 709fde9f50fSNicholas Bellinger " does not advertise support for FUA write\n", 710fde9f50fSNicholas Bellinger cdb[0]); 711fde9f50fSNicholas Bellinger return -EINVAL; 712fde9f50fSNicholas Bellinger } 713fde9f50fSNicholas Bellinger cmd->se_cmd_flags |= SCF_FUA; 714fde9f50fSNicholas Bellinger } 715fde9f50fSNicholas Bellinger return 0; 716fde9f50fSNicholas Bellinger } 717fde9f50fSNicholas Bellinger 
/**
 * sbc_parse_cdb - parse an SBC CDB and wire up its execution handlers
 * @cmd: command whose CDB sits in cmd->t_task_cdb
 * @ops: backend-provided SBC operations (rw, sync_cache, unmap, ...)
 *
 * Decodes the opcode, extracts the LBA and sector count, applies the
 * DPO/FUA and T10-PI checks for the READ/WRITE family, and assigns
 * cmd->execute_cmd (plus any transport_complete_callback) for later
 * execution.  Data CDBs are then range-checked against the device size
 * and the fabric/backend max-sectors limits before the expected
 * transfer size is validated by target_cmd_size_check().
 *
 * Returns TCM_NO_SENSE via target_cmd_size_check() on success, or a
 * specific sense_reason_t describing why the CDB was rejected.
 */
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		/* Bidirectional command: requires DMA_TO_DEVICE plus SCF_BIDI */
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		/* Service action lives in bytes 8-9 of the variable-length CDB */
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run during after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			/* WRITE_SAME transfers exactly one block of payload */
			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		/* Allocation length in bytes 10-13 */
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			/* Range-check the LBA even though this is not a data CDB */
			goto check_lba;
		}
		/* No backend support: emulate as a successful no-op */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		/* Not an SBC opcode: fall back to the SPC parser */
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		/* first clause catches 64-bit wrap-around of lba + sectors */
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		/* COMPARE_AND_WRITE already set size to 2x above */
		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

/* All SBC backends present themselves as direct-access block devices. */
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

/*
 * Common UNMAP parameter-list walker: validates the list header and
 * each 16-byte block descriptor, then invokes @do_unmap_fn once per
 * lba/range pair.  @priv is passed through to the callback untouched.
 * Completes the command with GOOD status itself on success.
 */
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	/* An empty parameter list is explicitly allowed: nothing to unmap */
	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* UNMAP data length (dl) and block descriptor data length (bd_dl) */
	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	/* Clamp bd_dl to the payload actually transferred (minus header) */
	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	/* Walk the 16-byte descriptors: 8-byte LBA + 4-byte range + pad */
	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		/* get_blocks() returns the last LBA, hence the +1 */
		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);

/*
 * Generate T10-PI (DIF v1) tuples for the data in cmd->t_data_sg and
 * store them into the protection SGL at cmd->t_prot_sg: a CRC16 guard
 * tag per logical block, a Type 1 reference tag set to the low 32 bits
 * of the LBA, and a cleared application tag.
 */
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			/* Advance to the next protection SG entry when full */
			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			/* note: guard/app tags print as raw big-endian values */
			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}

/*
 * Verify one DIF v1 tuple @sdt against the block data @p at @sector.
 * The CRC16 guard tag is always checked; the reference tag is checked
 * against the low 32 LBA bits for Type 1 and against @ei_lba for
 * Type 2.  Returns 0 on success or the matching
 * TCM_LOGICAL_BLOCK_*_CHECK_FAILED sense code.
 */
static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

/*
 * Copy DIF tuples between the command's protection SGL and @sg,
 * starting at @sg_off; @read presumably selects the direction.
 * NOTE(review): the remainder of this function lies outside the
 * reviewed chunk - confirm semantics against the full body.
 */
static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left
123610762e80SNicholas Bellinger unsigned int offset = sg_off; 123741861fa8SNicholas Bellinger 123841861fa8SNicholas Bellinger left = sectors * dev->prot_length; 123941861fa8SNicholas Bellinger 124041861fa8SNicholas Bellinger for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 124116c0ae02SSagi Grimberg unsigned int psg_len, copied = 0; 124241861fa8SNicholas Bellinger 124316c0ae02SSagi Grimberg paddr = kmap_atomic(sg_page(psg)) + psg->offset; 124416c0ae02SSagi Grimberg psg_len = min(left, psg->length); 124516c0ae02SSagi Grimberg while (psg_len) { 124616c0ae02SSagi Grimberg len = min(psg_len, sg->length - offset); 124716c0ae02SSagi Grimberg addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; 124816c0ae02SSagi Grimberg 124916c0ae02SSagi Grimberg if (read) 125016c0ae02SSagi Grimberg memcpy(paddr + copied, addr, len); 125116c0ae02SSagi Grimberg else 125216c0ae02SSagi Grimberg memcpy(addr, paddr + copied, len); 125316c0ae02SSagi Grimberg 125416c0ae02SSagi Grimberg left -= len; 125516c0ae02SSagi Grimberg offset += len; 125616c0ae02SSagi Grimberg copied += len; 125716c0ae02SSagi Grimberg psg_len -= len; 125816c0ae02SSagi Grimberg 1259d6a65fdcSSagi Grimberg if (offset >= sg->length) { 1260d6a65fdcSSagi Grimberg sg = sg_next(sg); 1261d6a65fdcSSagi Grimberg offset = 0; 1262d6a65fdcSSagi Grimberg } 126341861fa8SNicholas Bellinger kunmap_atomic(addr); 126441861fa8SNicholas Bellinger } 126516c0ae02SSagi Grimberg kunmap_atomic(paddr); 126616c0ae02SSagi Grimberg } 126741861fa8SNicholas Bellinger } 126841861fa8SNicholas Bellinger 126941861fa8SNicholas Bellinger sense_reason_t 127041861fa8SNicholas Bellinger sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, 127141861fa8SNicholas Bellinger unsigned int ei_lba, struct scatterlist *sg, int sg_off) 127241861fa8SNicholas Bellinger { 127341861fa8SNicholas Bellinger struct se_device *dev = cmd->se_dev; 127441861fa8SNicholas Bellinger struct se_dif_v1_tuple *sdt; 127541861fa8SNicholas Bellinger struct 
scatterlist *dsg, *psg = cmd->t_prot_sg; 127641861fa8SNicholas Bellinger sector_t sector = start; 127741861fa8SNicholas Bellinger void *daddr, *paddr; 127841861fa8SNicholas Bellinger int i, j, offset = 0; 127941861fa8SNicholas Bellinger sense_reason_t rc; 128041861fa8SNicholas Bellinger 128141861fa8SNicholas Bellinger for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { 128241861fa8SNicholas Bellinger daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 128341861fa8SNicholas Bellinger paddr = kmap_atomic(sg_page(psg)) + psg->offset; 128441861fa8SNicholas Bellinger 128541861fa8SNicholas Bellinger for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { 128641861fa8SNicholas Bellinger 128741861fa8SNicholas Bellinger if (offset >= psg->length) { 128841861fa8SNicholas Bellinger kunmap_atomic(paddr); 128941861fa8SNicholas Bellinger psg = sg_next(psg); 129041861fa8SNicholas Bellinger paddr = kmap_atomic(sg_page(psg)) + psg->offset; 129141861fa8SNicholas Bellinger offset = 0; 129241861fa8SNicholas Bellinger } 129341861fa8SNicholas Bellinger 129441861fa8SNicholas Bellinger sdt = paddr + offset; 129541861fa8SNicholas Bellinger 129641861fa8SNicholas Bellinger pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x" 129741861fa8SNicholas Bellinger " app_tag: 0x%04x ref_tag: %u\n", 129841861fa8SNicholas Bellinger (unsigned long long)sector, sdt->guard_tag, 129941861fa8SNicholas Bellinger sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 130041861fa8SNicholas Bellinger 130141861fa8SNicholas Bellinger rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, 130241861fa8SNicholas Bellinger ei_lba); 130341861fa8SNicholas Bellinger if (rc) { 130441861fa8SNicholas Bellinger kunmap_atomic(paddr); 130541861fa8SNicholas Bellinger kunmap_atomic(daddr); 130676736db3SSagi Grimberg cmd->bad_sector = sector; 130741861fa8SNicholas Bellinger return rc; 130841861fa8SNicholas Bellinger } 130941861fa8SNicholas Bellinger 131041861fa8SNicholas Bellinger sector++; 131141861fa8SNicholas Bellinger ei_lba++; 
131241861fa8SNicholas Bellinger offset += sizeof(struct se_dif_v1_tuple); 131341861fa8SNicholas Bellinger } 131441861fa8SNicholas Bellinger 131541861fa8SNicholas Bellinger kunmap_atomic(paddr); 131641861fa8SNicholas Bellinger kunmap_atomic(daddr); 131741861fa8SNicholas Bellinger } 131841861fa8SNicholas Bellinger sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); 131941861fa8SNicholas Bellinger 132041861fa8SNicholas Bellinger return 0; 132141861fa8SNicholas Bellinger } 132241861fa8SNicholas Bellinger EXPORT_SYMBOL(sbc_dif_verify_write); 132341861fa8SNicholas Bellinger 1324395ccb25SNicholas Bellinger static sense_reason_t 1325395ccb25SNicholas Bellinger __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, 132641861fa8SNicholas Bellinger unsigned int ei_lba, struct scatterlist *sg, int sg_off) 132741861fa8SNicholas Bellinger { 132841861fa8SNicholas Bellinger struct se_device *dev = cmd->se_dev; 132941861fa8SNicholas Bellinger struct se_dif_v1_tuple *sdt; 1330fc272ec7SSagi Grimberg struct scatterlist *dsg, *psg = sg; 133141861fa8SNicholas Bellinger sector_t sector = start; 133241861fa8SNicholas Bellinger void *daddr, *paddr; 133341861fa8SNicholas Bellinger int i, j, offset = sg_off; 133441861fa8SNicholas Bellinger sense_reason_t rc; 133541861fa8SNicholas Bellinger 133641861fa8SNicholas Bellinger for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { 133741861fa8SNicholas Bellinger daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1338fc272ec7SSagi Grimberg paddr = kmap_atomic(sg_page(psg)) + sg->offset; 133941861fa8SNicholas Bellinger 134041861fa8SNicholas Bellinger for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { 134141861fa8SNicholas Bellinger 1342fc272ec7SSagi Grimberg if (offset >= psg->length) { 134341861fa8SNicholas Bellinger kunmap_atomic(paddr); 1344fc272ec7SSagi Grimberg psg = sg_next(psg); 1345fc272ec7SSagi Grimberg paddr = kmap_atomic(sg_page(psg)) + psg->offset; 134641861fa8SNicholas Bellinger offset = 0; 
134741861fa8SNicholas Bellinger } 134841861fa8SNicholas Bellinger 134941861fa8SNicholas Bellinger sdt = paddr + offset; 135041861fa8SNicholas Bellinger 135141861fa8SNicholas Bellinger pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" 135241861fa8SNicholas Bellinger " app_tag: 0x%04x ref_tag: %u\n", 135341861fa8SNicholas Bellinger (unsigned long long)sector, sdt->guard_tag, 135441861fa8SNicholas Bellinger sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 135541861fa8SNicholas Bellinger 135641861fa8SNicholas Bellinger if (sdt->app_tag == cpu_to_be16(0xffff)) { 135741861fa8SNicholas Bellinger sector++; 135841861fa8SNicholas Bellinger offset += sizeof(struct se_dif_v1_tuple); 135941861fa8SNicholas Bellinger continue; 136041861fa8SNicholas Bellinger } 136141861fa8SNicholas Bellinger 136241861fa8SNicholas Bellinger rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, 136341861fa8SNicholas Bellinger ei_lba); 136441861fa8SNicholas Bellinger if (rc) { 136541861fa8SNicholas Bellinger kunmap_atomic(paddr); 136641861fa8SNicholas Bellinger kunmap_atomic(daddr); 136776736db3SSagi Grimberg cmd->bad_sector = sector; 136841861fa8SNicholas Bellinger return rc; 136941861fa8SNicholas Bellinger } 137041861fa8SNicholas Bellinger 137141861fa8SNicholas Bellinger sector++; 137241861fa8SNicholas Bellinger ei_lba++; 137341861fa8SNicholas Bellinger offset += sizeof(struct se_dif_v1_tuple); 137441861fa8SNicholas Bellinger } 137541861fa8SNicholas Bellinger 137641861fa8SNicholas Bellinger kunmap_atomic(paddr); 137741861fa8SNicholas Bellinger kunmap_atomic(daddr); 137841861fa8SNicholas Bellinger } 137941861fa8SNicholas Bellinger 138041861fa8SNicholas Bellinger return 0; 138141861fa8SNicholas Bellinger } 1382395ccb25SNicholas Bellinger 1383395ccb25SNicholas Bellinger sense_reason_t 1384395ccb25SNicholas Bellinger sbc_dif_read_strip(struct se_cmd *cmd) 1385395ccb25SNicholas Bellinger { 1386395ccb25SNicholas Bellinger struct se_device *dev = cmd->se_dev; 1387395ccb25SNicholas Bellinger u32 sectors = 
cmd->prot_length / dev->prot_length; 1388395ccb25SNicholas Bellinger 1389395ccb25SNicholas Bellinger return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, 1390395ccb25SNicholas Bellinger cmd->t_prot_sg, 0); 1391395ccb25SNicholas Bellinger } 1392395ccb25SNicholas Bellinger 1393395ccb25SNicholas Bellinger sense_reason_t 1394395ccb25SNicholas Bellinger sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1395395ccb25SNicholas Bellinger unsigned int ei_lba, struct scatterlist *sg, int sg_off) 1396395ccb25SNicholas Bellinger { 1397395ccb25SNicholas Bellinger sense_reason_t rc; 1398395ccb25SNicholas Bellinger 1399395ccb25SNicholas Bellinger rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off); 1400395ccb25SNicholas Bellinger if (rc) 1401395ccb25SNicholas Bellinger return rc; 1402395ccb25SNicholas Bellinger 1403395ccb25SNicholas Bellinger sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); 1404395ccb25SNicholas Bellinger return 0; 1405395ccb25SNicholas Bellinger } 140641861fa8SNicholas Bellinger EXPORT_SYMBOL(sbc_dif_verify_read); 1407