1d6e0175cSChristoph Hellwig /* 2d6e0175cSChristoph Hellwig * SCSI Block Commands (SBC) parsing and emulation. 3d6e0175cSChristoph Hellwig * 4fd9a11d7SNicholas Bellinger * (c) Copyright 2002-2012 RisingTide Systems LLC. 5d6e0175cSChristoph Hellwig * 6d6e0175cSChristoph Hellwig * Nicholas A. Bellinger <nab@kernel.org> 7d6e0175cSChristoph Hellwig * 8d6e0175cSChristoph Hellwig * This program is free software; you can redistribute it and/or modify 9d6e0175cSChristoph Hellwig * it under the terms of the GNU General Public License as published by 10d6e0175cSChristoph Hellwig * the Free Software Foundation; either version 2 of the License, or 11d6e0175cSChristoph Hellwig * (at your option) any later version. 12d6e0175cSChristoph Hellwig * 13d6e0175cSChristoph Hellwig * This program is distributed in the hope that it will be useful, 14d6e0175cSChristoph Hellwig * but WITHOUT ANY WARRANTY; without even the implied warranty of 15d6e0175cSChristoph Hellwig * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16d6e0175cSChristoph Hellwig * GNU General Public License for more details. 17d6e0175cSChristoph Hellwig * 18d6e0175cSChristoph Hellwig * You should have received a copy of the GNU General Public License 19d6e0175cSChristoph Hellwig * along with this program; if not, write to the Free Software 20d6e0175cSChristoph Hellwig * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
21d6e0175cSChristoph Hellwig */ 22d6e0175cSChristoph Hellwig 23d6e0175cSChristoph Hellwig #include <linux/kernel.h> 24d6e0175cSChristoph Hellwig #include <linux/module.h> 25d6e0175cSChristoph Hellwig #include <linux/ratelimit.h> 26d6e0175cSChristoph Hellwig #include <asm/unaligned.h> 27d6e0175cSChristoph Hellwig #include <scsi/scsi.h> 28d6e0175cSChristoph Hellwig 29d6e0175cSChristoph Hellwig #include <target/target_core_base.h> 30d6e0175cSChristoph Hellwig #include <target/target_core_backend.h> 31d6e0175cSChristoph Hellwig #include <target/target_core_fabric.h> 32d6e0175cSChristoph Hellwig 33d6e0175cSChristoph Hellwig #include "target_core_internal.h" 34d6e0175cSChristoph Hellwig #include "target_core_ua.h" 35d6e0175cSChristoph Hellwig 36d6e0175cSChristoph Hellwig 37de103c93SChristoph Hellwig static sense_reason_t 38de103c93SChristoph Hellwig sbc_emulate_readcapacity(struct se_cmd *cmd) 391fd032eeSChristoph Hellwig { 401fd032eeSChristoph Hellwig struct se_device *dev = cmd->se_dev; 418dc8632aSRoland Dreier unsigned char *cdb = cmd->t_task_cdb; 421fd032eeSChristoph Hellwig unsigned long long blocks_long = dev->transport->get_blocks(dev); 43a50da144SPaolo Bonzini unsigned char *rbuf; 44a50da144SPaolo Bonzini unsigned char buf[8]; 451fd032eeSChristoph Hellwig u32 blocks; 461fd032eeSChristoph Hellwig 478dc8632aSRoland Dreier /* 488dc8632aSRoland Dreier * SBC-2 says: 498dc8632aSRoland Dreier * If the PMI bit is set to zero and the LOGICAL BLOCK 508dc8632aSRoland Dreier * ADDRESS field is not set to zero, the device server shall 518dc8632aSRoland Dreier * terminate the command with CHECK CONDITION status with 528dc8632aSRoland Dreier * the sense key set to ILLEGAL REQUEST and the additional 538dc8632aSRoland Dreier * sense code set to INVALID FIELD IN CDB. 
548dc8632aSRoland Dreier * 558dc8632aSRoland Dreier * In SBC-3, these fields are obsolete, but some SCSI 568dc8632aSRoland Dreier * compliance tests actually check this, so we might as well 578dc8632aSRoland Dreier * follow SBC-2. 588dc8632aSRoland Dreier */ 598dc8632aSRoland Dreier if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5])) 608dc8632aSRoland Dreier return TCM_INVALID_CDB_FIELD; 618dc8632aSRoland Dreier 621fd032eeSChristoph Hellwig if (blocks_long >= 0x00000000ffffffff) 631fd032eeSChristoph Hellwig blocks = 0xffffffff; 641fd032eeSChristoph Hellwig else 651fd032eeSChristoph Hellwig blocks = (u32)blocks_long; 661fd032eeSChristoph Hellwig 671fd032eeSChristoph Hellwig buf[0] = (blocks >> 24) & 0xff; 681fd032eeSChristoph Hellwig buf[1] = (blocks >> 16) & 0xff; 691fd032eeSChristoph Hellwig buf[2] = (blocks >> 8) & 0xff; 701fd032eeSChristoph Hellwig buf[3] = blocks & 0xff; 710fd97ccfSChristoph Hellwig buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; 720fd97ccfSChristoph Hellwig buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; 730fd97ccfSChristoph Hellwig buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; 740fd97ccfSChristoph Hellwig buf[7] = dev->dev_attrib.block_size & 0xff; 751fd032eeSChristoph Hellwig 76a50da144SPaolo Bonzini rbuf = transport_kmap_data_sg(cmd); 778b4b0dcbSNicholas Bellinger if (rbuf) { 78a50da144SPaolo Bonzini memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 791fd032eeSChristoph Hellwig transport_kunmap_data_sg(cmd); 808b4b0dcbSNicholas Bellinger } 811fd032eeSChristoph Hellwig 821fd032eeSChristoph Hellwig target_complete_cmd(cmd, GOOD); 831fd032eeSChristoph Hellwig return 0; 841fd032eeSChristoph Hellwig } 851fd032eeSChristoph Hellwig 86de103c93SChristoph Hellwig static sense_reason_t 87de103c93SChristoph Hellwig sbc_emulate_readcapacity_16(struct se_cmd *cmd) 881fd032eeSChristoph Hellwig { 891fd032eeSChristoph Hellwig struct se_device *dev = cmd->se_dev; 90a50da144SPaolo Bonzini unsigned char *rbuf; 91a50da144SPaolo 
Bonzini unsigned char buf[32]; 921fd032eeSChristoph Hellwig unsigned long long blocks = dev->transport->get_blocks(dev); 931fd032eeSChristoph Hellwig 94a50da144SPaolo Bonzini memset(buf, 0, sizeof(buf)); 951fd032eeSChristoph Hellwig buf[0] = (blocks >> 56) & 0xff; 961fd032eeSChristoph Hellwig buf[1] = (blocks >> 48) & 0xff; 971fd032eeSChristoph Hellwig buf[2] = (blocks >> 40) & 0xff; 981fd032eeSChristoph Hellwig buf[3] = (blocks >> 32) & 0xff; 991fd032eeSChristoph Hellwig buf[4] = (blocks >> 24) & 0xff; 1001fd032eeSChristoph Hellwig buf[5] = (blocks >> 16) & 0xff; 1011fd032eeSChristoph Hellwig buf[6] = (blocks >> 8) & 0xff; 1021fd032eeSChristoph Hellwig buf[7] = blocks & 0xff; 1030fd97ccfSChristoph Hellwig buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; 1040fd97ccfSChristoph Hellwig buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 1050fd97ccfSChristoph Hellwig buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 1060fd97ccfSChristoph Hellwig buf[11] = dev->dev_attrib.block_size & 0xff; 1071fd032eeSChristoph Hellwig /* 1081fd032eeSChristoph Hellwig * Set Thin Provisioning Enable bit following sbc3r22 in section 1091fd032eeSChristoph Hellwig * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 
1101fd032eeSChristoph Hellwig */ 1110fd97ccfSChristoph Hellwig if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 1121fd032eeSChristoph Hellwig buf[14] = 0x80; 1131fd032eeSChristoph Hellwig 114a50da144SPaolo Bonzini rbuf = transport_kmap_data_sg(cmd); 1158b4b0dcbSNicholas Bellinger if (rbuf) { 116a50da144SPaolo Bonzini memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 1171fd032eeSChristoph Hellwig transport_kunmap_data_sg(cmd); 1188b4b0dcbSNicholas Bellinger } 1191fd032eeSChristoph Hellwig 1201fd032eeSChristoph Hellwig target_complete_cmd(cmd, GOOD); 1211fd032eeSChristoph Hellwig return 0; 1221fd032eeSChristoph Hellwig } 1231fd032eeSChristoph Hellwig 124972b29c8SRoland Dreier sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) 1251fd032eeSChristoph Hellwig { 1261fd032eeSChristoph Hellwig u32 num_blocks; 1271fd032eeSChristoph Hellwig 1281fd032eeSChristoph Hellwig if (cmd->t_task_cdb[0] == WRITE_SAME) 1291fd032eeSChristoph Hellwig num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); 1301fd032eeSChristoph Hellwig else if (cmd->t_task_cdb[0] == WRITE_SAME_16) 1311fd032eeSChristoph Hellwig num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); 1321fd032eeSChristoph Hellwig else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ 1331fd032eeSChristoph Hellwig num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); 1341fd032eeSChristoph Hellwig 1351fd032eeSChristoph Hellwig /* 1361fd032eeSChristoph Hellwig * Use the explicit range when non zero is supplied, otherwise calculate 1371fd032eeSChristoph Hellwig * the remaining range based on ->get_blocks() - starting LBA. 
1381fd032eeSChristoph Hellwig */ 1396f974e8cSChristoph Hellwig if (num_blocks) 1406f974e8cSChristoph Hellwig return num_blocks; 1411fd032eeSChristoph Hellwig 1426f974e8cSChristoph Hellwig return cmd->se_dev->transport->get_blocks(cmd->se_dev) - 1436f974e8cSChristoph Hellwig cmd->t_task_lba + 1; 1441fd032eeSChristoph Hellwig } 145972b29c8SRoland Dreier EXPORT_SYMBOL(sbc_get_write_same_sectors); 1461fd032eeSChristoph Hellwig 147de103c93SChristoph Hellwig static sense_reason_t 1481920ed61SNicholas Bellinger sbc_emulate_noop(struct se_cmd *cmd) 1491a1ff38cSBernhard Kohl { 1501a1ff38cSBernhard Kohl target_complete_cmd(cmd, GOOD); 1511a1ff38cSBernhard Kohl return 0; 1521a1ff38cSBernhard Kohl } 1531a1ff38cSBernhard Kohl 154d6e0175cSChristoph Hellwig static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 155d6e0175cSChristoph Hellwig { 1560fd97ccfSChristoph Hellwig return cmd->se_dev->dev_attrib.block_size * sectors; 157d6e0175cSChristoph Hellwig } 158d6e0175cSChristoph Hellwig 159d6e0175cSChristoph Hellwig static int sbc_check_valid_sectors(struct se_cmd *cmd) 160d6e0175cSChristoph Hellwig { 161d6e0175cSChristoph Hellwig struct se_device *dev = cmd->se_dev; 162d6e0175cSChristoph Hellwig unsigned long long end_lba; 163d6e0175cSChristoph Hellwig u32 sectors; 164d6e0175cSChristoph Hellwig 1650fd97ccfSChristoph Hellwig sectors = cmd->data_length / dev->dev_attrib.block_size; 166d6e0175cSChristoph Hellwig end_lba = dev->transport->get_blocks(dev) + 1; 167d6e0175cSChristoph Hellwig 168d6e0175cSChristoph Hellwig if (cmd->t_task_lba + sectors > end_lba) { 169d6e0175cSChristoph Hellwig pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", 170d6e0175cSChristoph Hellwig cmd->t_task_lba, sectors, end_lba); 171d6e0175cSChristoph Hellwig return -EINVAL; 172d6e0175cSChristoph Hellwig } 173d6e0175cSChristoph Hellwig 174d6e0175cSChristoph Hellwig return 0; 175d6e0175cSChristoph Hellwig } 176d6e0175cSChristoph Hellwig 177d6e0175cSChristoph Hellwig static inline u32 
transport_get_sectors_6(unsigned char *cdb) 178d6e0175cSChristoph Hellwig { 179d6e0175cSChristoph Hellwig /* 180d6e0175cSChristoph Hellwig * Use 8-bit sector value. SBC-3 says: 181d6e0175cSChristoph Hellwig * 182d6e0175cSChristoph Hellwig * A TRANSFER LENGTH field set to zero specifies that 256 183d6e0175cSChristoph Hellwig * logical blocks shall be written. Any other value 184d6e0175cSChristoph Hellwig * specifies the number of logical blocks that shall be 185d6e0175cSChristoph Hellwig * written. 186d6e0175cSChristoph Hellwig */ 187d6e0175cSChristoph Hellwig return cdb[4] ? : 256; 188d6e0175cSChristoph Hellwig } 189d6e0175cSChristoph Hellwig 190d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_10(unsigned char *cdb) 191d6e0175cSChristoph Hellwig { 192d6e0175cSChristoph Hellwig return (u32)(cdb[7] << 8) + cdb[8]; 193d6e0175cSChristoph Hellwig } 194d6e0175cSChristoph Hellwig 195d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_12(unsigned char *cdb) 196d6e0175cSChristoph Hellwig { 197d6e0175cSChristoph Hellwig return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 198d6e0175cSChristoph Hellwig } 199d6e0175cSChristoph Hellwig 200d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_16(unsigned char *cdb) 201d6e0175cSChristoph Hellwig { 202d6e0175cSChristoph Hellwig return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 203d6e0175cSChristoph Hellwig (cdb[12] << 8) + cdb[13]; 204d6e0175cSChristoph Hellwig } 205d6e0175cSChristoph Hellwig 206d6e0175cSChristoph Hellwig /* 207d6e0175cSChristoph Hellwig * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 208d6e0175cSChristoph Hellwig */ 209d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_32(unsigned char *cdb) 210d6e0175cSChristoph Hellwig { 211d6e0175cSChristoph Hellwig return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 212d6e0175cSChristoph Hellwig (cdb[30] << 8) + cdb[31]; 213d6e0175cSChristoph Hellwig 214d6e0175cSChristoph Hellwig } 
215d6e0175cSChristoph Hellwig 216d6e0175cSChristoph Hellwig static inline u32 transport_lba_21(unsigned char *cdb) 217d6e0175cSChristoph Hellwig { 218d6e0175cSChristoph Hellwig return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 219d6e0175cSChristoph Hellwig } 220d6e0175cSChristoph Hellwig 221d6e0175cSChristoph Hellwig static inline u32 transport_lba_32(unsigned char *cdb) 222d6e0175cSChristoph Hellwig { 223d6e0175cSChristoph Hellwig return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 224d6e0175cSChristoph Hellwig } 225d6e0175cSChristoph Hellwig 226d6e0175cSChristoph Hellwig static inline unsigned long long transport_lba_64(unsigned char *cdb) 227d6e0175cSChristoph Hellwig { 228d6e0175cSChristoph Hellwig unsigned int __v1, __v2; 229d6e0175cSChristoph Hellwig 230d6e0175cSChristoph Hellwig __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 231d6e0175cSChristoph Hellwig __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 232d6e0175cSChristoph Hellwig 233d6e0175cSChristoph Hellwig return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 234d6e0175cSChristoph Hellwig } 235d6e0175cSChristoph Hellwig 236d6e0175cSChristoph Hellwig /* 237d6e0175cSChristoph Hellwig * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 238d6e0175cSChristoph Hellwig */ 239d6e0175cSChristoph Hellwig static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 240d6e0175cSChristoph Hellwig { 241d6e0175cSChristoph Hellwig unsigned int __v1, __v2; 242d6e0175cSChristoph Hellwig 243d6e0175cSChristoph Hellwig __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 244d6e0175cSChristoph Hellwig __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 245d6e0175cSChristoph Hellwig 246d6e0175cSChristoph Hellwig return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 247d6e0175cSChristoph Hellwig } 248d6e0175cSChristoph Hellwig 249cd063befSNicholas Bellinger static sense_reason_t 250cd063befSNicholas 
Bellinger sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) 251d6e0175cSChristoph Hellwig { 252972b29c8SRoland Dreier unsigned int sectors = sbc_get_write_same_sectors(cmd); 253773cbaf7SNicholas Bellinger 254d6e0175cSChristoph Hellwig if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 255d6e0175cSChristoph Hellwig pr_err("WRITE_SAME PBDATA and LBDATA" 256d6e0175cSChristoph Hellwig " bits not supported for Block Discard" 257d6e0175cSChristoph Hellwig " Emulation\n"); 258cd063befSNicholas Bellinger return TCM_UNSUPPORTED_SCSI_OPCODE; 259d6e0175cSChristoph Hellwig } 260773cbaf7SNicholas Bellinger if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { 261773cbaf7SNicholas Bellinger pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", 262773cbaf7SNicholas Bellinger sectors, cmd->se_dev->dev_attrib.max_write_same_len); 263773cbaf7SNicholas Bellinger return TCM_INVALID_CDB_FIELD; 264773cbaf7SNicholas Bellinger } 265d6e0175cSChristoph Hellwig /* 266cd063befSNicholas Bellinger * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 267cd063befSNicholas Bellinger * translated into block discard requests within backend code. 
268d6e0175cSChristoph Hellwig */ 269cd063befSNicholas Bellinger if (flags[0] & 0x08) { 270cd063befSNicholas Bellinger if (!ops->execute_write_same_unmap) 271cd063befSNicholas Bellinger return TCM_UNSUPPORTED_SCSI_OPCODE; 272d6e0175cSChristoph Hellwig 273cd063befSNicholas Bellinger cmd->execute_cmd = ops->execute_write_same_unmap; 274cd063befSNicholas Bellinger return 0; 275cd063befSNicholas Bellinger } 276cd063befSNicholas Bellinger if (!ops->execute_write_same) 277cd063befSNicholas Bellinger return TCM_UNSUPPORTED_SCSI_OPCODE; 278cd063befSNicholas Bellinger 279cd063befSNicholas Bellinger cmd->execute_cmd = ops->execute_write_same; 280d6e0175cSChristoph Hellwig return 0; 281d6e0175cSChristoph Hellwig } 282d6e0175cSChristoph Hellwig 283a6b0133cSNicholas Bellinger static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) 284d6e0175cSChristoph Hellwig { 285d6e0175cSChristoph Hellwig unsigned char *buf, *addr; 286d6e0175cSChristoph Hellwig struct scatterlist *sg; 287d6e0175cSChristoph Hellwig unsigned int offset; 288a6b0133cSNicholas Bellinger sense_reason_t ret = TCM_NO_SENSE; 289a6b0133cSNicholas Bellinger int i, count; 290d6e0175cSChristoph Hellwig /* 291d6e0175cSChristoph Hellwig * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 292d6e0175cSChristoph Hellwig * 293d6e0175cSChristoph Hellwig * 1) read the specified logical block(s); 294d6e0175cSChristoph Hellwig * 2) transfer logical blocks from the data-out buffer; 295d6e0175cSChristoph Hellwig * 3) XOR the logical blocks transferred from the data-out buffer with 296d6e0175cSChristoph Hellwig * the logical blocks read, storing the resulting XOR data in a buffer; 297d6e0175cSChristoph Hellwig * 4) if the DISABLE WRITE bit is set to zero, then write the logical 298d6e0175cSChristoph Hellwig * blocks transferred from the data-out buffer; and 299d6e0175cSChristoph Hellwig * 5) transfer the resulting XOR data to the data-in buffer. 
300d6e0175cSChristoph Hellwig */ 301d6e0175cSChristoph Hellwig buf = kmalloc(cmd->data_length, GFP_KERNEL); 302d6e0175cSChristoph Hellwig if (!buf) { 303d6e0175cSChristoph Hellwig pr_err("Unable to allocate xor_callback buf\n"); 304a6b0133cSNicholas Bellinger return TCM_OUT_OF_RESOURCES; 305d6e0175cSChristoph Hellwig } 306d6e0175cSChristoph Hellwig /* 307d6e0175cSChristoph Hellwig * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 308d6e0175cSChristoph Hellwig * into the locally allocated *buf 309d6e0175cSChristoph Hellwig */ 310d6e0175cSChristoph Hellwig sg_copy_to_buffer(cmd->t_data_sg, 311d6e0175cSChristoph Hellwig cmd->t_data_nents, 312d6e0175cSChristoph Hellwig buf, 313d6e0175cSChristoph Hellwig cmd->data_length); 314d6e0175cSChristoph Hellwig 315d6e0175cSChristoph Hellwig /* 316d6e0175cSChristoph Hellwig * Now perform the XOR against the BIDI read memory located at 317d6e0175cSChristoph Hellwig * cmd->t_mem_bidi_list 318d6e0175cSChristoph Hellwig */ 319d6e0175cSChristoph Hellwig 320d6e0175cSChristoph Hellwig offset = 0; 321d6e0175cSChristoph Hellwig for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 322d6e0175cSChristoph Hellwig addr = kmap_atomic(sg_page(sg)); 323a6b0133cSNicholas Bellinger if (!addr) { 324a6b0133cSNicholas Bellinger ret = TCM_OUT_OF_RESOURCES; 325d6e0175cSChristoph Hellwig goto out; 326a6b0133cSNicholas Bellinger } 327d6e0175cSChristoph Hellwig 328d6e0175cSChristoph Hellwig for (i = 0; i < sg->length; i++) 329d6e0175cSChristoph Hellwig *(addr + sg->offset + i) ^= *(buf + offset + i); 330d6e0175cSChristoph Hellwig 331d6e0175cSChristoph Hellwig offset += sg->length; 332d6e0175cSChristoph Hellwig kunmap_atomic(addr); 333d6e0175cSChristoph Hellwig } 334d6e0175cSChristoph Hellwig 335d6e0175cSChristoph Hellwig out: 336d6e0175cSChristoph Hellwig kfree(buf); 337a6b0133cSNicholas Bellinger return ret; 338d6e0175cSChristoph Hellwig } 339d6e0175cSChristoph Hellwig 340de103c93SChristoph Hellwig sense_reason_t 
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	/*
	 * Decode an SBC CDB: extract LBA/sector counts, pick the backend
	 * execution callback from *ops, and validate the transfer against
	 * the device limits before handing off to target_cmd_size_check().
	 */
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		/* byte 1 bit 3 is the FUA (force unit access) bit */
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case XDWRITEREAD_10:
		/* XDWRITEREAD requires a bidirectional data-out command */
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = ops->execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run during after I/O
			 * completion.
			 */
			cmd->execute_cmd = ops->execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			/* expected data transfer is one block's worth */
			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		/* ALLOCATION LENGTH from CDB bytes 10-13 */
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/* Backends without a flush hook treat this as a no-op */
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		/* PARAMETER LIST LENGTH from CDB bytes 7-8 */
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		/* Not an SBC opcode; fall back to generic SPC parsing */
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		/* Enforce fabric and backend per-command sector limits */
		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		/* Reject transfers that would run past the last LBA */
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

/* SBC devices always report the direct-access (disk) peripheral type. */
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct
se_device *dev = cmd->se_dev; 62686d71829SAsias He unsigned char *buf, *ptr = NULL; 62786d71829SAsias He sector_t lba; 62886d71829SAsias He int size; 62986d71829SAsias He u32 range; 63086d71829SAsias He sense_reason_t ret = 0; 63186d71829SAsias He int dl, bd_dl; 63286d71829SAsias He 63386d71829SAsias He /* We never set ANC_SUP */ 63486d71829SAsias He if (cmd->t_task_cdb[1]) 63586d71829SAsias He return TCM_INVALID_CDB_FIELD; 63686d71829SAsias He 63786d71829SAsias He if (cmd->data_length == 0) { 63886d71829SAsias He target_complete_cmd(cmd, SAM_STAT_GOOD); 63986d71829SAsias He return 0; 64086d71829SAsias He } 64186d71829SAsias He 64286d71829SAsias He if (cmd->data_length < 8) { 64386d71829SAsias He pr_warn("UNMAP parameter list length %u too small\n", 64486d71829SAsias He cmd->data_length); 64586d71829SAsias He return TCM_PARAMETER_LIST_LENGTH_ERROR; 64686d71829SAsias He } 64786d71829SAsias He 64886d71829SAsias He buf = transport_kmap_data_sg(cmd); 64986d71829SAsias He if (!buf) 65086d71829SAsias He return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 65186d71829SAsias He 65286d71829SAsias He dl = get_unaligned_be16(&buf[0]); 65386d71829SAsias He bd_dl = get_unaligned_be16(&buf[2]); 65486d71829SAsias He 65586d71829SAsias He size = cmd->data_length - 8; 65686d71829SAsias He if (bd_dl > size) 65786d71829SAsias He pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", 65886d71829SAsias He cmd->data_length, bd_dl); 65986d71829SAsias He else 66086d71829SAsias He size = bd_dl; 66186d71829SAsias He 66286d71829SAsias He if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { 66386d71829SAsias He ret = TCM_INVALID_PARAMETER_LIST; 66486d71829SAsias He goto err; 66586d71829SAsias He } 66686d71829SAsias He 66786d71829SAsias He /* First UNMAP block descriptor starts at 8 byte offset */ 66886d71829SAsias He ptr = &buf[8]; 66986d71829SAsias He pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" 67086d71829SAsias He " ptr: %p\n", dev->transport->name, dl, 
bd_dl, size, ptr); 67186d71829SAsias He 67286d71829SAsias He while (size >= 16) { 67386d71829SAsias He lba = get_unaligned_be64(&ptr[0]); 67486d71829SAsias He range = get_unaligned_be32(&ptr[8]); 67586d71829SAsias He pr_debug("UNMAP: Using lba: %llu and range: %u\n", 67686d71829SAsias He (unsigned long long)lba, range); 67786d71829SAsias He 67886d71829SAsias He if (range > dev->dev_attrib.max_unmap_lba_count) { 67986d71829SAsias He ret = TCM_INVALID_PARAMETER_LIST; 68086d71829SAsias He goto err; 68186d71829SAsias He } 68286d71829SAsias He 68386d71829SAsias He if (lba + range > dev->transport->get_blocks(dev) + 1) { 68486d71829SAsias He ret = TCM_ADDRESS_OUT_OF_RANGE; 68586d71829SAsias He goto err; 68686d71829SAsias He } 68786d71829SAsias He 68886d71829SAsias He ret = do_unmap_fn(cmd, priv, lba, range); 68986d71829SAsias He if (ret) 69086d71829SAsias He goto err; 69186d71829SAsias He 69286d71829SAsias He ptr += 16; 69386d71829SAsias He size -= 16; 69486d71829SAsias He } 69586d71829SAsias He 69686d71829SAsias He err: 69786d71829SAsias He transport_kunmap_data_sg(cmd); 69886d71829SAsias He if (!ret) 69986d71829SAsias He target_complete_cmd(cmd, GOOD); 70086d71829SAsias He return ret; 70186d71829SAsias He } 70286d71829SAsias He EXPORT_SYMBOL(sbc_execute_unmap); 703