1d6e0175cSChristoph Hellwig /*
2d6e0175cSChristoph Hellwig  * SCSI Block Commands (SBC) parsing and emulation.
3d6e0175cSChristoph Hellwig  *
44c76251eSNicholas Bellinger  * (c) Copyright 2002-2013 Datera, Inc.
5d6e0175cSChristoph Hellwig  *
6d6e0175cSChristoph Hellwig  * Nicholas A. Bellinger <nab@kernel.org>
7d6e0175cSChristoph Hellwig  *
8d6e0175cSChristoph Hellwig  * This program is free software; you can redistribute it and/or modify
9d6e0175cSChristoph Hellwig  * it under the terms of the GNU General Public License as published by
10d6e0175cSChristoph Hellwig  * the Free Software Foundation; either version 2 of the License, or
11d6e0175cSChristoph Hellwig  * (at your option) any later version.
12d6e0175cSChristoph Hellwig  *
13d6e0175cSChristoph Hellwig  * This program is distributed in the hope that it will be useful,
14d6e0175cSChristoph Hellwig  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15d6e0175cSChristoph Hellwig  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16d6e0175cSChristoph Hellwig  * GNU General Public License for more details.
17d6e0175cSChristoph Hellwig  *
18d6e0175cSChristoph Hellwig  * You should have received a copy of the GNU General Public License
19d6e0175cSChristoph Hellwig  * along with this program; if not, write to the Free Software
20d6e0175cSChristoph Hellwig  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21d6e0175cSChristoph Hellwig  */
22d6e0175cSChristoph Hellwig 
23d6e0175cSChristoph Hellwig #include <linux/kernel.h>
24d6e0175cSChristoph Hellwig #include <linux/module.h>
25d6e0175cSChristoph Hellwig #include <linux/ratelimit.h>
2641861fa8SNicholas Bellinger #include <linux/crc-t10dif.h>
27d6e0175cSChristoph Hellwig #include <asm/unaligned.h>
28d6e0175cSChristoph Hellwig #include <scsi/scsi.h>
2968ff9b9bSNicholas Bellinger #include <scsi/scsi_tcq.h>
30d6e0175cSChristoph Hellwig 
31d6e0175cSChristoph Hellwig #include <target/target_core_base.h>
32d6e0175cSChristoph Hellwig #include <target/target_core_backend.h>
33d6e0175cSChristoph Hellwig #include <target/target_core_fabric.h>
34d6e0175cSChristoph Hellwig 
35d6e0175cSChristoph Hellwig #include "target_core_internal.h"
36d6e0175cSChristoph Hellwig #include "target_core_ua.h"
37c66094bfSHannes Reinecke #include "target_core_alua.h"
38d6e0175cSChristoph Hellwig 
39de103c93SChristoph Hellwig static sense_reason_t
40de103c93SChristoph Hellwig sbc_emulate_readcapacity(struct se_cmd *cmd)
411fd032eeSChristoph Hellwig {
421fd032eeSChristoph Hellwig 	struct se_device *dev = cmd->se_dev;
438dc8632aSRoland Dreier 	unsigned char *cdb = cmd->t_task_cdb;
441fd032eeSChristoph Hellwig 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
45a50da144SPaolo Bonzini 	unsigned char *rbuf;
46a50da144SPaolo Bonzini 	unsigned char buf[8];
471fd032eeSChristoph Hellwig 	u32 blocks;
481fd032eeSChristoph Hellwig 
498dc8632aSRoland Dreier 	/*
508dc8632aSRoland Dreier 	 * SBC-2 says:
518dc8632aSRoland Dreier 	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
528dc8632aSRoland Dreier 	 *   ADDRESS field is not set to zero, the device server shall
538dc8632aSRoland Dreier 	 *   terminate the command with CHECK CONDITION status with
548dc8632aSRoland Dreier 	 *   the sense key set to ILLEGAL REQUEST and the additional
558dc8632aSRoland Dreier 	 *   sense code set to INVALID FIELD IN CDB.
568dc8632aSRoland Dreier 	 *
578dc8632aSRoland Dreier 	 * In SBC-3, these fields are obsolete, but some SCSI
588dc8632aSRoland Dreier 	 * compliance tests actually check this, so we might as well
598dc8632aSRoland Dreier 	 * follow SBC-2.
608dc8632aSRoland Dreier 	 */
618dc8632aSRoland Dreier 	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
628dc8632aSRoland Dreier 		return TCM_INVALID_CDB_FIELD;
638dc8632aSRoland Dreier 
641fd032eeSChristoph Hellwig 	if (blocks_long >= 0x00000000ffffffff)
651fd032eeSChristoph Hellwig 		blocks = 0xffffffff;
661fd032eeSChristoph Hellwig 	else
671fd032eeSChristoph Hellwig 		blocks = (u32)blocks_long;
681fd032eeSChristoph Hellwig 
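	/*
	 * Build the 8-byte READ CAPACITY (10) parameter data: bytes 0-3 carry
	 * the last LBA (capped at 0xffffffff for larger devices, which tells
	 * the initiator to issue READ CAPACITY (16) instead), and bytes 4-7
	 * carry the logical block size in bytes.
	 */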
691fd032eeSChristoph Hellwig 	buf[0] = (blocks >> 24) & 0xff;
701fd032eeSChristoph Hellwig 	buf[1] = (blocks >> 16) & 0xff;
711fd032eeSChristoph Hellwig 	buf[2] = (blocks >> 8) & 0xff;
721fd032eeSChristoph Hellwig 	buf[3] = blocks & 0xff;
730fd97ccfSChristoph Hellwig 	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
740fd97ccfSChristoph Hellwig 	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
750fd97ccfSChristoph Hellwig 	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
760fd97ccfSChristoph Hellwig 	buf[7] = dev->dev_attrib.block_size & 0xff;
771fd032eeSChristoph Hellwig 
78a50da144SPaolo Bonzini 	rbuf = transport_kmap_data_sg(cmd);
798b4b0dcbSNicholas Bellinger 	if (rbuf) {
80a50da144SPaolo Bonzini 		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
811fd032eeSChristoph Hellwig 		transport_kunmap_data_sg(cmd);
828b4b0dcbSNicholas Bellinger 	}
831fd032eeSChristoph Hellwig 
841fd032eeSChristoph Hellwig 	target_complete_cmd(cmd, GOOD);
851fd032eeSChristoph Hellwig 	return 0;
861fd032eeSChristoph Hellwig }
871fd032eeSChristoph Hellwig 
88de103c93SChristoph Hellwig static sense_reason_t
89de103c93SChristoph Hellwig sbc_emulate_readcapacity_16(struct se_cmd *cmd)
901fd032eeSChristoph Hellwig {
911fd032eeSChristoph Hellwig 	struct se_device *dev = cmd->se_dev;
922d335983SNicholas Bellinger 	struct se_session *sess = cmd->se_sess;
93a50da144SPaolo Bonzini 	unsigned char *rbuf;
94a50da144SPaolo Bonzini 	unsigned char buf[32];
951fd032eeSChristoph Hellwig 	unsigned long long blocks = dev->transport->get_blocks(dev);
961fd032eeSChristoph Hellwig 
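	/*
	 * Build the READ CAPACITY (16) parameter data: bytes 0-7 carry the
	 * last LBA, bytes 8-11 the logical block size, byte 12 the PROT_EN +
	 * P_TYPE protection bits, byte 13 the logical-blocks-per-physical-block
	 * exponent, and bytes 14-15 the LBPME bit plus the lowest aligned LBA.
	 */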
97a50da144SPaolo Bonzini 	memset(buf, 0, sizeof(buf));
981fd032eeSChristoph Hellwig 	buf[0] = (blocks >> 56) & 0xff;
991fd032eeSChristoph Hellwig 	buf[1] = (blocks >> 48) & 0xff;
1001fd032eeSChristoph Hellwig 	buf[2] = (blocks >> 40) & 0xff;
1011fd032eeSChristoph Hellwig 	buf[3] = (blocks >> 32) & 0xff;
1021fd032eeSChristoph Hellwig 	buf[4] = (blocks >> 24) & 0xff;
1031fd032eeSChristoph Hellwig 	buf[5] = (blocks >> 16) & 0xff;
1041fd032eeSChristoph Hellwig 	buf[6] = (blocks >> 8) & 0xff;
1051fd032eeSChristoph Hellwig 	buf[7] = blocks & 0xff;
1060fd97ccfSChristoph Hellwig 	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
1070fd97ccfSChristoph Hellwig 	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
1080fd97ccfSChristoph Hellwig 	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
1090fd97ccfSChristoph Hellwig 	buf[11] = dev->dev_attrib.block_size & 0xff;
11056dac14cSNicholas Bellinger 	/*
11156dac14cSNicholas Bellinger 	 * Set P_TYPE and PROT_EN bits for DIF support
11256dac14cSNicholas Bellinger 	 */
1132d335983SNicholas Bellinger 	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
11456dac14cSNicholas Bellinger 		if (dev->dev_attrib.pi_prot_type)
11556dac14cSNicholas Bellinger 			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
1162d335983SNicholas Bellinger 	}
1177f7caf6aSAndy Grover 
1187f7caf6aSAndy Grover 	if (dev->transport->get_lbppbe)
1197f7caf6aSAndy Grover 		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
1207f7caf6aSAndy Grover 
1217f7caf6aSAndy Grover 	if (dev->transport->get_alignment_offset_lbas) {
1227f7caf6aSAndy Grover 		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
1237f7caf6aSAndy Grover 		buf[14] = (lalba >> 8) & 0x3f;
1247f7caf6aSAndy Grover 		buf[15] = lalba & 0xff;
1257f7caf6aSAndy Grover 	}
1267f7caf6aSAndy Grover 
1271fd032eeSChristoph Hellwig 	/*
1281fd032eeSChristoph Hellwig 	 * Set the Thin Provisioning Enable (LBPME) bit in READ CAPACITY (16)
1291fd032eeSChristoph Hellwig 	 * byte 14, per sbc3r22, if emulate_tpu or emulate_tpws is enabled.
1301fd032eeSChristoph Hellwig 	 */
1310fd97ccfSChristoph Hellwig 	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
1327f7caf6aSAndy Grover 		buf[14] |= 0x80;
1331fd032eeSChristoph Hellwig 
134a50da144SPaolo Bonzini 	rbuf = transport_kmap_data_sg(cmd);
1358b4b0dcbSNicholas Bellinger 	if (rbuf) {
136a50da144SPaolo Bonzini 		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
1371fd032eeSChristoph Hellwig 		transport_kunmap_data_sg(cmd);
1388b4b0dcbSNicholas Bellinger 	}
1391fd032eeSChristoph Hellwig 
1401fd032eeSChristoph Hellwig 	target_complete_cmd(cmd, GOOD);
1411fd032eeSChristoph Hellwig 	return 0;
1421fd032eeSChristoph Hellwig }
1431fd032eeSChristoph Hellwig 
144972b29c8SRoland Dreier sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
1451fd032eeSChristoph Hellwig {
1461fd032eeSChristoph Hellwig 	u32 num_blocks;
1471fd032eeSChristoph Hellwig 
1481fd032eeSChristoph Hellwig 	if (cmd->t_task_cdb[0] == WRITE_SAME)
1491fd032eeSChristoph Hellwig 		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
1501fd032eeSChristoph Hellwig 	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
1511fd032eeSChristoph Hellwig 		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
1521fd032eeSChristoph Hellwig 	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
1531fd032eeSChristoph Hellwig 		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
1541fd032eeSChristoph Hellwig 
1551fd032eeSChristoph Hellwig 	/*
1561fd032eeSChristoph Hellwig 	 * Use the explicit range when a non-zero value is supplied; otherwise
1571fd032eeSChristoph Hellwig 	 * calculate the remaining range as ->get_blocks() - starting LBA + 1.
1581fd032eeSChristoph Hellwig 	 */
1596f974e8cSChristoph Hellwig 	if (num_blocks)
1606f974e8cSChristoph Hellwig 		return num_blocks;
1611fd032eeSChristoph Hellwig 
1626f974e8cSChristoph Hellwig 	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
1636f974e8cSChristoph Hellwig 		cmd->t_task_lba + 1;
1641fd032eeSChristoph Hellwig }
165972b29c8SRoland Dreier EXPORT_SYMBOL(sbc_get_write_same_sectors);
1661fd032eeSChristoph Hellwig 
167de103c93SChristoph Hellwig static sense_reason_t
1681920ed61SNicholas Bellinger sbc_emulate_noop(struct se_cmd *cmd)
1691a1ff38cSBernhard Kohl {
1701a1ff38cSBernhard Kohl 	target_complete_cmd(cmd, GOOD);
1711a1ff38cSBernhard Kohl 	return 0;
1721a1ff38cSBernhard Kohl }
1731a1ff38cSBernhard Kohl 
174d6e0175cSChristoph Hellwig static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
175d6e0175cSChristoph Hellwig {
1760fd97ccfSChristoph Hellwig 	return cmd->se_dev->dev_attrib.block_size * sectors;
177d6e0175cSChristoph Hellwig }
178d6e0175cSChristoph Hellwig 
179d6e0175cSChristoph Hellwig static int sbc_check_valid_sectors(struct se_cmd *cmd)
180d6e0175cSChristoph Hellwig {
181d6e0175cSChristoph Hellwig 	struct se_device *dev = cmd->se_dev;
182d6e0175cSChristoph Hellwig 	unsigned long long end_lba;
183d6e0175cSChristoph Hellwig 	u32 sectors;
184d6e0175cSChristoph Hellwig 
1850fd97ccfSChristoph Hellwig 	sectors = cmd->data_length / dev->dev_attrib.block_size;
186d6e0175cSChristoph Hellwig 	end_lba = dev->transport->get_blocks(dev) + 1;
187d6e0175cSChristoph Hellwig 
188d6e0175cSChristoph Hellwig 	if (cmd->t_task_lba + sectors > end_lba) {
189d6e0175cSChristoph Hellwig 		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
190d6e0175cSChristoph Hellwig 			cmd->t_task_lba, sectors, end_lba);
191d6e0175cSChristoph Hellwig 		return -EINVAL;
192d6e0175cSChristoph Hellwig 	}
193d6e0175cSChristoph Hellwig 
194d6e0175cSChristoph Hellwig 	return 0;
195d6e0175cSChristoph Hellwig }
196d6e0175cSChristoph Hellwig 
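/*
 * The helpers below extract the big-endian TRANSFER LENGTH and LOGICAL BLOCK
 * ADDRESS fields from the different SBC CDB formats.  For example, a READ(10)
 * CDB carries a 32-bit LBA in bytes 2-5 and a 16-bit transfer length in
 * bytes 7-8, while the 6-byte variants use a 21-bit LBA and an 8-bit length
 * where zero means 256 blocks.
 */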
197d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_6(unsigned char *cdb)
198d6e0175cSChristoph Hellwig {
199d6e0175cSChristoph Hellwig 	/*
200d6e0175cSChristoph Hellwig 	 * Use 8-bit sector value.  SBC-3 says:
201d6e0175cSChristoph Hellwig 	 *
202d6e0175cSChristoph Hellwig 	 *   A TRANSFER LENGTH field set to zero specifies that 256
203d6e0175cSChristoph Hellwig 	 *   logical blocks shall be written.  Any other value
204d6e0175cSChristoph Hellwig 	 *   specifies the number of logical blocks that shall be
205d6e0175cSChristoph Hellwig 	 *   written.
206d6e0175cSChristoph Hellwig 	 */
207d6e0175cSChristoph Hellwig 	return cdb[4] ? : 256;
208d6e0175cSChristoph Hellwig }
209d6e0175cSChristoph Hellwig 
210d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_10(unsigned char *cdb)
211d6e0175cSChristoph Hellwig {
212d6e0175cSChristoph Hellwig 	return (u32)(cdb[7] << 8) + cdb[8];
213d6e0175cSChristoph Hellwig }
214d6e0175cSChristoph Hellwig 
215d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_12(unsigned char *cdb)
216d6e0175cSChristoph Hellwig {
217d6e0175cSChristoph Hellwig 	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
218d6e0175cSChristoph Hellwig }
219d6e0175cSChristoph Hellwig 
220d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_16(unsigned char *cdb)
221d6e0175cSChristoph Hellwig {
222d6e0175cSChristoph Hellwig 	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
223d6e0175cSChristoph Hellwig 		    (cdb[12] << 8) + cdb[13];
224d6e0175cSChristoph Hellwig }
225d6e0175cSChristoph Hellwig 
226d6e0175cSChristoph Hellwig /*
227d6e0175cSChristoph Hellwig  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
228d6e0175cSChristoph Hellwig  */
229d6e0175cSChristoph Hellwig static inline u32 transport_get_sectors_32(unsigned char *cdb)
230d6e0175cSChristoph Hellwig {
231d6e0175cSChristoph Hellwig 	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
232d6e0175cSChristoph Hellwig 		    (cdb[30] << 8) + cdb[31];
234d6e0175cSChristoph Hellwig }
235d6e0175cSChristoph Hellwig 
236d6e0175cSChristoph Hellwig static inline u32 transport_lba_21(unsigned char *cdb)
237d6e0175cSChristoph Hellwig {
238d6e0175cSChristoph Hellwig 	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
239d6e0175cSChristoph Hellwig }
240d6e0175cSChristoph Hellwig 
241d6e0175cSChristoph Hellwig static inline u32 transport_lba_32(unsigned char *cdb)
242d6e0175cSChristoph Hellwig {
243d6e0175cSChristoph Hellwig 	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
244d6e0175cSChristoph Hellwig }
245d6e0175cSChristoph Hellwig 
246d6e0175cSChristoph Hellwig static inline unsigned long long transport_lba_64(unsigned char *cdb)
247d6e0175cSChristoph Hellwig {
248d6e0175cSChristoph Hellwig 	unsigned int __v1, __v2;
249d6e0175cSChristoph Hellwig 
250d6e0175cSChristoph Hellwig 	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
251d6e0175cSChristoph Hellwig 	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
252d6e0175cSChristoph Hellwig 
253d6e0175cSChristoph Hellwig 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
254d6e0175cSChristoph Hellwig }
255d6e0175cSChristoph Hellwig 
256d6e0175cSChristoph Hellwig /*
257d6e0175cSChristoph Hellwig  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
258d6e0175cSChristoph Hellwig  */
259d6e0175cSChristoph Hellwig static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
260d6e0175cSChristoph Hellwig {
261d6e0175cSChristoph Hellwig 	unsigned int __v1, __v2;
262d6e0175cSChristoph Hellwig 
263d6e0175cSChristoph Hellwig 	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
264d6e0175cSChristoph Hellwig 	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
265d6e0175cSChristoph Hellwig 
266d6e0175cSChristoph Hellwig 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
267d6e0175cSChristoph Hellwig }
268d6e0175cSChristoph Hellwig 
269cd063befSNicholas Bellinger static sense_reason_t
270cd063befSNicholas Bellinger sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
271d6e0175cSChristoph Hellwig {
272972b29c8SRoland Dreier 	unsigned int sectors = sbc_get_write_same_sectors(cmd);
273773cbaf7SNicholas Bellinger 
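	/*
	 * 'flags' points at the CDB byte carrying the WRITE SAME bits:
	 * LBDATA (0x02), PBDATA (0x04), UNMAP (0x08) and ANCHOR (0x10).
	 */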
274d6e0175cSChristoph Hellwig 	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
275d6e0175cSChristoph Hellwig 		pr_err("WRITE_SAME PBDATA and LBDATA"
276d6e0175cSChristoph Hellwig 			" bits not supported for Block Discard"
277d6e0175cSChristoph Hellwig 			" Emulation\n");
278cd063befSNicholas Bellinger 		return TCM_UNSUPPORTED_SCSI_OPCODE;
279d6e0175cSChristoph Hellwig 	}
280773cbaf7SNicholas Bellinger 	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
281773cbaf7SNicholas Bellinger 		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
282773cbaf7SNicholas Bellinger 			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
283773cbaf7SNicholas Bellinger 		return TCM_INVALID_CDB_FIELD;
284773cbaf7SNicholas Bellinger 	}
2855cb770bfSRoland Dreier 	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
2865cb770bfSRoland Dreier 	if (flags[0] & 0x10) {
2875cb770bfSRoland Dreier 		pr_warn("WRITE SAME with ANCHOR not supported\n");
2885cb770bfSRoland Dreier 		return TCM_INVALID_CDB_FIELD;
2895cb770bfSRoland Dreier 	}
290d6e0175cSChristoph Hellwig 	/*
291cd063befSNicholas Bellinger 	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
292cd063befSNicholas Bellinger 	 * translated into block discard requests within backend code.
293d6e0175cSChristoph Hellwig 	 */
294cd063befSNicholas Bellinger 	if (flags[0] & 0x08) {
295cd063befSNicholas Bellinger 		if (!ops->execute_write_same_unmap)
296cd063befSNicholas Bellinger 			return TCM_UNSUPPORTED_SCSI_OPCODE;
297d6e0175cSChristoph Hellwig 
298cd063befSNicholas Bellinger 		cmd->execute_cmd = ops->execute_write_same_unmap;
299cd063befSNicholas Bellinger 		return 0;
300cd063befSNicholas Bellinger 	}
301cd063befSNicholas Bellinger 	if (!ops->execute_write_same)
302cd063befSNicholas Bellinger 		return TCM_UNSUPPORTED_SCSI_OPCODE;
303cd063befSNicholas Bellinger 
304cd063befSNicholas Bellinger 	cmd->execute_cmd = ops->execute_write_same;
305d6e0175cSChristoph Hellwig 	return 0;
306d6e0175cSChristoph Hellwig }
307d6e0175cSChristoph Hellwig 
308a6b0133cSNicholas Bellinger static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
309d6e0175cSChristoph Hellwig {
310d6e0175cSChristoph Hellwig 	unsigned char *buf, *addr;
311d6e0175cSChristoph Hellwig 	struct scatterlist *sg;
312d6e0175cSChristoph Hellwig 	unsigned int offset;
313a6b0133cSNicholas Bellinger 	sense_reason_t ret = TCM_NO_SENSE;
314a6b0133cSNicholas Bellinger 	int i, count;
315d6e0175cSChristoph Hellwig 	/*
316d6e0175cSChristoph Hellwig 	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
317d6e0175cSChristoph Hellwig 	 *
318d6e0175cSChristoph Hellwig 	 * 1) read the specified logical block(s);
319d6e0175cSChristoph Hellwig 	 * 2) transfer logical blocks from the data-out buffer;
320d6e0175cSChristoph Hellwig 	 * 3) XOR the logical blocks transferred from the data-out buffer with
321d6e0175cSChristoph Hellwig 	 *    the logical blocks read, storing the resulting XOR data in a buffer;
322d6e0175cSChristoph Hellwig 	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
323d6e0175cSChristoph Hellwig 	 *    blocks transferred from the data-out buffer; and
324d6e0175cSChristoph Hellwig 	 * 5) transfer the resulting XOR data to the data-in buffer.
325d6e0175cSChristoph Hellwig 	 */
326d6e0175cSChristoph Hellwig 	buf = kmalloc(cmd->data_length, GFP_KERNEL);
327d6e0175cSChristoph Hellwig 	if (!buf) {
328d6e0175cSChristoph Hellwig 		pr_err("Unable to allocate xor_callback buf\n");
329a6b0133cSNicholas Bellinger 		return TCM_OUT_OF_RESOURCES;
330d6e0175cSChristoph Hellwig 	}
331d6e0175cSChristoph Hellwig 	/*
332d6e0175cSChristoph Hellwig 	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
333d6e0175cSChristoph Hellwig 	 * into the locally allocated *buf
334d6e0175cSChristoph Hellwig 	 */
335d6e0175cSChristoph Hellwig 	sg_copy_to_buffer(cmd->t_data_sg,
336d6e0175cSChristoph Hellwig 			  cmd->t_data_nents,
337d6e0175cSChristoph Hellwig 			  buf,
338d6e0175cSChristoph Hellwig 			  cmd->data_length);
339d6e0175cSChristoph Hellwig 
340d6e0175cSChristoph Hellwig 	/*
341d6e0175cSChristoph Hellwig 	 * Now perform the XOR against the BIDI read memory located at
342d6e0175cSChristoph Hellwig 	 * cmd->t_bidi_data_sg
343d6e0175cSChristoph Hellwig 	 */
344d6e0175cSChristoph Hellwig 
345d6e0175cSChristoph Hellwig 	offset = 0;
346d6e0175cSChristoph Hellwig 	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
347d6e0175cSChristoph Hellwig 		addr = kmap_atomic(sg_page(sg));
348a6b0133cSNicholas Bellinger 		if (!addr) {
349a6b0133cSNicholas Bellinger 			ret = TCM_OUT_OF_RESOURCES;
350d6e0175cSChristoph Hellwig 			goto out;
351a6b0133cSNicholas Bellinger 		}
352d6e0175cSChristoph Hellwig 
353d6e0175cSChristoph Hellwig 		for (i = 0; i < sg->length; i++)
354d6e0175cSChristoph Hellwig 			*(addr + sg->offset + i) ^= *(buf + offset + i);
355d6e0175cSChristoph Hellwig 
356d6e0175cSChristoph Hellwig 		offset += sg->length;
357d6e0175cSChristoph Hellwig 		kunmap_atomic(addr);
358d6e0175cSChristoph Hellwig 	}
359d6e0175cSChristoph Hellwig 
360d6e0175cSChristoph Hellwig out:
361d6e0175cSChristoph Hellwig 	kfree(buf);
362a6b0133cSNicholas Bellinger 	return ret;
363d6e0175cSChristoph Hellwig }
364d6e0175cSChristoph Hellwig 
365a82a9538SNicholas Bellinger static sense_reason_t
366a82a9538SNicholas Bellinger sbc_execute_rw(struct se_cmd *cmd)
367a82a9538SNicholas Bellinger {
368a82a9538SNicholas Bellinger 	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
369a82a9538SNicholas Bellinger 			       cmd->data_direction);
370a82a9538SNicholas Bellinger }
371a82a9538SNicholas Bellinger 
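/*
 * Completion callback for the WRITE phase of COMPARE AND WRITE: flag the
 * command for the response fall-through in target_complete_ok_work() and
 * release ->caw_sem taken in sbc_compare_and_write().
 */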
37268ff9b9bSNicholas Bellinger static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
37368ff9b9bSNicholas Bellinger {
37468ff9b9bSNicholas Bellinger 	struct se_device *dev = cmd->se_dev;
37568ff9b9bSNicholas Bellinger 
376d8855c15SNicholas Bellinger 	/*
377d8855c15SNicholas Bellinger 	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
378d8855c15SNicholas Bellinger 	 * within target_complete_ok_work() if the command was successfully
379d8855c15SNicholas Bellinger 	 * sent to the backend driver.
380d8855c15SNicholas Bellinger 	 */
381d8855c15SNicholas Bellinger 	spin_lock_irq(&cmd->t_state_lock);
382d8855c15SNicholas Bellinger 	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
38368ff9b9bSNicholas Bellinger 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
384d8855c15SNicholas Bellinger 	spin_unlock_irq(&cmd->t_state_lock);
385d8855c15SNicholas Bellinger 
38668ff9b9bSNicholas Bellinger 	/*
38768ff9b9bSNicholas Bellinger 	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
38868ff9b9bSNicholas Bellinger 	 * before the original READ I/O submission.
38968ff9b9bSNicholas Bellinger 	 */
39068ff9b9bSNicholas Bellinger 	up(&dev->caw_sem);
39168ff9b9bSNicholas Bellinger 
39268ff9b9bSNicholas Bellinger 	return TCM_NO_SENSE;
39368ff9b9bSNicholas Bellinger }
39468ff9b9bSNicholas Bellinger 
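/*
 * Completion callback for the READ phase of COMPARE AND WRITE: compare the
 * data just read (cmd->t_bidi_data_sg) against the verify half of the
 * data-out payload.  On a match, swap in a new SGL pointing at the write
 * half and resubmit the command as a regular write via sbc_execute_rw();
 * on a mismatch, fail with MISCOMPARE and release ->caw_sem.
 */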
39568ff9b9bSNicholas Bellinger static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
39668ff9b9bSNicholas Bellinger {
39768ff9b9bSNicholas Bellinger 	struct se_device *dev = cmd->se_dev;
39868ff9b9bSNicholas Bellinger 	struct scatterlist *write_sg = NULL, *sg;
399db60df88SNicholas Bellinger 	unsigned char *buf = NULL, *addr;
40068ff9b9bSNicholas Bellinger 	struct sg_mapping_iter m;
40168ff9b9bSNicholas Bellinger 	unsigned int offset = 0, len;
40268ff9b9bSNicholas Bellinger 	unsigned int nlbas = cmd->t_task_nolb;
40368ff9b9bSNicholas Bellinger 	unsigned int block_size = dev->dev_attrib.block_size;
40468ff9b9bSNicholas Bellinger 	unsigned int compare_len = (nlbas * block_size);
40568ff9b9bSNicholas Bellinger 	sense_reason_t ret = TCM_NO_SENSE;
40668ff9b9bSNicholas Bellinger 	int rc, i;
40768ff9b9bSNicholas Bellinger 
408cf6d1f09SNicholas Bellinger 	/*
409cf6d1f09SNicholas Bellinger 	 * Handle early failure in transport_generic_request_failure(),
410cf6d1f09SNicholas Bellinger 	 * which will not have taken ->caw_sem yet.
411cf6d1f09SNicholas Bellinger 	 */
412cf6d1f09SNicholas Bellinger 	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
413cf6d1f09SNicholas Bellinger 		return TCM_NO_SENSE;
414db60df88SNicholas Bellinger 	/*
415db60df88SNicholas Bellinger 	 * Immediately exit and release dev->caw_sem if the command has
416db60df88SNicholas Bellinger 	 * already been failed with a non-zero SCSI status.
417db60df88SNicholas Bellinger 	 */
418db60df88SNicholas Bellinger 	if (cmd->scsi_status) {
419db60df88SNicholas Bellinger 		pr_err("compare_and_write_callback: non zero scsi_status:"
420db60df88SNicholas Bellinger 			" 0x%02x\n", cmd->scsi_status);
421db60df88SNicholas Bellinger 		goto out;
422db60df88SNicholas Bellinger 	}
423cf6d1f09SNicholas Bellinger 
42468ff9b9bSNicholas Bellinger 	buf = kzalloc(cmd->data_length, GFP_KERNEL);
42568ff9b9bSNicholas Bellinger 	if (!buf) {
42668ff9b9bSNicholas Bellinger 		pr_err("Unable to allocate compare_and_write buf\n");
427a2890087SNicholas Bellinger 		ret = TCM_OUT_OF_RESOURCES;
428a2890087SNicholas Bellinger 		goto out;
42968ff9b9bSNicholas Bellinger 	}
43068ff9b9bSNicholas Bellinger 
431a1e1774cSMartin Svec 	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
43268ff9b9bSNicholas Bellinger 			   GFP_KERNEL);
43368ff9b9bSNicholas Bellinger 	if (!write_sg) {
43468ff9b9bSNicholas Bellinger 		pr_err("Unable to allocate compare_and_write sg\n");
43568ff9b9bSNicholas Bellinger 		ret = TCM_OUT_OF_RESOURCES;
43668ff9b9bSNicholas Bellinger 		goto out;
43768ff9b9bSNicholas Bellinger 	}
438a1e1774cSMartin Svec 	sg_init_table(write_sg, cmd->t_data_nents);
43968ff9b9bSNicholas Bellinger 	/*
44068ff9b9bSNicholas Bellinger 	 * Set up the verify and write data payloads from the total number of LBAs.
44168ff9b9bSNicholas Bellinger 	 */
44268ff9b9bSNicholas Bellinger 	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
44368ff9b9bSNicholas Bellinger 			       cmd->data_length);
44468ff9b9bSNicholas Bellinger 	if (!rc) {
44568ff9b9bSNicholas Bellinger 		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
44668ff9b9bSNicholas Bellinger 		ret = TCM_OUT_OF_RESOURCES;
44768ff9b9bSNicholas Bellinger 		goto out;
44868ff9b9bSNicholas Bellinger 	}
44968ff9b9bSNicholas Bellinger 	/*
45068ff9b9bSNicholas Bellinger 	 * Compare the SCSI READ payload against the verify payload
45168ff9b9bSNicholas Bellinger 	 */
45268ff9b9bSNicholas Bellinger 	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
45368ff9b9bSNicholas Bellinger 		addr = (unsigned char *)kmap_atomic(sg_page(sg));
45468ff9b9bSNicholas Bellinger 		if (!addr) {
45568ff9b9bSNicholas Bellinger 			ret = TCM_OUT_OF_RESOURCES;
45668ff9b9bSNicholas Bellinger 			goto out;
45768ff9b9bSNicholas Bellinger 		}
45868ff9b9bSNicholas Bellinger 
45968ff9b9bSNicholas Bellinger 		len = min(sg->length, compare_len);
46068ff9b9bSNicholas Bellinger 
46168ff9b9bSNicholas Bellinger 		if (memcmp(addr, buf + offset, len)) {
46268ff9b9bSNicholas Bellinger 			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
46368ff9b9bSNicholas Bellinger 				addr, buf + offset);
46468ff9b9bSNicholas Bellinger 			kunmap_atomic(addr);
46568ff9b9bSNicholas Bellinger 			goto miscompare;
46668ff9b9bSNicholas Bellinger 		}
46768ff9b9bSNicholas Bellinger 		kunmap_atomic(addr);
46868ff9b9bSNicholas Bellinger 
46968ff9b9bSNicholas Bellinger 		offset += len;
47068ff9b9bSNicholas Bellinger 		compare_len -= len;
47168ff9b9bSNicholas Bellinger 		if (!compare_len)
47268ff9b9bSNicholas Bellinger 			break;
47368ff9b9bSNicholas Bellinger 	}
47468ff9b9bSNicholas Bellinger 
47568ff9b9bSNicholas Bellinger 	i = 0;
47668ff9b9bSNicholas Bellinger 	len = cmd->t_task_nolb * block_size;
47768ff9b9bSNicholas Bellinger 	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
47868ff9b9bSNicholas Bellinger 	/*
47968ff9b9bSNicholas Bellinger 	 * Currently assumes NoLB=1 and that each SGL entry is PAGE_SIZE in length.
48068ff9b9bSNicholas Bellinger 	 */
48168ff9b9bSNicholas Bellinger 	while (len) {
48268ff9b9bSNicholas Bellinger 		sg_miter_next(&m);
48368ff9b9bSNicholas Bellinger 
48468ff9b9bSNicholas Bellinger 		if (block_size < PAGE_SIZE) {
48568ff9b9bSNicholas Bellinger 			sg_set_page(&write_sg[i], m.page, block_size,
48668ff9b9bSNicholas Bellinger 				    block_size);
48768ff9b9bSNicholas Bellinger 		} else {
48868ff9b9bSNicholas Bellinger 			sg_miter_next(&m);
48968ff9b9bSNicholas Bellinger 			sg_set_page(&write_sg[i], m.page, block_size,
49068ff9b9bSNicholas Bellinger 				    0);
49168ff9b9bSNicholas Bellinger 		}
49268ff9b9bSNicholas Bellinger 		len -= block_size;
49368ff9b9bSNicholas Bellinger 		i++;
49468ff9b9bSNicholas Bellinger 	}
49568ff9b9bSNicholas Bellinger 	sg_miter_stop(&m);
49668ff9b9bSNicholas Bellinger 	/*
49768ff9b9bSNicholas Bellinger 	 * Save the original SGL + nents values before updating to new
49868ff9b9bSNicholas Bellinger 	 * assignments, to be released in transport_free_pages() ->
49968ff9b9bSNicholas Bellinger 	 * transport_reset_sgl_orig()
50068ff9b9bSNicholas Bellinger 	 */
50168ff9b9bSNicholas Bellinger 	cmd->t_data_sg_orig = cmd->t_data_sg;
50268ff9b9bSNicholas Bellinger 	cmd->t_data_sg = write_sg;
50368ff9b9bSNicholas Bellinger 	cmd->t_data_nents_orig = cmd->t_data_nents;
50468ff9b9bSNicholas Bellinger 	cmd->t_data_nents = 1;
50568ff9b9bSNicholas Bellinger 
50668ff9b9bSNicholas Bellinger 	cmd->sam_task_attr = MSG_HEAD_TAG;
50768ff9b9bSNicholas Bellinger 	cmd->transport_complete_callback = compare_and_write_post;
50868ff9b9bSNicholas Bellinger 	/*
50968ff9b9bSNicholas Bellinger 	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
51068ff9b9bSNicholas Bellinger 	 * for submitting the adjusted SGL to write instance user-data.
51168ff9b9bSNicholas Bellinger 	 */
51268ff9b9bSNicholas Bellinger 	cmd->execute_cmd = sbc_execute_rw;
51368ff9b9bSNicholas Bellinger 
51468ff9b9bSNicholas Bellinger 	spin_lock_irq(&cmd->t_state_lock);
51568ff9b9bSNicholas Bellinger 	cmd->t_state = TRANSPORT_PROCESSING;
51668ff9b9bSNicholas Bellinger 	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
51768ff9b9bSNicholas Bellinger 	spin_unlock_irq(&cmd->t_state_lock);
51868ff9b9bSNicholas Bellinger 
51968ff9b9bSNicholas Bellinger 	__target_execute_cmd(cmd);
52068ff9b9bSNicholas Bellinger 
52168ff9b9bSNicholas Bellinger 	kfree(buf);
52268ff9b9bSNicholas Bellinger 	return ret;
52368ff9b9bSNicholas Bellinger 
52468ff9b9bSNicholas Bellinger miscompare:
52568ff9b9bSNicholas Bellinger 	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
52668ff9b9bSNicholas Bellinger 		dev->transport->name);
52768ff9b9bSNicholas Bellinger 	ret = TCM_MISCOMPARE_VERIFY;
52868ff9b9bSNicholas Bellinger out:
52968ff9b9bSNicholas Bellinger 	/*
53068ff9b9bSNicholas Bellinger 	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
53168ff9b9bSNicholas Bellinger 	 * sbc_compare_and_write() before the original READ I/O submission.
53268ff9b9bSNicholas Bellinger 	 */
53368ff9b9bSNicholas Bellinger 	up(&dev->caw_sem);
53468ff9b9bSNicholas Bellinger 	kfree(write_sg);
53568ff9b9bSNicholas Bellinger 	kfree(buf);
53668ff9b9bSNicholas Bellinger 	return ret;
53768ff9b9bSNicholas Bellinger }
53868ff9b9bSNicholas Bellinger 
53968ff9b9bSNicholas Bellinger static sense_reason_t
54068ff9b9bSNicholas Bellinger sbc_compare_and_write(struct se_cmd *cmd)
54168ff9b9bSNicholas Bellinger {
54268ff9b9bSNicholas Bellinger 	struct se_device *dev = cmd->se_dev;
54368ff9b9bSNicholas Bellinger 	sense_reason_t ret;
54468ff9b9bSNicholas Bellinger 	int rc;
54568ff9b9bSNicholas Bellinger 	/*
54668ff9b9bSNicholas Bellinger 	 * Submit the READ first for COMPARE_AND_WRITE to perform the
54768ff9b9bSNicholas Bellinger 	 * comparison using the SGLs at cmd->t_bidi_data_sg.
54868ff9b9bSNicholas Bellinger 	 */
54968ff9b9bSNicholas Bellinger 	rc = down_interruptible(&dev->caw_sem);
55068ff9b9bSNicholas Bellinger 	if ((rc != 0) || signal_pending(current)) {
55168ff9b9bSNicholas Bellinger 		cmd->transport_complete_callback = NULL;
55268ff9b9bSNicholas Bellinger 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
55368ff9b9bSNicholas Bellinger 	}
554b7191253SNicholas Bellinger 	/*
555b7191253SNicholas Bellinger 	 * Reset cmd->data_length to the size of a single I/O
556b7191253SNicholas Bellinger 	 * (t_task_nolb * block_size) rather than the doubled COMPARE_AND_WRITE
557b7191253SNicholas Bellinger 	 * payload, so backend drivers see the size of the I/O actually submitted.
558b7191253SNicholas Bellinger 	 */
559b7191253SNicholas Bellinger 	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
56068ff9b9bSNicholas Bellinger 
56168ff9b9bSNicholas Bellinger 	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
56268ff9b9bSNicholas Bellinger 			      DMA_FROM_DEVICE);
56368ff9b9bSNicholas Bellinger 	if (ret) {
56468ff9b9bSNicholas Bellinger 		cmd->transport_complete_callback = NULL;
56568ff9b9bSNicholas Bellinger 		up(&dev->caw_sem);
56668ff9b9bSNicholas Bellinger 		return ret;
56768ff9b9bSNicholas Bellinger 	}
56868ff9b9bSNicholas Bellinger 	/*
56968ff9b9bSNicholas Bellinger 	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
57068ff9b9bSNicholas Bellinger 	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
57168ff9b9bSNicholas Bellinger 	 * of WRITE instance user-data.
57268ff9b9bSNicholas Bellinger 	 */
57368ff9b9bSNicholas Bellinger 	return TCM_NO_SENSE;
57468ff9b9bSNicholas Bellinger }
57568ff9b9bSNicholas Bellinger 
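/*
 * Translate the RDPROTECT/WRPROTECT value from the CDB into cmd->prot_op
 * (PASS when a non-zero protect value is set, INSERT/STRIP otherwise) and
 * into the GUARD/REFTAG checks to perform in cmd->prot_checks.
 */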
57619f9361aSSagi Grimberg static int
57719f9361aSSagi Grimberg sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
57819f9361aSSagi Grimberg 		       bool is_write, struct se_cmd *cmd)
57919f9361aSSagi Grimberg {
58019f9361aSSagi Grimberg 	if (is_write) {
58119f9361aSSagi Grimberg 		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
58219f9361aSSagi Grimberg 					 TARGET_PROT_DOUT_INSERT;
58319f9361aSSagi Grimberg 		switch (protect) {
58419f9361aSSagi Grimberg 		case 0x0:
58519f9361aSSagi Grimberg 		case 0x3:
58619f9361aSSagi Grimberg 			cmd->prot_checks = 0;
58719f9361aSSagi Grimberg 			break;
58819f9361aSSagi Grimberg 		case 0x1:
58919f9361aSSagi Grimberg 		case 0x5:
59019f9361aSSagi Grimberg 			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
59119f9361aSSagi Grimberg 			if (prot_type == TARGET_DIF_TYPE1_PROT)
59219f9361aSSagi Grimberg 				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
59319f9361aSSagi Grimberg 			break;
59419f9361aSSagi Grimberg 		case 0x2:
59519f9361aSSagi Grimberg 			if (prot_type == TARGET_DIF_TYPE1_PROT)
59619f9361aSSagi Grimberg 				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
59719f9361aSSagi Grimberg 			break;
59819f9361aSSagi Grimberg 		case 0x4:
59919f9361aSSagi Grimberg 			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
60019f9361aSSagi Grimberg 			break;
60119f9361aSSagi Grimberg 		default:
60219f9361aSSagi Grimberg 			pr_err("Unsupported protect field %d\n", protect);
60319f9361aSSagi Grimberg 			return -EINVAL;
60419f9361aSSagi Grimberg 		}
60519f9361aSSagi Grimberg 	} else {
60619f9361aSSagi Grimberg 		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
60719f9361aSSagi Grimberg 					 TARGET_PROT_DIN_STRIP;
60819f9361aSSagi Grimberg 		switch (protect) {
60919f9361aSSagi Grimberg 		case 0x0:
61019f9361aSSagi Grimberg 		case 0x1:
61119f9361aSSagi Grimberg 		case 0x5:
61219f9361aSSagi Grimberg 			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
61319f9361aSSagi Grimberg 			if (prot_type == TARGET_DIF_TYPE1_PROT)
61419f9361aSSagi Grimberg 				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
61519f9361aSSagi Grimberg 			break;
61619f9361aSSagi Grimberg 		case 0x2:
61719f9361aSSagi Grimberg 			if (prot_type == TARGET_DIF_TYPE1_PROT)
61819f9361aSSagi Grimberg 				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
61919f9361aSSagi Grimberg 			break;
62019f9361aSSagi Grimberg 		case 0x3:
62119f9361aSSagi Grimberg 			cmd->prot_checks = 0;
62219f9361aSSagi Grimberg 			break;
62319f9361aSSagi Grimberg 		case 0x4:
62419f9361aSSagi Grimberg 			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
62519f9361aSSagi Grimberg 			break;
62619f9361aSSagi Grimberg 		default:
62719f9361aSSagi Grimberg 			pr_err("Unsupported protect field %d\n", protect);
62819f9361aSSagi Grimberg 			return -EINVAL;
62919f9361aSSagi Grimberg 		}
63019f9361aSSagi Grimberg 	}
63119f9361aSSagi Grimberg 
63219f9361aSSagi Grimberg 	return 0;
63319f9361aSSagi Grimberg }
63419f9361aSSagi Grimberg 
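/*
 * Check that the PROTECT field in the CDB is compatible with the backend
 * device's configured T10-PI type, seed the expected reference tag, and
 * fill in cmd->prot_type/prot_length plus the prot_op/prot_checks state.
 * Returns false to fail the command as an unsupported opcode.
 */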
635499bf77bSNicholas Bellinger static bool
636499bf77bSNicholas Bellinger sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
63719f9361aSSagi Grimberg 	       u32 sectors, bool is_write)
638499bf77bSNicholas Bellinger {
63919f9361aSSagi Grimberg 	u8 protect = cdb[1] >> 5;
64019f9361aSSagi Grimberg 
641b5b8e298SSagi Grimberg 	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
642499bf77bSNicholas Bellinger 		return true;
643499bf77bSNicholas Bellinger 
644499bf77bSNicholas Bellinger 	switch (dev->dev_attrib.pi_prot_type) {
645499bf77bSNicholas Bellinger 	case TARGET_DIF_TYPE3_PROT:
646499bf77bSNicholas Bellinger 		cmd->reftag_seed = 0xffffffff;
647499bf77bSNicholas Bellinger 		break;
648499bf77bSNicholas Bellinger 	case TARGET_DIF_TYPE2_PROT:
64919f9361aSSagi Grimberg 		if (protect)
650499bf77bSNicholas Bellinger 			return false;
651499bf77bSNicholas Bellinger 
652499bf77bSNicholas Bellinger 		cmd->reftag_seed = cmd->t_task_lba;
653499bf77bSNicholas Bellinger 		break;
654499bf77bSNicholas Bellinger 	case TARGET_DIF_TYPE1_PROT:
655499bf77bSNicholas Bellinger 		cmd->reftag_seed = cmd->t_task_lba;
656499bf77bSNicholas Bellinger 		break;
657499bf77bSNicholas Bellinger 	case TARGET_DIF_TYPE0_PROT:
658499bf77bSNicholas Bellinger 	default:
659499bf77bSNicholas Bellinger 		return true;
660499bf77bSNicholas Bellinger 	}
661499bf77bSNicholas Bellinger 
66219f9361aSSagi Grimberg 	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
66319f9361aSSagi Grimberg 				   is_write, cmd))
66419f9361aSSagi Grimberg 		return false;
66519f9361aSSagi Grimberg 
666499bf77bSNicholas Bellinger 	cmd->prot_type = dev->dev_attrib.pi_prot_type;
667499bf77bSNicholas Bellinger 	cmd->prot_length = dev->prot_length * sectors;
66803abad9eSSagi Grimberg 	pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
66903abad9eSSagi Grimberg 		 __func__, cmd->prot_type, cmd->prot_length,
67003abad9eSSagi Grimberg 		 cmd->prot_op, cmd->prot_checks);
671499bf77bSNicholas Bellinger 
672499bf77bSNicholas Bellinger 	return true;
673499bf77bSNicholas Bellinger }
674499bf77bSNicholas Bellinger 
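/*
 * Main SBC CDB parsing entry point used by the backend drivers: decode the
 * LBA and sector count, wire up cmd->execute_cmd for the opcode, and
 * validate the request against device limits before handing the expected
 * transfer length to target_cmd_size_check().
 */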
675de103c93SChristoph Hellwig sense_reason_t
676de103c93SChristoph Hellwig sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
677d6e0175cSChristoph Hellwig {
678d6e0175cSChristoph Hellwig 	struct se_device *dev = cmd->se_dev;
679d6e0175cSChristoph Hellwig 	unsigned char *cdb = cmd->t_task_cdb;
6801fd032eeSChristoph Hellwig 	unsigned int size;
681d6e0175cSChristoph Hellwig 	u32 sectors = 0;
682de103c93SChristoph Hellwig 	sense_reason_t ret;
683d6e0175cSChristoph Hellwig 
684d6e0175cSChristoph Hellwig 	switch (cdb[0]) {
685d6e0175cSChristoph Hellwig 	case READ_6:
686d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_6(cdb);
687d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_21(cdb);
688d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
689a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
690a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
691d6e0175cSChristoph Hellwig 		break;
692d6e0175cSChristoph Hellwig 	case READ_10:
693d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_10(cdb);
694d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_32(cdb);
695499bf77bSNicholas Bellinger 
69619f9361aSSagi Grimberg 		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
697499bf77bSNicholas Bellinger 			return TCM_UNSUPPORTED_SCSI_OPCODE;
698499bf77bSNicholas Bellinger 
699d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
700a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
701a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
702d6e0175cSChristoph Hellwig 		break;
703d6e0175cSChristoph Hellwig 	case READ_12:
704d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_12(cdb);
705d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_32(cdb);
706499bf77bSNicholas Bellinger 
70719f9361aSSagi Grimberg 		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
708499bf77bSNicholas Bellinger 			return TCM_UNSUPPORTED_SCSI_OPCODE;
709499bf77bSNicholas Bellinger 
710d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
711a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
712a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
713d6e0175cSChristoph Hellwig 		break;
714d6e0175cSChristoph Hellwig 	case READ_16:
715d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_16(cdb);
716d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_64(cdb);
717499bf77bSNicholas Bellinger 
71819f9361aSSagi Grimberg 		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
719499bf77bSNicholas Bellinger 			return TCM_UNSUPPORTED_SCSI_OPCODE;
720499bf77bSNicholas Bellinger 
721d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
722a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
723a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
724d6e0175cSChristoph Hellwig 		break;
725d6e0175cSChristoph Hellwig 	case WRITE_6:
726d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_6(cdb);
727d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_21(cdb);
728d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
729a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
730a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
731d6e0175cSChristoph Hellwig 		break;
732d6e0175cSChristoph Hellwig 	case WRITE_10:
733d6e0175cSChristoph Hellwig 	case WRITE_VERIFY:
734d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_10(cdb);
735d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_32(cdb);
736499bf77bSNicholas Bellinger 
73719f9361aSSagi Grimberg 		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
738499bf77bSNicholas Bellinger 			return TCM_UNSUPPORTED_SCSI_OPCODE;
739499bf77bSNicholas Bellinger 
740d6e0175cSChristoph Hellwig 		if (cdb[1] & 0x8)
741d6e0175cSChristoph Hellwig 			cmd->se_cmd_flags |= SCF_FUA;
742d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
743a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
744a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
745d6e0175cSChristoph Hellwig 		break;
746d6e0175cSChristoph Hellwig 	case WRITE_12:
747d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_12(cdb);
748d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_32(cdb);
749499bf77bSNicholas Bellinger 
75019f9361aSSagi Grimberg 		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
751499bf77bSNicholas Bellinger 			return TCM_UNSUPPORTED_SCSI_OPCODE;
752499bf77bSNicholas Bellinger 
753d6e0175cSChristoph Hellwig 		if (cdb[1] & 0x8)
754d6e0175cSChristoph Hellwig 			cmd->se_cmd_flags |= SCF_FUA;
755d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
756a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
757a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
758d6e0175cSChristoph Hellwig 		break;
759d6e0175cSChristoph Hellwig 	case WRITE_16:
760d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_16(cdb);
761d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_64(cdb);
762499bf77bSNicholas Bellinger 
76319f9361aSSagi Grimberg 		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
764499bf77bSNicholas Bellinger 			return TCM_UNSUPPORTED_SCSI_OPCODE;
765499bf77bSNicholas Bellinger 
766d6e0175cSChristoph Hellwig 		if (cdb[1] & 0x8)
767d6e0175cSChristoph Hellwig 			cmd->se_cmd_flags |= SCF_FUA;
768d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
769a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
770a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
771d6e0175cSChristoph Hellwig 		break;
772d6e0175cSChristoph Hellwig 	case XDWRITEREAD_10:
773de103c93SChristoph Hellwig 		if (cmd->data_direction != DMA_TO_DEVICE ||
774d6e0175cSChristoph Hellwig 		    !(cmd->se_cmd_flags & SCF_BIDI))
775de103c93SChristoph Hellwig 			return TCM_INVALID_CDB_FIELD;
776d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_10(cdb);
777d6e0175cSChristoph Hellwig 
778d6e0175cSChristoph Hellwig 		cmd->t_task_lba = transport_lba_32(cdb);
779d6e0175cSChristoph Hellwig 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
780d6e0175cSChristoph Hellwig 
781d6e0175cSChristoph Hellwig 		/*
782d6e0175cSChristoph Hellwig 		 * Set up the BIDI XOR callback to be run after I/O completion.
783d6e0175cSChristoph Hellwig 		 */
784a82a9538SNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
785a82a9538SNicholas Bellinger 		cmd->execute_cmd = sbc_execute_rw;
786d6e0175cSChristoph Hellwig 		cmd->transport_complete_callback = &xdreadwrite_callback;
787d6e0175cSChristoph Hellwig 		if (cdb[1] & 0x8)
788d6e0175cSChristoph Hellwig 			cmd->se_cmd_flags |= SCF_FUA;
789d6e0175cSChristoph Hellwig 		break;
790d6e0175cSChristoph Hellwig 	case VARIABLE_LENGTH_CMD:
791d6e0175cSChristoph Hellwig 	{
792d6e0175cSChristoph Hellwig 		u16 service_action = get_unaligned_be16(&cdb[8]);
793d6e0175cSChristoph Hellwig 		switch (service_action) {
794d6e0175cSChristoph Hellwig 		case XDWRITEREAD_32:
795d6e0175cSChristoph Hellwig 			sectors = transport_get_sectors_32(cdb);
796d6e0175cSChristoph Hellwig 
797d6e0175cSChristoph Hellwig 			/*
798d6e0175cSChristoph Hellwig 			 * Use WRITE_32 and READ_32 opcodes for the emulated
799d6e0175cSChristoph Hellwig 			 * XDWRITE_READ_32 logic.
800d6e0175cSChristoph Hellwig 			 */
801d6e0175cSChristoph Hellwig 			cmd->t_task_lba = transport_lba_64_ext(cdb);
802d6e0175cSChristoph Hellwig 			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
803d6e0175cSChristoph Hellwig 
804d6e0175cSChristoph Hellwig 			/*
805d6e0175cSChristoph Hellwig 			 * Set up the BIDI XOR callback to be run after I/O
806d6e0175cSChristoph Hellwig 			 * completion.
807d6e0175cSChristoph Hellwig 			 */
808a82a9538SNicholas Bellinger 			cmd->execute_rw = ops->execute_rw;
809a82a9538SNicholas Bellinger 			cmd->execute_cmd = sbc_execute_rw;
810d6e0175cSChristoph Hellwig 			cmd->transport_complete_callback = &xdreadwrite_callback;
811d6e0175cSChristoph Hellwig 			if (cdb[1] & 0x8)
812d6e0175cSChristoph Hellwig 				cmd->se_cmd_flags |= SCF_FUA;
813d6e0175cSChristoph Hellwig 			break;
814d6e0175cSChristoph Hellwig 		case WRITE_SAME_32:
815d6e0175cSChristoph Hellwig 			sectors = transport_get_sectors_32(cdb);
816d6e0175cSChristoph Hellwig 			if (!sectors) {
817d6e0175cSChristoph Hellwig 				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
818d6e0175cSChristoph Hellwig 				       " supported\n");
819de103c93SChristoph Hellwig 				return TCM_INVALID_CDB_FIELD;
820d6e0175cSChristoph Hellwig 			}
821d6e0175cSChristoph Hellwig 
8221fd032eeSChristoph Hellwig 			size = sbc_get_size(cmd, 1);
823d6e0175cSChristoph Hellwig 			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
824d6e0175cSChristoph Hellwig 
825cd063befSNicholas Bellinger 			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
8266b64e1feSDan Carpenter 			if (ret)
827cd063befSNicholas Bellinger 				return ret;
828d6e0175cSChristoph Hellwig 			break;
829d6e0175cSChristoph Hellwig 		default:
830d6e0175cSChristoph Hellwig 			pr_err("VARIABLE_LENGTH_CMD service action"
831d6e0175cSChristoph Hellwig 				" 0x%04x not supported\n", service_action);
832de103c93SChristoph Hellwig 			return TCM_UNSUPPORTED_SCSI_OPCODE;
833d6e0175cSChristoph Hellwig 		}
834d6e0175cSChristoph Hellwig 		break;
835d6e0175cSChristoph Hellwig 	}
83668ff9b9bSNicholas Bellinger 	case COMPARE_AND_WRITE:
83768ff9b9bSNicholas Bellinger 		sectors = cdb[13];
83868ff9b9bSNicholas Bellinger 		/*
83968ff9b9bSNicholas Bellinger 		 * COMPARE_AND_WRITE is currently only supported for a single sector
84068ff9b9bSNicholas Bellinger 		 */
84168ff9b9bSNicholas Bellinger 		if (sectors > 1) {
84268ff9b9bSNicholas Bellinger 			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
84368ff9b9bSNicholas Bellinger 			       " than 1\n", sectors);
84468ff9b9bSNicholas Bellinger 			return TCM_INVALID_CDB_FIELD;
84568ff9b9bSNicholas Bellinger 		}
84668ff9b9bSNicholas Bellinger 		/*
84768ff9b9bSNicholas Bellinger 		 * Double the size because we have two buffers (verify + write);
84868ff9b9bSNicholas Bellinger 		 * note that zero is not an error.
84968ff9b9bSNicholas Bellinger 		 */
85068ff9b9bSNicholas Bellinger 		size = 2 * sbc_get_size(cmd, sectors);
85168ff9b9bSNicholas Bellinger 		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
85268ff9b9bSNicholas Bellinger 		cmd->t_task_nolb = sectors;
85368ff9b9bSNicholas Bellinger 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
85468ff9b9bSNicholas Bellinger 		cmd->execute_rw = ops->execute_rw;
85568ff9b9bSNicholas Bellinger 		cmd->execute_cmd = sbc_compare_and_write;
85668ff9b9bSNicholas Bellinger 		cmd->transport_complete_callback = compare_and_write_callback;
85768ff9b9bSNicholas Bellinger 		break;
858d6e0175cSChristoph Hellwig 	case READ_CAPACITY:
8591fd032eeSChristoph Hellwig 		size = READ_CAP_LEN;
8601fd032eeSChristoph Hellwig 		cmd->execute_cmd = sbc_emulate_readcapacity;
861d6e0175cSChristoph Hellwig 		break;
862d6e0175cSChristoph Hellwig 	case SERVICE_ACTION_IN:
863d6e0175cSChristoph Hellwig 		switch (cmd->t_task_cdb[1] & 0x1f) {
864d6e0175cSChristoph Hellwig 		case SAI_READ_CAPACITY_16:
8651fd032eeSChristoph Hellwig 			cmd->execute_cmd = sbc_emulate_readcapacity_16;
866d6e0175cSChristoph Hellwig 			break;
867c66094bfSHannes Reinecke 		case SAI_REPORT_REFERRALS:
868c66094bfSHannes Reinecke 			cmd->execute_cmd = target_emulate_report_referrals;
869c66094bfSHannes Reinecke 			break;
870d6e0175cSChristoph Hellwig 		default:
871d6e0175cSChristoph Hellwig 			pr_err("Unsupported SA: 0x%02x\n",
872d6e0175cSChristoph Hellwig 				cmd->t_task_cdb[1] & 0x1f);
873de103c93SChristoph Hellwig 			return TCM_INVALID_CDB_FIELD;
874d6e0175cSChristoph Hellwig 		}
8751fd032eeSChristoph Hellwig 		size = (cdb[10] << 24) | (cdb[11] << 16) |
876d6e0175cSChristoph Hellwig 		       (cdb[12] << 8) | cdb[13];
877d6e0175cSChristoph Hellwig 		break;
878d6e0175cSChristoph Hellwig 	case SYNCHRONIZE_CACHE:
879d6e0175cSChristoph Hellwig 	case SYNCHRONIZE_CACHE_16:
880882e3f8eSHannes Reinecke 		if (!ops->execute_sync_cache) {
881882e3f8eSHannes Reinecke 			size = 0;
882882e3f8eSHannes Reinecke 			cmd->execute_cmd = sbc_emulate_noop;
883882e3f8eSHannes Reinecke 			break;
884882e3f8eSHannes Reinecke 		}
885ad67f0d9SChristoph Hellwig 
886d6e0175cSChristoph Hellwig 		/*
887d6e0175cSChristoph Hellwig 		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
888d6e0175cSChristoph Hellwig 		 */
889d6e0175cSChristoph Hellwig 		if (cdb[0] == SYNCHRONIZE_CACHE) {
890d6e0175cSChristoph Hellwig 			sectors = transport_get_sectors_10(cdb);
891d6e0175cSChristoph Hellwig 			cmd->t_task_lba = transport_lba_32(cdb);
892d6e0175cSChristoph Hellwig 		} else {
893d6e0175cSChristoph Hellwig 			sectors = transport_get_sectors_16(cdb);
894d6e0175cSChristoph Hellwig 			cmd->t_task_lba = transport_lba_64(cdb);
895d6e0175cSChristoph Hellwig 		}
896d6e0175cSChristoph Hellwig 
8971fd032eeSChristoph Hellwig 		size = sbc_get_size(cmd, sectors);
898d6e0175cSChristoph Hellwig 
899d6e0175cSChristoph Hellwig 		/*
900d6e0175cSChristoph Hellwig 		 * Check to ensure that LBA + Range does not extend past the end of
901d6e0175cSChristoph Hellwig 		 * the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
902d6e0175cSChristoph Hellwig 		 */
903d6e0175cSChristoph Hellwig 		if (cmd->t_task_lba || sectors) {
904d6e0175cSChristoph Hellwig 			if (sbc_check_valid_sectors(cmd) < 0)
90533633676SRoland Dreier 				return TCM_ADDRESS_OUT_OF_RANGE;
906d6e0175cSChristoph Hellwig 		}
907ad67f0d9SChristoph Hellwig 		cmd->execute_cmd = ops->execute_sync_cache;
908d6e0175cSChristoph Hellwig 		break;
909d6e0175cSChristoph Hellwig 	case UNMAP:
91014150a6bSChristoph Hellwig 		if (!ops->execute_unmap)
911de103c93SChristoph Hellwig 			return TCM_UNSUPPORTED_SCSI_OPCODE;
91214150a6bSChristoph Hellwig 
9131fd032eeSChristoph Hellwig 		size = get_unaligned_be16(&cdb[7]);
91414150a6bSChristoph Hellwig 		cmd->execute_cmd = ops->execute_unmap;
915d6e0175cSChristoph Hellwig 		break;
916d6e0175cSChristoph Hellwig 	case WRITE_SAME_16:
917d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_16(cdb);
918d6e0175cSChristoph Hellwig 		if (!sectors) {
919d6e0175cSChristoph Hellwig 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
920de103c93SChristoph Hellwig 			return TCM_INVALID_CDB_FIELD;
921d6e0175cSChristoph Hellwig 		}
922d6e0175cSChristoph Hellwig 
9231fd032eeSChristoph Hellwig 		size = sbc_get_size(cmd, 1);
924d6e0175cSChristoph Hellwig 		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
925d6e0175cSChristoph Hellwig 
926cd063befSNicholas Bellinger 		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
9276b64e1feSDan Carpenter 		if (ret)
928cd063befSNicholas Bellinger 			return ret;
929d6e0175cSChristoph Hellwig 		break;
930d6e0175cSChristoph Hellwig 	case WRITE_SAME:
931d6e0175cSChristoph Hellwig 		sectors = transport_get_sectors_10(cdb);
932d6e0175cSChristoph Hellwig 		if (!sectors) {
933d6e0175cSChristoph Hellwig 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
934de103c93SChristoph Hellwig 			return TCM_INVALID_CDB_FIELD;
935d6e0175cSChristoph Hellwig 		}
936d6e0175cSChristoph Hellwig 
9371fd032eeSChristoph Hellwig 		size = sbc_get_size(cmd, 1);
938d6e0175cSChristoph Hellwig 		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
939d6e0175cSChristoph Hellwig 
940d6e0175cSChristoph Hellwig 		/*
941d6e0175cSChristoph Hellwig 		 * Follow sbcr26 for WRITE_SAME (10) and check byte 1 bit 3
942d6e0175cSChristoph Hellwig 		 * (UNMAP), which replaced the original reserved field
943d6e0175cSChristoph Hellwig 		 */
944cd063befSNicholas Bellinger 		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
9456b64e1feSDan Carpenter 		if (ret)
946cd063befSNicholas Bellinger 			return ret;
947d6e0175cSChristoph Hellwig 		break;
948d6e0175cSChristoph Hellwig 	case VERIFY:
9491fd032eeSChristoph Hellwig 		size = 0;
9501920ed61SNicholas Bellinger 		cmd->execute_cmd = sbc_emulate_noop;
951d6e0175cSChristoph Hellwig 		break;
9521a1ff38cSBernhard Kohl 	case REZERO_UNIT:
9531a1ff38cSBernhard Kohl 	case SEEK_6:
9541a1ff38cSBernhard Kohl 	case SEEK_10:
9551a1ff38cSBernhard Kohl 		/*
9561a1ff38cSBernhard Kohl 		 * There are still clients out there which use these old SCSI-2
9571a1ff38cSBernhard Kohl 		 * commands. This mainly happens when running VMs with legacy
9581a1ff38cSBernhard Kohl 		 * guest systems, connected via SCSI command pass-through to
9591a1ff38cSBernhard Kohl 		 * iSCSI targets. Make them happy and return status GOOD.
9601a1ff38cSBernhard Kohl 		 */
9611a1ff38cSBernhard Kohl 		size = 0;
9621a1ff38cSBernhard Kohl 		cmd->execute_cmd = sbc_emulate_noop;
9631a1ff38cSBernhard Kohl 		break;
964d6e0175cSChristoph Hellwig 	default:
9651fd032eeSChristoph Hellwig 		ret = spc_parse_cdb(cmd, &size);
966d6e0175cSChristoph Hellwig 		if (ret)
967d6e0175cSChristoph Hellwig 			return ret;
968d6e0175cSChristoph Hellwig 	}
969d6e0175cSChristoph Hellwig 
970d6e0175cSChristoph Hellwig 	/* reject any command that we don't have a handler for */
971d6e0175cSChristoph Hellwig 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
972de103c93SChristoph Hellwig 		return TCM_UNSUPPORTED_SCSI_OPCODE;
973d6e0175cSChristoph Hellwig 
974d6e0175cSChristoph Hellwig 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
9751fd032eeSChristoph Hellwig 		unsigned long long end_lba;
9761fd032eeSChristoph Hellwig 
9770fd97ccfSChristoph Hellwig 		if (sectors > dev->dev_attrib.fabric_max_sectors) {
978d6e0175cSChristoph Hellwig 			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
979d6e0175cSChristoph Hellwig 				" big sectors %u exceeds fabric_max_sectors:"
980d6e0175cSChristoph Hellwig 				" %u\n", cdb[0], sectors,
9810fd97ccfSChristoph Hellwig 				dev->dev_attrib.fabric_max_sectors);
982de103c93SChristoph Hellwig 			return TCM_INVALID_CDB_FIELD;
983d6e0175cSChristoph Hellwig 		}
9840fd97ccfSChristoph Hellwig 		if (sectors > dev->dev_attrib.hw_max_sectors) {
985d6e0175cSChristoph Hellwig 			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
986d6e0175cSChristoph Hellwig 				" big sectors %u exceeds backend hw_max_sectors:"
987d6e0175cSChristoph Hellwig 				" %u\n", cdb[0], sectors,
9880fd97ccfSChristoph Hellwig 				dev->dev_attrib.hw_max_sectors);
989de103c93SChristoph Hellwig 			return TCM_INVALID_CDB_FIELD;
990d6e0175cSChristoph Hellwig 		}
991d6e0175cSChristoph Hellwig 
9921fd032eeSChristoph Hellwig 		end_lba = dev->transport->get_blocks(dev) + 1;
9931fd032eeSChristoph Hellwig 		if (cmd->t_task_lba + sectors > end_lba) {
9941fd032eeSChristoph Hellwig 			pr_err("cmd exceeds last lba %llu "
9951fd032eeSChristoph Hellwig 				"(lba %llu, sectors %u)\n",
9961fd032eeSChristoph Hellwig 				end_lba, cmd->t_task_lba, sectors);
99709ceadc7SRoland Dreier 			return TCM_ADDRESS_OUT_OF_RANGE;
998d6e0175cSChristoph Hellwig 		}
999d6e0175cSChristoph Hellwig 
100068ff9b9bSNicholas Bellinger 		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
10011fd032eeSChristoph Hellwig 			size = sbc_get_size(cmd, sectors);
10021fd032eeSChristoph Hellwig 	}
10031fd032eeSChristoph Hellwig 
1004de103c93SChristoph Hellwig 	return target_cmd_size_check(cmd, size);
1005d6e0175cSChristoph Hellwig }
1006d6e0175cSChristoph Hellwig EXPORT_SYMBOL(sbc_parse_cdb);
10076f23ac8aSChristoph Hellwig 
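/* SBC emulation always reports a direct-access block device (TYPE_DISK). */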
10086f23ac8aSChristoph Hellwig u32 sbc_get_device_type(struct se_device *dev)
10096f23ac8aSChristoph Hellwig {
10106f23ac8aSChristoph Hellwig 	return TYPE_DISK;
10116f23ac8aSChristoph Hellwig }
10126f23ac8aSChristoph Hellwig EXPORT_SYMBOL(sbc_get_device_type);
101386d71829SAsias He 
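/*
 * Generic UNMAP emulation: map the UNMAP parameter list from the command's
 * data buffer, validate its length fields against the device limits, and
 * invoke the backend-supplied do_unmap_fn() callback once per block
 * descriptor with the (lba, range) pair to be unmapped.
 */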
101486d71829SAsias He sense_reason_t
101586d71829SAsias He sbc_execute_unmap(struct se_cmd *cmd,
101686d71829SAsias He 	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
101786d71829SAsias He 				      sector_t, sector_t),
101886d71829SAsias He 	void *priv)
101986d71829SAsias He {
102086d71829SAsias He 	struct se_device *dev = cmd->se_dev;
102186d71829SAsias He 	unsigned char *buf, *ptr = NULL;
102286d71829SAsias He 	sector_t lba;
102386d71829SAsias He 	int size;
102486d71829SAsias He 	u32 range;
102586d71829SAsias He 	sense_reason_t ret = 0;
102686d71829SAsias He 	int dl, bd_dl;
102786d71829SAsias He 
102886d71829SAsias He 	/* We never set ANC_SUP */
102986d71829SAsias He 	if (cmd->t_task_cdb[1])
103086d71829SAsias He 		return TCM_INVALID_CDB_FIELD;
103186d71829SAsias He 
103286d71829SAsias He 	if (cmd->data_length == 0) {
103386d71829SAsias He 		target_complete_cmd(cmd, SAM_STAT_GOOD);
103486d71829SAsias He 		return 0;
103586d71829SAsias He 	}
103686d71829SAsias He 
103786d71829SAsias He 	if (cmd->data_length < 8) {
103886d71829SAsias He 		pr_warn("UNMAP parameter list length %u too small\n",
103986d71829SAsias He 			cmd->data_length);
104086d71829SAsias He 		return TCM_PARAMETER_LIST_LENGTH_ERROR;
104186d71829SAsias He 	}
104286d71829SAsias He 
104386d71829SAsias He 	buf = transport_kmap_data_sg(cmd);
104486d71829SAsias He 	if (!buf)
104586d71829SAsias He 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
104686d71829SAsias He 
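	/*
	 * UNMAP parameter list layout (SBC-3): bytes 0-1 carry the UNMAP
	 * DATA LENGTH (dl), bytes 2-3 the UNMAP BLOCK DESCRIPTOR DATA
	 * LENGTH (bd_dl), bytes 4-7 are reserved, and the 16-byte block
	 * descriptors (8-byte LBA, 4-byte block count, 4 reserved bytes)
	 * start at offset 8.
	 */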
104786d71829SAsias He 	dl = get_unaligned_be16(&buf[0]);
104886d71829SAsias He 	bd_dl = get_unaligned_be16(&buf[2]);
104986d71829SAsias He 
105086d71829SAsias He 	size = cmd->data_length - 8;
105186d71829SAsias He 	if (bd_dl > size)
105286d71829SAsias He 		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
105386d71829SAsias He 			cmd->data_length, bd_dl);
105486d71829SAsias He 	else
105586d71829SAsias He 		size = bd_dl;
105686d71829SAsias He 
105786d71829SAsias He 	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
105886d71829SAsias He 		ret = TCM_INVALID_PARAMETER_LIST;
105986d71829SAsias He 		goto err;
106086d71829SAsias He 	}
106186d71829SAsias He 
106286d71829SAsias He 	/* First UNMAP block descriptor starts at 8 byte offset */
106386d71829SAsias He 	ptr = &buf[8];
106486d71829SAsias He 	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
106586d71829SAsias He 		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
106686d71829SAsias He 
106786d71829SAsias He 	while (size >= 16) {
106886d71829SAsias He 		lba = get_unaligned_be64(&ptr[0]);
106986d71829SAsias He 		range = get_unaligned_be32(&ptr[8]);
107086d71829SAsias He 		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
107186d71829SAsias He 				 (unsigned long long)lba, range);
107286d71829SAsias He 
107386d71829SAsias He 		if (range > dev->dev_attrib.max_unmap_lba_count) {
107486d71829SAsias He 			ret = TCM_INVALID_PARAMETER_LIST;
107586d71829SAsias He 			goto err;
107686d71829SAsias He 		}
107786d71829SAsias He 
107886d71829SAsias He 		if (lba + range > dev->transport->get_blocks(dev) + 1) {
107986d71829SAsias He 			ret = TCM_ADDRESS_OUT_OF_RANGE;
108086d71829SAsias He 			goto err;
108186d71829SAsias He 		}
108286d71829SAsias He 
108386d71829SAsias He 		ret = do_unmap_fn(cmd, priv, lba, range);
108486d71829SAsias He 		if (ret)
108586d71829SAsias He 			goto err;
108686d71829SAsias He 
108786d71829SAsias He 		ptr += 16;
108886d71829SAsias He 		size -= 16;
108986d71829SAsias He 	}
109086d71829SAsias He 
109186d71829SAsias He err:
109286d71829SAsias He 	transport_kunmap_data_sg(cmd);
109386d71829SAsias He 	if (!ret)
109486d71829SAsias He 		target_complete_cmd(cmd, SAM_STAT_GOOD);
109586d71829SAsias He 	return ret;
109686d71829SAsias He }
109786d71829SAsias He EXPORT_SYMBOL(sbc_execute_unmap);
109841861fa8SNicholas Bellinger 
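/*
 * Verify a single T10 DIF tuple against one logical block of data: the
 * 16-bit guard tag must match the CRC16-T10DIF of the block, and the
 * 32-bit reference tag must match the low 32 bits of the LBA (Type 1)
 * or the expected initial LBA ei_lba (Type 2).  The application tag is
 * not checked here.
 */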
109941861fa8SNicholas Bellinger static sense_reason_t
110041861fa8SNicholas Bellinger sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
110141861fa8SNicholas Bellinger 		  const void *p, sector_t sector, unsigned int ei_lba)
110241861fa8SNicholas Bellinger {
110341861fa8SNicholas Bellinger 	int block_size = dev->dev_attrib.block_size;
110441861fa8SNicholas Bellinger 	__be16 csum;
110541861fa8SNicholas Bellinger 
110641861fa8SNicholas Bellinger 	csum = cpu_to_be16(crc_t10dif(p, block_size));
110741861fa8SNicholas Bellinger 
110841861fa8SNicholas Bellinger 	if (sdt->guard_tag != csum) {
110941861fa8SNicholas Bellinger 		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
111041861fa8SNicholas Bellinger 			" csum 0x%04x\n", (unsigned long long)sector,
111141861fa8SNicholas Bellinger 			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
111241861fa8SNicholas Bellinger 		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
111341861fa8SNicholas Bellinger 	}
111441861fa8SNicholas Bellinger 
111541861fa8SNicholas Bellinger 	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
111641861fa8SNicholas Bellinger 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
111741861fa8SNicholas Bellinger 		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
111841861fa8SNicholas Bellinger 		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
111941861fa8SNicholas Bellinger 		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
112041861fa8SNicholas Bellinger 		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
112141861fa8SNicholas Bellinger 	}
112241861fa8SNicholas Bellinger 
112341861fa8SNicholas Bellinger 	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
112441861fa8SNicholas Bellinger 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
112541861fa8SNicholas Bellinger 		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
112641861fa8SNicholas Bellinger 		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
112741861fa8SNicholas Bellinger 			be32_to_cpu(sdt->ref_tag), ei_lba);
112841861fa8SNicholas Bellinger 		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
112941861fa8SNicholas Bellinger 	}
113041861fa8SNicholas Bellinger 
113141861fa8SNicholas Bellinger 	return 0;
113241861fa8SNicholas Bellinger }
113341861fa8SNicholas Bellinger 
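/*
 * Copy protection information between cmd->t_prot_sg and the scatterlist
 * sg starting at sg_off: for a read the tuples are copied from sg into
 * cmd->t_prot_sg, for a write from cmd->t_prot_sg into sg.
 */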
113441861fa8SNicholas Bellinger static void
113541861fa8SNicholas Bellinger sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
113641861fa8SNicholas Bellinger 		  struct scatterlist *sg, int sg_off)
113741861fa8SNicholas Bellinger {
113841861fa8SNicholas Bellinger 	struct se_device *dev = cmd->se_dev;
113941861fa8SNicholas Bellinger 	struct scatterlist *psg;
114041861fa8SNicholas Bellinger 	void *paddr, *addr;
114141861fa8SNicholas Bellinger 	unsigned int i, len, left;
114210762e80SNicholas Bellinger 	unsigned int offset = sg_off;
114341861fa8SNicholas Bellinger 
114441861fa8SNicholas Bellinger 	left = sectors * dev->prot_length;
114541861fa8SNicholas Bellinger 
114641861fa8SNicholas Bellinger 	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
114716c0ae02SSagi Grimberg 		unsigned int psg_len, copied = 0;
114841861fa8SNicholas Bellinger 
114916c0ae02SSagi Grimberg 		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
115016c0ae02SSagi Grimberg 		psg_len = min(left, psg->length);
115116c0ae02SSagi Grimberg 		while (psg_len) {
115216c0ae02SSagi Grimberg 			len = min(psg_len, sg->length - offset);
115316c0ae02SSagi Grimberg 			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
115416c0ae02SSagi Grimberg 
115516c0ae02SSagi Grimberg 			if (read)
115616c0ae02SSagi Grimberg 				memcpy(paddr + copied, addr, len);
115716c0ae02SSagi Grimberg 			else
115816c0ae02SSagi Grimberg 				memcpy(addr, paddr + copied, len);
115916c0ae02SSagi Grimberg 
116016c0ae02SSagi Grimberg 			left -= len;
116116c0ae02SSagi Grimberg 			offset += len;
116216c0ae02SSagi Grimberg 			copied += len;
116316c0ae02SSagi Grimberg 			psg_len -= len;
116416c0ae02SSagi Grimberg 
1165d6a65fdcSSagi Grimberg 			if (offset >= sg->length) {
1166d6a65fdcSSagi Grimberg 				sg = sg_next(sg);
1167d6a65fdcSSagi Grimberg 				offset = 0;
1168d6a65fdcSSagi Grimberg 			}
116941861fa8SNicholas Bellinger 			kunmap_atomic(addr);
117041861fa8SNicholas Bellinger 		}
117116c0ae02SSagi Grimberg 		kunmap_atomic(paddr);
117216c0ae02SSagi Grimberg 	}
117341861fa8SNicholas Bellinger }
117441861fa8SNicholas Bellinger 
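/*
 * Verify the protection information received with a WRITE: walk the data
 * scatterlist one logical block at a time, check each DIF tuple supplied
 * in cmd->t_prot_sg, and on success copy the protection information out
 * to the backend scatterlist sg at sg_off.
 */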
117541861fa8SNicholas Bellinger sense_reason_t
117641861fa8SNicholas Bellinger sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
117741861fa8SNicholas Bellinger 		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
117841861fa8SNicholas Bellinger {
117941861fa8SNicholas Bellinger 	struct se_device *dev = cmd->se_dev;
118041861fa8SNicholas Bellinger 	struct se_dif_v1_tuple *sdt;
118141861fa8SNicholas Bellinger 	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
118241861fa8SNicholas Bellinger 	sector_t sector = start;
118341861fa8SNicholas Bellinger 	void *daddr, *paddr;
118441861fa8SNicholas Bellinger 	int i, j, offset = 0;
118541861fa8SNicholas Bellinger 	sense_reason_t rc;
118641861fa8SNicholas Bellinger 
118741861fa8SNicholas Bellinger 	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
118841861fa8SNicholas Bellinger 		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
118941861fa8SNicholas Bellinger 		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
119041861fa8SNicholas Bellinger 
119141861fa8SNicholas Bellinger 		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
119241861fa8SNicholas Bellinger 
119341861fa8SNicholas Bellinger 			if (offset >= psg->length) {
119441861fa8SNicholas Bellinger 				kunmap_atomic(paddr);
119541861fa8SNicholas Bellinger 				psg = sg_next(psg);
119641861fa8SNicholas Bellinger 				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
119741861fa8SNicholas Bellinger 				offset = 0;
119841861fa8SNicholas Bellinger 			}
119941861fa8SNicholas Bellinger 
120041861fa8SNicholas Bellinger 			sdt = paddr + offset;
120141861fa8SNicholas Bellinger 
120241861fa8SNicholas Bellinger 			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
120341861fa8SNicholas Bellinger 				 " app_tag: 0x%04x ref_tag: %u\n",
120441861fa8SNicholas Bellinger 				 (unsigned long long)sector, sdt->guard_tag,
120541861fa8SNicholas Bellinger 				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
120641861fa8SNicholas Bellinger 
120741861fa8SNicholas Bellinger 			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
120841861fa8SNicholas Bellinger 					       ei_lba);
120941861fa8SNicholas Bellinger 			if (rc) {
121041861fa8SNicholas Bellinger 				kunmap_atomic(paddr);
121141861fa8SNicholas Bellinger 				kunmap_atomic(daddr);
121276736db3SSagi Grimberg 				cmd->bad_sector = sector;
121341861fa8SNicholas Bellinger 				return rc;
121441861fa8SNicholas Bellinger 			}
121541861fa8SNicholas Bellinger 
121641861fa8SNicholas Bellinger 			sector++;
121741861fa8SNicholas Bellinger 			ei_lba++;
121841861fa8SNicholas Bellinger 			offset += sizeof(struct se_dif_v1_tuple);
121941861fa8SNicholas Bellinger 		}
122041861fa8SNicholas Bellinger 
122141861fa8SNicholas Bellinger 		kunmap_atomic(paddr);
122241861fa8SNicholas Bellinger 		kunmap_atomic(daddr);
122341861fa8SNicholas Bellinger 	}
122441861fa8SNicholas Bellinger 	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
122541861fa8SNicholas Bellinger 
122641861fa8SNicholas Bellinger 	return 0;
122741861fa8SNicholas Bellinger }
122841861fa8SNicholas Bellinger EXPORT_SYMBOL(sbc_dif_verify_write);
122941861fa8SNicholas Bellinger 
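/*
 * Verify the protection information for a READ: check each DIF tuple
 * supplied in sg against the data in cmd->t_data_sg, skipping tuples
 * whose application tag is the 0xffff escape value, then copy the
 * protection information into cmd->t_prot_sg for transfer to the
 * initiator.
 */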
123041861fa8SNicholas Bellinger sense_reason_t
123141861fa8SNicholas Bellinger sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
123241861fa8SNicholas Bellinger 		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
123341861fa8SNicholas Bellinger {
123441861fa8SNicholas Bellinger 	struct se_device *dev = cmd->se_dev;
123541861fa8SNicholas Bellinger 	struct se_dif_v1_tuple *sdt;
1236fc272ec7SSagi Grimberg 	struct scatterlist *dsg, *psg = sg;
123741861fa8SNicholas Bellinger 	sector_t sector = start;
123841861fa8SNicholas Bellinger 	void *daddr, *paddr;
123941861fa8SNicholas Bellinger 	int i, j, offset = sg_off;
124041861fa8SNicholas Bellinger 	sense_reason_t rc;
124141861fa8SNicholas Bellinger 
124241861fa8SNicholas Bellinger 	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
124341861fa8SNicholas Bellinger 		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1244fc272ec7SSagi Grimberg 		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
124541861fa8SNicholas Bellinger 
124641861fa8SNicholas Bellinger 		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
124741861fa8SNicholas Bellinger 
1248fc272ec7SSagi Grimberg 			if (offset >= psg->length) {
124941861fa8SNicholas Bellinger 				kunmap_atomic(paddr);
1250fc272ec7SSagi Grimberg 				psg = sg_next(psg);
1251fc272ec7SSagi Grimberg 				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
125241861fa8SNicholas Bellinger 				offset = 0;
125341861fa8SNicholas Bellinger 			}
125441861fa8SNicholas Bellinger 
125541861fa8SNicholas Bellinger 			sdt = paddr + offset;
125641861fa8SNicholas Bellinger 
125741861fa8SNicholas Bellinger 			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
125841861fa8SNicholas Bellinger 				 " app_tag: 0x%04x ref_tag: %u\n",
125941861fa8SNicholas Bellinger 				 (unsigned long long)sector, sdt->guard_tag,
126041861fa8SNicholas Bellinger 				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
126141861fa8SNicholas Bellinger 
126241861fa8SNicholas Bellinger 			if (sdt->app_tag == cpu_to_be16(0xffff)) {
126341861fa8SNicholas Bellinger 				sector++;
126441861fa8SNicholas Bellinger 				offset += sizeof(struct se_dif_v1_tuple);
126541861fa8SNicholas Bellinger 				continue;
126641861fa8SNicholas Bellinger 			}
126741861fa8SNicholas Bellinger 
126841861fa8SNicholas Bellinger 			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
126941861fa8SNicholas Bellinger 					       ei_lba);
127041861fa8SNicholas Bellinger 			if (rc) {
127141861fa8SNicholas Bellinger 				kunmap_atomic(paddr);
127241861fa8SNicholas Bellinger 				kunmap_atomic(daddr);
127376736db3SSagi Grimberg 				cmd->bad_sector = sector;
127441861fa8SNicholas Bellinger 				return rc;
127541861fa8SNicholas Bellinger 			}
127641861fa8SNicholas Bellinger 
127741861fa8SNicholas Bellinger 			sector++;
127841861fa8SNicholas Bellinger 			ei_lba++;
127941861fa8SNicholas Bellinger 			offset += sizeof(struct se_dif_v1_tuple);
128041861fa8SNicholas Bellinger 		}
128141861fa8SNicholas Bellinger 
128241861fa8SNicholas Bellinger 		kunmap_atomic(paddr);
128341861fa8SNicholas Bellinger 		kunmap_atomic(daddr);
128441861fa8SNicholas Bellinger 	}
128541861fa8SNicholas Bellinger 	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
128641861fa8SNicholas Bellinger 
128741861fa8SNicholas Bellinger 	return 0;
128841861fa8SNicholas Bellinger }
128941861fa8SNicholas Bellinger EXPORT_SYMBOL(sbc_dif_verify_read);
1290