xref: /openbmc/linux/drivers/scsi/sd_dif.c (revision 3aec2f41)
/*
 * sd_dif.c - SCSI Data Integrity Field
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include <net/checksum.h>

#include "sd.h"

typedef __u16 (csum_fn) (void *, unsigned int);

static __u16 sd_dif_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

static __u16 sd_dif_ip_fn(void *data, unsigned int len)
{
	return ip_compute_csum(data, len);
}
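
/*
 * Two guard tag flavours are supported: the T10-mandated CRC16 and the
 * IP checksum used on the DIX host interface.  The IP checksum is
 * cheaper to compute on the host CPU; an HBA advertising
 * SHOST_DIX_GUARD_IP accepts it from the OS and converts it to a T10
 * CRC before the protection information is sent to the target.
 */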

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag.
 */
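/*
 * For reference, the 8-byte protection tuple carried in the integrity
 * buffer is defined in sd.h roughly as follows, all fields big-endian:
 *
 *	struct sd_dif_tuple {
 *		__be16 guard_tag;	- checksum of one data interval
 *		__be16 app_tag;		- opaque application storage
 *		__be32 ref_tag;		- target LBA for Type 1 and 2
 *	};
 */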
static void sd_dif_type1_generate(struct blk_integrity_iter *iter, csum_fn *fn)
{
	void *buf = iter->data_buf;
	struct sd_dif_tuple *sdt = iter->prot_buf;
	sector_t seed = iter->seed;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval, sdt++) {
		sdt->guard_tag = fn(buf, iter->interval);
		sdt->ref_tag = cpu_to_be32(seed & 0xffffffff);
		sdt->app_tag = 0;

		buf += iter->interval;
		seed++;
	}
}

static int sd_dif_type1_generate_crc(struct blk_integrity_iter *iter)
{
	sd_dif_type1_generate(iter, sd_dif_crc_fn);
	return 0;
}

static int sd_dif_type1_generate_ip(struct blk_integrity_iter *iter)
{
	sd_dif_type1_generate(iter, sd_dif_ip_fn);
	return 0;
}

static int sd_dif_type1_verify(struct blk_integrity_iter *iter, csum_fn *fn)
{
	void *buf = iter->data_buf;
	struct sd_dif_tuple *sdt = iter->prot_buf;
	sector_t seed = iter->seed;
	unsigned int i;
	__u16 csum;

	for (i = 0 ; i < iter->data_size ; i += iter->interval, sdt++) {
		/* Unwritten sectors */
		if (sdt->app_tag == 0xffff)
			return 0;

		if (be32_to_cpu(sdt->ref_tag) != (seed & 0xffffffff)) {
			printk(KERN_ERR
			       "%s: ref tag error on sector %lu (rcvd %u)\n",
			       iter->disk_name, (unsigned long)seed,
			       be32_to_cpu(sdt->ref_tag));
			return -EIO;
		}

		csum = fn(buf, iter->interval);

		if (sdt->guard_tag != csum) {
			printk(KERN_ERR "%s: guard tag error on sector %lu " \
			       "(rcvd %04x, data %04x)\n", iter->disk_name,
			       (unsigned long)seed,
			       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
			return -EIO;
		}

		buf += iter->interval;
		seed++;
	}

	return 0;
}

static int sd_dif_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return sd_dif_type1_verify(iter, sd_dif_crc_fn);
}

static int sd_dif_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return sd_dif_type1_verify(iter, sd_dif_ip_fn);
}

static struct blk_integrity dif_type1_integrity_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= sd_dif_type1_generate_crc,
	.verify_fn		= sd_dif_type1_verify_crc,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,
};

static struct blk_integrity dif_type1_integrity_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= sd_dif_type1_generate_ip,
	.verify_fn		= sd_dif_type1_verify_ip,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,
};


/*
 * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
 * tag space.
 */
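/*
 * Because the target treats both tags as opaque, generation below
 * simply zeroes them and verification only checks the guard tag.  An
 * all-ones app tag combined with an all-ones ref tag marks an
 * unwritten sector whose protection information must be ignored.
 */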
static void sd_dif_type3_generate(struct blk_integrity_iter *iter, csum_fn *fn)
{
	void *buf = iter->data_buf;
	struct sd_dif_tuple *sdt = iter->prot_buf;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval, sdt++) {
		sdt->guard_tag = fn(buf, iter->interval);
		sdt->ref_tag = 0;
		sdt->app_tag = 0;

		buf += iter->interval;
	}
}

static int sd_dif_type3_generate_crc(struct blk_integrity_iter *iter)
{
	sd_dif_type3_generate(iter, sd_dif_crc_fn);
	return 0;
}

static int sd_dif_type3_generate_ip(struct blk_integrity_iter *iter)
{
	sd_dif_type3_generate(iter, sd_dif_ip_fn);
	return 0;
}

static int sd_dif_type3_verify(struct blk_integrity_iter *iter, csum_fn *fn)
{
	void *buf = iter->data_buf;
	struct sd_dif_tuple *sdt = iter->prot_buf;
	sector_t seed = iter->seed;
	unsigned int i;
	__u16 csum;

	for (i = 0 ; i < iter->data_size ; i += iter->interval, sdt++) {
		/* Unwritten sectors */
		if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
			return 0;

		csum = fn(buf, iter->interval);

		if (sdt->guard_tag != csum) {
			printk(KERN_ERR "%s: guard tag error on sector %lu " \
			       "(rcvd %04x, data %04x)\n", iter->disk_name,
			       (unsigned long)seed,
			       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
			return -EIO;
		}

		buf += iter->interval;
		seed++;
	}

	return 0;
}

static int sd_dif_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return sd_dif_type3_verify(iter, sd_dif_crc_fn);
}

static int sd_dif_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return sd_dif_type3_verify(iter, sd_dif_ip_fn);
}

static struct blk_integrity dif_type3_integrity_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= sd_dif_type3_generate_crc,
	.verify_fn		= sd_dif_type3_verify_crc,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,
};

static struct blk_integrity dif_type3_integrity_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= sd_dif_type3_generate_ip,
	.verify_fn		= sd_dif_type3_verify_ip,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,
};

/*
 * Configure exchange of protection information between OS and HBA.
 */
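/*
 * DIF refers to protection information exchanged between the HBA and
 * the storage device; DIX refers to protection information exchanged
 * between the OS and the HBA.  A DIX-only setup (type 0) is also
 * possible: the HBA generates and checks the protection information
 * even though the disk itself is not formatted with DIF.  Based on
 * what the HBA and disk support, one of the four integrity profiles
 * defined above is registered with the block layer.
 */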
void sd_dif_config_host(struct scsi_disk *sdkp)
{
	struct scsi_device *sdp = sdkp->device;
	struct gendisk *disk = sdkp->disk;
	u8 type = sdkp->protection_type;
	int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, type);
	dix = scsi_host_dix_capable(sdp->host, type);

	if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
		dif = 0; dix = 1;
	}

	if (!dix)
		return;

	/* Enable DMA of protection information */
	if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
		if (type == SD_DIF_TYPE3_PROTECTION)
			blk_integrity_register(disk, &dif_type3_integrity_ip);
		else
			blk_integrity_register(disk, &dif_type1_integrity_ip);
	else
		if (type == SD_DIF_TYPE3_PROTECTION)
			blk_integrity_register(disk, &dif_type3_integrity_crc);
		else
			blk_integrity_register(disk, &dif_type1_integrity_crc);

	sd_printk(KERN_NOTICE, sdkp,
		  "Enabling DIX %s protection\n", disk->integrity->name);

	/* Signal to block layer that we support sector tagging */
	if (dif && type) {

		disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;

		/* App tag space is only usable if the device owns it (ATO) */
		if (!sdkp->ATO)
			return;

		if (type == SD_DIF_TYPE3_PROTECTION)
			disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
		else
			disk->integrity->tag_size = sizeof(u16);

		sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
			  disk->integrity->tag_size);
	}
}
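
/*
 * A minimal sketch of the counterpart in a low-level driver: the HBA
 * advertises its protection and guard capabilities at host allocation
 * time so that the scsi_host_dif_capable()/scsi_host_dix_capable()
 * checks above succeed.  "shost" stands for the driver's Scsi_Host.
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
 */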

/*
 * The virtual start sector is the one that was originally submitted
 * by the block layer.  Due to partitioning, MD/DM cloning, etc. the
 * actual physical start sector is likely to be different.  Remap
 * protection information to match the physical LBA.
 *
 * From a protocol perspective there's a slight difference between
 * Type 1 and 2.  The latter uses 32-byte CDBs exclusively, and the
 * reference tag is seeded in the CDB.  This gives us the potential to
 * avoid virt->phys remapping during write.  However, at read time we
 * don't know whether the virt sector is the same as when we wrote it
 * (we could be reading from the real disk as opposed to the MD/DM
 * device).  So we always remap Type 2, making it identical to Type 1.
 *
 * Type 3 does not have a reference tag so no remapping is required.
 */
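/*
 * Worked example, assuming a filesystem on a partition starting at
 * LBA 2048: the block layer seeds the ref tags of the first request
 * with 0, 1, 2, ... but the disk expects 2048, 2049, 2050, ...
 * sd_dif_prepare() rewrites the tags accordingly before the command is
 * issued, and sd_dif_complete() reverses the mapping on completion so
 * that the block layer's verification still sees virtual LBAs.
 */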
void sd_dif_prepare(struct request *rq, sector_t hw_sector,
		    unsigned int sector_sz)
{
	const int tuple_sz = sizeof(struct sd_dif_tuple);
	struct bio *bio;
	struct scsi_disk *sdkp;
	struct sd_dif_tuple *sdt;
	u32 phys, virt;

	sdkp = rq->bio->bi_bdev->bd_disk->private_data;

	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
		return;

	phys = hw_sector & 0xffffffff;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		struct bio_vec iv;
		struct bvec_iter iter;
		unsigned int j;

		/* Already remapped? */
		if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
			break;

		virt = bip_get_seed(bip) & 0xffffffff;

		bip_for_each_vec(iv, bip, iter) {
			sdt = kmap_atomic(iv.bv_page)
				+ iv.bv_offset;

			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {

				if (be32_to_cpu(sdt->ref_tag) == virt)
					sdt->ref_tag = cpu_to_be32(phys);

				virt++;
				phys++;
			}

			kunmap_atomic(sdt);
		}

		bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
	}
}

/*
 * Remap physical sector values in the reference tag to the virtual
 * values expected by the block layer.
 */
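/*
 * Note that completion may be partial: good_bytes bounds the number of
 * sectors that are remapped, and the walk stops once that budget is
 * spent.
 */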
void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
{
	const int tuple_sz = sizeof(struct sd_dif_tuple);
	struct scsi_disk *sdkp;
	struct bio *bio;
	struct sd_dif_tuple *sdt;
	unsigned int j, sectors, sector_sz;
	u32 phys, virt;

	sdkp = scsi_disk(scmd->request->rq_disk);

	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
		return;

	sector_sz = scmd->device->sector_size;
	sectors = good_bytes / sector_sz;

	phys = blk_rq_pos(scmd->request) & 0xffffffff;
	if (sector_sz == 4096)
		phys >>= 3;

	__rq_for_each_bio(bio, scmd->request) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		struct bio_vec iv;
		struct bvec_iter iter;

		virt = bip_get_seed(bip) & 0xffffffff;

		bip_for_each_vec(iv, bip, iter) {
			sdt = kmap_atomic(iv.bv_page)
				+ iv.bv_offset;

			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {

				if (sectors == 0) {
					kunmap_atomic(sdt);
					return;
				}

				if (be32_to_cpu(sdt->ref_tag) == phys)
					sdt->ref_tag = cpu_to_be32(virt);

				virt++;
				phys++;
				sectors--;
			}

			kunmap_atomic(sdt);
		}
	}
}