/*
 * sd_dif.c - SCSI Data Integrity Field
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include <net/checksum.h>

#include "sd.h"

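/*
 * The guard tag in each protection tuple can be either a T10 CRC or an
 * IP checksum, depending on what the HBA advertises (see the
 * SHOST_DIX_GUARD_IP test in sd_dif_config_host() below).  csum_fn
 * abstracts over the two so the generate/verify helpers can be shared.
 */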
typedef __u16 (csum_fn) (void *, unsigned int);

static __u16 sd_dif_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

static __u16 sd_dif_ip_fn(void *data, unsigned int len)
{
	return ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag.
 */
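/*
 * Each logical block is covered by one 8-byte tuple.  For orientation,
 * struct sd_dif_tuple (defined in sd.h) looks roughly like this:
 *
 *	struct sd_dif_tuple {
 *		__be16 guard_tag;	checksum over the data block
 *		__be16 app_tag;		opaque, application owned
 *		__be32 ref_tag;		low 32 bits of the target LBA
 *	};
 *
 * (Sketch only -- see sd.h for the authoritative definition.)
 */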
static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;
	unsigned int i;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		sdt->guard_tag = fn(buf, bix->sector_size);
		sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
		sdt->app_tag = 0;

		buf += bix->sector_size;
		sector++;
	}
}

static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
{
	sd_dif_type1_generate(bix, sd_dif_crc_fn);
}

static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
{
	sd_dif_type1_generate(bix, sd_dif_ip_fn);
}

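/*
 * Verify each tuple against the data: the ref tag must match the low
 * 32 bits of the expected LBA and the guard tag must match a freshly
 * computed checksum.  An app tag of 0xffff is the escape value for a
 * block the target never wrote, so checking stops there.
 */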
static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;
	unsigned int i;
	__u16 csum;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		/* Unwritten sectors */
		if (sdt->app_tag == 0xffff)
			return 0;

		if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
			printk(KERN_ERR
			       "%s: ref tag error on sector %lu (rcvd %u)\n",
			       bix->disk_name, (unsigned long)sector,
			       be32_to_cpu(sdt->ref_tag));
			return -EIO;
		}

		csum = fn(buf, bix->sector_size);

		if (sdt->guard_tag != csum) {
			printk(KERN_ERR "%s: guard tag error on sector %lu " \
			       "(rcvd %04x, data %04x)\n", bix->disk_name,
			       (unsigned long)sector,
			       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
			return -EIO;
		}

		buf += bix->sector_size;
		sector++;
	}

	return 0;
}

static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
{
	return sd_dif_type1_verify(bix, sd_dif_crc_fn);
}

static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
{
	return sd_dif_type1_verify(bix, sd_dif_ip_fn);
}

/*
 * Functions for interleaving and deinterleaving application tags
 */
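/*
 * The block layer hands application tags over as a flat byte array,
 * two bytes per sector for Type 1/2.  These helpers pack that array
 * into and out of the app_tag field of each tuple.  0xffff is rejected
 * in set_tag because it is the "don't check this block" escape value.
 */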
static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
		sdt->app_tag = tag[j] << 8 | tag[j+1];
		BUG_ON(sdt->app_tag == 0xffff);
	}
}

static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
		tag[j] = (sdt->app_tag & 0xff00) >> 8;
		tag[j+1] = sdt->app_tag & 0xff;
	}
}

static struct blk_integrity dif_type1_integrity_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= sd_dif_type1_generate_crc,
	.verify_fn		= sd_dif_type1_verify_crc,
	.get_tag_fn		= sd_dif_type1_get_tag,
	.set_tag_fn		= sd_dif_type1_set_tag,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,
};

static struct blk_integrity dif_type1_integrity_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= sd_dif_type1_generate_ip,
	.verify_fn		= sd_dif_type1_verify_ip,
	.get_tag_fn		= sd_dif_type1_get_tag,
	.set_tag_fn		= sd_dif_type1_set_tag,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,
};
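
/*
 * tag_size starts out as 0; sd_dif_config_host() bumps it once the
 * device reports ATO (app tag ownership), which is what lets the block
 * layer carry per-sector tags and use the set/get helpers above.
 */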


/*
 * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
 * tag space.
 */
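/*
 * The target does not interpret the ref tag for Type 3, so generation
 * simply zeroes it and verification below only checks the guard.
 */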
static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	unsigned int i;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		sdt->guard_tag = fn(buf, bix->sector_size);
		sdt->ref_tag = 0;
		sdt->app_tag = 0;

		buf += bix->sector_size;
	}
}

static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
{
	sd_dif_type3_generate(bix, sd_dif_crc_fn);
}

static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
{
	sd_dif_type3_generate(bix, sd_dif_ip_fn);
}

static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;
	unsigned int i;
	__u16 csum;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		/* Unwritten sectors */
		if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
			return 0;

		csum = fn(buf, bix->sector_size);

		if (sdt->guard_tag != csum) {
			printk(KERN_ERR "%s: guard tag error on sector %lu " \
			       "(rcvd %04x, data %04x)\n", bix->disk_name,
			       (unsigned long)sector,
			       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
			return -EIO;
		}

		buf += bix->sector_size;
		sector++;
	}

	return 0;
}

static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
{
	return sd_dif_type3_verify(bix, sd_dif_crc_fn);
}

static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
{
	return sd_dif_type3_verify(bix, sd_dif_ip_fn);
}

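/*
 * For Type 3 all six non-guard bytes are application owned, so the
 * flat tag buffer carries 6 bytes per sector: 2 app tag bytes followed
 * by 4 ref tag bytes.
 */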
static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
		sdt->app_tag = tag[j] << 8 | tag[j+1];
		sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
			tag[j+4] << 8 | tag[j+5];
	}
}

static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	/* 6 tag bytes per sector, mirroring sd_dif_type3_set_tag() above */
	for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
		tag[j] = (sdt->app_tag & 0xff00) >> 8;
		tag[j+1] = sdt->app_tag & 0xff;
		tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
		tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
		tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
		tag[j+5] = sdt->ref_tag & 0xff;
		BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
	}
}

static struct blk_integrity dif_type3_integrity_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= sd_dif_type3_generate_crc,
	.verify_fn		= sd_dif_type3_verify_crc,
	.get_tag_fn		= sd_dif_type3_get_tag,
	.set_tag_fn		= sd_dif_type3_set_tag,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,
};

static struct blk_integrity dif_type3_integrity_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= sd_dif_type3_generate_ip,
	.verify_fn		= sd_dif_type3_verify_ip,
	.get_tag_fn		= sd_dif_type3_get_tag,
	.set_tag_fn		= sd_dif_type3_set_tag,
	.tuple_size		= sizeof(struct sd_dif_tuple),
	.tag_size		= 0,
};

/*
 * Configure exchange of protection information between OS and HBA.
 */
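/*
 * DIF covers protection information exchanged between the HBA and the
 * storage device; DIX covers the exchange between the OS and the HBA.
 * If the HBA cannot do DIX for the drive's protection type we fall
 * back to DIX Type 0 (checksummed data but no protection sent to the
 * target), which is why dif is cleared below in that case.
 *
 * As a rough illustration (not taken from this file), an HBA driver
 * typically advertises these capabilities at host registration time
 * along the lines of:
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */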
void sd_dif_config_host(struct scsi_disk *sdkp)
{
	struct scsi_device *sdp = sdkp->device;
	struct gendisk *disk = sdkp->disk;
	u8 type = sdkp->protection_type;
	int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, type);
	dix = scsi_host_dix_capable(sdp->host, type);

	if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
		dif = 0; dix = 1;
	}

	if (!dix)
		return;

	/* Enable DMA of protection information */
	if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
		if (type == SD_DIF_TYPE3_PROTECTION)
			blk_integrity_register(disk, &dif_type3_integrity_ip);
		else
			blk_integrity_register(disk, &dif_type1_integrity_ip);
	else
		if (type == SD_DIF_TYPE3_PROTECTION)
			blk_integrity_register(disk, &dif_type3_integrity_crc);
		else
			blk_integrity_register(disk, &dif_type1_integrity_crc);

	sd_printk(KERN_NOTICE, sdkp,
		  "Enabling DIX %s protection\n", disk->integrity->name);

	/* Signal to block layer that we support sector tagging */
	if (dif && type && sdkp->ATO) {
		if (type == SD_DIF_TYPE3_PROTECTION)
			disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
		else
			disk->integrity->tag_size = sizeof(u16);

		sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
			  disk->integrity->tag_size);
	}
}

/*
 * The virtual start sector is the one that was originally submitted
 * by the block layer.  Due to partitioning, MD/DM cloning, etc. the
 * actual physical start sector is likely to be different.  Remap
 * protection information to match the physical LBA.
 *
 * From a protocol perspective there's a slight difference between
 * Type 1 and 2.  The latter uses 32-byte CDBs exclusively, and the
 * reference tag is seeded in the CDB.  This gives us the potential to
 * avoid virt->phys remapping during write.  However, at read time we
 * don't know whether the virt sector is the same as when we wrote it
 * (we could be reading from the real disk as opposed to the MD/DM
 * device), so we always remap Type 2, making it identical to Type 1.
 *
 * Type 3 does not have a reference tag so no remapping is required.
 */
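/*
 * Example: a bio aimed at virtual sector 0 of a partition starting at
 * LBA 2048 is generated with ref tags 0, 1, 2, ...; sd_dif_prepare()
 * rewrites them to 2048, 2049, ... before the command goes out, and
 * sd_dif_complete() maps them back so the integrity payload the block
 * layer sees matches what it originally submitted.
 */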
void sd_dif_prepare(struct request *rq, sector_t hw_sector,
		    unsigned int sector_sz)
{
	const int tuple_sz = sizeof(struct sd_dif_tuple);
	struct bio *bio;
	struct scsi_disk *sdkp;
	struct sd_dif_tuple *sdt;
	u32 phys, virt;

	sdkp = rq->bio->bi_bdev->bd_disk->private_data;

	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
		return;

	phys = hw_sector & 0xffffffff;

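	/*
	 * Walk each bio in the request and patch the ref tags in its
	 * integrity payload.  A bio is flagged once remapped so a
	 * requeued or retried request is not remapped a second time.
	 */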
	__rq_for_each_bio(bio, rq) {
		struct bio_vec iv;
		struct bvec_iter iter;
		unsigned int j;

		/* Already remapped? */
		if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
			break;

		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;

		bip_for_each_vec(iv, bio->bi_integrity, iter) {
			sdt = kmap_atomic(iv.bv_page)
				+ iv.bv_offset;

			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {

				if (be32_to_cpu(sdt->ref_tag) == virt)
					sdt->ref_tag = cpu_to_be32(phys);

				virt++;
				phys++;
			}

			kunmap_atomic(sdt);
		}

		bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
	}
}

/*
 * Remap physical sector values in the reference tag to the virtual
 * values expected by the block layer.
 */
void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
{
	const int tuple_sz = sizeof(struct sd_dif_tuple);
	struct scsi_disk *sdkp;
	struct bio *bio;
	struct sd_dif_tuple *sdt;
	unsigned int j, sectors, sector_sz;
	u32 phys, virt;

	sdkp = scsi_disk(scmd->request->rq_disk);

	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
		return;

	sector_sz = scmd->device->sector_size;
	sectors = good_bytes / sector_sz;

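	/*
	 * blk_rq_pos() is in 512-byte units; for a 4096-byte logical
	 * block device, shift down by 3 to get the block number the
	 * ref tags were seeded with.
	 */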
	phys = blk_rq_pos(scmd->request) & 0xffffffff;
	if (sector_sz == 4096)
		phys >>= 3;

	__rq_for_each_bio(bio, scmd->request) {
		struct bio_vec iv;
		struct bvec_iter iter;

		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;

		bip_for_each_vec(iv, bio->bi_integrity, iter) {
			sdt = kmap_atomic(iv.bv_page)
				+ iv.bv_offset;

			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {

				if (sectors == 0) {
					kunmap_atomic(sdt);
					return;
				}

				if (be32_to_cpu(sdt->ref_tag) == phys)
					sdt->ref_tag = cpu_to_be32(virt);

				virt++;
				phys++;
				sectors--;
			}

			kunmap_atomic(sdt);
		}
	}
}