/*
 * sd_dif.c - SCSI Data Integrity Field
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include <net/checksum.h>

#include "sd.h"

typedef __u16 (csum_fn) (void *, unsigned int);

static __u16 sd_dif_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

static __u16 sd_dif_ip_fn(void *data, unsigned int len)
{
	return ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag.
 */
static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;
	unsigned int i;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		sdt->guard_tag = fn(buf, bix->sector_size);
		sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
		sdt->app_tag = 0;

		buf += bix->sector_size;
		sector++;
	}
}

static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
{
	sd_dif_type1_generate(bix, sd_dif_crc_fn);
}

static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
{
	sd_dif_type1_generate(bix, sd_dif_ip_fn);
}

static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;
	unsigned int i;
	__u16 csum;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		/* Unwritten sectors */
		if (sdt->app_tag == 0xffff)
			return 0;

		/* Bad ref tag received from disk */
		if (sdt->ref_tag == 0xffffffff) {
			printk(KERN_ERR
			       "%s: bad phys ref tag on sector %lu\n",
			       bix->disk_name, (unsigned long)sector);
			return -EIO;
		}

		if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
			printk(KERN_ERR
			       "%s: ref tag error on sector %lu (rcvd %u)\n",
			       bix->disk_name, (unsigned long)sector,
			       be32_to_cpu(sdt->ref_tag));
			return -EIO;
		}

		csum = fn(buf, bix->sector_size);

		if (sdt->guard_tag != csum) {
			printk(KERN_ERR "%s: guard tag error on sector %lu " \
			       "(rcvd %04x, data %04x)\n", bix->disk_name,
			       (unsigned long)sector,
			       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
			return -EIO;
		}

		buf += bix->sector_size;
		sector++;
	}

	return 0;
}
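
/*
 * For reference: the 8-byte protection tuple that the generate and
 * verify routines above walk is declared in sd.h, along the lines of
 * the following sketch (field names as used throughout this file):
 *
 *	struct sd_dif_tuple {
 *		__be16 guard_tag;	// checksum of the data sector
 *		__be16 app_tag;		// opaque application storage
 *		__be32 ref_tag;		// lower 32 bits of the target LBA
 *	};
 *
 * One tuple is exchanged per logical sector of data.
 */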
static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
{
	return sd_dif_type1_verify(bix, sd_dif_crc_fn);
}

static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
{
	return sd_dif_type1_verify(bix, sd_dif_ip_fn);
}

/*
 * Functions for interleaving and deinterleaving application tags
 */
static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
		sdt->app_tag = tag[j] << 8 | tag[j+1];
		BUG_ON(sdt->app_tag == 0xffff);
	}
}

static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
		tag[j] = (sdt->app_tag & 0xff00) >> 8;
		tag[j+1] = sdt->app_tag & 0xff;
	}
}

static struct blk_integrity dif_type1_integrity_crc = {
	.name		= "T10-DIF-TYPE1-CRC",
	.generate_fn	= sd_dif_type1_generate_crc,
	.verify_fn	= sd_dif_type1_verify_crc,
	.get_tag_fn	= sd_dif_type1_get_tag,
	.set_tag_fn	= sd_dif_type1_set_tag,
	.tuple_size	= sizeof(struct sd_dif_tuple),
	.tag_size	= 0,
};

static struct blk_integrity dif_type1_integrity_ip = {
	.name		= "T10-DIF-TYPE1-IP",
	.generate_fn	= sd_dif_type1_generate_ip,
	.verify_fn	= sd_dif_type1_verify_ip,
	.get_tag_fn	= sd_dif_type1_get_tag,
	.set_tag_fn	= sd_dif_type1_set_tag,
	.tuple_size	= sizeof(struct sd_dif_tuple),
	.tag_size	= 0,
};
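
/*
 * Illustrative example of the Type 1 tag buffer layout (not from the
 * original source): a request spanning three sectors with app tags
 * 0x1234, 0x5678 and 0x9abc is exchanged through set_tag/get_tag as
 * the flat buffer
 *
 *	tag_buf[] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc }
 *
 * i.e. two big-endian bytes per sector.  The value 0xffff is reserved
 * to mark unwritten sectors, hence the BUG_ON in set_tag and the
 * early return in the verify routine.
 */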
/*
 * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
 * tag space.
 */
static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	unsigned int i;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		sdt->guard_tag = fn(buf, bix->sector_size);
		sdt->ref_tag = 0;
		sdt->app_tag = 0;

		buf += bix->sector_size;
	}
}

static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
{
	sd_dif_type3_generate(bix, sd_dif_crc_fn);
}

static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
{
	sd_dif_type3_generate(bix, sd_dif_ip_fn);
}

static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
{
	void *buf = bix->data_buf;
	struct sd_dif_tuple *sdt = bix->prot_buf;
	sector_t sector = bix->sector;
	unsigned int i;
	__u16 csum;

	for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
		/* Unwritten sectors */
		if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
			return 0;

		csum = fn(buf, bix->sector_size);

		if (sdt->guard_tag != csum) {
			printk(KERN_ERR "%s: guard tag error on sector %lu " \
			       "(rcvd %04x, data %04x)\n", bix->disk_name,
			       (unsigned long)sector,
			       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
			return -EIO;
		}

		buf += bix->sector_size;
		sector++;
	}

	return 0;
}

static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
{
	return sd_dif_type3_verify(bix, sd_dif_crc_fn);
}

static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
{
	return sd_dif_type3_verify(bix, sd_dif_ip_fn);
}

static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
		sdt->app_tag = tag[j] << 8 | tag[j+1];
		sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
			tag[j+4] << 8 | tag[j+5];
	}
}

static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
{
	struct sd_dif_tuple *sdt = prot;
	u8 *tag = tag_buf;
	unsigned int i, j;

	/* Six tag bytes per sector: the stride must match set_tag above */
	for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
		tag[j] = (sdt->app_tag & 0xff00) >> 8;
		tag[j+1] = sdt->app_tag & 0xff;
		tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
		tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
		tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
		tag[j+5] = sdt->ref_tag & 0xff;
		BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
	}
}

static struct blk_integrity dif_type3_integrity_crc = {
	.name		= "T10-DIF-TYPE3-CRC",
	.generate_fn	= sd_dif_type3_generate_crc,
	.verify_fn	= sd_dif_type3_verify_crc,
	.get_tag_fn	= sd_dif_type3_get_tag,
	.set_tag_fn	= sd_dif_type3_set_tag,
	.tuple_size	= sizeof(struct sd_dif_tuple),
	.tag_size	= 0,
};

static struct blk_integrity dif_type3_integrity_ip = {
	.name		= "T10-DIF-TYPE3-IP",
	.generate_fn	= sd_dif_type3_generate_ip,
	.verify_fn	= sd_dif_type3_verify_ip,
	.get_tag_fn	= sd_dif_type3_get_tag,
	.set_tag_fn	= sd_dif_type3_set_tag,
	.tuple_size	= sizeof(struct sd_dif_tuple),
	.tag_size	= 0,
};
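
/*
 * Illustrative example of the Type 3 tag buffer layout (not from the
 * original source): each sector contributes six opaque bytes -- two
 * app tag bytes followed by four ref tag bytes.  With app tag 0x1234
 * and ref tag 0x89abcdef for the first sector:
 *
 *	tag_buf[] = { 0x12, 0x34, 0x89, 0xab, 0xcd, 0xef, ... }
 *
 * The all-ones combination (app 0xffff, ref 0xffffffff) is reserved
 * to mark unwritten sectors, hence the BUG_ON in get_tag above.
 */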
/*
 * Configure exchange of protection information between OS and HBA.
 */
void sd_dif_config_host(struct scsi_disk *sdkp)
{
	struct scsi_device *sdp = sdkp->device;
	struct gendisk *disk = sdkp->disk;
	u8 type = sdkp->protection_type;
	int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, type);
	dix = scsi_host_dix_capable(sdp->host, type);

	if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
		dif = 0; dix = 1;
	}

	if (!dix)
		return;

	/* Enable DMA of protection information */
	if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
		if (type == SD_DIF_TYPE3_PROTECTION)
			blk_integrity_register(disk, &dif_type3_integrity_ip);
		else
			blk_integrity_register(disk, &dif_type1_integrity_ip);
	} else {
		if (type == SD_DIF_TYPE3_PROTECTION)
			blk_integrity_register(disk, &dif_type3_integrity_crc);
		else
			blk_integrity_register(disk, &dif_type1_integrity_crc);
	}

	sd_printk(KERN_NOTICE, sdkp,
		  "Enabling DIX %s protection\n", disk->integrity->name);

	/* Signal to block layer that we support sector tagging */
	if (dif && type && sdkp->ATO) {
		if (type == SD_DIF_TYPE3_PROTECTION)
			disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
		else
			disk->integrity->tag_size = sizeof(u16);

		sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
			  disk->integrity->tag_size);
	}
}
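
/*
 * Summary of the template selection above:
 *
 *	host guard		Type 1/2		  Type 3
 *	------------------	------------------------  ------------------------
 *	SHOST_DIX_GUARD_IP	dif_type1_integrity_ip	  dif_type3_integrity_ip
 *	CRC (otherwise)		dif_type1_integrity_crc	  dif_type3_integrity_crc
 *
 * A nonzero tag_size is only advertised to the block layer when the
 * HBA is DIF capable for the drive's protection type and the drive
 * reports ATO=1 (application tag ownership).
 */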
/*
 * The virtual start sector is the one that was originally submitted
 * by the block layer.	Due to partitioning, MD/DM cloning, etc. the
 * actual physical start sector is likely to be different.  Remap
 * protection information to match the physical LBA.
 *
 * From a protocol perspective there's a slight difference between
 * Type 1 and 2.  The latter uses 32-byte CDBs exclusively, and the
 * reference tag is seeded in the CDB.	This gives us the potential to
 * avoid virt->phys remapping during write.  However, at read time we
 * don't know whether the virt sector is the same as when we wrote it
 * (we could be reading from real disk as opposed to an MD/DM device),
 * so we always remap Type 2, making it identical to Type 1.
 *
 * Type 3 does not have a reference tag so no remapping is required.
 */
int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
{
	const int tuple_sz = sizeof(struct sd_dif_tuple);
	struct bio *bio;
	struct scsi_disk *sdkp;
	struct sd_dif_tuple *sdt;
	unsigned int i, j;
	u32 phys, virt;

	/* Already remapped? */
	if (rq->cmd_flags & REQ_INTEGRITY)
		return 0;

	sdkp = rq->bio->bi_bdev->bd_disk->private_data;

	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
		return 0;

	rq->cmd_flags |= REQ_INTEGRITY;
	phys = hw_sector & 0xffffffff;

	__rq_for_each_bio(bio, rq) {
		struct bio_vec *iv;

		virt = bio->bi_integrity->bip_sector & 0xffffffff;

		bip_for_each_vec(iv, bio->bi_integrity, i) {
			sdt = kmap_atomic(iv->bv_page, KM_USER0)
				+ iv->bv_offset;

			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {

				if (be32_to_cpu(sdt->ref_tag) != virt)
					goto error;

				sdt->ref_tag = cpu_to_be32(phys);
				virt++;
				phys++;
			}

			kunmap_atomic(sdt, KM_USER0);
		}
	}

	return 0;

error:
	kunmap_atomic(sdt, KM_USER0);
	sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
		  __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
		  be16_to_cpu(sdt->app_tag));

	return -EILSEQ;
}

/*
 * Remap physical sector values in the reference tag to the virtual
 * values expected by the block layer.
 */
void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
{
	const int tuple_sz = sizeof(struct sd_dif_tuple);
	struct scsi_disk *sdkp;
	struct bio *bio;
	struct sd_dif_tuple *sdt;
	unsigned int i, j, sectors, sector_sz;
	u32 phys, virt;

	sdkp = scsi_disk(scmd->request->rq_disk);

	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
		return;

	sector_sz = scmd->device->sector_size;
	sectors = good_bytes / sector_sz;

	phys = blk_rq_pos(scmd->request) & 0xffffffff;
	if (sector_sz == 4096)
		phys >>= 3;

	__rq_for_each_bio(bio, scmd->request) {
		struct bio_vec *iv;

		virt = bio->bi_integrity->bip_sector & 0xffffffff;

		bip_for_each_vec(iv, bio->bi_integrity, i) {
			sdt = kmap_atomic(iv->bv_page, KM_USER0)
				+ iv->bv_offset;

			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {

				if (sectors == 0) {
					kunmap_atomic(sdt, KM_USER0);
					return;
				}

				if (be32_to_cpu(sdt->ref_tag) != phys &&
				    sdt->app_tag != 0xffff)
					sdt->ref_tag = 0xffffffff; /* Bad ref */
				else
					sdt->ref_tag = cpu_to_be32(virt);

				virt++;
				phys++;
				sectors--;
			}

			kunmap_atomic(sdt, KM_USER0);
		}
	}
}
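
/*
 * Worked example of the remapping (illustrative): a Type 1 write to
 * virtual sector 0 of a partition starting at LBA 2048 arrives with
 * ref tags seeded 0, 1, 2, ... by sd_dif_type1_generate().
 * sd_dif_prepare() reseeds them to 2048, 2049, 2050, ... before the
 * command is issued, and sd_dif_complete() performs the inverse
 * mapping on completion so that the block layer's verify step sees
 * the virtual LBAs it originally generated.
 */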