/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "blk.h"

static struct kmem_cache *integrity_cachep;

static const char *bi_unsupported_name = "unsupported";

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
				goto new_segment;

			if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
				goto new_segment;

			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
EXPORT_SYMBOL(blk_rq_count_integrity_sg);
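/*
 * Illustration only (not part of the original file): a minimal sketch of
 * how a driver might size and fill an integrity scatterlist for a request.
 * "prot_sgl" and SOME_MAX_SEGS are hypothetical names; blk_rq_map_integrity_sg()
 * below fills the table that blk_rq_count_integrity_sg() sized:
 *
 *	struct scatterlist prot_sgl[SOME_MAX_SEGS];
 *	int nsegs;
 *
 *	if (blk_integrity_rq(rq)) {
 *		nsegs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 *		if (nsegs > queue_max_integrity_segments(rq->q))
 *			return -EIO;
 *		nsegs = blk_rq_map_integrity_sg(rq->q, rq->bio, prot_sgl);
 *		// hand prot_sgl[0..nsegs-1] to the HBA's DMA engine
 *	}
 */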
/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in request into a
 * scatterlist. The scatterlist must be big enough to hold all
 * elements. I.e. sized using blk_rq_count_integrity_sg().
 */
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
			    struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct scatterlist *sg = NULL;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
				goto new_segment;

			if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
				goto new_segment;

			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);

/**
 * blk_integrity_compare - Compare integrity profile of two disks
 * @gd1:	Disk to compare
 * @gd2:	Disk to compare
 *
 * Description: Meta-devices like DM and MD need to verify that all
 * sub-devices use the same integrity format before advertising to
 * upper layers that they can send/receive integrity metadata. This
 * function can be used to check whether two gendisk devices have
 * compatible integrity formats.
 */
int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
{
	struct blk_integrity *b1 = gd1->integrity;
	struct blk_integrity *b2 = gd2->integrity;

	if (!b1 && !b2)
		return 0;

	if (!b1 || !b2)
		return -1;

	if (b1->interval != b2->interval) {
		pr_err("%s: %s/%s protection interval %u != %u\n",
		       __func__, gd1->disk_name, gd2->disk_name,
		       b1->interval, b2->interval);
		return -1;
	}

	if (b1->tuple_size != b2->tuple_size) {
		printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->tuple_size, b2->tuple_size);
		return -1;
	}

	if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
		printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->tag_size, b2->tag_size);
		return -1;
	}

	if (strcmp(b1->name, b2->name)) {
		printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->name, b2->name);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(blk_integrity_compare);
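/*
 * Illustration only (not part of the original file): a stacking driver
 * such as MD could use blk_integrity_compare() roughly as below before
 * advertising integrity support for the array. "array_disk",
 * "reference_disk" and "other_disk" are hypothetical member gendisks:
 *
 *	if (blk_integrity_compare(reference_disk, other_disk) < 0)
 *		return;		// profiles differ, stay integrity-unaware
 *
 *	blk_integrity_register(array_disk, reference_disk->integrity);
 */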
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	return true;
}
EXPORT_SYMBOL(blk_integrity_merge_rq);

bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;
	struct bio *next = bio->bi_next;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(bio)->bip_flags)
		return false;

	bio->bi_next = NULL;
	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	bio->bi_next = next;

	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	req->nr_integrity_segments += nr_integrity_segs;

	return true;
}
EXPORT_SYMBOL(blk_integrity_merge_bio);

struct integrity_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_integrity *, char *);
	ssize_t (*store)(struct blk_integrity *, const char *, size_t);
};

static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
				   char *page)
{
	struct blk_integrity *bi =
		container_of(kobj, struct blk_integrity, kobj);
	struct integrity_sysfs_entry *entry =
		container_of(attr, struct integrity_sysfs_entry, attr);

	return entry->show(bi, page);
}

static ssize_t integrity_attr_store(struct kobject *kobj,
				    struct attribute *attr, const char *page,
				    size_t count)
{
	struct blk_integrity *bi =
		container_of(kobj, struct blk_integrity, kobj);
	struct integrity_sysfs_entry *entry =
		container_of(attr, struct integrity_sysfs_entry, attr);
	ssize_t ret = 0;

	if (entry->store)
		ret = entry->store(bi, page, count);

	return ret;
}

static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
{
	if (bi != NULL && bi->name != NULL)
		return sprintf(page, "%s\n", bi->name);
	else
		return sprintf(page, "none\n");
}

static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
{
	if (bi != NULL)
		return sprintf(page, "%u\n", bi->tag_size);
	else
		return sprintf(page, "0\n");
}

static ssize_t integrity_verify_store(struct blk_integrity *bi,
				      const char *page, size_t count)
{
	char *p = (char *) page;
	unsigned long val = simple_strtoul(p, &p, 10);

	if (val)
		bi->flags |= BLK_INTEGRITY_VERIFY;
	else
		bi->flags &= ~BLK_INTEGRITY_VERIFY;

	return count;
}

static ssize_t integrity_verify_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_VERIFY) != 0);
}

static ssize_t integrity_generate_store(struct blk_integrity *bi,
					const char *page, size_t count)
{
	char *p = (char *) page;
	unsigned long val = simple_strtoul(p, &p, 10);

	if (val)
		bi->flags |= BLK_INTEGRITY_GENERATE;
	else
		bi->flags &= ~BLK_INTEGRITY_GENERATE;

	return count;
}

static ssize_t integrity_generate_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_GENERATE) != 0);
}

static ssize_t integrity_device_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%u\n",
		       (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) != 0);
}
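/*
 * Note (added for clarity, not in the original file): the attribute
 * entries below are exposed by blk_integrity_register() under the disk's
 * kobject, i.e. as /sys/block/<disk>/integrity/{format,tag_size,
 * read_verify,write_generate,device_is_integrity_capable}. Writing 0 or 1
 * to read_verify and write_generate toggles the BLK_INTEGRITY_VERIFY and
 * BLK_INTEGRITY_GENERATE flags via the store handlers above.
 */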
static struct integrity_sysfs_entry integrity_format_entry = {
	.attr = { .name = "format", .mode = S_IRUGO },
	.show = integrity_format_show,
};

static struct integrity_sysfs_entry integrity_tag_size_entry = {
	.attr = { .name = "tag_size", .mode = S_IRUGO },
	.show = integrity_tag_size_show,
};

static struct integrity_sysfs_entry integrity_verify_entry = {
	.attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR },
	.show = integrity_verify_show,
	.store = integrity_verify_store,
};

static struct integrity_sysfs_entry integrity_generate_entry = {
	.attr = { .name = "write_generate", .mode = S_IRUGO | S_IWUSR },
	.show = integrity_generate_show,
	.store = integrity_generate_store,
};

static struct integrity_sysfs_entry integrity_device_entry = {
	.attr = { .name = "device_is_integrity_capable", .mode = S_IRUGO },
	.show = integrity_device_show,
};

static struct attribute *integrity_attrs[] = {
	&integrity_format_entry.attr,
	&integrity_tag_size_entry.attr,
	&integrity_verify_entry.attr,
	&integrity_generate_entry.attr,
	&integrity_device_entry.attr,
	NULL,
};

static const struct sysfs_ops integrity_ops = {
	.show = &integrity_attr_show,
	.store = &integrity_attr_store,
};

static int __init blk_dev_integrity_init(void)
{
	integrity_cachep = kmem_cache_create("blkdev_integrity",
					     sizeof(struct blk_integrity),
					     0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_dev_integrity_init);

static void blk_integrity_release(struct kobject *kobj)
{
	struct blk_integrity *bi =
		container_of(kobj, struct blk_integrity, kobj);

	kmem_cache_free(integrity_cachep, bi);
}

static struct kobj_type integrity_ktype = {
	.default_attrs = integrity_attrs,
	.sysfs_ops = &integrity_ops,
	.release = blk_integrity_release,
};

bool blk_integrity_is_initialized(struct gendisk *disk)
{
	struct blk_integrity *bi = blk_get_integrity(disk);

	return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
}
EXPORT_SYMBOL(blk_integrity_is_initialized);
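/*
 * Illustration only (not part of the original file): a consumer that
 * wants to use a disk's integrity profile might guard on the check
 * above before relying on the callbacks. "disk" is a hypothetical
 * gendisk pointer:
 *
 *	struct blk_integrity *bi = blk_get_integrity(disk);
 *
 *	if (bi && blk_integrity_is_initialized(disk) && bi->verify_fn) {
 *		// safe to rely on bi->tuple_size, bi->verify_fn, etc.
 *	}
 */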
/**
 * blk_integrity_register - Register a gendisk as being integrity-capable
 * @disk:	struct gendisk pointer to make integrity-aware
 * @template:	optional integrity profile to register
 *
 * Description: When a device needs to advertise itself as being able
 * to send/receive integrity metadata it must use this function to
 * register the capability with the block layer. The template is a
 * blk_integrity struct with values appropriate for the underlying
 * hardware. If template is NULL the new profile is allocated but
 * not filled out. See Documentation/block/data-integrity.txt.
 */
int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
	struct blk_integrity *bi;

	BUG_ON(disk == NULL);

	if (disk->integrity == NULL) {
		bi = kmem_cache_alloc(integrity_cachep,
				      GFP_KERNEL | __GFP_ZERO);
		if (!bi)
			return -1;

		if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
					 &disk_to_dev(disk)->kobj,
					 "%s", "integrity")) {
			kmem_cache_free(integrity_cachep, bi);
			return -1;
		}

		kobject_uevent(&bi->kobj, KOBJ_ADD);

		bi->flags |= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE;
		bi->interval = queue_logical_block_size(disk->queue);
		disk->integrity = bi;
	} else
		bi = disk->integrity;

	/* Use the provided profile as template */
	if (template != NULL) {
		bi->name = template->name;
		bi->generate_fn = template->generate_fn;
		bi->verify_fn = template->verify_fn;
		bi->tuple_size = template->tuple_size;
		bi->tag_size = template->tag_size;
		bi->flags |= template->flags;
	} else
		bi->name = bi_unsupported_name;

	disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;

	return 0;
}
EXPORT_SYMBOL(blk_integrity_register);

/**
 * blk_integrity_unregister - Remove block integrity profile
 * @disk:	disk whose integrity profile to deallocate
 *
 * Description: This function frees all memory used by the block
 * integrity profile. To be called at device teardown.
 */
void blk_integrity_unregister(struct gendisk *disk)
{
	struct blk_integrity *bi;

	if (!disk || !disk->integrity)
		return;

	disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;

	bi = disk->integrity;

	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
	kobject_del(&bi->kobj);
	kobject_put(&bi->kobj);
	disk->integrity = NULL;
}
EXPORT_SYMBOL(blk_integrity_unregister);
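/*
 * Illustration only (not part of the original file): a low-level driver
 * that supports protection information might wire up the register /
 * unregister pair during probe and removal. The profile fields shown are
 * hypothetical and would normally point at the driver's own callbacks:
 *
 *	static struct blk_integrity my_profile = {
 *		.name		= "T10-DIF-TYPE1-CRC",
 *		.generate_fn	= my_generate_fn,
 *		.verify_fn	= my_verify_fn,
 *		.tuple_size	= 8,
 *		.tag_size	= 0,
 *	};
 *
 *	// probe: advertise the capability to the block layer
 *	blk_integrity_register(disk, &my_profile);
 *
 *	// remove: tear the profile down before put_disk()
 *	blk_integrity_unregister(disk);
 */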