/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)

static void bio_split_pool_free(struct bio_split_pool *p)
{
	if (p->bio_split_hook)
		mempool_destroy(p->bio_split_hook);

	if (p->bio_split)
		bioset_free(p->bio_split);
}

static int bio_split_pool_init(struct bio_split_pool *p)
{
	p->bio_split = bioset_create(4, 0);
	if (!p->bio_split)
		return -ENOMEM;

	p->bio_split_hook = mempool_create_kmalloc_pool(4,
				sizeof(struct bio_split_hook));
	if (!p->bio_split_hook)
		return -ENOMEM;

	return 0;
}

/* Superblock */

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);
	sb->last_mount = le32_to_cpu(s->last_mount);
	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size = le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets = le64_to_cpu(s->nbuckets);
		sb->block_size = le16_to_cpu(s->block_size);
		sb->bucket_size = le16_to_cpu(s->bucket_size);

		sb->nr_in_set = le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio, int error)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_iter.bi_sector = SB_SECTOR;
	bio->bi_rw = REQ_SYNC|REQ_META;
	bio->bi_iter.bi_size = SB_SIZE;
	bch_bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(REQ_WRITE, bio);
}

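/*
 * Superblock writes for a backing device are serialized by sb_write_mutex;
 * the closure destructor below releases it once write_bdev_super_endio()
 * has dropped the last reference on the write closure.
 */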
static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio->bi_bdev = dc->bdev;
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, error, "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio->bi_bdev = ca->bdev;
		bio->bi_end_io = write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(error, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, unsigned long rw,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_rw = REQ_SYNC|REQ_META|rw;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (!(rw & WRITE))
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

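/*
 * Read the uuid array back in at cache set startup, from the bucket the
 * journal's uuid_bucket key points at; entries written with the pre-UUIDv1
 * layout are converted in place.
 */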
"wrote" : "read", buf); 389 390 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 391 if (!bch_is_zero(u->uuid, 16)) 392 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", 393 u - c->uuids, u->uuid, u->label, 394 u->first_reg, u->last_reg, u->invalidated); 395 396 closure_return_with_destructor(cl, uuid_io_unlock); 397 } 398 399 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 400 { 401 struct bkey *k = &j->uuid_bucket; 402 403 if (__bch_btree_ptr_invalid(c, k)) 404 return "bad uuid pointer"; 405 406 bkey_copy(&c->uuid_bucket, k); 407 uuid_io(c, READ_SYNC, k, cl); 408 409 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 410 struct uuid_entry_v0 *u0 = (void *) c->uuids; 411 struct uuid_entry *u1 = (void *) c->uuids; 412 int i; 413 414 closure_sync(cl); 415 416 /* 417 * Since the new uuid entry is bigger than the old, we have to 418 * convert starting at the highest memory address and work down 419 * in order to do it in place 420 */ 421 422 for (i = c->nr_uuids - 1; 423 i >= 0; 424 --i) { 425 memcpy(u1[i].uuid, u0[i].uuid, 16); 426 memcpy(u1[i].label, u0[i].label, 32); 427 428 u1[i].first_reg = u0[i].first_reg; 429 u1[i].last_reg = u0[i].last_reg; 430 u1[i].invalidated = u0[i].invalidated; 431 432 u1[i].flags = 0; 433 u1[i].sectors = 0; 434 } 435 } 436 437 return NULL; 438 } 439 440 static int __uuid_write(struct cache_set *c) 441 { 442 BKEY_PADDED(key) k; 443 struct closure cl; 444 closure_init_stack(&cl); 445 446 lockdep_assert_held(&bch_register_lock); 447 448 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) 449 return 1; 450 451 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 452 uuid_io(c, REQ_WRITE, &k.key, &cl); 453 closure_sync(&cl); 454 455 bkey_copy(&c->uuid_bucket, &k.key); 456 bkey_put(c, &k.key); 457 return 0; 458 } 459 460 int bch_uuid_write(struct cache_set *c) 461 { 462 int ret = __uuid_write(c); 463 464 if (!ret) 465 bch_journal_meta(c, NULL); 466 467 return ret; 468 } 469 470 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 471 { 472 struct uuid_entry *u; 473 474 for (u = c->uuids; 475 u < c->uuids + c->nr_uuids; u++) 476 if (!memcmp(u->uuid, uuid, 16)) 477 return u; 478 479 return NULL; 480 } 481 482 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 483 { 484 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 485 return uuid_find(c, zero_uuid); 486 } 487 488 /* 489 * Bucket priorities/gens: 490 * 491 * For each bucket, we store on disk its 492 * 8 bit gen 493 * 16 bit priority 494 * 495 * See alloc.c for an explanation of the gen. The priority is used to implement 496 * lru (and in the future other) cache replacement policies; for most purposes 497 * it's just an opaque integer. 498 * 499 * The gens and the priorities don't have a whole lot to do with each other, and 500 * it's actually the gens that must be written out at specific times - it's no 501 * big deal if the priorities don't get written, if we lose them we just reuse 502 * buckets in suboptimal order. 503 * 504 * On disk they're stored in a packed array, and in as many buckets are required 505 * to fit them all. The buckets we use to store them form a list; the journal 506 * header points to the first bucket, the first bucket points to the second 507 * bucket, et cetera. 
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

static void prio_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(error, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
	bio->bi_bdev = ca->bdev;
	bio->bi_rw = REQ_SYNC|REQ_META|rw;
	bio->bi_iter.bi_size = bucket_bytes(ca);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio, ca);
	closure_sync(cl);
}

#define buckets_free(c)	"free %zu, free_inc %zu, unused %zu", \
	fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets; b++)
		b->disk_gen = b->gen;

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(&ca->sb);
		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_WRITE);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	ca->need_save_prio = 0;

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++)
		ca->prio_last_buckets[i] = ca->prio_buckets[i];
}

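/*
 * Read the prio/gen chain back in at registration time, following the
 * next_bucket links that bch_prio_write() stored on disk.
 */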
static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, READ_SYNC);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open = open_dev,
	.release = release_dev,
	.ioctl = ioctl_dev,
	.owner = THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));

	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk) {
		ida_simple_remove(&bcache_minor, d->disk->first_minor);
		put_disk(d->disk);
	}

	bio_split_pool_free(&d->bio_split_hook);
	if (d->bio_split)
		bioset_free(d->bio_split);
	if (is_vmalloc_addr(d->full_dirty_stripes))
		vfree(d->full_dirty_stripes);
	else
		kfree(d->full_dirty_stripes);
	if (is_vmalloc_addr(d->stripe_sectors_dirty))
		vfree(d->stripe_sectors_dirty);
	else
		kfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

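/*
 * Common setup for both cached devices and flash-only volumes: the dirty
 * stripe tracking arrays, a minor number, the gendisk and its request queue.
 */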
static int bcache_device_init(struct bcache_device *d, unsigned block_size,
			      sector_t sectors)
{
	struct request_queue *q;
	size_t n;
	int minor;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes ||
	    d->nr_stripes > INT_MAX ||
	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
		pr_err("nr_stripes too large");
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = n < PAGE_SIZE << 6
		? kzalloc(n, GFP_KERNEL)
		: vzalloc(n);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = n < PAGE_SIZE << 6
		? kzalloc(n, GFP_KERNEL)
		: vzalloc(n);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
	if (minor < 0)
		return minor;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    bio_split_pool_init(&d->bio_split_hook) ||
	    !(d->disk = alloc_disk(1))) {
		ida_simple_remove(&bcache_minor, minor);
		return -ENOMEM;
	}

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);

	d->disk->major = bcache_major;
	d->disk->first_minor = minor;
	d->disk->fops = &bcache_ops;
	d->disk->private_data = d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue = q;
	q->queuedata = d;
	q->backing_dev_info.congested_data = d;
	q->limits.max_hw_sectors = UINT_MAX;
	q->limits.max_sectors = UINT_MAX;
	q->limits.max_segment_size = UINT_MAX;
	q->limits.max_segments = BIO_MAX_PAGES;
	q->limits.max_discard_sectors = UINT_MAX;
	q->limits.io_min = block_size;
	q->limits.logical_block_size = block_size;
	q->limits.physical_block_size = block_size;
	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char buf[SB_LABEL_SIZE + 1];
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		NULL,
		NULL,
	};

	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
	buf[SB_LABEL_SIZE] = '\0';
	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

	if (atomic_xchg(&dc->running, 1))
		return;

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/*
	 * This won't show up in the uevent file; use udevadm monitor -e
	 * instead. Only class / kset properties are persistent.
	 */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

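/*
 * Runs from the detach work item once the last reference to the cached
 * device has been dropped: scrub the set uuid from the backing superblock
 * and move the device back onto the uncached list.
 */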
static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(atomic_read(&dc->count));

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	*/

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		bch_sectors_dirty_init(dc);
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);
	kthread_stop(dc->writeback_thread);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev)) {
		if (dc->bdev->bd_disk)
			blk_sync_queue(bdev_get_queue(dc->bdev));

		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff = 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

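	/*
	 * Stripe size comes from the backing device's reported optimal I/O
	 * size (e.g. a RAID stripe width), converted to 512-byte sectors.
	 */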
	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
	if (ret)
		return ret;

	set_capacity(dc->disk.disk,
		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

	dc->disk.disk->queue->backing_dev_info.ra_pages =
		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
		    q->backing_dev_info.ra_pages);

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev,
			  struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio);
	dc->sb_bio.bi_max_vecs = 1;
	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", bdevname(bdev, name));

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	bcache_device_free(d);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	bcache_device_unlink(d);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c), u->sectors))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

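/*
 * Bring up a bcache device for every flash-only volume recorded in the
 * uuid array.
 */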
static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca)
			kobject_put(&ca->kobj);

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->fill_iter)
		mempool_destroy(c->fill_iter);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca;
	struct btree *b;
	unsigned i;

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (c->gc_thread)
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list)
		if (btree_node_dirty(b))
			bch_btree_node_write(b, NULL);

	for_each_cache(ca, c, i)
		if (ca->alloc_thread)
			kthread_stop(ca->alloc_thread);

	closure_return(cl);
}

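/*
 * First leg of cache set teardown: detach backing devices (when the set is
 * being unregistered) or stop them, then continue on to cache_set_flush().
 */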
static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i]) {
			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
				dc = container_of(c->devices[i],
						  struct cached_dev, disk);
				bch_cached_dev_detach(dc);
			} else {
				bcache_device_stop(c->devices[i]);
			}
		}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size = sb->block_size;
	c->sb.bucket_size = sb->bucket_size;
	c->sb.nr_in_set = sb->nr_in_set;
	c->sb.last_mount = sb->last_mount;
	c->bucket_bits = ilog2(sb->bucket_size);
	c->block_bits = ilog2(sb->block_size);
	c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages = bucket_pages(c);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->try_wait);
	init_waitqueue_head(&c->bucket_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);
	spin_lock_init(&c->try_harder_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

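	/*
	 * All-or-nothing allocation of the set's device array, mempools,
	 * bioset, uuid pages, journal and btree cache; any failure below
	 * unregisters the half-constructed cache set.
	 */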
	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c) ||
	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us = 2000;
	c->congested_write_threshold_us = 20000;
	c->error_limit = 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	struct closure cl;
	unsigned i;

	closure_init_stack(&cl);

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, k, j->btree_level, true);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_btree_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal);
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_btree_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = bch_btree_node_alloc(c, 0, true);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	return;
err:
	closure_sync(&cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, "%s", err);
}

static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
		ca->sb.bucket_size == c->sb.bucket_size &&
		ca->sb.nr_in_set == c->sb.nr_in_set;
}

static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

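	/*
	 * The cache with the highest superblock seq has the most recent copy
	 * of the set-wide fields; adopt its version, set uuid, flags and seq.
	 */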
	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set)
		ca->set->cache[ca->sb.nr_this_dev] = NULL;

	bio_split_pool_free(&ca->bio_split_hook);

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->unused);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev)) {
		blk_sync_queue(bdev_get_queue(ca->bdev));
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	kfree(ca);
	module_put(THIS_MODULE);
}

static int cache_alloc(struct cache_sb *sb, struct cache *ca)
{
	size_t free;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio);
	ca->journal.bio.bi_max_vecs = 8;
	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
	    bio_split_pool_init(&ca->bio_split_hook))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	if (bch_cache_allocator_init(ca))
		goto err;

	return 0;
err:
	kobject_put(&ca->kobj);
	return -ENOMEM;
}

static void register_cache(struct cache_sb *sb, struct page *sb_page,
			   struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio);
	ca->sb_bio.bi_max_vecs = 1;
	ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	if (cache_alloc(sb, ca) != 0)
		goto err;

	err = "error creating kobject";
	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
		goto err;

	err = register_cache_set(ca);
	if (err)
		goto err;

	pr_info("registered cache device %s", bdevname(bdev, name));
	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	kobject_put(&ca->kobj);
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);

static bool bch_is_open_backing(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev) {
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	mutex_lock(&bch_register_lock);

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		register_bdev(sb, sb_page, bdev, dc);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		register_cache(sb, sb_page, bdev, ca);
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	mutex_unlock(&bch_register_lock);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	if (attr != &ksysfs_register_quiet)
		pr_info("error opening %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

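/*
 * Reboot notifier: stop every cache set and backing device so superblocks
 * and dirty metadata reach disk before the system goes down, waiting up to
 * two seconds for them to finish.
 */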
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call = bcache_reboot,
	.priority = INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	bch_btree_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0)
		return bcache_major;

	if (!(bcache_wq = create_workqueue("bcache")) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    sysfs_create_files(bcache_kobj, files) ||
	    bch_btree_init() ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);