/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* maximum number of partitions on a single bcache device */
#define BCACHE_MINORS		128
/* maximum number of bcache devices on a single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)

/* Superblock */

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset		= le64_to_cpu(s->offset);
	sb->version		= le64_to_cpu(s->version);

	memcpy(sb->magic,	s->magic, 16);
	memcpy(sb->uuid,	s->uuid, 16);
	memcpy(sb->set_uuid,	s->set_uuid, 16);
	memcpy(sb->label,	s->label, SB_LABEL_SIZE);

	sb->flags		= le64_to_cpu(s->flags);
	sb->seq			= le64_to_cpu(s->seq);
	sb->last_mount		= le32_to_cpu(s->last_mount);
	sb->first_bucket	= le16_to_cpu(s->first_bucket);
	sb->keys		= le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size	= le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

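	/*
	 * The remaining superblock fields are version dependent: backing
	 * devices carry a data offset, cache devices carry bucket geometry
	 * and cache set membership; each flavour is validated separately
	 * below.
	 */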
	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset	= BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset	= le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets	= le64_to_cpu(s->nbuckets);
		sb->bucket_size	= le16_to_cpu(s->bucket_size);

		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_iter.bi_sector	= SB_SECTOR;
	bio->bi_iter.bi_size	= SB_SIZE;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch_bio_map(bio, NULL);

	out->offset		= cpu_to_le64(sb->offset);
	out->version		= cpu_to_le64(sb->version);

	memcpy(out->uuid,	sb->uuid, 16);
	memcpy(out->set_uuid,	sb->set_uuid, 16);
	memcpy(out->label,	sb->label, SB_LABEL_SIZE);

	out->flags		= cpu_to_le64(sb->flags);
	out->seq		= cpu_to_le64(sb->seq);

	out->last_mount		= cpu_to_le32(sb->last_mount);
	out->first_bucket	= cpu_to_le16(sb->first_bucket);
	out->keys		= cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

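/*
 * Superblock writes are serialized by sb_write_mutex; the sb_write closure
 * holds the caller open until write_bdev_super_endio() fires, and its
 * destructor drops the mutex again.
 */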
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio_set_dev(bio, dc->bdev);
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, bio->bi_status, "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq		= c->sb.seq;
		ca->sb.last_mount	= c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio_set_dev(bio, ca->bdev);
		bio->bi_end_io	= write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

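/*
 * uuid_io() reads or writes the uuid array for the whole cache set; on
 * writes it submits one bio per pointer in the key so every cache device
 * holding a copy gets updated, while reads only need the first copy.
 */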
"wrote" : "read", buf); 370 371 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 372 if (!bch_is_zero(u->uuid, 16)) 373 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", 374 u - c->uuids, u->uuid, u->label, 375 u->first_reg, u->last_reg, u->invalidated); 376 377 closure_return_with_destructor(cl, uuid_io_unlock); 378 } 379 380 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 381 { 382 struct bkey *k = &j->uuid_bucket; 383 384 if (__bch_btree_ptr_invalid(c, k)) 385 return "bad uuid pointer"; 386 387 bkey_copy(&c->uuid_bucket, k); 388 uuid_io(c, REQ_OP_READ, 0, k, cl); 389 390 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 391 struct uuid_entry_v0 *u0 = (void *) c->uuids; 392 struct uuid_entry *u1 = (void *) c->uuids; 393 int i; 394 395 closure_sync(cl); 396 397 /* 398 * Since the new uuid entry is bigger than the old, we have to 399 * convert starting at the highest memory address and work down 400 * in order to do it in place 401 */ 402 403 for (i = c->nr_uuids - 1; 404 i >= 0; 405 --i) { 406 memcpy(u1[i].uuid, u0[i].uuid, 16); 407 memcpy(u1[i].label, u0[i].label, 32); 408 409 u1[i].first_reg = u0[i].first_reg; 410 u1[i].last_reg = u0[i].last_reg; 411 u1[i].invalidated = u0[i].invalidated; 412 413 u1[i].flags = 0; 414 u1[i].sectors = 0; 415 } 416 } 417 418 return NULL; 419 } 420 421 static int __uuid_write(struct cache_set *c) 422 { 423 BKEY_PADDED(key) k; 424 struct closure cl; 425 closure_init_stack(&cl); 426 427 lockdep_assert_held(&bch_register_lock); 428 429 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) 430 return 1; 431 432 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 433 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); 434 closure_sync(&cl); 435 436 bkey_copy(&c->uuid_bucket, &k.key); 437 bkey_put(c, &k.key); 438 return 0; 439 } 440 441 int bch_uuid_write(struct cache_set *c) 442 { 443 int ret = __uuid_write(c); 444 445 if (!ret) 446 bch_journal_meta(c, NULL); 447 448 return ret; 449 } 450 451 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 452 { 453 struct uuid_entry *u; 454 455 for (u = c->uuids; 456 u < c->uuids + c->nr_uuids; u++) 457 if (!memcmp(u->uuid, uuid, 16)) 458 return u; 459 460 return NULL; 461 } 462 463 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 464 { 465 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 466 return uuid_find(c, zero_uuid); 467 } 468 469 /* 470 * Bucket priorities/gens: 471 * 472 * For each bucket, we store on disk its 473 * 8 bit gen 474 * 16 bit priority 475 * 476 * See alloc.c for an explanation of the gen. The priority is used to implement 477 * lru (and in the future other) cache replacement policies; for most purposes 478 * it's just an opaque integer. 479 * 480 * The gens and the priorities don't have a whole lot to do with each other, and 481 * it's actually the gens that must be written out at specific times - it's no 482 * big deal if the priorities don't get written, if we lose them we just reuse 483 * buckets in suboptimal order. 484 * 485 * On disk they're stored in a packed array, and in as many buckets are required 486 * to fit them all. The buckets we use to store them form a list; the journal 487 * header points to the first bucket, the first bucket points to the second 488 * bucket, et cetera. 
/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *   8 bit gen
 *  16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size	= bucket_bytes(ca);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio);
	closure_sync(cl);
}

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket	= ca->prio_buckets[i + 1];
		p->magic	= pset_magic(&ca->sb);
		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
}

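/*
 * prio_read() walks the linked list of priority buckets that
 * bch_prio_write() laid down: the journal supplies the first bucket, and
 * each prio_set's next_bucket field points at the next one.
 */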
static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, 0);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static inline int first_minor_to_idx(int first_minor)
{
	return (first_minor/BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
	return (idx * BCACHE_MINORS);
}

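/*
 * Each bcache device reserves BCACHE_MINORS minor numbers: with
 * BCACHE_MINORS == 128, bcache0 owns minors 0-127, bcache1 owns 128-255,
 * and so on. first_minor_to_idx()/idx_to_first_minor() convert between
 * the device index and its first minor number.
 */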
static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk) {
		ida_simple_remove(&bcache_device_idx,
				  first_minor_to_idx(d->disk->first_minor));
		put_disk(d->disk);
	}

	if (d->bio_split)
		bioset_free(d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned block_size,
			      sector_t sectors)
{
	struct request_queue *q;
	size_t n;
	int idx;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes ||
	    d->nr_stripes > INT_MAX ||
	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
		       (unsigned)d->nr_stripes);
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	idx = ida_simple_get(&bcache_device_idx, 0,
			     BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
	if (idx < 0)
		return idx;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
					   BIOSET_NEED_BVECS |
					   BIOSET_NEED_RESCUER)) ||
	    !(d->disk = alloc_disk(BCACHE_MINORS))) {
		ida_simple_remove(&bcache_device_idx, idx);
		return -ENOMEM;
	}

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= idx_to_first_minor(idx);
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info->congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity	= 512;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	set_bit(QUEUE_FLAG_NONROT,	&d->disk->queue->queue_flags);
	clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD,	&d->disk->queue->queue_flags);

	blk_queue_write_cache(q, true, true);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

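/*
 * bch_cached_dev_run() makes the bcacheN device visible: it adds the
 * gendisk and emits a KOBJ_CHANGE uevent carrying the backing device's
 * uuid and label so userspace (udev) can react to the new device.
 */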
void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char buf[SB_LABEL_SIZE + 1];
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		NULL,
		NULL,
	};

	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
	buf[SB_LABEL_SIZE] = '\0';
	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

	if (atomic_xchg(&dc->running, 1)) {
		kfree(env[1]);
		kfree(env[2]);
		return;
	}

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/*
	 * Won't show up in the uevent file; use udevadm monitor -e instead.
	 * Only class / kset properties are persistent.
	 */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(refcount_read(&dc->count));

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

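/*
 * Attaching matches a backing device to a cache set by set_uuid, then
 * finds (or allocates) its slot in the set's uuid array; a dirty device
 * must find its old slot so writeback can pick up where it left off.
 */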
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	*/

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	refcount_set(&dc->count, 1);

	/* Block writeback thread, but spawn it */
	down_write(&dc->writeback_lock);
	if (bch_cached_dev_writeback_start(dc)) {
		up_write(&dc->writeback_lock);
		return -ENOMEM;
	}

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		bch_sectors_dirty_init(&dc->disk);
		atomic_set(&dc->has_dirty, 1);
		refcount_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	/* Allow the writeback thread to proceed */
	up_write(&dc->writeback_lock);

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);
	if (dc->writeback_write_wq)
		destroy_workqueue(dc->writeback_write_wq);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

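/*
 * cached_dev_init() wires up everything that doesn't depend on a cache
 * set: the closure-based lifetime (disk.cl -> cached_dev_flush ->
 * cached_dev_free), the recent-IO tracking used for sequential detection,
 * and the generic bcache_device underneath.
 */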
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff		= 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
	if (ret)
		return ret;

	dc->disk.disk->queue->backing_dev_info->ra_pages =
		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
		    q->backing_dev_info->ra_pages);

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev,
			  struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", bdevname(bdev, name));

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	mutex_lock(&bch_register_lock);
	bcache_device_free(d);
	mutex_unlock(&bch_register_lock);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

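/*
 * Flash-only volumes have no backing device; their data lives entirely in
 * the cache set, so flash_dev_run() only needs a generic bcache_device
 * sized from the uuid entry's sector count.
 */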
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c), u->sectors))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_sectors_dirty_init(d);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EPERM;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca) {
			ca->set = NULL;
			c->cache[ca->sb.nr_this_dev] = NULL;
			kobject_put(&ca->kobj);
		}

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

	if (c->moving_gc_wq)
		destroy_workqueue(c->moving_gc_wq);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->fill_iter)
		mempool_destroy(c->fill_iter);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

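/*
 * cache_set_flush() runs once the devices are gone: it writes out any
 * dirty btree nodes, stops the gc and per-cache allocator threads and
 * flushes the final journal entry before the set itself is freed.
 */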
static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca;
	struct btree *b;
	unsigned i;

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (c->gc_thread)
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list) {
		mutex_lock(&b->write_lock);
		if (btree_node_dirty(b))
			__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}

	for_each_cache(ca, c, i)
		if (ca->alloc_thread)
			kthread_stop(ca->alloc_thread);

	if (c->journal.cur) {
		cancel_delayed_work_sync(&c->journal.work);
		/* flush last journal entry if needed */
		c->journal.work.work.func(&c->journal.work.work);
	}

	closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i]) {
			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
				dc = container_of(c->devices[i],
						  struct cached_dev, disk);
				bch_cached_dev_detach(dc);
			} else {
				bcache_device_stop(c->devices[i]);
			}
		}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

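/*
 * A cache set is torn down through two stacked closures: c->caching
 * (which each attached device holds a ref on) runs
 * __cache_set_unregister() and then cache_set_flush(), and dropping it
 * releases the parent c->cl, whose destructor cache_set_free() frees the
 * set.
 */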
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size	= sb->block_size;
	c->sb.bucket_size	= sb->bucket_size;
	c->sb.nr_in_set		= sb->nr_in_set;
	c->sb.last_mount	= sb->last_mount;
	c->bucket_bits		= ilog2(sb->bucket_size);
	c->block_bits		= ilog2(sb->block_size);
	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages		= bucket_pages(c);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	init_waitqueue_head(&c->bucket_wait);
	init_waitqueue_head(&c->gc_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
					   BIOSET_NEED_BVECS |
					   BIOSET_NEED_RESCUER)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
						WQ_MEM_RECLAIM, 0)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c) ||
	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

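/*
 * run_cache_set() brings a complete set online. There are two paths: a
 * set that was previously in sync replays its journal and reads the
 * priorities and uuids back in, while a brand new (or never-synced) set
 * invalidates everything and writes fresh metadata.
 */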
static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	struct closure cl;
	unsigned i;

	closure_init_stack(&cl);

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;
	set_gc_sectors(c);

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal);
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return;
err:
	closure_sync(&cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, "%s", err);
}

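/*
 * register_cache_set() either joins an existing set with a matching
 * set_uuid (can_attach_cache() checks that the geometry agrees) or
 * allocates a new one; the set only starts running once every member
 * described by nr_in_set has been registered.
 */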
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size	== c->sb.block_size &&
		ca->sb.bucket_size	== c->sb.bucket_size &&
		ca->sb.nr_in_set	== c->sb.nr_in_set;
}

static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version		= ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags		= ca->sb.flags;
		c->sb.seq		= ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

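/*
 * cache_alloc() sizes the in-memory allocation structures from the bucket
 * count: with nbuckets rounded up to a power of two, free is nbuckets/1024,
 * which sets the depth of the movinggc and "none" reserves (free_inc and
 * the heap get 4x and 8x that, respectively).
 */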
static int cache_alloc(struct cache *ca)
{
	size_t free;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

static int register_cache(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", bdevname(bdev, name));

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error opening %s: %s", bdevname(bdev, name), err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);

static bool bch_is_open_backing(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev) {
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

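/*
 * register_bcache() is the sysfs entry point for registering both backing
 * and cache devices, e.g.:
 *
 *	echo /dev/sdb > /sys/fs/bcache/register
 *
 * The superblock decides which kind of device it is (SB_IS_BDEV());
 * register_quiet behaves the same but stays silent when the device is
 * busy or already registered.
 */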
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err_close;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error opening %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

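/*
 * On an orderly shutdown, stop every cache set and backing device and
 * give them up to two seconds to finish; the notifier runs at INT_MAX
 * priority so this happens before the underlying block devices go away.
 */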
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj) ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);