1 /* 2 * bcache setup/teardown code, and some metadata io - read a superblock and 3 * figure out what to do with it. 4 * 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 * Copyright 2012 Google, Inc. 7 */ 8 9 #include "bcache.h" 10 #include "btree.h" 11 #include "debug.h" 12 #include "extents.h" 13 #include "request.h" 14 #include "writeback.h" 15 16 #include <linux/blkdev.h> 17 #include <linux/buffer_head.h> 18 #include <linux/debugfs.h> 19 #include <linux/genhd.h> 20 #include <linux/idr.h> 21 #include <linux/kthread.h> 22 #include <linux/module.h> 23 #include <linux/random.h> 24 #include <linux/reboot.h> 25 #include <linux/sysfs.h> 26 27 MODULE_LICENSE("GPL"); 28 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); 29 30 static const char bcache_magic[] = { 31 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, 32 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 33 }; 34 35 static const char invalid_uuid[] = { 36 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, 37 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 38 }; 39 40 /* Default is -1; we skip past it for struct cached_dev's cache mode */ 41 const char * const bch_cache_modes[] = { 42 "default", 43 "writethrough", 44 "writeback", 45 "writearound", 46 "none", 47 NULL 48 }; 49 50 static struct kobject *bcache_kobj; 51 struct mutex bch_register_lock; 52 LIST_HEAD(bch_cache_sets); 53 static LIST_HEAD(uncached_devices); 54 55 static int bcache_major; 56 static DEFINE_IDA(bcache_device_idx); 57 static wait_queue_head_t unregister_wait; 58 struct workqueue_struct *bcache_wq; 59 60 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 61 /* limitation of partitions number on single bcache device */ 62 #define BCACHE_MINORS 128 63 /* limitation of bcache devices number on single system */ 64 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS) 65 66 /* Superblock */ 67 68 static const char *read_super(struct cache_sb *sb, struct block_device *bdev, 69 struct page **res) 70 { 71 const char *err; 72 struct cache_sb *s; 73 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); 74 unsigned i; 75 76 if (!bh) 77 return "IO error"; 78 79 s = (struct cache_sb *) bh->b_data; 80 81 sb->offset = le64_to_cpu(s->offset); 82 sb->version = le64_to_cpu(s->version); 83 84 memcpy(sb->magic, s->magic, 16); 85 memcpy(sb->uuid, s->uuid, 16); 86 memcpy(sb->set_uuid, s->set_uuid, 16); 87 memcpy(sb->label, s->label, SB_LABEL_SIZE); 88 89 sb->flags = le64_to_cpu(s->flags); 90 sb->seq = le64_to_cpu(s->seq); 91 sb->last_mount = le32_to_cpu(s->last_mount); 92 sb->first_bucket = le16_to_cpu(s->first_bucket); 93 sb->keys = le16_to_cpu(s->keys); 94 95 for (i = 0; i < SB_JOURNAL_BUCKETS; i++) 96 sb->d[i] = le64_to_cpu(s->d[i]); 97 98 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u", 99 sb->version, sb->flags, sb->seq, sb->keys); 100 101 err = "Not a bcache superblock"; 102 if (sb->offset != SB_SECTOR) 103 goto err; 104 105 if (memcmp(sb->magic, bcache_magic, 16)) 106 goto err; 107 108 err = "Too many journal buckets"; 109 if (sb->keys > SB_JOURNAL_BUCKETS) 110 goto err; 111 112 err = "Bad checksum"; 113 if (s->csum != csum_set(s)) 114 goto err; 115 116 err = "Bad UUID"; 117 if (bch_is_zero(sb->uuid, 16)) 118 goto err; 119 120 sb->block_size = le16_to_cpu(s->block_size); 121 122 err = "Superblock block size smaller than device block size"; 123 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) 124 goto err; 125 126 switch (sb->version) { 127 case BCACHE_SB_VERSION_BDEV: 128 sb->data_offset = BDEV_DATA_START_DEFAULT; 129 break; 130 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: 131 sb->data_offset = le64_to_cpu(s->data_offset); 132 133 err = "Bad data offset"; 134 if (sb->data_offset < BDEV_DATA_START_DEFAULT) 135 goto err; 136 137 break; 138 case BCACHE_SB_VERSION_CDEV: 139 case BCACHE_SB_VERSION_CDEV_WITH_UUID: 140 sb->nbuckets = le64_to_cpu(s->nbuckets); 141 sb->bucket_size = le16_to_cpu(s->bucket_size); 142 143 sb->nr_in_set = le16_to_cpu(s->nr_in_set); 144 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); 145 146 err = "Too many buckets"; 147 if (sb->nbuckets > LONG_MAX) 148 goto err; 149 150 err = "Not enough buckets"; 151 if (sb->nbuckets < 1 << 7) 152 goto err; 153 154 err = "Bad block/bucket size"; 155 if (!is_power_of_2(sb->block_size) || 156 sb->block_size > PAGE_SECTORS || 157 !is_power_of_2(sb->bucket_size) || 158 sb->bucket_size < PAGE_SECTORS) 159 goto err; 160 161 err = "Invalid superblock: device too small"; 162 if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets) 163 goto err; 164 165 err = "Bad UUID"; 166 if (bch_is_zero(sb->set_uuid, 16)) 167 goto err; 168 169 err = "Bad cache device number in set"; 170 if (!sb->nr_in_set || 171 sb->nr_in_set <= sb->nr_this_dev || 172 sb->nr_in_set > MAX_CACHES_PER_SET) 173 goto err; 174 175 err = "Journal buckets not sequential"; 176 for (i = 0; i < sb->keys; i++) 177 if (sb->d[i] != sb->first_bucket + i) 178 goto err; 179 180 err = "Too many journal buckets"; 181 if (sb->first_bucket + sb->keys > sb->nbuckets) 182 goto err; 183 184 err = "Invalid superblock: first bucket comes before end of super"; 185 if (sb->first_bucket * sb->bucket_size < 16) 186 goto err; 187 188 break; 189 default: 190 err = "Unsupported superblock version"; 191 goto err; 192 } 193 194 sb->last_mount = get_seconds(); 195 err = NULL; 196 197 get_page(bh->b_page); 198 *res = bh->b_page; 199 err: 200 put_bh(bh); 201 return err; 202 } 203 204 static void write_bdev_super_endio(struct bio *bio) 205 { 206 struct cached_dev *dc = bio->bi_private; 207 /* XXX: error checking */ 208 209 closure_put(&dc->sb_write); 210 } 211 212 static void __write_super(struct cache_sb *sb, struct bio *bio) 213 { 214 struct cache_sb *out = page_address(bio_first_page_all(bio)); 215 unsigned i; 216 217 bio->bi_iter.bi_sector = SB_SECTOR; 218 bio->bi_iter.bi_size = SB_SIZE; 219 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); 220 bch_bio_map(bio, NULL); 221 222 out->offset = cpu_to_le64(sb->offset); 223 out->version = cpu_to_le64(sb->version); 224 225 memcpy(out->uuid, sb->uuid, 16); 226 memcpy(out->set_uuid, sb->set_uuid, 16); 227 memcpy(out->label, sb->label, SB_LABEL_SIZE); 228 229 out->flags = cpu_to_le64(sb->flags); 230 out->seq = cpu_to_le64(sb->seq); 231 232 out->last_mount = cpu_to_le32(sb->last_mount); 233 out->first_bucket = cpu_to_le16(sb->first_bucket); 234 out->keys = cpu_to_le16(sb->keys); 235 236 for (i = 0; i < sb->keys; i++) 237 out->d[i] = cpu_to_le64(sb->d[i]); 238 239 out->csum = csum_set(out); 240 241 pr_debug("ver %llu, flags %llu, seq %llu", 242 sb->version, sb->flags, sb->seq); 243 244 submit_bio(bio); 245 } 246 247 static void bch_write_bdev_super_unlock(struct closure *cl) 248 { 249 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); 250 251 up(&dc->sb_write_mutex); 252 } 253 254 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) 255 { 256 struct closure *cl = &dc->sb_write; 257 struct bio *bio = &dc->sb_bio; 258 259 down(&dc->sb_write_mutex); 260 closure_init(cl, parent); 261 262 bio_reset(bio); 263 bio_set_dev(bio, dc->bdev); 264 bio->bi_end_io = write_bdev_super_endio; 265 bio->bi_private = dc; 266 267 closure_get(cl); 268 __write_super(&dc->sb, bio); 269 270 closure_return_with_destructor(cl, bch_write_bdev_super_unlock); 271 } 272 273 static void write_super_endio(struct bio *bio) 274 { 275 struct cache *ca = bio->bi_private; 276 277 /* is_read = 0 */ 278 bch_count_io_errors(ca, bio->bi_status, 0, 279 "writing superblock"); 280 closure_put(&ca->set->sb_write); 281 } 282 283 static void bcache_write_super_unlock(struct closure *cl) 284 { 285 struct cache_set *c = container_of(cl, struct cache_set, sb_write); 286 287 up(&c->sb_write_mutex); 288 } 289 290 void bcache_write_super(struct cache_set *c) 291 { 292 struct closure *cl = &c->sb_write; 293 struct cache *ca; 294 unsigned i; 295 296 down(&c->sb_write_mutex); 297 closure_init(cl, &c->cl); 298 299 c->sb.seq++; 300 301 for_each_cache(ca, c, i) { 302 struct bio *bio = &ca->sb_bio; 303 304 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 305 ca->sb.seq = c->sb.seq; 306 ca->sb.last_mount = c->sb.last_mount; 307 308 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 309 310 bio_reset(bio); 311 bio_set_dev(bio, ca->bdev); 312 bio->bi_end_io = write_super_endio; 313 bio->bi_private = ca; 314 315 closure_get(cl); 316 __write_super(&ca->sb, bio); 317 } 318 319 closure_return_with_destructor(cl, bcache_write_super_unlock); 320 } 321 322 /* UUID io */ 323 324 static void uuid_endio(struct bio *bio) 325 { 326 struct closure *cl = bio->bi_private; 327 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 328 329 cache_set_err_on(bio->bi_status, c, "accessing uuids"); 330 bch_bbio_free(bio, c); 331 closure_put(cl); 332 } 333 334 static void uuid_io_unlock(struct closure *cl) 335 { 336 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 337 338 up(&c->uuid_write_mutex); 339 } 340 341 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, 342 struct bkey *k, struct closure *parent) 343 { 344 struct closure *cl = &c->uuid_write; 345 struct uuid_entry *u; 346 unsigned i; 347 char buf[80]; 348 349 BUG_ON(!parent); 350 down(&c->uuid_write_mutex); 351 closure_init(cl, parent); 352 353 for (i = 0; i < KEY_PTRS(k); i++) { 354 struct bio *bio = bch_bbio_alloc(c); 355 356 bio->bi_opf = REQ_SYNC | REQ_META | op_flags; 357 bio->bi_iter.bi_size = KEY_SIZE(k) << 9; 358 359 bio->bi_end_io = uuid_endio; 360 bio->bi_private = cl; 361 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 362 bch_bio_map(bio, c->uuids); 363 364 bch_submit_bbio(bio, c, k, i); 365 366 if (op != REQ_OP_WRITE) 367 break; 368 } 369 370 bch_extent_to_text(buf, sizeof(buf), k); 371 pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf); 372 373 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 374 if (!bch_is_zero(u->uuid, 16)) 375 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", 376 u - c->uuids, u->uuid, u->label, 377 u->first_reg, u->last_reg, u->invalidated); 378 379 closure_return_with_destructor(cl, uuid_io_unlock); 380 } 381 382 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 383 { 384 struct bkey *k = &j->uuid_bucket; 385 386 if (__bch_btree_ptr_invalid(c, k)) 387 return "bad uuid pointer"; 388 389 bkey_copy(&c->uuid_bucket, k); 390 uuid_io(c, REQ_OP_READ, 0, k, cl); 391 392 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 393 struct uuid_entry_v0 *u0 = (void *) c->uuids; 394 struct uuid_entry *u1 = (void *) c->uuids; 395 int i; 396 397 closure_sync(cl); 398 399 /* 400 * Since the new uuid entry is bigger than the old, we have to 401 * convert starting at the highest memory address and work down 402 * in order to do it in place 403 */ 404 405 for (i = c->nr_uuids - 1; 406 i >= 0; 407 --i) { 408 memcpy(u1[i].uuid, u0[i].uuid, 16); 409 memcpy(u1[i].label, u0[i].label, 32); 410 411 u1[i].first_reg = u0[i].first_reg; 412 u1[i].last_reg = u0[i].last_reg; 413 u1[i].invalidated = u0[i].invalidated; 414 415 u1[i].flags = 0; 416 u1[i].sectors = 0; 417 } 418 } 419 420 return NULL; 421 } 422 423 static int __uuid_write(struct cache_set *c) 424 { 425 BKEY_PADDED(key) k; 426 struct closure cl; 427 closure_init_stack(&cl); 428 429 lockdep_assert_held(&bch_register_lock); 430 431 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) 432 return 1; 433 434 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 435 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); 436 closure_sync(&cl); 437 438 bkey_copy(&c->uuid_bucket, &k.key); 439 bkey_put(c, &k.key); 440 return 0; 441 } 442 443 int bch_uuid_write(struct cache_set *c) 444 { 445 int ret = __uuid_write(c); 446 447 if (!ret) 448 bch_journal_meta(c, NULL); 449 450 return ret; 451 } 452 453 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 454 { 455 struct uuid_entry *u; 456 457 for (u = c->uuids; 458 u < c->uuids + c->nr_uuids; u++) 459 if (!memcmp(u->uuid, uuid, 16)) 460 return u; 461 462 return NULL; 463 } 464 465 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 466 { 467 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 468 return uuid_find(c, zero_uuid); 469 } 470 471 /* 472 * Bucket priorities/gens: 473 * 474 * For each bucket, we store on disk its 475 * 8 bit gen 476 * 16 bit priority 477 * 478 * See alloc.c for an explanation of the gen. The priority is used to implement 479 * lru (and in the future other) cache replacement policies; for most purposes 480 * it's just an opaque integer. 481 * 482 * The gens and the priorities don't have a whole lot to do with each other, and 483 * it's actually the gens that must be written out at specific times - it's no 484 * big deal if the priorities don't get written, if we lose them we just reuse 485 * buckets in suboptimal order. 486 * 487 * On disk they're stored in a packed array, and in as many buckets are required 488 * to fit them all. The buckets we use to store them form a list; the journal 489 * header points to the first bucket, the first bucket points to the second 490 * bucket, et cetera. 491 * 492 * This code is used by the allocation code; periodically (whenever it runs out 493 * of buckets to allocate from) the allocation code will invalidate some 494 * buckets, but it can't use those buckets until their new gens are safely on 495 * disk. 496 */ 497 498 static void prio_endio(struct bio *bio) 499 { 500 struct cache *ca = bio->bi_private; 501 502 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); 503 bch_bbio_free(bio, ca->set); 504 closure_put(&ca->prio); 505 } 506 507 static void prio_io(struct cache *ca, uint64_t bucket, int op, 508 unsigned long op_flags) 509 { 510 struct closure *cl = &ca->prio; 511 struct bio *bio = bch_bbio_alloc(ca->set); 512 513 closure_init_stack(cl); 514 515 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; 516 bio_set_dev(bio, ca->bdev); 517 bio->bi_iter.bi_size = bucket_bytes(ca); 518 519 bio->bi_end_io = prio_endio; 520 bio->bi_private = ca; 521 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 522 bch_bio_map(bio, ca->disk_buckets); 523 524 closure_bio_submit(bio, &ca->prio); 525 closure_sync(cl); 526 } 527 528 void bch_prio_write(struct cache *ca) 529 { 530 int i; 531 struct bucket *b; 532 struct closure cl; 533 534 closure_init_stack(&cl); 535 536 lockdep_assert_held(&ca->set->bucket_lock); 537 538 ca->disk_buckets->seq++; 539 540 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), 541 &ca->meta_sectors_written); 542 543 //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), 544 // fifo_used(&ca->free_inc), fifo_used(&ca->unused)); 545 546 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 547 long bucket; 548 struct prio_set *p = ca->disk_buckets; 549 struct bucket_disk *d = p->data; 550 struct bucket_disk *end = d + prios_per_bucket(ca); 551 552 for (b = ca->buckets + i * prios_per_bucket(ca); 553 b < ca->buckets + ca->sb.nbuckets && d < end; 554 b++, d++) { 555 d->prio = cpu_to_le16(b->prio); 556 d->gen = b->gen; 557 } 558 559 p->next_bucket = ca->prio_buckets[i + 1]; 560 p->magic = pset_magic(&ca->sb); 561 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); 562 563 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); 564 BUG_ON(bucket == -1); 565 566 mutex_unlock(&ca->set->bucket_lock); 567 prio_io(ca, bucket, REQ_OP_WRITE, 0); 568 mutex_lock(&ca->set->bucket_lock); 569 570 ca->prio_buckets[i] = bucket; 571 atomic_dec_bug(&ca->buckets[bucket].pin); 572 } 573 574 mutex_unlock(&ca->set->bucket_lock); 575 576 bch_journal_meta(ca->set, &cl); 577 closure_sync(&cl); 578 579 mutex_lock(&ca->set->bucket_lock); 580 581 /* 582 * Don't want the old priorities to get garbage collected until after we 583 * finish writing the new ones, and they're journalled 584 */ 585 for (i = 0; i < prio_buckets(ca); i++) { 586 if (ca->prio_last_buckets[i]) 587 __bch_bucket_free(ca, 588 &ca->buckets[ca->prio_last_buckets[i]]); 589 590 ca->prio_last_buckets[i] = ca->prio_buckets[i]; 591 } 592 } 593 594 static void prio_read(struct cache *ca, uint64_t bucket) 595 { 596 struct prio_set *p = ca->disk_buckets; 597 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; 598 struct bucket *b; 599 unsigned bucket_nr = 0; 600 601 for (b = ca->buckets; 602 b < ca->buckets + ca->sb.nbuckets; 603 b++, d++) { 604 if (d == end) { 605 ca->prio_buckets[bucket_nr] = bucket; 606 ca->prio_last_buckets[bucket_nr] = bucket; 607 bucket_nr++; 608 609 prio_io(ca, bucket, REQ_OP_READ, 0); 610 611 if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) 612 pr_warn("bad csum reading priorities"); 613 614 if (p->magic != pset_magic(&ca->sb)) 615 pr_warn("bad magic reading priorities"); 616 617 bucket = p->next_bucket; 618 d = p->data; 619 } 620 621 b->prio = le16_to_cpu(d->prio); 622 b->gen = b->last_gc = d->gen; 623 } 624 } 625 626 /* Bcache device */ 627 628 static int open_dev(struct block_device *b, fmode_t mode) 629 { 630 struct bcache_device *d = b->bd_disk->private_data; 631 if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) 632 return -ENXIO; 633 634 closure_get(&d->cl); 635 return 0; 636 } 637 638 static void release_dev(struct gendisk *b, fmode_t mode) 639 { 640 struct bcache_device *d = b->private_data; 641 closure_put(&d->cl); 642 } 643 644 static int ioctl_dev(struct block_device *b, fmode_t mode, 645 unsigned int cmd, unsigned long arg) 646 { 647 struct bcache_device *d = b->bd_disk->private_data; 648 return d->ioctl(d, mode, cmd, arg); 649 } 650 651 static const struct block_device_operations bcache_ops = { 652 .open = open_dev, 653 .release = release_dev, 654 .ioctl = ioctl_dev, 655 .owner = THIS_MODULE, 656 }; 657 658 void bcache_device_stop(struct bcache_device *d) 659 { 660 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) 661 closure_queue(&d->cl); 662 } 663 664 static void bcache_device_unlink(struct bcache_device *d) 665 { 666 lockdep_assert_held(&bch_register_lock); 667 668 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { 669 unsigned i; 670 struct cache *ca; 671 672 sysfs_remove_link(&d->c->kobj, d->name); 673 sysfs_remove_link(&d->kobj, "cache"); 674 675 for_each_cache(ca, d->c, i) 676 bd_unlink_disk_holder(ca->bdev, d->disk); 677 } 678 } 679 680 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 681 const char *name) 682 { 683 unsigned i; 684 struct cache *ca; 685 686 for_each_cache(ca, d->c, i) 687 bd_link_disk_holder(ca->bdev, d->disk); 688 689 snprintf(d->name, BCACHEDEVNAME_SIZE, 690 "%s%u", name, d->id); 691 692 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || 693 sysfs_create_link(&c->kobj, &d->kobj, d->name), 694 "Couldn't create device <-> cache set symlinks"); 695 696 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); 697 } 698 699 static void bcache_device_detach(struct bcache_device *d) 700 { 701 lockdep_assert_held(&bch_register_lock); 702 703 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { 704 struct uuid_entry *u = d->c->uuids + d->id; 705 706 SET_UUID_FLASH_ONLY(u, 0); 707 memcpy(u->uuid, invalid_uuid, 16); 708 u->invalidated = cpu_to_le32(get_seconds()); 709 bch_uuid_write(d->c); 710 } 711 712 bcache_device_unlink(d); 713 714 d->c->devices[d->id] = NULL; 715 closure_put(&d->c->caching); 716 d->c = NULL; 717 } 718 719 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, 720 unsigned id) 721 { 722 d->id = id; 723 d->c = c; 724 c->devices[id] = d; 725 726 if (id >= c->devices_max_used) 727 c->devices_max_used = id + 1; 728 729 closure_get(&c->caching); 730 } 731 732 static inline int first_minor_to_idx(int first_minor) 733 { 734 return (first_minor/BCACHE_MINORS); 735 } 736 737 static inline int idx_to_first_minor(int idx) 738 { 739 return (idx * BCACHE_MINORS); 740 } 741 742 static void bcache_device_free(struct bcache_device *d) 743 { 744 lockdep_assert_held(&bch_register_lock); 745 746 pr_info("%s stopped", d->disk->disk_name); 747 748 if (d->c) 749 bcache_device_detach(d); 750 if (d->disk && d->disk->flags & GENHD_FL_UP) 751 del_gendisk(d->disk); 752 if (d->disk && d->disk->queue) 753 blk_cleanup_queue(d->disk->queue); 754 if (d->disk) { 755 ida_simple_remove(&bcache_device_idx, 756 first_minor_to_idx(d->disk->first_minor)); 757 put_disk(d->disk); 758 } 759 760 if (d->bio_split) 761 bioset_free(d->bio_split); 762 kvfree(d->full_dirty_stripes); 763 kvfree(d->stripe_sectors_dirty); 764 765 closure_debug_destroy(&d->cl); 766 } 767 768 static int bcache_device_init(struct bcache_device *d, unsigned block_size, 769 sector_t sectors) 770 { 771 struct request_queue *q; 772 size_t n; 773 int idx; 774 775 if (!d->stripe_size) 776 d->stripe_size = 1 << 31; 777 778 d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); 779 780 if (!d->nr_stripes || 781 d->nr_stripes > INT_MAX || 782 d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { 783 pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", 784 (unsigned)d->nr_stripes); 785 return -ENOMEM; 786 } 787 788 n = d->nr_stripes * sizeof(atomic_t); 789 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); 790 if (!d->stripe_sectors_dirty) 791 return -ENOMEM; 792 793 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); 794 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); 795 if (!d->full_dirty_stripes) 796 return -ENOMEM; 797 798 idx = ida_simple_get(&bcache_device_idx, 0, 799 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); 800 if (idx < 0) 801 return idx; 802 803 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio), 804 BIOSET_NEED_BVECS | 805 BIOSET_NEED_RESCUER)) || 806 !(d->disk = alloc_disk(BCACHE_MINORS))) { 807 ida_simple_remove(&bcache_device_idx, idx); 808 return -ENOMEM; 809 } 810 811 set_capacity(d->disk, sectors); 812 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); 813 814 d->disk->major = bcache_major; 815 d->disk->first_minor = idx_to_first_minor(idx); 816 d->disk->fops = &bcache_ops; 817 d->disk->private_data = d; 818 819 q = blk_alloc_queue(GFP_KERNEL); 820 if (!q) 821 return -ENOMEM; 822 823 blk_queue_make_request(q, NULL); 824 d->disk->queue = q; 825 q->queuedata = d; 826 q->backing_dev_info->congested_data = d; 827 q->limits.max_hw_sectors = UINT_MAX; 828 q->limits.max_sectors = UINT_MAX; 829 q->limits.max_segment_size = UINT_MAX; 830 q->limits.max_segments = BIO_MAX_PAGES; 831 blk_queue_max_discard_sectors(q, UINT_MAX); 832 q->limits.discard_granularity = 512; 833 q->limits.io_min = block_size; 834 q->limits.logical_block_size = block_size; 835 q->limits.physical_block_size = block_size; 836 set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); 837 clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags); 838 set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); 839 840 blk_queue_write_cache(q, true, true); 841 842 return 0; 843 } 844 845 /* Cached device */ 846 847 static void calc_cached_dev_sectors(struct cache_set *c) 848 { 849 uint64_t sectors = 0; 850 struct cached_dev *dc; 851 852 list_for_each_entry(dc, &c->cached_devs, list) 853 sectors += bdev_sectors(dc->bdev); 854 855 c->cached_dev_sectors = sectors; 856 } 857 858 void bch_cached_dev_run(struct cached_dev *dc) 859 { 860 struct bcache_device *d = &dc->disk; 861 char buf[SB_LABEL_SIZE + 1]; 862 char *env[] = { 863 "DRIVER=bcache", 864 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), 865 NULL, 866 NULL, 867 }; 868 869 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); 870 buf[SB_LABEL_SIZE] = '\0'; 871 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); 872 873 if (atomic_xchg(&dc->running, 1)) { 874 kfree(env[1]); 875 kfree(env[2]); 876 return; 877 } 878 879 if (!d->c && 880 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 881 struct closure cl; 882 closure_init_stack(&cl); 883 884 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); 885 bch_write_bdev_super(dc, &cl); 886 closure_sync(&cl); 887 } 888 889 add_disk(d->disk); 890 bd_link_disk_holder(dc->bdev, dc->disk.disk); 891 /* won't show up in the uevent file, use udevadm monitor -e instead 892 * only class / kset properties are persistent */ 893 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 894 kfree(env[1]); 895 kfree(env[2]); 896 897 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 898 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) 899 pr_debug("error creating sysfs link"); 900 } 901 902 static void cached_dev_detach_finish(struct work_struct *w) 903 { 904 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 905 char buf[BDEVNAME_SIZE]; 906 struct closure cl; 907 closure_init_stack(&cl); 908 909 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); 910 BUG_ON(refcount_read(&dc->count)); 911 912 mutex_lock(&bch_register_lock); 913 914 cancel_delayed_work_sync(&dc->writeback_rate_update); 915 if (!IS_ERR_OR_NULL(dc->writeback_thread)) { 916 kthread_stop(dc->writeback_thread); 917 dc->writeback_thread = NULL; 918 } 919 920 memset(&dc->sb.set_uuid, 0, 16); 921 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 922 923 bch_write_bdev_super(dc, &cl); 924 closure_sync(&cl); 925 926 bcache_device_detach(&dc->disk); 927 list_move(&dc->list, &uncached_devices); 928 929 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); 930 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); 931 932 mutex_unlock(&bch_register_lock); 933 934 pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); 935 936 /* Drop ref we took in cached_dev_detach() */ 937 closure_put(&dc->disk.cl); 938 } 939 940 void bch_cached_dev_detach(struct cached_dev *dc) 941 { 942 lockdep_assert_held(&bch_register_lock); 943 944 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 945 return; 946 947 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 948 return; 949 950 /* 951 * Block the device from being closed and freed until we're finished 952 * detaching 953 */ 954 closure_get(&dc->disk.cl); 955 956 bch_writeback_queue(dc); 957 cached_dev_put(dc); 958 } 959 960 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, 961 uint8_t *set_uuid) 962 { 963 uint32_t rtime = cpu_to_le32(get_seconds()); 964 struct uuid_entry *u; 965 char buf[BDEVNAME_SIZE]; 966 967 bdevname(dc->bdev, buf); 968 969 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || 970 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) 971 return -ENOENT; 972 973 if (dc->disk.c) { 974 pr_err("Can't attach %s: already attached", buf); 975 return -EINVAL; 976 } 977 978 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { 979 pr_err("Can't attach %s: shutting down", buf); 980 return -EINVAL; 981 } 982 983 if (dc->sb.block_size < c->sb.block_size) { 984 /* Will die */ 985 pr_err("Couldn't attach %s: block size less than set's block size", 986 buf); 987 return -EINVAL; 988 } 989 990 u = uuid_find(c, dc->sb.uuid); 991 992 if (u && 993 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || 994 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { 995 memcpy(u->uuid, invalid_uuid, 16); 996 u->invalidated = cpu_to_le32(get_seconds()); 997 u = NULL; 998 } 999 1000 if (!u) { 1001 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1002 pr_err("Couldn't find uuid for %s in set", buf); 1003 return -ENOENT; 1004 } 1005 1006 u = uuid_find_empty(c); 1007 if (!u) { 1008 pr_err("Not caching %s, no room for UUID", buf); 1009 return -EINVAL; 1010 } 1011 } 1012 1013 /* Deadlocks since we're called via sysfs... 1014 sysfs_remove_file(&dc->kobj, &sysfs_attach); 1015 */ 1016 1017 if (bch_is_zero(u->uuid, 16)) { 1018 struct closure cl; 1019 closure_init_stack(&cl); 1020 1021 memcpy(u->uuid, dc->sb.uuid, 16); 1022 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); 1023 u->first_reg = u->last_reg = rtime; 1024 bch_uuid_write(c); 1025 1026 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); 1027 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 1028 1029 bch_write_bdev_super(dc, &cl); 1030 closure_sync(&cl); 1031 } else { 1032 u->last_reg = rtime; 1033 bch_uuid_write(c); 1034 } 1035 1036 bcache_device_attach(&dc->disk, c, u - c->uuids); 1037 list_move(&dc->list, &c->cached_devs); 1038 calc_cached_dev_sectors(c); 1039 1040 smp_wmb(); 1041 /* 1042 * dc->c must be set before dc->count != 0 - paired with the mb in 1043 * cached_dev_get() 1044 */ 1045 refcount_set(&dc->count, 1); 1046 1047 /* Block writeback thread, but spawn it */ 1048 down_write(&dc->writeback_lock); 1049 if (bch_cached_dev_writeback_start(dc)) { 1050 up_write(&dc->writeback_lock); 1051 return -ENOMEM; 1052 } 1053 1054 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1055 bch_sectors_dirty_init(&dc->disk); 1056 atomic_set(&dc->has_dirty, 1); 1057 refcount_inc(&dc->count); 1058 bch_writeback_queue(dc); 1059 } 1060 1061 bch_cached_dev_run(dc); 1062 bcache_device_link(&dc->disk, c, "bdev"); 1063 1064 /* Allow the writeback thread to proceed */ 1065 up_write(&dc->writeback_lock); 1066 1067 pr_info("Caching %s as %s on set %pU", 1068 bdevname(dc->bdev, buf), dc->disk.disk->disk_name, 1069 dc->disk.c->sb.set_uuid); 1070 return 0; 1071 } 1072 1073 void bch_cached_dev_release(struct kobject *kobj) 1074 { 1075 struct cached_dev *dc = container_of(kobj, struct cached_dev, 1076 disk.kobj); 1077 kfree(dc); 1078 module_put(THIS_MODULE); 1079 } 1080 1081 static void cached_dev_free(struct closure *cl) 1082 { 1083 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1084 1085 cancel_delayed_work_sync(&dc->writeback_rate_update); 1086 if (!IS_ERR_OR_NULL(dc->writeback_thread)) 1087 kthread_stop(dc->writeback_thread); 1088 if (dc->writeback_write_wq) 1089 destroy_workqueue(dc->writeback_write_wq); 1090 1091 mutex_lock(&bch_register_lock); 1092 1093 if (atomic_read(&dc->running)) 1094 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 1095 bcache_device_free(&dc->disk); 1096 list_del(&dc->list); 1097 1098 mutex_unlock(&bch_register_lock); 1099 1100 if (!IS_ERR_OR_NULL(dc->bdev)) 1101 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1102 1103 wake_up(&unregister_wait); 1104 1105 kobject_put(&dc->disk.kobj); 1106 } 1107 1108 static void cached_dev_flush(struct closure *cl) 1109 { 1110 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1111 struct bcache_device *d = &dc->disk; 1112 1113 mutex_lock(&bch_register_lock); 1114 bcache_device_unlink(d); 1115 mutex_unlock(&bch_register_lock); 1116 1117 bch_cache_accounting_destroy(&dc->accounting); 1118 kobject_del(&d->kobj); 1119 1120 continue_at(cl, cached_dev_free, system_wq); 1121 } 1122 1123 static int cached_dev_init(struct cached_dev *dc, unsigned block_size) 1124 { 1125 int ret; 1126 struct io *io; 1127 struct request_queue *q = bdev_get_queue(dc->bdev); 1128 1129 __module_get(THIS_MODULE); 1130 INIT_LIST_HEAD(&dc->list); 1131 closure_init(&dc->disk.cl, NULL); 1132 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); 1133 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1134 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1135 sema_init(&dc->sb_write_mutex, 1); 1136 INIT_LIST_HEAD(&dc->io_lru); 1137 spin_lock_init(&dc->io_lock); 1138 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); 1139 1140 dc->sequential_cutoff = 4 << 20; 1141 1142 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1143 list_add(&io->lru, &dc->io_lru); 1144 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1145 } 1146 1147 dc->disk.stripe_size = q->limits.io_opt >> 9; 1148 1149 if (dc->disk.stripe_size) 1150 dc->partial_stripes_expensive = 1151 q->limits.raid_partial_stripes_expensive; 1152 1153 ret = bcache_device_init(&dc->disk, block_size, 1154 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1155 if (ret) 1156 return ret; 1157 1158 dc->disk.disk->queue->backing_dev_info->ra_pages = 1159 max(dc->disk.disk->queue->backing_dev_info->ra_pages, 1160 q->backing_dev_info->ra_pages); 1161 1162 bch_cached_dev_request_init(dc); 1163 bch_cached_dev_writeback_init(dc); 1164 return 0; 1165 } 1166 1167 /* Cached device - bcache superblock */ 1168 1169 static void register_bdev(struct cache_sb *sb, struct page *sb_page, 1170 struct block_device *bdev, 1171 struct cached_dev *dc) 1172 { 1173 char name[BDEVNAME_SIZE]; 1174 const char *err = "cannot allocate memory"; 1175 struct cache_set *c; 1176 1177 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1178 dc->bdev = bdev; 1179 dc->bdev->bd_holder = dc; 1180 1181 bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1); 1182 bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; 1183 get_page(sb_page); 1184 1185 if (cached_dev_init(dc, sb->block_size << 9)) 1186 goto err; 1187 1188 err = "error creating kobject"; 1189 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1190 "bcache")) 1191 goto err; 1192 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1193 goto err; 1194 1195 pr_info("registered backing device %s", bdevname(bdev, name)); 1196 1197 list_add(&dc->list, &uncached_devices); 1198 list_for_each_entry(c, &bch_cache_sets, list) 1199 bch_cached_dev_attach(dc, c, NULL); 1200 1201 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1202 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1203 bch_cached_dev_run(dc); 1204 1205 return; 1206 err: 1207 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1208 bcache_device_stop(&dc->disk); 1209 } 1210 1211 /* Flash only volumes */ 1212 1213 void bch_flash_dev_release(struct kobject *kobj) 1214 { 1215 struct bcache_device *d = container_of(kobj, struct bcache_device, 1216 kobj); 1217 kfree(d); 1218 } 1219 1220 static void flash_dev_free(struct closure *cl) 1221 { 1222 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1223 mutex_lock(&bch_register_lock); 1224 bcache_device_free(d); 1225 mutex_unlock(&bch_register_lock); 1226 kobject_put(&d->kobj); 1227 } 1228 1229 static void flash_dev_flush(struct closure *cl) 1230 { 1231 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1232 1233 mutex_lock(&bch_register_lock); 1234 bcache_device_unlink(d); 1235 mutex_unlock(&bch_register_lock); 1236 kobject_del(&d->kobj); 1237 continue_at(cl, flash_dev_free, system_wq); 1238 } 1239 1240 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) 1241 { 1242 struct bcache_device *d = kzalloc(sizeof(struct bcache_device), 1243 GFP_KERNEL); 1244 if (!d) 1245 return -ENOMEM; 1246 1247 closure_init(&d->cl, NULL); 1248 set_closure_fn(&d->cl, flash_dev_flush, system_wq); 1249 1250 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1251 1252 if (bcache_device_init(d, block_bytes(c), u->sectors)) 1253 goto err; 1254 1255 bcache_device_attach(d, c, u - c->uuids); 1256 bch_sectors_dirty_init(d); 1257 bch_flash_dev_request_init(d); 1258 add_disk(d->disk); 1259 1260 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) 1261 goto err; 1262 1263 bcache_device_link(d, c, "volume"); 1264 1265 return 0; 1266 err: 1267 kobject_put(&d->kobj); 1268 return -ENOMEM; 1269 } 1270 1271 static int flash_devs_run(struct cache_set *c) 1272 { 1273 int ret = 0; 1274 struct uuid_entry *u; 1275 1276 for (u = c->uuids; 1277 u < c->uuids + c->nr_uuids && !ret; 1278 u++) 1279 if (UUID_FLASH_ONLY(u)) 1280 ret = flash_dev_run(c, u); 1281 1282 return ret; 1283 } 1284 1285 int bch_flash_dev_create(struct cache_set *c, uint64_t size) 1286 { 1287 struct uuid_entry *u; 1288 1289 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1290 return -EINTR; 1291 1292 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) 1293 return -EPERM; 1294 1295 u = uuid_find_empty(c); 1296 if (!u) { 1297 pr_err("Can't create volume, no room for UUID"); 1298 return -EINVAL; 1299 } 1300 1301 get_random_bytes(u->uuid, 16); 1302 memset(u->label, 0, 32); 1303 u->first_reg = u->last_reg = cpu_to_le32(get_seconds()); 1304 1305 SET_UUID_FLASH_ONLY(u, 1); 1306 u->sectors = size >> 9; 1307 1308 bch_uuid_write(c); 1309 1310 return flash_dev_run(c, u); 1311 } 1312 1313 /* Cache set */ 1314 1315 __printf(2, 3) 1316 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 1317 { 1318 va_list args; 1319 1320 if (c->on_error != ON_ERROR_PANIC && 1321 test_bit(CACHE_SET_STOPPING, &c->flags)) 1322 return false; 1323 1324 /* XXX: we can be called from atomic context 1325 acquire_console_sem(); 1326 */ 1327 1328 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); 1329 1330 va_start(args, fmt); 1331 vprintk(fmt, args); 1332 va_end(args); 1333 1334 printk(", disabling caching\n"); 1335 1336 if (c->on_error == ON_ERROR_PANIC) 1337 panic("panic forced after error\n"); 1338 1339 bch_cache_set_unregister(c); 1340 return true; 1341 } 1342 1343 void bch_cache_set_release(struct kobject *kobj) 1344 { 1345 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 1346 kfree(c); 1347 module_put(THIS_MODULE); 1348 } 1349 1350 static void cache_set_free(struct closure *cl) 1351 { 1352 struct cache_set *c = container_of(cl, struct cache_set, cl); 1353 struct cache *ca; 1354 unsigned i; 1355 1356 if (!IS_ERR_OR_NULL(c->debug)) 1357 debugfs_remove(c->debug); 1358 1359 bch_open_buckets_free(c); 1360 bch_btree_cache_free(c); 1361 bch_journal_free(c); 1362 1363 for_each_cache(ca, c, i) 1364 if (ca) { 1365 ca->set = NULL; 1366 c->cache[ca->sb.nr_this_dev] = NULL; 1367 kobject_put(&ca->kobj); 1368 } 1369 1370 bch_bset_sort_state_free(&c->sort); 1371 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); 1372 1373 if (c->moving_gc_wq) 1374 destroy_workqueue(c->moving_gc_wq); 1375 if (c->bio_split) 1376 bioset_free(c->bio_split); 1377 if (c->fill_iter) 1378 mempool_destroy(c->fill_iter); 1379 if (c->bio_meta) 1380 mempool_destroy(c->bio_meta); 1381 if (c->search) 1382 mempool_destroy(c->search); 1383 kfree(c->devices); 1384 1385 mutex_lock(&bch_register_lock); 1386 list_del(&c->list); 1387 mutex_unlock(&bch_register_lock); 1388 1389 pr_info("Cache set %pU unregistered", c->sb.set_uuid); 1390 wake_up(&unregister_wait); 1391 1392 closure_debug_destroy(&c->cl); 1393 kobject_put(&c->kobj); 1394 } 1395 1396 static void cache_set_flush(struct closure *cl) 1397 { 1398 struct cache_set *c = container_of(cl, struct cache_set, caching); 1399 struct cache *ca; 1400 struct btree *b; 1401 unsigned i; 1402 1403 bch_cache_accounting_destroy(&c->accounting); 1404 1405 kobject_put(&c->internal); 1406 kobject_del(&c->kobj); 1407 1408 if (c->gc_thread) 1409 kthread_stop(c->gc_thread); 1410 1411 if (!IS_ERR_OR_NULL(c->root)) 1412 list_add(&c->root->list, &c->btree_cache); 1413 1414 /* Should skip this if we're unregistering because of an error */ 1415 list_for_each_entry(b, &c->btree_cache, list) { 1416 mutex_lock(&b->write_lock); 1417 if (btree_node_dirty(b)) 1418 __bch_btree_node_write(b, NULL); 1419 mutex_unlock(&b->write_lock); 1420 } 1421 1422 for_each_cache(ca, c, i) 1423 if (ca->alloc_thread) 1424 kthread_stop(ca->alloc_thread); 1425 1426 if (c->journal.cur) { 1427 cancel_delayed_work_sync(&c->journal.work); 1428 /* flush last journal entry if needed */ 1429 c->journal.work.work.func(&c->journal.work.work); 1430 } 1431 1432 closure_return(cl); 1433 } 1434 1435 static void __cache_set_unregister(struct closure *cl) 1436 { 1437 struct cache_set *c = container_of(cl, struct cache_set, caching); 1438 struct cached_dev *dc; 1439 size_t i; 1440 1441 mutex_lock(&bch_register_lock); 1442 1443 for (i = 0; i < c->devices_max_used; i++) 1444 if (c->devices[i]) { 1445 if (!UUID_FLASH_ONLY(&c->uuids[i]) && 1446 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { 1447 dc = container_of(c->devices[i], 1448 struct cached_dev, disk); 1449 bch_cached_dev_detach(dc); 1450 } else { 1451 bcache_device_stop(c->devices[i]); 1452 } 1453 } 1454 1455 mutex_unlock(&bch_register_lock); 1456 1457 continue_at(cl, cache_set_flush, system_wq); 1458 } 1459 1460 void bch_cache_set_stop(struct cache_set *c) 1461 { 1462 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) 1463 closure_queue(&c->caching); 1464 } 1465 1466 void bch_cache_set_unregister(struct cache_set *c) 1467 { 1468 set_bit(CACHE_SET_UNREGISTERING, &c->flags); 1469 bch_cache_set_stop(c); 1470 } 1471 1472 #define alloc_bucket_pages(gfp, c) \ 1473 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) 1474 1475 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) 1476 { 1477 int iter_size; 1478 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); 1479 if (!c) 1480 return NULL; 1481 1482 __module_get(THIS_MODULE); 1483 closure_init(&c->cl, NULL); 1484 set_closure_fn(&c->cl, cache_set_free, system_wq); 1485 1486 closure_init(&c->caching, &c->cl); 1487 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); 1488 1489 /* Maybe create continue_at_noreturn() and use it here? */ 1490 closure_set_stopped(&c->cl); 1491 closure_put(&c->cl); 1492 1493 kobject_init(&c->kobj, &bch_cache_set_ktype); 1494 kobject_init(&c->internal, &bch_cache_set_internal_ktype); 1495 1496 bch_cache_accounting_init(&c->accounting, &c->cl); 1497 1498 memcpy(c->sb.set_uuid, sb->set_uuid, 16); 1499 c->sb.block_size = sb->block_size; 1500 c->sb.bucket_size = sb->bucket_size; 1501 c->sb.nr_in_set = sb->nr_in_set; 1502 c->sb.last_mount = sb->last_mount; 1503 c->bucket_bits = ilog2(sb->bucket_size); 1504 c->block_bits = ilog2(sb->block_size); 1505 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); 1506 c->devices_max_used = 0; 1507 c->btree_pages = bucket_pages(c); 1508 if (c->btree_pages > BTREE_MAX_PAGES) 1509 c->btree_pages = max_t(int, c->btree_pages / 4, 1510 BTREE_MAX_PAGES); 1511 1512 sema_init(&c->sb_write_mutex, 1); 1513 mutex_init(&c->bucket_lock); 1514 init_waitqueue_head(&c->btree_cache_wait); 1515 init_waitqueue_head(&c->bucket_wait); 1516 init_waitqueue_head(&c->gc_wait); 1517 sema_init(&c->uuid_write_mutex, 1); 1518 1519 spin_lock_init(&c->btree_gc_time.lock); 1520 spin_lock_init(&c->btree_split_time.lock); 1521 spin_lock_init(&c->btree_read_time.lock); 1522 1523 bch_moving_init_cache_set(c); 1524 1525 INIT_LIST_HEAD(&c->list); 1526 INIT_LIST_HEAD(&c->cached_devs); 1527 INIT_LIST_HEAD(&c->btree_cache); 1528 INIT_LIST_HEAD(&c->btree_cache_freeable); 1529 INIT_LIST_HEAD(&c->btree_cache_freed); 1530 INIT_LIST_HEAD(&c->data_buckets); 1531 1532 c->search = mempool_create_slab_pool(32, bch_search_cache); 1533 if (!c->search) 1534 goto err; 1535 1536 iter_size = (sb->bucket_size / sb->block_size + 1) * 1537 sizeof(struct btree_iter_set); 1538 1539 if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) || 1540 !(c->bio_meta = mempool_create_kmalloc_pool(2, 1541 sizeof(struct bbio) + sizeof(struct bio_vec) * 1542 bucket_pages(c))) || 1543 !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || 1544 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio), 1545 BIOSET_NEED_BVECS | 1546 BIOSET_NEED_RESCUER)) || 1547 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || 1548 !(c->moving_gc_wq = alloc_workqueue("bcache_gc", 1549 WQ_MEM_RECLAIM, 0)) || 1550 bch_journal_alloc(c) || 1551 bch_btree_cache_alloc(c) || 1552 bch_open_buckets_alloc(c) || 1553 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) 1554 goto err; 1555 1556 c->congested_read_threshold_us = 2000; 1557 c->congested_write_threshold_us = 20000; 1558 c->error_limit = DEFAULT_IO_ERROR_LIMIT; 1559 1560 return c; 1561 err: 1562 bch_cache_set_unregister(c); 1563 return NULL; 1564 } 1565 1566 static void run_cache_set(struct cache_set *c) 1567 { 1568 const char *err = "cannot allocate memory"; 1569 struct cached_dev *dc, *t; 1570 struct cache *ca; 1571 struct closure cl; 1572 unsigned i; 1573 1574 closure_init_stack(&cl); 1575 1576 for_each_cache(ca, c, i) 1577 c->nbuckets += ca->sb.nbuckets; 1578 set_gc_sectors(c); 1579 1580 if (CACHE_SYNC(&c->sb)) { 1581 LIST_HEAD(journal); 1582 struct bkey *k; 1583 struct jset *j; 1584 1585 err = "cannot allocate memory for journal"; 1586 if (bch_journal_read(c, &journal)) 1587 goto err; 1588 1589 pr_debug("btree_journal_read() done"); 1590 1591 err = "no journal entries found"; 1592 if (list_empty(&journal)) 1593 goto err; 1594 1595 j = &list_entry(journal.prev, struct journal_replay, list)->j; 1596 1597 err = "IO error reading priorities"; 1598 for_each_cache(ca, c, i) 1599 prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]); 1600 1601 /* 1602 * If prio_read() fails it'll call cache_set_error and we'll 1603 * tear everything down right away, but if we perhaps checked 1604 * sooner we could avoid journal replay. 1605 */ 1606 1607 k = &j->btree_root; 1608 1609 err = "bad btree root"; 1610 if (__bch_btree_ptr_invalid(c, k)) 1611 goto err; 1612 1613 err = "error reading btree root"; 1614 c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); 1615 if (IS_ERR_OR_NULL(c->root)) 1616 goto err; 1617 1618 list_del_init(&c->root->list); 1619 rw_unlock(true, c->root); 1620 1621 err = uuid_read(c, j, &cl); 1622 if (err) 1623 goto err; 1624 1625 err = "error in recovery"; 1626 if (bch_btree_check(c)) 1627 goto err; 1628 1629 bch_journal_mark(c, &journal); 1630 bch_initial_gc_finish(c); 1631 pr_debug("btree_check() done"); 1632 1633 /* 1634 * bcache_journal_next() can't happen sooner, or 1635 * btree_gc_finish() will give spurious errors about last_gc > 1636 * gc_gen - this is a hack but oh well. 1637 */ 1638 bch_journal_next(&c->journal); 1639 1640 err = "error starting allocator thread"; 1641 for_each_cache(ca, c, i) 1642 if (bch_cache_allocator_start(ca)) 1643 goto err; 1644 1645 /* 1646 * First place it's safe to allocate: btree_check() and 1647 * btree_gc_finish() have to run before we have buckets to 1648 * allocate, and bch_bucket_alloc_set() might cause a journal 1649 * entry to be written so bcache_journal_next() has to be called 1650 * first. 1651 * 1652 * If the uuids were in the old format we have to rewrite them 1653 * before the next journal entry is written: 1654 */ 1655 if (j->version < BCACHE_JSET_VERSION_UUID) 1656 __uuid_write(c); 1657 1658 bch_journal_replay(c, &journal); 1659 } else { 1660 pr_notice("invalidating existing data"); 1661 1662 for_each_cache(ca, c, i) { 1663 unsigned j; 1664 1665 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 1666 2, SB_JOURNAL_BUCKETS); 1667 1668 for (j = 0; j < ca->sb.keys; j++) 1669 ca->sb.d[j] = ca->sb.first_bucket + j; 1670 } 1671 1672 bch_initial_gc_finish(c); 1673 1674 err = "error starting allocator thread"; 1675 for_each_cache(ca, c, i) 1676 if (bch_cache_allocator_start(ca)) 1677 goto err; 1678 1679 mutex_lock(&c->bucket_lock); 1680 for_each_cache(ca, c, i) 1681 bch_prio_write(ca); 1682 mutex_unlock(&c->bucket_lock); 1683 1684 err = "cannot allocate new UUID bucket"; 1685 if (__uuid_write(c)) 1686 goto err; 1687 1688 err = "cannot allocate new btree root"; 1689 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); 1690 if (IS_ERR_OR_NULL(c->root)) 1691 goto err; 1692 1693 mutex_lock(&c->root->write_lock); 1694 bkey_copy_key(&c->root->key, &MAX_KEY); 1695 bch_btree_node_write(c->root, &cl); 1696 mutex_unlock(&c->root->write_lock); 1697 1698 bch_btree_set_root(c->root); 1699 rw_unlock(true, c->root); 1700 1701 /* 1702 * We don't want to write the first journal entry until 1703 * everything is set up - fortunately journal entries won't be 1704 * written until the SET_CACHE_SYNC() here: 1705 */ 1706 SET_CACHE_SYNC(&c->sb, true); 1707 1708 bch_journal_next(&c->journal); 1709 bch_journal_meta(c, &cl); 1710 } 1711 1712 err = "error starting gc thread"; 1713 if (bch_gc_thread_start(c)) 1714 goto err; 1715 1716 closure_sync(&cl); 1717 c->sb.last_mount = get_seconds(); 1718 bcache_write_super(c); 1719 1720 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1721 bch_cached_dev_attach(dc, c, NULL); 1722 1723 flash_devs_run(c); 1724 1725 set_bit(CACHE_SET_RUNNING, &c->flags); 1726 return; 1727 err: 1728 closure_sync(&cl); 1729 /* XXX: test this, it's broken */ 1730 bch_cache_set_error(c, "%s", err); 1731 } 1732 1733 static bool can_attach_cache(struct cache *ca, struct cache_set *c) 1734 { 1735 return ca->sb.block_size == c->sb.block_size && 1736 ca->sb.bucket_size == c->sb.bucket_size && 1737 ca->sb.nr_in_set == c->sb.nr_in_set; 1738 } 1739 1740 static const char *register_cache_set(struct cache *ca) 1741 { 1742 char buf[12]; 1743 const char *err = "cannot allocate memory"; 1744 struct cache_set *c; 1745 1746 list_for_each_entry(c, &bch_cache_sets, list) 1747 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { 1748 if (c->cache[ca->sb.nr_this_dev]) 1749 return "duplicate cache set member"; 1750 1751 if (!can_attach_cache(ca, c)) 1752 return "cache sb does not match set"; 1753 1754 if (!CACHE_SYNC(&ca->sb)) 1755 SET_CACHE_SYNC(&c->sb, false); 1756 1757 goto found; 1758 } 1759 1760 c = bch_cache_set_alloc(&ca->sb); 1761 if (!c) 1762 return err; 1763 1764 err = "error creating kobject"; 1765 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || 1766 kobject_add(&c->internal, &c->kobj, "internal")) 1767 goto err; 1768 1769 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) 1770 goto err; 1771 1772 bch_debug_init_cache_set(c); 1773 1774 list_add(&c->list, &bch_cache_sets); 1775 found: 1776 sprintf(buf, "cache%i", ca->sb.nr_this_dev); 1777 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || 1778 sysfs_create_link(&c->kobj, &ca->kobj, buf)) 1779 goto err; 1780 1781 if (ca->sb.seq > c->sb.seq) { 1782 c->sb.version = ca->sb.version; 1783 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); 1784 c->sb.flags = ca->sb.flags; 1785 c->sb.seq = ca->sb.seq; 1786 pr_debug("set version = %llu", c->sb.version); 1787 } 1788 1789 kobject_get(&ca->kobj); 1790 ca->set = c; 1791 ca->set->cache[ca->sb.nr_this_dev] = ca; 1792 c->cache_by_alloc[c->caches_loaded++] = ca; 1793 1794 if (c->caches_loaded == c->sb.nr_in_set) 1795 run_cache_set(c); 1796 1797 return NULL; 1798 err: 1799 bch_cache_set_unregister(c); 1800 return err; 1801 } 1802 1803 /* Cache device */ 1804 1805 void bch_cache_release(struct kobject *kobj) 1806 { 1807 struct cache *ca = container_of(kobj, struct cache, kobj); 1808 unsigned i; 1809 1810 if (ca->set) { 1811 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); 1812 ca->set->cache[ca->sb.nr_this_dev] = NULL; 1813 } 1814 1815 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); 1816 kfree(ca->prio_buckets); 1817 vfree(ca->buckets); 1818 1819 free_heap(&ca->heap); 1820 free_fifo(&ca->free_inc); 1821 1822 for (i = 0; i < RESERVE_NR; i++) 1823 free_fifo(&ca->free[i]); 1824 1825 if (ca->sb_bio.bi_inline_vecs[0].bv_page) 1826 put_page(bio_first_page_all(&ca->sb_bio)); 1827 1828 if (!IS_ERR_OR_NULL(ca->bdev)) 1829 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1830 1831 kfree(ca); 1832 module_put(THIS_MODULE); 1833 } 1834 1835 static int cache_alloc(struct cache *ca) 1836 { 1837 size_t free; 1838 size_t btree_buckets; 1839 struct bucket *b; 1840 1841 __module_get(THIS_MODULE); 1842 kobject_init(&ca->kobj, &bch_cache_ktype); 1843 1844 bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); 1845 1846 /* 1847 * when ca->sb.njournal_buckets is not zero, journal exists, 1848 * and in bch_journal_replay(), tree node may split, 1849 * so bucket of RESERVE_BTREE type is needed, 1850 * the worst situation is all journal buckets are valid journal, 1851 * and all the keys need to replay, 1852 * so the number of RESERVE_BTREE type buckets should be as much 1853 * as journal buckets 1854 */ 1855 btree_buckets = ca->sb.njournal_buckets ?: 8; 1856 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; 1857 1858 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) || 1859 !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || 1860 !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || 1861 !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || 1862 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || 1863 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || 1864 !(ca->buckets = vzalloc(sizeof(struct bucket) * 1865 ca->sb.nbuckets)) || 1866 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 1867 2, GFP_KERNEL)) || 1868 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca))) 1869 return -ENOMEM; 1870 1871 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 1872 1873 for_each_bucket(b, ca) 1874 atomic_set(&b->pin, 0); 1875 1876 return 0; 1877 } 1878 1879 static int register_cache(struct cache_sb *sb, struct page *sb_page, 1880 struct block_device *bdev, struct cache *ca) 1881 { 1882 char name[BDEVNAME_SIZE]; 1883 const char *err = NULL; /* must be set for any error case */ 1884 int ret = 0; 1885 1886 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); 1887 ca->bdev = bdev; 1888 ca->bdev->bd_holder = ca; 1889 1890 bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1); 1891 bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; 1892 get_page(sb_page); 1893 1894 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 1895 ca->discard = CACHE_DISCARD(&ca->sb); 1896 1897 ret = cache_alloc(ca); 1898 if (ret != 0) { 1899 if (ret == -ENOMEM) 1900 err = "cache_alloc(): -ENOMEM"; 1901 else 1902 err = "cache_alloc(): unknown error"; 1903 goto err; 1904 } 1905 1906 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { 1907 err = "error calling kobject_add"; 1908 ret = -ENOMEM; 1909 goto out; 1910 } 1911 1912 mutex_lock(&bch_register_lock); 1913 err = register_cache_set(ca); 1914 mutex_unlock(&bch_register_lock); 1915 1916 if (err) { 1917 ret = -ENODEV; 1918 goto out; 1919 } 1920 1921 pr_info("registered cache device %s", bdevname(bdev, name)); 1922 1923 out: 1924 kobject_put(&ca->kobj); 1925 1926 err: 1927 if (err) 1928 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1929 1930 return ret; 1931 } 1932 1933 /* Global interfaces/init */ 1934 1935 static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, 1936 const char *, size_t); 1937 1938 kobj_attribute_write(register, register_bcache); 1939 kobj_attribute_write(register_quiet, register_bcache); 1940 1941 static bool bch_is_open_backing(struct block_device *bdev) { 1942 struct cache_set *c, *tc; 1943 struct cached_dev *dc, *t; 1944 1945 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 1946 list_for_each_entry_safe(dc, t, &c->cached_devs, list) 1947 if (dc->bdev == bdev) 1948 return true; 1949 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1950 if (dc->bdev == bdev) 1951 return true; 1952 return false; 1953 } 1954 1955 static bool bch_is_open_cache(struct block_device *bdev) { 1956 struct cache_set *c, *tc; 1957 struct cache *ca; 1958 unsigned i; 1959 1960 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 1961 for_each_cache(ca, c, i) 1962 if (ca->bdev == bdev) 1963 return true; 1964 return false; 1965 } 1966 1967 static bool bch_is_open(struct block_device *bdev) { 1968 return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); 1969 } 1970 1971 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, 1972 const char *buffer, size_t size) 1973 { 1974 ssize_t ret = size; 1975 const char *err = "cannot allocate memory"; 1976 char *path = NULL; 1977 struct cache_sb *sb = NULL; 1978 struct block_device *bdev = NULL; 1979 struct page *sb_page = NULL; 1980 1981 if (!try_module_get(THIS_MODULE)) 1982 return -EBUSY; 1983 1984 if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || 1985 !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) 1986 goto err; 1987 1988 err = "failed to open device"; 1989 bdev = blkdev_get_by_path(strim(path), 1990 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1991 sb); 1992 if (IS_ERR(bdev)) { 1993 if (bdev == ERR_PTR(-EBUSY)) { 1994 bdev = lookup_bdev(strim(path)); 1995 mutex_lock(&bch_register_lock); 1996 if (!IS_ERR(bdev) && bch_is_open(bdev)) 1997 err = "device already registered"; 1998 else 1999 err = "device busy"; 2000 mutex_unlock(&bch_register_lock); 2001 if (!IS_ERR(bdev)) 2002 bdput(bdev); 2003 if (attr == &ksysfs_register_quiet) 2004 goto out; 2005 } 2006 goto err; 2007 } 2008 2009 err = "failed to set blocksize"; 2010 if (set_blocksize(bdev, 4096)) 2011 goto err_close; 2012 2013 err = read_super(sb, bdev, &sb_page); 2014 if (err) 2015 goto err_close; 2016 2017 if (SB_IS_BDEV(sb)) { 2018 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 2019 if (!dc) 2020 goto err_close; 2021 2022 mutex_lock(&bch_register_lock); 2023 register_bdev(sb, sb_page, bdev, dc); 2024 mutex_unlock(&bch_register_lock); 2025 } else { 2026 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); 2027 if (!ca) 2028 goto err_close; 2029 2030 if (register_cache(sb, sb_page, bdev, ca) != 0) 2031 goto err_close; 2032 } 2033 out: 2034 if (sb_page) 2035 put_page(sb_page); 2036 kfree(sb); 2037 kfree(path); 2038 module_put(THIS_MODULE); 2039 return ret; 2040 2041 err_close: 2042 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2043 err: 2044 pr_info("error opening %s: %s", path, err); 2045 ret = -EINVAL; 2046 goto out; 2047 } 2048 2049 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) 2050 { 2051 if (code == SYS_DOWN || 2052 code == SYS_HALT || 2053 code == SYS_POWER_OFF) { 2054 DEFINE_WAIT(wait); 2055 unsigned long start = jiffies; 2056 bool stopped = false; 2057 2058 struct cache_set *c, *tc; 2059 struct cached_dev *dc, *tdc; 2060 2061 mutex_lock(&bch_register_lock); 2062 2063 if (list_empty(&bch_cache_sets) && 2064 list_empty(&uncached_devices)) 2065 goto out; 2066 2067 pr_info("Stopping all devices:"); 2068 2069 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2070 bch_cache_set_stop(c); 2071 2072 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) 2073 bcache_device_stop(&dc->disk); 2074 2075 /* What's a condition variable? */ 2076 while (1) { 2077 long timeout = start + 2 * HZ - jiffies; 2078 2079 stopped = list_empty(&bch_cache_sets) && 2080 list_empty(&uncached_devices); 2081 2082 if (timeout < 0 || stopped) 2083 break; 2084 2085 prepare_to_wait(&unregister_wait, &wait, 2086 TASK_UNINTERRUPTIBLE); 2087 2088 mutex_unlock(&bch_register_lock); 2089 schedule_timeout(timeout); 2090 mutex_lock(&bch_register_lock); 2091 } 2092 2093 finish_wait(&unregister_wait, &wait); 2094 2095 if (stopped) 2096 pr_info("All devices stopped"); 2097 else 2098 pr_notice("Timeout waiting for devices to be closed"); 2099 out: 2100 mutex_unlock(&bch_register_lock); 2101 } 2102 2103 return NOTIFY_DONE; 2104 } 2105 2106 static struct notifier_block reboot = { 2107 .notifier_call = bcache_reboot, 2108 .priority = INT_MAX, /* before any real devices */ 2109 }; 2110 2111 static void bcache_exit(void) 2112 { 2113 bch_debug_exit(); 2114 bch_request_exit(); 2115 if (bcache_kobj) 2116 kobject_put(bcache_kobj); 2117 if (bcache_wq) 2118 destroy_workqueue(bcache_wq); 2119 if (bcache_major) 2120 unregister_blkdev(bcache_major, "bcache"); 2121 unregister_reboot_notifier(&reboot); 2122 mutex_destroy(&bch_register_lock); 2123 } 2124 2125 static int __init bcache_init(void) 2126 { 2127 static const struct attribute *files[] = { 2128 &ksysfs_register.attr, 2129 &ksysfs_register_quiet.attr, 2130 NULL 2131 }; 2132 2133 mutex_init(&bch_register_lock); 2134 init_waitqueue_head(&unregister_wait); 2135 register_reboot_notifier(&reboot); 2136 closure_debug_init(); 2137 2138 bcache_major = register_blkdev(0, "bcache"); 2139 if (bcache_major < 0) { 2140 unregister_reboot_notifier(&reboot); 2141 mutex_destroy(&bch_register_lock); 2142 return bcache_major; 2143 } 2144 2145 if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || 2146 !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || 2147 bch_request_init() || 2148 bch_debug_init(bcache_kobj) || 2149 sysfs_create_files(bcache_kobj, files)) 2150 goto err; 2151 2152 return 0; 2153 err: 2154 bcache_exit(); 2155 return -ENOMEM; 2156 } 2157 2158 module_exit(bcache_exit); 2159 module_init(bcache_init); 2160