/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* maximum number of partitions on a single bcache device */
#define BCACHE_MINORS		128
/* maximum number of bcache devices on a single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)

/* Superblock */

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);
	sb->last_mount = le32_to_cpu(s->last_mount);
	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size = le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
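		/*
		 * Plain backing-device superblock: cached data starts at the
		 * default offset, immediately after the space reserved for
		 * the superblock itself.
		 */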
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets = le64_to_cpu(s->nbuckets);
		sb->bucket_size = le16_to_cpu(s->bucket_size);

		sb->nr_in_set = le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio_first_page_all(bio));
	unsigned i;

	bio->bi_iter.bi_sector = SB_SECTOR;
	bio->bi_iter.bi_size = SB_SIZE;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch_bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio_set_dev(bio, dc->bdev);
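	/*
	 * sb_write_mutex (taken above) serializes superblock writers; the
	 * closure reference taken just before submission is dropped by
	 * write_bdev_super_endio() when the write completes.
	 */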
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	/* is_read = 0 */
	bch_count_io_errors(ca, bio->bi_status, 0,
			    "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio_set_dev(bio, ca->bdev);
		bio->bi_end_io = write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, 0, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to
 * implement LRU (and in the future other) cache replacement policies; for
 * most purposes it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other,
 * and it's actually the gens that must be written out at specific times -
 * it's no big deal if the priorities don't get written, if we lose them we
 * just reuse buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list;
 * the journal header points to the first bucket, the first bucket points to
 * the second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs
 * out of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size = bucket_bytes(ca);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio);
	closure_sync(cl);
}

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(&ca->sb);
		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after
	 * we finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
}

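/*
 * Each priority bucket written by bch_prio_write() above starts with a small
 * header (csum, magic, seq, next_bucket - see struct prio_set in bcache.h)
 * followed by packed {prio, gen} pairs, one per data bucket; next_bucket
 * chains to the priority bucket written next.  prio_read() walks that chain
 * at startup, beginning from the bucket number recorded in the journal (see
 * run_cache_set()), and refills b->prio and b->gen for every bucket.
 */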
static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, 0);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	if (id >= c->devices_max_used)
		c->devices_max_used = id + 1;

	closure_get(&c->caching);
}

static inline int first_minor_to_idx(int first_minor)
{
	return (first_minor/BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
	return (idx * BCACHE_MINORS);
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk) {
		ida_simple_remove(&bcache_device_idx,
				  first_minor_to_idx(d->disk->first_minor));
		put_disk(d->disk);
	}

	if (d->bio_split)
		bioset_free(d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

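/*
 * Set up the generic part of a bcache device: dirty-stripe accounting arrays
 * sized from @sectors, a minor range from the shared IDA, and the gendisk
 * plus its bio-based request queue with the limits bcache wants.
 */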
static int bcache_device_init(struct bcache_device *d, unsigned block_size,
			      sector_t sectors)
{
	struct request_queue *q;
	size_t n;
	int idx;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes ||
	    d->nr_stripes > INT_MAX ||
	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
			(unsigned)d->nr_stripes);
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	idx = ida_simple_get(&bcache_device_idx, 0,
			     BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
	if (idx < 0)
		return idx;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
					   BIOSET_NEED_BVECS |
					   BIOSET_NEED_RESCUER)) ||
	    !(d->disk = alloc_disk(BCACHE_MINORS))) {
		ida_simple_remove(&bcache_device_idx, idx);
		return -ENOMEM;
	}

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= idx_to_first_minor(idx);
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info->congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity	= 512;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
	clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

	blk_queue_write_cache(q, true, true);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char buf[SB_LABEL_SIZE + 1];
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		NULL,
		NULL,
	};

	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
	buf[SB_LABEL_SIZE] = '\0';
	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

	if (atomic_xchg(&dc->running, 1)) {
		kfree(env[1]);
		kfree(env[2]);
		return;
	}

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/*
	 * These won't show up in the uevent file - use udevadm monitor -e
	 * instead; only class / kset properties are persistent.
	 */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(refcount_read(&dc->count));

	mutex_lock(&bch_register_lock);

	cancel_delayed_work_sync(&dc->writeback_rate_update);
	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
		kthread_stop(dc->writeback_thread);
		dc->writeback_thread = NULL;
	}

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

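/*
 * Attach a backing device to a cache set: find (or allocate) its uuid_entry,
 * write the backing superblock so the attachment survives a reboot, start
 * the writeback machinery and finally expose the bcache device.
 */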
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];
	struct cached_dev *exist_dc, *t;

	bdevname(dc->bdev, buf);

	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	/* Check whether already attached */
	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
			pr_err("Tried to attach %s but duplicate UUID already attached",
				buf);

			return -EINVAL;
		}
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	*/

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	refcount_set(&dc->count, 1);

	/* Block writeback thread, but spawn it */
	down_write(&dc->writeback_lock);
	if (bch_cached_dev_writeback_start(dc)) {
		up_write(&dc->writeback_lock);
		return -ENOMEM;
	}

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		bch_sectors_dirty_init(&dc->disk);
		atomic_set(&dc->has_dirty, 1);
		refcount_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	/* Allow the writeback thread to proceed */
	up_write(&dc->writeback_lock);

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);
	if (dc->writeback_write_wq)
		destroy_workqueue(dc->writeback_write_wq);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

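/*
 * One-time initialization of a struct cached_dev: closure and kobject setup,
 * the recent-IO tracking used for the sequential bypass heuristic, a stripe
 * size taken from the backing queue's io_opt, and the generic bcache_device
 * sized as the partition minus the superblock's data_offset.
 */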
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff = 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
	if (ret)
		return ret;

	dc->disk.disk->queue->backing_dev_info->ra_pages =
		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
		    q->backing_dev_info->ra_pages);

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev,
			  struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
	bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", bdevname(bdev, name));

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c, NULL);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error %s: %s", bdevname(bdev, name), err);
	bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	mutex_lock(&bch_register_lock);
	bcache_device_free(d);
	mutex_unlock(&bch_register_lock);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c), u->sectors))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_sectors_dirty_init(d);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

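/*
 * Create a flash-only volume of @size bytes, carved out of the cache set
 * with no backing device behind it; this is what the cache set's
 * flash_vol_create sysfs attribute (see sysfs.c) ends up calling.
 */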
int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EPERM;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca) {
			ca->set = NULL;
			c->cache[ca->sb.nr_this_dev] = NULL;
			kobject_put(&ca->kobj);
		}

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

	if (c->moving_gc_wq)
		destroy_workqueue(c->moving_gc_wq);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->fill_iter)
		mempool_destroy(c->fill_iter);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca;
	struct btree *b;
	unsigned i;

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (c->gc_thread)
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list) {
		mutex_lock(&b->write_lock);
		if (btree_node_dirty(b))
			__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}

	for_each_cache(ca, c, i)
		if (ca->alloc_thread)
			kthread_stop(ca->alloc_thread);

	if (c->journal.cur) {
		cancel_delayed_work_sync(&c->journal.work);
		/* flush last journal entry if needed */
		c->journal.work.work.func(&c->journal.work.work);
	}

	closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++)
		if (c->devices[i]) {
			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
				dc = container_of(c->devices[i],
						  struct cached_dev, disk);
				bch_cached_dev_detach(dc);
			} else {
				bcache_device_stop(c->devices[i]);
			}
		}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size = sb->block_size;
	c->sb.bucket_size = sb->bucket_size;
	c->sb.nr_in_set = sb->nr_in_set;
	c->sb.last_mount = sb->last_mount;
	c->bucket_bits = ilog2(sb->bucket_size);
	c->block_bits = ilog2(sb->block_size);
	c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
	c->devices_max_used = 0;
	c->btree_pages = bucket_pages(c);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	init_waitqueue_head(&c->bucket_wait);
	init_waitqueue_head(&c->gc_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
					   BIOSET_NEED_BVECS |
					   BIOSET_NEED_RESCUER)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
						WQ_MEM_RECLAIM, 0)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c) ||
	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= DEFAULT_IO_ERROR_LIMIT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

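/*
 * Bring a complete cache set online.  If the set was previously in sync we
 * read back the priorities and uuids, check the btree and replay the
 * journal; otherwise we invalidate whatever is on the devices and write out
 * fresh priorities, uuids and an empty btree root before marking the set
 * CACHE_SYNC.
 */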
static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	struct closure cl;
	unsigned i;

	closure_init_stack(&cl);

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;
	set_gc_sectors(c);

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be
		 * called first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal);
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c, NULL);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return;
err:
	closure_sync(&cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, "%s", err);
}

static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
		ca->sb.bucket_size == c->sb.bucket_size &&
		ca->sb.nr_in_set == c->sb.nr_in_set;
}

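/*
 * Add @ca to the cache set named by its superblock, allocating the in-memory
 * cache_set the first time a member is seen; once all sb.nr_in_set members
 * have registered, run_cache_set() brings the whole set up.
 */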
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(bio_first_page_all(&ca->sb_bio));

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

static int cache_alloc(struct cache *ca)
{
	size_t free;
	size_t btree_buckets;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	/*
	 * When ca->sb.njournal_buckets is not zero, a journal exists and
	 * btree nodes may split during bch_journal_replay(), so buckets of
	 * the RESERVE_BTREE type are needed.  In the worst case all journal
	 * buckets hold valid journal entries and every key needs to be
	 * replayed, so reserve as many RESERVE_BTREE buckets as there are
	 * journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

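/*
 * Take ownership of an opened cache device: copy in the superblock, allocate
 * the in-memory cache structures and join (or create) the cache set it
 * belongs to.  The caller's bdev reference is owned by the cache from here
 * on.
 */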
static int register_cache(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, name);

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s", name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);

static bool bch_is_open_backing(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev) {
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

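/*
 * Entry point for userspace registration: a device path written to
 * /sys/fs/bcache/register (or register_quiet) ends up here, e.g.
 *
 *	echo /dev/sdc > /sys/fs/bcache/register
 *
 * The on-disk superblock decides whether the device becomes a backing
 * device or a cache.
 */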
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	err = "failed to register device";
	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

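/*
 * On halt/reboot/poweroff, stop every cache set and backing device and wait
 * briefly for them to finish so dirty metadata makes it to disk before the
 * machine goes down.
 */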
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj) ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);