1 /* 2 * bcache setup/teardown code, and some metadata io - read a superblock and 3 * figure out what to do with it. 4 * 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 * Copyright 2012 Google, Inc. 7 */ 8 9 #include "bcache.h" 10 #include "btree.h" 11 #include "debug.h" 12 #include "request.h" 13 #include "writeback.h" 14 15 #include <linux/blkdev.h> 16 #include <linux/buffer_head.h> 17 #include <linux/debugfs.h> 18 #include <linux/genhd.h> 19 #include <linux/idr.h> 20 #include <linux/kthread.h> 21 #include <linux/module.h> 22 #include <linux/random.h> 23 #include <linux/reboot.h> 24 #include <linux/sysfs.h> 25 26 MODULE_LICENSE("GPL"); 27 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); 28 29 static const char bcache_magic[] = { 30 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, 31 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 32 }; 33 34 static const char invalid_uuid[] = { 35 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, 36 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 37 }; 38 39 /* Default is -1; we skip past it for struct cached_dev's cache mode */ 40 const char * const bch_cache_modes[] = { 41 "default", 42 "writethrough", 43 "writeback", 44 "writearound", 45 "none", 46 NULL 47 }; 48 49 static struct kobject *bcache_kobj; 50 struct mutex bch_register_lock; 51 LIST_HEAD(bch_cache_sets); 52 static LIST_HEAD(uncached_devices); 53 54 static int bcache_major; 55 static DEFINE_IDA(bcache_minor); 56 static wait_queue_head_t unregister_wait; 57 struct workqueue_struct *bcache_wq; 58 59 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 60 61 static void bio_split_pool_free(struct bio_split_pool *p) 62 { 63 if (p->bio_split_hook) 64 mempool_destroy(p->bio_split_hook); 65 66 if (p->bio_split) 67 bioset_free(p->bio_split); 68 } 69 70 static int bio_split_pool_init(struct bio_split_pool *p) 71 { 72 p->bio_split = bioset_create(4, 0); 73 if (!p->bio_split) 74 return -ENOMEM; 75 76 p->bio_split_hook = mempool_create_kmalloc_pool(4, 77 sizeof(struct bio_split_hook)); 78 if (!p->bio_split_hook) 79 return -ENOMEM; 80 81 return 0; 82 } 83 84 /* Superblock */ 85 86 static const char *read_super(struct cache_sb *sb, struct block_device *bdev, 87 struct page **res) 88 { 89 const char *err; 90 struct cache_sb *s; 91 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); 92 unsigned i; 93 94 if (!bh) 95 return "IO error"; 96 97 s = (struct cache_sb *) bh->b_data; 98 99 sb->offset = le64_to_cpu(s->offset); 100 sb->version = le64_to_cpu(s->version); 101 102 memcpy(sb->magic, s->magic, 16); 103 memcpy(sb->uuid, s->uuid, 16); 104 memcpy(sb->set_uuid, s->set_uuid, 16); 105 memcpy(sb->label, s->label, SB_LABEL_SIZE); 106 107 sb->flags = le64_to_cpu(s->flags); 108 sb->seq = le64_to_cpu(s->seq); 109 sb->last_mount = le32_to_cpu(s->last_mount); 110 sb->first_bucket = le16_to_cpu(s->first_bucket); 111 sb->keys = le16_to_cpu(s->keys); 112 113 for (i = 0; i < SB_JOURNAL_BUCKETS; i++) 114 sb->d[i] = le64_to_cpu(s->d[i]); 115 116 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u", 117 sb->version, sb->flags, sb->seq, sb->keys); 118 119 err = "Not a bcache superblock"; 120 if (sb->offset != SB_SECTOR) 121 goto err; 122 123 if (memcmp(sb->magic, bcache_magic, 16)) 124 goto err; 125 126 err = "Too many journal buckets"; 127 if (sb->keys > SB_JOURNAL_BUCKETS) 128 goto err; 129 130 err = "Bad checksum"; 131 if (s->csum != csum_set(s)) 132 goto err; 133 134 err = "Bad UUID"; 135 if (bch_is_zero(sb->uuid, 16)) 136 goto err; 137 138 sb->block_size = le16_to_cpu(s->block_size); 139 140 err = "Superblock block size smaller than device block size"; 141 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) 142 goto err; 143 144 switch (sb->version) { 145 case BCACHE_SB_VERSION_BDEV: 146 sb->data_offset = BDEV_DATA_START_DEFAULT; 147 break; 148 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: 149 sb->data_offset = le64_to_cpu(s->data_offset); 150 151 err = "Bad data offset"; 152 if (sb->data_offset < BDEV_DATA_START_DEFAULT) 153 goto err; 154 155 break; 156 case BCACHE_SB_VERSION_CDEV: 157 case BCACHE_SB_VERSION_CDEV_WITH_UUID: 158 sb->nbuckets = le64_to_cpu(s->nbuckets); 159 sb->block_size = le16_to_cpu(s->block_size); 160 sb->bucket_size = le16_to_cpu(s->bucket_size); 161 162 sb->nr_in_set = le16_to_cpu(s->nr_in_set); 163 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); 164 165 err = "Too many buckets"; 166 if (sb->nbuckets > LONG_MAX) 167 goto err; 168 169 err = "Not enough buckets"; 170 if (sb->nbuckets < 1 << 7) 171 goto err; 172 173 err = "Bad block/bucket size"; 174 if (!is_power_of_2(sb->block_size) || 175 sb->block_size > PAGE_SECTORS || 176 !is_power_of_2(sb->bucket_size) || 177 sb->bucket_size < PAGE_SECTORS) 178 goto err; 179 180 err = "Invalid superblock: device too small"; 181 if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets) 182 goto err; 183 184 err = "Bad UUID"; 185 if (bch_is_zero(sb->set_uuid, 16)) 186 goto err; 187 188 err = "Bad cache device number in set"; 189 if (!sb->nr_in_set || 190 sb->nr_in_set <= sb->nr_this_dev || 191 sb->nr_in_set > MAX_CACHES_PER_SET) 192 goto err; 193 194 err = "Journal buckets not sequential"; 195 for (i = 0; i < sb->keys; i++) 196 if (sb->d[i] != sb->first_bucket + i) 197 goto err; 198 199 err = "Too many journal buckets"; 200 if (sb->first_bucket + sb->keys > sb->nbuckets) 201 goto err; 202 203 err = "Invalid superblock: first bucket comes before end of super"; 204 if (sb->first_bucket * sb->bucket_size < 16) 205 goto err; 206 207 break; 208 default: 209 err = "Unsupported superblock version"; 210 goto err; 211 } 212 213 sb->last_mount = get_seconds(); 214 err = NULL; 215 216 get_page(bh->b_page); 217 *res = bh->b_page; 218 err: 219 put_bh(bh); 220 return err; 221 } 222 223 static void write_bdev_super_endio(struct bio *bio, int error) 224 { 225 struct cached_dev *dc = bio->bi_private; 226 /* XXX: error checking */ 227 228 closure_put(&dc->sb_write.cl); 229 } 230 231 static void __write_super(struct cache_sb *sb, struct bio *bio) 232 { 233 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); 234 unsigned i; 235 236 bio->bi_sector = SB_SECTOR; 237 bio->bi_rw = REQ_SYNC|REQ_META; 238 bio->bi_size = SB_SIZE; 239 bch_bio_map(bio, NULL); 240 241 out->offset = cpu_to_le64(sb->offset); 242 out->version = cpu_to_le64(sb->version); 243 244 memcpy(out->uuid, sb->uuid, 16); 245 memcpy(out->set_uuid, sb->set_uuid, 16); 246 memcpy(out->label, sb->label, SB_LABEL_SIZE); 247 248 out->flags = cpu_to_le64(sb->flags); 249 out->seq = cpu_to_le64(sb->seq); 250 251 out->last_mount = cpu_to_le32(sb->last_mount); 252 out->first_bucket = cpu_to_le16(sb->first_bucket); 253 out->keys = cpu_to_le16(sb->keys); 254 255 for (i = 0; i < sb->keys; i++) 256 out->d[i] = cpu_to_le64(sb->d[i]); 257 258 out->csum = csum_set(out); 259 260 pr_debug("ver %llu, flags %llu, seq %llu", 261 sb->version, sb->flags, sb->seq); 262 263 submit_bio(REQ_WRITE, bio); 264 } 265 266 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) 267 { 268 struct closure *cl = &dc->sb_write.cl; 269 struct bio *bio = &dc->sb_bio; 270 271 closure_lock(&dc->sb_write, parent); 272 273 bio_reset(bio); 274 bio->bi_bdev = dc->bdev; 275 bio->bi_end_io = write_bdev_super_endio; 276 bio->bi_private = dc; 277 278 closure_get(cl); 279 __write_super(&dc->sb, bio); 280 281 closure_return(cl); 282 } 283 284 static void write_super_endio(struct bio *bio, int error) 285 { 286 struct cache *ca = bio->bi_private; 287 288 bch_count_io_errors(ca, error, "writing superblock"); 289 closure_put(&ca->set->sb_write.cl); 290 } 291 292 void bcache_write_super(struct cache_set *c) 293 { 294 struct closure *cl = &c->sb_write.cl; 295 struct cache *ca; 296 unsigned i; 297 298 closure_lock(&c->sb_write, &c->cl); 299 300 c->sb.seq++; 301 302 for_each_cache(ca, c, i) { 303 struct bio *bio = &ca->sb_bio; 304 305 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 306 ca->sb.seq = c->sb.seq; 307 ca->sb.last_mount = c->sb.last_mount; 308 309 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 310 311 bio_reset(bio); 312 bio->bi_bdev = ca->bdev; 313 bio->bi_end_io = write_super_endio; 314 bio->bi_private = ca; 315 316 closure_get(cl); 317 __write_super(&ca->sb, bio); 318 } 319 320 closure_return(cl); 321 } 322 323 /* UUID io */ 324 325 static void uuid_endio(struct bio *bio, int error) 326 { 327 struct closure *cl = bio->bi_private; 328 struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl); 329 330 cache_set_err_on(error, c, "accessing uuids"); 331 bch_bbio_free(bio, c); 332 closure_put(cl); 333 } 334 335 static void uuid_io(struct cache_set *c, unsigned long rw, 336 struct bkey *k, struct closure *parent) 337 { 338 struct closure *cl = &c->uuid_write.cl; 339 struct uuid_entry *u; 340 unsigned i; 341 char buf[80]; 342 343 BUG_ON(!parent); 344 closure_lock(&c->uuid_write, parent); 345 346 for (i = 0; i < KEY_PTRS(k); i++) { 347 struct bio *bio = bch_bbio_alloc(c); 348 349 bio->bi_rw = REQ_SYNC|REQ_META|rw; 350 bio->bi_size = KEY_SIZE(k) << 9; 351 352 bio->bi_end_io = uuid_endio; 353 bio->bi_private = cl; 354 bch_bio_map(bio, c->uuids); 355 356 bch_submit_bbio(bio, c, k, i); 357 358 if (!(rw & WRITE)) 359 break; 360 } 361 362 bch_bkey_to_text(buf, sizeof(buf), k); 363 pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf); 364 365 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 366 if (!bch_is_zero(u->uuid, 16)) 367 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", 368 u - c->uuids, u->uuid, u->label, 369 u->first_reg, u->last_reg, u->invalidated); 370 371 closure_return(cl); 372 } 373 374 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 375 { 376 struct bkey *k = &j->uuid_bucket; 377 378 if (bch_btree_ptr_invalid(c, k)) 379 return "bad uuid pointer"; 380 381 bkey_copy(&c->uuid_bucket, k); 382 uuid_io(c, READ_SYNC, k, cl); 383 384 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 385 struct uuid_entry_v0 *u0 = (void *) c->uuids; 386 struct uuid_entry *u1 = (void *) c->uuids; 387 int i; 388 389 closure_sync(cl); 390 391 /* 392 * Since the new uuid entry is bigger than the old, we have to 393 * convert starting at the highest memory address and work down 394 * in order to do it in place 395 */ 396 397 for (i = c->nr_uuids - 1; 398 i >= 0; 399 --i) { 400 memcpy(u1[i].uuid, u0[i].uuid, 16); 401 memcpy(u1[i].label, u0[i].label, 32); 402 403 u1[i].first_reg = u0[i].first_reg; 404 u1[i].last_reg = u0[i].last_reg; 405 u1[i].invalidated = u0[i].invalidated; 406 407 u1[i].flags = 0; 408 u1[i].sectors = 0; 409 } 410 } 411 412 return NULL; 413 } 414 415 static int __uuid_write(struct cache_set *c) 416 { 417 BKEY_PADDED(key) k; 418 struct closure cl; 419 closure_init_stack(&cl); 420 421 lockdep_assert_held(&bch_register_lock); 422 423 if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true)) 424 return 1; 425 426 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 427 uuid_io(c, REQ_WRITE, &k.key, &cl); 428 closure_sync(&cl); 429 430 bkey_copy(&c->uuid_bucket, &k.key); 431 bkey_put(c, &k.key); 432 return 0; 433 } 434 435 int bch_uuid_write(struct cache_set *c) 436 { 437 int ret = __uuid_write(c); 438 439 if (!ret) 440 bch_journal_meta(c, NULL); 441 442 return ret; 443 } 444 445 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 446 { 447 struct uuid_entry *u; 448 449 for (u = c->uuids; 450 u < c->uuids + c->nr_uuids; u++) 451 if (!memcmp(u->uuid, uuid, 16)) 452 return u; 453 454 return NULL; 455 } 456 457 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 458 { 459 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 460 return uuid_find(c, zero_uuid); 461 } 462 463 /* 464 * Bucket priorities/gens: 465 * 466 * For each bucket, we store on disk its 467 * 8 bit gen 468 * 16 bit priority 469 * 470 * See alloc.c for an explanation of the gen. The priority is used to implement 471 * lru (and in the future other) cache replacement policies; for most purposes 472 * it's just an opaque integer. 473 * 474 * The gens and the priorities don't have a whole lot to do with each other, and 475 * it's actually the gens that must be written out at specific times - it's no 476 * big deal if the priorities don't get written, if we lose them we just reuse 477 * buckets in suboptimal order. 478 * 479 * On disk they're stored in a packed array, and in as many buckets are required 480 * to fit them all. The buckets we use to store them form a list; the journal 481 * header points to the first bucket, the first bucket points to the second 482 * bucket, et cetera. 483 * 484 * This code is used by the allocation code; periodically (whenever it runs out 485 * of buckets to allocate from) the allocation code will invalidate some 486 * buckets, but it can't use those buckets until their new gens are safely on 487 * disk. 488 */ 489 490 static void prio_endio(struct bio *bio, int error) 491 { 492 struct cache *ca = bio->bi_private; 493 494 cache_set_err_on(error, ca->set, "accessing priorities"); 495 bch_bbio_free(bio, ca->set); 496 closure_put(&ca->prio); 497 } 498 499 static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) 500 { 501 struct closure *cl = &ca->prio; 502 struct bio *bio = bch_bbio_alloc(ca->set); 503 504 closure_init_stack(cl); 505 506 bio->bi_sector = bucket * ca->sb.bucket_size; 507 bio->bi_bdev = ca->bdev; 508 bio->bi_rw = REQ_SYNC|REQ_META|rw; 509 bio->bi_size = bucket_bytes(ca); 510 511 bio->bi_end_io = prio_endio; 512 bio->bi_private = ca; 513 bch_bio_map(bio, ca->disk_buckets); 514 515 closure_bio_submit(bio, &ca->prio, ca); 516 closure_sync(cl); 517 } 518 519 #define buckets_free(c) "free %zu, free_inc %zu, unused %zu", \ 520 fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused) 521 522 void bch_prio_write(struct cache *ca) 523 { 524 int i; 525 struct bucket *b; 526 struct closure cl; 527 528 closure_init_stack(&cl); 529 530 lockdep_assert_held(&ca->set->bucket_lock); 531 532 for (b = ca->buckets; 533 b < ca->buckets + ca->sb.nbuckets; b++) 534 b->disk_gen = b->gen; 535 536 ca->disk_buckets->seq++; 537 538 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), 539 &ca->meta_sectors_written); 540 541 pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), 542 fifo_used(&ca->free_inc), fifo_used(&ca->unused)); 543 544 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 545 long bucket; 546 struct prio_set *p = ca->disk_buckets; 547 struct bucket_disk *d = p->data; 548 struct bucket_disk *end = d + prios_per_bucket(ca); 549 550 for (b = ca->buckets + i * prios_per_bucket(ca); 551 b < ca->buckets + ca->sb.nbuckets && d < end; 552 b++, d++) { 553 d->prio = cpu_to_le16(b->prio); 554 d->gen = b->gen; 555 } 556 557 p->next_bucket = ca->prio_buckets[i + 1]; 558 p->magic = pset_magic(&ca->sb); 559 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); 560 561 bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true); 562 BUG_ON(bucket == -1); 563 564 mutex_unlock(&ca->set->bucket_lock); 565 prio_io(ca, bucket, REQ_WRITE); 566 mutex_lock(&ca->set->bucket_lock); 567 568 ca->prio_buckets[i] = bucket; 569 atomic_dec_bug(&ca->buckets[bucket].pin); 570 } 571 572 mutex_unlock(&ca->set->bucket_lock); 573 574 bch_journal_meta(ca->set, &cl); 575 closure_sync(&cl); 576 577 mutex_lock(&ca->set->bucket_lock); 578 579 ca->need_save_prio = 0; 580 581 /* 582 * Don't want the old priorities to get garbage collected until after we 583 * finish writing the new ones, and they're journalled 584 */ 585 for (i = 0; i < prio_buckets(ca); i++) 586 ca->prio_last_buckets[i] = ca->prio_buckets[i]; 587 } 588 589 static void prio_read(struct cache *ca, uint64_t bucket) 590 { 591 struct prio_set *p = ca->disk_buckets; 592 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; 593 struct bucket *b; 594 unsigned bucket_nr = 0; 595 596 for (b = ca->buckets; 597 b < ca->buckets + ca->sb.nbuckets; 598 b++, d++) { 599 if (d == end) { 600 ca->prio_buckets[bucket_nr] = bucket; 601 ca->prio_last_buckets[bucket_nr] = bucket; 602 bucket_nr++; 603 604 prio_io(ca, bucket, READ_SYNC); 605 606 if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) 607 pr_warn("bad csum reading priorities"); 608 609 if (p->magic != pset_magic(&ca->sb)) 610 pr_warn("bad magic reading priorities"); 611 612 bucket = p->next_bucket; 613 d = p->data; 614 } 615 616 b->prio = le16_to_cpu(d->prio); 617 b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen; 618 } 619 } 620 621 /* Bcache device */ 622 623 static int open_dev(struct block_device *b, fmode_t mode) 624 { 625 struct bcache_device *d = b->bd_disk->private_data; 626 if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) 627 return -ENXIO; 628 629 closure_get(&d->cl); 630 return 0; 631 } 632 633 static void release_dev(struct gendisk *b, fmode_t mode) 634 { 635 struct bcache_device *d = b->private_data; 636 closure_put(&d->cl); 637 } 638 639 static int ioctl_dev(struct block_device *b, fmode_t mode, 640 unsigned int cmd, unsigned long arg) 641 { 642 struct bcache_device *d = b->bd_disk->private_data; 643 return d->ioctl(d, mode, cmd, arg); 644 } 645 646 static const struct block_device_operations bcache_ops = { 647 .open = open_dev, 648 .release = release_dev, 649 .ioctl = ioctl_dev, 650 .owner = THIS_MODULE, 651 }; 652 653 void bcache_device_stop(struct bcache_device *d) 654 { 655 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) 656 closure_queue(&d->cl); 657 } 658 659 static void bcache_device_unlink(struct bcache_device *d) 660 { 661 lockdep_assert_held(&bch_register_lock); 662 663 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { 664 unsigned i; 665 struct cache *ca; 666 667 sysfs_remove_link(&d->c->kobj, d->name); 668 sysfs_remove_link(&d->kobj, "cache"); 669 670 for_each_cache(ca, d->c, i) 671 bd_unlink_disk_holder(ca->bdev, d->disk); 672 } 673 } 674 675 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 676 const char *name) 677 { 678 unsigned i; 679 struct cache *ca; 680 681 for_each_cache(ca, d->c, i) 682 bd_link_disk_holder(ca->bdev, d->disk); 683 684 snprintf(d->name, BCACHEDEVNAME_SIZE, 685 "%s%u", name, d->id); 686 687 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || 688 sysfs_create_link(&c->kobj, &d->kobj, d->name), 689 "Couldn't create device <-> cache set symlinks"); 690 } 691 692 static void bcache_device_detach(struct bcache_device *d) 693 { 694 lockdep_assert_held(&bch_register_lock); 695 696 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { 697 struct uuid_entry *u = d->c->uuids + d->id; 698 699 SET_UUID_FLASH_ONLY(u, 0); 700 memcpy(u->uuid, invalid_uuid, 16); 701 u->invalidated = cpu_to_le32(get_seconds()); 702 bch_uuid_write(d->c); 703 } 704 705 bcache_device_unlink(d); 706 707 d->c->devices[d->id] = NULL; 708 closure_put(&d->c->caching); 709 d->c = NULL; 710 } 711 712 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, 713 unsigned id) 714 { 715 BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags)); 716 717 d->id = id; 718 d->c = c; 719 c->devices[id] = d; 720 721 closure_get(&c->caching); 722 } 723 724 static void bcache_device_free(struct bcache_device *d) 725 { 726 lockdep_assert_held(&bch_register_lock); 727 728 pr_info("%s stopped", d->disk->disk_name); 729 730 if (d->c) 731 bcache_device_detach(d); 732 if (d->disk && d->disk->flags & GENHD_FL_UP) 733 del_gendisk(d->disk); 734 if (d->disk && d->disk->queue) 735 blk_cleanup_queue(d->disk->queue); 736 if (d->disk) { 737 ida_simple_remove(&bcache_minor, d->disk->first_minor); 738 put_disk(d->disk); 739 } 740 741 bio_split_pool_free(&d->bio_split_hook); 742 if (d->unaligned_bvec) 743 mempool_destroy(d->unaligned_bvec); 744 if (d->bio_split) 745 bioset_free(d->bio_split); 746 if (is_vmalloc_addr(d->full_dirty_stripes)) 747 vfree(d->full_dirty_stripes); 748 else 749 kfree(d->full_dirty_stripes); 750 if (is_vmalloc_addr(d->stripe_sectors_dirty)) 751 vfree(d->stripe_sectors_dirty); 752 else 753 kfree(d->stripe_sectors_dirty); 754 755 closure_debug_destroy(&d->cl); 756 } 757 758 static int bcache_device_init(struct bcache_device *d, unsigned block_size, 759 sector_t sectors) 760 { 761 struct request_queue *q; 762 size_t n; 763 int minor; 764 765 if (!d->stripe_size) 766 d->stripe_size = 1 << 31; 767 768 d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); 769 770 if (!d->nr_stripes || 771 d->nr_stripes > INT_MAX || 772 d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { 773 pr_err("nr_stripes too large"); 774 return -ENOMEM; 775 } 776 777 n = d->nr_stripes * sizeof(atomic_t); 778 d->stripe_sectors_dirty = n < PAGE_SIZE << 6 779 ? kzalloc(n, GFP_KERNEL) 780 : vzalloc(n); 781 if (!d->stripe_sectors_dirty) 782 return -ENOMEM; 783 784 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); 785 d->full_dirty_stripes = n < PAGE_SIZE << 6 786 ? kzalloc(n, GFP_KERNEL) 787 : vzalloc(n); 788 if (!d->full_dirty_stripes) 789 return -ENOMEM; 790 791 minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL); 792 if (minor < 0) 793 return minor; 794 795 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 796 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, 797 sizeof(struct bio_vec) * BIO_MAX_PAGES)) || 798 bio_split_pool_init(&d->bio_split_hook) || 799 !(d->disk = alloc_disk(1))) { 800 ida_simple_remove(&bcache_minor, minor); 801 return -ENOMEM; 802 } 803 804 set_capacity(d->disk, sectors); 805 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor); 806 807 d->disk->major = bcache_major; 808 d->disk->first_minor = minor; 809 d->disk->fops = &bcache_ops; 810 d->disk->private_data = d; 811 812 q = blk_alloc_queue(GFP_KERNEL); 813 if (!q) 814 return -ENOMEM; 815 816 blk_queue_make_request(q, NULL); 817 d->disk->queue = q; 818 q->queuedata = d; 819 q->backing_dev_info.congested_data = d; 820 q->limits.max_hw_sectors = UINT_MAX; 821 q->limits.max_sectors = UINT_MAX; 822 q->limits.max_segment_size = UINT_MAX; 823 q->limits.max_segments = BIO_MAX_PAGES; 824 q->limits.max_discard_sectors = UINT_MAX; 825 q->limits.io_min = block_size; 826 q->limits.logical_block_size = block_size; 827 q->limits.physical_block_size = block_size; 828 set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); 829 set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); 830 831 blk_queue_flush(q, REQ_FLUSH|REQ_FUA); 832 833 return 0; 834 } 835 836 /* Cached device */ 837 838 static void calc_cached_dev_sectors(struct cache_set *c) 839 { 840 uint64_t sectors = 0; 841 struct cached_dev *dc; 842 843 list_for_each_entry(dc, &c->cached_devs, list) 844 sectors += bdev_sectors(dc->bdev); 845 846 c->cached_dev_sectors = sectors; 847 } 848 849 void bch_cached_dev_run(struct cached_dev *dc) 850 { 851 struct bcache_device *d = &dc->disk; 852 char buf[SB_LABEL_SIZE + 1]; 853 char *env[] = { 854 "DRIVER=bcache", 855 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), 856 NULL, 857 NULL, 858 }; 859 860 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); 861 buf[SB_LABEL_SIZE] = '\0'; 862 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); 863 864 if (atomic_xchg(&dc->running, 1)) 865 return; 866 867 if (!d->c && 868 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 869 struct closure cl; 870 closure_init_stack(&cl); 871 872 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); 873 bch_write_bdev_super(dc, &cl); 874 closure_sync(&cl); 875 } 876 877 add_disk(d->disk); 878 bd_link_disk_holder(dc->bdev, dc->disk.disk); 879 /* won't show up in the uevent file, use udevadm monitor -e instead 880 * only class / kset properties are persistent */ 881 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 882 kfree(env[1]); 883 kfree(env[2]); 884 885 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 886 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) 887 pr_debug("error creating sysfs link"); 888 } 889 890 static void cached_dev_detach_finish(struct work_struct *w) 891 { 892 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 893 char buf[BDEVNAME_SIZE]; 894 struct closure cl; 895 closure_init_stack(&cl); 896 897 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); 898 BUG_ON(atomic_read(&dc->count)); 899 900 mutex_lock(&bch_register_lock); 901 902 memset(&dc->sb.set_uuid, 0, 16); 903 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 904 905 bch_write_bdev_super(dc, &cl); 906 closure_sync(&cl); 907 908 bcache_device_detach(&dc->disk); 909 list_move(&dc->list, &uncached_devices); 910 911 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); 912 913 mutex_unlock(&bch_register_lock); 914 915 pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); 916 917 /* Drop ref we took in cached_dev_detach() */ 918 closure_put(&dc->disk.cl); 919 } 920 921 void bch_cached_dev_detach(struct cached_dev *dc) 922 { 923 lockdep_assert_held(&bch_register_lock); 924 925 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 926 return; 927 928 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 929 return; 930 931 /* 932 * Block the device from being closed and freed until we're finished 933 * detaching 934 */ 935 closure_get(&dc->disk.cl); 936 937 bch_writeback_queue(dc); 938 cached_dev_put(dc); 939 } 940 941 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) 942 { 943 uint32_t rtime = cpu_to_le32(get_seconds()); 944 struct uuid_entry *u; 945 char buf[BDEVNAME_SIZE]; 946 947 bdevname(dc->bdev, buf); 948 949 if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) 950 return -ENOENT; 951 952 if (dc->disk.c) { 953 pr_err("Can't attach %s: already attached", buf); 954 return -EINVAL; 955 } 956 957 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { 958 pr_err("Can't attach %s: shutting down", buf); 959 return -EINVAL; 960 } 961 962 if (dc->sb.block_size < c->sb.block_size) { 963 /* Will die */ 964 pr_err("Couldn't attach %s: block size less than set's block size", 965 buf); 966 return -EINVAL; 967 } 968 969 u = uuid_find(c, dc->sb.uuid); 970 971 if (u && 972 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || 973 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { 974 memcpy(u->uuid, invalid_uuid, 16); 975 u->invalidated = cpu_to_le32(get_seconds()); 976 u = NULL; 977 } 978 979 if (!u) { 980 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 981 pr_err("Couldn't find uuid for %s in set", buf); 982 return -ENOENT; 983 } 984 985 u = uuid_find_empty(c); 986 if (!u) { 987 pr_err("Not caching %s, no room for UUID", buf); 988 return -EINVAL; 989 } 990 } 991 992 /* Deadlocks since we're called via sysfs... 993 sysfs_remove_file(&dc->kobj, &sysfs_attach); 994 */ 995 996 if (bch_is_zero(u->uuid, 16)) { 997 struct closure cl; 998 closure_init_stack(&cl); 999 1000 memcpy(u->uuid, dc->sb.uuid, 16); 1001 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); 1002 u->first_reg = u->last_reg = rtime; 1003 bch_uuid_write(c); 1004 1005 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); 1006 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 1007 1008 bch_write_bdev_super(dc, &cl); 1009 closure_sync(&cl); 1010 } else { 1011 u->last_reg = rtime; 1012 bch_uuid_write(c); 1013 } 1014 1015 bcache_device_attach(&dc->disk, c, u - c->uuids); 1016 list_move(&dc->list, &c->cached_devs); 1017 calc_cached_dev_sectors(c); 1018 1019 smp_wmb(); 1020 /* 1021 * dc->c must be set before dc->count != 0 - paired with the mb in 1022 * cached_dev_get() 1023 */ 1024 atomic_set(&dc->count, 1); 1025 1026 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1027 bch_sectors_dirty_init(dc); 1028 atomic_set(&dc->has_dirty, 1); 1029 atomic_inc(&dc->count); 1030 bch_writeback_queue(dc); 1031 } 1032 1033 bch_cached_dev_run(dc); 1034 bcache_device_link(&dc->disk, c, "bdev"); 1035 1036 pr_info("Caching %s as %s on set %pU", 1037 bdevname(dc->bdev, buf), dc->disk.disk->disk_name, 1038 dc->disk.c->sb.set_uuid); 1039 return 0; 1040 } 1041 1042 void bch_cached_dev_release(struct kobject *kobj) 1043 { 1044 struct cached_dev *dc = container_of(kobj, struct cached_dev, 1045 disk.kobj); 1046 kfree(dc); 1047 module_put(THIS_MODULE); 1048 } 1049 1050 static void cached_dev_free(struct closure *cl) 1051 { 1052 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1053 1054 cancel_delayed_work_sync(&dc->writeback_rate_update); 1055 kthread_stop(dc->writeback_thread); 1056 1057 mutex_lock(&bch_register_lock); 1058 1059 if (atomic_read(&dc->running)) 1060 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 1061 bcache_device_free(&dc->disk); 1062 list_del(&dc->list); 1063 1064 mutex_unlock(&bch_register_lock); 1065 1066 if (!IS_ERR_OR_NULL(dc->bdev)) { 1067 if (dc->bdev->bd_disk) 1068 blk_sync_queue(bdev_get_queue(dc->bdev)); 1069 1070 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1071 } 1072 1073 wake_up(&unregister_wait); 1074 1075 kobject_put(&dc->disk.kobj); 1076 } 1077 1078 static void cached_dev_flush(struct closure *cl) 1079 { 1080 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1081 struct bcache_device *d = &dc->disk; 1082 1083 mutex_lock(&bch_register_lock); 1084 bcache_device_unlink(d); 1085 mutex_unlock(&bch_register_lock); 1086 1087 bch_cache_accounting_destroy(&dc->accounting); 1088 kobject_del(&d->kobj); 1089 1090 continue_at(cl, cached_dev_free, system_wq); 1091 } 1092 1093 static int cached_dev_init(struct cached_dev *dc, unsigned block_size) 1094 { 1095 int ret; 1096 struct io *io; 1097 struct request_queue *q = bdev_get_queue(dc->bdev); 1098 1099 __module_get(THIS_MODULE); 1100 INIT_LIST_HEAD(&dc->list); 1101 closure_init(&dc->disk.cl, NULL); 1102 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); 1103 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1104 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1105 closure_init_unlocked(&dc->sb_write); 1106 INIT_LIST_HEAD(&dc->io_lru); 1107 spin_lock_init(&dc->io_lock); 1108 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); 1109 1110 dc->sequential_cutoff = 4 << 20; 1111 1112 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1113 list_add(&io->lru, &dc->io_lru); 1114 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1115 } 1116 1117 ret = bcache_device_init(&dc->disk, block_size, 1118 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1119 if (ret) 1120 return ret; 1121 1122 set_capacity(dc->disk.disk, 1123 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1124 1125 dc->disk.disk->queue->backing_dev_info.ra_pages = 1126 max(dc->disk.disk->queue->backing_dev_info.ra_pages, 1127 q->backing_dev_info.ra_pages); 1128 1129 bch_cached_dev_request_init(dc); 1130 bch_cached_dev_writeback_init(dc); 1131 return 0; 1132 } 1133 1134 /* Cached device - bcache superblock */ 1135 1136 static void register_bdev(struct cache_sb *sb, struct page *sb_page, 1137 struct block_device *bdev, 1138 struct cached_dev *dc) 1139 { 1140 char name[BDEVNAME_SIZE]; 1141 const char *err = "cannot allocate memory"; 1142 struct cache_set *c; 1143 1144 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1145 dc->bdev = bdev; 1146 dc->bdev->bd_holder = dc; 1147 1148 bio_init(&dc->sb_bio); 1149 dc->sb_bio.bi_max_vecs = 1; 1150 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs; 1151 dc->sb_bio.bi_io_vec[0].bv_page = sb_page; 1152 get_page(sb_page); 1153 1154 if (cached_dev_init(dc, sb->block_size << 9)) 1155 goto err; 1156 1157 err = "error creating kobject"; 1158 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1159 "bcache")) 1160 goto err; 1161 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1162 goto err; 1163 1164 pr_info("registered backing device %s", bdevname(bdev, name)); 1165 1166 list_add(&dc->list, &uncached_devices); 1167 list_for_each_entry(c, &bch_cache_sets, list) 1168 bch_cached_dev_attach(dc, c); 1169 1170 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1171 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1172 bch_cached_dev_run(dc); 1173 1174 return; 1175 err: 1176 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1177 bcache_device_stop(&dc->disk); 1178 } 1179 1180 /* Flash only volumes */ 1181 1182 void bch_flash_dev_release(struct kobject *kobj) 1183 { 1184 struct bcache_device *d = container_of(kobj, struct bcache_device, 1185 kobj); 1186 kfree(d); 1187 } 1188 1189 static void flash_dev_free(struct closure *cl) 1190 { 1191 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1192 bcache_device_free(d); 1193 kobject_put(&d->kobj); 1194 } 1195 1196 static void flash_dev_flush(struct closure *cl) 1197 { 1198 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1199 1200 bcache_device_unlink(d); 1201 kobject_del(&d->kobj); 1202 continue_at(cl, flash_dev_free, system_wq); 1203 } 1204 1205 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) 1206 { 1207 struct bcache_device *d = kzalloc(sizeof(struct bcache_device), 1208 GFP_KERNEL); 1209 if (!d) 1210 return -ENOMEM; 1211 1212 closure_init(&d->cl, NULL); 1213 set_closure_fn(&d->cl, flash_dev_flush, system_wq); 1214 1215 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1216 1217 if (bcache_device_init(d, block_bytes(c), u->sectors)) 1218 goto err; 1219 1220 bcache_device_attach(d, c, u - c->uuids); 1221 bch_flash_dev_request_init(d); 1222 add_disk(d->disk); 1223 1224 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) 1225 goto err; 1226 1227 bcache_device_link(d, c, "volume"); 1228 1229 return 0; 1230 err: 1231 kobject_put(&d->kobj); 1232 return -ENOMEM; 1233 } 1234 1235 static int flash_devs_run(struct cache_set *c) 1236 { 1237 int ret = 0; 1238 struct uuid_entry *u; 1239 1240 for (u = c->uuids; 1241 u < c->uuids + c->nr_uuids && !ret; 1242 u++) 1243 if (UUID_FLASH_ONLY(u)) 1244 ret = flash_dev_run(c, u); 1245 1246 return ret; 1247 } 1248 1249 int bch_flash_dev_create(struct cache_set *c, uint64_t size) 1250 { 1251 struct uuid_entry *u; 1252 1253 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1254 return -EINTR; 1255 1256 u = uuid_find_empty(c); 1257 if (!u) { 1258 pr_err("Can't create volume, no room for UUID"); 1259 return -EINVAL; 1260 } 1261 1262 get_random_bytes(u->uuid, 16); 1263 memset(u->label, 0, 32); 1264 u->first_reg = u->last_reg = cpu_to_le32(get_seconds()); 1265 1266 SET_UUID_FLASH_ONLY(u, 1); 1267 u->sectors = size >> 9; 1268 1269 bch_uuid_write(c); 1270 1271 return flash_dev_run(c, u); 1272 } 1273 1274 /* Cache set */ 1275 1276 __printf(2, 3) 1277 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 1278 { 1279 va_list args; 1280 1281 if (c->on_error != ON_ERROR_PANIC && 1282 test_bit(CACHE_SET_STOPPING, &c->flags)) 1283 return false; 1284 1285 /* XXX: we can be called from atomic context 1286 acquire_console_sem(); 1287 */ 1288 1289 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); 1290 1291 va_start(args, fmt); 1292 vprintk(fmt, args); 1293 va_end(args); 1294 1295 printk(", disabling caching\n"); 1296 1297 if (c->on_error == ON_ERROR_PANIC) 1298 panic("panic forced after error\n"); 1299 1300 bch_cache_set_unregister(c); 1301 return true; 1302 } 1303 1304 void bch_cache_set_release(struct kobject *kobj) 1305 { 1306 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 1307 kfree(c); 1308 module_put(THIS_MODULE); 1309 } 1310 1311 static void cache_set_free(struct closure *cl) 1312 { 1313 struct cache_set *c = container_of(cl, struct cache_set, cl); 1314 struct cache *ca; 1315 unsigned i; 1316 1317 if (!IS_ERR_OR_NULL(c->debug)) 1318 debugfs_remove(c->debug); 1319 1320 bch_open_buckets_free(c); 1321 bch_btree_cache_free(c); 1322 bch_journal_free(c); 1323 1324 for_each_cache(ca, c, i) 1325 if (ca) 1326 kobject_put(&ca->kobj); 1327 1328 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); 1329 free_pages((unsigned long) c->sort, ilog2(bucket_pages(c))); 1330 1331 if (c->bio_split) 1332 bioset_free(c->bio_split); 1333 if (c->fill_iter) 1334 mempool_destroy(c->fill_iter); 1335 if (c->bio_meta) 1336 mempool_destroy(c->bio_meta); 1337 if (c->search) 1338 mempool_destroy(c->search); 1339 kfree(c->devices); 1340 1341 mutex_lock(&bch_register_lock); 1342 list_del(&c->list); 1343 mutex_unlock(&bch_register_lock); 1344 1345 pr_info("Cache set %pU unregistered", c->sb.set_uuid); 1346 wake_up(&unregister_wait); 1347 1348 closure_debug_destroy(&c->cl); 1349 kobject_put(&c->kobj); 1350 } 1351 1352 static void cache_set_flush(struct closure *cl) 1353 { 1354 struct cache_set *c = container_of(cl, struct cache_set, caching); 1355 struct cache *ca; 1356 struct btree *b; 1357 unsigned i; 1358 1359 bch_cache_accounting_destroy(&c->accounting); 1360 1361 kobject_put(&c->internal); 1362 kobject_del(&c->kobj); 1363 1364 if (c->gc_thread) 1365 kthread_stop(c->gc_thread); 1366 1367 if (!IS_ERR_OR_NULL(c->root)) 1368 list_add(&c->root->list, &c->btree_cache); 1369 1370 /* Should skip this if we're unregistering because of an error */ 1371 list_for_each_entry(b, &c->btree_cache, list) 1372 if (btree_node_dirty(b)) 1373 bch_btree_node_write(b, NULL); 1374 1375 for_each_cache(ca, c, i) 1376 if (ca->alloc_thread) 1377 kthread_stop(ca->alloc_thread); 1378 1379 closure_return(cl); 1380 } 1381 1382 static void __cache_set_unregister(struct closure *cl) 1383 { 1384 struct cache_set *c = container_of(cl, struct cache_set, caching); 1385 struct cached_dev *dc; 1386 size_t i; 1387 1388 mutex_lock(&bch_register_lock); 1389 1390 for (i = 0; i < c->nr_uuids; i++) 1391 if (c->devices[i]) { 1392 if (!UUID_FLASH_ONLY(&c->uuids[i]) && 1393 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { 1394 dc = container_of(c->devices[i], 1395 struct cached_dev, disk); 1396 bch_cached_dev_detach(dc); 1397 } else { 1398 bcache_device_stop(c->devices[i]); 1399 } 1400 } 1401 1402 mutex_unlock(&bch_register_lock); 1403 1404 continue_at(cl, cache_set_flush, system_wq); 1405 } 1406 1407 void bch_cache_set_stop(struct cache_set *c) 1408 { 1409 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) 1410 closure_queue(&c->caching); 1411 } 1412 1413 void bch_cache_set_unregister(struct cache_set *c) 1414 { 1415 set_bit(CACHE_SET_UNREGISTERING, &c->flags); 1416 bch_cache_set_stop(c); 1417 } 1418 1419 #define alloc_bucket_pages(gfp, c) \ 1420 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) 1421 1422 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) 1423 { 1424 int iter_size; 1425 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); 1426 if (!c) 1427 return NULL; 1428 1429 __module_get(THIS_MODULE); 1430 closure_init(&c->cl, NULL); 1431 set_closure_fn(&c->cl, cache_set_free, system_wq); 1432 1433 closure_init(&c->caching, &c->cl); 1434 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); 1435 1436 /* Maybe create continue_at_noreturn() and use it here? */ 1437 closure_set_stopped(&c->cl); 1438 closure_put(&c->cl); 1439 1440 kobject_init(&c->kobj, &bch_cache_set_ktype); 1441 kobject_init(&c->internal, &bch_cache_set_internal_ktype); 1442 1443 bch_cache_accounting_init(&c->accounting, &c->cl); 1444 1445 memcpy(c->sb.set_uuid, sb->set_uuid, 16); 1446 c->sb.block_size = sb->block_size; 1447 c->sb.bucket_size = sb->bucket_size; 1448 c->sb.nr_in_set = sb->nr_in_set; 1449 c->sb.last_mount = sb->last_mount; 1450 c->bucket_bits = ilog2(sb->bucket_size); 1451 c->block_bits = ilog2(sb->block_size); 1452 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); 1453 1454 c->btree_pages = c->sb.bucket_size / PAGE_SECTORS; 1455 if (c->btree_pages > BTREE_MAX_PAGES) 1456 c->btree_pages = max_t(int, c->btree_pages / 4, 1457 BTREE_MAX_PAGES); 1458 1459 c->sort_crit_factor = int_sqrt(c->btree_pages); 1460 1461 closure_init_unlocked(&c->sb_write); 1462 mutex_init(&c->bucket_lock); 1463 init_waitqueue_head(&c->try_wait); 1464 init_waitqueue_head(&c->bucket_wait); 1465 closure_init_unlocked(&c->uuid_write); 1466 mutex_init(&c->sort_lock); 1467 1468 spin_lock_init(&c->sort_time.lock); 1469 spin_lock_init(&c->btree_gc_time.lock); 1470 spin_lock_init(&c->btree_split_time.lock); 1471 spin_lock_init(&c->btree_read_time.lock); 1472 spin_lock_init(&c->try_harder_time.lock); 1473 1474 bch_moving_init_cache_set(c); 1475 1476 INIT_LIST_HEAD(&c->list); 1477 INIT_LIST_HEAD(&c->cached_devs); 1478 INIT_LIST_HEAD(&c->btree_cache); 1479 INIT_LIST_HEAD(&c->btree_cache_freeable); 1480 INIT_LIST_HEAD(&c->btree_cache_freed); 1481 INIT_LIST_HEAD(&c->data_buckets); 1482 1483 c->search = mempool_create_slab_pool(32, bch_search_cache); 1484 if (!c->search) 1485 goto err; 1486 1487 iter_size = (sb->bucket_size / sb->block_size + 1) * 1488 sizeof(struct btree_iter_set); 1489 1490 if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) || 1491 !(c->bio_meta = mempool_create_kmalloc_pool(2, 1492 sizeof(struct bbio) + sizeof(struct bio_vec) * 1493 bucket_pages(c))) || 1494 !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || 1495 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 1496 !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) || 1497 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || 1498 bch_journal_alloc(c) || 1499 bch_btree_cache_alloc(c) || 1500 bch_open_buckets_alloc(c)) 1501 goto err; 1502 1503 c->congested_read_threshold_us = 2000; 1504 c->congested_write_threshold_us = 20000; 1505 c->error_limit = 8 << IO_ERROR_SHIFT; 1506 1507 return c; 1508 err: 1509 bch_cache_set_unregister(c); 1510 return NULL; 1511 } 1512 1513 static void run_cache_set(struct cache_set *c) 1514 { 1515 const char *err = "cannot allocate memory"; 1516 struct cached_dev *dc, *t; 1517 struct cache *ca; 1518 struct closure cl; 1519 unsigned i; 1520 1521 closure_init_stack(&cl); 1522 1523 for_each_cache(ca, c, i) 1524 c->nbuckets += ca->sb.nbuckets; 1525 1526 if (CACHE_SYNC(&c->sb)) { 1527 LIST_HEAD(journal); 1528 struct bkey *k; 1529 struct jset *j; 1530 1531 err = "cannot allocate memory for journal"; 1532 if (bch_journal_read(c, &journal)) 1533 goto err; 1534 1535 pr_debug("btree_journal_read() done"); 1536 1537 err = "no journal entries found"; 1538 if (list_empty(&journal)) 1539 goto err; 1540 1541 j = &list_entry(journal.prev, struct journal_replay, list)->j; 1542 1543 err = "IO error reading priorities"; 1544 for_each_cache(ca, c, i) 1545 prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]); 1546 1547 /* 1548 * If prio_read() fails it'll call cache_set_error and we'll 1549 * tear everything down right away, but if we perhaps checked 1550 * sooner we could avoid journal replay. 1551 */ 1552 1553 k = &j->btree_root; 1554 1555 err = "bad btree root"; 1556 if (bch_btree_ptr_invalid(c, k)) 1557 goto err; 1558 1559 err = "error reading btree root"; 1560 c->root = bch_btree_node_get(c, k, j->btree_level, true); 1561 if (IS_ERR_OR_NULL(c->root)) 1562 goto err; 1563 1564 list_del_init(&c->root->list); 1565 rw_unlock(true, c->root); 1566 1567 err = uuid_read(c, j, &cl); 1568 if (err) 1569 goto err; 1570 1571 err = "error in recovery"; 1572 if (bch_btree_check(c)) 1573 goto err; 1574 1575 bch_journal_mark(c, &journal); 1576 bch_btree_gc_finish(c); 1577 pr_debug("btree_check() done"); 1578 1579 /* 1580 * bcache_journal_next() can't happen sooner, or 1581 * btree_gc_finish() will give spurious errors about last_gc > 1582 * gc_gen - this is a hack but oh well. 1583 */ 1584 bch_journal_next(&c->journal); 1585 1586 err = "error starting allocator thread"; 1587 for_each_cache(ca, c, i) 1588 if (bch_cache_allocator_start(ca)) 1589 goto err; 1590 1591 /* 1592 * First place it's safe to allocate: btree_check() and 1593 * btree_gc_finish() have to run before we have buckets to 1594 * allocate, and bch_bucket_alloc_set() might cause a journal 1595 * entry to be written so bcache_journal_next() has to be called 1596 * first. 1597 * 1598 * If the uuids were in the old format we have to rewrite them 1599 * before the next journal entry is written: 1600 */ 1601 if (j->version < BCACHE_JSET_VERSION_UUID) 1602 __uuid_write(c); 1603 1604 bch_journal_replay(c, &journal); 1605 } else { 1606 pr_notice("invalidating existing data"); 1607 1608 for_each_cache(ca, c, i) { 1609 unsigned j; 1610 1611 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 1612 2, SB_JOURNAL_BUCKETS); 1613 1614 for (j = 0; j < ca->sb.keys; j++) 1615 ca->sb.d[j] = ca->sb.first_bucket + j; 1616 } 1617 1618 bch_btree_gc_finish(c); 1619 1620 err = "error starting allocator thread"; 1621 for_each_cache(ca, c, i) 1622 if (bch_cache_allocator_start(ca)) 1623 goto err; 1624 1625 mutex_lock(&c->bucket_lock); 1626 for_each_cache(ca, c, i) 1627 bch_prio_write(ca); 1628 mutex_unlock(&c->bucket_lock); 1629 1630 err = "cannot allocate new UUID bucket"; 1631 if (__uuid_write(c)) 1632 goto err; 1633 1634 err = "cannot allocate new btree root"; 1635 c->root = bch_btree_node_alloc(c, 0, true); 1636 if (IS_ERR_OR_NULL(c->root)) 1637 goto err; 1638 1639 bkey_copy_key(&c->root->key, &MAX_KEY); 1640 bch_btree_node_write(c->root, &cl); 1641 1642 bch_btree_set_root(c->root); 1643 rw_unlock(true, c->root); 1644 1645 /* 1646 * We don't want to write the first journal entry until 1647 * everything is set up - fortunately journal entries won't be 1648 * written until the SET_CACHE_SYNC() here: 1649 */ 1650 SET_CACHE_SYNC(&c->sb, true); 1651 1652 bch_journal_next(&c->journal); 1653 bch_journal_meta(c, &cl); 1654 } 1655 1656 err = "error starting gc thread"; 1657 if (bch_gc_thread_start(c)) 1658 goto err; 1659 1660 closure_sync(&cl); 1661 c->sb.last_mount = get_seconds(); 1662 bcache_write_super(c); 1663 1664 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1665 bch_cached_dev_attach(dc, c); 1666 1667 flash_devs_run(c); 1668 1669 return; 1670 err: 1671 closure_sync(&cl); 1672 /* XXX: test this, it's broken */ 1673 bch_cache_set_error(c, "%s", err); 1674 } 1675 1676 static bool can_attach_cache(struct cache *ca, struct cache_set *c) 1677 { 1678 return ca->sb.block_size == c->sb.block_size && 1679 ca->sb.bucket_size == c->sb.block_size && 1680 ca->sb.nr_in_set == c->sb.nr_in_set; 1681 } 1682 1683 static const char *register_cache_set(struct cache *ca) 1684 { 1685 char buf[12]; 1686 const char *err = "cannot allocate memory"; 1687 struct cache_set *c; 1688 1689 list_for_each_entry(c, &bch_cache_sets, list) 1690 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { 1691 if (c->cache[ca->sb.nr_this_dev]) 1692 return "duplicate cache set member"; 1693 1694 if (!can_attach_cache(ca, c)) 1695 return "cache sb does not match set"; 1696 1697 if (!CACHE_SYNC(&ca->sb)) 1698 SET_CACHE_SYNC(&c->sb, false); 1699 1700 goto found; 1701 } 1702 1703 c = bch_cache_set_alloc(&ca->sb); 1704 if (!c) 1705 return err; 1706 1707 err = "error creating kobject"; 1708 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || 1709 kobject_add(&c->internal, &c->kobj, "internal")) 1710 goto err; 1711 1712 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) 1713 goto err; 1714 1715 bch_debug_init_cache_set(c); 1716 1717 list_add(&c->list, &bch_cache_sets); 1718 found: 1719 sprintf(buf, "cache%i", ca->sb.nr_this_dev); 1720 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || 1721 sysfs_create_link(&c->kobj, &ca->kobj, buf)) 1722 goto err; 1723 1724 if (ca->sb.seq > c->sb.seq) { 1725 c->sb.version = ca->sb.version; 1726 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); 1727 c->sb.flags = ca->sb.flags; 1728 c->sb.seq = ca->sb.seq; 1729 pr_debug("set version = %llu", c->sb.version); 1730 } 1731 1732 ca->set = c; 1733 ca->set->cache[ca->sb.nr_this_dev] = ca; 1734 c->cache_by_alloc[c->caches_loaded++] = ca; 1735 1736 if (c->caches_loaded == c->sb.nr_in_set) 1737 run_cache_set(c); 1738 1739 return NULL; 1740 err: 1741 bch_cache_set_unregister(c); 1742 return err; 1743 } 1744 1745 /* Cache device */ 1746 1747 void bch_cache_release(struct kobject *kobj) 1748 { 1749 struct cache *ca = container_of(kobj, struct cache, kobj); 1750 1751 if (ca->set) 1752 ca->set->cache[ca->sb.nr_this_dev] = NULL; 1753 1754 bio_split_pool_free(&ca->bio_split_hook); 1755 1756 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); 1757 kfree(ca->prio_buckets); 1758 vfree(ca->buckets); 1759 1760 free_heap(&ca->heap); 1761 free_fifo(&ca->unused); 1762 free_fifo(&ca->free_inc); 1763 free_fifo(&ca->free); 1764 1765 if (ca->sb_bio.bi_inline_vecs[0].bv_page) 1766 put_page(ca->sb_bio.bi_io_vec[0].bv_page); 1767 1768 if (!IS_ERR_OR_NULL(ca->bdev)) { 1769 blk_sync_queue(bdev_get_queue(ca->bdev)); 1770 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1771 } 1772 1773 kfree(ca); 1774 module_put(THIS_MODULE); 1775 } 1776 1777 static int cache_alloc(struct cache_sb *sb, struct cache *ca) 1778 { 1779 size_t free; 1780 struct bucket *b; 1781 1782 __module_get(THIS_MODULE); 1783 kobject_init(&ca->kobj, &bch_cache_ktype); 1784 1785 bio_init(&ca->journal.bio); 1786 ca->journal.bio.bi_max_vecs = 8; 1787 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; 1788 1789 free = roundup_pow_of_two(ca->sb.nbuckets) >> 9; 1790 free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2); 1791 1792 if (!init_fifo(&ca->free, free, GFP_KERNEL) || 1793 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || 1794 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || 1795 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || 1796 !(ca->buckets = vzalloc(sizeof(struct bucket) * 1797 ca->sb.nbuckets)) || 1798 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 1799 2, GFP_KERNEL)) || 1800 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || 1801 bio_split_pool_init(&ca->bio_split_hook)) 1802 return -ENOMEM; 1803 1804 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 1805 1806 for_each_bucket(b, ca) 1807 atomic_set(&b->pin, 0); 1808 1809 if (bch_cache_allocator_init(ca)) 1810 goto err; 1811 1812 return 0; 1813 err: 1814 kobject_put(&ca->kobj); 1815 return -ENOMEM; 1816 } 1817 1818 static void register_cache(struct cache_sb *sb, struct page *sb_page, 1819 struct block_device *bdev, struct cache *ca) 1820 { 1821 char name[BDEVNAME_SIZE]; 1822 const char *err = "cannot allocate memory"; 1823 1824 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); 1825 ca->bdev = bdev; 1826 ca->bdev->bd_holder = ca; 1827 1828 bio_init(&ca->sb_bio); 1829 ca->sb_bio.bi_max_vecs = 1; 1830 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; 1831 ca->sb_bio.bi_io_vec[0].bv_page = sb_page; 1832 get_page(sb_page); 1833 1834 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 1835 ca->discard = CACHE_DISCARD(&ca->sb); 1836 1837 if (cache_alloc(sb, ca) != 0) 1838 goto err; 1839 1840 err = "error creating kobject"; 1841 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) 1842 goto err; 1843 1844 err = register_cache_set(ca); 1845 if (err) 1846 goto err; 1847 1848 pr_info("registered cache device %s", bdevname(bdev, name)); 1849 return; 1850 err: 1851 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1852 kobject_put(&ca->kobj); 1853 } 1854 1855 /* Global interfaces/init */ 1856 1857 static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, 1858 const char *, size_t); 1859 1860 kobj_attribute_write(register, register_bcache); 1861 kobj_attribute_write(register_quiet, register_bcache); 1862 1863 static bool bch_is_open_backing(struct block_device *bdev) { 1864 struct cache_set *c, *tc; 1865 struct cached_dev *dc, *t; 1866 1867 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 1868 list_for_each_entry_safe(dc, t, &c->cached_devs, list) 1869 if (dc->bdev == bdev) 1870 return true; 1871 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1872 if (dc->bdev == bdev) 1873 return true; 1874 return false; 1875 } 1876 1877 static bool bch_is_open_cache(struct block_device *bdev) { 1878 struct cache_set *c, *tc; 1879 struct cache *ca; 1880 unsigned i; 1881 1882 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 1883 for_each_cache(ca, c, i) 1884 if (ca->bdev == bdev) 1885 return true; 1886 return false; 1887 } 1888 1889 static bool bch_is_open(struct block_device *bdev) { 1890 return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); 1891 } 1892 1893 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, 1894 const char *buffer, size_t size) 1895 { 1896 ssize_t ret = size; 1897 const char *err = "cannot allocate memory"; 1898 char *path = NULL; 1899 struct cache_sb *sb = NULL; 1900 struct block_device *bdev = NULL; 1901 struct page *sb_page = NULL; 1902 1903 if (!try_module_get(THIS_MODULE)) 1904 return -EBUSY; 1905 1906 mutex_lock(&bch_register_lock); 1907 1908 if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || 1909 !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) 1910 goto err; 1911 1912 err = "failed to open device"; 1913 bdev = blkdev_get_by_path(strim(path), 1914 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1915 sb); 1916 if (IS_ERR(bdev)) { 1917 if (bdev == ERR_PTR(-EBUSY)) { 1918 bdev = lookup_bdev(strim(path)); 1919 if (!IS_ERR(bdev) && bch_is_open(bdev)) 1920 err = "device already registered"; 1921 else 1922 err = "device busy"; 1923 } 1924 goto err; 1925 } 1926 1927 err = "failed to set blocksize"; 1928 if (set_blocksize(bdev, 4096)) 1929 goto err_close; 1930 1931 err = read_super(sb, bdev, &sb_page); 1932 if (err) 1933 goto err_close; 1934 1935 if (SB_IS_BDEV(sb)) { 1936 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1937 if (!dc) 1938 goto err_close; 1939 1940 register_bdev(sb, sb_page, bdev, dc); 1941 } else { 1942 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); 1943 if (!ca) 1944 goto err_close; 1945 1946 register_cache(sb, sb_page, bdev, ca); 1947 } 1948 out: 1949 if (sb_page) 1950 put_page(sb_page); 1951 kfree(sb); 1952 kfree(path); 1953 mutex_unlock(&bch_register_lock); 1954 module_put(THIS_MODULE); 1955 return ret; 1956 1957 err_close: 1958 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1959 err: 1960 if (attr != &ksysfs_register_quiet) 1961 pr_info("error opening %s: %s", path, err); 1962 ret = -EINVAL; 1963 goto out; 1964 } 1965 1966 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) 1967 { 1968 if (code == SYS_DOWN || 1969 code == SYS_HALT || 1970 code == SYS_POWER_OFF) { 1971 DEFINE_WAIT(wait); 1972 unsigned long start = jiffies; 1973 bool stopped = false; 1974 1975 struct cache_set *c, *tc; 1976 struct cached_dev *dc, *tdc; 1977 1978 mutex_lock(&bch_register_lock); 1979 1980 if (list_empty(&bch_cache_sets) && 1981 list_empty(&uncached_devices)) 1982 goto out; 1983 1984 pr_info("Stopping all devices:"); 1985 1986 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 1987 bch_cache_set_stop(c); 1988 1989 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) 1990 bcache_device_stop(&dc->disk); 1991 1992 /* What's a condition variable? */ 1993 while (1) { 1994 long timeout = start + 2 * HZ - jiffies; 1995 1996 stopped = list_empty(&bch_cache_sets) && 1997 list_empty(&uncached_devices); 1998 1999 if (timeout < 0 || stopped) 2000 break; 2001 2002 prepare_to_wait(&unregister_wait, &wait, 2003 TASK_UNINTERRUPTIBLE); 2004 2005 mutex_unlock(&bch_register_lock); 2006 schedule_timeout(timeout); 2007 mutex_lock(&bch_register_lock); 2008 } 2009 2010 finish_wait(&unregister_wait, &wait); 2011 2012 if (stopped) 2013 pr_info("All devices stopped"); 2014 else 2015 pr_notice("Timeout waiting for devices to be closed"); 2016 out: 2017 mutex_unlock(&bch_register_lock); 2018 } 2019 2020 return NOTIFY_DONE; 2021 } 2022 2023 static struct notifier_block reboot = { 2024 .notifier_call = bcache_reboot, 2025 .priority = INT_MAX, /* before any real devices */ 2026 }; 2027 2028 static void bcache_exit(void) 2029 { 2030 bch_debug_exit(); 2031 bch_request_exit(); 2032 bch_btree_exit(); 2033 if (bcache_kobj) 2034 kobject_put(bcache_kobj); 2035 if (bcache_wq) 2036 destroy_workqueue(bcache_wq); 2037 unregister_blkdev(bcache_major, "bcache"); 2038 unregister_reboot_notifier(&reboot); 2039 } 2040 2041 static int __init bcache_init(void) 2042 { 2043 static const struct attribute *files[] = { 2044 &ksysfs_register.attr, 2045 &ksysfs_register_quiet.attr, 2046 NULL 2047 }; 2048 2049 mutex_init(&bch_register_lock); 2050 init_waitqueue_head(&unregister_wait); 2051 register_reboot_notifier(&reboot); 2052 closure_debug_init(); 2053 2054 bcache_major = register_blkdev(0, "bcache"); 2055 if (bcache_major < 0) 2056 return bcache_major; 2057 2058 if (!(bcache_wq = create_workqueue("bcache")) || 2059 !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || 2060 sysfs_create_files(bcache_kobj, files) || 2061 bch_btree_init() || 2062 bch_request_init() || 2063 bch_debug_init(bcache_kobj)) 2064 goto err; 2065 2066 return 0; 2067 err: 2068 bcache_exit(); 2069 return -ENOMEM; 2070 } 2071 2072 module_exit(bcache_exit); 2073 module_init(bcache_init); 2074