1 /* 2 * bcache setup/teardown code, and some metadata io - read a superblock and 3 * figure out what to do with it. 4 * 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 * Copyright 2012 Google, Inc. 7 */ 8 9 #include "bcache.h" 10 #include "btree.h" 11 #include "debug.h" 12 #include "request.h" 13 14 #include <linux/buffer_head.h> 15 #include <linux/debugfs.h> 16 #include <linux/genhd.h> 17 #include <linux/module.h> 18 #include <linux/random.h> 19 #include <linux/reboot.h> 20 #include <linux/sysfs.h> 21 22 MODULE_LICENSE("GPL"); 23 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); 24 25 static const char bcache_magic[] = { 26 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, 27 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 28 }; 29 30 static const char invalid_uuid[] = { 31 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, 32 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 33 }; 34 35 /* Default is -1; we skip past it for struct cached_dev's cache mode */ 36 const char * const bch_cache_modes[] = { 37 "default", 38 "writethrough", 39 "writeback", 40 "writearound", 41 "none", 42 NULL 43 }; 44 45 struct uuid_entry_v0 { 46 uint8_t uuid[16]; 47 uint8_t label[32]; 48 uint32_t first_reg; 49 uint32_t last_reg; 50 uint32_t invalidated; 51 uint32_t pad; 52 }; 53 54 static struct kobject *bcache_kobj; 55 struct mutex bch_register_lock; 56 LIST_HEAD(bch_cache_sets); 57 static LIST_HEAD(uncached_devices); 58 59 static int bcache_major, bcache_minor; 60 static wait_queue_head_t unregister_wait; 61 struct workqueue_struct *bcache_wq; 62 63 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 64 65 static void bio_split_pool_free(struct bio_split_pool *p) 66 { 67 if (p->bio_split_hook) 68 mempool_destroy(p->bio_split_hook); 69 70 if (p->bio_split) 71 bioset_free(p->bio_split); 72 } 73 74 static int bio_split_pool_init(struct bio_split_pool *p) 75 { 76 p->bio_split = bioset_create(4, 0); 77 if (!p->bio_split) 78 return -ENOMEM; 79 80 p->bio_split_hook = mempool_create_kmalloc_pool(4, 81 sizeof(struct bio_split_hook)); 82 if (!p->bio_split_hook) 83 return -ENOMEM; 84 85 return 0; 86 } 87 88 /* Superblock */ 89 90 static const char *read_super(struct cache_sb *sb, struct block_device *bdev, 91 struct page **res) 92 { 93 const char *err; 94 struct cache_sb *s; 95 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); 96 unsigned i; 97 98 if (!bh) 99 return "IO error"; 100 101 s = (struct cache_sb *) bh->b_data; 102 103 sb->offset = le64_to_cpu(s->offset); 104 sb->version = le64_to_cpu(s->version); 105 106 memcpy(sb->magic, s->magic, 16); 107 memcpy(sb->uuid, s->uuid, 16); 108 memcpy(sb->set_uuid, s->set_uuid, 16); 109 memcpy(sb->label, s->label, SB_LABEL_SIZE); 110 111 sb->flags = le64_to_cpu(s->flags); 112 sb->seq = le64_to_cpu(s->seq); 113 sb->last_mount = le32_to_cpu(s->last_mount); 114 sb->first_bucket = le16_to_cpu(s->first_bucket); 115 sb->keys = le16_to_cpu(s->keys); 116 117 for (i = 0; i < SB_JOURNAL_BUCKETS; i++) 118 sb->d[i] = le64_to_cpu(s->d[i]); 119 120 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u", 121 sb->version, sb->flags, sb->seq, sb->keys); 122 123 err = "Not a bcache superblock"; 124 if (sb->offset != SB_SECTOR) 125 goto err; 126 127 if (memcmp(sb->magic, bcache_magic, 16)) 128 goto err; 129 130 err = "Too many journal buckets"; 131 if (sb->keys > SB_JOURNAL_BUCKETS) 132 goto err; 133 134 err = "Bad checksum"; 135 if (s->csum != csum_set(s)) 136 goto err; 137 138 err = "Bad UUID"; 139 if (bch_is_zero(sb->uuid, 16)) 140 goto err; 141 142 sb->block_size = le16_to_cpu(s->block_size); 143 144 err = "Superblock block size smaller than device block size"; 145 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) 146 goto err; 147 148 switch (sb->version) { 149 case BCACHE_SB_VERSION_BDEV: 150 sb->data_offset = BDEV_DATA_START_DEFAULT; 151 break; 152 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: 153 sb->data_offset = le64_to_cpu(s->data_offset); 154 155 err = "Bad data offset"; 156 if (sb->data_offset < BDEV_DATA_START_DEFAULT) 157 goto err; 158 159 break; 160 case BCACHE_SB_VERSION_CDEV: 161 case BCACHE_SB_VERSION_CDEV_WITH_UUID: 162 sb->nbuckets = le64_to_cpu(s->nbuckets); 163 sb->block_size = le16_to_cpu(s->block_size); 164 sb->bucket_size = le16_to_cpu(s->bucket_size); 165 166 sb->nr_in_set = le16_to_cpu(s->nr_in_set); 167 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); 168 169 err = "Too many buckets"; 170 if (sb->nbuckets > LONG_MAX) 171 goto err; 172 173 err = "Not enough buckets"; 174 if (sb->nbuckets < 1 << 7) 175 goto err; 176 177 err = "Bad block/bucket size"; 178 if (!is_power_of_2(sb->block_size) || 179 sb->block_size > PAGE_SECTORS || 180 !is_power_of_2(sb->bucket_size) || 181 sb->bucket_size < PAGE_SECTORS) 182 goto err; 183 184 err = "Invalid superblock: device too small"; 185 if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets) 186 goto err; 187 188 err = "Bad UUID"; 189 if (bch_is_zero(sb->set_uuid, 16)) 190 goto err; 191 192 err = "Bad cache device number in set"; 193 if (!sb->nr_in_set || 194 sb->nr_in_set <= sb->nr_this_dev || 195 sb->nr_in_set > MAX_CACHES_PER_SET) 196 goto err; 197 198 err = "Journal buckets not sequential"; 199 for (i = 0; i < sb->keys; i++) 200 if (sb->d[i] != sb->first_bucket + i) 201 goto err; 202 203 err = "Too many journal buckets"; 204 if (sb->first_bucket + sb->keys > sb->nbuckets) 205 goto err; 206 207 err = "Invalid superblock: first bucket comes before end of super"; 208 if (sb->first_bucket * sb->bucket_size < 16) 209 goto err; 210 211 break; 212 default: 213 err = "Unsupported superblock version"; 214 goto err; 215 } 216 217 sb->last_mount = get_seconds(); 218 err = NULL; 219 220 get_page(bh->b_page); 221 *res = bh->b_page; 222 err: 223 put_bh(bh); 224 return err; 225 } 226 227 static void write_bdev_super_endio(struct bio *bio, int error) 228 { 229 struct cached_dev *dc = bio->bi_private; 230 /* XXX: error checking */ 231 232 closure_put(&dc->sb_write.cl); 233 } 234 235 static void __write_super(struct cache_sb *sb, struct bio *bio) 236 { 237 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); 238 unsigned i; 239 240 bio->bi_sector = SB_SECTOR; 241 bio->bi_rw = REQ_SYNC|REQ_META; 242 bio->bi_size = SB_SIZE; 243 bch_bio_map(bio, NULL); 244 245 out->offset = cpu_to_le64(sb->offset); 246 out->version = cpu_to_le64(sb->version); 247 248 memcpy(out->uuid, sb->uuid, 16); 249 memcpy(out->set_uuid, sb->set_uuid, 16); 250 memcpy(out->label, sb->label, SB_LABEL_SIZE); 251 252 out->flags = cpu_to_le64(sb->flags); 253 out->seq = cpu_to_le64(sb->seq); 254 255 out->last_mount = cpu_to_le32(sb->last_mount); 256 out->first_bucket = cpu_to_le16(sb->first_bucket); 257 out->keys = cpu_to_le16(sb->keys); 258 259 for (i = 0; i < sb->keys; i++) 260 out->d[i] = cpu_to_le64(sb->d[i]); 261 262 out->csum = csum_set(out); 263 264 pr_debug("ver %llu, flags %llu, seq %llu", 265 sb->version, sb->flags, sb->seq); 266 267 submit_bio(REQ_WRITE, bio); 268 } 269 270 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) 271 { 272 struct closure *cl = &dc->sb_write.cl; 273 struct bio *bio = &dc->sb_bio; 274 275 closure_lock(&dc->sb_write, parent); 276 277 bio_reset(bio); 278 bio->bi_bdev = dc->bdev; 279 bio->bi_end_io = write_bdev_super_endio; 280 bio->bi_private = dc; 281 282 closure_get(cl); 283 __write_super(&dc->sb, bio); 284 285 closure_return(cl); 286 } 287 288 static void write_super_endio(struct bio *bio, int error) 289 { 290 struct cache *ca = bio->bi_private; 291 292 bch_count_io_errors(ca, error, "writing superblock"); 293 closure_put(&ca->set->sb_write.cl); 294 } 295 296 void bcache_write_super(struct cache_set *c) 297 { 298 struct closure *cl = &c->sb_write.cl; 299 struct cache *ca; 300 unsigned i; 301 302 closure_lock(&c->sb_write, &c->cl); 303 304 c->sb.seq++; 305 306 for_each_cache(ca, c, i) { 307 struct bio *bio = &ca->sb_bio; 308 309 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 310 ca->sb.seq = c->sb.seq; 311 ca->sb.last_mount = c->sb.last_mount; 312 313 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 314 315 bio_reset(bio); 316 bio->bi_bdev = ca->bdev; 317 bio->bi_end_io = write_super_endio; 318 bio->bi_private = ca; 319 320 closure_get(cl); 321 __write_super(&ca->sb, bio); 322 } 323 324 closure_return(cl); 325 } 326 327 /* UUID io */ 328 329 static void uuid_endio(struct bio *bio, int error) 330 { 331 struct closure *cl = bio->bi_private; 332 struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl); 333 334 cache_set_err_on(error, c, "accessing uuids"); 335 bch_bbio_free(bio, c); 336 closure_put(cl); 337 } 338 339 static void uuid_io(struct cache_set *c, unsigned long rw, 340 struct bkey *k, struct closure *parent) 341 { 342 struct closure *cl = &c->uuid_write.cl; 343 struct uuid_entry *u; 344 unsigned i; 345 346 BUG_ON(!parent); 347 closure_lock(&c->uuid_write, parent); 348 349 for (i = 0; i < KEY_PTRS(k); i++) { 350 struct bio *bio = bch_bbio_alloc(c); 351 352 bio->bi_rw = REQ_SYNC|REQ_META|rw; 353 bio->bi_size = KEY_SIZE(k) << 9; 354 355 bio->bi_end_io = uuid_endio; 356 bio->bi_private = cl; 357 bch_bio_map(bio, c->uuids); 358 359 bch_submit_bbio(bio, c, k, i); 360 361 if (!(rw & WRITE)) 362 break; 363 } 364 365 pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", 366 pkey(&c->uuid_bucket)); 367 368 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 369 if (!bch_is_zero(u->uuid, 16)) 370 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", 371 u - c->uuids, u->uuid, u->label, 372 u->first_reg, u->last_reg, u->invalidated); 373 374 closure_return(cl); 375 } 376 377 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 378 { 379 struct bkey *k = &j->uuid_bucket; 380 381 if (__bch_ptr_invalid(c, 1, k)) 382 return "bad uuid pointer"; 383 384 bkey_copy(&c->uuid_bucket, k); 385 uuid_io(c, READ_SYNC, k, cl); 386 387 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 388 struct uuid_entry_v0 *u0 = (void *) c->uuids; 389 struct uuid_entry *u1 = (void *) c->uuids; 390 int i; 391 392 closure_sync(cl); 393 394 /* 395 * Since the new uuid entry is bigger than the old, we have to 396 * convert starting at the highest memory address and work down 397 * in order to do it in place 398 */ 399 400 for (i = c->nr_uuids - 1; 401 i >= 0; 402 --i) { 403 memcpy(u1[i].uuid, u0[i].uuid, 16); 404 memcpy(u1[i].label, u0[i].label, 32); 405 406 u1[i].first_reg = u0[i].first_reg; 407 u1[i].last_reg = u0[i].last_reg; 408 u1[i].invalidated = u0[i].invalidated; 409 410 u1[i].flags = 0; 411 u1[i].sectors = 0; 412 } 413 } 414 415 return NULL; 416 } 417 418 static int __uuid_write(struct cache_set *c) 419 { 420 BKEY_PADDED(key) k; 421 struct closure cl; 422 closure_init_stack(&cl); 423 424 lockdep_assert_held(&bch_register_lock); 425 426 if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl)) 427 return 1; 428 429 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 430 uuid_io(c, REQ_WRITE, &k.key, &cl); 431 closure_sync(&cl); 432 433 bkey_copy(&c->uuid_bucket, &k.key); 434 __bkey_put(c, &k.key); 435 return 0; 436 } 437 438 int bch_uuid_write(struct cache_set *c) 439 { 440 int ret = __uuid_write(c); 441 442 if (!ret) 443 bch_journal_meta(c, NULL); 444 445 return ret; 446 } 447 448 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 449 { 450 struct uuid_entry *u; 451 452 for (u = c->uuids; 453 u < c->uuids + c->nr_uuids; u++) 454 if (!memcmp(u->uuid, uuid, 16)) 455 return u; 456 457 return NULL; 458 } 459 460 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 461 { 462 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 463 return uuid_find(c, zero_uuid); 464 } 465 466 /* 467 * Bucket priorities/gens: 468 * 469 * For each bucket, we store on disk its 470 * 8 bit gen 471 * 16 bit priority 472 * 473 * See alloc.c for an explanation of the gen. The priority is used to implement 474 * lru (and in the future other) cache replacement policies; for most purposes 475 * it's just an opaque integer. 476 * 477 * The gens and the priorities don't have a whole lot to do with each other, and 478 * it's actually the gens that must be written out at specific times - it's no 479 * big deal if the priorities don't get written, if we lose them we just reuse 480 * buckets in suboptimal order. 481 * 482 * On disk they're stored in a packed array, and in as many buckets are required 483 * to fit them all. The buckets we use to store them form a list; the journal 484 * header points to the first bucket, the first bucket points to the second 485 * bucket, et cetera. 486 * 487 * This code is used by the allocation code; periodically (whenever it runs out 488 * of buckets to allocate from) the allocation code will invalidate some 489 * buckets, but it can't use those buckets until their new gens are safely on 490 * disk. 491 */ 492 493 static void prio_endio(struct bio *bio, int error) 494 { 495 struct cache *ca = bio->bi_private; 496 497 cache_set_err_on(error, ca->set, "accessing priorities"); 498 bch_bbio_free(bio, ca->set); 499 closure_put(&ca->prio); 500 } 501 502 static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) 503 { 504 struct closure *cl = &ca->prio; 505 struct bio *bio = bch_bbio_alloc(ca->set); 506 507 closure_init_stack(cl); 508 509 bio->bi_sector = bucket * ca->sb.bucket_size; 510 bio->bi_bdev = ca->bdev; 511 bio->bi_rw = REQ_SYNC|REQ_META|rw; 512 bio->bi_size = bucket_bytes(ca); 513 514 bio->bi_end_io = prio_endio; 515 bio->bi_private = ca; 516 bch_bio_map(bio, ca->disk_buckets); 517 518 closure_bio_submit(bio, &ca->prio, ca); 519 closure_sync(cl); 520 } 521 522 #define buckets_free(c) "free %zu, free_inc %zu, unused %zu", \ 523 fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused) 524 525 void bch_prio_write(struct cache *ca) 526 { 527 int i; 528 struct bucket *b; 529 struct closure cl; 530 531 closure_init_stack(&cl); 532 533 lockdep_assert_held(&ca->set->bucket_lock); 534 535 for (b = ca->buckets; 536 b < ca->buckets + ca->sb.nbuckets; b++) 537 b->disk_gen = b->gen; 538 539 ca->disk_buckets->seq++; 540 541 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), 542 &ca->meta_sectors_written); 543 544 pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), 545 fifo_used(&ca->free_inc), fifo_used(&ca->unused)); 546 blktrace_msg(ca, "Starting priorities: " buckets_free(ca)); 547 548 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 549 long bucket; 550 struct prio_set *p = ca->disk_buckets; 551 struct bucket_disk *d = p->data; 552 struct bucket_disk *end = d + prios_per_bucket(ca); 553 554 for (b = ca->buckets + i * prios_per_bucket(ca); 555 b < ca->buckets + ca->sb.nbuckets && d < end; 556 b++, d++) { 557 d->prio = cpu_to_le16(b->prio); 558 d->gen = b->gen; 559 } 560 561 p->next_bucket = ca->prio_buckets[i + 1]; 562 p->magic = pset_magic(ca); 563 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); 564 565 bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl); 566 BUG_ON(bucket == -1); 567 568 mutex_unlock(&ca->set->bucket_lock); 569 prio_io(ca, bucket, REQ_WRITE); 570 mutex_lock(&ca->set->bucket_lock); 571 572 ca->prio_buckets[i] = bucket; 573 atomic_dec_bug(&ca->buckets[bucket].pin); 574 } 575 576 mutex_unlock(&ca->set->bucket_lock); 577 578 bch_journal_meta(ca->set, &cl); 579 closure_sync(&cl); 580 581 mutex_lock(&ca->set->bucket_lock); 582 583 ca->need_save_prio = 0; 584 585 /* 586 * Don't want the old priorities to get garbage collected until after we 587 * finish writing the new ones, and they're journalled 588 */ 589 for (i = 0; i < prio_buckets(ca); i++) 590 ca->prio_last_buckets[i] = ca->prio_buckets[i]; 591 } 592 593 static void prio_read(struct cache *ca, uint64_t bucket) 594 { 595 struct prio_set *p = ca->disk_buckets; 596 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; 597 struct bucket *b; 598 unsigned bucket_nr = 0; 599 600 for (b = ca->buckets; 601 b < ca->buckets + ca->sb.nbuckets; 602 b++, d++) { 603 if (d == end) { 604 ca->prio_buckets[bucket_nr] = bucket; 605 ca->prio_last_buckets[bucket_nr] = bucket; 606 bucket_nr++; 607 608 prio_io(ca, bucket, READ_SYNC); 609 610 if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) 611 pr_warn("bad csum reading priorities"); 612 613 if (p->magic != pset_magic(ca)) 614 pr_warn("bad magic reading priorities"); 615 616 bucket = p->next_bucket; 617 d = p->data; 618 } 619 620 b->prio = le16_to_cpu(d->prio); 621 b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen; 622 } 623 } 624 625 /* Bcache device */ 626 627 static int open_dev(struct block_device *b, fmode_t mode) 628 { 629 struct bcache_device *d = b->bd_disk->private_data; 630 if (atomic_read(&d->closing)) 631 return -ENXIO; 632 633 closure_get(&d->cl); 634 return 0; 635 } 636 637 static int release_dev(struct gendisk *b, fmode_t mode) 638 { 639 struct bcache_device *d = b->private_data; 640 closure_put(&d->cl); 641 return 0; 642 } 643 644 static int ioctl_dev(struct block_device *b, fmode_t mode, 645 unsigned int cmd, unsigned long arg) 646 { 647 struct bcache_device *d = b->bd_disk->private_data; 648 return d->ioctl(d, mode, cmd, arg); 649 } 650 651 static const struct block_device_operations bcache_ops = { 652 .open = open_dev, 653 .release = release_dev, 654 .ioctl = ioctl_dev, 655 .owner = THIS_MODULE, 656 }; 657 658 void bcache_device_stop(struct bcache_device *d) 659 { 660 if (!atomic_xchg(&d->closing, 1)) 661 closure_queue(&d->cl); 662 } 663 664 static void bcache_device_unlink(struct bcache_device *d) 665 { 666 unsigned i; 667 struct cache *ca; 668 669 sysfs_remove_link(&d->c->kobj, d->name); 670 sysfs_remove_link(&d->kobj, "cache"); 671 672 for_each_cache(ca, d->c, i) 673 bd_unlink_disk_holder(ca->bdev, d->disk); 674 } 675 676 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 677 const char *name) 678 { 679 unsigned i; 680 struct cache *ca; 681 682 for_each_cache(ca, d->c, i) 683 bd_link_disk_holder(ca->bdev, d->disk); 684 685 snprintf(d->name, BCACHEDEVNAME_SIZE, 686 "%s%u", name, d->id); 687 688 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || 689 sysfs_create_link(&c->kobj, &d->kobj, d->name), 690 "Couldn't create device <-> cache set symlinks"); 691 } 692 693 static void bcache_device_detach(struct bcache_device *d) 694 { 695 lockdep_assert_held(&bch_register_lock); 696 697 if (atomic_read(&d->detaching)) { 698 struct uuid_entry *u = d->c->uuids + d->id; 699 700 SET_UUID_FLASH_ONLY(u, 0); 701 memcpy(u->uuid, invalid_uuid, 16); 702 u->invalidated = cpu_to_le32(get_seconds()); 703 bch_uuid_write(d->c); 704 705 atomic_set(&d->detaching, 0); 706 } 707 708 bcache_device_unlink(d); 709 710 d->c->devices[d->id] = NULL; 711 closure_put(&d->c->caching); 712 d->c = NULL; 713 } 714 715 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, 716 unsigned id) 717 { 718 BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags)); 719 720 d->id = id; 721 d->c = c; 722 c->devices[id] = d; 723 724 closure_get(&c->caching); 725 } 726 727 static void bcache_device_free(struct bcache_device *d) 728 { 729 lockdep_assert_held(&bch_register_lock); 730 731 pr_info("%s stopped", d->disk->disk_name); 732 733 if (d->c) 734 bcache_device_detach(d); 735 736 if (d->disk) 737 del_gendisk(d->disk); 738 if (d->disk && d->disk->queue) 739 blk_cleanup_queue(d->disk->queue); 740 if (d->disk) 741 put_disk(d->disk); 742 743 bio_split_pool_free(&d->bio_split_hook); 744 if (d->unaligned_bvec) 745 mempool_destroy(d->unaligned_bvec); 746 if (d->bio_split) 747 bioset_free(d->bio_split); 748 749 closure_debug_destroy(&d->cl); 750 } 751 752 static int bcache_device_init(struct bcache_device *d, unsigned block_size) 753 { 754 struct request_queue *q; 755 756 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 757 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, 758 sizeof(struct bio_vec) * BIO_MAX_PAGES)) || 759 bio_split_pool_init(&d->bio_split_hook)) 760 761 return -ENOMEM; 762 763 d->disk = alloc_disk(1); 764 if (!d->disk) 765 return -ENOMEM; 766 767 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); 768 769 d->disk->major = bcache_major; 770 d->disk->first_minor = bcache_minor++; 771 d->disk->fops = &bcache_ops; 772 d->disk->private_data = d; 773 774 q = blk_alloc_queue(GFP_KERNEL); 775 if (!q) 776 return -ENOMEM; 777 778 blk_queue_make_request(q, NULL); 779 d->disk->queue = q; 780 q->queuedata = d; 781 q->backing_dev_info.congested_data = d; 782 q->limits.max_hw_sectors = UINT_MAX; 783 q->limits.max_sectors = UINT_MAX; 784 q->limits.max_segment_size = UINT_MAX; 785 q->limits.max_segments = BIO_MAX_PAGES; 786 q->limits.max_discard_sectors = UINT_MAX; 787 q->limits.io_min = block_size; 788 q->limits.logical_block_size = block_size; 789 q->limits.physical_block_size = block_size; 790 set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); 791 set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); 792 793 return 0; 794 } 795 796 /* Cached device */ 797 798 static void calc_cached_dev_sectors(struct cache_set *c) 799 { 800 uint64_t sectors = 0; 801 struct cached_dev *dc; 802 803 list_for_each_entry(dc, &c->cached_devs, list) 804 sectors += bdev_sectors(dc->bdev); 805 806 c->cached_dev_sectors = sectors; 807 } 808 809 void bch_cached_dev_run(struct cached_dev *dc) 810 { 811 struct bcache_device *d = &dc->disk; 812 813 if (atomic_xchg(&dc->running, 1)) 814 return; 815 816 if (!d->c && 817 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 818 struct closure cl; 819 closure_init_stack(&cl); 820 821 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); 822 bch_write_bdev_super(dc, &cl); 823 closure_sync(&cl); 824 } 825 826 add_disk(d->disk); 827 bd_link_disk_holder(dc->bdev, dc->disk.disk); 828 #if 0 829 char *env[] = { "SYMLINK=label" , NULL }; 830 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 831 #endif 832 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 833 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) 834 pr_debug("error creating sysfs link"); 835 } 836 837 static void cached_dev_detach_finish(struct work_struct *w) 838 { 839 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 840 char buf[BDEVNAME_SIZE]; 841 struct closure cl; 842 closure_init_stack(&cl); 843 844 BUG_ON(!atomic_read(&dc->disk.detaching)); 845 BUG_ON(atomic_read(&dc->count)); 846 847 mutex_lock(&bch_register_lock); 848 849 memset(&dc->sb.set_uuid, 0, 16); 850 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 851 852 bch_write_bdev_super(dc, &cl); 853 closure_sync(&cl); 854 855 bcache_device_detach(&dc->disk); 856 list_move(&dc->list, &uncached_devices); 857 858 mutex_unlock(&bch_register_lock); 859 860 pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); 861 862 /* Drop ref we took in cached_dev_detach() */ 863 closure_put(&dc->disk.cl); 864 } 865 866 void bch_cached_dev_detach(struct cached_dev *dc) 867 { 868 lockdep_assert_held(&bch_register_lock); 869 870 if (atomic_read(&dc->disk.closing)) 871 return; 872 873 if (atomic_xchg(&dc->disk.detaching, 1)) 874 return; 875 876 /* 877 * Block the device from being closed and freed until we're finished 878 * detaching 879 */ 880 closure_get(&dc->disk.cl); 881 882 bch_writeback_queue(dc); 883 cached_dev_put(dc); 884 } 885 886 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) 887 { 888 uint32_t rtime = cpu_to_le32(get_seconds()); 889 struct uuid_entry *u; 890 char buf[BDEVNAME_SIZE]; 891 892 bdevname(dc->bdev, buf); 893 894 if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) 895 return -ENOENT; 896 897 if (dc->disk.c) { 898 pr_err("Can't attach %s: already attached", buf); 899 return -EINVAL; 900 } 901 902 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { 903 pr_err("Can't attach %s: shutting down", buf); 904 return -EINVAL; 905 } 906 907 if (dc->sb.block_size < c->sb.block_size) { 908 /* Will die */ 909 pr_err("Couldn't attach %s: block size less than set's block size", 910 buf); 911 return -EINVAL; 912 } 913 914 u = uuid_find(c, dc->sb.uuid); 915 916 if (u && 917 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || 918 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { 919 memcpy(u->uuid, invalid_uuid, 16); 920 u->invalidated = cpu_to_le32(get_seconds()); 921 u = NULL; 922 } 923 924 if (!u) { 925 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 926 pr_err("Couldn't find uuid for %s in set", buf); 927 return -ENOENT; 928 } 929 930 u = uuid_find_empty(c); 931 if (!u) { 932 pr_err("Not caching %s, no room for UUID", buf); 933 return -EINVAL; 934 } 935 } 936 937 /* Deadlocks since we're called via sysfs... 938 sysfs_remove_file(&dc->kobj, &sysfs_attach); 939 */ 940 941 if (bch_is_zero(u->uuid, 16)) { 942 struct closure cl; 943 closure_init_stack(&cl); 944 945 memcpy(u->uuid, dc->sb.uuid, 16); 946 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); 947 u->first_reg = u->last_reg = rtime; 948 bch_uuid_write(c); 949 950 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); 951 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 952 953 bch_write_bdev_super(dc, &cl); 954 closure_sync(&cl); 955 } else { 956 u->last_reg = rtime; 957 bch_uuid_write(c); 958 } 959 960 bcache_device_attach(&dc->disk, c, u - c->uuids); 961 list_move(&dc->list, &c->cached_devs); 962 calc_cached_dev_sectors(c); 963 964 smp_wmb(); 965 /* 966 * dc->c must be set before dc->count != 0 - paired with the mb in 967 * cached_dev_get() 968 */ 969 atomic_set(&dc->count, 1); 970 971 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 972 atomic_set(&dc->has_dirty, 1); 973 atomic_inc(&dc->count); 974 bch_writeback_queue(dc); 975 } 976 977 bch_cached_dev_run(dc); 978 bcache_device_link(&dc->disk, c, "bdev"); 979 980 pr_info("Caching %s as %s on set %pU", 981 bdevname(dc->bdev, buf), dc->disk.disk->disk_name, 982 dc->disk.c->sb.set_uuid); 983 return 0; 984 } 985 986 void bch_cached_dev_release(struct kobject *kobj) 987 { 988 struct cached_dev *dc = container_of(kobj, struct cached_dev, 989 disk.kobj); 990 kfree(dc); 991 module_put(THIS_MODULE); 992 } 993 994 static void cached_dev_free(struct closure *cl) 995 { 996 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 997 998 cancel_delayed_work_sync(&dc->writeback_rate_update); 999 1000 mutex_lock(&bch_register_lock); 1001 1002 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 1003 bcache_device_free(&dc->disk); 1004 list_del(&dc->list); 1005 1006 mutex_unlock(&bch_register_lock); 1007 1008 if (!IS_ERR_OR_NULL(dc->bdev)) { 1009 blk_sync_queue(bdev_get_queue(dc->bdev)); 1010 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1011 } 1012 1013 wake_up(&unregister_wait); 1014 1015 kobject_put(&dc->disk.kobj); 1016 } 1017 1018 static void cached_dev_flush(struct closure *cl) 1019 { 1020 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1021 struct bcache_device *d = &dc->disk; 1022 1023 bch_cache_accounting_destroy(&dc->accounting); 1024 kobject_del(&d->kobj); 1025 1026 continue_at(cl, cached_dev_free, system_wq); 1027 } 1028 1029 static int cached_dev_init(struct cached_dev *dc, unsigned block_size) 1030 { 1031 int err; 1032 struct io *io; 1033 1034 closure_init(&dc->disk.cl, NULL); 1035 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); 1036 1037 __module_get(THIS_MODULE); 1038 INIT_LIST_HEAD(&dc->list); 1039 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1040 1041 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); 1042 1043 err = bcache_device_init(&dc->disk, block_size); 1044 if (err) 1045 goto err; 1046 1047 spin_lock_init(&dc->io_lock); 1048 closure_init_unlocked(&dc->sb_write); 1049 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1050 1051 dc->sequential_merge = true; 1052 dc->sequential_cutoff = 4 << 20; 1053 1054 INIT_LIST_HEAD(&dc->io_lru); 1055 dc->sb_bio.bi_max_vecs = 1; 1056 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs; 1057 1058 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1059 list_add(&io->lru, &dc->io_lru); 1060 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1061 } 1062 1063 bch_writeback_init_cached_dev(dc); 1064 return 0; 1065 err: 1066 bcache_device_stop(&dc->disk); 1067 return err; 1068 } 1069 1070 /* Cached device - bcache superblock */ 1071 1072 static const char *register_bdev(struct cache_sb *sb, struct page *sb_page, 1073 struct block_device *bdev, 1074 struct cached_dev *dc) 1075 { 1076 char name[BDEVNAME_SIZE]; 1077 const char *err = "cannot allocate memory"; 1078 struct gendisk *g; 1079 struct cache_set *c; 1080 1081 if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0) 1082 return err; 1083 1084 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1085 dc->sb_bio.bi_io_vec[0].bv_page = sb_page; 1086 dc->bdev = bdev; 1087 dc->bdev->bd_holder = dc; 1088 1089 g = dc->disk.disk; 1090 1091 set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1092 1093 g->queue->backing_dev_info.ra_pages = 1094 max(g->queue->backing_dev_info.ra_pages, 1095 bdev->bd_queue->backing_dev_info.ra_pages); 1096 1097 bch_cached_dev_request_init(dc); 1098 1099 err = "error creating kobject"; 1100 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1101 "bcache")) 1102 goto err; 1103 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1104 goto err; 1105 1106 list_add(&dc->list, &uncached_devices); 1107 list_for_each_entry(c, &bch_cache_sets, list) 1108 bch_cached_dev_attach(dc, c); 1109 1110 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1111 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1112 bch_cached_dev_run(dc); 1113 1114 return NULL; 1115 err: 1116 kobject_put(&dc->disk.kobj); 1117 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1118 /* 1119 * Return NULL instead of an error because kobject_put() cleans 1120 * everything up 1121 */ 1122 return NULL; 1123 } 1124 1125 /* Flash only volumes */ 1126 1127 void bch_flash_dev_release(struct kobject *kobj) 1128 { 1129 struct bcache_device *d = container_of(kobj, struct bcache_device, 1130 kobj); 1131 kfree(d); 1132 } 1133 1134 static void flash_dev_free(struct closure *cl) 1135 { 1136 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1137 bcache_device_free(d); 1138 kobject_put(&d->kobj); 1139 } 1140 1141 static void flash_dev_flush(struct closure *cl) 1142 { 1143 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1144 1145 bcache_device_unlink(d); 1146 kobject_del(&d->kobj); 1147 continue_at(cl, flash_dev_free, system_wq); 1148 } 1149 1150 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) 1151 { 1152 struct bcache_device *d = kzalloc(sizeof(struct bcache_device), 1153 GFP_KERNEL); 1154 if (!d) 1155 return -ENOMEM; 1156 1157 closure_init(&d->cl, NULL); 1158 set_closure_fn(&d->cl, flash_dev_flush, system_wq); 1159 1160 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1161 1162 if (bcache_device_init(d, block_bytes(c))) 1163 goto err; 1164 1165 bcache_device_attach(d, c, u - c->uuids); 1166 set_capacity(d->disk, u->sectors); 1167 bch_flash_dev_request_init(d); 1168 add_disk(d->disk); 1169 1170 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) 1171 goto err; 1172 1173 bcache_device_link(d, c, "volume"); 1174 1175 return 0; 1176 err: 1177 kobject_put(&d->kobj); 1178 return -ENOMEM; 1179 } 1180 1181 static int flash_devs_run(struct cache_set *c) 1182 { 1183 int ret = 0; 1184 struct uuid_entry *u; 1185 1186 for (u = c->uuids; 1187 u < c->uuids + c->nr_uuids && !ret; 1188 u++) 1189 if (UUID_FLASH_ONLY(u)) 1190 ret = flash_dev_run(c, u); 1191 1192 return ret; 1193 } 1194 1195 int bch_flash_dev_create(struct cache_set *c, uint64_t size) 1196 { 1197 struct uuid_entry *u; 1198 1199 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1200 return -EINTR; 1201 1202 u = uuid_find_empty(c); 1203 if (!u) { 1204 pr_err("Can't create volume, no room for UUID"); 1205 return -EINVAL; 1206 } 1207 1208 get_random_bytes(u->uuid, 16); 1209 memset(u->label, 0, 32); 1210 u->first_reg = u->last_reg = cpu_to_le32(get_seconds()); 1211 1212 SET_UUID_FLASH_ONLY(u, 1); 1213 u->sectors = size >> 9; 1214 1215 bch_uuid_write(c); 1216 1217 return flash_dev_run(c, u); 1218 } 1219 1220 /* Cache set */ 1221 1222 __printf(2, 3) 1223 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 1224 { 1225 va_list args; 1226 1227 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1228 return false; 1229 1230 /* XXX: we can be called from atomic context 1231 acquire_console_sem(); 1232 */ 1233 1234 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); 1235 1236 va_start(args, fmt); 1237 vprintk(fmt, args); 1238 va_end(args); 1239 1240 printk(", disabling caching\n"); 1241 1242 bch_cache_set_unregister(c); 1243 return true; 1244 } 1245 1246 void bch_cache_set_release(struct kobject *kobj) 1247 { 1248 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 1249 kfree(c); 1250 module_put(THIS_MODULE); 1251 } 1252 1253 static void cache_set_free(struct closure *cl) 1254 { 1255 struct cache_set *c = container_of(cl, struct cache_set, cl); 1256 struct cache *ca; 1257 unsigned i; 1258 1259 if (!IS_ERR_OR_NULL(c->debug)) 1260 debugfs_remove(c->debug); 1261 1262 bch_open_buckets_free(c); 1263 bch_btree_cache_free(c); 1264 bch_journal_free(c); 1265 1266 for_each_cache(ca, c, i) 1267 if (ca) 1268 kobject_put(&ca->kobj); 1269 1270 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); 1271 free_pages((unsigned long) c->sort, ilog2(bucket_pages(c))); 1272 1273 kfree(c->fill_iter); 1274 if (c->bio_split) 1275 bioset_free(c->bio_split); 1276 if (c->bio_meta) 1277 mempool_destroy(c->bio_meta); 1278 if (c->search) 1279 mempool_destroy(c->search); 1280 kfree(c->devices); 1281 1282 mutex_lock(&bch_register_lock); 1283 list_del(&c->list); 1284 mutex_unlock(&bch_register_lock); 1285 1286 pr_info("Cache set %pU unregistered", c->sb.set_uuid); 1287 wake_up(&unregister_wait); 1288 1289 closure_debug_destroy(&c->cl); 1290 kobject_put(&c->kobj); 1291 } 1292 1293 static void cache_set_flush(struct closure *cl) 1294 { 1295 struct cache_set *c = container_of(cl, struct cache_set, caching); 1296 struct btree *b; 1297 1298 /* Shut down allocator threads */ 1299 set_bit(CACHE_SET_STOPPING_2, &c->flags); 1300 wake_up(&c->alloc_wait); 1301 1302 bch_cache_accounting_destroy(&c->accounting); 1303 1304 kobject_put(&c->internal); 1305 kobject_del(&c->kobj); 1306 1307 if (!IS_ERR_OR_NULL(c->root)) 1308 list_add(&c->root->list, &c->btree_cache); 1309 1310 /* Should skip this if we're unregistering because of an error */ 1311 list_for_each_entry(b, &c->btree_cache, list) 1312 if (btree_node_dirty(b)) 1313 bch_btree_write(b, true, NULL); 1314 1315 closure_return(cl); 1316 } 1317 1318 static void __cache_set_unregister(struct closure *cl) 1319 { 1320 struct cache_set *c = container_of(cl, struct cache_set, caching); 1321 struct cached_dev *dc, *t; 1322 size_t i; 1323 1324 mutex_lock(&bch_register_lock); 1325 1326 if (test_bit(CACHE_SET_UNREGISTERING, &c->flags)) 1327 list_for_each_entry_safe(dc, t, &c->cached_devs, list) 1328 bch_cached_dev_detach(dc); 1329 1330 for (i = 0; i < c->nr_uuids; i++) 1331 if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i])) 1332 bcache_device_stop(c->devices[i]); 1333 1334 mutex_unlock(&bch_register_lock); 1335 1336 continue_at(cl, cache_set_flush, system_wq); 1337 } 1338 1339 void bch_cache_set_stop(struct cache_set *c) 1340 { 1341 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) 1342 closure_queue(&c->caching); 1343 } 1344 1345 void bch_cache_set_unregister(struct cache_set *c) 1346 { 1347 set_bit(CACHE_SET_UNREGISTERING, &c->flags); 1348 bch_cache_set_stop(c); 1349 } 1350 1351 #define alloc_bucket_pages(gfp, c) \ 1352 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) 1353 1354 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) 1355 { 1356 int iter_size; 1357 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); 1358 if (!c) 1359 return NULL; 1360 1361 __module_get(THIS_MODULE); 1362 closure_init(&c->cl, NULL); 1363 set_closure_fn(&c->cl, cache_set_free, system_wq); 1364 1365 closure_init(&c->caching, &c->cl); 1366 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); 1367 1368 /* Maybe create continue_at_noreturn() and use it here? */ 1369 closure_set_stopped(&c->cl); 1370 closure_put(&c->cl); 1371 1372 kobject_init(&c->kobj, &bch_cache_set_ktype); 1373 kobject_init(&c->internal, &bch_cache_set_internal_ktype); 1374 1375 bch_cache_accounting_init(&c->accounting, &c->cl); 1376 1377 memcpy(c->sb.set_uuid, sb->set_uuid, 16); 1378 c->sb.block_size = sb->block_size; 1379 c->sb.bucket_size = sb->bucket_size; 1380 c->sb.nr_in_set = sb->nr_in_set; 1381 c->sb.last_mount = sb->last_mount; 1382 c->bucket_bits = ilog2(sb->bucket_size); 1383 c->block_bits = ilog2(sb->block_size); 1384 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); 1385 1386 c->btree_pages = c->sb.bucket_size / PAGE_SECTORS; 1387 if (c->btree_pages > BTREE_MAX_PAGES) 1388 c->btree_pages = max_t(int, c->btree_pages / 4, 1389 BTREE_MAX_PAGES); 1390 1391 init_waitqueue_head(&c->alloc_wait); 1392 mutex_init(&c->bucket_lock); 1393 mutex_init(&c->fill_lock); 1394 mutex_init(&c->sort_lock); 1395 spin_lock_init(&c->sort_time_lock); 1396 closure_init_unlocked(&c->sb_write); 1397 closure_init_unlocked(&c->uuid_write); 1398 spin_lock_init(&c->btree_read_time_lock); 1399 bch_moving_init_cache_set(c); 1400 1401 INIT_LIST_HEAD(&c->list); 1402 INIT_LIST_HEAD(&c->cached_devs); 1403 INIT_LIST_HEAD(&c->btree_cache); 1404 INIT_LIST_HEAD(&c->btree_cache_freeable); 1405 INIT_LIST_HEAD(&c->btree_cache_freed); 1406 INIT_LIST_HEAD(&c->data_buckets); 1407 1408 c->search = mempool_create_slab_pool(32, bch_search_cache); 1409 if (!c->search) 1410 goto err; 1411 1412 iter_size = (sb->bucket_size / sb->block_size + 1) * 1413 sizeof(struct btree_iter_set); 1414 1415 if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) || 1416 !(c->bio_meta = mempool_create_kmalloc_pool(2, 1417 sizeof(struct bbio) + sizeof(struct bio_vec) * 1418 bucket_pages(c))) || 1419 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 1420 !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) || 1421 !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) || 1422 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || 1423 bch_journal_alloc(c) || 1424 bch_btree_cache_alloc(c) || 1425 bch_open_buckets_alloc(c)) 1426 goto err; 1427 1428 c->fill_iter->size = sb->bucket_size / sb->block_size; 1429 1430 c->congested_read_threshold_us = 2000; 1431 c->congested_write_threshold_us = 20000; 1432 c->error_limit = 8 << IO_ERROR_SHIFT; 1433 1434 return c; 1435 err: 1436 bch_cache_set_unregister(c); 1437 return NULL; 1438 } 1439 1440 static void run_cache_set(struct cache_set *c) 1441 { 1442 const char *err = "cannot allocate memory"; 1443 struct cached_dev *dc, *t; 1444 struct cache *ca; 1445 unsigned i; 1446 1447 struct btree_op op; 1448 bch_btree_op_init_stack(&op); 1449 op.lock = SHRT_MAX; 1450 1451 for_each_cache(ca, c, i) 1452 c->nbuckets += ca->sb.nbuckets; 1453 1454 if (CACHE_SYNC(&c->sb)) { 1455 LIST_HEAD(journal); 1456 struct bkey *k; 1457 struct jset *j; 1458 1459 err = "cannot allocate memory for journal"; 1460 if (bch_journal_read(c, &journal, &op)) 1461 goto err; 1462 1463 pr_debug("btree_journal_read() done"); 1464 1465 err = "no journal entries found"; 1466 if (list_empty(&journal)) 1467 goto err; 1468 1469 j = &list_entry(journal.prev, struct journal_replay, list)->j; 1470 1471 err = "IO error reading priorities"; 1472 for_each_cache(ca, c, i) 1473 prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]); 1474 1475 /* 1476 * If prio_read() fails it'll call cache_set_error and we'll 1477 * tear everything down right away, but if we perhaps checked 1478 * sooner we could avoid journal replay. 1479 */ 1480 1481 k = &j->btree_root; 1482 1483 err = "bad btree root"; 1484 if (__bch_ptr_invalid(c, j->btree_level + 1, k)) 1485 goto err; 1486 1487 err = "error reading btree root"; 1488 c->root = bch_btree_node_get(c, k, j->btree_level, &op); 1489 if (IS_ERR_OR_NULL(c->root)) 1490 goto err; 1491 1492 list_del_init(&c->root->list); 1493 rw_unlock(true, c->root); 1494 1495 err = uuid_read(c, j, &op.cl); 1496 if (err) 1497 goto err; 1498 1499 err = "error in recovery"; 1500 if (bch_btree_check(c, &op)) 1501 goto err; 1502 1503 bch_journal_mark(c, &journal); 1504 bch_btree_gc_finish(c); 1505 pr_debug("btree_check() done"); 1506 1507 /* 1508 * bcache_journal_next() can't happen sooner, or 1509 * btree_gc_finish() will give spurious errors about last_gc > 1510 * gc_gen - this is a hack but oh well. 1511 */ 1512 bch_journal_next(&c->journal); 1513 1514 for_each_cache(ca, c, i) 1515 closure_call(&ca->alloc, bch_allocator_thread, 1516 system_wq, &c->cl); 1517 1518 /* 1519 * First place it's safe to allocate: btree_check() and 1520 * btree_gc_finish() have to run before we have buckets to 1521 * allocate, and bch_bucket_alloc_set() might cause a journal 1522 * entry to be written so bcache_journal_next() has to be called 1523 * first. 1524 * 1525 * If the uuids were in the old format we have to rewrite them 1526 * before the next journal entry is written: 1527 */ 1528 if (j->version < BCACHE_JSET_VERSION_UUID) 1529 __uuid_write(c); 1530 1531 bch_journal_replay(c, &journal, &op); 1532 } else { 1533 pr_notice("invalidating existing data"); 1534 /* Don't want invalidate_buckets() to queue a gc yet */ 1535 closure_lock(&c->gc, NULL); 1536 1537 for_each_cache(ca, c, i) { 1538 unsigned j; 1539 1540 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 1541 2, SB_JOURNAL_BUCKETS); 1542 1543 for (j = 0; j < ca->sb.keys; j++) 1544 ca->sb.d[j] = ca->sb.first_bucket + j; 1545 } 1546 1547 bch_btree_gc_finish(c); 1548 1549 for_each_cache(ca, c, i) 1550 closure_call(&ca->alloc, bch_allocator_thread, 1551 ca->alloc_workqueue, &c->cl); 1552 1553 mutex_lock(&c->bucket_lock); 1554 for_each_cache(ca, c, i) 1555 bch_prio_write(ca); 1556 mutex_unlock(&c->bucket_lock); 1557 1558 wake_up(&c->alloc_wait); 1559 1560 err = "cannot allocate new UUID bucket"; 1561 if (__uuid_write(c)) 1562 goto err_unlock_gc; 1563 1564 err = "cannot allocate new btree root"; 1565 c->root = bch_btree_node_alloc(c, 0, &op.cl); 1566 if (IS_ERR_OR_NULL(c->root)) 1567 goto err_unlock_gc; 1568 1569 bkey_copy_key(&c->root->key, &MAX_KEY); 1570 bch_btree_write(c->root, true, &op); 1571 1572 bch_btree_set_root(c->root); 1573 rw_unlock(true, c->root); 1574 1575 /* 1576 * We don't want to write the first journal entry until 1577 * everything is set up - fortunately journal entries won't be 1578 * written until the SET_CACHE_SYNC() here: 1579 */ 1580 SET_CACHE_SYNC(&c->sb, true); 1581 1582 bch_journal_next(&c->journal); 1583 bch_journal_meta(c, &op.cl); 1584 1585 /* Unlock */ 1586 closure_set_stopped(&c->gc.cl); 1587 closure_put(&c->gc.cl); 1588 } 1589 1590 closure_sync(&op.cl); 1591 c->sb.last_mount = get_seconds(); 1592 bcache_write_super(c); 1593 1594 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1595 bch_cached_dev_attach(dc, c); 1596 1597 flash_devs_run(c); 1598 1599 return; 1600 err_unlock_gc: 1601 closure_set_stopped(&c->gc.cl); 1602 closure_put(&c->gc.cl); 1603 err: 1604 closure_sync(&op.cl); 1605 /* XXX: test this, it's broken */ 1606 bch_cache_set_error(c, err); 1607 } 1608 1609 static bool can_attach_cache(struct cache *ca, struct cache_set *c) 1610 { 1611 return ca->sb.block_size == c->sb.block_size && 1612 ca->sb.bucket_size == c->sb.block_size && 1613 ca->sb.nr_in_set == c->sb.nr_in_set; 1614 } 1615 1616 static const char *register_cache_set(struct cache *ca) 1617 { 1618 char buf[12]; 1619 const char *err = "cannot allocate memory"; 1620 struct cache_set *c; 1621 1622 list_for_each_entry(c, &bch_cache_sets, list) 1623 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { 1624 if (c->cache[ca->sb.nr_this_dev]) 1625 return "duplicate cache set member"; 1626 1627 if (!can_attach_cache(ca, c)) 1628 return "cache sb does not match set"; 1629 1630 if (!CACHE_SYNC(&ca->sb)) 1631 SET_CACHE_SYNC(&c->sb, false); 1632 1633 goto found; 1634 } 1635 1636 c = bch_cache_set_alloc(&ca->sb); 1637 if (!c) 1638 return err; 1639 1640 err = "error creating kobject"; 1641 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || 1642 kobject_add(&c->internal, &c->kobj, "internal")) 1643 goto err; 1644 1645 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) 1646 goto err; 1647 1648 bch_debug_init_cache_set(c); 1649 1650 list_add(&c->list, &bch_cache_sets); 1651 found: 1652 sprintf(buf, "cache%i", ca->sb.nr_this_dev); 1653 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || 1654 sysfs_create_link(&c->kobj, &ca->kobj, buf)) 1655 goto err; 1656 1657 if (ca->sb.seq > c->sb.seq) { 1658 c->sb.version = ca->sb.version; 1659 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); 1660 c->sb.flags = ca->sb.flags; 1661 c->sb.seq = ca->sb.seq; 1662 pr_debug("set version = %llu", c->sb.version); 1663 } 1664 1665 ca->set = c; 1666 ca->set->cache[ca->sb.nr_this_dev] = ca; 1667 c->cache_by_alloc[c->caches_loaded++] = ca; 1668 1669 if (c->caches_loaded == c->sb.nr_in_set) 1670 run_cache_set(c); 1671 1672 return NULL; 1673 err: 1674 bch_cache_set_unregister(c); 1675 return err; 1676 } 1677 1678 /* Cache device */ 1679 1680 void bch_cache_release(struct kobject *kobj) 1681 { 1682 struct cache *ca = container_of(kobj, struct cache, kobj); 1683 1684 if (ca->set) 1685 ca->set->cache[ca->sb.nr_this_dev] = NULL; 1686 1687 bch_cache_allocator_exit(ca); 1688 1689 bio_split_pool_free(&ca->bio_split_hook); 1690 1691 if (ca->alloc_workqueue) 1692 destroy_workqueue(ca->alloc_workqueue); 1693 1694 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); 1695 kfree(ca->prio_buckets); 1696 vfree(ca->buckets); 1697 1698 free_heap(&ca->heap); 1699 free_fifo(&ca->unused); 1700 free_fifo(&ca->free_inc); 1701 free_fifo(&ca->free); 1702 1703 if (ca->sb_bio.bi_inline_vecs[0].bv_page) 1704 put_page(ca->sb_bio.bi_io_vec[0].bv_page); 1705 1706 if (!IS_ERR_OR_NULL(ca->bdev)) { 1707 blk_sync_queue(bdev_get_queue(ca->bdev)); 1708 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1709 } 1710 1711 kfree(ca); 1712 module_put(THIS_MODULE); 1713 } 1714 1715 static int cache_alloc(struct cache_sb *sb, struct cache *ca) 1716 { 1717 size_t free; 1718 struct bucket *b; 1719 1720 if (!ca) 1721 return -ENOMEM; 1722 1723 __module_get(THIS_MODULE); 1724 kobject_init(&ca->kobj, &bch_cache_ktype); 1725 1726 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); 1727 1728 INIT_LIST_HEAD(&ca->discards); 1729 1730 bio_init(&ca->sb_bio); 1731 ca->sb_bio.bi_max_vecs = 1; 1732 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; 1733 1734 bio_init(&ca->journal.bio); 1735 ca->journal.bio.bi_max_vecs = 8; 1736 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; 1737 1738 free = roundup_pow_of_two(ca->sb.nbuckets) >> 9; 1739 free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2); 1740 1741 if (!init_fifo(&ca->free, free, GFP_KERNEL) || 1742 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || 1743 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || 1744 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || 1745 !(ca->buckets = vmalloc(sizeof(struct bucket) * 1746 ca->sb.nbuckets)) || 1747 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 1748 2, GFP_KERNEL)) || 1749 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || 1750 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || 1751 bio_split_pool_init(&ca->bio_split_hook)) 1752 goto err; 1753 1754 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 1755 1756 memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket)); 1757 for_each_bucket(b, ca) 1758 atomic_set(&b->pin, 0); 1759 1760 if (bch_cache_allocator_init(ca)) 1761 goto err; 1762 1763 return 0; 1764 err: 1765 kobject_put(&ca->kobj); 1766 return -ENOMEM; 1767 } 1768 1769 static const char *register_cache(struct cache_sb *sb, struct page *sb_page, 1770 struct block_device *bdev, struct cache *ca) 1771 { 1772 char name[BDEVNAME_SIZE]; 1773 const char *err = "cannot allocate memory"; 1774 1775 if (cache_alloc(sb, ca) != 0) 1776 return err; 1777 1778 ca->sb_bio.bi_io_vec[0].bv_page = sb_page; 1779 ca->bdev = bdev; 1780 ca->bdev->bd_holder = ca; 1781 1782 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 1783 ca->discard = CACHE_DISCARD(&ca->sb); 1784 1785 err = "error creating kobject"; 1786 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) 1787 goto err; 1788 1789 err = register_cache_set(ca); 1790 if (err) 1791 goto err; 1792 1793 pr_info("registered cache device %s", bdevname(bdev, name)); 1794 1795 return NULL; 1796 err: 1797 kobject_put(&ca->kobj); 1798 pr_info("error opening %s: %s", bdevname(bdev, name), err); 1799 /* Return NULL instead of an error because kobject_put() cleans 1800 * everything up 1801 */ 1802 return NULL; 1803 } 1804 1805 /* Global interfaces/init */ 1806 1807 static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, 1808 const char *, size_t); 1809 1810 kobj_attribute_write(register, register_bcache); 1811 kobj_attribute_write(register_quiet, register_bcache); 1812 1813 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, 1814 const char *buffer, size_t size) 1815 { 1816 ssize_t ret = size; 1817 const char *err = "cannot allocate memory"; 1818 char *path = NULL; 1819 struct cache_sb *sb = NULL; 1820 struct block_device *bdev = NULL; 1821 struct page *sb_page = NULL; 1822 1823 if (!try_module_get(THIS_MODULE)) 1824 return -EBUSY; 1825 1826 mutex_lock(&bch_register_lock); 1827 1828 if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || 1829 !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) 1830 goto err; 1831 1832 err = "failed to open device"; 1833 bdev = blkdev_get_by_path(strim(path), 1834 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1835 sb); 1836 if (bdev == ERR_PTR(-EBUSY)) 1837 err = "device busy"; 1838 1839 if (IS_ERR(bdev) || 1840 set_blocksize(bdev, 4096)) 1841 goto err; 1842 1843 err = read_super(sb, bdev, &sb_page); 1844 if (err) 1845 goto err_close; 1846 1847 if (SB_IS_BDEV(sb)) { 1848 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1849 1850 err = register_bdev(sb, sb_page, bdev, dc); 1851 } else { 1852 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); 1853 1854 err = register_cache(sb, sb_page, bdev, ca); 1855 } 1856 1857 if (err) { 1858 /* register_(bdev|cache) will only return an error if they 1859 * didn't get far enough to create the kobject - if they did, 1860 * the kobject destructor will do this cleanup. 1861 */ 1862 put_page(sb_page); 1863 err_close: 1864 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1865 err: 1866 if (attr != &ksysfs_register_quiet) 1867 pr_info("error opening %s: %s", path, err); 1868 ret = -EINVAL; 1869 } 1870 1871 kfree(sb); 1872 kfree(path); 1873 mutex_unlock(&bch_register_lock); 1874 module_put(THIS_MODULE); 1875 return ret; 1876 } 1877 1878 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) 1879 { 1880 if (code == SYS_DOWN || 1881 code == SYS_HALT || 1882 code == SYS_POWER_OFF) { 1883 DEFINE_WAIT(wait); 1884 unsigned long start = jiffies; 1885 bool stopped = false; 1886 1887 struct cache_set *c, *tc; 1888 struct cached_dev *dc, *tdc; 1889 1890 mutex_lock(&bch_register_lock); 1891 1892 if (list_empty(&bch_cache_sets) && 1893 list_empty(&uncached_devices)) 1894 goto out; 1895 1896 pr_info("Stopping all devices:"); 1897 1898 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 1899 bch_cache_set_stop(c); 1900 1901 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) 1902 bcache_device_stop(&dc->disk); 1903 1904 /* What's a condition variable? */ 1905 while (1) { 1906 long timeout = start + 2 * HZ - jiffies; 1907 1908 stopped = list_empty(&bch_cache_sets) && 1909 list_empty(&uncached_devices); 1910 1911 if (timeout < 0 || stopped) 1912 break; 1913 1914 prepare_to_wait(&unregister_wait, &wait, 1915 TASK_UNINTERRUPTIBLE); 1916 1917 mutex_unlock(&bch_register_lock); 1918 schedule_timeout(timeout); 1919 mutex_lock(&bch_register_lock); 1920 } 1921 1922 finish_wait(&unregister_wait, &wait); 1923 1924 if (stopped) 1925 pr_info("All devices stopped"); 1926 else 1927 pr_notice("Timeout waiting for devices to be closed"); 1928 out: 1929 mutex_unlock(&bch_register_lock); 1930 } 1931 1932 return NOTIFY_DONE; 1933 } 1934 1935 static struct notifier_block reboot = { 1936 .notifier_call = bcache_reboot, 1937 .priority = INT_MAX, /* before any real devices */ 1938 }; 1939 1940 static void bcache_exit(void) 1941 { 1942 bch_debug_exit(); 1943 bch_writeback_exit(); 1944 bch_request_exit(); 1945 bch_btree_exit(); 1946 if (bcache_kobj) 1947 kobject_put(bcache_kobj); 1948 if (bcache_wq) 1949 destroy_workqueue(bcache_wq); 1950 unregister_blkdev(bcache_major, "bcache"); 1951 unregister_reboot_notifier(&reboot); 1952 } 1953 1954 static int __init bcache_init(void) 1955 { 1956 static const struct attribute *files[] = { 1957 &ksysfs_register.attr, 1958 &ksysfs_register_quiet.attr, 1959 NULL 1960 }; 1961 1962 mutex_init(&bch_register_lock); 1963 init_waitqueue_head(&unregister_wait); 1964 register_reboot_notifier(&reboot); 1965 closure_debug_init(); 1966 1967 bcache_major = register_blkdev(0, "bcache"); 1968 if (bcache_major < 0) 1969 return bcache_major; 1970 1971 if (!(bcache_wq = create_workqueue("bcache")) || 1972 !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || 1973 sysfs_create_files(bcache_kobj, files) || 1974 bch_btree_init() || 1975 bch_request_init() || 1976 bch_writeback_init() || 1977 bch_debug_init(bcache_kobj)) 1978 goto err; 1979 1980 return 0; 1981 err: 1982 bcache_exit(); 1983 return -ENOMEM; 1984 } 1985 1986 module_exit(bcache_exit); 1987 module_init(bcache_init); 1988