1 /* 2 * bcache setup/teardown code, and some metadata io - read a superblock and 3 * figure out what to do with it. 4 * 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 * Copyright 2012 Google, Inc. 7 */ 8 9 #include "bcache.h" 10 #include "btree.h" 11 #include "debug.h" 12 #include "extents.h" 13 #include "request.h" 14 #include "writeback.h" 15 16 #include <linux/blkdev.h> 17 #include <linux/buffer_head.h> 18 #include <linux/debugfs.h> 19 #include <linux/genhd.h> 20 #include <linux/idr.h> 21 #include <linux/kthread.h> 22 #include <linux/module.h> 23 #include <linux/random.h> 24 #include <linux/reboot.h> 25 #include <linux/sysfs.h> 26 27 MODULE_LICENSE("GPL"); 28 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); 29 30 static const char bcache_magic[] = { 31 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, 32 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 33 }; 34 35 static const char invalid_uuid[] = { 36 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, 37 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 38 }; 39 40 /* Default is -1; we skip past it for struct cached_dev's cache mode */ 41 const char * const bch_cache_modes[] = { 42 "default", 43 "writethrough", 44 "writeback", 45 "writearound", 46 "none", 47 NULL 48 }; 49 50 /* Default is -1; we skip past it for stop_when_cache_set_failed */ 51 const char * const bch_stop_on_failure_modes[] = { 52 "default", 53 "auto", 54 "always", 55 NULL 56 }; 57 58 static struct kobject *bcache_kobj; 59 struct mutex bch_register_lock; 60 LIST_HEAD(bch_cache_sets); 61 static LIST_HEAD(uncached_devices); 62 63 static int bcache_major; 64 static DEFINE_IDA(bcache_device_idx); 65 static wait_queue_head_t unregister_wait; 66 struct workqueue_struct *bcache_wq; 67 68 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 69 /* limitation of partitions number on single bcache device */ 70 #define BCACHE_MINORS 128 71 /* limitation of bcache devices number on single system */ 72 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS) 73 74 /* Superblock */ 75 76 static const char *read_super(struct cache_sb *sb, struct block_device *bdev, 77 struct page **res) 78 { 79 const char *err; 80 struct cache_sb *s; 81 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); 82 unsigned i; 83 84 if (!bh) 85 return "IO error"; 86 87 s = (struct cache_sb *) bh->b_data; 88 89 sb->offset = le64_to_cpu(s->offset); 90 sb->version = le64_to_cpu(s->version); 91 92 memcpy(sb->magic, s->magic, 16); 93 memcpy(sb->uuid, s->uuid, 16); 94 memcpy(sb->set_uuid, s->set_uuid, 16); 95 memcpy(sb->label, s->label, SB_LABEL_SIZE); 96 97 sb->flags = le64_to_cpu(s->flags); 98 sb->seq = le64_to_cpu(s->seq); 99 sb->last_mount = le32_to_cpu(s->last_mount); 100 sb->first_bucket = le16_to_cpu(s->first_bucket); 101 sb->keys = le16_to_cpu(s->keys); 102 103 for (i = 0; i < SB_JOURNAL_BUCKETS; i++) 104 sb->d[i] = le64_to_cpu(s->d[i]); 105 106 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u", 107 sb->version, sb->flags, sb->seq, sb->keys); 108 109 err = "Not a bcache superblock"; 110 if (sb->offset != SB_SECTOR) 111 goto err; 112 113 if (memcmp(sb->magic, bcache_magic, 16)) 114 goto err; 115 116 err = "Too many journal buckets"; 117 if (sb->keys > SB_JOURNAL_BUCKETS) 118 goto err; 119 120 err = "Bad checksum"; 121 if (s->csum != csum_set(s)) 122 goto err; 123 124 err = "Bad UUID"; 125 if (bch_is_zero(sb->uuid, 16)) 126 goto err; 127 128 sb->block_size = le16_to_cpu(s->block_size); 129 130 err = "Superblock block size smaller than device block size"; 131 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) 132 goto err; 133 134 switch (sb->version) { 135 case BCACHE_SB_VERSION_BDEV: 136 sb->data_offset = BDEV_DATA_START_DEFAULT; 137 break; 138 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: 139 sb->data_offset = le64_to_cpu(s->data_offset); 140 141 err = "Bad data offset"; 142 if (sb->data_offset < BDEV_DATA_START_DEFAULT) 143 goto err; 144 145 break; 146 case BCACHE_SB_VERSION_CDEV: 147 case BCACHE_SB_VERSION_CDEV_WITH_UUID: 148 sb->nbuckets = le64_to_cpu(s->nbuckets); 149 sb->bucket_size = le16_to_cpu(s->bucket_size); 150 151 sb->nr_in_set = le16_to_cpu(s->nr_in_set); 152 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); 153 154 err = "Too many buckets"; 155 if (sb->nbuckets > LONG_MAX) 156 goto err; 157 158 err = "Not enough buckets"; 159 if (sb->nbuckets < 1 << 7) 160 goto err; 161 162 err = "Bad block/bucket size"; 163 if (!is_power_of_2(sb->block_size) || 164 sb->block_size > PAGE_SECTORS || 165 !is_power_of_2(sb->bucket_size) || 166 sb->bucket_size < PAGE_SECTORS) 167 goto err; 168 169 err = "Invalid superblock: device too small"; 170 if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets) 171 goto err; 172 173 err = "Bad UUID"; 174 if (bch_is_zero(sb->set_uuid, 16)) 175 goto err; 176 177 err = "Bad cache device number in set"; 178 if (!sb->nr_in_set || 179 sb->nr_in_set <= sb->nr_this_dev || 180 sb->nr_in_set > MAX_CACHES_PER_SET) 181 goto err; 182 183 err = "Journal buckets not sequential"; 184 for (i = 0; i < sb->keys; i++) 185 if (sb->d[i] != sb->first_bucket + i) 186 goto err; 187 188 err = "Too many journal buckets"; 189 if (sb->first_bucket + sb->keys > sb->nbuckets) 190 goto err; 191 192 err = "Invalid superblock: first bucket comes before end of super"; 193 if (sb->first_bucket * sb->bucket_size < 16) 194 goto err; 195 196 break; 197 default: 198 err = "Unsupported superblock version"; 199 goto err; 200 } 201 202 sb->last_mount = get_seconds(); 203 err = NULL; 204 205 get_page(bh->b_page); 206 *res = bh->b_page; 207 err: 208 put_bh(bh); 209 return err; 210 } 211 212 static void write_bdev_super_endio(struct bio *bio) 213 { 214 struct cached_dev *dc = bio->bi_private; 215 /* XXX: error checking */ 216 217 closure_put(&dc->sb_write); 218 } 219 220 static void __write_super(struct cache_sb *sb, struct bio *bio) 221 { 222 struct cache_sb *out = page_address(bio_first_page_all(bio)); 223 unsigned i; 224 225 bio->bi_iter.bi_sector = SB_SECTOR; 226 bio->bi_iter.bi_size = SB_SIZE; 227 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); 228 bch_bio_map(bio, NULL); 229 230 out->offset = cpu_to_le64(sb->offset); 231 out->version = cpu_to_le64(sb->version); 232 233 memcpy(out->uuid, sb->uuid, 16); 234 memcpy(out->set_uuid, sb->set_uuid, 16); 235 memcpy(out->label, sb->label, SB_LABEL_SIZE); 236 237 out->flags = cpu_to_le64(sb->flags); 238 out->seq = cpu_to_le64(sb->seq); 239 240 out->last_mount = cpu_to_le32(sb->last_mount); 241 out->first_bucket = cpu_to_le16(sb->first_bucket); 242 out->keys = cpu_to_le16(sb->keys); 243 244 for (i = 0; i < sb->keys; i++) 245 out->d[i] = cpu_to_le64(sb->d[i]); 246 247 out->csum = csum_set(out); 248 249 pr_debug("ver %llu, flags %llu, seq %llu", 250 sb->version, sb->flags, sb->seq); 251 252 submit_bio(bio); 253 } 254 255 static void bch_write_bdev_super_unlock(struct closure *cl) 256 { 257 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); 258 259 up(&dc->sb_write_mutex); 260 } 261 262 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) 263 { 264 struct closure *cl = &dc->sb_write; 265 struct bio *bio = &dc->sb_bio; 266 267 down(&dc->sb_write_mutex); 268 closure_init(cl, parent); 269 270 bio_reset(bio); 271 bio_set_dev(bio, dc->bdev); 272 bio->bi_end_io = write_bdev_super_endio; 273 bio->bi_private = dc; 274 275 closure_get(cl); 276 /* I/O request sent to backing device */ 277 __write_super(&dc->sb, bio); 278 279 closure_return_with_destructor(cl, bch_write_bdev_super_unlock); 280 } 281 282 static void write_super_endio(struct bio *bio) 283 { 284 struct cache *ca = bio->bi_private; 285 286 /* is_read = 0 */ 287 bch_count_io_errors(ca, bio->bi_status, 0, 288 "writing superblock"); 289 closure_put(&ca->set->sb_write); 290 } 291 292 static void bcache_write_super_unlock(struct closure *cl) 293 { 294 struct cache_set *c = container_of(cl, struct cache_set, sb_write); 295 296 up(&c->sb_write_mutex); 297 } 298 299 void bcache_write_super(struct cache_set *c) 300 { 301 struct closure *cl = &c->sb_write; 302 struct cache *ca; 303 unsigned i; 304 305 down(&c->sb_write_mutex); 306 closure_init(cl, &c->cl); 307 308 c->sb.seq++; 309 310 for_each_cache(ca, c, i) { 311 struct bio *bio = &ca->sb_bio; 312 313 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 314 ca->sb.seq = c->sb.seq; 315 ca->sb.last_mount = c->sb.last_mount; 316 317 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 318 319 bio_reset(bio); 320 bio_set_dev(bio, ca->bdev); 321 bio->bi_end_io = write_super_endio; 322 bio->bi_private = ca; 323 324 closure_get(cl); 325 __write_super(&ca->sb, bio); 326 } 327 328 closure_return_with_destructor(cl, bcache_write_super_unlock); 329 } 330 331 /* UUID io */ 332 333 static void uuid_endio(struct bio *bio) 334 { 335 struct closure *cl = bio->bi_private; 336 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 337 338 cache_set_err_on(bio->bi_status, c, "accessing uuids"); 339 bch_bbio_free(bio, c); 340 closure_put(cl); 341 } 342 343 static void uuid_io_unlock(struct closure *cl) 344 { 345 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 346 347 up(&c->uuid_write_mutex); 348 } 349 350 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, 351 struct bkey *k, struct closure *parent) 352 { 353 struct closure *cl = &c->uuid_write; 354 struct uuid_entry *u; 355 unsigned i; 356 char buf[80]; 357 358 BUG_ON(!parent); 359 down(&c->uuid_write_mutex); 360 closure_init(cl, parent); 361 362 for (i = 0; i < KEY_PTRS(k); i++) { 363 struct bio *bio = bch_bbio_alloc(c); 364 365 bio->bi_opf = REQ_SYNC | REQ_META | op_flags; 366 bio->bi_iter.bi_size = KEY_SIZE(k) << 9; 367 368 bio->bi_end_io = uuid_endio; 369 bio->bi_private = cl; 370 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 371 bch_bio_map(bio, c->uuids); 372 373 bch_submit_bbio(bio, c, k, i); 374 375 if (op != REQ_OP_WRITE) 376 break; 377 } 378 379 bch_extent_to_text(buf, sizeof(buf), k); 380 pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf); 381 382 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 383 if (!bch_is_zero(u->uuid, 16)) 384 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", 385 u - c->uuids, u->uuid, u->label, 386 u->first_reg, u->last_reg, u->invalidated); 387 388 closure_return_with_destructor(cl, uuid_io_unlock); 389 } 390 391 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 392 { 393 struct bkey *k = &j->uuid_bucket; 394 395 if (__bch_btree_ptr_invalid(c, k)) 396 return "bad uuid pointer"; 397 398 bkey_copy(&c->uuid_bucket, k); 399 uuid_io(c, REQ_OP_READ, 0, k, cl); 400 401 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 402 struct uuid_entry_v0 *u0 = (void *) c->uuids; 403 struct uuid_entry *u1 = (void *) c->uuids; 404 int i; 405 406 closure_sync(cl); 407 408 /* 409 * Since the new uuid entry is bigger than the old, we have to 410 * convert starting at the highest memory address and work down 411 * in order to do it in place 412 */ 413 414 for (i = c->nr_uuids - 1; 415 i >= 0; 416 --i) { 417 memcpy(u1[i].uuid, u0[i].uuid, 16); 418 memcpy(u1[i].label, u0[i].label, 32); 419 420 u1[i].first_reg = u0[i].first_reg; 421 u1[i].last_reg = u0[i].last_reg; 422 u1[i].invalidated = u0[i].invalidated; 423 424 u1[i].flags = 0; 425 u1[i].sectors = 0; 426 } 427 } 428 429 return NULL; 430 } 431 432 static int __uuid_write(struct cache_set *c) 433 { 434 BKEY_PADDED(key) k; 435 struct closure cl; 436 closure_init_stack(&cl); 437 438 lockdep_assert_held(&bch_register_lock); 439 440 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) 441 return 1; 442 443 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 444 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); 445 closure_sync(&cl); 446 447 bkey_copy(&c->uuid_bucket, &k.key); 448 bkey_put(c, &k.key); 449 return 0; 450 } 451 452 int bch_uuid_write(struct cache_set *c) 453 { 454 int ret = __uuid_write(c); 455 456 if (!ret) 457 bch_journal_meta(c, NULL); 458 459 return ret; 460 } 461 462 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 463 { 464 struct uuid_entry *u; 465 466 for (u = c->uuids; 467 u < c->uuids + c->nr_uuids; u++) 468 if (!memcmp(u->uuid, uuid, 16)) 469 return u; 470 471 return NULL; 472 } 473 474 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 475 { 476 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 477 return uuid_find(c, zero_uuid); 478 } 479 480 /* 481 * Bucket priorities/gens: 482 * 483 * For each bucket, we store on disk its 484 * 8 bit gen 485 * 16 bit priority 486 * 487 * See alloc.c for an explanation of the gen. The priority is used to implement 488 * lru (and in the future other) cache replacement policies; for most purposes 489 * it's just an opaque integer. 490 * 491 * The gens and the priorities don't have a whole lot to do with each other, and 492 * it's actually the gens that must be written out at specific times - it's no 493 * big deal if the priorities don't get written, if we lose them we just reuse 494 * buckets in suboptimal order. 495 * 496 * On disk they're stored in a packed array, and in as many buckets are required 497 * to fit them all. The buckets we use to store them form a list; the journal 498 * header points to the first bucket, the first bucket points to the second 499 * bucket, et cetera. 500 * 501 * This code is used by the allocation code; periodically (whenever it runs out 502 * of buckets to allocate from) the allocation code will invalidate some 503 * buckets, but it can't use those buckets until their new gens are safely on 504 * disk. 505 */ 506 507 static void prio_endio(struct bio *bio) 508 { 509 struct cache *ca = bio->bi_private; 510 511 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); 512 bch_bbio_free(bio, ca->set); 513 closure_put(&ca->prio); 514 } 515 516 static void prio_io(struct cache *ca, uint64_t bucket, int op, 517 unsigned long op_flags) 518 { 519 struct closure *cl = &ca->prio; 520 struct bio *bio = bch_bbio_alloc(ca->set); 521 522 closure_init_stack(cl); 523 524 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; 525 bio_set_dev(bio, ca->bdev); 526 bio->bi_iter.bi_size = bucket_bytes(ca); 527 528 bio->bi_end_io = prio_endio; 529 bio->bi_private = ca; 530 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 531 bch_bio_map(bio, ca->disk_buckets); 532 533 closure_bio_submit(ca->set, bio, &ca->prio); 534 closure_sync(cl); 535 } 536 537 void bch_prio_write(struct cache *ca) 538 { 539 int i; 540 struct bucket *b; 541 struct closure cl; 542 543 closure_init_stack(&cl); 544 545 lockdep_assert_held(&ca->set->bucket_lock); 546 547 ca->disk_buckets->seq++; 548 549 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), 550 &ca->meta_sectors_written); 551 552 //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), 553 // fifo_used(&ca->free_inc), fifo_used(&ca->unused)); 554 555 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 556 long bucket; 557 struct prio_set *p = ca->disk_buckets; 558 struct bucket_disk *d = p->data; 559 struct bucket_disk *end = d + prios_per_bucket(ca); 560 561 for (b = ca->buckets + i * prios_per_bucket(ca); 562 b < ca->buckets + ca->sb.nbuckets && d < end; 563 b++, d++) { 564 d->prio = cpu_to_le16(b->prio); 565 d->gen = b->gen; 566 } 567 568 p->next_bucket = ca->prio_buckets[i + 1]; 569 p->magic = pset_magic(&ca->sb); 570 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); 571 572 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); 573 BUG_ON(bucket == -1); 574 575 mutex_unlock(&ca->set->bucket_lock); 576 prio_io(ca, bucket, REQ_OP_WRITE, 0); 577 mutex_lock(&ca->set->bucket_lock); 578 579 ca->prio_buckets[i] = bucket; 580 atomic_dec_bug(&ca->buckets[bucket].pin); 581 } 582 583 mutex_unlock(&ca->set->bucket_lock); 584 585 bch_journal_meta(ca->set, &cl); 586 closure_sync(&cl); 587 588 mutex_lock(&ca->set->bucket_lock); 589 590 /* 591 * Don't want the old priorities to get garbage collected until after we 592 * finish writing the new ones, and they're journalled 593 */ 594 for (i = 0; i < prio_buckets(ca); i++) { 595 if (ca->prio_last_buckets[i]) 596 __bch_bucket_free(ca, 597 &ca->buckets[ca->prio_last_buckets[i]]); 598 599 ca->prio_last_buckets[i] = ca->prio_buckets[i]; 600 } 601 } 602 603 static void prio_read(struct cache *ca, uint64_t bucket) 604 { 605 struct prio_set *p = ca->disk_buckets; 606 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; 607 struct bucket *b; 608 unsigned bucket_nr = 0; 609 610 for (b = ca->buckets; 611 b < ca->buckets + ca->sb.nbuckets; 612 b++, d++) { 613 if (d == end) { 614 ca->prio_buckets[bucket_nr] = bucket; 615 ca->prio_last_buckets[bucket_nr] = bucket; 616 bucket_nr++; 617 618 prio_io(ca, bucket, REQ_OP_READ, 0); 619 620 if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) 621 pr_warn("bad csum reading priorities"); 622 623 if (p->magic != pset_magic(&ca->sb)) 624 pr_warn("bad magic reading priorities"); 625 626 bucket = p->next_bucket; 627 d = p->data; 628 } 629 630 b->prio = le16_to_cpu(d->prio); 631 b->gen = b->last_gc = d->gen; 632 } 633 } 634 635 /* Bcache device */ 636 637 static int open_dev(struct block_device *b, fmode_t mode) 638 { 639 struct bcache_device *d = b->bd_disk->private_data; 640 if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) 641 return -ENXIO; 642 643 closure_get(&d->cl); 644 return 0; 645 } 646 647 static void release_dev(struct gendisk *b, fmode_t mode) 648 { 649 struct bcache_device *d = b->private_data; 650 closure_put(&d->cl); 651 } 652 653 static int ioctl_dev(struct block_device *b, fmode_t mode, 654 unsigned int cmd, unsigned long arg) 655 { 656 struct bcache_device *d = b->bd_disk->private_data; 657 return d->ioctl(d, mode, cmd, arg); 658 } 659 660 static const struct block_device_operations bcache_ops = { 661 .open = open_dev, 662 .release = release_dev, 663 .ioctl = ioctl_dev, 664 .owner = THIS_MODULE, 665 }; 666 667 void bcache_device_stop(struct bcache_device *d) 668 { 669 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) 670 closure_queue(&d->cl); 671 } 672 673 static void bcache_device_unlink(struct bcache_device *d) 674 { 675 lockdep_assert_held(&bch_register_lock); 676 677 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { 678 unsigned i; 679 struct cache *ca; 680 681 sysfs_remove_link(&d->c->kobj, d->name); 682 sysfs_remove_link(&d->kobj, "cache"); 683 684 for_each_cache(ca, d->c, i) 685 bd_unlink_disk_holder(ca->bdev, d->disk); 686 } 687 } 688 689 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 690 const char *name) 691 { 692 unsigned i; 693 struct cache *ca; 694 695 for_each_cache(ca, d->c, i) 696 bd_link_disk_holder(ca->bdev, d->disk); 697 698 snprintf(d->name, BCACHEDEVNAME_SIZE, 699 "%s%u", name, d->id); 700 701 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || 702 sysfs_create_link(&c->kobj, &d->kobj, d->name), 703 "Couldn't create device <-> cache set symlinks"); 704 705 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); 706 } 707 708 static void bcache_device_detach(struct bcache_device *d) 709 { 710 lockdep_assert_held(&bch_register_lock); 711 712 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { 713 struct uuid_entry *u = d->c->uuids + d->id; 714 715 SET_UUID_FLASH_ONLY(u, 0); 716 memcpy(u->uuid, invalid_uuid, 16); 717 u->invalidated = cpu_to_le32(get_seconds()); 718 bch_uuid_write(d->c); 719 } 720 721 bcache_device_unlink(d); 722 723 d->c->devices[d->id] = NULL; 724 closure_put(&d->c->caching); 725 d->c = NULL; 726 } 727 728 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, 729 unsigned id) 730 { 731 d->id = id; 732 d->c = c; 733 c->devices[id] = d; 734 735 if (id >= c->devices_max_used) 736 c->devices_max_used = id + 1; 737 738 closure_get(&c->caching); 739 } 740 741 static inline int first_minor_to_idx(int first_minor) 742 { 743 return (first_minor/BCACHE_MINORS); 744 } 745 746 static inline int idx_to_first_minor(int idx) 747 { 748 return (idx * BCACHE_MINORS); 749 } 750 751 static void bcache_device_free(struct bcache_device *d) 752 { 753 lockdep_assert_held(&bch_register_lock); 754 755 pr_info("%s stopped", d->disk->disk_name); 756 757 if (d->c) 758 bcache_device_detach(d); 759 if (d->disk && d->disk->flags & GENHD_FL_UP) 760 del_gendisk(d->disk); 761 if (d->disk && d->disk->queue) 762 blk_cleanup_queue(d->disk->queue); 763 if (d->disk) { 764 ida_simple_remove(&bcache_device_idx, 765 first_minor_to_idx(d->disk->first_minor)); 766 put_disk(d->disk); 767 } 768 769 if (d->bio_split) 770 bioset_free(d->bio_split); 771 kvfree(d->full_dirty_stripes); 772 kvfree(d->stripe_sectors_dirty); 773 774 closure_debug_destroy(&d->cl); 775 } 776 777 static int bcache_device_init(struct bcache_device *d, unsigned block_size, 778 sector_t sectors) 779 { 780 struct request_queue *q; 781 const size_t max_stripes = min_t(size_t, INT_MAX, 782 SIZE_MAX / sizeof(atomic_t)); 783 size_t n; 784 int idx; 785 786 if (!d->stripe_size) 787 d->stripe_size = 1 << 31; 788 789 d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); 790 791 if (!d->nr_stripes || d->nr_stripes > max_stripes) { 792 pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", 793 (unsigned)d->nr_stripes); 794 return -ENOMEM; 795 } 796 797 n = d->nr_stripes * sizeof(atomic_t); 798 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); 799 if (!d->stripe_sectors_dirty) 800 return -ENOMEM; 801 802 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); 803 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); 804 if (!d->full_dirty_stripes) 805 return -ENOMEM; 806 807 idx = ida_simple_get(&bcache_device_idx, 0, 808 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); 809 if (idx < 0) 810 return idx; 811 812 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio), 813 BIOSET_NEED_BVECS | 814 BIOSET_NEED_RESCUER)) || 815 !(d->disk = alloc_disk(BCACHE_MINORS))) { 816 ida_simple_remove(&bcache_device_idx, idx); 817 return -ENOMEM; 818 } 819 820 set_capacity(d->disk, sectors); 821 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); 822 823 d->disk->major = bcache_major; 824 d->disk->first_minor = idx_to_first_minor(idx); 825 d->disk->fops = &bcache_ops; 826 d->disk->private_data = d; 827 828 q = blk_alloc_queue(GFP_KERNEL); 829 if (!q) 830 return -ENOMEM; 831 832 blk_queue_make_request(q, NULL); 833 d->disk->queue = q; 834 q->queuedata = d; 835 q->backing_dev_info->congested_data = d; 836 q->limits.max_hw_sectors = UINT_MAX; 837 q->limits.max_sectors = UINT_MAX; 838 q->limits.max_segment_size = UINT_MAX; 839 q->limits.max_segments = BIO_MAX_PAGES; 840 blk_queue_max_discard_sectors(q, UINT_MAX); 841 q->limits.discard_granularity = 512; 842 q->limits.io_min = block_size; 843 q->limits.logical_block_size = block_size; 844 q->limits.physical_block_size = block_size; 845 blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); 846 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); 847 blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); 848 849 blk_queue_write_cache(q, true, true); 850 851 return 0; 852 } 853 854 /* Cached device */ 855 856 static void calc_cached_dev_sectors(struct cache_set *c) 857 { 858 uint64_t sectors = 0; 859 struct cached_dev *dc; 860 861 list_for_each_entry(dc, &c->cached_devs, list) 862 sectors += bdev_sectors(dc->bdev); 863 864 c->cached_dev_sectors = sectors; 865 } 866 867 void bch_cached_dev_run(struct cached_dev *dc) 868 { 869 struct bcache_device *d = &dc->disk; 870 char buf[SB_LABEL_SIZE + 1]; 871 char *env[] = { 872 "DRIVER=bcache", 873 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), 874 NULL, 875 NULL, 876 }; 877 878 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); 879 buf[SB_LABEL_SIZE] = '\0'; 880 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); 881 882 if (atomic_xchg(&dc->running, 1)) { 883 kfree(env[1]); 884 kfree(env[2]); 885 return; 886 } 887 888 if (!d->c && 889 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 890 struct closure cl; 891 closure_init_stack(&cl); 892 893 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); 894 bch_write_bdev_super(dc, &cl); 895 closure_sync(&cl); 896 } 897 898 add_disk(d->disk); 899 bd_link_disk_holder(dc->bdev, dc->disk.disk); 900 /* won't show up in the uevent file, use udevadm monitor -e instead 901 * only class / kset properties are persistent */ 902 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 903 kfree(env[1]); 904 kfree(env[2]); 905 906 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 907 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) 908 pr_debug("error creating sysfs link"); 909 } 910 911 /* 912 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed 913 * work dc->writeback_rate_update is running. Wait until the routine 914 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to 915 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out 916 * seconds, give up waiting here and continue to cancel it too. 917 */ 918 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) 919 { 920 int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; 921 922 do { 923 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, 924 &dc->disk.flags)) 925 break; 926 time_out--; 927 schedule_timeout_interruptible(1); 928 } while (time_out > 0); 929 930 if (time_out == 0) 931 pr_warn("give up waiting for dc->writeback_write_update to quit"); 932 933 cancel_delayed_work_sync(&dc->writeback_rate_update); 934 } 935 936 static void cached_dev_detach_finish(struct work_struct *w) 937 { 938 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 939 struct closure cl; 940 closure_init_stack(&cl); 941 942 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); 943 BUG_ON(refcount_read(&dc->count)); 944 945 mutex_lock(&bch_register_lock); 946 947 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 948 cancel_writeback_rate_update_dwork(dc); 949 950 if (!IS_ERR_OR_NULL(dc->writeback_thread)) { 951 kthread_stop(dc->writeback_thread); 952 dc->writeback_thread = NULL; 953 } 954 955 memset(&dc->sb.set_uuid, 0, 16); 956 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 957 958 bch_write_bdev_super(dc, &cl); 959 closure_sync(&cl); 960 961 bcache_device_detach(&dc->disk); 962 list_move(&dc->list, &uncached_devices); 963 964 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); 965 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); 966 967 mutex_unlock(&bch_register_lock); 968 969 pr_info("Caching disabled for %s", dc->backing_dev_name); 970 971 /* Drop ref we took in cached_dev_detach() */ 972 closure_put(&dc->disk.cl); 973 } 974 975 void bch_cached_dev_detach(struct cached_dev *dc) 976 { 977 lockdep_assert_held(&bch_register_lock); 978 979 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 980 return; 981 982 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 983 return; 984 985 /* 986 * Block the device from being closed and freed until we're finished 987 * detaching 988 */ 989 closure_get(&dc->disk.cl); 990 991 bch_writeback_queue(dc); 992 993 cached_dev_put(dc); 994 } 995 996 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, 997 uint8_t *set_uuid) 998 { 999 uint32_t rtime = cpu_to_le32(get_seconds()); 1000 struct uuid_entry *u; 1001 struct cached_dev *exist_dc, *t; 1002 1003 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || 1004 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) 1005 return -ENOENT; 1006 1007 if (dc->disk.c) { 1008 pr_err("Can't attach %s: already attached", 1009 dc->backing_dev_name); 1010 return -EINVAL; 1011 } 1012 1013 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { 1014 pr_err("Can't attach %s: shutting down", 1015 dc->backing_dev_name); 1016 return -EINVAL; 1017 } 1018 1019 if (dc->sb.block_size < c->sb.block_size) { 1020 /* Will die */ 1021 pr_err("Couldn't attach %s: block size less than set's block size", 1022 dc->backing_dev_name); 1023 return -EINVAL; 1024 } 1025 1026 /* Check whether already attached */ 1027 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { 1028 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { 1029 pr_err("Tried to attach %s but duplicate UUID already attached", 1030 dc->backing_dev_name); 1031 1032 return -EINVAL; 1033 } 1034 } 1035 1036 u = uuid_find(c, dc->sb.uuid); 1037 1038 if (u && 1039 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || 1040 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { 1041 memcpy(u->uuid, invalid_uuid, 16); 1042 u->invalidated = cpu_to_le32(get_seconds()); 1043 u = NULL; 1044 } 1045 1046 if (!u) { 1047 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1048 pr_err("Couldn't find uuid for %s in set", 1049 dc->backing_dev_name); 1050 return -ENOENT; 1051 } 1052 1053 u = uuid_find_empty(c); 1054 if (!u) { 1055 pr_err("Not caching %s, no room for UUID", 1056 dc->backing_dev_name); 1057 return -EINVAL; 1058 } 1059 } 1060 1061 /* Deadlocks since we're called via sysfs... 1062 sysfs_remove_file(&dc->kobj, &sysfs_attach); 1063 */ 1064 1065 if (bch_is_zero(u->uuid, 16)) { 1066 struct closure cl; 1067 closure_init_stack(&cl); 1068 1069 memcpy(u->uuid, dc->sb.uuid, 16); 1070 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); 1071 u->first_reg = u->last_reg = rtime; 1072 bch_uuid_write(c); 1073 1074 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); 1075 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 1076 1077 bch_write_bdev_super(dc, &cl); 1078 closure_sync(&cl); 1079 } else { 1080 u->last_reg = rtime; 1081 bch_uuid_write(c); 1082 } 1083 1084 bcache_device_attach(&dc->disk, c, u - c->uuids); 1085 list_move(&dc->list, &c->cached_devs); 1086 calc_cached_dev_sectors(c); 1087 1088 smp_wmb(); 1089 /* 1090 * dc->c must be set before dc->count != 0 - paired with the mb in 1091 * cached_dev_get() 1092 */ 1093 refcount_set(&dc->count, 1); 1094 1095 /* Block writeback thread, but spawn it */ 1096 down_write(&dc->writeback_lock); 1097 if (bch_cached_dev_writeback_start(dc)) { 1098 up_write(&dc->writeback_lock); 1099 return -ENOMEM; 1100 } 1101 1102 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1103 bch_sectors_dirty_init(&dc->disk); 1104 atomic_set(&dc->has_dirty, 1); 1105 bch_writeback_queue(dc); 1106 } 1107 1108 bch_cached_dev_run(dc); 1109 bcache_device_link(&dc->disk, c, "bdev"); 1110 1111 /* Allow the writeback thread to proceed */ 1112 up_write(&dc->writeback_lock); 1113 1114 pr_info("Caching %s as %s on set %pU", 1115 dc->backing_dev_name, 1116 dc->disk.disk->disk_name, 1117 dc->disk.c->sb.set_uuid); 1118 return 0; 1119 } 1120 1121 void bch_cached_dev_release(struct kobject *kobj) 1122 { 1123 struct cached_dev *dc = container_of(kobj, struct cached_dev, 1124 disk.kobj); 1125 kfree(dc); 1126 module_put(THIS_MODULE); 1127 } 1128 1129 static void cached_dev_free(struct closure *cl) 1130 { 1131 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1132 1133 mutex_lock(&bch_register_lock); 1134 1135 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 1136 cancel_writeback_rate_update_dwork(dc); 1137 1138 if (!IS_ERR_OR_NULL(dc->writeback_thread)) 1139 kthread_stop(dc->writeback_thread); 1140 if (dc->writeback_write_wq) 1141 destroy_workqueue(dc->writeback_write_wq); 1142 1143 if (atomic_read(&dc->running)) 1144 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 1145 bcache_device_free(&dc->disk); 1146 list_del(&dc->list); 1147 1148 mutex_unlock(&bch_register_lock); 1149 1150 if (!IS_ERR_OR_NULL(dc->bdev)) 1151 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1152 1153 wake_up(&unregister_wait); 1154 1155 kobject_put(&dc->disk.kobj); 1156 } 1157 1158 static void cached_dev_flush(struct closure *cl) 1159 { 1160 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1161 struct bcache_device *d = &dc->disk; 1162 1163 mutex_lock(&bch_register_lock); 1164 bcache_device_unlink(d); 1165 mutex_unlock(&bch_register_lock); 1166 1167 bch_cache_accounting_destroy(&dc->accounting); 1168 kobject_del(&d->kobj); 1169 1170 continue_at(cl, cached_dev_free, system_wq); 1171 } 1172 1173 static int cached_dev_init(struct cached_dev *dc, unsigned block_size) 1174 { 1175 int ret; 1176 struct io *io; 1177 struct request_queue *q = bdev_get_queue(dc->bdev); 1178 1179 __module_get(THIS_MODULE); 1180 INIT_LIST_HEAD(&dc->list); 1181 closure_init(&dc->disk.cl, NULL); 1182 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); 1183 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1184 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1185 sema_init(&dc->sb_write_mutex, 1); 1186 INIT_LIST_HEAD(&dc->io_lru); 1187 spin_lock_init(&dc->io_lock); 1188 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); 1189 1190 dc->sequential_cutoff = 4 << 20; 1191 1192 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1193 list_add(&io->lru, &dc->io_lru); 1194 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1195 } 1196 1197 dc->disk.stripe_size = q->limits.io_opt >> 9; 1198 1199 if (dc->disk.stripe_size) 1200 dc->partial_stripes_expensive = 1201 q->limits.raid_partial_stripes_expensive; 1202 1203 ret = bcache_device_init(&dc->disk, block_size, 1204 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1205 if (ret) 1206 return ret; 1207 1208 dc->disk.disk->queue->backing_dev_info->ra_pages = 1209 max(dc->disk.disk->queue->backing_dev_info->ra_pages, 1210 q->backing_dev_info->ra_pages); 1211 1212 atomic_set(&dc->io_errors, 0); 1213 dc->io_disable = false; 1214 dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; 1215 /* default to auto */ 1216 dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; 1217 1218 bch_cached_dev_request_init(dc); 1219 bch_cached_dev_writeback_init(dc); 1220 return 0; 1221 } 1222 1223 /* Cached device - bcache superblock */ 1224 1225 static void register_bdev(struct cache_sb *sb, struct page *sb_page, 1226 struct block_device *bdev, 1227 struct cached_dev *dc) 1228 { 1229 const char *err = "cannot allocate memory"; 1230 struct cache_set *c; 1231 1232 bdevname(bdev, dc->backing_dev_name); 1233 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1234 dc->bdev = bdev; 1235 dc->bdev->bd_holder = dc; 1236 1237 bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1); 1238 bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; 1239 get_page(sb_page); 1240 1241 1242 if (cached_dev_init(dc, sb->block_size << 9)) 1243 goto err; 1244 1245 err = "error creating kobject"; 1246 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1247 "bcache")) 1248 goto err; 1249 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1250 goto err; 1251 1252 pr_info("registered backing device %s", dc->backing_dev_name); 1253 1254 list_add(&dc->list, &uncached_devices); 1255 list_for_each_entry(c, &bch_cache_sets, list) 1256 bch_cached_dev_attach(dc, c, NULL); 1257 1258 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1259 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1260 bch_cached_dev_run(dc); 1261 1262 return; 1263 err: 1264 pr_notice("error %s: %s", dc->backing_dev_name, err); 1265 bcache_device_stop(&dc->disk); 1266 } 1267 1268 /* Flash only volumes */ 1269 1270 void bch_flash_dev_release(struct kobject *kobj) 1271 { 1272 struct bcache_device *d = container_of(kobj, struct bcache_device, 1273 kobj); 1274 kfree(d); 1275 } 1276 1277 static void flash_dev_free(struct closure *cl) 1278 { 1279 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1280 mutex_lock(&bch_register_lock); 1281 bcache_device_free(d); 1282 mutex_unlock(&bch_register_lock); 1283 kobject_put(&d->kobj); 1284 } 1285 1286 static void flash_dev_flush(struct closure *cl) 1287 { 1288 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1289 1290 mutex_lock(&bch_register_lock); 1291 bcache_device_unlink(d); 1292 mutex_unlock(&bch_register_lock); 1293 kobject_del(&d->kobj); 1294 continue_at(cl, flash_dev_free, system_wq); 1295 } 1296 1297 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) 1298 { 1299 struct bcache_device *d = kzalloc(sizeof(struct bcache_device), 1300 GFP_KERNEL); 1301 if (!d) 1302 return -ENOMEM; 1303 1304 closure_init(&d->cl, NULL); 1305 set_closure_fn(&d->cl, flash_dev_flush, system_wq); 1306 1307 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1308 1309 if (bcache_device_init(d, block_bytes(c), u->sectors)) 1310 goto err; 1311 1312 bcache_device_attach(d, c, u - c->uuids); 1313 bch_sectors_dirty_init(d); 1314 bch_flash_dev_request_init(d); 1315 add_disk(d->disk); 1316 1317 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) 1318 goto err; 1319 1320 bcache_device_link(d, c, "volume"); 1321 1322 return 0; 1323 err: 1324 kobject_put(&d->kobj); 1325 return -ENOMEM; 1326 } 1327 1328 static int flash_devs_run(struct cache_set *c) 1329 { 1330 int ret = 0; 1331 struct uuid_entry *u; 1332 1333 for (u = c->uuids; 1334 u < c->uuids + c->nr_uuids && !ret; 1335 u++) 1336 if (UUID_FLASH_ONLY(u)) 1337 ret = flash_dev_run(c, u); 1338 1339 return ret; 1340 } 1341 1342 int bch_flash_dev_create(struct cache_set *c, uint64_t size) 1343 { 1344 struct uuid_entry *u; 1345 1346 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1347 return -EINTR; 1348 1349 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) 1350 return -EPERM; 1351 1352 u = uuid_find_empty(c); 1353 if (!u) { 1354 pr_err("Can't create volume, no room for UUID"); 1355 return -EINVAL; 1356 } 1357 1358 get_random_bytes(u->uuid, 16); 1359 memset(u->label, 0, 32); 1360 u->first_reg = u->last_reg = cpu_to_le32(get_seconds()); 1361 1362 SET_UUID_FLASH_ONLY(u, 1); 1363 u->sectors = size >> 9; 1364 1365 bch_uuid_write(c); 1366 1367 return flash_dev_run(c, u); 1368 } 1369 1370 bool bch_cached_dev_error(struct cached_dev *dc) 1371 { 1372 struct cache_set *c; 1373 1374 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1375 return false; 1376 1377 dc->io_disable = true; 1378 /* make others know io_disable is true earlier */ 1379 smp_mb(); 1380 1381 pr_err("stop %s: too many IO errors on backing device %s\n", 1382 dc->disk.disk->disk_name, dc->backing_dev_name); 1383 1384 /* 1385 * If the cached device is still attached to a cache set, 1386 * even dc->io_disable is true and no more I/O requests 1387 * accepted, cache device internal I/O (writeback scan or 1388 * garbage collection) may still prevent bcache device from 1389 * being stopped. So here CACHE_SET_IO_DISABLE should be 1390 * set to c->flags too, to make the internal I/O to cache 1391 * device rejected and stopped immediately. 1392 * If c is NULL, that means the bcache device is not attached 1393 * to any cache set, then no CACHE_SET_IO_DISABLE bit to set. 1394 */ 1395 c = dc->disk.c; 1396 if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1397 pr_info("CACHE_SET_IO_DISABLE already set"); 1398 1399 bcache_device_stop(&dc->disk); 1400 return true; 1401 } 1402 1403 /* Cache set */ 1404 1405 __printf(2, 3) 1406 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 1407 { 1408 va_list args; 1409 1410 if (c->on_error != ON_ERROR_PANIC && 1411 test_bit(CACHE_SET_STOPPING, &c->flags)) 1412 return false; 1413 1414 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1415 pr_info("CACHE_SET_IO_DISABLE already set"); 1416 1417 /* XXX: we can be called from atomic context 1418 acquire_console_sem(); 1419 */ 1420 1421 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); 1422 1423 va_start(args, fmt); 1424 vprintk(fmt, args); 1425 va_end(args); 1426 1427 printk(", disabling caching\n"); 1428 1429 if (c->on_error == ON_ERROR_PANIC) 1430 panic("panic forced after error\n"); 1431 1432 bch_cache_set_unregister(c); 1433 return true; 1434 } 1435 1436 void bch_cache_set_release(struct kobject *kobj) 1437 { 1438 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 1439 kfree(c); 1440 module_put(THIS_MODULE); 1441 } 1442 1443 static void cache_set_free(struct closure *cl) 1444 { 1445 struct cache_set *c = container_of(cl, struct cache_set, cl); 1446 struct cache *ca; 1447 unsigned i; 1448 1449 if (!IS_ERR_OR_NULL(c->debug)) 1450 debugfs_remove(c->debug); 1451 1452 bch_open_buckets_free(c); 1453 bch_btree_cache_free(c); 1454 bch_journal_free(c); 1455 1456 for_each_cache(ca, c, i) 1457 if (ca) { 1458 ca->set = NULL; 1459 c->cache[ca->sb.nr_this_dev] = NULL; 1460 kobject_put(&ca->kobj); 1461 } 1462 1463 bch_bset_sort_state_free(&c->sort); 1464 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); 1465 1466 if (c->moving_gc_wq) 1467 destroy_workqueue(c->moving_gc_wq); 1468 if (c->bio_split) 1469 bioset_free(c->bio_split); 1470 if (c->fill_iter) 1471 mempool_destroy(c->fill_iter); 1472 if (c->bio_meta) 1473 mempool_destroy(c->bio_meta); 1474 if (c->search) 1475 mempool_destroy(c->search); 1476 kfree(c->devices); 1477 1478 mutex_lock(&bch_register_lock); 1479 list_del(&c->list); 1480 mutex_unlock(&bch_register_lock); 1481 1482 pr_info("Cache set %pU unregistered", c->sb.set_uuid); 1483 wake_up(&unregister_wait); 1484 1485 closure_debug_destroy(&c->cl); 1486 kobject_put(&c->kobj); 1487 } 1488 1489 static void cache_set_flush(struct closure *cl) 1490 { 1491 struct cache_set *c = container_of(cl, struct cache_set, caching); 1492 struct cache *ca; 1493 struct btree *b; 1494 unsigned i; 1495 1496 bch_cache_accounting_destroy(&c->accounting); 1497 1498 kobject_put(&c->internal); 1499 kobject_del(&c->kobj); 1500 1501 if (c->gc_thread) 1502 kthread_stop(c->gc_thread); 1503 1504 if (!IS_ERR_OR_NULL(c->root)) 1505 list_add(&c->root->list, &c->btree_cache); 1506 1507 /* Should skip this if we're unregistering because of an error */ 1508 list_for_each_entry(b, &c->btree_cache, list) { 1509 mutex_lock(&b->write_lock); 1510 if (btree_node_dirty(b)) 1511 __bch_btree_node_write(b, NULL); 1512 mutex_unlock(&b->write_lock); 1513 } 1514 1515 for_each_cache(ca, c, i) 1516 if (ca->alloc_thread) 1517 kthread_stop(ca->alloc_thread); 1518 1519 if (c->journal.cur) { 1520 cancel_delayed_work_sync(&c->journal.work); 1521 /* flush last journal entry if needed */ 1522 c->journal.work.work.func(&c->journal.work.work); 1523 } 1524 1525 closure_return(cl); 1526 } 1527 1528 /* 1529 * This function is only called when CACHE_SET_IO_DISABLE is set, which means 1530 * cache set is unregistering due to too many I/O errors. In this condition, 1531 * the bcache device might be stopped, it depends on stop_when_cache_set_failed 1532 * value and whether the broken cache has dirty data: 1533 * 1534 * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device 1535 * BCH_CACHED_STOP_AUTO 0 NO 1536 * BCH_CACHED_STOP_AUTO 1 YES 1537 * BCH_CACHED_DEV_STOP_ALWAYS 0 YES 1538 * BCH_CACHED_DEV_STOP_ALWAYS 1 YES 1539 * 1540 * The expected behavior is, if stop_when_cache_set_failed is configured to 1541 * "auto" via sysfs interface, the bcache device will not be stopped if the 1542 * backing device is clean on the broken cache device. 1543 */ 1544 static void conditional_stop_bcache_device(struct cache_set *c, 1545 struct bcache_device *d, 1546 struct cached_dev *dc) 1547 { 1548 if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { 1549 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.", 1550 d->disk->disk_name, c->sb.set_uuid); 1551 bcache_device_stop(d); 1552 } else if (atomic_read(&dc->has_dirty)) { 1553 /* 1554 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO 1555 * and dc->has_dirty == 1 1556 */ 1557 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.", 1558 d->disk->disk_name); 1559 /* 1560 * There might be a small time gap that cache set is 1561 * released but bcache device is not. Inside this time 1562 * gap, regular I/O requests will directly go into 1563 * backing device as no cache set attached to. This 1564 * behavior may also introduce potential inconsistence 1565 * data in writeback mode while cache is dirty. 1566 * Therefore before calling bcache_device_stop() due 1567 * to a broken cache device, dc->io_disable should be 1568 * explicitly set to true. 1569 */ 1570 dc->io_disable = true; 1571 /* make others know io_disable is true earlier */ 1572 smp_mb(); 1573 bcache_device_stop(d); 1574 } else { 1575 /* 1576 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO 1577 * and dc->has_dirty == 0 1578 */ 1579 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.", 1580 d->disk->disk_name); 1581 } 1582 } 1583 1584 static void __cache_set_unregister(struct closure *cl) 1585 { 1586 struct cache_set *c = container_of(cl, struct cache_set, caching); 1587 struct cached_dev *dc; 1588 struct bcache_device *d; 1589 size_t i; 1590 1591 mutex_lock(&bch_register_lock); 1592 1593 for (i = 0; i < c->devices_max_used; i++) { 1594 d = c->devices[i]; 1595 if (!d) 1596 continue; 1597 1598 if (!UUID_FLASH_ONLY(&c->uuids[i]) && 1599 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { 1600 dc = container_of(d, struct cached_dev, disk); 1601 bch_cached_dev_detach(dc); 1602 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1603 conditional_stop_bcache_device(c, d, dc); 1604 } else { 1605 bcache_device_stop(d); 1606 } 1607 } 1608 1609 mutex_unlock(&bch_register_lock); 1610 1611 continue_at(cl, cache_set_flush, system_wq); 1612 } 1613 1614 void bch_cache_set_stop(struct cache_set *c) 1615 { 1616 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) 1617 closure_queue(&c->caching); 1618 } 1619 1620 void bch_cache_set_unregister(struct cache_set *c) 1621 { 1622 set_bit(CACHE_SET_UNREGISTERING, &c->flags); 1623 bch_cache_set_stop(c); 1624 } 1625 1626 #define alloc_bucket_pages(gfp, c) \ 1627 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) 1628 1629 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) 1630 { 1631 int iter_size; 1632 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); 1633 if (!c) 1634 return NULL; 1635 1636 __module_get(THIS_MODULE); 1637 closure_init(&c->cl, NULL); 1638 set_closure_fn(&c->cl, cache_set_free, system_wq); 1639 1640 closure_init(&c->caching, &c->cl); 1641 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); 1642 1643 /* Maybe create continue_at_noreturn() and use it here? */ 1644 closure_set_stopped(&c->cl); 1645 closure_put(&c->cl); 1646 1647 kobject_init(&c->kobj, &bch_cache_set_ktype); 1648 kobject_init(&c->internal, &bch_cache_set_internal_ktype); 1649 1650 bch_cache_accounting_init(&c->accounting, &c->cl); 1651 1652 memcpy(c->sb.set_uuid, sb->set_uuid, 16); 1653 c->sb.block_size = sb->block_size; 1654 c->sb.bucket_size = sb->bucket_size; 1655 c->sb.nr_in_set = sb->nr_in_set; 1656 c->sb.last_mount = sb->last_mount; 1657 c->bucket_bits = ilog2(sb->bucket_size); 1658 c->block_bits = ilog2(sb->block_size); 1659 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); 1660 c->devices_max_used = 0; 1661 c->btree_pages = bucket_pages(c); 1662 if (c->btree_pages > BTREE_MAX_PAGES) 1663 c->btree_pages = max_t(int, c->btree_pages / 4, 1664 BTREE_MAX_PAGES); 1665 1666 sema_init(&c->sb_write_mutex, 1); 1667 mutex_init(&c->bucket_lock); 1668 init_waitqueue_head(&c->btree_cache_wait); 1669 init_waitqueue_head(&c->bucket_wait); 1670 init_waitqueue_head(&c->gc_wait); 1671 sema_init(&c->uuid_write_mutex, 1); 1672 1673 spin_lock_init(&c->btree_gc_time.lock); 1674 spin_lock_init(&c->btree_split_time.lock); 1675 spin_lock_init(&c->btree_read_time.lock); 1676 1677 bch_moving_init_cache_set(c); 1678 1679 INIT_LIST_HEAD(&c->list); 1680 INIT_LIST_HEAD(&c->cached_devs); 1681 INIT_LIST_HEAD(&c->btree_cache); 1682 INIT_LIST_HEAD(&c->btree_cache_freeable); 1683 INIT_LIST_HEAD(&c->btree_cache_freed); 1684 INIT_LIST_HEAD(&c->data_buckets); 1685 1686 c->search = mempool_create_slab_pool(32, bch_search_cache); 1687 if (!c->search) 1688 goto err; 1689 1690 iter_size = (sb->bucket_size / sb->block_size + 1) * 1691 sizeof(struct btree_iter_set); 1692 1693 if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) || 1694 !(c->bio_meta = mempool_create_kmalloc_pool(2, 1695 sizeof(struct bbio) + sizeof(struct bio_vec) * 1696 bucket_pages(c))) || 1697 !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || 1698 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio), 1699 BIOSET_NEED_BVECS | 1700 BIOSET_NEED_RESCUER)) || 1701 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || 1702 !(c->moving_gc_wq = alloc_workqueue("bcache_gc", 1703 WQ_MEM_RECLAIM, 0)) || 1704 bch_journal_alloc(c) || 1705 bch_btree_cache_alloc(c) || 1706 bch_open_buckets_alloc(c) || 1707 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) 1708 goto err; 1709 1710 c->congested_read_threshold_us = 2000; 1711 c->congested_write_threshold_us = 20000; 1712 c->error_limit = DEFAULT_IO_ERROR_LIMIT; 1713 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); 1714 1715 return c; 1716 err: 1717 bch_cache_set_unregister(c); 1718 return NULL; 1719 } 1720 1721 static void run_cache_set(struct cache_set *c) 1722 { 1723 const char *err = "cannot allocate memory"; 1724 struct cached_dev *dc, *t; 1725 struct cache *ca; 1726 struct closure cl; 1727 unsigned i; 1728 1729 closure_init_stack(&cl); 1730 1731 for_each_cache(ca, c, i) 1732 c->nbuckets += ca->sb.nbuckets; 1733 set_gc_sectors(c); 1734 1735 if (CACHE_SYNC(&c->sb)) { 1736 LIST_HEAD(journal); 1737 struct bkey *k; 1738 struct jset *j; 1739 1740 err = "cannot allocate memory for journal"; 1741 if (bch_journal_read(c, &journal)) 1742 goto err; 1743 1744 pr_debug("btree_journal_read() done"); 1745 1746 err = "no journal entries found"; 1747 if (list_empty(&journal)) 1748 goto err; 1749 1750 j = &list_entry(journal.prev, struct journal_replay, list)->j; 1751 1752 err = "IO error reading priorities"; 1753 for_each_cache(ca, c, i) 1754 prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]); 1755 1756 /* 1757 * If prio_read() fails it'll call cache_set_error and we'll 1758 * tear everything down right away, but if we perhaps checked 1759 * sooner we could avoid journal replay. 1760 */ 1761 1762 k = &j->btree_root; 1763 1764 err = "bad btree root"; 1765 if (__bch_btree_ptr_invalid(c, k)) 1766 goto err; 1767 1768 err = "error reading btree root"; 1769 c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); 1770 if (IS_ERR_OR_NULL(c->root)) 1771 goto err; 1772 1773 list_del_init(&c->root->list); 1774 rw_unlock(true, c->root); 1775 1776 err = uuid_read(c, j, &cl); 1777 if (err) 1778 goto err; 1779 1780 err = "error in recovery"; 1781 if (bch_btree_check(c)) 1782 goto err; 1783 1784 bch_journal_mark(c, &journal); 1785 bch_initial_gc_finish(c); 1786 pr_debug("btree_check() done"); 1787 1788 /* 1789 * bcache_journal_next() can't happen sooner, or 1790 * btree_gc_finish() will give spurious errors about last_gc > 1791 * gc_gen - this is a hack but oh well. 1792 */ 1793 bch_journal_next(&c->journal); 1794 1795 err = "error starting allocator thread"; 1796 for_each_cache(ca, c, i) 1797 if (bch_cache_allocator_start(ca)) 1798 goto err; 1799 1800 /* 1801 * First place it's safe to allocate: btree_check() and 1802 * btree_gc_finish() have to run before we have buckets to 1803 * allocate, and bch_bucket_alloc_set() might cause a journal 1804 * entry to be written so bcache_journal_next() has to be called 1805 * first. 1806 * 1807 * If the uuids were in the old format we have to rewrite them 1808 * before the next journal entry is written: 1809 */ 1810 if (j->version < BCACHE_JSET_VERSION_UUID) 1811 __uuid_write(c); 1812 1813 bch_journal_replay(c, &journal); 1814 } else { 1815 pr_notice("invalidating existing data"); 1816 1817 for_each_cache(ca, c, i) { 1818 unsigned j; 1819 1820 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 1821 2, SB_JOURNAL_BUCKETS); 1822 1823 for (j = 0; j < ca->sb.keys; j++) 1824 ca->sb.d[j] = ca->sb.first_bucket + j; 1825 } 1826 1827 bch_initial_gc_finish(c); 1828 1829 err = "error starting allocator thread"; 1830 for_each_cache(ca, c, i) 1831 if (bch_cache_allocator_start(ca)) 1832 goto err; 1833 1834 mutex_lock(&c->bucket_lock); 1835 for_each_cache(ca, c, i) 1836 bch_prio_write(ca); 1837 mutex_unlock(&c->bucket_lock); 1838 1839 err = "cannot allocate new UUID bucket"; 1840 if (__uuid_write(c)) 1841 goto err; 1842 1843 err = "cannot allocate new btree root"; 1844 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); 1845 if (IS_ERR_OR_NULL(c->root)) 1846 goto err; 1847 1848 mutex_lock(&c->root->write_lock); 1849 bkey_copy_key(&c->root->key, &MAX_KEY); 1850 bch_btree_node_write(c->root, &cl); 1851 mutex_unlock(&c->root->write_lock); 1852 1853 bch_btree_set_root(c->root); 1854 rw_unlock(true, c->root); 1855 1856 /* 1857 * We don't want to write the first journal entry until 1858 * everything is set up - fortunately journal entries won't be 1859 * written until the SET_CACHE_SYNC() here: 1860 */ 1861 SET_CACHE_SYNC(&c->sb, true); 1862 1863 bch_journal_next(&c->journal); 1864 bch_journal_meta(c, &cl); 1865 } 1866 1867 err = "error starting gc thread"; 1868 if (bch_gc_thread_start(c)) 1869 goto err; 1870 1871 closure_sync(&cl); 1872 c->sb.last_mount = get_seconds(); 1873 bcache_write_super(c); 1874 1875 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1876 bch_cached_dev_attach(dc, c, NULL); 1877 1878 flash_devs_run(c); 1879 1880 set_bit(CACHE_SET_RUNNING, &c->flags); 1881 return; 1882 err: 1883 closure_sync(&cl); 1884 /* XXX: test this, it's broken */ 1885 bch_cache_set_error(c, "%s", err); 1886 } 1887 1888 static bool can_attach_cache(struct cache *ca, struct cache_set *c) 1889 { 1890 return ca->sb.block_size == c->sb.block_size && 1891 ca->sb.bucket_size == c->sb.bucket_size && 1892 ca->sb.nr_in_set == c->sb.nr_in_set; 1893 } 1894 1895 static const char *register_cache_set(struct cache *ca) 1896 { 1897 char buf[12]; 1898 const char *err = "cannot allocate memory"; 1899 struct cache_set *c; 1900 1901 list_for_each_entry(c, &bch_cache_sets, list) 1902 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { 1903 if (c->cache[ca->sb.nr_this_dev]) 1904 return "duplicate cache set member"; 1905 1906 if (!can_attach_cache(ca, c)) 1907 return "cache sb does not match set"; 1908 1909 if (!CACHE_SYNC(&ca->sb)) 1910 SET_CACHE_SYNC(&c->sb, false); 1911 1912 goto found; 1913 } 1914 1915 c = bch_cache_set_alloc(&ca->sb); 1916 if (!c) 1917 return err; 1918 1919 err = "error creating kobject"; 1920 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || 1921 kobject_add(&c->internal, &c->kobj, "internal")) 1922 goto err; 1923 1924 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) 1925 goto err; 1926 1927 bch_debug_init_cache_set(c); 1928 1929 list_add(&c->list, &bch_cache_sets); 1930 found: 1931 sprintf(buf, "cache%i", ca->sb.nr_this_dev); 1932 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || 1933 sysfs_create_link(&c->kobj, &ca->kobj, buf)) 1934 goto err; 1935 1936 if (ca->sb.seq > c->sb.seq) { 1937 c->sb.version = ca->sb.version; 1938 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); 1939 c->sb.flags = ca->sb.flags; 1940 c->sb.seq = ca->sb.seq; 1941 pr_debug("set version = %llu", c->sb.version); 1942 } 1943 1944 kobject_get(&ca->kobj); 1945 ca->set = c; 1946 ca->set->cache[ca->sb.nr_this_dev] = ca; 1947 c->cache_by_alloc[c->caches_loaded++] = ca; 1948 1949 if (c->caches_loaded == c->sb.nr_in_set) 1950 run_cache_set(c); 1951 1952 return NULL; 1953 err: 1954 bch_cache_set_unregister(c); 1955 return err; 1956 } 1957 1958 /* Cache device */ 1959 1960 void bch_cache_release(struct kobject *kobj) 1961 { 1962 struct cache *ca = container_of(kobj, struct cache, kobj); 1963 unsigned i; 1964 1965 if (ca->set) { 1966 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); 1967 ca->set->cache[ca->sb.nr_this_dev] = NULL; 1968 } 1969 1970 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); 1971 kfree(ca->prio_buckets); 1972 vfree(ca->buckets); 1973 1974 free_heap(&ca->heap); 1975 free_fifo(&ca->free_inc); 1976 1977 for (i = 0; i < RESERVE_NR; i++) 1978 free_fifo(&ca->free[i]); 1979 1980 if (ca->sb_bio.bi_inline_vecs[0].bv_page) 1981 put_page(bio_first_page_all(&ca->sb_bio)); 1982 1983 if (!IS_ERR_OR_NULL(ca->bdev)) 1984 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1985 1986 kfree(ca); 1987 module_put(THIS_MODULE); 1988 } 1989 1990 static int cache_alloc(struct cache *ca) 1991 { 1992 size_t free; 1993 size_t btree_buckets; 1994 struct bucket *b; 1995 1996 __module_get(THIS_MODULE); 1997 kobject_init(&ca->kobj, &bch_cache_ktype); 1998 1999 bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); 2000 2001 /* 2002 * when ca->sb.njournal_buckets is not zero, journal exists, 2003 * and in bch_journal_replay(), tree node may split, 2004 * so bucket of RESERVE_BTREE type is needed, 2005 * the worst situation is all journal buckets are valid journal, 2006 * and all the keys need to replay, 2007 * so the number of RESERVE_BTREE type buckets should be as much 2008 * as journal buckets 2009 */ 2010 btree_buckets = ca->sb.njournal_buckets ?: 8; 2011 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; 2012 2013 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) || 2014 !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || 2015 !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || 2016 !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || 2017 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || 2018 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || 2019 !(ca->buckets = vzalloc(sizeof(struct bucket) * 2020 ca->sb.nbuckets)) || 2021 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 2022 2, GFP_KERNEL)) || 2023 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca))) 2024 return -ENOMEM; 2025 2026 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 2027 2028 for_each_bucket(b, ca) 2029 atomic_set(&b->pin, 0); 2030 2031 return 0; 2032 } 2033 2034 static int register_cache(struct cache_sb *sb, struct page *sb_page, 2035 struct block_device *bdev, struct cache *ca) 2036 { 2037 const char *err = NULL; /* must be set for any error case */ 2038 int ret = 0; 2039 2040 bdevname(bdev, ca->cache_dev_name); 2041 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); 2042 ca->bdev = bdev; 2043 ca->bdev->bd_holder = ca; 2044 2045 bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1); 2046 bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; 2047 get_page(sb_page); 2048 2049 if (blk_queue_discard(bdev_get_queue(bdev))) 2050 ca->discard = CACHE_DISCARD(&ca->sb); 2051 2052 ret = cache_alloc(ca); 2053 if (ret != 0) { 2054 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2055 if (ret == -ENOMEM) 2056 err = "cache_alloc(): -ENOMEM"; 2057 else 2058 err = "cache_alloc(): unknown error"; 2059 goto err; 2060 } 2061 2062 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { 2063 err = "error calling kobject_add"; 2064 ret = -ENOMEM; 2065 goto out; 2066 } 2067 2068 mutex_lock(&bch_register_lock); 2069 err = register_cache_set(ca); 2070 mutex_unlock(&bch_register_lock); 2071 2072 if (err) { 2073 ret = -ENODEV; 2074 goto out; 2075 } 2076 2077 pr_info("registered cache device %s", ca->cache_dev_name); 2078 2079 out: 2080 kobject_put(&ca->kobj); 2081 2082 err: 2083 if (err) 2084 pr_notice("error %s: %s", ca->cache_dev_name, err); 2085 2086 return ret; 2087 } 2088 2089 /* Global interfaces/init */ 2090 2091 static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, 2092 const char *, size_t); 2093 2094 kobj_attribute_write(register, register_bcache); 2095 kobj_attribute_write(register_quiet, register_bcache); 2096 2097 static bool bch_is_open_backing(struct block_device *bdev) { 2098 struct cache_set *c, *tc; 2099 struct cached_dev *dc, *t; 2100 2101 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2102 list_for_each_entry_safe(dc, t, &c->cached_devs, list) 2103 if (dc->bdev == bdev) 2104 return true; 2105 list_for_each_entry_safe(dc, t, &uncached_devices, list) 2106 if (dc->bdev == bdev) 2107 return true; 2108 return false; 2109 } 2110 2111 static bool bch_is_open_cache(struct block_device *bdev) { 2112 struct cache_set *c, *tc; 2113 struct cache *ca; 2114 unsigned i; 2115 2116 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2117 for_each_cache(ca, c, i) 2118 if (ca->bdev == bdev) 2119 return true; 2120 return false; 2121 } 2122 2123 static bool bch_is_open(struct block_device *bdev) { 2124 return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); 2125 } 2126 2127 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, 2128 const char *buffer, size_t size) 2129 { 2130 ssize_t ret = size; 2131 const char *err = "cannot allocate memory"; 2132 char *path = NULL; 2133 struct cache_sb *sb = NULL; 2134 struct block_device *bdev = NULL; 2135 struct page *sb_page = NULL; 2136 2137 if (!try_module_get(THIS_MODULE)) 2138 return -EBUSY; 2139 2140 if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || 2141 !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) 2142 goto err; 2143 2144 err = "failed to open device"; 2145 bdev = blkdev_get_by_path(strim(path), 2146 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2147 sb); 2148 if (IS_ERR(bdev)) { 2149 if (bdev == ERR_PTR(-EBUSY)) { 2150 bdev = lookup_bdev(strim(path)); 2151 mutex_lock(&bch_register_lock); 2152 if (!IS_ERR(bdev) && bch_is_open(bdev)) 2153 err = "device already registered"; 2154 else 2155 err = "device busy"; 2156 mutex_unlock(&bch_register_lock); 2157 if (!IS_ERR(bdev)) 2158 bdput(bdev); 2159 if (attr == &ksysfs_register_quiet) 2160 goto out; 2161 } 2162 goto err; 2163 } 2164 2165 err = "failed to set blocksize"; 2166 if (set_blocksize(bdev, 4096)) 2167 goto err_close; 2168 2169 err = read_super(sb, bdev, &sb_page); 2170 if (err) 2171 goto err_close; 2172 2173 err = "failed to register device"; 2174 if (SB_IS_BDEV(sb)) { 2175 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 2176 if (!dc) 2177 goto err_close; 2178 2179 mutex_lock(&bch_register_lock); 2180 register_bdev(sb, sb_page, bdev, dc); 2181 mutex_unlock(&bch_register_lock); 2182 } else { 2183 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); 2184 if (!ca) 2185 goto err_close; 2186 2187 if (register_cache(sb, sb_page, bdev, ca) != 0) 2188 goto err; 2189 } 2190 out: 2191 if (sb_page) 2192 put_page(sb_page); 2193 kfree(sb); 2194 kfree(path); 2195 module_put(THIS_MODULE); 2196 return ret; 2197 2198 err_close: 2199 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2200 err: 2201 pr_info("error %s: %s", path, err); 2202 ret = -EINVAL; 2203 goto out; 2204 } 2205 2206 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) 2207 { 2208 if (code == SYS_DOWN || 2209 code == SYS_HALT || 2210 code == SYS_POWER_OFF) { 2211 DEFINE_WAIT(wait); 2212 unsigned long start = jiffies; 2213 bool stopped = false; 2214 2215 struct cache_set *c, *tc; 2216 struct cached_dev *dc, *tdc; 2217 2218 mutex_lock(&bch_register_lock); 2219 2220 if (list_empty(&bch_cache_sets) && 2221 list_empty(&uncached_devices)) 2222 goto out; 2223 2224 pr_info("Stopping all devices:"); 2225 2226 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2227 bch_cache_set_stop(c); 2228 2229 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) 2230 bcache_device_stop(&dc->disk); 2231 2232 /* What's a condition variable? */ 2233 while (1) { 2234 long timeout = start + 2 * HZ - jiffies; 2235 2236 stopped = list_empty(&bch_cache_sets) && 2237 list_empty(&uncached_devices); 2238 2239 if (timeout < 0 || stopped) 2240 break; 2241 2242 prepare_to_wait(&unregister_wait, &wait, 2243 TASK_UNINTERRUPTIBLE); 2244 2245 mutex_unlock(&bch_register_lock); 2246 schedule_timeout(timeout); 2247 mutex_lock(&bch_register_lock); 2248 } 2249 2250 finish_wait(&unregister_wait, &wait); 2251 2252 if (stopped) 2253 pr_info("All devices stopped"); 2254 else 2255 pr_notice("Timeout waiting for devices to be closed"); 2256 out: 2257 mutex_unlock(&bch_register_lock); 2258 } 2259 2260 return NOTIFY_DONE; 2261 } 2262 2263 static struct notifier_block reboot = { 2264 .notifier_call = bcache_reboot, 2265 .priority = INT_MAX, /* before any real devices */ 2266 }; 2267 2268 static void bcache_exit(void) 2269 { 2270 bch_debug_exit(); 2271 bch_request_exit(); 2272 if (bcache_kobj) 2273 kobject_put(bcache_kobj); 2274 if (bcache_wq) 2275 destroy_workqueue(bcache_wq); 2276 if (bcache_major) 2277 unregister_blkdev(bcache_major, "bcache"); 2278 unregister_reboot_notifier(&reboot); 2279 mutex_destroy(&bch_register_lock); 2280 } 2281 2282 static int __init bcache_init(void) 2283 { 2284 static const struct attribute *files[] = { 2285 &ksysfs_register.attr, 2286 &ksysfs_register_quiet.attr, 2287 NULL 2288 }; 2289 2290 mutex_init(&bch_register_lock); 2291 init_waitqueue_head(&unregister_wait); 2292 register_reboot_notifier(&reboot); 2293 2294 bcache_major = register_blkdev(0, "bcache"); 2295 if (bcache_major < 0) { 2296 unregister_reboot_notifier(&reboot); 2297 mutex_destroy(&bch_register_lock); 2298 return bcache_major; 2299 } 2300 2301 if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || 2302 !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || 2303 bch_request_init() || 2304 bch_debug_init(bcache_kobj) || closure_debug_init() || 2305 sysfs_create_files(bcache_kobj, files)) 2306 goto err; 2307 2308 return 0; 2309 err: 2310 bcache_exit(); 2311 return -ENOMEM; 2312 } 2313 2314 module_exit(bcache_exit); 2315 module_init(bcache_init); 2316