1 /* 2 * bcache setup/teardown code, and some metadata io - read a superblock and 3 * figure out what to do with it. 4 * 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 * Copyright 2012 Google, Inc. 7 */ 8 9 #include "bcache.h" 10 #include "btree.h" 11 #include "debug.h" 12 #include "extents.h" 13 #include "request.h" 14 #include "writeback.h" 15 16 #include <linux/blkdev.h> 17 #include <linux/buffer_head.h> 18 #include <linux/debugfs.h> 19 #include <linux/genhd.h> 20 #include <linux/idr.h> 21 #include <linux/kthread.h> 22 #include <linux/module.h> 23 #include <linux/random.h> 24 #include <linux/reboot.h> 25 #include <linux/sysfs.h> 26 27 MODULE_LICENSE("GPL"); 28 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); 29 30 static const char bcache_magic[] = { 31 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, 32 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 33 }; 34 35 static const char invalid_uuid[] = { 36 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, 37 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 38 }; 39 40 static struct kobject *bcache_kobj; 41 struct mutex bch_register_lock; 42 LIST_HEAD(bch_cache_sets); 43 static LIST_HEAD(uncached_devices); 44 45 static int bcache_major; 46 static DEFINE_IDA(bcache_device_idx); 47 static wait_queue_head_t unregister_wait; 48 struct workqueue_struct *bcache_wq; 49 50 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 51 /* limitation of partitions number on single bcache device */ 52 #define BCACHE_MINORS 128 53 /* limitation of bcache devices number on single system */ 54 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS) 55 56 /* Superblock */ 57 58 static const char *read_super(struct cache_sb *sb, struct block_device *bdev, 59 struct page **res) 60 { 61 const char *err; 62 struct cache_sb *s; 63 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); 64 unsigned int i; 65 66 if (!bh) 67 return "IO error"; 68 69 s = (struct cache_sb *) bh->b_data; 70 71 sb->offset = le64_to_cpu(s->offset); 72 sb->version = le64_to_cpu(s->version); 73 74 memcpy(sb->magic, s->magic, 16); 75 memcpy(sb->uuid, s->uuid, 16); 76 memcpy(sb->set_uuid, s->set_uuid, 16); 77 memcpy(sb->label, s->label, SB_LABEL_SIZE); 78 79 sb->flags = le64_to_cpu(s->flags); 80 sb->seq = le64_to_cpu(s->seq); 81 sb->last_mount = le32_to_cpu(s->last_mount); 82 sb->first_bucket = le16_to_cpu(s->first_bucket); 83 sb->keys = le16_to_cpu(s->keys); 84 85 for (i = 0; i < SB_JOURNAL_BUCKETS; i++) 86 sb->d[i] = le64_to_cpu(s->d[i]); 87 88 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u", 89 sb->version, sb->flags, sb->seq, sb->keys); 90 91 err = "Not a bcache superblock"; 92 if (sb->offset != SB_SECTOR) 93 goto err; 94 95 if (memcmp(sb->magic, bcache_magic, 16)) 96 goto err; 97 98 err = "Too many journal buckets"; 99 if (sb->keys > SB_JOURNAL_BUCKETS) 100 goto err; 101 102 err = "Bad checksum"; 103 if (s->csum != csum_set(s)) 104 goto err; 105 106 err = "Bad UUID"; 107 if (bch_is_zero(sb->uuid, 16)) 108 goto err; 109 110 sb->block_size = le16_to_cpu(s->block_size); 111 112 err = "Superblock block size smaller than device block size"; 113 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) 114 goto err; 115 116 switch (sb->version) { 117 case BCACHE_SB_VERSION_BDEV: 118 sb->data_offset = BDEV_DATA_START_DEFAULT; 119 break; 120 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: 121 sb->data_offset = le64_to_cpu(s->data_offset); 122 123 err = "Bad data offset"; 124 if (sb->data_offset < BDEV_DATA_START_DEFAULT) 125 goto err; 126 127 break; 128 
case BCACHE_SB_VERSION_CDEV: 129 case BCACHE_SB_VERSION_CDEV_WITH_UUID: 130 sb->nbuckets = le64_to_cpu(s->nbuckets); 131 sb->bucket_size = le16_to_cpu(s->bucket_size); 132 133 sb->nr_in_set = le16_to_cpu(s->nr_in_set); 134 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); 135 136 err = "Too many buckets"; 137 if (sb->nbuckets > LONG_MAX) 138 goto err; 139 140 err = "Not enough buckets"; 141 if (sb->nbuckets < 1 << 7) 142 goto err; 143 144 err = "Bad block/bucket size"; 145 if (!is_power_of_2(sb->block_size) || 146 sb->block_size > PAGE_SECTORS || 147 !is_power_of_2(sb->bucket_size) || 148 sb->bucket_size < PAGE_SECTORS) 149 goto err; 150 151 err = "Invalid superblock: device too small"; 152 if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets) 153 goto err; 154 155 err = "Bad UUID"; 156 if (bch_is_zero(sb->set_uuid, 16)) 157 goto err; 158 159 err = "Bad cache device number in set"; 160 if (!sb->nr_in_set || 161 sb->nr_in_set <= sb->nr_this_dev || 162 sb->nr_in_set > MAX_CACHES_PER_SET) 163 goto err; 164 165 err = "Journal buckets not sequential"; 166 for (i = 0; i < sb->keys; i++) 167 if (sb->d[i] != sb->first_bucket + i) 168 goto err; 169 170 err = "Too many journal buckets"; 171 if (sb->first_bucket + sb->keys > sb->nbuckets) 172 goto err; 173 174 err = "Invalid superblock: first bucket comes before end of super"; 175 if (sb->first_bucket * sb->bucket_size < 16) 176 goto err; 177 178 break; 179 default: 180 err = "Unsupported superblock version"; 181 goto err; 182 } 183 184 sb->last_mount = (u32)ktime_get_real_seconds(); 185 err = NULL; 186 187 get_page(bh->b_page); 188 *res = bh->b_page; 189 err: 190 put_bh(bh); 191 return err; 192 } 193 194 static void write_bdev_super_endio(struct bio *bio) 195 { 196 struct cached_dev *dc = bio->bi_private; 197 /* XXX: error checking */ 198 199 closure_put(&dc->sb_write); 200 } 201 202 static void __write_super(struct cache_sb *sb, struct bio *bio) 203 { 204 struct cache_sb *out = page_address(bio_first_page_all(bio)); 205 unsigned int i; 206 207 bio->bi_iter.bi_sector = SB_SECTOR; 208 bio->bi_iter.bi_size = SB_SIZE; 209 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); 210 bch_bio_map(bio, NULL); 211 212 out->offset = cpu_to_le64(sb->offset); 213 out->version = cpu_to_le64(sb->version); 214 215 memcpy(out->uuid, sb->uuid, 16); 216 memcpy(out->set_uuid, sb->set_uuid, 16); 217 memcpy(out->label, sb->label, SB_LABEL_SIZE); 218 219 out->flags = cpu_to_le64(sb->flags); 220 out->seq = cpu_to_le64(sb->seq); 221 222 out->last_mount = cpu_to_le32(sb->last_mount); 223 out->first_bucket = cpu_to_le16(sb->first_bucket); 224 out->keys = cpu_to_le16(sb->keys); 225 226 for (i = 0; i < sb->keys; i++) 227 out->d[i] = cpu_to_le64(sb->d[i]); 228 229 out->csum = csum_set(out); 230 231 pr_debug("ver %llu, flags %llu, seq %llu", 232 sb->version, sb->flags, sb->seq); 233 234 submit_bio(bio); 235 } 236 237 static void bch_write_bdev_super_unlock(struct closure *cl) 238 { 239 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); 240 241 up(&dc->sb_write_mutex); 242 } 243 244 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) 245 { 246 struct closure *cl = &dc->sb_write; 247 struct bio *bio = &dc->sb_bio; 248 249 down(&dc->sb_write_mutex); 250 closure_init(cl, parent); 251 252 bio_reset(bio); 253 bio_set_dev(bio, dc->bdev); 254 bio->bi_end_io = write_bdev_super_endio; 255 bio->bi_private = dc; 256 257 closure_get(cl); 258 /* I/O request sent to backing device */ 259 __write_super(&dc->sb, bio); 260 261 
closure_return_with_destructor(cl, bch_write_bdev_super_unlock); 262 } 263 264 static void write_super_endio(struct bio *bio) 265 { 266 struct cache *ca = bio->bi_private; 267 268 /* is_read = 0 */ 269 bch_count_io_errors(ca, bio->bi_status, 0, 270 "writing superblock"); 271 closure_put(&ca->set->sb_write); 272 } 273 274 static void bcache_write_super_unlock(struct closure *cl) 275 { 276 struct cache_set *c = container_of(cl, struct cache_set, sb_write); 277 278 up(&c->sb_write_mutex); 279 } 280 281 void bcache_write_super(struct cache_set *c) 282 { 283 struct closure *cl = &c->sb_write; 284 struct cache *ca; 285 unsigned int i; 286 287 down(&c->sb_write_mutex); 288 closure_init(cl, &c->cl); 289 290 c->sb.seq++; 291 292 for_each_cache(ca, c, i) { 293 struct bio *bio = &ca->sb_bio; 294 295 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 296 ca->sb.seq = c->sb.seq; 297 ca->sb.last_mount = c->sb.last_mount; 298 299 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 300 301 bio_reset(bio); 302 bio_set_dev(bio, ca->bdev); 303 bio->bi_end_io = write_super_endio; 304 bio->bi_private = ca; 305 306 closure_get(cl); 307 __write_super(&ca->sb, bio); 308 } 309 310 closure_return_with_destructor(cl, bcache_write_super_unlock); 311 } 312 313 /* UUID io */ 314 315 static void uuid_endio(struct bio *bio) 316 { 317 struct closure *cl = bio->bi_private; 318 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 319 320 cache_set_err_on(bio->bi_status, c, "accessing uuids"); 321 bch_bbio_free(bio, c); 322 closure_put(cl); 323 } 324 325 static void uuid_io_unlock(struct closure *cl) 326 { 327 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 328 329 up(&c->uuid_write_mutex); 330 } 331 332 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, 333 struct bkey *k, struct closure *parent) 334 { 335 struct closure *cl = &c->uuid_write; 336 struct uuid_entry *u; 337 unsigned int i; 338 char buf[80]; 339 340 BUG_ON(!parent); 341 down(&c->uuid_write_mutex); 342 closure_init(cl, parent); 343 344 for (i = 0; i < KEY_PTRS(k); i++) { 345 struct bio *bio = bch_bbio_alloc(c); 346 347 bio->bi_opf = REQ_SYNC | REQ_META | op_flags; 348 bio->bi_iter.bi_size = KEY_SIZE(k) << 9; 349 350 bio->bi_end_io = uuid_endio; 351 bio->bi_private = cl; 352 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 353 bch_bio_map(bio, c->uuids); 354 355 bch_submit_bbio(bio, c, k, i); 356 357 if (op != REQ_OP_WRITE) 358 break; 359 } 360 361 bch_extent_to_text(buf, sizeof(buf), k); 362 pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? 
"wrote" : "read", buf); 363 364 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 365 if (!bch_is_zero(u->uuid, 16)) 366 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", 367 u - c->uuids, u->uuid, u->label, 368 u->first_reg, u->last_reg, u->invalidated); 369 370 closure_return_with_destructor(cl, uuid_io_unlock); 371 } 372 373 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 374 { 375 struct bkey *k = &j->uuid_bucket; 376 377 if (__bch_btree_ptr_invalid(c, k)) 378 return "bad uuid pointer"; 379 380 bkey_copy(&c->uuid_bucket, k); 381 uuid_io(c, REQ_OP_READ, 0, k, cl); 382 383 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 384 struct uuid_entry_v0 *u0 = (void *) c->uuids; 385 struct uuid_entry *u1 = (void *) c->uuids; 386 int i; 387 388 closure_sync(cl); 389 390 /* 391 * Since the new uuid entry is bigger than the old, we have to 392 * convert starting at the highest memory address and work down 393 * in order to do it in place 394 */ 395 396 for (i = c->nr_uuids - 1; 397 i >= 0; 398 --i) { 399 memcpy(u1[i].uuid, u0[i].uuid, 16); 400 memcpy(u1[i].label, u0[i].label, 32); 401 402 u1[i].first_reg = u0[i].first_reg; 403 u1[i].last_reg = u0[i].last_reg; 404 u1[i].invalidated = u0[i].invalidated; 405 406 u1[i].flags = 0; 407 u1[i].sectors = 0; 408 } 409 } 410 411 return NULL; 412 } 413 414 static int __uuid_write(struct cache_set *c) 415 { 416 BKEY_PADDED(key) k; 417 struct closure cl; 418 419 closure_init_stack(&cl); 420 lockdep_assert_held(&bch_register_lock); 421 422 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) 423 return 1; 424 425 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 426 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); 427 closure_sync(&cl); 428 429 bkey_copy(&c->uuid_bucket, &k.key); 430 bkey_put(c, &k.key); 431 return 0; 432 } 433 434 int bch_uuid_write(struct cache_set *c) 435 { 436 int ret = __uuid_write(c); 437 438 if (!ret) 439 bch_journal_meta(c, NULL); 440 441 return ret; 442 } 443 444 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 445 { 446 struct uuid_entry *u; 447 448 for (u = c->uuids; 449 u < c->uuids + c->nr_uuids; u++) 450 if (!memcmp(u->uuid, uuid, 16)) 451 return u; 452 453 return NULL; 454 } 455 456 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 457 { 458 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 459 460 return uuid_find(c, zero_uuid); 461 } 462 463 /* 464 * Bucket priorities/gens: 465 * 466 * For each bucket, we store on disk its 467 * 8 bit gen 468 * 16 bit priority 469 * 470 * See alloc.c for an explanation of the gen. The priority is used to implement 471 * lru (and in the future other) cache replacement policies; for most purposes 472 * it's just an opaque integer. 473 * 474 * The gens and the priorities don't have a whole lot to do with each other, and 475 * it's actually the gens that must be written out at specific times - it's no 476 * big deal if the priorities don't get written, if we lose them we just reuse 477 * buckets in suboptimal order. 478 * 479 * On disk they're stored in a packed array, and in as many buckets are required 480 * to fit them all. The buckets we use to store them form a list; the journal 481 * header points to the first bucket, the first bucket points to the second 482 * bucket, et cetera. 
483 * 484 * This code is used by the allocation code; periodically (whenever it runs out 485 * of buckets to allocate from) the allocation code will invalidate some 486 * buckets, but it can't use those buckets until their new gens are safely on 487 * disk. 488 */ 489 490 static void prio_endio(struct bio *bio) 491 { 492 struct cache *ca = bio->bi_private; 493 494 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); 495 bch_bbio_free(bio, ca->set); 496 closure_put(&ca->prio); 497 } 498 499 static void prio_io(struct cache *ca, uint64_t bucket, int op, 500 unsigned long op_flags) 501 { 502 struct closure *cl = &ca->prio; 503 struct bio *bio = bch_bbio_alloc(ca->set); 504 505 closure_init_stack(cl); 506 507 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; 508 bio_set_dev(bio, ca->bdev); 509 bio->bi_iter.bi_size = bucket_bytes(ca); 510 511 bio->bi_end_io = prio_endio; 512 bio->bi_private = ca; 513 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 514 bch_bio_map(bio, ca->disk_buckets); 515 516 closure_bio_submit(ca->set, bio, &ca->prio); 517 closure_sync(cl); 518 } 519 520 void bch_prio_write(struct cache *ca) 521 { 522 int i; 523 struct bucket *b; 524 struct closure cl; 525 526 closure_init_stack(&cl); 527 528 lockdep_assert_held(&ca->set->bucket_lock); 529 530 ca->disk_buckets->seq++; 531 532 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), 533 &ca->meta_sectors_written); 534 535 //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), 536 // fifo_used(&ca->free_inc), fifo_used(&ca->unused)); 537 538 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 539 long bucket; 540 struct prio_set *p = ca->disk_buckets; 541 struct bucket_disk *d = p->data; 542 struct bucket_disk *end = d + prios_per_bucket(ca); 543 544 for (b = ca->buckets + i * prios_per_bucket(ca); 545 b < ca->buckets + ca->sb.nbuckets && d < end; 546 b++, d++) { 547 d->prio = cpu_to_le16(b->prio); 548 d->gen = b->gen; 549 } 550 551 p->next_bucket = ca->prio_buckets[i + 1]; 552 p->magic = pset_magic(&ca->sb); 553 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); 554 555 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); 556 BUG_ON(bucket == -1); 557 558 mutex_unlock(&ca->set->bucket_lock); 559 prio_io(ca, bucket, REQ_OP_WRITE, 0); 560 mutex_lock(&ca->set->bucket_lock); 561 562 ca->prio_buckets[i] = bucket; 563 atomic_dec_bug(&ca->buckets[bucket].pin); 564 } 565 566 mutex_unlock(&ca->set->bucket_lock); 567 568 bch_journal_meta(ca->set, &cl); 569 closure_sync(&cl); 570 571 mutex_lock(&ca->set->bucket_lock); 572 573 /* 574 * Don't want the old priorities to get garbage collected until after we 575 * finish writing the new ones, and they're journalled 576 */ 577 for (i = 0; i < prio_buckets(ca); i++) { 578 if (ca->prio_last_buckets[i]) 579 __bch_bucket_free(ca, 580 &ca->buckets[ca->prio_last_buckets[i]]); 581 582 ca->prio_last_buckets[i] = ca->prio_buckets[i]; 583 } 584 } 585 586 static void prio_read(struct cache *ca, uint64_t bucket) 587 { 588 struct prio_set *p = ca->disk_buckets; 589 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; 590 struct bucket *b; 591 unsigned int bucket_nr = 0; 592 593 for (b = ca->buckets; 594 b < ca->buckets + ca->sb.nbuckets; 595 b++, d++) { 596 if (d == end) { 597 ca->prio_buckets[bucket_nr] = bucket; 598 ca->prio_last_buckets[bucket_nr] = bucket; 599 bucket_nr++; 600 601 prio_io(ca, bucket, REQ_OP_READ, 0); 602 603 if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) 604 pr_warn("bad csum reading priorities"); 605 606 if (p->magic != 
pset_magic(&ca->sb)) 607 pr_warn("bad magic reading priorities"); 608 609 bucket = p->next_bucket; 610 d = p->data; 611 } 612 613 b->prio = le16_to_cpu(d->prio); 614 b->gen = b->last_gc = d->gen; 615 } 616 } 617 618 /* Bcache device */ 619 620 static int open_dev(struct block_device *b, fmode_t mode) 621 { 622 struct bcache_device *d = b->bd_disk->private_data; 623 624 if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) 625 return -ENXIO; 626 627 closure_get(&d->cl); 628 return 0; 629 } 630 631 static void release_dev(struct gendisk *b, fmode_t mode) 632 { 633 struct bcache_device *d = b->private_data; 634 635 closure_put(&d->cl); 636 } 637 638 static int ioctl_dev(struct block_device *b, fmode_t mode, 639 unsigned int cmd, unsigned long arg) 640 { 641 struct bcache_device *d = b->bd_disk->private_data; 642 struct cached_dev *dc = container_of(d, struct cached_dev, disk); 643 644 if (dc->io_disable) 645 return -EIO; 646 647 return d->ioctl(d, mode, cmd, arg); 648 } 649 650 static const struct block_device_operations bcache_ops = { 651 .open = open_dev, 652 .release = release_dev, 653 .ioctl = ioctl_dev, 654 .owner = THIS_MODULE, 655 }; 656 657 void bcache_device_stop(struct bcache_device *d) 658 { 659 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) 660 closure_queue(&d->cl); 661 } 662 663 static void bcache_device_unlink(struct bcache_device *d) 664 { 665 lockdep_assert_held(&bch_register_lock); 666 667 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { 668 unsigned int i; 669 struct cache *ca; 670 671 sysfs_remove_link(&d->c->kobj, d->name); 672 sysfs_remove_link(&d->kobj, "cache"); 673 674 for_each_cache(ca, d->c, i) 675 bd_unlink_disk_holder(ca->bdev, d->disk); 676 } 677 } 678 679 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 680 const char *name) 681 { 682 unsigned int i; 683 struct cache *ca; 684 685 for_each_cache(ca, d->c, i) 686 bd_link_disk_holder(ca->bdev, d->disk); 687 688 snprintf(d->name, BCACHEDEVNAME_SIZE, 689 "%s%u", name, d->id); 690 691 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || 692 sysfs_create_link(&c->kobj, &d->kobj, d->name), 693 "Couldn't create device <-> cache set symlinks"); 694 695 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); 696 } 697 698 static void bcache_device_detach(struct bcache_device *d) 699 { 700 lockdep_assert_held(&bch_register_lock); 701 702 atomic_dec(&d->c->attached_dev_nr); 703 704 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { 705 struct uuid_entry *u = d->c->uuids + d->id; 706 707 SET_UUID_FLASH_ONLY(u, 0); 708 memcpy(u->uuid, invalid_uuid, 16); 709 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); 710 bch_uuid_write(d->c); 711 } 712 713 bcache_device_unlink(d); 714 715 d->c->devices[d->id] = NULL; 716 closure_put(&d->c->caching); 717 d->c = NULL; 718 } 719 720 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, 721 unsigned int id) 722 { 723 d->id = id; 724 d->c = c; 725 c->devices[id] = d; 726 727 if (id >= c->devices_max_used) 728 c->devices_max_used = id + 1; 729 730 closure_get(&c->caching); 731 } 732 733 static inline int first_minor_to_idx(int first_minor) 734 { 735 return (first_minor/BCACHE_MINORS); 736 } 737 738 static inline int idx_to_first_minor(int idx) 739 { 740 return (idx * BCACHE_MINORS); 741 } 742 743 static void bcache_device_free(struct bcache_device *d) 744 { 745 lockdep_assert_held(&bch_register_lock); 746 747 pr_info("%s stopped", d->disk->disk_name); 748 749 if (d->c) 750 bcache_device_detach(d); 751 if (d->disk && 
d->disk->flags & GENHD_FL_UP) 752 del_gendisk(d->disk); 753 if (d->disk && d->disk->queue) 754 blk_cleanup_queue(d->disk->queue); 755 if (d->disk) { 756 ida_simple_remove(&bcache_device_idx, 757 first_minor_to_idx(d->disk->first_minor)); 758 put_disk(d->disk); 759 } 760 761 bioset_exit(&d->bio_split); 762 kvfree(d->full_dirty_stripes); 763 kvfree(d->stripe_sectors_dirty); 764 765 closure_debug_destroy(&d->cl); 766 } 767 768 static int bcache_device_init(struct bcache_device *d, unsigned int block_size, 769 sector_t sectors) 770 { 771 struct request_queue *q; 772 const size_t max_stripes = min_t(size_t, INT_MAX, 773 SIZE_MAX / sizeof(atomic_t)); 774 size_t n; 775 int idx; 776 777 if (!d->stripe_size) 778 d->stripe_size = 1 << 31; 779 780 d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); 781 782 if (!d->nr_stripes || d->nr_stripes > max_stripes) { 783 pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", 784 (unsigned int)d->nr_stripes); 785 return -ENOMEM; 786 } 787 788 n = d->nr_stripes * sizeof(atomic_t); 789 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); 790 if (!d->stripe_sectors_dirty) 791 return -ENOMEM; 792 793 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); 794 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); 795 if (!d->full_dirty_stripes) 796 return -ENOMEM; 797 798 idx = ida_simple_get(&bcache_device_idx, 0, 799 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); 800 if (idx < 0) 801 return idx; 802 803 if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio), 804 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) 805 goto err; 806 807 d->disk = alloc_disk(BCACHE_MINORS); 808 if (!d->disk) 809 goto err; 810 811 set_capacity(d->disk, sectors); 812 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); 813 814 d->disk->major = bcache_major; 815 d->disk->first_minor = idx_to_first_minor(idx); 816 d->disk->fops = &bcache_ops; 817 d->disk->private_data = d; 818 819 q = blk_alloc_queue(GFP_KERNEL); 820 if (!q) 821 return -ENOMEM; 822 823 blk_queue_make_request(q, NULL); 824 d->disk->queue = q; 825 q->queuedata = d; 826 q->backing_dev_info->congested_data = d; 827 q->limits.max_hw_sectors = UINT_MAX; 828 q->limits.max_sectors = UINT_MAX; 829 q->limits.max_segment_size = UINT_MAX; 830 q->limits.max_segments = BIO_MAX_PAGES; 831 blk_queue_max_discard_sectors(q, UINT_MAX); 832 q->limits.discard_granularity = 512; 833 q->limits.io_min = block_size; 834 q->limits.logical_block_size = block_size; 835 q->limits.physical_block_size = block_size; 836 blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); 837 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); 838 blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); 839 840 blk_queue_write_cache(q, true, true); 841 842 return 0; 843 844 err: 845 ida_simple_remove(&bcache_device_idx, idx); 846 return -ENOMEM; 847 848 } 849 850 /* Cached device */ 851 852 static void calc_cached_dev_sectors(struct cache_set *c) 853 { 854 uint64_t sectors = 0; 855 struct cached_dev *dc; 856 857 list_for_each_entry(dc, &c->cached_devs, list) 858 sectors += bdev_sectors(dc->bdev); 859 860 c->cached_dev_sectors = sectors; 861 } 862 863 #define BACKING_DEV_OFFLINE_TIMEOUT 5 864 static int cached_dev_status_update(void *arg) 865 { 866 struct cached_dev *dc = arg; 867 struct request_queue *q; 868 869 /* 870 * If this delayed worker is stopping outside, directly quit here. 871 * dc->io_disable might be set via sysfs interface, so check it 872 * here too. 
873 */ 874 while (!kthread_should_stop() && !dc->io_disable) { 875 q = bdev_get_queue(dc->bdev); 876 if (blk_queue_dying(q)) 877 dc->offline_seconds++; 878 else 879 dc->offline_seconds = 0; 880 881 if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) { 882 pr_err("%s: device offline for %d seconds", 883 dc->backing_dev_name, 884 BACKING_DEV_OFFLINE_TIMEOUT); 885 pr_err("%s: disable I/O request due to backing " 886 "device offline", dc->disk.name); 887 dc->io_disable = true; 888 /* let others know earlier that io_disable is true */ 889 smp_mb(); 890 bcache_device_stop(&dc->disk); 891 break; 892 } 893 schedule_timeout_interruptible(HZ); 894 } 895 896 wait_for_kthread_stop(); 897 return 0; 898 } 899 900 901 void bch_cached_dev_run(struct cached_dev *dc) 902 { 903 struct bcache_device *d = &dc->disk; 904 char buf[SB_LABEL_SIZE + 1]; 905 char *env[] = { 906 "DRIVER=bcache", 907 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), 908 NULL, 909 NULL, 910 }; 911 912 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); 913 buf[SB_LABEL_SIZE] = '\0'; 914 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); 915 916 if (atomic_xchg(&dc->running, 1)) { 917 kfree(env[1]); 918 kfree(env[2]); 919 return; 920 } 921 922 if (!d->c && 923 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 924 struct closure cl; 925 926 closure_init_stack(&cl); 927 928 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); 929 bch_write_bdev_super(dc, &cl); 930 closure_sync(&cl); 931 } 932 933 add_disk(d->disk); 934 bd_link_disk_holder(dc->bdev, dc->disk.disk); 935 /* won't show up in the uevent file, use udevadm monitor -e instead 936 * only class / kset properties are persistent */ 937 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 938 kfree(env[1]); 939 kfree(env[2]); 940 941 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 942 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) 943 pr_debug("error creating sysfs link"); 944 945 dc->status_update_thread = kthread_run(cached_dev_status_update, 946 dc, "bcache_status_update"); 947 if (IS_ERR(dc->status_update_thread)) { 948 pr_warn("failed to create bcache_status_update kthread, " 949 "continue to run without monitoring backing " 950 "device status"); 951 } 952 } 953 954 /* 955 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed 956 * work dc->writeback_rate_update is running. Wait until the routine 957 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to 958 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out 959 * seconds, give up waiting here and continue to cancel it too. 
960 */ 961 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) 962 { 963 int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; 964 965 do { 966 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, 967 &dc->disk.flags)) 968 break; 969 time_out--; 970 schedule_timeout_interruptible(1); 971 } while (time_out > 0); 972 973 if (time_out == 0) 974 pr_warn("give up waiting for dc->writeback_write_update to quit"); 975 976 cancel_delayed_work_sync(&dc->writeback_rate_update); 977 } 978 979 static void cached_dev_detach_finish(struct work_struct *w) 980 { 981 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 982 struct closure cl; 983 984 closure_init_stack(&cl); 985 986 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); 987 BUG_ON(refcount_read(&dc->count)); 988 989 mutex_lock(&bch_register_lock); 990 991 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 992 cancel_writeback_rate_update_dwork(dc); 993 994 if (!IS_ERR_OR_NULL(dc->writeback_thread)) { 995 kthread_stop(dc->writeback_thread); 996 dc->writeback_thread = NULL; 997 } 998 999 memset(&dc->sb.set_uuid, 0, 16); 1000 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 1001 1002 bch_write_bdev_super(dc, &cl); 1003 closure_sync(&cl); 1004 1005 bcache_device_detach(&dc->disk); 1006 list_move(&dc->list, &uncached_devices); 1007 1008 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); 1009 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); 1010 1011 mutex_unlock(&bch_register_lock); 1012 1013 pr_info("Caching disabled for %s", dc->backing_dev_name); 1014 1015 /* Drop ref we took in cached_dev_detach() */ 1016 closure_put(&dc->disk.cl); 1017 } 1018 1019 void bch_cached_dev_detach(struct cached_dev *dc) 1020 { 1021 lockdep_assert_held(&bch_register_lock); 1022 1023 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1024 return; 1025 1026 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 1027 return; 1028 1029 /* 1030 * Block the device from being closed and freed until we're finished 1031 * detaching 1032 */ 1033 closure_get(&dc->disk.cl); 1034 1035 bch_writeback_queue(dc); 1036 1037 cached_dev_put(dc); 1038 } 1039 1040 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, 1041 uint8_t *set_uuid) 1042 { 1043 uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds()); 1044 struct uuid_entry *u; 1045 struct cached_dev *exist_dc, *t; 1046 1047 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || 1048 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) 1049 return -ENOENT; 1050 1051 if (dc->disk.c) { 1052 pr_err("Can't attach %s: already attached", 1053 dc->backing_dev_name); 1054 return -EINVAL; 1055 } 1056 1057 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { 1058 pr_err("Can't attach %s: shutting down", 1059 dc->backing_dev_name); 1060 return -EINVAL; 1061 } 1062 1063 if (dc->sb.block_size < c->sb.block_size) { 1064 /* Will die */ 1065 pr_err("Couldn't attach %s: block size less than set's block size", 1066 dc->backing_dev_name); 1067 return -EINVAL; 1068 } 1069 1070 /* Check whether already attached */ 1071 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { 1072 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { 1073 pr_err("Tried to attach %s but duplicate UUID already attached", 1074 dc->backing_dev_name); 1075 1076 return -EINVAL; 1077 } 1078 } 1079 1080 u = uuid_find(c, dc->sb.uuid); 1081 1082 if (u && 1083 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || 1084 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { 1085 memcpy(u->uuid, invalid_uuid, 16); 1086 u->invalidated = 
cpu_to_le32((u32)ktime_get_real_seconds()); 1087 u = NULL; 1088 } 1089 1090 if (!u) { 1091 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1092 pr_err("Couldn't find uuid for %s in set", 1093 dc->backing_dev_name); 1094 return -ENOENT; 1095 } 1096 1097 u = uuid_find_empty(c); 1098 if (!u) { 1099 pr_err("Not caching %s, no room for UUID", 1100 dc->backing_dev_name); 1101 return -EINVAL; 1102 } 1103 } 1104 1105 /* Deadlocks since we're called via sysfs... 1106 sysfs_remove_file(&dc->kobj, &sysfs_attach); 1107 */ 1108 1109 if (bch_is_zero(u->uuid, 16)) { 1110 struct closure cl; 1111 1112 closure_init_stack(&cl); 1113 1114 memcpy(u->uuid, dc->sb.uuid, 16); 1115 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); 1116 u->first_reg = u->last_reg = rtime; 1117 bch_uuid_write(c); 1118 1119 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); 1120 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 1121 1122 bch_write_bdev_super(dc, &cl); 1123 closure_sync(&cl); 1124 } else { 1125 u->last_reg = rtime; 1126 bch_uuid_write(c); 1127 } 1128 1129 bcache_device_attach(&dc->disk, c, u - c->uuids); 1130 list_move(&dc->list, &c->cached_devs); 1131 calc_cached_dev_sectors(c); 1132 1133 smp_wmb(); 1134 /* 1135 * dc->c must be set before dc->count != 0 - paired with the mb in 1136 * cached_dev_get() 1137 */ 1138 refcount_set(&dc->count, 1); 1139 1140 /* Block writeback thread, but spawn it */ 1141 down_write(&dc->writeback_lock); 1142 if (bch_cached_dev_writeback_start(dc)) { 1143 up_write(&dc->writeback_lock); 1144 return -ENOMEM; 1145 } 1146 1147 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1148 bch_sectors_dirty_init(&dc->disk); 1149 atomic_set(&dc->has_dirty, 1); 1150 bch_writeback_queue(dc); 1151 } 1152 1153 bch_cached_dev_run(dc); 1154 bcache_device_link(&dc->disk, c, "bdev"); 1155 atomic_inc(&c->attached_dev_nr); 1156 1157 /* Allow the writeback thread to proceed */ 1158 up_write(&dc->writeback_lock); 1159 1160 pr_info("Caching %s as %s on set %pU", 1161 dc->backing_dev_name, 1162 dc->disk.disk->disk_name, 1163 dc->disk.c->sb.set_uuid); 1164 return 0; 1165 } 1166 1167 void bch_cached_dev_release(struct kobject *kobj) 1168 { 1169 struct cached_dev *dc = container_of(kobj, struct cached_dev, 1170 disk.kobj); 1171 kfree(dc); 1172 module_put(THIS_MODULE); 1173 } 1174 1175 static void cached_dev_free(struct closure *cl) 1176 { 1177 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1178 1179 mutex_lock(&bch_register_lock); 1180 1181 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 1182 cancel_writeback_rate_update_dwork(dc); 1183 1184 if (!IS_ERR_OR_NULL(dc->writeback_thread)) 1185 kthread_stop(dc->writeback_thread); 1186 if (dc->writeback_write_wq) 1187 destroy_workqueue(dc->writeback_write_wq); 1188 if (!IS_ERR_OR_NULL(dc->status_update_thread)) 1189 kthread_stop(dc->status_update_thread); 1190 1191 if (atomic_read(&dc->running)) 1192 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 1193 bcache_device_free(&dc->disk); 1194 list_del(&dc->list); 1195 1196 mutex_unlock(&bch_register_lock); 1197 1198 if (!IS_ERR_OR_NULL(dc->bdev)) 1199 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1200 1201 wake_up(&unregister_wait); 1202 1203 kobject_put(&dc->disk.kobj); 1204 } 1205 1206 static void cached_dev_flush(struct closure *cl) 1207 { 1208 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1209 struct bcache_device *d = &dc->disk; 1210 1211 mutex_lock(&bch_register_lock); 1212 bcache_device_unlink(d); 1213 mutex_unlock(&bch_register_lock); 1214 1215 
bch_cache_accounting_destroy(&dc->accounting); 1216 kobject_del(&d->kobj); 1217 1218 continue_at(cl, cached_dev_free, system_wq); 1219 } 1220 1221 static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) 1222 { 1223 int ret; 1224 struct io *io; 1225 struct request_queue *q = bdev_get_queue(dc->bdev); 1226 1227 __module_get(THIS_MODULE); 1228 INIT_LIST_HEAD(&dc->list); 1229 closure_init(&dc->disk.cl, NULL); 1230 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); 1231 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1232 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1233 sema_init(&dc->sb_write_mutex, 1); 1234 INIT_LIST_HEAD(&dc->io_lru); 1235 spin_lock_init(&dc->io_lock); 1236 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); 1237 1238 dc->sequential_cutoff = 4 << 20; 1239 1240 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1241 list_add(&io->lru, &dc->io_lru); 1242 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1243 } 1244 1245 dc->disk.stripe_size = q->limits.io_opt >> 9; 1246 1247 if (dc->disk.stripe_size) 1248 dc->partial_stripes_expensive = 1249 q->limits.raid_partial_stripes_expensive; 1250 1251 ret = bcache_device_init(&dc->disk, block_size, 1252 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1253 if (ret) 1254 return ret; 1255 1256 dc->disk.disk->queue->backing_dev_info->ra_pages = 1257 max(dc->disk.disk->queue->backing_dev_info->ra_pages, 1258 q->backing_dev_info->ra_pages); 1259 1260 atomic_set(&dc->io_errors, 0); 1261 dc->io_disable = false; 1262 dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; 1263 /* default to auto */ 1264 dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; 1265 1266 bch_cached_dev_request_init(dc); 1267 bch_cached_dev_writeback_init(dc); 1268 return 0; 1269 } 1270 1271 /* Cached device - bcache superblock */ 1272 1273 static void register_bdev(struct cache_sb *sb, struct page *sb_page, 1274 struct block_device *bdev, 1275 struct cached_dev *dc) 1276 { 1277 const char *err = "cannot allocate memory"; 1278 struct cache_set *c; 1279 1280 bdevname(bdev, dc->backing_dev_name); 1281 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1282 dc->bdev = bdev; 1283 dc->bdev->bd_holder = dc; 1284 1285 bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1); 1286 bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; 1287 get_page(sb_page); 1288 1289 1290 if (cached_dev_init(dc, sb->block_size << 9)) 1291 goto err; 1292 1293 err = "error creating kobject"; 1294 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1295 "bcache")) 1296 goto err; 1297 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1298 goto err; 1299 1300 pr_info("registered backing device %s", dc->backing_dev_name); 1301 1302 list_add(&dc->list, &uncached_devices); 1303 /* attach to a matched cache set if it exists */ 1304 list_for_each_entry(c, &bch_cache_sets, list) 1305 bch_cached_dev_attach(dc, c, NULL); 1306 1307 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1308 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1309 bch_cached_dev_run(dc); 1310 1311 return; 1312 err: 1313 pr_notice("error %s: %s", dc->backing_dev_name, err); 1314 bcache_device_stop(&dc->disk); 1315 } 1316 1317 /* Flash only volumes */ 1318 1319 void bch_flash_dev_release(struct kobject *kobj) 1320 { 1321 struct bcache_device *d = container_of(kobj, struct bcache_device, 1322 kobj); 1323 kfree(d); 1324 } 1325 1326 static void flash_dev_free(struct closure *cl) 1327 { 1328 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1329 1330 
mutex_lock(&bch_register_lock); 1331 atomic_long_sub(bcache_dev_sectors_dirty(d), 1332 &d->c->flash_dev_dirty_sectors); 1333 bcache_device_free(d); 1334 mutex_unlock(&bch_register_lock); 1335 kobject_put(&d->kobj); 1336 } 1337 1338 static void flash_dev_flush(struct closure *cl) 1339 { 1340 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1341 1342 mutex_lock(&bch_register_lock); 1343 bcache_device_unlink(d); 1344 mutex_unlock(&bch_register_lock); 1345 kobject_del(&d->kobj); 1346 continue_at(cl, flash_dev_free, system_wq); 1347 } 1348 1349 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) 1350 { 1351 struct bcache_device *d = kzalloc(sizeof(struct bcache_device), 1352 GFP_KERNEL); 1353 if (!d) 1354 return -ENOMEM; 1355 1356 closure_init(&d->cl, NULL); 1357 set_closure_fn(&d->cl, flash_dev_flush, system_wq); 1358 1359 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1360 1361 if (bcache_device_init(d, block_bytes(c), u->sectors)) 1362 goto err; 1363 1364 bcache_device_attach(d, c, u - c->uuids); 1365 bch_sectors_dirty_init(d); 1366 bch_flash_dev_request_init(d); 1367 add_disk(d->disk); 1368 1369 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) 1370 goto err; 1371 1372 bcache_device_link(d, c, "volume"); 1373 1374 return 0; 1375 err: 1376 kobject_put(&d->kobj); 1377 return -ENOMEM; 1378 } 1379 1380 static int flash_devs_run(struct cache_set *c) 1381 { 1382 int ret = 0; 1383 struct uuid_entry *u; 1384 1385 for (u = c->uuids; 1386 u < c->uuids + c->nr_uuids && !ret; 1387 u++) 1388 if (UUID_FLASH_ONLY(u)) 1389 ret = flash_dev_run(c, u); 1390 1391 return ret; 1392 } 1393 1394 int bch_flash_dev_create(struct cache_set *c, uint64_t size) 1395 { 1396 struct uuid_entry *u; 1397 1398 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1399 return -EINTR; 1400 1401 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) 1402 return -EPERM; 1403 1404 u = uuid_find_empty(c); 1405 if (!u) { 1406 pr_err("Can't create volume, no room for UUID"); 1407 return -EINVAL; 1408 } 1409 1410 get_random_bytes(u->uuid, 16); 1411 memset(u->label, 0, 32); 1412 u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds()); 1413 1414 SET_UUID_FLASH_ONLY(u, 1); 1415 u->sectors = size >> 9; 1416 1417 bch_uuid_write(c); 1418 1419 return flash_dev_run(c, u); 1420 } 1421 1422 bool bch_cached_dev_error(struct cached_dev *dc) 1423 { 1424 struct cache_set *c; 1425 1426 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1427 return false; 1428 1429 dc->io_disable = true; 1430 /* make others know io_disable is true earlier */ 1431 smp_mb(); 1432 1433 pr_err("stop %s: too many IO errors on backing device %s\n", 1434 dc->disk.disk->disk_name, dc->backing_dev_name); 1435 1436 /* 1437 * If the cached device is still attached to a cache set, 1438 * even dc->io_disable is true and no more I/O requests 1439 * accepted, cache device internal I/O (writeback scan or 1440 * garbage collection) may still prevent bcache device from 1441 * being stopped. So here CACHE_SET_IO_DISABLE should be 1442 * set to c->flags too, to make the internal I/O to cache 1443 * device rejected and stopped immediately. 1444 * If c is NULL, that means the bcache device is not attached 1445 * to any cache set, then no CACHE_SET_IO_DISABLE bit to set. 
1446 */ 1447 c = dc->disk.c; 1448 if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1449 pr_info("CACHE_SET_IO_DISABLE already set"); 1450 1451 bcache_device_stop(&dc->disk); 1452 return true; 1453 } 1454 1455 /* Cache set */ 1456 1457 __printf(2, 3) 1458 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 1459 { 1460 va_list args; 1461 1462 if (c->on_error != ON_ERROR_PANIC && 1463 test_bit(CACHE_SET_STOPPING, &c->flags)) 1464 return false; 1465 1466 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1467 pr_info("CACHE_SET_IO_DISABLE already set"); 1468 1469 /* XXX: we can be called from atomic context 1470 acquire_console_sem(); 1471 */ 1472 1473 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); 1474 1475 va_start(args, fmt); 1476 vprintk(fmt, args); 1477 va_end(args); 1478 1479 printk(", disabling caching\n"); 1480 1481 if (c->on_error == ON_ERROR_PANIC) 1482 panic("panic forced after error\n"); 1483 1484 bch_cache_set_unregister(c); 1485 return true; 1486 } 1487 1488 void bch_cache_set_release(struct kobject *kobj) 1489 { 1490 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 1491 1492 kfree(c); 1493 module_put(THIS_MODULE); 1494 } 1495 1496 static void cache_set_free(struct closure *cl) 1497 { 1498 struct cache_set *c = container_of(cl, struct cache_set, cl); 1499 struct cache *ca; 1500 unsigned int i; 1501 1502 if (!IS_ERR_OR_NULL(c->debug)) 1503 debugfs_remove(c->debug); 1504 1505 bch_open_buckets_free(c); 1506 bch_btree_cache_free(c); 1507 bch_journal_free(c); 1508 1509 for_each_cache(ca, c, i) 1510 if (ca) { 1511 ca->set = NULL; 1512 c->cache[ca->sb.nr_this_dev] = NULL; 1513 kobject_put(&ca->kobj); 1514 } 1515 1516 bch_bset_sort_state_free(&c->sort); 1517 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); 1518 1519 if (c->moving_gc_wq) 1520 destroy_workqueue(c->moving_gc_wq); 1521 bioset_exit(&c->bio_split); 1522 mempool_exit(&c->fill_iter); 1523 mempool_exit(&c->bio_meta); 1524 mempool_exit(&c->search); 1525 kfree(c->devices); 1526 1527 mutex_lock(&bch_register_lock); 1528 list_del(&c->list); 1529 mutex_unlock(&bch_register_lock); 1530 1531 pr_info("Cache set %pU unregistered", c->sb.set_uuid); 1532 wake_up(&unregister_wait); 1533 1534 closure_debug_destroy(&c->cl); 1535 kobject_put(&c->kobj); 1536 } 1537 1538 static void cache_set_flush(struct closure *cl) 1539 { 1540 struct cache_set *c = container_of(cl, struct cache_set, caching); 1541 struct cache *ca; 1542 struct btree *b; 1543 unsigned int i; 1544 1545 bch_cache_accounting_destroy(&c->accounting); 1546 1547 kobject_put(&c->internal); 1548 kobject_del(&c->kobj); 1549 1550 if (c->gc_thread) 1551 kthread_stop(c->gc_thread); 1552 1553 if (!IS_ERR_OR_NULL(c->root)) 1554 list_add(&c->root->list, &c->btree_cache); 1555 1556 /* Should skip this if we're unregistering because of an error */ 1557 list_for_each_entry(b, &c->btree_cache, list) { 1558 mutex_lock(&b->write_lock); 1559 if (btree_node_dirty(b)) 1560 __bch_btree_node_write(b, NULL); 1561 mutex_unlock(&b->write_lock); 1562 } 1563 1564 for_each_cache(ca, c, i) 1565 if (ca->alloc_thread) 1566 kthread_stop(ca->alloc_thread); 1567 1568 if (c->journal.cur) { 1569 cancel_delayed_work_sync(&c->journal.work); 1570 /* flush last journal entry if needed */ 1571 c->journal.work.work.func(&c->journal.work.work); 1572 } 1573 1574 closure_return(cl); 1575 } 1576 1577 /* 1578 * This function is only called when CACHE_SET_IO_DISABLE is set, which means 1579 * cache set is unregistering due to too many I/O errors. 
In this condition, 1580 * the bcache device might be stopped, it depends on stop_when_cache_set_failed 1581 * value and whether the broken cache has dirty data: 1582 * 1583 * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device 1584 * BCH_CACHED_STOP_AUTO 0 NO 1585 * BCH_CACHED_STOP_AUTO 1 YES 1586 * BCH_CACHED_DEV_STOP_ALWAYS 0 YES 1587 * BCH_CACHED_DEV_STOP_ALWAYS 1 YES 1588 * 1589 * The expected behavior is, if stop_when_cache_set_failed is configured to 1590 * "auto" via sysfs interface, the bcache device will not be stopped if the 1591 * backing device is clean on the broken cache device. 1592 */ 1593 static void conditional_stop_bcache_device(struct cache_set *c, 1594 struct bcache_device *d, 1595 struct cached_dev *dc) 1596 { 1597 if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { 1598 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.", 1599 d->disk->disk_name, c->sb.set_uuid); 1600 bcache_device_stop(d); 1601 } else if (atomic_read(&dc->has_dirty)) { 1602 /* 1603 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO 1604 * and dc->has_dirty == 1 1605 */ 1606 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.", 1607 d->disk->disk_name); 1608 /* 1609 * There might be a small time gap that cache set is 1610 * released but bcache device is not. Inside this time 1611 * gap, regular I/O requests will directly go into 1612 * backing device as no cache set attached to. This 1613 * behavior may also introduce potential inconsistence 1614 * data in writeback mode while cache is dirty. 1615 * Therefore before calling bcache_device_stop() due 1616 * to a broken cache device, dc->io_disable should be 1617 * explicitly set to true. 
1618 */ 1619 dc->io_disable = true; 1620 /* make others know io_disable is true earlier */ 1621 smp_mb(); 1622 bcache_device_stop(d); 1623 } else { 1624 /* 1625 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO 1626 * and dc->has_dirty == 0 1627 */ 1628 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.", 1629 d->disk->disk_name); 1630 } 1631 } 1632 1633 static void __cache_set_unregister(struct closure *cl) 1634 { 1635 struct cache_set *c = container_of(cl, struct cache_set, caching); 1636 struct cached_dev *dc; 1637 struct bcache_device *d; 1638 size_t i; 1639 1640 mutex_lock(&bch_register_lock); 1641 1642 for (i = 0; i < c->devices_max_used; i++) { 1643 d = c->devices[i]; 1644 if (!d) 1645 continue; 1646 1647 if (!UUID_FLASH_ONLY(&c->uuids[i]) && 1648 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { 1649 dc = container_of(d, struct cached_dev, disk); 1650 bch_cached_dev_detach(dc); 1651 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1652 conditional_stop_bcache_device(c, d, dc); 1653 } else { 1654 bcache_device_stop(d); 1655 } 1656 } 1657 1658 mutex_unlock(&bch_register_lock); 1659 1660 continue_at(cl, cache_set_flush, system_wq); 1661 } 1662 1663 void bch_cache_set_stop(struct cache_set *c) 1664 { 1665 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) 1666 closure_queue(&c->caching); 1667 } 1668 1669 void bch_cache_set_unregister(struct cache_set *c) 1670 { 1671 set_bit(CACHE_SET_UNREGISTERING, &c->flags); 1672 bch_cache_set_stop(c); 1673 } 1674 1675 #define alloc_bucket_pages(gfp, c) \ 1676 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) 1677 1678 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) 1679 { 1680 int iter_size; 1681 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); 1682 1683 if (!c) 1684 return NULL; 1685 1686 __module_get(THIS_MODULE); 1687 closure_init(&c->cl, NULL); 1688 set_closure_fn(&c->cl, cache_set_free, system_wq); 1689 1690 closure_init(&c->caching, &c->cl); 1691 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); 1692 1693 /* Maybe create continue_at_noreturn() and use it here? 
*/ 1694 closure_set_stopped(&c->cl); 1695 closure_put(&c->cl); 1696 1697 kobject_init(&c->kobj, &bch_cache_set_ktype); 1698 kobject_init(&c->internal, &bch_cache_set_internal_ktype); 1699 1700 bch_cache_accounting_init(&c->accounting, &c->cl); 1701 1702 memcpy(c->sb.set_uuid, sb->set_uuid, 16); 1703 c->sb.block_size = sb->block_size; 1704 c->sb.bucket_size = sb->bucket_size; 1705 c->sb.nr_in_set = sb->nr_in_set; 1706 c->sb.last_mount = sb->last_mount; 1707 c->bucket_bits = ilog2(sb->bucket_size); 1708 c->block_bits = ilog2(sb->block_size); 1709 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); 1710 c->devices_max_used = 0; 1711 atomic_set(&c->attached_dev_nr, 0); 1712 c->btree_pages = bucket_pages(c); 1713 if (c->btree_pages > BTREE_MAX_PAGES) 1714 c->btree_pages = max_t(int, c->btree_pages / 4, 1715 BTREE_MAX_PAGES); 1716 1717 sema_init(&c->sb_write_mutex, 1); 1718 mutex_init(&c->bucket_lock); 1719 init_waitqueue_head(&c->btree_cache_wait); 1720 init_waitqueue_head(&c->bucket_wait); 1721 init_waitqueue_head(&c->gc_wait); 1722 sema_init(&c->uuid_write_mutex, 1); 1723 1724 spin_lock_init(&c->btree_gc_time.lock); 1725 spin_lock_init(&c->btree_split_time.lock); 1726 spin_lock_init(&c->btree_read_time.lock); 1727 1728 bch_moving_init_cache_set(c); 1729 1730 INIT_LIST_HEAD(&c->list); 1731 INIT_LIST_HEAD(&c->cached_devs); 1732 INIT_LIST_HEAD(&c->btree_cache); 1733 INIT_LIST_HEAD(&c->btree_cache_freeable); 1734 INIT_LIST_HEAD(&c->btree_cache_freed); 1735 INIT_LIST_HEAD(&c->data_buckets); 1736 1737 iter_size = (sb->bucket_size / sb->block_size + 1) * 1738 sizeof(struct btree_iter_set); 1739 1740 if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) || 1741 mempool_init_slab_pool(&c->search, 32, bch_search_cache) || 1742 mempool_init_kmalloc_pool(&c->bio_meta, 2, 1743 sizeof(struct bbio) + sizeof(struct bio_vec) * 1744 bucket_pages(c)) || 1745 mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || 1746 bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), 1747 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) || 1748 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || 1749 !(c->moving_gc_wq = alloc_workqueue("bcache_gc", 1750 WQ_MEM_RECLAIM, 0)) || 1751 bch_journal_alloc(c) || 1752 bch_btree_cache_alloc(c) || 1753 bch_open_buckets_alloc(c) || 1754 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) 1755 goto err; 1756 1757 c->congested_read_threshold_us = 2000; 1758 c->congested_write_threshold_us = 20000; 1759 c->error_limit = DEFAULT_IO_ERROR_LIMIT; 1760 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); 1761 1762 return c; 1763 err: 1764 bch_cache_set_unregister(c); 1765 return NULL; 1766 } 1767 1768 static void run_cache_set(struct cache_set *c) 1769 { 1770 const char *err = "cannot allocate memory"; 1771 struct cached_dev *dc, *t; 1772 struct cache *ca; 1773 struct closure cl; 1774 unsigned int i; 1775 1776 closure_init_stack(&cl); 1777 1778 for_each_cache(ca, c, i) 1779 c->nbuckets += ca->sb.nbuckets; 1780 set_gc_sectors(c); 1781 1782 if (CACHE_SYNC(&c->sb)) { 1783 LIST_HEAD(journal); 1784 struct bkey *k; 1785 struct jset *j; 1786 1787 err = "cannot allocate memory for journal"; 1788 if (bch_journal_read(c, &journal)) 1789 goto err; 1790 1791 pr_debug("btree_journal_read() done"); 1792 1793 err = "no journal entries found"; 1794 if (list_empty(&journal)) 1795 goto err; 1796 1797 j = &list_entry(journal.prev, struct journal_replay, list)->j; 1798 1799 err = "IO error reading priorities"; 1800 for_each_cache(ca, c, i) 1801 prio_read(ca, 
j->prio_bucket[ca->sb.nr_this_dev]); 1802 1803 /* 1804 * If prio_read() fails it'll call cache_set_error and we'll 1805 * tear everything down right away, but if we perhaps checked 1806 * sooner we could avoid journal replay. 1807 */ 1808 1809 k = &j->btree_root; 1810 1811 err = "bad btree root"; 1812 if (__bch_btree_ptr_invalid(c, k)) 1813 goto err; 1814 1815 err = "error reading btree root"; 1816 c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); 1817 if (IS_ERR_OR_NULL(c->root)) 1818 goto err; 1819 1820 list_del_init(&c->root->list); 1821 rw_unlock(true, c->root); 1822 1823 err = uuid_read(c, j, &cl); 1824 if (err) 1825 goto err; 1826 1827 err = "error in recovery"; 1828 if (bch_btree_check(c)) 1829 goto err; 1830 1831 bch_journal_mark(c, &journal); 1832 bch_initial_gc_finish(c); 1833 pr_debug("btree_check() done"); 1834 1835 /* 1836 * bcache_journal_next() can't happen sooner, or 1837 * btree_gc_finish() will give spurious errors about last_gc > 1838 * gc_gen - this is a hack but oh well. 1839 */ 1840 bch_journal_next(&c->journal); 1841 1842 err = "error starting allocator thread"; 1843 for_each_cache(ca, c, i) 1844 if (bch_cache_allocator_start(ca)) 1845 goto err; 1846 1847 /* 1848 * First place it's safe to allocate: btree_check() and 1849 * btree_gc_finish() have to run before we have buckets to 1850 * allocate, and bch_bucket_alloc_set() might cause a journal 1851 * entry to be written so bcache_journal_next() has to be called 1852 * first. 1853 * 1854 * If the uuids were in the old format we have to rewrite them 1855 * before the next journal entry is written: 1856 */ 1857 if (j->version < BCACHE_JSET_VERSION_UUID) 1858 __uuid_write(c); 1859 1860 bch_journal_replay(c, &journal); 1861 } else { 1862 pr_notice("invalidating existing data"); 1863 1864 for_each_cache(ca, c, i) { 1865 unsigned int j; 1866 1867 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 1868 2, SB_JOURNAL_BUCKETS); 1869 1870 for (j = 0; j < ca->sb.keys; j++) 1871 ca->sb.d[j] = ca->sb.first_bucket + j; 1872 } 1873 1874 bch_initial_gc_finish(c); 1875 1876 err = "error starting allocator thread"; 1877 for_each_cache(ca, c, i) 1878 if (bch_cache_allocator_start(ca)) 1879 goto err; 1880 1881 mutex_lock(&c->bucket_lock); 1882 for_each_cache(ca, c, i) 1883 bch_prio_write(ca); 1884 mutex_unlock(&c->bucket_lock); 1885 1886 err = "cannot allocate new UUID bucket"; 1887 if (__uuid_write(c)) 1888 goto err; 1889 1890 err = "cannot allocate new btree root"; 1891 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); 1892 if (IS_ERR_OR_NULL(c->root)) 1893 goto err; 1894 1895 mutex_lock(&c->root->write_lock); 1896 bkey_copy_key(&c->root->key, &MAX_KEY); 1897 bch_btree_node_write(c->root, &cl); 1898 mutex_unlock(&c->root->write_lock); 1899 1900 bch_btree_set_root(c->root); 1901 rw_unlock(true, c->root); 1902 1903 /* 1904 * We don't want to write the first journal entry until 1905 * everything is set up - fortunately journal entries won't be 1906 * written until the SET_CACHE_SYNC() here: 1907 */ 1908 SET_CACHE_SYNC(&c->sb, true); 1909 1910 bch_journal_next(&c->journal); 1911 bch_journal_meta(c, &cl); 1912 } 1913 1914 err = "error starting gc thread"; 1915 if (bch_gc_thread_start(c)) 1916 goto err; 1917 1918 closure_sync(&cl); 1919 c->sb.last_mount = (u32)ktime_get_real_seconds(); 1920 bcache_write_super(c); 1921 1922 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1923 bch_cached_dev_attach(dc, c, NULL); 1924 1925 flash_devs_run(c); 1926 1927 set_bit(CACHE_SET_RUNNING, &c->flags); 1928 return; 1929 
err: 1930 closure_sync(&cl); 1931 /* XXX: test this, it's broken */ 1932 bch_cache_set_error(c, "%s", err); 1933 } 1934 1935 static bool can_attach_cache(struct cache *ca, struct cache_set *c) 1936 { 1937 return ca->sb.block_size == c->sb.block_size && 1938 ca->sb.bucket_size == c->sb.bucket_size && 1939 ca->sb.nr_in_set == c->sb.nr_in_set; 1940 } 1941 1942 static const char *register_cache_set(struct cache *ca) 1943 { 1944 char buf[12]; 1945 const char *err = "cannot allocate memory"; 1946 struct cache_set *c; 1947 1948 list_for_each_entry(c, &bch_cache_sets, list) 1949 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { 1950 if (c->cache[ca->sb.nr_this_dev]) 1951 return "duplicate cache set member"; 1952 1953 if (!can_attach_cache(ca, c)) 1954 return "cache sb does not match set"; 1955 1956 if (!CACHE_SYNC(&ca->sb)) 1957 SET_CACHE_SYNC(&c->sb, false); 1958 1959 goto found; 1960 } 1961 1962 c = bch_cache_set_alloc(&ca->sb); 1963 if (!c) 1964 return err; 1965 1966 err = "error creating kobject"; 1967 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || 1968 kobject_add(&c->internal, &c->kobj, "internal")) 1969 goto err; 1970 1971 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) 1972 goto err; 1973 1974 bch_debug_init_cache_set(c); 1975 1976 list_add(&c->list, &bch_cache_sets); 1977 found: 1978 sprintf(buf, "cache%i", ca->sb.nr_this_dev); 1979 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || 1980 sysfs_create_link(&c->kobj, &ca->kobj, buf)) 1981 goto err; 1982 1983 if (ca->sb.seq > c->sb.seq) { 1984 c->sb.version = ca->sb.version; 1985 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); 1986 c->sb.flags = ca->sb.flags; 1987 c->sb.seq = ca->sb.seq; 1988 pr_debug("set version = %llu", c->sb.version); 1989 } 1990 1991 kobject_get(&ca->kobj); 1992 ca->set = c; 1993 ca->set->cache[ca->sb.nr_this_dev] = ca; 1994 c->cache_by_alloc[c->caches_loaded++] = ca; 1995 1996 if (c->caches_loaded == c->sb.nr_in_set) 1997 run_cache_set(c); 1998 1999 return NULL; 2000 err: 2001 bch_cache_set_unregister(c); 2002 return err; 2003 } 2004 2005 /* Cache device */ 2006 2007 void bch_cache_release(struct kobject *kobj) 2008 { 2009 struct cache *ca = container_of(kobj, struct cache, kobj); 2010 unsigned int i; 2011 2012 if (ca->set) { 2013 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); 2014 ca->set->cache[ca->sb.nr_this_dev] = NULL; 2015 } 2016 2017 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); 2018 kfree(ca->prio_buckets); 2019 vfree(ca->buckets); 2020 2021 free_heap(&ca->heap); 2022 free_fifo(&ca->free_inc); 2023 2024 for (i = 0; i < RESERVE_NR; i++) 2025 free_fifo(&ca->free[i]); 2026 2027 if (ca->sb_bio.bi_inline_vecs[0].bv_page) 2028 put_page(bio_first_page_all(&ca->sb_bio)); 2029 2030 if (!IS_ERR_OR_NULL(ca->bdev)) 2031 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2032 2033 kfree(ca); 2034 module_put(THIS_MODULE); 2035 } 2036 2037 static int cache_alloc(struct cache *ca) 2038 { 2039 size_t free; 2040 size_t btree_buckets; 2041 struct bucket *b; 2042 2043 __module_get(THIS_MODULE); 2044 kobject_init(&ca->kobj, &bch_cache_ktype); 2045 2046 bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); 2047 2048 /* 2049 * when ca->sb.njournal_buckets is not zero, journal exists, 2050 * and in bch_journal_replay(), tree node may split, 2051 * so bucket of RESERVE_BTREE type is needed, 2052 * the worst situation is all journal buckets are valid journal, 2053 * and all the keys need to replay, 2054 * so the number of RESERVE_BTREE type buckets should be as 
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(array_size(sizeof(struct bucket),
					       ca->sb.nbuckets))) ||
	    !(ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
						     prio_buckets(ca), 2),
					 GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

static int register_cache(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s", ca->cache_dev_name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);

static bool bch_is_open_backing(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned int i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev) {
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
		goto err;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	err = "failed to register device";
	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		if (!ca)
			goto err_close;

		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call = bcache_reboot,
	.priority = INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
	if (!bcache_kobj)
		goto err;

	if (bch_request_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	bch_debug_init(bcache_kobj);
	closure_debug_init();

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);
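
/*
 * Example (an illustrative sketch, not compiled as part of this file):
 * bcache_init() above creates the "register" and "register_quiet" sysfs
 * files under /sys/fs/bcache/, and register_bcache() handles whatever
 * device path userspace writes to them.  The userspace side can be as
 * small as the following program; the device path "/dev/sdb" is only a
 * placeholder for a device already formatted for bcache, and the error
 * handling is trimmed to the essentials:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// Placeholder path; substitute the real cache or backing device.
 *		const char *dev = "/dev/sdb";
 *		int fd = open("/sys/fs/bcache/register", O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open /sys/fs/bcache/register");
 *			return 1;
 *		}
 *		// register_bcache() parses this path, opens the block device,
 *		// reads its superblock and registers it with the module.
 *		if (write(fd, dev, strlen(dev)) < 0) {
 *			perror("write");
 *			close(fd);
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 *
 * Writing to "register_quiet" takes the same path through register_bcache(),
 * except that the ERR_PTR(-EBUSY) branch above returns quietly instead of
 * reporting an error when the device is already open.
 */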